From a57f5a30948cfa439b16dd870e64dda297451a33 Mon Sep 17 00:00:00 2001
From: Denis Efremov
Date: Sun, 4 Sep 2022 01:28:56 +0400
Subject: [PATCH 001/452] G973F - HVG4

Signed-off-by: Denis Efremov
---
 drivers/dma-buf/dma-buf-trace.c               |    14 +
 drivers/dma-buf/dma-buf.c                     |     8 +-
 drivers/hid/Kconfig                           |    10 +-
 drivers/hid/hid-chicony.c                     |     8 +-
 drivers/hid/hid-corsair.c                     |     7 +-
 drivers/hid/hid-elo.c                         |     3 +
 drivers/hid/hid-holtek-kbd.c                  |     9 +-
 drivers/hid/hid-holtek-mouse.c                |     9 +
 drivers/hid/hid-lg.c                          |    10 +-
 drivers/hid/hid-prodikeys.c                   |    10 +-
 drivers/hid/hid-roccat-arvo.c                 |     3 +
 drivers/hid/hid-roccat-isku.c                 |     3 +
 drivers/hid/hid-roccat-kone.c                 |     3 +
 drivers/hid/hid-roccat-koneplus.c             |     3 +
 drivers/hid/hid-roccat-konepure.c             |     3 +
 drivers/hid/hid-roccat-kovaplus.c             |     3 +
 drivers/hid/hid-roccat-lua.c                  |     3 +
 drivers/hid/hid-roccat-pyra.c                 |     3 +
 drivers/hid/hid-roccat-ryos.c                 |     3 +
 drivers/hid/hid-roccat-savu.c                 |     3 +
 drivers/hid/hid-uclogic.c                     |     3 +
 drivers/hid/wacom_sys.c                       |    19 +-
 drivers/media/platform/exynos/mfc/mfc.c       |     4 +-
 drivers/media/platform/exynos/mfc/mfc_qos.c   |     4 +-
 .../brcm/bbdpl/r_os/bbd_patch_file_beyond.h   | 35093 ++++++++--------
 .../brcm/bbdpl/r_os/bbd_patch_file_davinci.h  | 28418 ++++++-------
 drivers/sensorhub/brcm/ssp_firmware.c         |     4 +-
 drivers/video/fbdev/exynos/dpu20/decon_core.c |    15 +-
 .../video/fbdev/exynos/dpu20/displayport.h    |    10 +
 drivers/vision/iva/iva_mem.c                  |     5 +-
 include/linux/hid.h                           |     5 +
 net/ipv4/igmp.c                               |     2 +
 security/sdp/sdp_mm.c                         |     5 +
 sound/soc/samsung/abox/abox_mmapfd.c          |     2 -
 sound/soc/samsung/abox/abox_rdma.c            |     4 +-
 sound/soc/samsung/abox/abox_wdma.c            |     4 +-
 36 files changed, 31993 insertions(+), 31722 deletions(-)

diff --git a/drivers/dma-buf/dma-buf-trace.c b/drivers/dma-buf/dma-buf-trace.c
index 5a65629e08c5..795ce69f4614 100644
--- a/drivers/dma-buf/dma-buf-trace.c
+++ b/drivers/dma-buf/dma-buf-trace.c
@@ -378,6 +378,10 @@ void dmabuf_trace_free(struct dma_buf *dmabuf)
 	mutex_lock(&trace_lock);
 
 	buffer = dmabuf_trace_get_buffer(dmabuf);
+	if (!buffer) {
+		mutex_unlock(&trace_lock);
+		return;
+	}
 
 	list_for_each_entry_safe(ref, tmp, &buffer->ref_list, buffer_node)
 		dmabuf_trace_free_ref_force(ref);
@@ -410,6 +414,11 @@ int dmabuf_trace_track_buffer(struct dma_buf *dmabuf)
 	}
 
 	buffer = dmabuf_trace_get_buffer(dmabuf);
+	if (!buffer) {
+		ret = -EINVAL;
+		goto err;
+	}
+
 	ref = dmabuf_trace_get_ref(buffer, task);
 	if (IS_ERR(ref)) {
 		/*
@@ -459,6 +468,11 @@ int dmabuf_trace_untrack_buffer(struct dma_buf *dmabuf)
 	}
 
 	buffer = dmabuf_trace_get_buffer(dmabuf);
+	if (!buffer) {
+		ret = -EINVAL;
+		goto err_unregister;
+	}
+
 	ref = dmabuf_trace_get_ref_noalloc(buffer, task);
 	if (!ref) {
 		ret = -ENOENT;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4f7c62714945..6257c09e7d6a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -471,6 +471,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 		goto err_dmabuf;
 	}
 
+	ret = dmabuf_trace_alloc(dmabuf);
+	if (ret)
+		goto err_file;
+
 	file->f_mode |= FMODE_LSEEK;
 	dmabuf->file = file;
 
@@ -481,10 +485,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
 
-	dmabuf_trace_alloc(dmabuf);
-
 	return dmabuf;
 
+err_file:
+	fput(file);
 err_dmabuf:
 	kfree(dmabuf->exp_name);
 err_expname:
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 184a14655f98..9974ff59a534 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -191,14 +191,14 @@ config HID_CHERRY
 
 config HID_CHICONY
 	tristate "Chicony 
devices" - depends on HID + depends on USB_HID default !EXPERT ---help--- Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" - depends on HID && USB && LEDS_CLASS + depends on USB_HID && LEDS_CLASS ---help--- Support for Corsair devices that are not fully compliant with the HID standard. @@ -209,7 +209,7 @@ config HID_CORSAIR config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" - depends on HID && SND + depends on USB_HID && SND select SND_RAWMIDI ---help--- Support for Prodikeys PC-MIDI Keyboard device support. @@ -448,7 +448,7 @@ config HID_LENOVO config HID_LOGITECH tristate "Logitech devices" - depends on HID + depends on USB_HID default !EXPERT ---help--- Support for Logitech devices that are not fully compliant with HID standard. @@ -809,7 +809,7 @@ config HID_SAITEK config HID_SAMSUNG tristate "Samsung InfraRed remote control or keyboards" - depends on HID + depends on USB_HID ---help--- Support for Samsung InfraRed remote control or keyboards. diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index 397a789a41be..218f0e090f63 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -61,8 +61,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - + struct usb_interface *intf; + + if (!hid_is_usb(hdev)) + return rdesc; + + intf = to_usb_interface(hdev->dev.parent); if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { /* Change usage maximum and logical maximum from 0x7fff to * 0x2fff, so they don't exceed HID_MAX_USAGES */ diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 9ba5d98a1180..d8cf08b6b31c 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) int ret; unsigned long quirks = id->driver_data; struct corsair_drvdata *drvdata; - struct usb_interface *usbif = to_usb_interface(dev->dev.parent); + struct usb_interface *usbif; + + if (!hid_is_usb(dev)) + return -EINVAL; + + usbif = to_usb_interface(dev->dev.parent); drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), GFP_KERNEL); diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 5eea6fe0d7bd..c3ecac13e620 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) struct elo_priv *priv; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 6e1a4a4fc0c1..a64f73f9811f 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -138,12 +138,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, static int holtek_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - int ret = hid_parse(hdev); + struct usb_interface *intf; + int ret; + + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (!ret) ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + intf = to_usb_interface(hdev->dev.parent); if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) { struct hid_input 
*hidinput; list_for_each_entry(hidinput, &hdev->inputs, list) { diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 78b3a0c76775..27c08ddab0e1 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -65,6 +65,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +static int holtek_mouse_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + if (!hid_is_usb(hdev)) + return -EINVAL; + return 0; +} + static const struct hid_device_id holtek_mouse_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, @@ -86,6 +94,7 @@ static struct hid_driver holtek_mouse_driver = { .name = "holtek_mouse", .id_table = holtek_mouse_devices, .report_fixup = holtek_mouse_report_fixup, + .probe = holtek_mouse_probe, }; module_hid_driver(holtek_mouse_driver); diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 52026dc94d5c..e75211211803 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -714,12 +714,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *iface; + __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + + iface = to_usb_interface(hdev->dev.parent); + iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 49c4bd34b3c5..cdc1c35bdbe5 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -795,12 +795,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *intf; + unsigned short ifnum; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; + if (!hid_is_usb(hdev)) + return -EINVAL; + + intf = to_usb_interface(hdev->dev.parent); + ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c index 329c5d1270f9..fb545a11214f 100644 --- a/drivers/hid/hid-roccat-arvo.c +++ b/drivers/hid/hid-roccat-arvo.c @@ -347,6 +347,9 @@ static int arvo_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c index 02db537f8f3e..c07a7ea8a687 100644 --- a/drivers/hid/hid-roccat-isku.c +++ b/drivers/hid/hid-roccat-isku.c @@ -327,6 +327,9 @@ static int isku_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index bf4675a27396..e102e06ad14c 
100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -743,6 +743,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 09e8fc72aa1d..b63de4c5b5dd 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c @@ -434,6 +434,9 @@ static int koneplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c index 07de2f9014c6..ef9508822e5f 100644 --- a/drivers/hid/hid-roccat-konepure.c +++ b/drivers/hid/hid-roccat-konepure.c @@ -136,6 +136,9 @@ static int konepure_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 317c9c2c0a7c..6256c211398a 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -504,6 +504,9 @@ static int kovaplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c index ac1a7313e259..13ae2a7d176d 100644 --- a/drivers/hid/hid-roccat-lua.c +++ b/drivers/hid/hid-roccat-lua.c @@ -163,6 +163,9 @@ static int lua_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index b30aa7b82bf8..027aa9d0ec1f 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -452,6 +452,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c index 47cc8f30ff6d..fda4a396a12e 100644 --- a/drivers/hid/hid-roccat-ryos.c +++ b/drivers/hid/hid-roccat-ryos.c @@ -144,6 +144,9 @@ static int ryos_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c index 6dbf6e04dce7..0230fb54f08a 100644 --- a/drivers/hid/hid-roccat-savu.c +++ b/drivers/hid/hid-roccat-savu.c @@ -116,6 +116,9 @@ static int savu_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index e3e6e5c893cc..0aa78711bf39 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -941,6 +941,9 @@ static int uclogic_probe(struct hid_device *hdev, struct usb_device *udev = hid_to_usb_dev(hdev); struct uclogic_drvdata *drvdata; + if (!hid_is_usb(hdev)) + return -EINVAL; + /* * libinput requires the pad interface to be on a different node * than the pen, so 
use QUIRK_MULTI_INPUT for all tablets. diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 18d5b99d13f1..fc6560a7b4c7 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -571,7 +571,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, * Skip the query for this type and modify defaults based on * interface number. */ - if (features->type == WIRELESS) { + if (features->type == WIRELESS && intf) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) features->device_type = WACOM_DEVICETYPE_WL_MONITOR; else @@ -2042,7 +2042,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { char *product_name = wacom->hdev->name; - if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) { + if (hid_is_usb(wacom->hdev)) { struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); product_name = dev->product; @@ -2273,6 +2273,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_destroy_battery(wacom); + if (!usbdev) + return; + /* Stylus interface */ hdev1 = usb_get_intfdata(usbdev->config->interface[1]); wacom1 = hid_get_drvdata(hdev1); @@ -2552,8 +2555,6 @@ static void wacom_mode_change_work(struct work_struct *work) static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct usb_device *dev = interface_to_usbdev(intf); struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; @@ -2586,8 +2587,14 @@ static int wacom_probe(struct hid_device *hdev, wacom_wac->hid_data.inputmode = -1; wacom_wac->mode_report = -1; - wacom->usbdev = dev; - wacom->intf = intf; + if (hid_is_usb(hdev)) { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *dev = interface_to_usbdev(intf); + + wacom->usbdev = dev; + wacom->intf = intf; + } + mutex_init(&wacom->lock); INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work); INIT_WORK(&wacom->wireless_work, wacom_wireless_work); diff --git a/drivers/media/platform/exynos/mfc/mfc.c b/drivers/media/platform/exynos/mfc/mfc.c index 62c15ae3430f..45ec17e38dcb 100644 --- a/drivers/media/platform/exynos/mfc/mfc.c +++ b/drivers/media/platform/exynos/mfc/mfc.c @@ -464,13 +464,13 @@ static int mfc_open(struct file *file) enum mfc_node_type node; struct video_device *vdev = NULL; - mfc_debug(2, "mfc driver open called\n"); - if (!dev) { mfc_err_dev("no mfc device to run\n"); goto err_no_device; } + mfc_info_dev("mfc driver open called\n"); + if (mutex_lock_interruptible(&dev->mfc_mutex)) return -ERESTARTSYS; diff --git a/drivers/media/platform/exynos/mfc/mfc_qos.c b/drivers/media/platform/exynos/mfc/mfc_qos.c index a933e44c8138..7b3d25663768 100644 --- a/drivers/media/platform/exynos/mfc/mfc_qos.c +++ b/drivers/media/platform/exynos/mfc/mfc_qos.c @@ -647,8 +647,10 @@ void mfc_qos_off(struct mfc_ctx *ctx) return; } - if (ON_RES_CHANGE(ctx)) + if (ON_RES_CHANGE(ctx)) { + mutex_unlock(&dev->qos_mutex); return; + } #ifdef CONFIG_EXYNOS_BTS mfc_bw.peak = 0; diff --git a/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_beyond.h b/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_beyond.h index 2e782d90ba59..6f9644808307 100644 --- a/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_beyond.h +++ b/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_beyond.h @@ -1,6 +1,6 @@ "\n" -"\n" -"\n" +"\n" +"\n" "\n" "\n" "\n" @@ 
-9,7 +9,7 @@ "\n" "\n" "\n" -"\n" +"00DFF800F069240D00DFF800F021B60A00DFF800F085AF\n" +"0A00F8B504460D461646FFF7EEFF04270168896E884728\n" +"B9FFF7E7FF0168096F8847B0B16078012813D1E078FF28\n" +"18BF00272079FF2818BF01276079FF2818BF0227A079FF\n" +"2818BF0327E079FF2818BF0427314605EB8700BDE8F440\n" +"8068FFF7C8BFF8B504460D461646FFF7BEFF0427016889\n" +"6E884728B9FFF7B7FF0168096F8847B0B16078012813D1\n" +"E078FF2818BF00272079FF2818BF01276079FF2818BF02\n" +"27A079FF2818BF0327E079FF2818BF042705EB87003146\n" +"8068FFF79AFF3046BDE8F240FFF799BF000000680A4940\n" +"1A022805D9C01E06D00849401A032805D80FF220007047\n" +"0FF22000704701A070470000000000000C000030FDFFFE\n" +"00E4E70000343737355F464600343737355F42300010B5\n" +"82B0049C009400F002F813BD000038B50546107808280C\n" +"4653D8DFE800F0051052105238411D2B004F2020803C20\n" +"60800020207160710720A07103203FE040F25F10208038\n" +"20608000202071012060710720A071032032E040F23620\n" +"20804FF40D70608000202071012060710720A071042024\n" +"E040F6132020800720608000202071012060710720A071\n" +"022017E045202080322060800220A073002020710AE04F\n" +"F44D7020804FF40D7060800320A0730020207101206071\n" +"0720A0710020E07107E09FF726F840F255120FF2C811AD\n" +"F790FBA8680168D1F88C108847002800F08C80049818B1\n" +"022848D03FD380E0A8680168C96C88475F49884277D0A8\n" +"680168C96C88475D49884270D0A8680168C96C88475A49\n" +"884269D0A8680168C96C88475849884262D0A8680168C9\n" +"6C8847554988425BD0A8680168C96C88475349884254D0\n" +"A8680168C96C8847504988424DD0A8680168C96C88474E\n" +"49884246D0292020810420A0724FF4807005E028202081\n" +"0520A0724FF4A070A08146E0A8680168C96C88473C4988\n" +"4230D0A8680168C96C88473949884229D0A8680168C96C\n" +"88473749884222D0A8680168C96C8847344988421BD0A8\n" +"680168C96C88473249884214D0A8680168C96C88472F49\n" +"88420DD0A8680168C96C88472D49884206D0A8680168C9\n" +"6C88472A498842F19403D04D8600B8D14D20B7E79EF78F\n" +"FF40F2BB1203E09EF78AFF40F2DB120FF29001ADF7F4FA\n" +"2188608801F00F020F2A0EBF002200F00302521C01F0F0\n" +"01F02903D0C0F3810189184A1CC0F3011080186179401C\n" +"012908BF401CA179C0B2491C884207D09EF763FF40F2E3\n" +"120FF24401ADF7CDFAA07908B1082809DB9EF757FFBDE8\n" +"38404FF4F2720FF22801ADF7BFBA31BD0D0000300C0000\n" +"300D00FF300C00FF300F0000300E0000300F00FF300E00\n" +"FF3070617463685F676C6D657372645F72656365697665\n" +"725F706172616D65746572735F70726F6772616D6D6572\n" +"2E63707000000000DFF800F0E5FC0600DFF800F0A12D04\n" +"00DFF800F03F2E0400DFF800F061560500DFF800F0CB53\n" +"0500DFF800F0D3530500DFF800F0EF1D0700DFF800F021\n" +"310400DFF800F05D2F0400DFF800F08D330400DFF800F0\n" +"6B16070000BF00BF2DE9F54F81B00C462DED088B8CB0AD\n" +"F5F05D0DF5E050D0F85402806830B99EF7E5FE5E220FF6\n" +"3C21ADF750FA65682FA8C9F734FA41F6F03AEA440C2751\n" +"463020A3F7F2FC08B1EDF7DDF87F1E0AF1300AF4D125B1\n" +"A06810B94FF0000812E0A0684FF0000870B92FA8009041\n" +"F6F0330DF5E0502268D0F854026B4404F12001FFF791FF\n" +"8046092104F12000B8F7B7FB042500B10325092104F120\n" +"00F6F7C3FEE0B10A2104F12000B8F7A9FB089000B16D1C\n" +"A845C0F2D582B8F10D0F10DB6B209EF7B9FE002800F0CC\n" +"829EF790FE0C230FF6A8126B219EF78EFEC2E20890E8E7\n" +"05200790089808B90420079000208DF81400092104F120\n" +"00B8F781FB68B1B8F1040F0ADACDF81C800B2104F12000\n" +"F6F78AFE10B101208DF8140041F67810079A41466844FF\n" +"F743FF012241460DF5B850FFF73DFFFFF73FFF41F67810\n" +"6844FFF73AFF41F288406844FFF739FF41F210206844FF\n" +"F734FF40F698706844FFF72FFF424641460DF55260FFF7\n" +"21FF41F288406844FFF720FF41F210206844FFF71BFF40\n" +"F698706844FFF716FF0DF55260FFF712FF27A81FAE0021\n" +"27911F91416071608160B160C160F16001613161416171\n" 
+"618161B161C161F16140F6B8406844ECF702FF0821EDF7\n" +"2BFAE1680EA8C9F763F902A8FFF7F8FE6846FFF7F5FE0A\n" +"A8FFF7F2FE014602AB6A460DF54A60FFF7EFFE207F0A90\n" +"9FEDE7AA00208DF81800DFEDE4AA0DF5FD60ECF7DAFE0D\n" +"F5F060ECF7DAFE0DF5E360ECF7D6FE0DF5D660ECF7D2FE\n" +"012000FA08F0401E4FF000090990B7EE00BA7CE09DED1F\n" +"0AECF798FFB0EE408A96ED010AECF792FFF0EE408A96ED\n" +"020AECF78CFFF194036051860078EE280A30EE808A96ED\n" +"030AECF784FF38EE000AF7EE000AB4EE600AF1EE10FA44\n" +"BF01208DF81800B0689DF70BFA41EC120B7068B0EE428A\n" +"F0EE628A9DF702FA41EC110BB0EE482AF0EE682A1F98B0\n" +"EE418AF0EE618AB0EE429AF0EE629A9DF7F1F941EC100B\n" +"B0EE492AF0EE692AB0EE481AF0EE681A3AA8ECF7C2FF02\n" +"460EA914A8ECF785FF14A90EA8BDF735FC40F6B8400521\n" +"6844C9F7DBF80546F0689DF7D1F9D5E900239DF7D5F9C5\n" +"E90001089878B140F6B84008216844C9F7C9F805463069\n" +"9DF7BFF9D5E900239DF7C3F9C5E9000109F101005FFA80\n" +"F90A98814580F29A819DF81800002840F095810BA8ECF7\n" +"8FFE002104F12000B8F756FA014600220BA8BDF72DFA10\n" +"2104F12000B8F74CFA014607220BA8BDF723FA0BA914A8\n" +"ECF77FFE0021039102910190009140F6B841A36994ED04\n" +"0B0EAA694448A8ECF77CFE002004900390029001900DF5\n" +"FD630DF5F06000900DF5D6620DF5E36140A8ECF76FFE09\n" +"991AA8ECF773FE0DF5E0501121D0F85402456804F12000\n" +"B8F715FA019041F6F0331AA800906B4428462D682D6840\n" +"AA48A9A847002840F038810125B8F1010FC0F2D7800DF5\n" +"E05A012229460DF5B850FFF7E5FD074629B20DF5E360C9\n" +"F747F8834629B20DF5D660C9F741F8024653681268DBE9\n" +"00019EF7DAFC9EF7D4FC3860012241F6781029466844FF\n" +"F7C8FD0746012229460DF5FD60ECF7BDFD416800689EF7\n" +"C1FC3860022241F6781029466844FFF7B5FD0746032229\n" +"460DF5FD60ECF7AAFD416800689EF7AEFC3860032241F6\n" +"781029466844FFF7A2FD0746072229460DF5FD60ECF797\n" +"FD416800689EF79BFC3860042241F6781029466844FFF7\n" +"8FFD0746052229460DF5FD60ECF784FD416800689EF788\n" +"FC3860089890B141F67810052229466844FFF77AFD0746\n" +"082229460DF5FD60ECF76FFD416800689EF773FC3860B9\n" +"F1000F04BF9AF9000000281DD129B20DF5F060C8F7CAFF\n" +"416800689EF762FC00EE100A30EE0AAA29B20DF5F060C8\n" +"F7BDFF416800689EF755FC00EE900AB0EE6A0AECF70BFE\n" +"F0EE40AA2A4629460DF55260FFF743FD074629B20DF5F0\n" +"60C8F7A5FF012241680068DFF8E0339EF732FC14D829B2\n" +"0DF5F060C8F798FF0246002053681268DFF8C413A3F710\n" +"FC9EF72AFC00EE100A04E000BF00000000B0EE4B0A6D1C\n" +"A84587ED000A0AF1300ABFF62BAF062104F12000B8F725\n" +"F900B341F6781240F698700DF552636A4401216844FFF7\n" +"09FD41F6781240F6987141F210206A4469446844FFF702\n" +"FD40F69871F19403F054860041F288400DF5B852694468\n" +"44FFF7F8FC16E041F6781341F6781241F210206B446A44\n" +"01216844FFF7E7FC41F6781241F288400DF5B8536A4401\n" +"216844FFF7DCFC0798002501280FDB0DF19C0A6D1C41F2\n" +"8840012229466844FFF7CAFC00684AF8040B07988542F1\n" +"DB41F210201FAA27A96844FFF7C9FC00287FF4EAAD0CB0\n" +"0DF5F05D0020F6E141F6F030684408EB480100EB011010\n" +"F9200C032808D10798A8EB0000012540B2022802DAAA46\n" +"02E000254FF0000A39A8ECF72FFD02460DF5E050D0F854\n" +"02416840F264606844ECF728FD09A8ECF71DFD0A2104F1\n" +"2000B8F79CF80146002209A8BDF773F80DF5E0500122D0\n" +"F85402806890F8201009A8BDF768F89DF81410032209A8\n" +"BDF762F80422514609A8BDF75DF80DF5E050D0F8540200\n" +"68BDF72AFB06462FA939A8ECF7FDFC40F6B84005216844\n" +"C8F7C3FE024653681268D4E904019CF7BEFFCDE90C0109\n" +"A914A8ECF7EEFC0390002104F11800029005910CA80190\n" +"04910EA8009040F264610DF5E05039ABD0F85402806890\n" +"ED020B40F21C60324669446844ECF7D6FCFFA840F21C61\n" +"FF306944D930FFF73AFC0DF5E0500EA9D0F85802BDF7DF\n" +"F940F6B84005216844C8F785FE02460DF5E050D0F85802\n" +"5368126800F11006D4E904019CF77AFFC6E90201A0699C\n" 
+"F77DFF41EC100BF6F78DFD40F6B840B0EE408AF0EE608A\n" +"08216844C8F765FE024651EC180B536812689CF760FFC6\n" +"E904010DF5E05040F21C61D0F8580269445430ECF73AFF\n" +"0DF5E0510120D1F8581281F89C000DF5E050FFA9D0F858\n" +"02FF31D931A030ECF729FF2FA8BCF7D2FF5FFA80F9B9F1\n" +"000F07D19EF7DBFA4FF4D6720FF22821ACF745FE484600\n" +"EE100AB8EE400A8AEE008AB5EE408AF1EE10FA07DC9EF7\n" +"C7FA4FF4D7720FF20021ACF731FEB0EE480AECF7B1FB0D\n" +"F5E050B0EE40AAD0F8580200F1E80B1AEE100A9CF707FF\n" +"CBE90201B0EE6A0AECF79FFB10EE100A9CF7FDFECBE904\n" +"014DF804CD0DF580609FED270A90ED948A5DF804CBB4EE\n" +"408AF1EE10FA4CBF4FF0010A4FF0000A132104F12000B7\n" +"F795FF9FED1E9B38B1152104F12000B7F78DFF08B19FED\n" +"1B9B28EE0A0A10EE100A9CF7D2FE02460B4651EC190B9E\n" +"F766FA94BF01270027D6E90001A3F74FFA00F2E7301249\n" +"88428041C00F8DF80000089858B1B9F1060F08DAD6E904\n" +"0121F000410022034B9EF746FA12D2012011E000BF0000\n" +"59400000F03F0000204200000000000079400000000000\n" +"C08240E774020000209DF8002007EA0A011140014214BF\n" +"01200020F19403805886008BF801008BF8000075764146\n" +"089830760DF5E050D0F8580200F580763046ECF743FE0D\n" +"F5E0502FA9D0F858022C30ECF7C7FC9DF818008BF80200\n" +"41463046ECF733FE0C2104F12000B7F71EFF002845D001\n" +"224146D7A8FFF7EFFA0799012239A8FFF7EAFA07980024\n" +"01280CDB1FAD641C0122214639A8FFF7F3FA55F8041B01\n" +"6007988442F3DB41F6781139AA6944D7A8FFF7EEFA0125\n" +"B8F1010F1FDB29B23046C8F746FD0446012229460DF5B8\n" +"50FFF7D7FA074601222946D7A8FFF7D1FA97ED000AD0ED\n" +"000A30EE600A10EE100A9CF729FEC4E900016D1CA845DF\n" +"DA0CB00DF5F05D0120BDEC088B03B0BDE8F08F00007061\n" +"7463685F676C70655F6C73712E63707000005761726E69\n" +"6E673A204C73506F732063616E6E6F74206F7065726174\n" +"652077697468206D6F7265207468616E202564206D6561\n" +"737572656D656E74730A000000DFF800F0953B0300DFF8\n" +"00F065F1020008B400B501AB00F003F85DF808FB0000F8\n" +"B504460D4616461F4628469EF7A6F910E0000008B400B5\n" +"01AB00F003F85DF808FB0000F8B504460D4616464FEA03\n" +"07FFF7D6FF98B1206801684968884730B13B4632462946\n" +"204600F00AF806E020680468E4683B4632462946A04701\n" +"20F2BD2DE9F04F0D460FF23421E1B0B1E8C010044605A8\n" +"A0E8C0100FF22C200088ADF8080004F5B576B6F90400C0\n" +"F5AF773FB2001939460E30C5F757FB00B2B6F90410B842\n" +"08DA002806D44018B08000B2B0F5AF7F09DB02E04FF4AF\n" +"70B0803C2030700A2070700020B070B6F90400B0F5AF7F\n" +"06DD9EF71FF981220FF2D011ACF78AFC0020B07004F10E\n" +"0AB6F9040000190E300490002705F0070003900FE038B2\n" +"01280ADB0A1A001900F10E0104F10E00C6F7AFF9B088C0\n" +"1BB08008F1010A0498A0EB0A020A215046FFF75FFF5FEA\n" +"000800F0AF80A07B09281CBF07287E285FD02D2806D1E0\n" +"7B2D280CD1207C002857D110E0212804BFE07B402803D1\n" +"207C232808D14DE0E07B232801BF207C4028607C212845\n" +"D004F10E00A5F7B5F9092837D2E80807283EDA4FF02609\n" +"012808BF4FF0240920469EF7EFF8039A05A94B4651563E\n" +"4A01913D49B0FBF1F102FB110000904FF4AF710FF21412\n" +"08A8A3F790F800B2414201F25B19A8EB0A01491C0FFA89\n" +"F90FFA81FA09EB0001ADF802104FF0000B0AEB0001ADF8\n" +"001008A9401801902BE0207D232801BF607D4028A07D21\n" +"28BFD10020DDE74FF02309C3E70BEB040000F10E010198\n" +"4A46A1F758FCBDF9020008AA5C2181544F44401C02A908\n" +"AA00B2F19403105C860080180A780270CB444A78427008\n" +"A920680268926890470FFA8BFBD34516DA09EB0B008245\n" +"DADA0BEB040000F10E010198AAEB0B02A1F732FCBDF800\n" +"00A0EB0B000AEB0701401EA1EB0B07D6E7B6F904000146\n" +"0129FFF64AAF3AB28242FFF638AF002042E761B0BDE8F0\n" +"8F000040420F004D414345574E4944000000000A000000\n" +"70617463685F676C75746C5F7574696C735F76612E6370\n" +"700000000025632530366C642563200000DFF800F0C31C\n" +"0700DFF800F06B2E0400DFF800F0952E0400DFF800F05F\n" 
+"1E07002DE9F0472DED028B8AB088460446149D159E169F\n" +"179991469A46B0EE408AF0EE608A06A8ECF790F9199905\n" +"910390B0EE480A18990491029701960095F0EE680A5346\n" +"4A464146204600F006F80AB0BDEC028BBDE8F08700002D\n" +"E9F54F81B088462DED088BA2B0ADF5E05D0DF5E0500DF5\n" +"E055D0F8E8000DF5E0570DF5E0565FFA80FBD5F8DC50D7\n" +"F8E070D6F8EC60BBF1000F9946B0EE409AF0EE609A38D1\n" +"41F6482441F648266C446E444FF00C0A31463020A2F7D3\n" +"FD08B1ECF7BEF93036BAF1010AF4D1494612A8BCF7E2FD\n" +"41F648266E4410E012A96846BCF7E6FD0DF5E0536A46D3\n" +"F8B03021464046FFF779FF12A8BCF7D2FD303412A8BCF7\n" +"D2FD28B9A01B302190FBF1F00C28E4DB41F648266E440A\n" +"A8ECF7C1F80C21B7F7A2FC08210AA8B7F79EFC41F29050\n" +"6844ECF761F841F228506844ECF760F80DF5A650ECF75C\n" +"F841F258406844ECF757F80AA912A8ECF7ABF800210DF5\n" +"E052039102910190009141F228403B68D2F8D82095ED00\n" +"0B41F2584169446844ECF7A3F800200490039002900190\n" +"009041F2905341F2285141F208406B440DF5A652694468\n" +"44ECF794F84846BCF7B5FC0C21C5F740FC012101FA00F0\n" +"411E41F2F4306844ECF78DF80146D8F848000091002201\n" +"9241F2084205682D6841F2284133466A446944A8470500\n" +"18D06C209DF740FF60B14846BCF790FC04469DF715FF00\n" +"942B460FF290526C219DF712FF0DA8ECF793F801464046\n" +"ECF74BFBA8E24846BCF77BFC45B20DF5E0500D2DA8BF0C\n" +"25D0F8E4000321B7F700FC04460DF5E0500421D0F8E400\n" +"B7F7F8FBDFED708A18B1F2EE048A01241CE0D4B10DF5E0\n" +"500121D0F8E400B7F7E9FB90B10C2D10DAF2EE040AB0EE\n" +"608A51EC190B9DF7CEFE00EE100AF0EE480AECF784F8F0\n" +"EE408A00E00024494608F10400BCF7F3FC9FED5D8A032D\n" +"88ED0C8A88ED0D8A88ED0E8A88ED0F8A88ED108A88ED11\n" +"8AC8ED0B8AC0F2578241F27C106844FEF797FF4FF00509\n" +"1CB1F19403A05F86000C2DB8BF6A1C00DB2A4652B240F6\n" +"047011466844FEF780FFFEF782FF01270836B7EE009A41\n" +"F29050012239466844EBF783FF416800689DF787FE09EE\n" +"900A41F29050032239466844EBF776FF416800689DF77A\n" +"FE0AEE100A41F29050072239466844EBF769FF41680068\n" +"9DF76DFE0AEE900A41F29050052239466844EBF75CFF41\n" +"6800689DF760FE0BEE100A41F29050082239466844EBF7\n" +"4FFF416800689DF753FE0BEE900A41F27C100122394668\n" +"44FEF746FF0222C0ED009A41F27C1039466844FEF73DFF\n" +"032280ED00AA41F27C1039466844FEF734FF0422C0ED00\n" +"AA41F27C1039466844FEF72BFF052280ED00BA41F27C10\n" +"39466844FEF722FFBBF1000FC0ED00BA40F604703A4639\n" +"46684419D0FEF716FF824696ED000B53EC102B51EC100B\n" +"9CF773FA02460B460020E449A2F7EFFD9DF709FECAF800\n" +"0007E00000FA440000C642FEF7FCFE80ED009A7F1C3036\n" +"BD42BFF667AF41F27C10052229466844FFF7DEFD002C58\n" +"D00C2D56DA41F27C106844FFF7D9FD41F27C106844FFF7\n" +"D8FD054641F27C10012229466844FEF7D8FE0021016041\n" +"F27C10022229466844FEF7CFFE0021016041F27C100322\n" +"29466844FEF7C6FEB2EE040A80EE280A80ED000A41F27C\n" +"10042229466844FEF7B9FE0021016041F27C1005222946\n" +"6844FEF7B0FE00210160BBF1000F40F604702A46294668\n" +"440ED0FEF7A4FE28EEA80AF7EE000A80EE800A80ED000A\n" +"41F27C10032229466844FEF795FE80ED009A0DF5E05000\n" +"21D0F8E400F5F7B4FD68B141F27C106844FFF77EFD0146\n" +"41F27C1004226844FFF76FFD4FF0040940F68C406844FE\n" +"F76CFE40F614204A4649466844FEF75DFE40F29C704A46\n" +"49466844FEF756FEFFA8FF304A4649462930FEF74FFE41\n" +"F27C1341F27C1240F68C406B446A4401216844FEF75BFE\n" +"ABA8FEF748FE40F6047341F27C126B446A440121FEF74F\n" +"FE41F27C1240F614206A44ABA96844FEF74AFE002605AD\n" +"0DAF05AC2EE0761C40F29C70324601216844FEF735FE0D\n" +"99016040F29C70324602216844FEF72CFE7968016040F2\n" +"9C70324603216844FEF723FEB968016040F29C70324604\n" +"216844FEF71AFEF968016040F29C70324605216844FEF7\n" +"11FE396901602D1D4E4511DA002005906060A060E06020\n" +"6185ED009A40F614200DAA05A96844FEF709FE0028BDD1\n" 
+"A8E00DA8FEF7EBFD40F29C7240F68C416A446944FEF7F7\n" +"FDFFA840F29C71FF300DAA69442930FEF7EEFDFFA8FF30\n" +"012201212930FEF7DFFD90ED000AEBF7DBFDFFA8FF3088\n" +"EDF19403306386000D0A022202212930FEF7D2FD90ED00\n" +"0AEBF7CEFDFFA8FF3088ED0E0A012201212930FEF7C5FD\n" +"0546FFA8FF30022202212930FEF7BDFD95ED000AD0ED00\n" +"0A30EE200AEBF7B5FDFFA8FF3088ED0C0A032203212930\n" +"FEF7ACFD90ED000AEBF7A8FDFFA8FF3088ED0F0A042204\n" +"212930FEF79FFD90ED000AEBF79BFDB9F1050F88ED100A\n" +"0DD1FFA8FF30052205212930FEF78FFD90ED000AEBF78B\n" +"FD88ED110A01E088ED118ABBF1000F18BF9FED208AF0EE\n" +"480A98ED0C0AEBF7A7FD88ED0C0AF0EE480A98ED0D0AEB\n" +"F79FFD88ED0D0AF0EE480A98ED0E0AEBF797FD88ED0E0A\n" +"F0EE480A98ED0F0AEBF78FFD88ED0F0AF0EE480A98ED10\n" +"0AEBF787FD88ED100AF0EE480A98ED110AEBF77FFD88ED\n" +"110A0DF5E0504146D0F8AC00FEF75AFD22B00DF5E05DBD\n" +"EC088B03B0BDE8F08F0000C8430000F03F476C5065446F\n" +"70733A3A476574446F70732063616C6C20746F20476C50\n" +"65436F6D707574655A524849663A3A436F6D7075746520\n" +"6661696C6564207769746820256420666F722025640A00\n" +"0010B50C46A1F7A2FD2146BDE81040A1F737BE000000BF\n" +"00BF10B504460121FFF7EFFF94F82C0830B120680FF254\n" +"0103689B682F229847012084F82C0810BD000000BF00BF\n" +"38B5044604F62805287930B920680FF22C0103689B683C\n" +"2298470020287104F58561206803682A681B6992B29847\n" +"0020286000212046BDE83440BFE770617463685F727063\n" +"5F656E67696E652E63707000000000DFF800F05D0C0200\n" +"DFF800F0D90B0200DFF800F01F0B020038B50446002002\n" +"2C009024DA614D286C400840002864FFF7E7FFA86C40F0\n" +"0100A864286C20F480002864286C0090009820F0007000\n" +"90009840EA4460009000982864286840F001002860FFF7\n" +"D1FF401E8041C043C00F32BD2DE9F04D06460127002E82\n" +"B000F09380FFF7C6FFDFF8248100F57A7AD8F8400040F0\n" +"0400C8F840004FF0000BD8F8500040F00100C8F85000D8\n" +"F8400040F48000C8F840000A209AF747FCD8F8400040F0\n" +"0100C8F84000A4F78FFF04460D46D8F80000C00708D4A4\n" +"F787FF001BA941594501D85045F3D30027CDF800B0D8F8\n" +"40000090009840F400400090009840F040000090009820\n" +"F080000090009820F4804000900098C8F8400077B3D8F8\n" +"08000105090D318040F2546010FB01F140F2DC5291FBF2\n" +"F1B1F5805FA8BF40F6FF713180D8F84010890717D50021\n" +"194A40F6FF7340F2DC5552F821402405240D26F8024F10\n" +"FB04F494FBF5F4B4F5805FA8BF1C46491C09293480EDDB\n" +"F19403C06686000120C8F80000FFF743FF00B90027D8F8\n" +"400040084000C8F8400064209AF7D8FBD8F85000400840\n" +"00C8F850003846BDE8F68D0000001020400C102040DFF8\n" +"00F0E98E0100DFF800F04D8D0100DFF800F02B8D0100DF\n" +"F800F0C1960100DFF800F08B8E0100DFF800F0838D0100\n" +"DFF800F0A7700100DFF800F0F3770100DFF800F09B7601\n" +"00DFF800F0D38E0100DFF800F0BF8E0100DFF800F0EB79\n" +"0100DFF800F0478F0100DFF800F007710100DFF800F085\n" +"5B0100DFF800F06F8C0100DFF800F0C5750100DFF800F0\n" +"45700100DFF800F051780100DFF800F013790100DFF800\n" +"F067710100DFF800F06D720100DFF800F02B150100DFF8\n" +"00F0598E0100DFF800F0FF710100DFF800F063780100DF\n" +"F800F031150100012100F01F029140DFF89C2606E00000\n" +"012100F01F029140DFF89026400942F820107047012100\n" +"F01F029140DFF88026F4E700002DE9F0410446002A8846\n" +"14BF4FF048704FF0C8709BF7D2FE06460F4620469BF7D1\n" +"FE04460D4640469BF7CCFE0022DFF850369BF7B7FE2246\n" +"2B469BF7B7FE0022DFF840369BF7AEFE02460B46304639\n" +"46A2F72AFABDE8F041ACF746BA38B5044601210120FFF7\n" +"46FFDFF81C56696A286AFFF744FF696E286EFFF744FF14\n" +"F003001CBF0720FFF742FFDFF80006016804F003021143\n" +"016031BD80B510F0030004D0022814D0032819D001BDFF\n" +"F733FFDFF8D8058088FFF732FFDFF8D005016841F40011\n" +"0160816A41F02801816201BDDFF8B8058088BDE80240FF\n" +"F720BFBDE80240DFF8AC05FFF716BF10B5DFF8A8452078\n" 
+"B8B1DFF894050078FFF7CEFF0E20FFF777FFDFF89405DF\n" +"F8941500680840DFF8901508600020207060700E20BDE8\n" +"104054E710BD70B5DFF8604504F15800FFF7F9FE28B9DF\n" +"F85C5528706870012070BDDFF86065306A000404D0E421\n" +"0FF27C5099F758FDDFF83C550020287000F018FA58B130\n" +"68000408D100F028FA28B12078032804D1FFF7DAFE08B9\n" +"002070BD2078032804BF0121FFF7D5FE0120687070BD70\n" +"B5DFF8F44404F15800FFF7C3FE20B9DFF8F00400210170\n" +"4DE0DFF8F454686C000405D040F20B110FF2105099F722\n" +"FD00F0E6F910B1686A000407D02078032804BF0021FFF7\n" +"AEFE002070BD0E20FFF7FDFE2868DFF8B064DFF8B01408\n" +"403060DFF8B4142868084328600020FFF79EFE0020FFF7\n" +"9FFEA86980B2B060686980B27060686B80B2F060207803\n" +"280ED1DFF88C040068000A00F0010080F001010020FFF7\n" +"8CFE01210120FFF788FEDFF854040121017000214170F1\n" +"9403506A8600012070BD38B5DFF83844DFF8405404F158\n" +"00FFF763FE08B9287008E0287840B92078032804BF0021\n" +"FFF760FE0020687031BD0E20FFF7AEFEFFF768FE0020FF\n" +"F759FE0020FFF75AFEDFF80C54DFF81014286808432860\n" +"2078FFF7DCFEDFF8F003816889B2A961416889B26961C0\n" +"6880B2686304F15800FFF74DFE04F11800B7E180B5FFF7\n" +"43FEDFF8E003DFF8E0138160DFF8DC134161BDE80240FF\n" +"F73FBE00002DE9F04107000C4618BF002C154698462BD0\n" +"55B3A1882068FFF734FE28B3A9882868FFF72FFE00B338\n" +"7870B17969E1B9032814D0DFF8686306F15800FFF7FEFD\n" +"B8B16FF00100BDE8F081F868DFF88413A0F51650884224\n" +"BF4FF46120F86007F10800FFF713FE0028E4D14FF0FF30\n" +"BDE8F0810E20FFF73EFEFFF7F8FD97EC060B86EC060B37\n" +"7843460022214606F15800FFF700FE00230122294606F1\n" +"1800FFF7F9FD3846FFF766FE3846FFF781FE802417B94F\n" +"F420740DE0032F0BD1317CC64CFF291CBF0320FFF7E9FD\n" +"B17A01220320FFF7E8FDB8480168A14301600E20FFF701\n" +"FE0020BDE8F081F8B504460D461646A3F7BAF8AA4F3346\n" +"2A46214607F15800FFF7D4FD040008D1B86DC0F3400020\n" +"B1A948016821F481710160A3F7A9F82046F2BD000030B4\n" +"9E4CA36DC3F3400323B1A14B1D6825F481751D6013460A\n" +"46014604F1580030BCFFF7B5BD00007CB5FFF7B5FD994C\n" +"9749606822680840114040F213120240884342EA000512\n" +"B12068104320609A48026810001CBF28469047A805874E\n" +"48D5206840F400702060206F00F003000328307020D1B0\n" +"7AFFF793FD206820F400302060317CFF291CBF0320FFF7\n" +"75FDB17A01220320FFF774FDA06F0203120BD004C2F345\n" +"31C00CD20C521E9241D243D20FFFF7A0FDF06071690800\n" +"1AD030788DF8000003280FD1606F0005000DADF8020060\n" +"6F0002000DADF80400206FC003400CADF8060003E00020\n" +"ADF8020001906846884740F2031005421CBF06F15800FF\n" +"F753FD15F0900F1CBF06F11800FFF74CFDA8030CD53078\n" +"032809D1307CFF2806D0606C000403D100210320FFF741\n" +"FD6560FFF742FD73BD000080B50120554A19B9516C0904\n" +"1CD11AE011F1010F0CD08BB25363136823F01003136053\n" +"6C9BB2994202D25168C9060BD5106840F0100010600020\n" +"5063102050600E20FFF733FD002002BD00003D49097800\n" +"20012907D007D3032902D038BF042002E0022000E00120\n" +"46490968014081420CBF01200020704732490978002001\n" +"2907D007D3032902D038BF042002E0022000E00120F194\n" +"03E06D86003B490968014081420CBF01200020704738B5\n" +"0E20FFF7F6FCFFF7B0FC0020FFF7A1FC0020FFF7A2FC27\n" +"4C29492068204D084320602878FFF725FD2878FFF740FD\n" +"1F48816889B2A161416889B26161C06880B2606305F158\n" +"00FFF794FC05F11800BDE83240FFF78EBC38B5114C04F1\n" +"5800FFF76CFC80B10E20FFF7C4FC0F4D287808B1FFF7C5\n" +"FF687828B12078032804BF0021FFF763FC012032BD0000\n" +"00E100E080E100E080E200E00000903F00003040D0B191\n" +"00A0401040D8B1910068389100CCC59100DFFF7F003440\n" +"104058401040DFFF3F00D840104068B291002D698600F1\n" +"68860081DA310080000200ECC591003040104070617463\n" +"685F6873692E630010B551B1012811BF02280A78C02422\n" +"4203D14A888C88A24202D24FF0FF3010BDEFF3108272B6\n" 
+"012828D15D4B1B68DB072CD45B4B1C6844F4003401281C\n" +"6002D0022828D042E008785C6960F397445C61586940F0\n" +"8040586148885C6860F30F045C608888996860F30F0199\n" +"60186840F480701860186840F0010026E00228D8D1474B\n" +"1B68C3F34003002BD2D082F310886FF0050010BD08785C\n" +"6960F311345C61586940F0004058614888DC6860F30F04\n" +"DC608888196960F30F011961186840F400701860186840\n" +"F00200186082F310889FF74CFC002010BD80B5012818BF\n" +"022802D04FF0FF3002BDEFF3108172B601282B4A1FD113\n" +"68DB0723D5012802D0022824D03BE0506920F47C005061\n" +"106820F040001060506920F0804050615068000C000450\n" +"609068000C0004906010684008400022E0022802BF1368\n" +"C3F34003002BDBD181F310886FF0050002BD506920F47C\n" +"305061106820F0800010605069400040085061D068000C\n" +"0004D0601069000C00041061106820F0020010601068C0\n" +"0707D41068C0F3400018B9106820F40030106081F31088\n" +"9FF7EDFB002002BD000060804040DFF800F04B470200DF\n" +"F800F059470200DFF800F055480200DFF800F0C5450200\n" +"DFF800F005450200DFF800F091440200DFF800F0EF4402\n" +"00DFF800F031440200DFF800F083450200DFF800F06345\n" +"0200DFF800F001440200DFF800F013450200DFF800F0AB\n" +"450200DFF800F071450200DFF800F057450200DFF800F0\n" +"5BF50100DFF800F083470200DFF800F0B3470200012100\n" +"F01F029140A04A05E0012100F01F029140DFF878224009\n" +"42F820107047012100F01F0291409A4AF5E770B582B00D\n" +"46044616466946FFF79AFF002D14BF80200020009921F0\n" +"800108430090002E14BF4FF480500020009921F4F19403\n" +"7071860080510843009069462046FFF787FF73BDF8B504\n" +"46022C0F4619DAC7B138790228BCBF7879022812DAB879\n" +"0428BCBF787A02280CDA3868322809D2F87A50B9F87905\n" +"28BFBF387A3328B87A022802DB4FF0FF30F2BD20467949\n" +"8847A82010FB04F07749425C2AB12046764988476FF001\n" +"00F2BD01224254451897E80F0005F16C0686E80F002046\n" +"FFF74DFF90F904603046FFF78EFF4FF0FF312046FFF747\n" +"FF2C2105F17C009CF764FD2046FFF743FF012201212046\n" +"FFF78CFF7A7939792046FFF73DFFB9792046FFF73DFF79\n" +"7A2046FFF73DFF20212046FFF73DFF40212046FFF73DFF\n" +"204658498847204658498847204657498847687818B900\n" +"212046FFF732FFF87A48BBB87918B1022806D002D307E0\n" +"002106E00021012704E00121002701E001210F46002C0C\n" +"BF0C200F20FFF75AFA002C0CBF0D2010203946FFF753FA\n" +"687850B195F87500411E8941C90F002C0CBF0B200E20FF\n" +"F746FA2046FFF707FF3046FFF730FF4FF0FF312046FFF7\n" +"03FF69462046FFF7CFFE009840F00100009069462046FF\n" +"F7CBFE3046FFF70CFF20462D4988470020F2BD000038B5\n" +"0024284D2878C8B10DF102012046FFF7E8FE69462046FF\n" +"F7E8FE2046FFF7E9FE28B9BDF80200BDF80010084301D0\n" +"002032BD2046FFF7E1FE0120E870641CA835E4B2022CDE\n" +"DB012032BD10B5164C207800281CBFE07800280AD00020\n" +"FFF79DFE0020FFF7CEFE0020FFF7B3FE0020E07014F8A8\n" +"0F00281CBFE07800280AD00120FFF78BFE0120FFF7BCFE\n" +"0120FFF7A1FE0020E07010BD00E100E080E100E080E200\n" +"E065EE010030B491007FEE010065EF010039F00100D1EE\n" +"0100DFF800F06D150100DFF800F01B800100DFF800F023\n" +"870100DFF800F0253A0100DFF800F0693B0100DFF800F0\n" +"DB780100DFF800F04B300100DFF800F0C9800100DFF800\n" +"F09B870100DFF800F037310100DFF800F0DB800100DFF8\n" +"00F0AF870100DFF800F053310100DFF800F067310100DF\n" +"F800F0B7800100DFF800F065870100DFF800F0C7300100\n" +"DFF800F0FD750100DFF800F099780100DFF800F01D3101\n" +"00DFF800F0D5310100DFF800F005780100DFF800F00577\n" +"0100DFF800F02F3B0100DFF800F09D750100DFF800F08F\n" +"3B0100012100F01F029140DFF8C02706E00000012100F0\n" +"1F029140DFF8B427400942F820107047012100F01F0291\n" +"40DFF8A427F4E7000030B4DFF8A0271268C0EB40118900\n" +"8B181C1D0025A5640FF2B87555F8200088500C20207060\n" +"70002083F8710030BC7047DFF8741701EBC000F1940300\n" +"7586004068AAF75BBA000070B50446DFF86057A0F73CFD\n" 
+"45F8340005EBC4060020AAF745FA706055F8340000281C\n" +"BF7068002806D1BDE87040CF210FF24C7098F766BF70BD\n" +"000038B5DFF82857044655F8340020B1FFF737FF002045\n" +"F8340005EBC400406820B1AAF71EFA002045F8340031BD\n" +"70B50446032804BFBDE87040FFF726BF022804BFBDE870\n" +"40FFF724BFDFF8DC66C0EB401130688D00281890F94800\n" +"012802D06FF0060070BD01212046FFF7C6F800212046FF\n" +"F712FF2046FFF7C3FF00212046FFF70FFF2046FFF710FF\n" +"30682818002290F852102046FFF7DCF83068281840790C\n" +"2806D0FFF705FF306828180079FFF700FF2046FFF767FF\n" +"002070BD0000DFF870162DE9F84351F8201001F1480401\n" +"F1440223681768002523EA07073B0680460BD5136843F0\n" +"8003136023681B0548BF0125116821F400611160C0EB40\n" +"1138054FEA8109DFF8246600D485B14046FFF752FF3068\n" +"484490F8780018B900214046FFF7AFF81DB1206840F400\n" +"60206030684844C26C100002D03946404690472760BDE8\n" +"F183000038B5DFF8E82552F8202002F10C0502F1080300\n" +"F11F04D9B1186840F02000186002F12C01186840F00800\n" +"1860286840F001002860086840F0110008604FF4086010\n" +"6460B2FFF7F4FE60B2BDE83240DDE62968490849002960\n" +"196821F008011960196821F02001196002F12C010A6822\n" +"F011020A600021DFF87825904760B2FFF7D7FE60B2BDE8\n" +"3240C8E6000038B50446084611461A46049B032C04D101\n" +"B0BDE83040FFF767BE022C04D101B0BDE83040FFF764BE\n" +"0093C4EB441513460A460146DFF82005006800EB850000\n" +"79FFF75AFE32BD00002DE9F84304462546032D884604D1\n" +"4046BDE8F243FFF750BE022D04D14046BDE8F243FFF74D\n" +"BEEFF3108972B60121DFF8E074FFF719F83968C5EB4510\n" +"86007118012081F8780089F31088414638683018EFF310\n" +"890079FFF737FE804672B638683018002180F878100079\n" +"FFF731FE58B92046FEF7B1FF38B100212046FEF7F4FF20\n" +"46FFF78BFE0CE0DFF88C0450F8250050F8441F21F08001\n" +"0160016841F40061016089F310884046BDE8F2832DE9F8\n" +"4F81464D46032D884614469A4606D15246A1B24046BDE8\n" +"F84FFFF706BE022D06D1524621464046BDE8F84FFFF701\n" +"BEEFF3108B72B60121DFF82874FEF7BDFF3968C5EB4510\n" +"86007118012081F878008BF31088534638683018224600\n" +"794146FFF7EBFD8046EFF3108472B638683018002180F8\n" +"78100079FFF7D3FD40B94846FEF753FF20B1F194039078\n" +"860000214846FEF796FF0CE0DFF8D80350F8250050F844\n" +"1F21F080010160016841F40061016084F310884046BDE8\n" +"F28F000030B4EFF3108272B6DFF8A83353F8203003F148\n" +"0425680D43256003F1400425680D43256053F8444F8C43\n" +"1C60C0EB40139800DE4B1B68C4582143C15082F3108830\n" +"BC704770B584B0044600208DF804000DF1060301A80090\n" +"02AA2046FFF796FD9DF804102046FFF795FD9DF8040010\n" +"B1012098F7D3FFCF4850F8245005F13C06306820F44030\n" +"3060306840F0405030609DF804000028306809D040F480\n" +"203060204600F0C5F9012098F74EFF02E020F480203060\n" +"306840F440303060BDF80610BDF8080040EA4130286010\n" +"216960286004B070BD38B5C0EB4011B44D8C0029686118\n" +"483103220A764A6A521C4A6201220A700121FFF712FD28\n" +"682018BDE83240407900F0A7BA000070B500240025A74E\n" +"30682818002180F87110401D90F94310012921D10078FF\n" +"F739FDD8B9306828180079FFF71BFDA8B9306828184079\n" +"FFF731FD00280ED0306828180079FFF72AFD40B12046FE\n" +"F78EFE002803D02046FFF725FD08B9002070BD641CE4B2\n" +"7C35022CCEDB306890F94800012807D101210020FEF77C\n" +"FE3168012081F87100306890F9C400012806D10121FEF7\n" +"70FE3168012081F8ED00012070BD2DE9F041044681487E\n" +"4F50F82450286CC4EB44114FEA8108396841444C3104F1\n" +"1F060861086888610220087501212046FEF750FE05F12C\n" +"00754A016821F01101016020460021904770B2FFF701FD\n" +"70B2FFF7F4FC38684044032100F8481F016A491C016200\n" +"2C0CBF06200920A969090A01F0010181F00101FEF738FE\n" +"2046BDE8F0410021FFF776BCF8B50124002500265C4F38\n" +"68301890F9481001292CD14079FFF7A8FCC8B938683018\n" +"0079FFF78AFC98B9386830184079FFF7A0FC68B1386830\n" 
+"180079FFF79AFC38B1E8B2FEF7FEFD18B1E8B2FFF796FC\n" +"70B93868301890F8710040B10021E8B2FEF7F4FD396871\n" +"18002081F8710000246D1C7C36022DC8DB0CB90020F2BD\n" +"386890F94800012804BF0020FFF775FF386890F9C40001\n" +"2808BFFFF76EFF0120F2BD38B50D4604466968FFF7B4FE\n" +"29462046FFF768FC2046BDE83240ADE4F8B5040004BF08\n" +"2099F738FC2546012D04BF092099F732FC01212046FFF7\n" +"58FC20462B49264F884700212046FFF700FC2046FFF751\n" +"FCC5EB4510860038683018416D2046FFF788FE38683018\n" +"00F150012046FFF739FC1C4850F825002221C162002200\n" +"F130010A6040F8342F194A127812B1194AF19403207C86\n" +"000A6002603868301890F858102046FEF7ADFD38683018\n" +"0168426E41F40061016011001FBFC16D0FF2500050F825\n" +"00814204D02046BDE8F840FFF718BCF1BD000000E100E0\n" +"80E100E080E200E0BC0B8700D8329100A4790100CF3401\n" +"0089330100BBE091000400100070617463685F75617274\n" +"2E630000000000000000000000000000000000000000DF\n" +"F800F0BD8C0100DFF800F06F7A0100864A014652F82120\n" +"02F10803D3F800C02CF0FE4C4CF02C4CC3F800C0102353\n" +"60002812BF0129002010627D4A02EBC101FF2048717047\n" +"000038B505464FF47A7000900024FFF7D4FF48BB3DB102\n" +"2D0BD007D3032D0BD14FF440740EE04FF440540BE04FF4\n" +"403408E04FF4401405E02A468A210FF2BC1098F737FD00\n" +"98401E00906848006820420AD100980028F5D12A46BDE8\n" +"384097210FF2981098F725BD31BDF8B514465E4A02EBC0\n" +"02FF25FF261171012916BF03290127002740B102280ED0\n" +"09D303280FD100259FB1012611E006257FB107260DE009\n" +"255FB10A2609E003253FB1042605E00246CA210FF24410\n" +"98F7FBFC54B101212846FEF7D0FC77B13046BDE8F44001\n" +"21FEF7C9BC2846FFF774FF27B13046BDE8F240FFF76EBF\n" +"F1BD00002DE9F04188462DED028B0020089CB8F5C81F16\n" +"461F469FED378A207002D2FFF757FF18B1012020709FED\n" +"338AB8EE480A10EE100A9AF7CAFB00EE108A0446B8EE40\n" +"0A0D4610EE100A9AF7C0FB00222E4B9AF7C0FB02460B46\n" +"20462946A0F73CFF9BF756FF00EE100AFDEEC00A10EE90\n" +"0A308080B200EE900AF8EE600A30EE600A10EE100A9AF7\n" +"A2FB0022204B9AF7A2FB41EC100BC3F78CF851EC100BA0\n" +"F720FF38803888402804DB002038803088401C3080BDEC\n" +"028BBDE8F081000038B50D46FF2D044615D0002128469A\n" +"F72BFB01208DF80000694600208DF80100284699F785FF\n" +"074800EBC400002145712046FEF77DFC31BD0000900100\n" +"002003A479010080E09100680041400000304000005040\n" +"70617463685F756172745F636F6D6D6F6E2E6300DFF800\n" +"F01B670100DFF800F09D660100DFF800F0C766010010B5\n" +"0446FFF7F0FF2046FFF7F1FF2046BDE81040FFF7F0BFDF\n" +"F800F06B400100DFF800F05B400100DFF800F0C5480100\n" +"DFF800F0CD48010038B50446EFF3108572B698F7ADFA01\n" +"20A9F7C2FC0020FFF7E3FF0020FFF7E4FF54B9FFF7E5FF\n" +"28B10749084A09680A6048600846FFF7D8FFFFF7DEFF85\n" +"F3108831BD000002487047C00B87007547BCBC66510800\n" +"DFF800F00D6D0100DFF800F0E1650100F19403B07F8600\n" +"DFF800F0E56B0100012100F01F029140554A400942F820\n" +"107047000010B504460A2C05DB024644210FF2481098F7\n" +"E0FB04EB84004D4901EB0010032201680260016010BDF8\n" +"B50279484D002402EB820311F1010F05EB031309D103F1\n" +"30010A6842F001020A609C63012159631DE003F1240531\n" +"B9286830B308211046FFF7B8FF20E003F1380603F13402\n" +"316017682D68A94201D2F9070FD503F130010B6843F001\n" +"030B6034600121116090F90400801C40B2FFF7A9FF06E0\n" +"03F13001086840084000086001242046F2BD000070B502\n" +"7902EB82030024254A02EB031511F1010F05F1340205F1\n" +"38030FD105F130010D6845F001050D601C600121116090\n" +"F90400801C40B2FFF780FF17E0196010686E6A8E4201D2\n" +"C00709D505F13001086840F0010008601C600120106006\n" +"E005F1300101240868400840000860204670BD000038B5\n" +"FFF759FF0024094D2846FEF716FB10B12846FEF72EFBE0\n" +"B2FFF751FF641C64350A2CF1DB31BD80E200E010301040\n" +"1CBA910070617463685F646D615F6C6C2E630000DFF800\n" 
+"F06D160100DFF800F07FF20200DFF800F0BD5D0100DFF8\n" +"00F0FB510100DFF800F001160100DFF800F0C3520100DF\n" +"F800F0FB200100DFF800F049280100DFF800F0EF400100\n" +"DFF800F00F410100DFF800F02F410100DFF800F0AD4001\n" +"00002220291ADB50F8043B9A1850F8043B9A1850F8043B\n" +"9A1850F8043B9A1850F8043B9A1850F8043B9A1850F804\n" +"3B9A1850F8043B9A1820392029E4DA0429A2BF50F8043B\n" +"9A18091FF8DA012907DB0123C900006803FA01F1491E08\n" +"40821810467047000080B500F07FF9FFF799FF82490870\n" +"01BDF0B5824D87B004462E6A21462920FFF791FF012028\n" +"704FF480110020FFF7B6FF6860002028617A497A48A860\n" +"E960002818BF002902D0FFF7A9FF286105F11401002099\n" +"F78DF905F11801012099F788F905F11C01022099F783F9\n" +"686A38B3286A694F002818BFB84221D104F1F800BE4201\n" +"69009101680191416802918068039004F1440041680591\n" +"006806902068049003D072B60320FFF751FF696A684688\n" +"47BE421AD00120FFF74DFF07B0F0BD72B60320FFF743FF\n" +"5348017809B9012101700078FFF743FF54485449446118\n" +"3000F2FF3000090001FFF73DFF07B0F0BD00007CB54D4C\n" +"00202061FFF738FF012813D00120FFF733FF01280ED002\n" +"20FFF72EFF012809D00320FFF729FF012804D00420FFF7\n" +"24FF012801D1012500E000250020FFF71CFF022813D001\n" +"20FFF717FF02280ED00220FFF712FFF194034083860002\n" +"2809D00320FFF70DFF022804D00420FFF708FF022831D1\n" +"012685BB0120022620703146002099F731FD3146012099\n" +"F72DFD3146022099F729FD3146032099F725FD31460420\n" +"99F721FDFFF7EFFE0020FFF7F0FE0020FFF7F1FE0020FF\n" +"F7F2FE0020FFF7F3FE207858B91F4860601F48A0601F48\n" +"E060FEF747FD08E001260020CEE71C4860601C48A0601C\n" +"48E06000210420A1F729FE6946042099F707FD01208DF8\n" +"0000694600208DF80100042099F7E9FC01A9052099F7F9\n" +"FC9DF80400012803D101210520A1F70FFE73BD50C29100\n" +"EFBEADDE5C31910050BD06000000800038BE9100C11301\n" +"00198201002F82010065820100E13E0100F13E0100073F\n" +"0100DFF800F0DD5D0100DFF800F089140100DFF800F091\n" +"5E0100DFF800F0635E01002E48704780B5FFF7FBFF0068\n" +"02BD000080B5FFF7F7FF806802BD000010B5032805D002\n" +"4627210FF2B00098F789F9FFF7EFFF014622480B6C8C6A\n" +"00F1F0021471204CE3189361204B1360137943B90B6B93\n" +"60CB6B53614B6BD360896B116118E05B1E042B12D80A68\n" +"C2604A6802618A684261CA6882610A69C2634A6942648A\n" +"6982640A6A4260496A016002E00FF260019161BDE81040\n" +"FFF7A7BF000010B50B490320FFF79DFFEFF3108472B603\n" +"20FFF79FFF0320FFF7A0FF0AE00000007E404058C29100\n" +"000040405C7714917D84860084F3108810BD0000706174\n" +"63685F6661696C736166655F636D302E6300000000556E\n" +"6B6E6F776E20486172647761726520457863657074696F\n" +"6E0000F8B50D4605F120060446B11C89B225200C4A9047\n" +"28B9BDE8F4400FF240000A4908470A4FF0B2B847360430\n" +"0EB847084E29462046B04720210648B047BDE8F2400548\n" +"0047411701000319010035170100E51601005C319100CF\n" +"17010053656E645270635379734572726F725265636F72\n" +"64206661696C656420746F2073656E6420686561646572\n" +"0A00000000DFF800F0E3420200DFF800F0F9420200DFF8\n" +"00F09F420100DFF800F0A7420200DFF800F0BB420200DF\n" +"F800F00B430200DFF800F05D420100DFF800F0BB700100\n" +"012100F01F0291408B4A05E0012100F01F029140DFF824\n" +"22400942F820107047012100F01F029140854AF5E770B5\n" +"FEF7A1F8844D6C682E68A00504D580210FF2342097F7C7\n" +"FEB44314F0130002D02968084328606C6014F0030F1CBF\n" +"7A48FEF791F8E00509D5794880477948416CC1F3400111\n" +"B14430FEF785F814F0900F1CBF7448FEF77FF82420FFF7\n" +"CAFFBDE87040FEF780B810B56C48F19403D0868600FEF7\n" +"2CF868B36E4C2068C00729D5E06B20B1AD210FF2C41097\n" +"F790FE0020FFF783FF0120FFF780FF206AE169884202D1\n" +"2068C00707D40020FFF77AFF0120FFF777FF002010BDA0\n" +"695E49886060694860206BC86060680860FFF76EFF5A49\n" +"01200870012010BD000030B5534C83B004F14400FDF7F3\n" 
+"FF54490A78104249D000200870FFF759FF2420FFF774FF\n" +"0120FFF757FF0120FFF758FF0020FFF745FF0120FFF742\n" +"FFE06C474D00906946206DADF804006868ADF80600A868\n" +"ADF808000020FFF746FFE068009069462069ADF80400E8\n" +"68ADF806000020ADF808000120FFF737FF04F14400FDF7\n" +"D5FF201DFDF7D2FF2E4C2868206040F2DF3060600020FF\n" +"F717FF0120FFF714FF882060602420FFF72BFF03B030BD\n" +"000070B5274C04F14400FDF79CFF10B96FF0060070BD14\n" +"25244E03E0012098F740FB6D1EF06B10B93068800701D4\n" +"002DF4D10020FFF7EDFE0120FFF7EAFE0020FFF7F3FE00\n" +"20FFF7FCFE04F14400FFF7FCFE201DFFF7F9FEEFF31085\n" +"72B6207808B10E4880472320FFF7F9FE2420FFF7F6FE85\n" +"F310880120A9F741F8002814BF00206FF0070070BD0000\n" +"00E100E080E100E080E200E004501040F0C39100E7E801\n" +"00ACC39100B0C391000050104048C491007938910080B5\n" +"322098F7F2FABDE8014099F778BB70617463685F676969\n" +"2E630010490A6801281CBF0228032822F0040207D122F4\n" +"001040F060000860112048617047042806D122F4E87040\n" +"F064021120486105E022F0600040F4001242F002020A60\n" +"7047CC401040DFF800F0DBA30100DFF800F0058B0100DF\n" +"F800F08BA20100DFF800F0DB880100012100F01F029140\n" +"DFF89423400942F820107047F8B5DFF88C63DFF88C4370\n" +"680F4601254008400017F1010F70600AD0B8B260610020\n" +"DFF874138847B84202D26068C0070BD5206840F0010020\n" +"600020606165602120FFF7D3FF002507E0206840084000\n" +"2060706840F0010070602846F2BD00002DE9F041CA4FB8\n" +"680E46012520F01000B86016F1010F08BFC64C11D0C749\n" +"1EB90020884708BB16E0C24CB0B260630020D4F8048088\n" +"47864202D25FEAC8600CD5206840F01000206000206063\n" +"102060602120FFF79BFF002507E0206820F010002060B8\n" +"6840F01000B8602846BDE8F0812DE9F8430446AE4F0D46\n" +"1646984607F11000FDF789FE18B16FF00100BDE8F28300\n" +"2D18BF002E12D08CB1A9882868FDF79FFE60B1B1883068\n" +"FDF79AFE38B12078012808D104F10800FDF796FE18B94F\n" +"F0FF30BDE8F2830A2098F7E4FCF19403608A86000020FF\n" +"F74DFF8C2038608C210C207860C9438020B86014229348\n" +"416007F1BC093968C943016007F19400214699F796FD06\n" +"CD07F1A800083D06C006CE06C089F8188028468B498847\n" +"43460022294607F11000FDF767FE083E30468749884700\n" +"230122314607F15000FDF75CFE002089F8000020468149\n" +"88470120FFF715FF0120FFF70EFF0020BDE8F283000010\n" +"B5754C04F11000FDF71AFEC8B1002073498847B8B97349\n" +"8847A0B994F8940001280BD10220FDF70FFE60B10220FE\n" +"F7A7FC40B101210220FDF70AFE012084F8C800012010BD\n" +"002010BD000070B5624D05F11000FDF7F4FD002857D000\n" +"206049884710B95F49884720B195F89400012815D10CE0\n" +"05F194042078012828D10220FDF7E2FD18B10220FEF77A\n" +"FC78B905F1BC00007B18B100210220FDF7D9FD05F1BC00\n" +"0021017304210170002070BD00210220FFF7B7FE0220FE\n" +"F722FC4D480068000A00F0010080F001010320FDF7CCFD\n" +"00204949414E88470020484988474FF0FF307060306000\n" +"20FFF794FE716905F1BC0089B24160716B89B281603168\n" +"C94361624169491C416102210170012070BDF8B5314C04\n" +"F11000FDF792FD00284AD004F1940595F92800022844D1\n" +"0A2098F705FC0020FFF76EFE686A206004F1C8060C2060\n" +"608020A060307B38B1206840F4817020600C2040F48170\n" +"606020681F4F2149C04378602068C043386004F1A80088\n" +"4704F1B0001D498847E86A80B27861286B80B2786304F1\n" +"1000FDF775FD04F15000FDF771FD284615498847E16808\n" +"001CBF686AFFF742FE0120FFF733FE7068401C7060F1BD\n" +"04F1C806307848B194F89400012803D100210220FDF741\n" +"FD00203070F1BD80E200E080B291003490104037850100\n" +"3D850100258401006384010001840100D8901040A78401\n" +"00BD840100334A52F8200000F1980261B150F8E41F304B\n" +"21F404310160016819430160106840F0060002E010682B\n" +"49084010607047F0B583B0044600208DF804000DF10603\n" +"01A8009002AA2046FEF78AFB9DF804102046FEF789FB1E\n" +"4850F8245005F1980605F1A007306820F0060030603068\n" 
+"400840003060306820F4001030609DF80400002838680A\n" +"D040F480003860306840F0010030602046FEF7B5FF06E0\n" +"20F480003860306840F001003060306840F00600306010\n" +"20C5F8C400BDF80610BDF80800C90341EA800040F00300\n" +"386003B0F0BDA0A301000000408008C0FFFFDFF800F0F5\n" +"3D0200DFF800F07D480100DFF800F04F3B0200DFF800F0\n" +"313C0200DFF800F05D400200F19403F08D8600DFF800F0\n" +"F73B0200DFF800F0A7390200DFF800F00D3B0200DFF800\n" +"F0CD3A0200012100F01F029140DFF8802406E000000121\n" +"00F01F029140DFF87424400942F820107047012100F01F\n" +"029140DFF86424F4E7000070B40346080008BF002029D0\n" +"196DC1F34601C1F14001814298BF0846014604291CD312\n" +"F003040CD0091B12F0030506D003F5017512F8016B2E70\n" +"641EFAD104290CD352F8044B03F50175091F2C60F6E712\n" +"F8014B03F50175491E2C700029F7D170BC704770B50446\n" +"FFF78CFF0025060046D0F08831684218B188091A2068FF\n" +"F7BFFF05003CD0F088B1882818F08080B2884207D10120\n" +"6074E17CA07C88423CBF401CA0742068406D000720D420\n" +"46FFF769FF06001BD0F08831684218B188091A2068FFF7\n" +"9DFF451988B1F1884018F08080B2B188884208D1012060\n" +"74E17CA07C884204D2401CA07401E00020607402202168\n" +"8864206850F8441F41F002010160284670BD10B5044600\n" +"22A169672000F021FA94F90400FFF761FF207A70B1A07A\n" +"032803D100212046CE4A904700202072A1696079E269BD\n" +"E81040104710BD00002DE9F0418046C849884700250068\n" +"00F1700100F120060A68520852000A6030684008400030\n" +"6040F0800040F0400720F0400434600520FFF709FF3760\n" +"0520FFF705FF6D1C092DF4D32846451E58B13068000608\n" +"D434600520FFF7F8FE37600220FFF7F4FEF0E734600120\n" +"FFF7EFFE24F0800535600520FFF7E9FE45F04005356001\n" +"20FFF7E3FE45F0800030604046BDE8F041FFF7DFBE0000\n" +"F8B50446A249257828468847064606F12007F179306800\n" +"2902BF016A01F0C001C02902D1406D40070ED42846FFF7\n" +"C6FE30B92846FFF798FF10B96FF00900F2BDB17A2846FF\n" +"F7BEFE6178B07A88421CBF2846FFF7B7FE96F90400FFF7\n" +"CBFE0020B0611C210120307270720020B074F074384697\n" +"F7B9F9A078B870A0883880A06858B1F17CE28807EBC101\n" +"0A81F17C07EBC1014860F07C401CF074F07CA18907EBC0\n" +"000181F07C216907EBC0004160206910B1F07C401CF074\n" +"A08A3883A06978613046FFF783FE0020F2BD00002DE9F8\n" +"4381466F49884701270668D6F8840010F07F0F1FD00021\n" +"4846FFF774FE06F168054FF48070286097F76DF8C41C41\n" +"F1000854363068400707D497F764F88845F8D828BF8442\n" +"F5D200270020286001214846FFF758FE3846BDE8F28300\n" +"002DE9F04782465749884701250768386D10F0FE0F22D0\n" +"00215046FFF745FE07F13C06306840F08009C6F8009097\n" +"F73CF8C41C41F100085437F19403809186003868C00607\n" +"D497F733F88845F8D828BF8442F5D2002529F080003060\n" +"01215046FFF726FE2846BDE8F087000080B53F49884700\n" +"68006A00F0C000C0280CBF0120002002BD2DE9F0418046\n" +"38498847054640462E68FFF710FE06F144040027276040\n" +"F2BF30B06495F90400FFF723FE3048006808B140F08000\n" +"F062052020602D48407870620120F064776630623067B7\n" +"664046FFF7F5FD00B901274046FFF7F4FD00B97F1C0121\n" +"4046FFF7E2FD4020F0634046FFF7BBFF00B97F1C0020E8\n" +"71A872E8606872781E8041C00FBDE8F081000017490020\n" +"0860FFF7CFBD000038B5164909680A001CBFBDE8344008\n" +"470F4988470446206800F148010A680A60406C00EA0205\n" +"627A2946692000F086F8E0680B492843E0602046BDE834\n" +"40084700E100E080E100E080E200E0B73D020045390200\n" +"A0B3910088B39100E0C6910023410200DFF800F09D9001\n" +"00DFF800F06F410100DFF800F0EB910100DFF800F0B515\n" +"0100DFF800F0BF910100DFF800F0450E0100DFF800F0A7\n" +"410100DFF800F08B4B0100DFF800F057440100DFF800F0\n" +"4F410100DFF800F005410100DFF800F025410100DFF800\n" +"F045410100DFF800F065410100DFF800F00D4E0100DFF8\n" +"00F035470100DFF800F07D200100DFF800F091200100DF\n" +"F800F0415C0100DFF800F05D5C0100DFF800F06D440100\n" 
+"DFF800F0B7200100DFF800F02B540100DFF800F00D6101\n" +"00DFF800F02D930100DFF800F0F7630100DFF888047047\n" +"00002DE9F843DFF880648846044631690009C140C80715\n" +"4622D5EFF3108972B6307828B901203070A2F7B7F8C6E9\n" +"02017069DFF8581401EB0017401C00F01F007061A2F7AA\n" +"F83860D6E90223821ABA60C6E90201BC80C7F80C80FD80\n" +"89F31088BDE8F183000010B5DFF828446421204696F7FD\n" +"FFDFF820040021018101604160DFF81804016821F00401\n" +"0160002020700420E080022020816081322020710120FF\n" +"F743FFBDE8104002210120FFF741BF000010B5EFF31084\n" +"72B6FFF73EFFDFF8D003007828B997F7C2FC30BF97F7C7\n" +"FC13E0DFF8C8038047DFF8C8130860DFF8C4038047DFF8\n" +"C403804710B100F0D7F808B900F02CF8FFF724FFFFF726\n" +"FF84F3108810BD000040B50646DFF8A0030021D0E90023\n" +"4FF47A7070431218DFF8940300684B41C1171018594149\n" +"0241EAD051400243F6095200239DF7F9FF00230022FFF7\n" +"07FF00B90026304640BD000010B5DFF850030468032C0B\n" +"DB002221468020FFF74FFFD3496FF0630008602046D149\n" +"884797F76CFC30BFBDE8F1940310958600104097F76FBC\n" +"0000CD48CE4A01681368194031F0006102D14068516801\n" +"40481E8041C043C00F704770B5FFF7D9FEC648C64CBB4D\n" +"804797F74DFC96F751FE2060A068401CA060286840F004\n" +"002860C04880470646286820F00400286096F740FE6060\n" +"26B197F764FC00F034F901E097F739FC304670BD000010\n" +"B5FFF701FF04460120FEF7EFFD94F8290110B9FFF7ACFE\n" +"50B900210D2098F711FC0D2098F744F90120FEF7E3FD02\n" +"E0FFF7A2FE10B90120FEF7E0FD97F76CFC20B1BDE81040\n" +"0120FFF79ABE10BD000038B50020FEF7CAFD0020FEF7CB\n" +"FD0020FEF7CCFD97F758FC04001CBF0020FFF787FE4FF4\n" +"7A75FFF787FE48B9FFF788FE30B9FFF789FE18B92CB1FF\n" +"F789FE10B16D1E012DEFDA3DB9BDE8344040F23D110FF2\n" +"4C2096F7EEBE31BD000070B5FFF76FFF08B1002070BDFF\n" +"F776FEFFF768FF08B10025A1E085488047050000F09D80\n" +"FFF79AFE00F594747548714E0568052D0EDB7088C0F340\n" +"0050B9FFF761FE38B997F712FC20B930892D1A00200126\n" +"04E070892D1A4FF42F7000266D490860002229468120FF\n" +"F77CFE28466A498847050002D16F48804770E0FFF746FE\n" +"4FF4FA61112098F791F828B94FF4C3710FF2B01096F7A0\n" +"FE01202070FFF75AFF16B10120FEF758FD112098F783F8\n" +"3046FFF728FF4FF4FA61112098F777F828B94FF4CA710F\n" +"F27C1096F786FE97F730FD30B9FFF71FFEFFF721FEFFF7\n" +"63FF15E0FFF760FFFFF716FEFFF718FE607840B197F7BE\n" +"F8FFF7E6FD40B90220FEF708FD04E097F71DFC08B997F7\n" +"B2F80020FEF723FD142096F79EFD28B940F2C7110FF224\n" +"1096F759FEFFF7FDFD4348804700202070FFF7FBFD0146\n" +"00228A20FFF712FE3E484268016892B28B20FFF70BFE11\n" +"2098F730F8002229468220FFF703FE0120FFF7CCFD2846\n" +"70BD70B504460D46FFF7F5FD00F1B00629462046A0F73B\n" +"FC202CB2BF06F1340006F13800203C01220168A240012D\n" +"0CBF11439143016070BD000014490A79322A02D0014610\n" +"46DCE7704780B500F04DF828B940F213210FF2840096F7\n" +"0AFE96F742FDFFF7B4FD97F756F8FFF7B4FDFFF7B6FD19\n" +"48FFF7B7FDBDE80140FFF7B7BD0000007E4040D00B8700\n" +"1000910064CA9100D0CA910010ED00E01F4E0100CCCA91\n" +"00234E01009D4C0100B8B8910060389100834E010000E1\n" +"00E000E200E04F4C01005037910091090100AD4D0100D1\n" +"4D01004F4F010008F3504070CA910070617463685F706D\n" +"2E6300000848807860B14FF4803007490A68C2F3003212\n" +"B9401EF9D100E008B9F19403A098860000207047012070\n" +"47C8C9910050002040DFF800F0B7390200DFF800F06115\n" +"0100012100F01F029140554A400942F820107047000010\n" +"B5818A8289407812FA81F10A22012803D0022802D16422\n" +"00E0282201EB41005100B0FBF1F4052C38BF052408D3B4\n" +"F57A7F05D9224658210FF2181096F746FF204610BD2DE9\n" +"F84F0446414926780968304688470546E08808B1A06838\n" +"B1A08908B1216919B1A18A29B1A2691AB94FF0FF30BDE8\n" +"F28F237943EA0623C0B240EA0320C9B241EA0021002260\n" +"20FFF71AFD6868A8F72DF895F900004FF6FF7197F735FF\n" 
+"60B900226FF002016A20FFF70AFD6868A8F719F86FF002\n" +"00BDE8F28F98F7FFFA28780F3040B2FFF790FF2046FFF7\n" +"97FF064603276FF008086FF003096FF0070A2046FFF77A\n" +"FF28B195F90000FFF709FAD34610E0A8683146FFF773FF\n" +"20B92856FFF7FFF9CB4606E095F901B0BBF1000F18BFC3\n" +"4501D07F1EE2D195F90000FFF7DAFB08B96FF0090B0022\n" +"59466A20FFF7C8FC95F9000097F7ECFE6868A7F7D3FF98\n" +"F7C1FA5846BDE8F28F80E200E0C40B870070617463685F\n" +"6932636D2E6300000000DFF800F0F52C0100F8B5044620\n" +"000D46164602D0022C05D012E0BDE8F240374840680047\n" +"354C206820B9AB210FF2D00096F7D0FC2268E9B23046BD\n" +"E8F84010472C4FF86820B9B1210FF2B40096F7C2FCFB68\n" +"32462946204601B0BDE8F040184710B500210120FEF7CF\n" +"FC00210220FEF7CBFC204C6068FFF7C3FFA068FFF7C0FF\n" +"20681D4CA7F772FF2068A7F76FFF0020206010BD00B585\n" +"B004A8009003AB1548806802AA01A999F793FC02980499\n" +"0818032838BF00201AD30199491C01910299012903D103\n" +"99039A097808E00199029A0978022A02D1039A127801E0\n" +"019A527841EA0221C91C88428041C043C00F05B000BD00\n" +"00C4C7910058389100C80B870070617463685F6970632E\n" +"6300DFF800F0B12B010010B5044699F790FD20B12D210F\n" +"F2480096F756FC2046BDE81040FFF7EDBF000038B50C4D\n" +"0446286840B99EF709FA286020B93D210FF2200096F742\n" +"FC2868002C1CBFBDE832409EF7FFB9BDE832409EF7FFB9\n" +"00005C38910070617463685F6C6F676765722E63707000\n" +"000000012100F01F029140684A400942F8201070470000\n" +"80B54FF47A710F2097F704FE20B946210FF2102096F714\n" +"FC60480068011D00220A7700685E4A5063486810645D48\n" +"D0F8080150614869106088695060487F28B10868D06188\n" +"68D06308691061106A20F40040106201BD0000F8B50546\n" +"202D0E4605D32A46F19403309C860067210FF2BC1096F7\n" +"B3FDEFF3108472B64B4F57F82500310001D038B100E028\n" +"B92A466C210FF2981096F7A2FD47F82560012044494A68\n" +"A84082434A6008601420FFF7A2FF84F31088F1BD000010\n" +"B50446202C05D3224679210FF2641096F788FDEFF31080\n" +"72B60121364AA140116080F3108810BD2DE9F041044620\n" +"2C884605D3224684210FF2381096F772FD012000FA04F6\n" +"A0F749F82C4F3868304204D088210FF21C1096F79AFB38\n" +"68304338600125786830420FD100255FEA080008D02048\n" +"00EB8400C0F88080386A3043386203E03868B0433860BE\n" +"60A0F72AF86CE000002DE9F0410546202D0C4605D32A46\n" +"AA210FF2CC0096F73CFD012000FA05F6EFF3108872B610\n" +"4F3868304204D0AD210FF2B00096F763FB7868304204D0\n" +"AE210FF2A00096F75BFB386830430125386088F310881F\n" +"E080E200E0A8C99100088040409CC89100388040402880\n" +"40402DE9F0410C46012101FA00F60125EFF3108072B600\n" +"4F01E02880404039683143C7F8001080F3108878683042\n" +"1DD196F721FA620243F60953B2FBF3F20023841841EB03\n" +"0800257868304209D196F712FA4145F8D301D8A042F5D3\n" +"7868304201D0012503E03868B0433860BE602846BDE8F0\n" +"81000070617463685F6C706C2E6300DFF800F015150100\n" +"DFF800F01B15010000487047007E404080B5FFF7F9FF00\n" +"F5967002BD70B582B00C4600F06FF80646FFF7F2FF0546\n" +"6946304697F7D9FF01208DF800006946304697F7BEFF21\n" +"463046A0F7ECF8FFF7D4FF4FF47A71102097F7C1FC20B9\n" +"52210FF2942096F7D1FA0120202E0BD22968B040014329\n" +"60002C696814BF084321EA000068600BE0203EA968B040\n" +"0143A960002CE96814BF084321EA0000E860102097F7A1\n" +"FCFFF7ADFF73BD38B50546FFF7B0FF0446284600F026F8\n" +"0546FFF79DFF4FF47A71102097F78AFC20B97D210FF224\n" +"2096F79AFA0120202D05D22168A84021EA0000206006E0\n" +"A168A5F12002904021EA0000A060102097F775FCBDE831\n" +"40FFF77FBF000038B5FF251728044617D8DFE800F00C1F\n" +"30434A515F6D7B82899098A0A8B0B8C0C8D0D8E0E82A00\n" +"20FEF718F9012859D00C20FEF713F902284DD0224640F2\n" +"43110FF2AC1096F728FC284632BD0320FEF705F9012854\n" +"D00F20FEF700F90228ECD147E00320FEF7FAF80128E6D1\n" +"48E00420FEF7F4F801284AD02520FEF7EFF8022808BF25\n" 
+"25E0D00E20FEF7E8F80328D4D10E25D9E71420FEF7E1F8\n" +"0128CDD11425D2E71720FEF7DAF80128C6D11725CBE731\n" +"20FEF7D3F80128F19403C09F860008BF3125C4D01620FE\n" +"F7CCF80228B8D11625BDE70C20FEF7C5F8012801D10C25\n" +"B6E70020FEF7BEF80228AAD10025AFE70F20FEF7B7F801\n" +"2801D10F25A8E70320FEF7B0F802289CD10325A1E70420\n" +"FEF7A9F8012895D104259AE71020FEF7A2F801288ED110\n" +"2593E71320FEF79BF8012887D113258CE71F20FEF794F8\n" +"01287FF480AF1F2584E71E20FEF78CF801287FF478AF1E\n" +"257CE72020FEF784F801287FF470AF202574E72720FEF7\n" +"7CF801287FF468AF27256CE70920FEF774F801287FF460\n" +"AF092564E70620FEF76CF801287FF458AF06255CE72F20\n" +"FEF764F802287FF450AF2F2554E72B20FEF75CF801287F\n" +"F448AF2B254CE72E20FEF754F801287FF440AF2E2544E7\n" +"2D20FEF74CF801287FF438AF2D253CE70820FEF744F801\n" +"287FF430AF082534E70B20FEF73CF801287FF428AF0B25\n" +"2CE7000070617463685F706D692E6300DFF800F05B2B01\n" +"00DFF800F0A1110200DFF800F031140200DFF800F0BB4B\n" +"0100DFF800F035910100DFF800F0151001000148704701\n" +"487047007E40400002000080B5FFF7F5FF006802BD0000\n" +"10B5FFF7D7FFFFF7D9FFFFF7EBFF040004D13A210FF24C\n" +"0096F750F914F0030004D03B210FF23C0096F748F9FFF7\n" +"DCFFB0F5A07F04D23C210FF2280096F73EF9FFF7D8FF00\n" +"F10C0280236821201DFFF7B9FFFFF7BBFFFFF7BDFFBDE8\n" +"1040FFF7BDBF000070617463685F696E69745F73657276\n" +"696365732E6300000038B50446EFF3108572B6FCF7A9FA\n" +"00B90AE00FE038B50446EFF3108572B6FCF79FFA00B900\n" +"E005E085F310886FF0060032BD00006068216A884785F3\n" +"108832BDDFF800F07D9F0100012100F01F029140254A05\n" +"E0012100F01F029140DFF88C20400942F8201070470121\n" +"00F01F0291401F4AF5E770B51F4CA06E00071BD5EFF310\n" +"8572B62068C0070ED54FF4805601E02EB1761EFFF7D4FF\n" +"0328F9D326B936210FF2580096F7CCF80820A0661B20FF\n" +"F7DBFF85F3108870BD10B50F4C48B1FFF7D9FF206820F0\n" +"080020601B20BDE81040BAE71B20FFF7BEFF206840F008\n" +"002060BDE81040C6E7000000E100E080E100E080E200E0\n" +"002050407020504070617463685F74696D5F6C6C2E6300\n" +"00F193F9FFD1DCF9FFB907FBFF457AFEFF4149FCFF9D83\n" +"FEFF80B521B14A68C0F8B42009684163CEF751F8002002\n" +"BDD0F8B4204A60406B08600020704780B5492100209DF7\n" +"A7FB022802D0DEF767F8012002BD80B5492100209DF7E3\n" +"FB032818BF012002BD10B5052882B00C4617D1A0F71BFE\n" +"01462046DEF7F1940350A3860089F98DF8000070B12A20\n" +"9CF77FFA040009D0E068A36994F8202000A918189DF79D\n" +"F80120A077012016BD704710B50446FFF7FBFF2046BDE8\n" +"1040B2F7B3BF10B504466068016809698847E06010BD40\n" +"680168096B0847406802685269104710B5044600F005F8\n" +"2046BDE81040B2F79ABF10B50446E7F7EEFB204610BD10\n" +"B5044600F005F82046BDE81040B2F78BBF10B5044604F1\n" +"140000F008F8201DFFF7E8FF204600F008F8204610BD10\n" +"B5044600F002F8204610BD00F000B870470020704710B5\n" +"0446FFF7F1FF2046BDE81040B2F769BF38B504460D46AC\n" +"4205D0206820188047241DAC42F9D131BD38B504460D46\n" +"284600F005F8A84218BF0024204632BD10B5044614F101\n" +"0F82B00AD08DF80000012200A9012000F007F8012801D1\n" +"204616BD4FF0FF3016BDF8B50C00154615D1012818BF10\n" +"F1010F0ED142F6B036C0F29106326D42B1002030653146\n" +"0120A1F746F810F1010F48D00020F2BD01281CBFBDE8F8\n" +"40A1F73BB8512D17D342F6B036C0F29106326D42B10020\n" +"306531460120A1F72DF810F1010F2FD02A4621460120A1\n" +"F725F810F1010F2AD126E042F6B0362F46C0F291061FB3\n" +"316D00208A1950290BD214F8013B02F8013B0A2B08BF01\n" +"20491C7F1EF3D1316501E031651FB978B1306D0028E7D0\n" +"326D0020306531460120A0F7FDFF10F1010FDDD14FF0FF\n" +"30F2BD2846F2BD002200214FF0FF30FFF794BF10B50446\n" +"4AF2F0214AF2D820C0F28601C0F28600FFF75DFF204610\n" +"BD816807E002681168536889185B18C36008320260C268\n" +"9142F4D04A1C82600878704738B504460D46A068E16888\n" 
+"4212D120686168884208BFAEF722FA2068227C016802B1\n" +"4944A160426852180830E26091422060ECD0A068401CA0\n" +"6000F8015D31BD2DE9F0418AB000F10801059102685208\n" +"01EBC20106910021079108910168490800EBC101083100\n" +"91017901F001018DF8101040680099400801EBC0000190\n" +"00200290039084E005A8FFF7A9FF044614F0030603D105\n" +"A8FFF7A2FFC61C25090F2D1BD105A8FFF79BFF00F10F05\n" +"15E0059801680818059907904968081808900598083005\n" +"90079808998842F0D0401C079010F8011D00A8FFF793FF\n" +"761EF2D1002D51D005A8FFF77BFF0646A00800F0030003\n" +"2804BF05A8FFF772FFDDF800800299039F06EB002008E0\n" +"A8F10808801A58F8081C58F8042C5718394658F8082C8A\n" +"1A8242F1D30C1AAE1C2DD0BC4206D158F8044B58F8040B\n" +"0719BC42F8D00298039914F8015B884217D10098019988\n" +"4208BFAEF7F19403E0A6860085F9009800689DF8101002\n" +"9009B148440290009902984968009A0918083288420391\n" +"0092E7D00298401C0290761E00F8015DD1D10298039988\n" +"427FF47BAF0198009981427FF476AF0AB0BDE8F0810000\n" +"DFDCFAFFD8010000007E404094940900E8A38700000000\n" +"0087FEFFFF020000000200000014000000E815000000E0\n" +"8600E82B0000EDFDFFFFF0070200000C0023DB793E0001\n" +"100143E0801E100100009308122B0812D5081203081003\n" +"18F58200140812050443DCF40C12060410035CF782003C\n" +"081207044388EF181208044344F70C121004433CF52412\n" +"150443C4F40C121704525024211052980C121A04437CEF\n" +"24121F04526818122004522C0C122104528C0C12220412\n" +"4860220A20110452749012380452B86C12360452B01822\n" +"371C513C123A045200D8123B0452202412400452E8E412\n" +"42045294481243045224CC124404528030123D0452AC24\n" +"255512D00C12B40812460452A46C12470452A024124804\n" +"520C4812490452F46C124A04523018124B0452549CA103\n" +"3001BCE18604F14301300118E2860410E5600380008100\n" +"820083008400850086008700880089008A008B008C008D\n" +"008E008F00900091009218930094009500960097009800\n" +"99009A009B009C009D009E009F00A000A100A200A300A4\n" +"00A500A600A700A940AA09AB00AC0CAD00AE00AF00B06A\n" +"B198B200B3F2B44DB500B677B700B800B900BA00BB0ABC\n" +"4FBD00BE02BFD4C000C100C200C300C400C500C640C700\n" +"C800C900CAE7CB03CC00CD3FCE3FCFA7D000D100D200D3\n" +"00D500D6FFD721D868D964DA91DB00DC00DD00DE00DF00\n" +"E000E100E200E300E400E500E600E700E800EA00EB44EC\n" +"0CED50EE00F900FA00FB00FC00FD00FE00FFF218056F00\n" +"7100F300E8E11AD0C117A00FFC19751004ECFC00070118\n" +"0524040218FC0107021BF401DD03600214F40100F40100\n" +"182A80DD127D042AFAB5F101215538565133F8062600C0\n" +"CA500170A6818EF001FF80BF0001F5FF11F5B2D3F61A01\n" +"FFF819206E55AB47CBC30043D038AFF4CED5005CFA0037\n" +"30BDFCABF30D2DFA4B2DA3031003DB76820004083C0125\n" +"7782E4071265082DBE077002295C0F3D29F21D0458F31D\n" +"0100D4612B12FF01A108A1186104F11348FC05FF544353\n" +"333430373B04F56C10F6C652541209041003EFAB830018\n" +"083C0133AC83A40A12810812280812C5082D1F07F13B04\n" +"1D1407F23D3F041D6C0BFF047D23AF031D6F03FE027FDB\n" +"03F15403F2160C28FA9E0130520604A10346026C6007FF\n" +"8CD9B83FF1940370AA8600B8239C3D1050073F91BF3E7B\n" +"6AF53C04103005A0548B3ECD59FF2021F86C06B2D6603E\n" +"6DA8983D700550074F028B3EF470E23D0A306006CA3505\n" +"3E6A4B2D3E406006598B0F3E3F1D2F3E403005E82D3E3E\n" +"15E69D4021206006122F2F3E1A8A6B3E805007A0350F3E\n" +"CFF6683C06305007753E1C3ED8D64F3E06207005F146E6\n" +"3D15C6B6502002F1A0F93D90520F1060064D660C3EDFC3\n" +"A53C505007193AF63DE273A73C0C10500738F6EC3DCAA3\n" +"533F0E307005A1BDDA3D7CF2B0F0300570D2B43D745DB8\n" +"502110740598A6C83D569AB4002002C5ACF73DC0619050\n" +"077D59DA3D2EFE0A3F082064064F58C23D2CB9823E1060\n" +"06B361AD3DEBA9153E207405E1EECC3D1A4DAE7060065E\n" +"2EA23D1EDF5E3C70B001E76E97706402D40FAA3D302104\n" +"70054BABA13D25223C107405863A8C3DBD8C7240700549\n" 
+"A18C3D43C53840600630D9983D69C9E33C90740534A274\n" +"3DA1F8814030014031926052052070057DEA983D8639C1\n" +"3070054163863D92E805C06406F9D85D3DBA83183EC070\n" +"050114633D48A8F9D0740518B27A3D999C1A907405E129\n" +"643DD906EE805007274E6E3D043ADB3E0C20600623A48E\n" +"3D59C3093FD06406836B6E3D9560213E107005CA8D623D\n" +"38BC205074059E7B8F3DAF773FA070058EEA743D462274\n" +"C064061AA6763D0C233D3BA06006F913553D6FD3073FF0\n" +"3801726A8730600E6AF9D13E4445AF43D5CA443DBD9AE4\n" +"4110601628CBA441BC71E343D62D5B410EE3B646F38E65\n" +"44FF33824610320001833C42101E80B70F7206301C0129\n" +"7549D30F92C820600C00007B3AA1414A41073EA108D641\n" +"506110612861106140605E00A101C1AA29B1BFFC81F3C0\n" +"CAC224C0E7175CC0990E5D3E57B549C05C020F4087C144\n" +"C01EA51B40B74167C0EF5711BF8F3862C04B037F40AED4\n" +"07C02A52A13D74D0B5BFD88234C0904A17C066311DBFCA\n" +"349A3B49148A3EAAB9DC3A2097E03E106016205F423D40\n" +"DCAD3E8E731B3A2F8ABE3D9E42AE3DA0A4183F1019AC70\n" +"023C55DF59107005558A1D3D6DE446101C01D46270CF10\n" +"728010700DED0D3E3ACA6F193F5A2F063C7461E4506110\n" +"612861106140F206000118539754B33F6132D33FEAB30E\n" +"42098F25453F391541617B2B412F34D33F0A66E63FAEA5\n" +"3D42D2B76245DB5F14413BCA2B41EECCEC3FD6FF0E407C\n" +"85354261E4BB4562161041629039417217A13D096C7E3E\n" +"0DC31F3FF618101B07993C3F26C4C43E56D4083FE6FEFF\n" +"FFDC64783FADDDE63E9A05723F5620100B9E996C3FA778\n" +"383F15F1940300AE86006F703FF620100B166D6E3F1E89\n" +"4B3FD9950E3FA420100B1498623FCBBF563FB3284C3EFB\n" +"10100BCF9F4A3FFAD4A13E83DC093FE330100B9947423F\n" +"7159853D1DE4393F3220280A19C96A3F787C033FAC5450\n" +"3E98100BF756683F9E271E3F174A0E3FD210100BE770BD\n" +"3E923BEC3EC6A20D3F1910100BB474293F9450823E494D\n" +"4B3ECF10100BA6B5493F08393F3E2EA92E3FAB10100B3D\n" +"81B83E89234F3EDB17983E0610100B431F243F302F703E\n" +"E04D373FFC80100B17B66E3FADBF753EA08B6E3FBF2010\n" +"0B31285B3F8A73FC3E5D34103F9310200A4608233FC382\n" +"CB3E14590F3FC0100BC9E56B3FC285B43EDDB1083F0910\n" +"100B067F8F3E5457F63E5D322E3E6F10100BBB0A693F99\n" +"B8B53D69370E3F6810100BE679303F4A7D1D3F4EEF0E3F\n" +"9B10100BB5FE3A3F371CD63D0D1D0B3FE110100B38F327\n" +"3FA0DC763E0E30033F9810100B82E66F3F4BAF853E22A7\n" +"2F3F6B10100BA051463F950BB53EC26B6F3FFAB0100B28\n" +"7E6C3FB477A63E645C613ED810100BE751653FD5EA4B3E\n" +"18B3553EB330100B56BC613F51136D3EE1B2023F801010\n" +"0B96ED033F1EA5323E029A103F4610100B30632E3F2D7C\n" +"0D3F1EDD583EB910100BD5926A3F5936733D33E13B3F21\n" +"10100B17F48E3EE0BC383E992B173FAE10240AE0641B3F\n" +"33E0AC3DE8A40F3F60100B36210D3FE8F6AA3EA6B5053F\n" +"1010240BECC10C3F0B2A8A3D096B3B3F5220100AC95C3F\n" +"FF093E3E2844303FD810100BAD345D3F2DD0EE3DAC1AE4\n" +"3ECB10100BCD03403F999BAF3ED8D72E3F7210100B6822\n" +"583FBE17073FF33D2B3FF210100B78286A3F014C913E72\n" +"C2543EF11014059EF01A3FA60A26001001373F47103009\n" +"946C013F8928DE3ECEFF2BE034098C86183F4BC8233F0D\n" +"7035F0100B17F3EB3E2AA9233E274C783EC010100B6DCA\n" +"213F69C3313E1497733EDC10100B4D49BE3E43E1433EB6\n" +"2C2B3F9C10280A1079A33EF303CF3EE3FE2B3F71100BC5\n" +"578B3E5322553F7AA8313F6D10100BE8F9BB3EFDFAA13E\n" +"596E793E4A10200AB152713EB29E2A3EAC017A3E70100B\n" +"B41CC83D73F7393ED7F6323FE41030097B88663E4E0BBE\n" +"3D344C1DA0200B92ECB13DBA6BE13EA794173F8DF02C09\n" +"78D33D4A40D43E139E603E6003100B2506A13D80D8323E\n" +"4948743EFC10100B541B2C3EEC69973E807E6F3FCE1020\n" +"0B990FD83E529CC33E7B876F3FF970100A031E3E3BE210\n" +"3FC1E5713F7710100B2FF8643E1D933D3F1E375C3EDE10\n" +"100B0CCCEA3DB8231C3E0F5E0B3F60102C0A9ACD033FC3\n" +"6670F1940390B186003EA27DEC3E30162C0AE8DA073E55\n" +"BDC43EA2D1E53E200B100B6B9E433ED28A9F3E94D9B03E\n" 
+"D1301F3738F4063C0446F73D85D11400043009DC4A6F3D\n" +"8027ED3DC0B35DA03C09C5C7FF3E0A65E13DA29A721C08\n" +"100BFCFB0C3D6155DD3D2638F53DDB203C09DDCFC93DB7\n" +"7AEE3D8577F9F00310043145B93EE82DB4100238103FFA\n" +"103C09A92EE03C3065803E17D853B003100B0E2C4B3F3F\n" +"1AEE3DED464F3FE7102C0B8543433F43E51F3EA297C93E\n" +"2488062C09A7303F0F0B413F9A261C3E9416100B7D3D1F\n" +"3EBCAD403F9DB8003FDA30100B53E7993EBBEF883E0B0D\n" +"083FD7101C0170981C80031C022D3F90F450041130100B\n" +"E21F9E3EEA5DDC3E6308D03EEE20100BCADD273EE25952\n" +"3E990DE23EEA102C0A0F98C73EA7B16D3EA3220E3F2C07\n" +"100B5050023FBC03E43EC87D2B3FF320100BF31E973E89\n" +"0C8B3DEC30763E2920100B0C5A083D69E4EB3E12F65D3E\n" +"F420100B41D6533EAFCDEE3E1748E83E2720100B71AC33\n" +"3F0666093F08C8373E11105C0BC2A1273E1285F63D1C28\n" +"743F045C07920008F12F076D5A1052FF01920010E12C61\n" +"14F003FFFFFFFFFF0001F5FF11F5DCFDFD0D1003ED5C03\n" +"61082D541B4FF1D6DC1A1F35D79C131C0917E1800001DD\n" +"0D810002A7AB131005059DCF8300077D19F0110C0909AD\n" +"83000D090F85001D6F4C84001CABDF836033FC86045208\n" +"04722819FD073604FE0402E803F12F04F1134830011112\n" +"1101722109201400761425234133121424532315143244\n" +"2123135400762C52323421081201B31771E3341C080A01\n" +"00B2E70300100C01CB1410014FF014081003ACE705001C\n" +"08100BF981860014100100E9828600C6082002F0811444\n" +"081209101788140C2003BF00BF381608100185F03C0810\n" +"046D858600CC183020112080002C1D010049248000B620\n" +"010001BDF8FD08124D1032C6102332280812C11012FA08\n" +"200270470BD2082004B11B80003E231023322C08122D10\n" +"12E40812690810101C240100E5198000A82501003D1A80\n" +"000426A01108126E08200220E7322C0812791012A60820\n" +"0231BD322D08200A2D1C80009E27010010BD684D081003\n" +"D1188000F408124D48134828B010039B8600462BD82001\n" +"83C41008124D1012D4081004C1788600CE341820018710\n" +"8208200AAD7B860006360100F2B95FEA08100395798600\n" +"BA082002F081C0EB0812C91012DA082002A4E150480810\n" +"04F17A86005237C82001BDD857082004157786004E3888\n" +"23084608125D10129208200232BD032C08130176501639\n" +"00F1940320B586002001BBDFF8081289101003243A0100\n" +"055812B808100720462046D03C0100918812D40812DE04\n" +"12D8041C03717586002A620F2002F08D0328081209B812\n" +"A4082404F9108000E63F4823B007082404D10F8000EE42\n" +"1823F04408129920136C4388200B36800038480100BD17\n" +"8000424BA02372B60813FD9390110820027047AC430824\n" +"04499486000E4CB8231084081007D59786000C4E01009D\n" +"181232082002704750480820044D7F8600FA5148230020\n" +"081C019D1480711611301108122A0820027047DFF80810\n" +"07259C8600DC5D01007D081003905E0100A50812D20820\n" +"0210BD04460817119D98265F6023054608127D1012CC08\n" +"16CDA0130C6108116810039C660100F50813D069202403\n" +"8086005A6A702302EB0812F110132C6C282403A186007E\n" +"746823206B0812E110100468750100B97C4011081003D9\n" +"7D8600FC081007957E8600507801005910200612790100\n" +"D7BEFF26081003219E8600EA08200210BD0C460812AD10\n" +"20066E7A010073BDFFF70813996910227DD8231C06081F\n" +"556A201E167E70110812C80812F108132C7F6820036B86\n" +"009E8088230146081003156C8600E208200212BAA36D08\n" +"123D10232E814823F9F708134D6D381403830100358918\n" +"228418230126081295101B80851820098A86003E860100\n" +"A3BA04460812ED10180418890100398B781108200210BD\n" +"06F10812FD1013008AF01C06888600588E01008598031F\n" +"0890280318039E800034911010029F8000BE08200270BD\n" +"A0780812651022EA181110210814040D0D800030966012\n" +"09301C03970100410CFC201A99F010020B80005C081219\n" +"10127008127D2812A00812A50812B208200208E00A6808\n" +"12AD2012EE08200270478821081404B9168000289B5811\n" +"082F1A9C7803230C4608126D101250081691D012C40813\n" +"85A2B0129FD81C078C8600C0A101001D8D100316A3D011\n" 
+"601C0818A7010045808000DCB3180418068180005EB601\n" +"00023821081651E02408A8C60100C56486003AC9082301\n" +"210812F110133CCAD01003E0850054CEB81803998600F0\n" +"E6A015181F14EAB004168770110813CD86801108123910\n" +"1730EB1011101FA0EC58031C0273860022D01D1EC1A003\n" +"2108125510126C0813817108100AF5010061658600040C\n" +"0200B90812AC0813419AC01002120200CD08128A082002\n" +"184704A8081299101C08E8140200519286005639810520\n" +"012001200810031D908600B608200279E027780812E910\n" +"1804CC3A02004991F010023B0200C508124E081A707821\n" +"F19403B0B886000813498F201DCA121BA98E101D221312\n" +"5D5013DE41202001470968081FE16EC004175802C8233E\n" +"D00813B96F2022592023022808200AB103860036F90200\n" +"F2BD8FB0081010B51D86008C010300851E8500102B0300\n" +"D559A81002410300F90812F808138D1EA02005AE0300F1\n" +"BD048B081C1395D385005CCE0300C5DF85008CE00300AD\n" +"AE8500AC5E0B200ABDEE8500769C040086BF91FE081805\n" +"1DEE82003ADC04F8230D46083C0539EC820036E204D004\n" +"21081219201C0410E30400FDEB10221C13C70600DDA585\n" +"0048DE0600C1A88500E4E10600094FA0031810FD0600D9\n" +"5C8600B0160700A18B8000522C073823044608100FFD63\n" +"8100106407003DAD8500906A0700C10812E2081A371021\n" +"08200A4D568200B28407002FBBCDB0081007212D8200BC\n" +"8F07002D081003D4900700150813DA93581D4803210810\n" +"27D92C8200709707001D4182002CA40700115B8200E8AB\n" +"0700B14E82004CB70700A554820090BD0700E9281003A8\n" +"C00700C9081003CCC70700ED0810035CC9070005501003\n" +"84CC070029081EFAF81F2002CCBE044608121D101003B8\n" +"D307001908100358DA07002508100864E30700A9528200\n" +"08E748126A48100A5708002D668200345A0800E1081003\n" +"245C0800710820064E5D0800704086B00810032D688200\n" +"9808200A3D618200E25E0800A0BA41F20817C162682F5F\n" +"087805222608125D101C04F8620800E54BBC08140B9C08\n" +"006DAF850064EF0800DDB0601C1304090029BD82005462\n" +"090071BC82005068090059BB48231C1869090075B98200\n" +"E46A0900FDC285006C9609000DC385000A9B0908072304\n" +"8B0812F5181003A09D09000118100338A1090005081003\n" +"50A60900F918200EA4AC0900D54A860096B50A0044E03C\n" +"F80812358012D4081C1149E48500BC3B0B00CDE2850024\n" +"3E0B00E5E885212410010B00D5081801FA6B0B582383B0\n" +"081F4929A8031003920B0041BD301803E50B00B5265820\n" +"0D120C003988850082160C0050460E200812A91810038C\n" +"180C00B1081003C8190C00AD081F5C1BAA151D9008100B\n" +"98360C001580850044370C002508100B9C3B0C00819985\n" +"0058450C00A5281C19804B0C0049B38500848B0C000130\n" +"860010270D00D93D8600FE320D2003232DED0810078146\n" +"860048430D00D1C01401AE4E0D5823CBFF081F95271003\n" +"1C03530D009D26E8031C03540D00E523A003100A920D00\n" +"1924860020950D0005081C01B8970D3C1711801008570E\n" +"00351D8500EAF90E58231D460812691012FE08200232F1\n" +"140140BC8600BD059E08200A81D2850042360F004EBA00\n" +"F0081F612E1807140A450F00652A860034640F00951020\n" +"06026A0F0079BBCC4D082D7E0A9203042110520504D201\n" +"18A11CF10704D20104F10F286110F31109005CF10F0461\n" +"5421B0F3010900CCF10730F10B50F10B14F30D010058F3\n" +"09010084F10724F107042124F3190900B0F12778F20601\n" +"04F10F6CF10F38F10730F20E0934F10B1C520D04E130F1\n" +"0710F10704F11380E134617CF103C0F11384F10B6CF315\n" +"010054F10F24F10774F11740F12718651CA114F202033C\n" +"F10344F10FACF2060504E120436C020421024F36014E0F\n" +"1E6C8C0821015C01FF2292040FFD09F026525801210C20\n" +"089CB59100ACB9910091E662F00300000051120230434C\n" +"F108F1100050BD8600000000000000000000000000>\n" diff --git a/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_davinci.h b/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_davinci.h index fea4abea1d69..b93463e25702 100644 --- a/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_davinci.h +++ 
b/drivers/sensorhub/brcm/bbdpl/r_os/bbd_patch_file_davinci.h @@ -1,15 +1,15 @@ "\n" -"\n" -"\n" +"\n" +"\n" "\n" "\n" "\n" "\n" -"\n" +"\n" "\n" "\n" "\n" -"\n" +"F30D2DFA78120204100387D5800004084FD1D53C082F11\n" +"D6000570040800295C0F3D29F22D04DC61E012FF01A108\n" +"F10B04F111241001803F090610036F718100180812B308\n" +"2D780810033B728100280812F7102DF405F13B042174F1\n" +"3D04F12D021402649A230127227F04F50277F15403FA1A\n" +"0C08F19B03520604A1035A029860068CD9B83FB8239C3D\n" +"1050073F91BF3E7B6AF53C04103005A0548B3ECD59FF20\n" +"21F86C06B2D6603E6DA8983D0C0350074F028B3EF470E2\n" +"3D0A306006CA35053E6A4B2D3E406006598B0F3E3F1D2F\n" +"3E403005E82D3E3E15E69D4021206006122F2F3E1A8A6B\n" +"3E805007A0350F3ECFF6683C06305007753E1C3ED8D64F\n" +"3E06207005F146E63D15C6B6502002F1A0F93D90520F10\n" +"60064D660C3EDFC3A53C505007193AF63DE273A73C0C10\n" +"500738F6EC3DCAA3533F0E307005A1BDDA3D7CF2B0F030\n" +"0570D2B43D745DB8502110740598A6C83D569AB4002002\n" +"C5ACF73DC0619050077D59DA3D2EFE0A3F082064064F58\n" +"C23D2CB9823E106006B361AD3DEBA9153E207405E1EECC\n" +"3D1A4DAE7060065E2EA23D1EDF5E3C70B001E76E977064\n" +"02D40FAA3D30210470054BABA13D25223C107405863A8C\n" +"3DBD8C7240700549A18C3D43C53840600630D9983D69C9\n" +"E33C90740534A2743DA1F8814030014031926052052070\n" +"057DEA983D8639C13070054163863D92E805C06406F9D8\n" +"5D3DBA83183EC070050114633D48A8F9D0740518B27A3D\n" +"999C1A907405E129643DD906EE805007274E6E3D043ADB\n" +"3E0C20600623A48E3D59C3093FD06406836B6E3D956021\n" +"3E107005CA8D623D38BC205074059E7B8F3DAF773FA070\n" +"058EEA743D462274C064061AA6763D0C233D3BA06006F9\n" +"13553D6FD3073FF03801726A8730600E6AF9D13E4445AF\n" +"43D5CA443DBD9AE44110601628CBA441BC71E343D62D5B\n" +"410EE3B646F38E6544FF33824610320001833C42101E80\n" +"B70D7206301C01297549D30D92C820600C00007B3AA141\n" +"4A41073EA108D641506110612861106140605E00A101C1\n" +"AA29B1BFFC81F3C0CAC224C0E7175CC0990E5D3E57B549\n" +"C05C020F4087C144C01EA51B40B74167C0EF5711BF8F38\n" +"62C0F19403E01785004B037F40AED407C02A52A13D74D0\n" +"B5BFD88234C0904A17C066311DBFCA349A3B49148A3EAA\n" +"B9DC3A2097E03E106016205F423D40DCAD3E8E731B3A2F\n" +"8ABE3D9E42AE3DA0A4183F1019AC70023C55DF59107005\n" +"558A1D3D6DE446101C01D46270CF0E728010700DED0D3E\n" +"3ACA6F193F5A2F063C7461E4506110612861106140F206\n" +"000118539754B33F6132D33FEAB30E42098F25453F3915\n" +"41617B2B412F34D33F0A66E63FAEA53D42D2B76245DB5F\n" +"14413BCA2B41EECCEC3FD6FF0E407C85354261E4BB4562\n" +"161041629039417217A13D096C7E3E0DC31F3FF618101B\n" +"07993C3F26C4C43E56D4083FE6FEFFFFDC64783FADDDE6\n" +"3E9A05723F5620100B9E996C3FA778383F156F703FF620\n" +"100B166D6E3F1E894B3FD9950E3FA420100B1498623FCB\n" +"BF563FB3284C3EFB10100BCF9F4A3FFAD4A13E83DC093F\n" +"E330100B9947423F7159853D1DE4393F3220280A19C96A\n" +"3F787C033FAC54503E98100BF756683F9E271E3F174A0E\n" +"3FD210100BE770BD3E923BEC3EC6A20D3F1910100BB474\n" +"293F9450823E494D4B3ECF10100BA6B5493F08393F3E2E\n" +"A92E3FAB10100B3D81B83E89234F3EDB17983E0610100B\n" +"431F243F302F703EE04D373FFC80100B17B66E3FADBF75\n" +"3EA08B6E3FBF20100B31285B3F8A73FC3E5D34103F9310\n" +"200A4608233FC382CB3E14590F3FC0100BC9E56B3FC285\n" +"B43EDDB1083F0910100B067F8F3E5457F63E5D322E3E6F\n" +"10100BBB0A693F99B8B53D69370E3F6810100BE679303F\n" +"4A7D1D3F4EEF0E3F9B10100BB5FE3A3F371CD63D0D1D0B\n" +"3FE110100B38F3273FA0DC763E0E30033F9810100B82E6\n" +"6F3F4BAF853E22A72F3F6B10100BA051463F950BB53EC2\n" +"6B6F3FFAB0100B287E6C3FB477A63E645C613ED810100B\n" +"E751653FD5EA4B3E18B3553EB330100B56BC613F51136D\n" +"3EE1B2023F8010100B96ED033F1EA5323E029A103F4610\n" +"100B30632E3F2D7C0D3F1EDD583EB910100BD5926A3F59\n" 
+"36733D33E13B3F2110100B17F48E3EE0BC383E992B173F\n" +"AE10240AE0641B3F33E0AC3DE8A40F3F60100B36210D3F\n" +"E8F6AA3EA6B5053F1010240BECC10C3F0B2A8A3D096B3B\n" +"3F5220100AC95C3FFF093E3E2844303FD810100BAD345D\n" +"3F2DD0EE3DAC1AE43ECB10100BCD03403F999BAF3ED8D7\n" +"2E3F7210100B6822583FBE17073FF33D2B3FF210100B78\n" +"286A3F014C913E72C2543EF11014059EF01A3FA60A2600\n" +"1001373F47103009946C013F8928DE3ECEFF2BE034098C\n" +"86183F4BC8233F0D7035F0100B17F3EB3E2AA9233E274C\n" +"78F19403701B85003EC010100B6DCA213F69C3313E1497\n" +"733EDC10100B4D49BE3E43E1433EB62C2B3F9C10280A10\n" +"79A33EF303CF3EE3FE2B3F71100BC5578B3E5322553F7A\n" +"A8313F6D10100BE8F9BB3EFDFAA13E596E793E4A10200A\n" +"B152713EB29E2A3EAC017A3E70100BB41CC83D73F7393E\n" +"D7F6323FE41030097B88663E4E0BBE3D344C1DA0200B92\n" +"ECB13DBA6BE13EA794173F8DF02C0978D33D4A40D43E13\n" +"9E603E6003100B2506A13D80D8323E4948743EFC10100B\n" +"541B2C3EEC69973E807E6F3FCE10200B990FD83E529CC3\n" +"3E7B876F3FF970100A031E3E3BE2103FC1E5713F771010\n" +"0B2FF8643E1D933D3F1E375C3EDE10100B0CCCEA3DB823\n" +"1C3E0F5E0B3F60102C0A9ACD033FC366703EA27DEC3E30\n" +"142C0AE8DA073E55BDC43EA2D1E53E4C0B100B6B9E433E\n" +"D28A9F3E94D9B03ED1301F3738F4063C0446F73D85D114\n" +"00043009DC4A6F3D8027ED3DC0B35DA03C09C5C7FF3E0A\n" +"65E13DA29A721C08100BFCFB0C3D6155DD3D2638F53DDB\n" +"203C09DDCFC93DB77AEE3D8577F9F00310043145B93EE8\n" +"2DB4100238103FFA103C09A92EE03C3065803E17D853B0\n" +"03100B0E2C4B3F3F1AEE3DED464F3FE7102C0B8543433F\n" +"43E51F3EA297C93E2488062C09A7303F0F0B413F9A261C\n" +"3E9414100B7D3D1F3EBCAD403F9DB8003FDA30100B53E7\n" +"993EBBEF883E0B0D083FD7101C0170981C80031C022D3F\n" +"90F450041130100BE21F9E3EEA5DDC3E6308D03EEE2010\n" +"0BCADD273EE259523E990DE23EEA102C0A0F98C73EA7B1\n" +"6D3EA3220E3F2C07100B5050023FBC03E43EC87D2B3FF3\n" +"20100BF31E973E890C8B3DEC30763E2920100B0C5A083D\n" +"69E4EB3E12F65D3EF420100B41D6533EAFCDEE3E1748E8\n" +"3E2720100B71AC333F0666093F08C8373E11105C0BC2A1\n" +"273E1285F63D1C28743F045C074C042CD9900019007807\n" +"7DC1051119FD11220D41346104F1112CF313000028F19B\n" +"20940348E790000A3C9003BCDB90003210900368E99000\n" +"0710D25C30900360E390001410D27020213C4F612CB016\n" +"12A5081F00CB61171C05019D9780000253270F10050575\n" +"39820007E9191F097F760EC40C0DE97D83001D2FB08200\n" +"1CB749827CC5A43001D457860452DC0472282AF3050100\n" +"01E3020050F2320001F11348101CE5978300C41101004D\n" +"0880005C200100090480006023010079928300B8481010\n" +"1E9D830094570100A5948300F85B010039968300A45C01\n" +"00E19583000C5E0100C91812B808122110101B585F0100\n" +"302C0D4684650100318D830080A30100198E830064A401\n" +"F19403001F850000650812F8081013718C830044A60100\n" +"CD9083008CB70100118F8300F408100F69998300D4C901\n" +"00959A8300FCCA0100F5801380D0881128101BD0FE0100\n" +"15A58300BC310200290F850008D002001DC68400C00E03\n" +"005D08101B5C0F030051C78400E0110300550E850038D4\n" +"0300499D8400DC100400CD081023D8110400F1978400D0\n" +"160400799A8400341F0400F94C8400405104006DB28300\n" +"F0C404009108100324C50400AD081248081018D1A68300\n" +"6CC70400E961840088E004002D6484009CE804000DB690\n" +"1403FD0400EDD708101332050021DD83002C4105002549\n" +"840090810500B1A0701433920500F94D8400ECA2050045\n" +"038500DCAF0500D9DB840070C00500059F840024DE0500\n" +"61D28400D40B0600E5C683007C60060021C72810036106\n" +"004DED381013D90600A1E68400C8DC0600C1F184001CE7\n" +"060071F0401003F406009DD1D810020307008968100394\n" +"13070065581C10D8740700D5A48400F8F80700A5888400\n" +"8CAB9A0C101FBF00BF6CB908004D92840020CD0800CD01\n" +"850034E6080089088500BC2D09002D231810136C09000D\n" +"258400E46D0900C5288400F4770900ED31981002850900\n" 
+"5508101C2C86090071D58400B42F0A00990D8500F0820A\n" +"002504850064800C001D52301012A50C002DEF8300ACBA\n" +"0C009D028400A0BB0C004908100744C00C0021FC830090\n" +"0813390DC01403C20C00690780100ECC0C00E1F5830018\n" +"CE0C00CDF1830040081BC9F2001108173DF6F012CF1816\n" +"169812D218100B0F840004D50C006DFD8300E0D8381168\n" +"140CE4E00C00F90A840058E20C00FD0B281002E40C00F5\n" +"28100B28EE0C00A1F9830000F60C00B9481C04F4F70C00\n" +"95EB2E1912F808200AEE830070FA0C0061FA8300B40811\n" +"781300FC281AFBC01012FD0C00110884006C020D00350E\n" +"8400BC090D007518100CD80D0D00DD7584003C2A0D0081\n" +"77D810022B0D00E5081010782E0D00FD418400E8300D00\n" +"1D438400B032201406398400A8370D00F1101413E45A0D\n" +"00C5FB84004C710D000DFD8400687A0D0025781013587F\n" +"0D00510B85005C940D004594840010980D00F508141478\n" +"A00D00E1D1830054B40D0085338400A4C30D002937D010\n" +"02C70D000110200398C80D008918180BD30D008DBF8100\n" +"D8050E00615D901B130E00101B81007C190E002D838100\n" +"8C290E0069B38100BC400E00E192810040500E681DD41A\n" +"1256201C079C8100345B0E004D9EC01A1C0B5E0E0029AE\n" +"810060610E0021965813100A650E00D5B28100A47C0EF1\n" +"3D019022850000D1281801F8960EC0100281004CB23814\n" +"17BB8100ECB90E0035BD8100B8BB0E009D57840014BD0E\n" +"00D979801C03D00E008105680312EF181AB7881003F80E\n" +"0049C3F01403020F0089D990100A160F0015DA84000C19\n" +"0F00E1081404281A0F0099E2381DA20E1BB1D3F01C0359\n" +"0F00C9D45804100A5C0F00A50A8500747B0F00B508128C\n" +"0817E9E3201C0AAF0F005DA1830038C80F00ED980412F8\n" +"081EC588041003C6CA0F00F0842108200A09A38300BAD6\n" +"0F0080B9A5B0081004619B83001CDA581138200642DD0F\n" +"00CFB886B008128910200E98DE0F0055A0830082EC0F00\n" +"70479AFC081B599F58110812291813C6EE182001BD1546\n" +"086D740FF10F04F2160328F18B04F1B3C4F19304D20504\n" +"F212010C43440104210212A2043C0128F691001741261D\n" +"E81CF00149BB0D00014380BF046DA80AFD09FC1C>\n" diff --git a/drivers/sensorhub/brcm/ssp_firmware.c b/drivers/sensorhub/brcm/ssp_firmware.c index b612370cfb4b..2b71ebd63ae1 100644 --- a/drivers/sensorhub/brcm/ssp_firmware.c +++ b/drivers/sensorhub/brcm/ssp_firmware.c @@ -21,7 +21,7 @@ #elif ANDROID_VERSION < 110000 //q_os #define SSP_FIRMWARE_REVISION_BCM 20091700 #else //r_os -#define SSP_FIRMWARE_REVISION_BCM 22021700 +#define SSP_FIRMWARE_REVISION_BCM 22072000 #endif #elif defined(CONFIG_SENSORS_SSP_DAVINCI) @@ -31,7 +31,7 @@ #elif ANDROID_VERSION < 110000 //q_os #define SSP_FIRMWARE_REVISION_BCM 20081200 #else //r_os -#define SSP_FIRMWARE_REVISION_BCM 22032201 +#define SSP_FIRMWARE_REVISION_BCM 22072000 #endif #else diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 5abd657fff43..9d5d15e5c204 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -701,10 +701,21 @@ static int decon_enable(struct decon_device *decon) retry_enable: DPU_EVENT_LOG(DPU_EVT_UNBLANK, &decon->sd, ktime_set(0, 0)); decon_info("decon-%d %s +\n", decon->id, __func__); + + if (decon->dt.out_type == DECON_OUT_DP) { #if defined(CONFIG_SEC_DISPLAYPORT_LOGGER) - if (decon->dt.out_type == DECON_OUT_DP) dp_logger_print("decon enable\n"); #endif + if (!IS_DISPLAYPORT_HPD_PLUG_STATE()) { +#if defined(CONFIG_SEC_DISPLAYPORT_LOGGER) + dp_logger_print("DP is not connected\n"); +#endif + decon_warn("decon-2: DP is not connected\n"); + ret = -ENODEV; + goto out; + } + } + ret = _decon_enable(decon, next_state); if (ret < 0) { decon_err("decon-%d failed to set %s (ret %d)\n", @@ -1131,7 +1142,7 @@ int decon_update_pwr_state(struct decon_device *decon, u32 mode) } } if (mode == 
DISP_PWR_OFF && decon->dt.out_type == DECON_OUT_DP - && IS_DISPLAYPORT_HPD_PLUG_STATE()) { + && IS_DISPLAYPORT_SWITCH_STATE()) { decon_info("skip decon-%d disable(hpd plug)\n", decon->id); goto out; } diff --git a/drivers/video/fbdev/exynos/dpu20/displayport.h b/drivers/video/fbdev/exynos/dpu20/displayport.h index b4e29f88d786..3cd59c783af4 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport.h +++ b/drivers/video/fbdev/exynos/dpu20/displayport.h @@ -1274,6 +1274,16 @@ static inline bool IS_DISPLAYPORT_HPD_PLUG_STATE(void) return (bool)displayport->hpd_current_state; } +static inline bool IS_DISPLAYPORT_SWITCH_STATE(void) +{ + struct displayport_device *displayport = get_displayport_drvdata(); + + if (extcon_get_state(displayport->extcon_displayport, EXTCON_DISP_DP) == true) + return true; + else + return false; +} + int displayport_enable(struct displayport_device *displayport); int displayport_disable(struct displayport_device *displayport); diff --git a/drivers/vision/iva/iva_mem.c b/drivers/vision/iva/iva_mem.c index ea477a2de648..44c59ebf64e0 100644 --- a/drivers/vision/iva/iva_mem.c +++ b/drivers/vision/iva/iva_mem.c @@ -203,6 +203,9 @@ static struct iva_mem_map *iva_mem_ion_alloc(struct iva_proc *proc, goto err_dmabuf; } + /* increase file cnt. iva_mem_ion_free() required */ + get_dma_buf(dmabuf); + /* close() should be called for full release operation*/ ion_shared_fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (ion_shared_fd < 0) { @@ -219,8 +222,6 @@ static struct iva_mem_map *iva_mem_ion_alloc(struct iva_proc *proc, iva_map_node->shared_fd = ion_shared_fd; iva_map_node->dmabuf = dmabuf; - /* increase file cnt. iva_mem_ion_free() required */ - get_dma_buf(dmabuf); iva_map_node->attachment = NULL; iva_map_node->sg_table = NULL; iva_map_node->io_va = 0x0; diff --git a/include/linux/hid.h b/include/linux/hid.h index 2ed49184644e..5fc336a89698 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -806,6 +806,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, return hdev->ll_driver == driver; } +static inline bool hid_is_usb(struct hid_device *hdev) +{ + return hid_is_using_ll_driver(hdev, &usb_hid_driver); +} + #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index b6f0ee01f2e0..3979e5af6e49 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2694,6 +2694,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u rv = 1; } else if (im) { if (src_addr) { + spin_lock_bh(&im->lock); for (psf = im->sources; psf; psf = psf->sf_next) { if (psf->sf_inaddr == src_addr) break; @@ -2704,6 +2705,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u im->sfcount[MCAST_EXCLUDE]; else rv = im->sfcount[MCAST_EXCLUDE] != 0; + spin_unlock_bh(&im->lock); } else rv = 1; /* unspecified source; tentatively allow */ } diff --git a/security/sdp/sdp_mm.c b/security/sdp/sdp_mm.c index 55612103cf15..8b59541a9d80 100644 --- a/security/sdp/sdp_mm.c +++ b/security/sdp/sdp_mm.c @@ -88,9 +88,11 @@ int32_t sdp_mm_set_process_sensitive(unsigned int proc_id) struct task_struct *task = NULL; uid_t uid; + rcu_read_lock(); /* current.task.sensitive = 1 */ task = pid_task(find_vpid(proc_id), PIDTYPE_PID); if (task) { + get_task_struct(task); uid = from_kuid(&init_user_ns, task_uid(task)); if (((uid/PER_USER_RANGE) <= 199) && ((uid/PER_USER_RANGE) >= 100)) { if (dek_is_sdp_uid(uid)) { @@ -109,6 +111,9 @@ int32_t sdp_mm_set_process_sensitive(unsigned int proc_id) } task_err: + if 
(task) + put_task_struct(task); + rcu_read_unlock(); return ret; } diff --git a/sound/soc/samsung/abox/abox_mmapfd.c b/sound/soc/samsung/abox/abox_mmapfd.c index 82e3b0cd5525..2b048b7d39a2 100644 --- a/sound/soc/samsung/abox/abox_mmapfd.c +++ b/sound/soc/samsung/abox/abox_mmapfd.c @@ -46,8 +46,6 @@ int abox_mmap_fd(struct abox_platform_data *data, } else { ret = -EFAULT; dev_err(dev, "%s dma_buf_fd is failed\n", __func__); - dma_buf_put(buf->dma_buf); - goto error_get_fd; } diff --git a/sound/soc/samsung/abox/abox_rdma.c b/sound/soc/samsung/abox/abox_rdma.c index dd556eed65ed..f02f667fdd9f 100644 --- a/sound/soc/samsung/abox/abox_rdma.c +++ b/sound/soc/samsung/abox/abox_rdma.c @@ -2276,8 +2276,10 @@ static int abox_rdma_fio_common_ioctl(struct snd_hwdep *hw, struct file *filp, switch (cmd) { case SNDRV_PCM_IOCTL_MMAP_DATA_FD: ret = abox_mmap_fd(data, &mmap_fd); - if (ret < 0) + if (ret < 0) { dev_err(dev, "%s MMAP_FD failed: %d\n", __func__, ret); + return ret; + } if (copy_to_user(_arg, &mmap_fd, sizeof(mmap_fd))) return -EFAULT; diff --git a/sound/soc/samsung/abox/abox_wdma.c b/sound/soc/samsung/abox/abox_wdma.c index 11bf6b1df6e4..c409ce297ebb 100644 --- a/sound/soc/samsung/abox/abox_wdma.c +++ b/sound/soc/samsung/abox/abox_wdma.c @@ -744,8 +744,10 @@ static int abox_wdma_fio_common_ioctl(struct snd_hwdep *hw, struct file *filp, switch (cmd) { case SNDRV_PCM_IOCTL_MMAP_DATA_FD: ret = abox_mmap_fd(data, &mmap_fd); - if (ret < 0) + if (ret < 0) { dev_err(dev, "%s MMAP_FD failed: %d\n", __func__, ret); + return ret; + } if (copy_to_user(_arg, &mmap_fd, sizeof(mmap_fd))) return -EFAULT; From c5af48e9588c57d02c90bf877670d4795c63b0c4 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Sun, 4 Sep 2022 11:55:40 +0400 Subject: [PATCH 002/452] G977N - HVG5 Signed-off-by: Denis Efremov --- arch/arm64/boot/dts/G977N.mk | 8 ++++---- arch/arm64/configs/exynos9820-beyondxks_defconfig | 9 +++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/arch/arm64/boot/dts/G977N.mk b/arch/arm64/boot/dts/G977N.mk index 836a6f2d4f2e..94efe66ca5f6 100644 --- a/arch/arm64/boot/dts/G977N.mk +++ b/arch/arm64/boot/dts/G977N.mk @@ -1,14 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9820.dtb -dtbo-y += samsung/exynos9820-beyondx_kor_08.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_03.dtbo -dtbo-y += samsung/exynos9820-beyondx_kor_07.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_06.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_04.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_07.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_01.dtbo -dtbo-y += samsung/exynos9820-beyondx_kor_00.dtbo -dtbo-y += samsung/exynos9820-beyondx_kor_05.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_02.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_08.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_05.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_00.dtbo targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/configs/exynos9820-beyondxks_defconfig b/arch/arm64/configs/exynos9820-beyondxks_defconfig index b9001ab0881b..0d0d503e78d6 100644 --- a/arch/arm64/configs/exynos9820-beyondxks_defconfig +++ b/arch/arm64/configs/exynos9820-beyondxks_defconfig @@ -1350,6 +1350,7 @@ CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_MAX is not set CONFIG_CMA_ALIGNMENT=8 CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_CPU_CAPACITY_FIXUP=y # # Bus devices @@ -4001,6 +4002,8 @@ CONFIG_HID_PICOLCD=y # CONFIG_HID_PICOLCD_LEDS is not set # CONFIG_HID_PICOLCD_CIR is not set CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PLAYSTATION=y 
+CONFIG_PLAYSTATION_FF=y CONFIG_HID_PRIMAX=y # CONFIG_HID_RETRODE is not set CONFIG_HID_ROCCAT=y @@ -5557,9 +5560,6 @@ CONFIG_SENSORS_FINGERPRINT=y # CONFIG_SENSORS_FPRINT_SECURE is not set CONFIG_SENSORS_QBT2000=y # CONFIG_SENSORS_ET5XX is not set -# CONFIG_FIVE_TEE_DRIVER is not set -CONFIG_ICD=y -CONFIG_ICD_USE_TZDEV=y CONFIG_SEC_EXT=y CONFIG_SEC_REBOOT=y @@ -5660,9 +5660,6 @@ CONFIG_SEC_HEAVY_TASK_CPU=y # CONFIG_VBUS_NOTIFIER=y CONFIG_KPERFMON=y -CONFIG_TZIC=y -CONFIG_TZIC_USE_TZDEV=y -# CONFIG_TZIC_DEFAULT is not set CONFIG_SPU_VERIFY=y # From d0615d6495df6e7a090dbf82941518d71580b1ae Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Nov 2022 18:02:08 +0400 Subject: [PATCH 003/452] N975F - HVJ1 Signed-off-by: Denis Efremov --- arch/arm64/boot/dts/N975F.mk | 12 +- arch/arm64/configs/exynos9820-d2s_defconfig | 1 + include/net/af_unix.h | 1 + net/Makefile | 2 +- net/unix/Kconfig | 5 + net/unix/Makefile | 2 + net/unix/af_unix.c | 102 ++++++-------- net/unix/garbage.c | 68 +-------- net/unix/scm.c | 149 ++++++++++++++++++++ net/unix/scm.h | 10 ++ 10 files changed, 222 insertions(+), 130 deletions(-) create mode 100644 net/unix/scm.c create mode 100644 net/unix/scm.h diff --git a/arch/arm64/boot/dts/N975F.mk b/arch/arm64/boot/dts/N975F.mk index 3958048b6220..d032a91c0d47 100644 --- a/arch/arm64/boot/dts/N975F.mk +++ b/arch/arm64/boot/dts/N975F.mk @@ -1,15 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9825.dtb -dtbo-y += samsung/exynos9820-d2_eur_open_19.dtbo -dtbo-y += samsung/exynos9820-d2_eur_open_18.dtbo -dtbo-y += samsung/exynos9820-d2_eur_open_17.dtbo -dtbo-y += samsung/exynos9820-d2_eur_open_24.dtbo dtbo-y += samsung/exynos9820-d2_eur_open_02.dtbo -dtbo-y += samsung/exynos9820-d2_eur_open_22.dtbo +dtbo-y += samsung/exynos9820-d2_eur_open_17.dtbo +dtbo-y += samsung/exynos9820-d2_eur_open_20.dtbo dtbo-y += samsung/exynos9820-d2_eur_open_16.dtbo +dtbo-y += samsung/exynos9820-d2_eur_open_19.dtbo dtbo-y += samsung/exynos9820-d2_eur_open_21.dtbo +dtbo-y += samsung/exynos9820-d2_eur_open_24.dtbo dtbo-y += samsung/exynos9820-d2_eur_open_23.dtbo -dtbo-y += samsung/exynos9820-d2_eur_open_20.dtbo +dtbo-y += samsung/exynos9820-d2_eur_open_22.dtbo +dtbo-y += samsung/exynos9820-d2_eur_open_18.dtbo targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/configs/exynos9820-d2s_defconfig b/arch/arm64/configs/exynos9820-d2s_defconfig index 46b1eb8da316..95ae3d3796e0 100644 --- a/arch/arm64/configs/exynos9820-d2s_defconfig +++ b/arch/arm64/configs/exynos9820-d2s_defconfig @@ -771,6 +771,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y diff --git a/include/net/af_unix.h b/include/net/af_unix.h index a5ba41b3b867..7ec1cdb66be8 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -10,6 +10,7 @@ void unix_inflight(struct user_struct *user, struct file *fp); void unix_notinflight(struct user_struct *user, struct file *fp); +void unix_destruct_scm(struct sk_buff *skb); void unix_gc(void); void wait_for_unix_gc(void); struct sock *unix_get_socket(struct file *filp); diff --git a/net/Makefile b/net/Makefile index 4b8efa744363..864f6593220e 100644 --- a/net/Makefile +++ b/net/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ -obj-$(CONFIG_UNIX) += unix/ +obj-$(CONFIG_UNIX_SCM) += unix/ obj-$(CONFIG_NET) += ipv6/ obj-$(CONFIG_MPTCP) += 
mptcp/ obj-$(CONFIG_PACKET) += packet/ diff --git a/net/unix/Kconfig b/net/unix/Kconfig index 8b31ab85d050..3b9e450656a4 100644 --- a/net/unix/Kconfig +++ b/net/unix/Kconfig @@ -19,6 +19,11 @@ config UNIX Say Y unless you know what you are doing. +config UNIX_SCM + bool + depends on UNIX + default y + config UNIX_DIAG tristate "UNIX: socket monitoring interface" depends on UNIX diff --git a/net/unix/Makefile b/net/unix/Makefile index ffd0a275c3a7..54e58cc4f945 100644 --- a/net/unix/Makefile +++ b/net/unix/Makefile @@ -10,3 +10,5 @@ unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o obj-$(CONFIG_UNIX_DIAG) += unix_diag.o unix_diag-y := diag.o + +obj-$(CONFIG_UNIX_SCM) += scm.o diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2adfcc6dec5a..94d33f541b7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -119,6 +119,8 @@ #include #include +#include "scm.h" + struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; EXPORT_SYMBOL_GPL(unix_socket_table); DEFINE_SPINLOCK(unix_table_lock); @@ -1498,65 +1500,51 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_ return err; } -static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) -{ - int i; - - scm->fp = UNIXCB(skb).fp; - UNIXCB(skb).fp = NULL; - - for (i = scm->fp->count-1; i >= 0; i--) - unix_notinflight(scm->fp->user, scm->fp->fp[i]); -} - -static void unix_destruct_scm(struct sk_buff *skb) -{ - struct scm_cookie scm; - memset(&scm, 0, sizeof(scm)); - scm.pid = UNIXCB(skb).pid; - if (UNIXCB(skb).fp) - unix_detach_fds(&scm, skb); - - /* Alas, it calls VFS */ - /* So fscking what? fput() had been SMP-safe since the last Summer */ - scm_destroy(&scm); - sock_wfree(skb); -} - -/* - * The "user->unix_inflight" variable is protected by the garbage - * collection lock, and we just read it locklessly here. If you go - * over the limit, there might be a tiny race in actually noticing - * it across threads. Tough. - */ -static inline bool too_many_unix_fds(struct task_struct *p) -{ - struct user_struct *user = current_user(); - - if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) - return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); - return false; -} - -static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) { - int i; - - if (too_many_unix_fds(current)) - return -ETOOMANYREFS; + scm->fp = scm_fp_dup(UNIXCB(skb).fp); /* - * Need to duplicate file references for the sake of garbage - * collection. Otherwise a socket in the fps might become a - * candidate for GC while the skb is not yet queued. + * Garbage collection of unix sockets starts by selecting a set of + * candidate sockets which have reference only from being in flight + * (total_refs == inflight_refs). This condition is checked once during + * the candidate collection phase, and candidates are marked as such, so + * that non-candidates can later be ignored. While inflight_refs is + * protected by unix_gc_lock, total_refs (file count) is not, hence this + * is an instantaneous decision. + * + * Once a candidate, however, the socket must not be reinstalled into a + * file descriptor while the garbage collection is in progress. + * + * If the above conditions are met, then the directed graph of + * candidates (*) does not change while unix_gc_lock is held. 
+ * + * Any operation that changes the file count through file descriptors + * (dup, close, sendmsg) does not change the graph since candidates are + * not installed in fds. + * + * Dequeuing a candidate via recvmsg would install it into an fd, but + * that takes unix_gc_lock to decrement the inflight count, so it's + * serialized with garbage collection. + * + * MSG_PEEK is special in that it does not change the inflight count, + * yet does install the socket into an fd. The following lock/unlock + * pair is to ensure serialization with garbage collection. It must be + * done between incrementing the file count and installing the file into + * an fd. + * + * If garbage collection starts after the barrier provided by the + * lock/unlock, then it will see the elevated refcount and not mark this + * as a candidate. If a garbage collection is already in progress + * before the file count was incremented, then the lock/unlock pair will + * ensure that garbage collection is finished before progressing to + * installing the fd. + * + * (*) A -> B where B is on the queue of A or B is on the queue of C + * which is on the queue of listening socket A. */ - UNIXCB(skb).fp = scm_fp_dup(scm->fp); - if (!UNIXCB(skb).fp) - return -ENOMEM; - - for (i = scm->fp->count - 1; i >= 0; i--) - unix_inflight(scm->fp->user, scm->fp->fp[i]); - return 0; + spin_lock(&unix_gc_lock); + spin_unlock(&unix_gc_lock); } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) @@ -2183,7 +2171,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) - scm.fp = scm_fp_dup(UNIXCB(skb).fp); + unix_peek_fds(&scm, skb); } err = (flags & MSG_TRUNC) ? skb->len - skip : size; @@ -2424,7 +2412,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state, /* It is questionable, see note in unix_dgram_recvmsg. */ if (UNIXCB(skb).fp) - scm.fp = scm_fp_dup(UNIXCB(skb).fp); + unix_peek_fds(&scm, skb); sk_peek_offset_fwd(sk, chunk); diff --git a/net/unix/garbage.c b/net/unix/garbage.c index c36757e72844..8bbe1b8e4ff7 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -86,77 +86,13 @@ #include #include +#include "scm.h" + /* Internal data structures and random procedures: */ -static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); -static DEFINE_SPINLOCK(unix_gc_lock); static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); -unsigned int unix_tot_inflight; - -struct sock *unix_get_socket(struct file *filp) -{ - struct sock *u_sock = NULL; - struct inode *inode = file_inode(filp); - - /* Socket ? */ - if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { - struct socket *sock = SOCKET_I(inode); - struct sock *s = sock->sk; - - /* PF_UNIX ? */ - if (s && sock->ops && sock->ops->family == PF_UNIX) - u_sock = s; - } - return u_sock; -} - -/* Keep the number of times in flight count for the file - * descriptor if it is for an AF_UNIX socket.
- */ - -void unix_inflight(struct user_struct *user, struct file *fp) -{ - struct sock *s = unix_get_socket(fp); - - spin_lock(&unix_gc_lock); - - if (s) { - struct unix_sock *u = unix_sk(s); - - if (atomic_long_inc_return(&u->inflight) == 1) { - BUG_ON(!list_empty(&u->link)); - list_add_tail(&u->link, &gc_inflight_list); - } else { - BUG_ON(list_empty(&u->link)); - } - unix_tot_inflight++; - } - user->unix_inflight++; - spin_unlock(&unix_gc_lock); -} - -void unix_notinflight(struct user_struct *user, struct file *fp) -{ - struct sock *s = unix_get_socket(fp); - - spin_lock(&unix_gc_lock); - - if (s) { - struct unix_sock *u = unix_sk(s); - - BUG_ON(!atomic_long_read(&u->inflight)); - BUG_ON(list_empty(&u->link)); - - if (atomic_long_dec_and_test(&u->inflight)) - list_del_init(&u->link); - unix_tot_inflight--; - } - user->unix_inflight--; - spin_unlock(&unix_gc_lock); -} - static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { diff --git a/net/unix/scm.c b/net/unix/scm.c new file mode 100644 index 000000000000..e13d320c41c7 --- /dev/null +++ b/net/unix/scm.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "scm.h" + +unsigned int unix_tot_inflight; +EXPORT_SYMBOL(unix_tot_inflight); + +LIST_HEAD(gc_inflight_list); +EXPORT_SYMBOL(gc_inflight_list); + +DEFINE_SPINLOCK(unix_gc_lock); +EXPORT_SYMBOL(unix_gc_lock); + +struct sock *unix_get_socket(struct file *filp) +{ + struct sock *u_sock = NULL; + struct inode *inode = file_inode(filp); + + /* Socket ? */ + if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { + struct socket *sock = SOCKET_I(inode); + struct sock *s = sock->sk; + + /* PF_UNIX ? */ + if (s && sock->ops && sock->ops->family == PF_UNIX) + u_sock = s; + } + return u_sock; +} +EXPORT_SYMBOL(unix_get_socket); + +/* Keep the number of times in flight count for the file + * descriptor if it is for an AF_UNIX socket. + */ +void unix_inflight(struct user_struct *user, struct file *fp) +{ + struct sock *s = unix_get_socket(fp); + + spin_lock(&unix_gc_lock); + + if (s) { + struct unix_sock *u = unix_sk(s); + + if (atomic_long_inc_return(&u->inflight) == 1) { + BUG_ON(!list_empty(&u->link)); + list_add_tail(&u->link, &gc_inflight_list); + } else { + BUG_ON(list_empty(&u->link)); + } + unix_tot_inflight++; + } + user->unix_inflight++; + spin_unlock(&unix_gc_lock); +} + +void unix_notinflight(struct user_struct *user, struct file *fp) +{ + struct sock *s = unix_get_socket(fp); + + spin_lock(&unix_gc_lock); + + if (s) { + struct unix_sock *u = unix_sk(s); + + BUG_ON(!atomic_long_read(&u->inflight)); + BUG_ON(list_empty(&u->link)); + + if (atomic_long_dec_and_test(&u->inflight)) + list_del_init(&u->link); + unix_tot_inflight--; + } + user->unix_inflight--; + spin_unlock(&unix_gc_lock); +} + +/* + * The "user->unix_inflight" variable is protected by the garbage + * collection lock, and we just read it locklessly here. If you go + * over the limit, there might be a tiny race in actually noticing + * it across threads. Tough. 
+ */ +static inline bool too_many_unix_fds(struct task_struct *p) +{ + struct user_struct *user = current_user(); + + if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); + return false; +} + +int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + int i; + + if (too_many_unix_fds(current)) + return -ETOOMANYREFS; + + /* + * Need to duplicate file references for the sake of garbage + * collection. Otherwise a socket in the fps might become a + * candidate for GC while the skb is not yet queued. + */ + UNIXCB(skb).fp = scm_fp_dup(scm->fp); + if (!UNIXCB(skb).fp) + return -ENOMEM; + + for (i = scm->fp->count - 1; i >= 0; i--) + unix_inflight(scm->fp->user, scm->fp->fp[i]); + return 0; +} +EXPORT_SYMBOL(unix_attach_fds); + +void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + int i; + + scm->fp = UNIXCB(skb).fp; + UNIXCB(skb).fp = NULL; + + for (i = scm->fp->count-1; i >= 0; i--) + unix_notinflight(scm->fp->user, scm->fp->fp[i]); +} +EXPORT_SYMBOL(unix_detach_fds); + +void unix_destruct_scm(struct sk_buff *skb) +{ + struct scm_cookie scm; + + memset(&scm, 0, sizeof(scm)); + scm.pid = UNIXCB(skb).pid; + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); + + /* Alas, it calls VFS */ + /* So fscking what? fput() had been SMP-safe since the last Summer */ + scm_destroy(&scm); + sock_wfree(skb); +} +EXPORT_SYMBOL(unix_destruct_scm); diff --git a/net/unix/scm.h b/net/unix/scm.h new file mode 100644 index 000000000000..5a255a477f16 --- /dev/null +++ b/net/unix/scm.h @@ -0,0 +1,10 @@ +#ifndef NET_UNIX_SCM_H +#define NET_UNIX_SCM_H + +extern struct list_head gc_inflight_list; +extern spinlock_t unix_gc_lock; + +int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb); +void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb); + +#endif From 983e7fa0c3baf8a513c9602fdd3767cc7dc83426 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Nov 2022 20:13:28 +0400 Subject: [PATCH 004/452] N976B - HVJ1 Signed-off-by: Denis Efremov --- arch/arm64/boot/dts/N976B.mk | 14 +++++++------- arch/arm64/configs/exynos9820-d2x_defconfig | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/arm64/boot/dts/N976B.mk b/arch/arm64/boot/dts/N976B.mk index d81666604981..3a7c8af03efa 100644 --- a/arch/arm64/boot/dts/N976B.mk +++ b/arch/arm64/boot/dts/N976B.mk @@ -1,15 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9825.dtb -dtbo-y += samsung/exynos9820-d2x_eur_open_20.dtbo -dtbo-y += samsung/exynos9820-d2x_eur_open_19.dtbo -dtbo-y += samsung/exynos9820-d2x_eur_open_17.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_22.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_23.dtbo dtbo-y += samsung/exynos9820-d2x_eur_open_24.dtbo -dtbo-y += samsung/exynos9820-d2x_eur_open_16.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_19.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_18.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_20.dtbo dtbo-y += samsung/exynos9820-d2x_eur_open_21.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_16.dtbo +dtbo-y += samsung/exynos9820-d2x_eur_open_17.dtbo dtbo-y += samsung/exynos9820-d2x_eur_open_02.dtbo -dtbo-y += samsung/exynos9820-d2x_eur_open_18.dtbo -dtbo-y += samsung/exynos9820-d2x_eur_open_22.dtbo -dtbo-y += samsung/exynos9820-d2x_eur_open_23.dtbo targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/configs/exynos9820-d2x_defconfig b/arch/arm64/configs/exynos9820-d2x_defconfig index 4bf2209f77d9..b2a22ad610e1 100644 --- 
a/arch/arm64/configs/exynos9820-d2x_defconfig +++ b/arch/arm64/configs/exynos9820-d2x_defconfig @@ -771,6 +771,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y From 67796261419790b15ce2e629259293c8694a8d33 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Mon, 14 Nov 2022 16:25:52 +0400 Subject: [PATCH 005/452] G973F - HVJ5 Signed-off-by: Denis Efremov --- arch/arm/include/asm/kvm_host.h | 6 + arch/arm64/Kconfig | 25 + arch/arm64/boot/dts/G973F.mk | 10 +- .../exynos9820-beyond1lte_eur_open_17.dts | 5 +- .../exynos9820-beyond1lte_eur_open_18.dts | 5 +- .../exynos9820-beyond1lte_eur_open_19.dts | 5 +- .../exynos9820-beyond1lte_eur_open_20.dts | 5 +- .../exynos9820-beyond1lte_eur_open_21.dts | 5 +- .../exynos9820-beyond1lte_eur_open_22.dts | 5 +- .../exynos9820-beyond1lte_eur_open_23.dts | 5 +- .../exynos9820-beyond1lte_eur_open_24.dts | 7 +- .../exynos9820-beyond1lte_eur_open_26.dts | 7 +- .../configs/exynos9820-beyond1lte_defconfig | 10 +- arch/arm64/include/asm/assembler.h | 34 + arch/arm64/include/asm/cpu.h | 1 + arch/arm64/include/asm/cpucaps.h | 10 +- arch/arm64/include/asm/cpufeature.h | 289 +++++- arch/arm64/include/asm/cputype.h | 65 +- arch/arm64/include/asm/fixmap.h | 6 +- arch/arm64/include/asm/kvm_asm.h | 2 - arch/arm64/include/asm/kvm_host.h | 16 + arch/arm64/include/asm/kvm_mmu.h | 2 +- arch/arm64/include/asm/mmu.h | 8 +- arch/arm64/include/asm/processor.h | 22 +- arch/arm64/include/asm/ptrace.h | 58 +- arch/arm64/include/asm/sections.h | 6 + arch/arm64/include/asm/sysreg.h | 100 +- arch/arm64/include/asm/traps.h | 6 + arch/arm64/include/asm/vectors.h | 74 ++ arch/arm64/include/asm/virt.h | 6 - arch/arm64/include/uapi/asm/hwcap.h | 12 + arch/arm64/include/uapi/asm/ptrace.h | 1 + arch/arm64/kernel/armv8_deprecated.c | 17 +- arch/arm64/kernel/cpu_errata.c | 870 ++++++++++++++---- arch/arm64/kernel/cpufeature.c | 561 ++++++++--- arch/arm64/kernel/cpuinfo.c | 13 + arch/arm64/kernel/fpsimd.c | 1 + arch/arm64/kernel/process.c | 31 + arch/arm64/kernel/ptrace.c | 13 +- arch/arm64/kernel/smp.c | 44 - arch/arm64/kernel/ssbd.c | 22 + arch/arm64/kernel/traps.c | 26 +- arch/arm64/kvm/hyp/switch.c | 19 +- arch/arm64/kvm/hyp/sysreg-sr.c | 11 + arch/arm64/mm/fault.c | 3 +- arch/arm64/mm/mmu.c | 11 +- drivers/android/binder.c | 12 + drivers/battery_v2/include/sec_battery.h | 6 + .../battery_v2/include/sec_battery_sysfs.h | 9 +- .../battery_v2/include/sec_charging_common.h | 58 +- drivers/battery_v2/sec_battery.c | 375 +++++++- drivers/battery_v2/sec_battery_dt.c | 208 ++++- drivers/battery_v2/sec_battery_sysfs.c | 192 ++-- drivers/block/zram/zram_drv.c | 16 +- drivers/clocksource/arm_arch_timer.c | 15 + drivers/dma-buf/dma-buf-trace.h | 8 +- drivers/fingerprint/et5xx-spi.c | 1 + drivers/media/platform/exynos/mfc/mfc_qos.c | 4 +- drivers/mfd/max77705-irq.c | 3 +- drivers/misc/tzdev/Kconfig | 23 +- drivers/misc/tzdev/Makefile | 1 + drivers/misc/tzdev/startup.tzar | Bin 5048986 -> 5044890 bytes drivers/misc/tzdev/sysdep.h | 20 - drivers/misc/tzdev/teec/shared_memory.c | 2 - drivers/misc/tzdev/tz_common.h | 4 - drivers/misc/tzdev/tz_mem.c | 374 +------- drivers/misc/tzdev/tz_mem.h | 20 +- drivers/misc/tzdev/tzdev.c | 83 +- drivers/misc/tzdev/tzdev.h | 3 - drivers/misc/tzdev/umem.c | 198 ++++ drivers/misc/tzdev/umem.h | 22 + drivers/net/usb/ax88179_178a.c | 4 + drivers/net/usb/cdc_ncm.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/Kconfig | 2 +- 
.../wireless/broadcom/bcmdhd_101_16/Makefile | 6 +- .../wireless/broadcom/bcmdhd_101_16/aiutils.c | 2 +- .../broadcom/bcmdhd_101_16/bcm_app_utils.c | 2 +- .../broadcom/bcmdhd_101_16/bcm_l2_filter.c | 2 +- .../broadcom/bcmdhd_101_16/bcmbloom.c | 2 +- .../broadcom/bcmdhd_101_16/bcmevent.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/bcmsdh.c | 2 +- .../broadcom/bcmdhd_101_16/bcmsdh_linux.c | 2 +- .../broadcom/bcmdhd_101_16/bcmsdh_sdmmc.c | 2 +- .../bcmdhd_101_16/bcmsdh_sdmmc_linux.c | 2 +- .../broadcom/bcmdhd_101_16/bcmsdstd.h | 2 +- .../broadcom/bcmdhd_101_16/bcmstdlib_s.c | 2 +- .../broadcom/bcmdhd_101_16/bcmutils.c | 2 +- .../broadcom/bcmdhd_101_16/bcmwifi_channels.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/bcmxtlv.c | 2 +- .../net/wireless/broadcom/bcmdhd_101_16/dhd.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_bitpack.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_bitpack.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_bus.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_cdc.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_cfg80211.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_cfg80211.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_common.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_custom_cis.c | 2 +- .../bcmdhd_101_16/dhd_custom_exynos.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_custom_gpio.c | 2 +- .../bcmdhd_101_16/dhd_custom_memprealloc.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_custom_msm.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_custom_sec.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_dbg.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_dbg_ring.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_dbg_ring.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_debug.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_debug.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_debug_linux.c | 2 +- .../bcmdhd_101_16/dhd_event_log_filter.c | 2 +- .../bcmdhd_101_16/dhd_event_log_filter.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_flowring.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_flowring.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_ip.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_ip.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux.c | 11 +- .../broadcom/bcmdhd_101_16/dhd_linux.h | 2 +- .../bcmdhd_101_16/dhd_linux_exportfs.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_lb.c | 2 +- .../bcmdhd_101_16/dhd_linux_pktdump.c | 28 +- .../bcmdhd_101_16/dhd_linux_pktdump.h | 2 +- .../bcmdhd_101_16/dhd_linux_platdev.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_priv.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_sched.c | 2 +- .../bcmdhd_101_16/dhd_linux_sock_qos.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_tx.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_tx.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_wq.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_linux_wq.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_mschdbg.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_mschdbg.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_msgbuf.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_pcie.c | 4 +- .../broadcom/bcmdhd_101_16/dhd_pcie.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_pcie_linux.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_pktlog.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_pktlog.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_pno.c | 12 +- .../wireless/broadcom/bcmdhd_101_16/dhd_pno.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_proto.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_qos_algo.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_rtt.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/dhd_rtt.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_sdio.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_sec_feature.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_statlog.c | 2 +- 
.../broadcom/bcmdhd_101_16/dhd_statlog.h | 2 +- .../broadcom/bcmdhd_101_16/dhd_wlfc.c | 2 +- .../broadcom/bcmdhd_101_16/dhd_wlfc.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/frag.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/frag.h | 2 +- .../broadcom/bcmdhd_101_16/hnd_pktpool.c | 2 +- .../broadcom/bcmdhd_101_16/hnd_pktq.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/hndpmu.c | 2 +- .../broadcom/bcmdhd_101_16/include/802.11.h | 2 +- .../broadcom/bcmdhd_101_16/include/802.11ax.h | 2 +- .../broadcom/bcmdhd_101_16/include/802.11s.h | 2 +- .../broadcom/bcmdhd_101_16/include/802.1d.h | 2 +- .../broadcom/bcmdhd_101_16/include/802.3.h | 2 +- .../broadcom/bcmdhd_101_16/include/aidmp.h | 2 +- .../bcmdhd_101_16/include/bcm_l2_filter.h | 2 +- .../bcmdhd_101_16/include/bcm_mpool_pub.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcm_ring.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmarp.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmbloom.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmcdc.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmdefs.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmdevs.h | 2 +- .../bcmdhd_101_16/include/bcmdevs_legacy.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmdhcp.h | 2 +- .../bcmdhd_101_16/include/bcmendian.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmerror.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmeth.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmevent.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmicmp.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmiov.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmip.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmipv6.h | 2 +- .../bcmdhd_101_16/include/bcmmsgbuf.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmpcie.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmproto.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmrand.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmsdbus.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmsdh.h | 2 +- .../bcmdhd_101_16/include/bcmsdh_sdmmc.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmsdpcm.h | 2 +- .../bcmdhd_101_16/include/bcmstdlib_s.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmtcp.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmtlv.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmudp.h | 2 +- .../broadcom/bcmdhd_101_16/include/bcmutils.h | 2 +- .../bcmdhd_101_16/include/bcmwifi_channels.h | 2 +- .../bcmdhd_101_16/include/bcmwifi_rates.h | 2 +- .../bcmdhd_101_16/include/bcmwifi_rspec.h | 2 +- .../bcmdhd_101_16/include/brcm_nl80211.h | 2 +- .../bcmdhd_101_16/include/dhd_daemon.h | 2 +- .../broadcom/bcmdhd_101_16/include/dhdioctl.h | 2 +- .../bcmdhd_101_16/include/dngl_stats.h | 2 +- .../bcmdhd_101_16/include/dnglevent.h | 2 +- .../bcmdhd_101_16/include/dnglioctl.h | 2 +- .../broadcom/bcmdhd_101_16/include/eap.h | 2 +- .../broadcom/bcmdhd_101_16/include/eapol.h | 2 +- .../broadcom/bcmdhd_101_16/include/epivers.h | 16 +- .../broadcom/bcmdhd_101_16/include/etd.h | 2 +- .../broadcom/bcmdhd_101_16/include/ethernet.h | 2 +- .../bcmdhd_101_16/include/event_log.h | 2 +- .../bcmdhd_101_16/include/event_log_payload.h | 2 +- .../bcmdhd_101_16/include/event_log_set.h | 2 +- .../bcmdhd_101_16/include/event_log_tag.h | 2 +- .../bcmdhd_101_16/include/event_trace.h | 2 +- .../broadcom/bcmdhd_101_16/include/fils.h | 2 +- .../bcmdhd_101_16/include/hnd_armtrap.h | 2 +- .../broadcom/bcmdhd_101_16/include/hnd_cons.h | 2 +- .../bcmdhd_101_16/include/hnd_debug.h | 2 +- .../bcmdhd_101_16/include/hnd_pktpool.h | 2 +- .../broadcom/bcmdhd_101_16/include/hnd_pktq.h | 2 +- .../broadcom/bcmdhd_101_16/include/hnd_trap.h | 2 +- 
.../broadcom/bcmdhd_101_16/include/hndchipc.h | 2 +- .../broadcom/bcmdhd_101_16/include/hndlhl.h | 2 +- .../broadcom/bcmdhd_101_16/include/hndoobr.h | 2 +- .../broadcom/bcmdhd_101_16/include/hndpmu.h | 2 +- .../broadcom/bcmdhd_101_16/include/hndsoc.h | 2 +- .../bcmdhd_101_16/include/linux_osl.h | 2 +- .../bcmdhd_101_16/include/linux_pkt.h | 2 +- .../broadcom/bcmdhd_101_16/include/linuxver.h | 2 +- .../broadcom/bcmdhd_101_16/include/lpflags.h | 2 +- .../broadcom/bcmdhd_101_16/include/mbo.h | 2 +- .../broadcom/bcmdhd_101_16/include/msgtrace.h | 2 +- .../broadcom/bcmdhd_101_16/include/nan.h | 2 +- .../broadcom/bcmdhd_101_16/include/osl.h | 2 +- .../broadcom/bcmdhd_101_16/include/osl_decl.h | 2 +- .../broadcom/bcmdhd_101_16/include/osl_ext.h | 2 +- .../broadcom/bcmdhd_101_16/include/p2p.h | 2 +- .../include/packed_section_end.h | 2 +- .../include/packed_section_start.h | 2 +- .../broadcom/bcmdhd_101_16/include/pcicfg.h | 2 +- .../bcmdhd_101_16/include/pcie_core.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbchipc.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbconfig.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbgci.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbhndarm.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbhnddma.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbpcmcia.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbsdio.h | 2 +- .../bcmdhd_101_16/include/sbsdpcmdev.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbsocram.h | 2 +- .../broadcom/bcmdhd_101_16/include/sbsysmem.h | 2 +- .../broadcom/bcmdhd_101_16/include/sdio.h | 2 +- .../broadcom/bcmdhd_101_16/include/sdioh.h | 2 +- .../broadcom/bcmdhd_101_16/include/sdiovar.h | 2 +- .../broadcom/bcmdhd_101_16/include/siutils.h | 2 +- .../broadcom/bcmdhd_101_16/include/typedefs.h | 2 +- .../broadcom/bcmdhd_101_16/include/vlan.h | 2 +- .../broadcom/bcmdhd_101_16/include/wl_bam.h | 2 +- .../bcmdhd_101_16/include/wl_bigdata.h | 2 +- .../bcmdhd_101_16/include/wldev_common.h | 2 +- .../bcmdhd_101_16/include/wlfc_proto.h | 2 +- .../broadcom/bcmdhd_101_16/include/wlioctl.h | 2 +- .../bcmdhd_101_16/include/wlioctl_defs.h | 2 +- .../bcmdhd_101_16/include/wlioctl_utils.h | 2 +- .../broadcom/bcmdhd_101_16/include/wpa.h | 2 +- .../broadcom/bcmdhd_101_16/linux_osl.c | 2 +- .../broadcom/bcmdhd_101_16/linux_osl_priv.h | 2 +- .../broadcom/bcmdhd_101_16/linux_pkt.c | 2 +- .../broadcom/bcmdhd_101_16/pcie_core.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/sbutils.c | 2 +- .../wireless/broadcom/bcmdhd_101_16/siutils.c | 2 +- .../broadcom/bcmdhd_101_16/siutils_priv.h | 2 +- .../bcmdhd_101_16/wb_regon_coordinator.c | 2 +- .../broadcom/bcmdhd_101_16/wifi_stats.h | 2 +- .../broadcom/bcmdhd_101_16/wl_android.c | 2 +- .../broadcom/bcmdhd_101_16/wl_android.h | 2 +- .../wireless/broadcom/bcmdhd_101_16/wl_bam.c | 13 +- .../broadcom/bcmdhd_101_16/wl_bigdata.c | 14 +- .../broadcom/bcmdhd_101_16/wl_cfg80211.c | 4 +- .../broadcom/bcmdhd_101_16/wl_cfg80211.h | 2 +- .../broadcom/bcmdhd_101_16/wl_cfg_btcoex.c | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgdbg.c | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgnan.c | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgnan.h | 9 +- .../broadcom/bcmdhd_101_16/wl_cfgp2p.c | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgp2p.h | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgscan.c | 20 +- .../broadcom/bcmdhd_101_16/wl_cfgscan.h | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgvendor.c | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgvendor.h | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgvif.c | 2 +- .../broadcom/bcmdhd_101_16/wl_cfgvif.h | 2 +- .../broadcom/bcmdhd_101_16/wl_linux_mon.c | 2 +- 
.../wireless/broadcom/bcmdhd_101_16/wl_roam.c | 2 +- .../broadcom/bcmdhd_101_16/wlc_types.h | 2 +- .../broadcom/bcmdhd_101_16/wldev_common.c | 2 +- drivers/sensorhub/brcm/ssp.h | 2 +- drivers/sensorhub/brcm/ssp_iio.c | 8 +- drivers/sensorhub/brcm/sx9330.c | 246 ++++- drivers/uh/rkp_test.c | 2 +- drivers/usb/gadget/function/f_mtp_samsung.c | 6 +- drivers/usb/gadget/function/rndis.c | 1 + drivers/usb/notify/usb_notify.c | 11 +- fs/crypto/fscrypt_ddar.c | 5 + fs/crypto/keyinfo.c | 24 + fs/crypto/policy.c | 44 +- fs/crypto/sdp/sdp_dek.c | 28 + fs/f2fs/checkpoint.c | 2 +- fs/f2fs/data.c | 5 +- fs/f2fs/debug.c | 11 +- fs/f2fs/f2fs.h | 10 +- fs/f2fs/node.c | 29 +- fs/f2fs/node.h | 4 +- fs/f2fs/segment.c | 3 + fs/f2fs/shrinker.c | 4 +- fs/proc/base.c | 2 +- fs/quota/quota_tree.c | 8 +- fs/quota/quota_v2.c | 19 + include/linux/arm-smccc.h | 7 + include/linux/cpu.h | 24 + include/linux/defex.h | 2 +- kernel/cpu.c | 15 + security/samsung/defex_lsm/Makefile | 20 +- security/samsung/defex_lsm/cert/defex_sign.c | 30 +- .../samsung/defex_lsm/core/defex_common.c | 6 +- .../samsung/defex_lsm/core/defex_get_mode.c | 22 + security/samsung/defex_lsm/core/defex_main.c | 130 ++- .../samsung/defex_lsm/core/defex_rules_proc.c | 32 +- .../samsung/defex_lsm/core/defex_tailer.c | 98 ++ .../samsung/defex_lsm/debug/defex_debug.c | 8 +- security/samsung/defex_lsm/defex_rules.c | 9 + .../feature_safeplace/defex_integrity.c | 37 + .../feature_trusted_map/defex_trusted_map.c | 16 + .../defex_lsm/feature_trusted_map/dtm.c | 206 +++++ .../feature_trusted_map/dtm_engine.c | 193 ++++ .../defex_lsm/feature_trusted_map/dtm_log.c | 115 +++ .../defex_lsm/feature_trusted_map/dtm_utils.c | 137 +++ .../feature_trusted_map/include/dtm.h | 84 ++ .../feature_trusted_map/include/dtm_engine.h | 21 + .../feature_trusted_map/include/dtm_log.h | 43 + .../feature_trusted_map/include/dtm_utils.h | 45 + .../defex_lsm/feature_trusted_map/ptree.c | 476 ++++++++++ .../samsung/defex_lsm/include/defex_config.h | 16 +- .../samsung/defex_lsm/include/defex_debug.h | 1 + .../defex_lsm/include/defex_internal.h | 33 + .../samsung/defex_lsm/include/defex_rules.h | 13 +- .../samsung/defex_lsm/include/defex_tailer.h | 57 ++ security/samsung/defex_lsm/include/ptree.h | 171 ++++ security/samsung/defex_lsm/pack_rules.c | 1 + .../test/security_dsms_access_control_test.c | 86 -- .../dsms/test/security_dsms_debug_test.c | 36 - .../dsms/test/security_dsms_init_test.c | 42 - .../dsms/test/security_dsms_kernel_api_test.c | 111 --- .../dsms/test/security_dsms_policy_test.c | 45 - .../dsms/test/security_dsms_rate_limit_test.c | 110 --- .../dsms/test/security_dsms_test_utils.c | 97 -- .../dsms/test/security_dsms_test_utils.h | 24 - security/samsung/five/five_appraise.c | 3 +- security/samsung/five/five_main.c | 24 +- security/samsung/five/five_porting.h | 20 +- security/samsung/five/five_testing.h | 10 +- security/samsung/five/gki/five_appraise.c | 8 +- security/samsung/five/s_os/five_appraise.c | 3 +- security/samsung/mz/Kconfig | 26 + security/samsung/mz/Makefile | 21 + security/samsung/mz/include/linux/mz.h | 31 + security/samsung/mz/mz.c | 441 +++++++++ security/samsung/mz/mz.h | 31 + security/samsung/mz/mz_crypto.c | 68 ++ security/samsung/mz/mz_internal.h | 100 ++ security/samsung/mz/mz_ioctl.c | 222 +++++ security/samsung/mz/mz_log.c | 58 ++ security/samsung/mz/mz_log.h | 18 + security/samsung/mz/mz_mem.c | 147 +++ security/samsung/mz/mz_page.c | 290 ++++++ security/samsung/mz/mz_page.h | 27 + security/samsung/mz/mz_page_v5_10.c | 297 ++++++ 
security/samsung/mz/test/Makefile | 1 + security/samsung/mz/test/security_mz_test.c | 152 +++ security/samsung/mz_tee_driver/Kconfig | 48 + security/samsung/mz_tee_driver/Makefile | 18 + .../samsung/mz_tee_driver/mz_tee_driver.c | 397 ++++++++ .../samsung/mz_tee_driver/mz_tee_driver_qc.c | 393 ++++++++ .../mz_tee_driver/mz_tee_driver_teegris.c | 495 ++++++++++ .../mz_tee_driver/security_mz_tee_driver.py | 50 + security/samsung/proca/Makefile | 8 +- security/samsung/proca/gaf/Makefile | 7 + security/samsung/proca/gaf/gaf_v6.c | 6 +- security/samsung/proca/proca_certificate.c | 15 +- security/samsung/proca/proca_config.c | 4 + security/samsung/proca/proca_porting.h | 12 +- security/samsung/proca/security_proca.py | 95 ++ virt/kvm/arm/psci.c | 12 + 389 files changed, 9261 insertions(+), 2066 deletions(-) create mode 100644 arch/arm64/include/asm/vectors.h create mode 100644 drivers/misc/tzdev/umem.c create mode 100644 drivers/misc/tzdev/umem.h create mode 100644 security/samsung/defex_lsm/core/defex_tailer.c create mode 100644 security/samsung/defex_lsm/feature_safeplace/defex_integrity.c create mode 100644 security/samsung/defex_lsm/feature_trusted_map/defex_trusted_map.c create mode 100644 security/samsung/defex_lsm/feature_trusted_map/dtm.c create mode 100644 security/samsung/defex_lsm/feature_trusted_map/dtm_engine.c create mode 100644 security/samsung/defex_lsm/feature_trusted_map/dtm_log.c create mode 100644 security/samsung/defex_lsm/feature_trusted_map/dtm_utils.c create mode 100644 security/samsung/defex_lsm/feature_trusted_map/include/dtm.h create mode 100644 security/samsung/defex_lsm/feature_trusted_map/include/dtm_engine.h create mode 100644 security/samsung/defex_lsm/feature_trusted_map/include/dtm_log.h create mode 100644 security/samsung/defex_lsm/feature_trusted_map/include/dtm_utils.h create mode 100644 security/samsung/defex_lsm/feature_trusted_map/ptree.c create mode 100644 security/samsung/defex_lsm/include/defex_tailer.h create mode 100644 security/samsung/defex_lsm/include/ptree.h delete mode 100644 security/samsung/dsms/test/security_dsms_access_control_test.c delete mode 100644 security/samsung/dsms/test/security_dsms_debug_test.c delete mode 100644 security/samsung/dsms/test/security_dsms_init_test.c delete mode 100644 security/samsung/dsms/test/security_dsms_kernel_api_test.c delete mode 100644 security/samsung/dsms/test/security_dsms_policy_test.c delete mode 100644 security/samsung/dsms/test/security_dsms_rate_limit_test.c delete mode 100644 security/samsung/dsms/test/security_dsms_test_utils.c delete mode 100644 security/samsung/dsms/test/security_dsms_test_utils.h create mode 100644 security/samsung/mz/Kconfig create mode 100644 security/samsung/mz/Makefile create mode 100644 security/samsung/mz/include/linux/mz.h create mode 100644 security/samsung/mz/mz.c create mode 100644 security/samsung/mz/mz.h create mode 100644 security/samsung/mz/mz_crypto.c create mode 100644 security/samsung/mz/mz_internal.h create mode 100644 security/samsung/mz/mz_ioctl.c create mode 100644 security/samsung/mz/mz_log.c create mode 100644 security/samsung/mz/mz_log.h create mode 100644 security/samsung/mz/mz_mem.c create mode 100644 security/samsung/mz/mz_page.c create mode 100644 security/samsung/mz/mz_page.h create mode 100644 security/samsung/mz/mz_page_v5_10.c create mode 100644 security/samsung/mz/test/Makefile create mode 100644 security/samsung/mz/test/security_mz_test.c create mode 100644 security/samsung/mz_tee_driver/Kconfig create mode 100644 
security/samsung/mz_tee_driver/Makefile create mode 100644 security/samsung/mz_tee_driver/mz_tee_driver.c create mode 100644 security/samsung/mz_tee_driver/mz_tee_driver_qc.c create mode 100644 security/samsung/mz_tee_driver/mz_tee_driver_teegris.c create mode 100755 security/samsung/mz_tee_driver/security_mz_tee_driver.py create mode 100755 security/samsung/proca/security_proca.py diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index b60232639984..dbd9615b428c 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -324,4 +325,9 @@ static inline int kvm_arm_have_ssbd(void) return KVM_SSBD_UNKNOWN; } +static inline int kvm_arm_get_spectre_bhb_state(void) +{ + /* 32bit guests don't need firmware for this */ + return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */ +} #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 617b1760ee2e..84166a9b7c05 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -50,6 +50,7 @@ config ARM64 select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS_BROADCAST select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_IDLE_POLL_SETUP select GENERIC_IRQ_PROBE @@ -458,6 +459,20 @@ config ARM64_ERRATUM_1024718 If unsure, say Y. +config ARM64_ERRATUM_1188873 + bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result" + default y + depends on COMPAT + select ARM_ARCH_TIMER_OOL_WORKAROUND + help + This option adds work arounds for ARM Cortex-A76 erratum 1188873 + + Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause + register corruption when accessing the timer registers from + AArch32 userspace. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -858,6 +873,16 @@ config ARM64_SSBD If unsure, say Y. +config MITIGATE_SPECTRE_BRANCH_HISTORY + bool "Mitigate Spectre style attacks against branch history" if EXPERT + default y + depends on HARDEN_BRANCH_PREDICTOR || !KVM + help + Speculation attacks against some high-performance processors can + make use of branch history to influence future speculation. + When taking an exception from user-space, a sequence of branches + or a firmware call overwrites the branch history. 
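For readers unfamiliar with the loop-based mitigation the help text above describes, here is a minimal, illustrative C sketch (not part of the patch) of what the __mitigate_spectre_bhb_loop assembler macro introduced later in this series does: execute a run of taken branches so the branch history buffer is overwritten with benign entries, then synchronise with DSB/ISB. The function name is made up for illustration, and the iteration count of 32 is only a placeholder; in the real code the immediate is patched per CPU via the spectre_bhb_patch_loop_iter callback.

static inline void spectre_bhb_clear_history_sketch(void)
{
	/* Placeholder count; the real value is CPU-specific and is
	 * patched at runtime (see spectre_bhb_patch_loop_iter). */
	unsigned long iters = 32;

	asm volatile(
	"1:	b	2f\n"			/* taken branch fills the BHB  */
	"2:	subs	%0, %0, #1\n"
	"	b.ne	1b\n"
	"	dsb	nsh\n"			/* let the speculation settle  */
	"	isb\n"
	: "+r" (iters) : : "cc", "memory");
}

The firmware-call variant mentioned in the help text takes the ARM_SMCCC_ARCH_WORKAROUND_3 path instead of this loop; both sequences appear as assembler macros further down in this patch.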
+ menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT diff --git a/arch/arm64/boot/dts/G973F.mk b/arch/arm64/boot/dts/G973F.mk index 563cebfe0bcb..a2b29a3c7b02 100644 --- a/arch/arm64/boot/dts/G973F.mk +++ b/arch/arm64/boot/dts/G973F.mk @@ -1,14 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9820.dtb +dtbo-y += samsung/exynos9820-beyond1lte_eur_open_21.dtbo +dtbo-y += samsung/exynos9820-beyond1lte_eur_open_17.dtbo dtbo-y += samsung/exynos9820-beyond1lte_eur_open_18.dtbo -dtbo-y += samsung/exynos9820-beyond1lte_eur_open_19.dtbo -dtbo-y += samsung/exynos9820-beyond1lte_eur_open_20.dtbo dtbo-y += samsung/exynos9820-beyond1lte_eur_open_26.dtbo -dtbo-y += samsung/exynos9820-beyond1lte_eur_open_21.dtbo dtbo-y += samsung/exynos9820-beyond1lte_eur_open_23.dtbo -dtbo-y += samsung/exynos9820-beyond1lte_eur_open_24.dtbo -dtbo-y += samsung/exynos9820-beyond1lte_eur_open_17.dtbo dtbo-y += samsung/exynos9820-beyond1lte_eur_open_22.dtbo +dtbo-y += samsung/exynos9820-beyond1lte_eur_open_24.dtbo +dtbo-y += samsung/exynos9820-beyond1lte_eur_open_20.dtbo +dtbo-y += samsung/exynos9820-beyond1lte_eur_open_19.dtbo targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_17.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_17.dts index cecc5bdcb8bb..2c004d987d04 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_17.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_17.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6524,8 +6525,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_18.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_18.dts index a4bd3f9bb61d..d786bda4ec74 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_18.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_18.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6562,8 +6563,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_19.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_19.dts index 0a3dc1ebed06..b36a76b986ee 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_19.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_19.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; 
battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6591,8 +6592,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_20.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_20.dts index 1fe6a5474333..96723169c121 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_20.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_20.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6590,8 +6591,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_21.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_21.dts index 1edf0e3a8434..e226b503911e 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_21.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_21.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6590,8 +6591,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_22.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_22.dts index cbc35e4df9dc..0a98be183471 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_22.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_22.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6590,8 +6591,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_23.dts 
b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_23.dts index 306dd3358adb..f646e6d902ff 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_23.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_23.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6590,8 +6591,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_24.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_24.dts index 3f27da6cb431..7aa7df49a332 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_24.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_24.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6590,8 +6591,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; @@ -7877,7 +7878,7 @@ sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e2f>; sx9330,adcfiltph1_reg = <0x104615>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_26.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_26.dts index e106c7dc5e7c..d4cb3df77841 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_26.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond1lte_eur_open_26.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6589,8 +6590,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x82f>; - sx9330,adcfiltph1_reg = <0x10b115>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; @@ -7806,7 +7807,7 @@ sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e2f>; sx9330,adcfiltph1_reg = <0x104615>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/configs/exynos9820-beyond1lte_defconfig b/arch/arm64/configs/exynos9820-beyond1lte_defconfig index 6b14e99a28be..59abfd915e2e 100644 --- 
a/arch/arm64/configs/exynos9820-beyond1lte_defconfig +++ b/arch/arm64/configs/exynos9820-beyond1lte_defconfig @@ -513,6 +513,7 @@ CONFIG_ARM64_ERRATUM_832075=y CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1188873=y # CONFIG_CAVIUM_ERRATUM_22375 is not set # CONFIG_CAVIUM_ERRATUM_23154 is not set # CONFIG_CAVIUM_ERRATUM_27456 is not set @@ -614,6 +615,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -771,6 +773,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y @@ -1330,6 +1333,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y @@ -1563,8 +1567,6 @@ CONFIG_TZDEV_SK_MULTICORE=y CONFIG_ION_FD2PHYS=y CONFIG_TZIRS=y # CONFIG_TZTUI is not set -CONFIG_TZDEV_PAGE_MIGRATION=y -# CONFIG_TZ_TRANSPORT is not set # CONFIG_TZDEV_HOTPLUG is not set CONFIG_TZDEV_BOOST=y CONFIG_TZ_BOOT_LOG=y @@ -2017,7 +2019,7 @@ CONFIG_USB_NET_CDCETHER=y CONFIG_USB_NET_CDC_EEM=y CONFIG_USB_NET_CDC_NCM=y # CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -CONFIG_USB_NET_CDC_MBIM=y +# CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SR9700 is not set # CONFIG_USB_NET_SR9800 is not set @@ -4075,7 +4077,7 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_ACM=y CONFIG_USB_PRINTER=y -CONFIG_USB_WDM=y +# CONFIG_USB_WDM is not set # CONFIG_USB_TMC is not set # diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 8a2e878e9ec6..024cd921e41d 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -129,6 +129,13 @@ hint #20 .endm +/* + * Clear Branch History instruction + */ + .macro clearbhb + hint #22 + .endm + /* * Sanitise a 64-bit bounded index wrt speculation, returning zero if out * of bounds. @@ -575,4 +582,31 @@ alternative_endif .Ldone\@: .endm + .macro __mitigate_spectre_bhb_loop tmp +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY +alternative_cb spectre_bhb_patch_loop_iter + mov \tmp, #32 // Patched to correct the immediate +alternative_cb_end +.Lspectre_bhb_loop\@: + b . + 4 + subs \tmp, \tmp, #1 + b.ne .Lspectre_bhb_loop\@ + dsb nsh + isb +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm + + /* Save/restores x0-x3 to the stack */ + .macro __mitigate_spectre_bhb_fw +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! 
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 +alternative_cb arm64_update_smccc_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 889226b4c6e1..c7f17e663e72 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -36,6 +36,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; + u64 reg_id_aa64isar2; u64 reg_id_aa64mmfr0; u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 7d6425d426ac..20ca422eb094 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -42,10 +42,12 @@ #define ARM64_HAS_DCPOP 21 #define ARM64_UNMAP_KERNEL_AT_EL0 23 #define ARM64_HARDEN_BRANCH_PREDICTOR 24 -#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 -#define ARM64_SSBD 26 -#define ARM64_MISMATCHED_CACHE_TYPE 27 +#define ARM64_SSBD 25 +#define ARM64_MISMATCHED_CACHE_TYPE 26 +#define ARM64_SSBS 27 +#define ARM64_WORKAROUND_1188873 28 +#define ARM64_SPECTRE_BHB 29 -#define ARM64_NCAPS 28 +#define ARM64_NCAPS 30 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index fd2a58d63545..070fccf5f54b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -10,6 +10,7 @@ #define __ASM_CPUFEATURE_H #include +#include #include #include @@ -87,24 +88,227 @@ struct arm64_ftr_reg { extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; -/* scope of capability check */ -enum { - SCOPE_SYSTEM, - SCOPE_LOCAL_CPU, -}; +/* + * CPU capabilities: + * + * We use arm64_cpu_capabilities to represent system features, errata work + * arounds (both used internally by kernel and tracked in cpu_hwcaps) and + * ELF HWCAPs (which are exposed to user). + * + * To support systems with heterogeneous CPUs, we need to make sure that we + * detect the capabilities correctly on the system and take appropriate + * measures to ensure there are no incompatibilities. + * + * This comment tries to explain how we treat the capabilities. + * Each capability has the following list of attributes : + * + * 1) Scope of Detection : The system detects a given capability by + * performing some checks at runtime. This could be, e.g, checking the + * value of a field in CPU ID feature register or checking the cpu + * model. The capability provides a call back ( @matches() ) to + * perform the check. Scope defines how the checks should be performed. + * There are three cases: + * + * a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one + * matches. This implies, we have to run the check on all the + * booting CPUs, until the system decides that state of the + * capability is finalised. (See section 2 below) + * Or + * b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs + * matches. This implies, we run the check only once, when the + * system decides to finalise the state of the capability. If the + * capability relies on a field in one of the CPU ID feature + * registers, we use the sanitised value of the register from the + * CPU feature infrastructure to make the decision. + * Or + * c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the + * feature. 
This category is for features that are "finalised" + * (or used) by the kernel very early even before the SMP cpus + * are brought up. + * + * The process of detection is usually denoted by "update" capability + * state in the code. + * + * 2) Finalise the state : The kernel should finalise the state of a + * capability at some point during its execution and take necessary + * actions if any. Usually, this is done, after all the boot-time + * enabled CPUs are brought up by the kernel, so that it can make + * better decision based on the available set of CPUs. However, there + * are some special cases, where the action is taken during the early + * boot by the primary boot CPU. (e.g, running the kernel at EL2 with + * Virtualisation Host Extensions). The kernel usually disallows any + * changes to the state of a capability once it finalises the capability + * and takes any action, as it may be impossible to execute the actions + * safely. A CPU brought up after a capability is "finalised" is + * referred to as "Late CPU" w.r.t the capability. e.g, all secondary + * CPUs are treated "late CPUs" for capabilities determined by the boot + * CPU. + * + * At the moment there are two passes of finalising the capabilities. + * a) Boot CPU scope capabilities - Finalised by primary boot CPU via + * setup_boot_cpu_capabilities(). + * b) Everything except (a) - Run via setup_system_capabilities(). + * + * 3) Verification: When a CPU is brought online (e.g, by user or by the + * kernel), the kernel should make sure that it is safe to use the CPU, + * by verifying that the CPU is compliant with the state of the + * capabilities finalised already. This happens via : + * + * secondary_start_kernel()-> check_local_cpu_capabilities() + * + * As explained in (2) above, capabilities could be finalised at + * different points in the execution. Each newly booted CPU is verified + * against the capabilities that have been finalised by the time it + * boots. + * + * a) SCOPE_BOOT_CPU : All CPUs are verified against the capability + * except for the primary boot CPU. + * + * b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the + * user after the kernel boot are verified against the capability. + * + * If there is a conflict, the kernel takes an action, based on the + * severity (e.g, a CPU could be prevented from booting or cause a + * kernel panic). The CPU is allowed to "affect" the state of the + * capability, if it has not been finalised already. See section 5 + * for more details on conflicts. + * + * 4) Action: As mentioned in (2), the kernel can take an action for each + * detected capability, on all CPUs on the system. Appropriate actions + * include, turning on an architectural feature, modifying the control + * registers (e.g, SCTLR, TCR etc.) or patching the kernel via + * alternatives. The kernel patching is batched and performed at later + * point. The actions are always initiated only after the capability + * is finalised. This is usally denoted by "enabling" the capability. + * The actions are initiated as follows : + * a) Action is triggered on all online CPUs, after the capability is + * finalised, invoked within the stop_machine() context from + * enable_cpu_capabilitie(). + * + * b) Any late CPU, brought up after (1), the action is triggered via: + * + * check_local_cpu_capabilities() -> verify_local_cpu_capabilities() + * + * 5) Conflicts: Based on the state of the capability on a late CPU vs. 
+ * the system state, we could have the following combinations : + * + * x-----------------------------x + * | Type | System | Late CPU | + * |-----------------------------| + * | a | y | n | + * |-----------------------------| + * | b | n | y | + * x-----------------------------x + * + * Two separate flag bits are defined to indicate whether each kind of + * conflict can be allowed: + * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed + * ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed + * + * Case (a) is not permitted for a capability that the system requires + * all CPUs to have in order for the capability to be enabled. This is + * typical for capabilities that represent enhanced functionality. + * + * Case (b) is not permitted for a capability that must be enabled + * during boot if any CPU in the system requires it in order to run + * safely. This is typical for erratum work arounds that cannot be + * enabled after the corresponding capability is finalised. + * + * In some non-typical cases either both (a) and (b), or neither, + * should be permitted. This can be described by including neither + * or both flags in the capability's type field. + */ + + +/* + * Decide how the capability is detected. + * On any local CPU vs System wide vs the primary boot CPU + */ +#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0)) +#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1)) +/* + * The capabilitiy is detected on the Boot CPU and is used by kernel + * during early boot. i.e, the capability should be "detected" and + * "enabled" as early as possibly on all booting CPUs. + */ +#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2)) +#define ARM64_CPUCAP_SCOPE_MASK \ + (ARM64_CPUCAP_SCOPE_SYSTEM | \ + ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ + ARM64_CPUCAP_SCOPE_BOOT_CPU) + +#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM +#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU +#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU +#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK + +/* + * Is it permitted for a late CPU to have this capability when system + * hasn't already enabled it ? + */ +#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4)) +/* Is it safe for a late CPU to miss this capability when system has it */ +#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5)) + +/* + * CPU errata workarounds that need to be enabled at boot time if one or + * more CPUs in the system requires it. When one of these capabilities + * has been enabled, it is safe to allow any CPU to boot that doesn't + * require the workaround. However, it is not safe if a "late" CPU + * requires a workaround and the system hasn't enabled it already. + */ +#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \ + (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU) +/* + * CPU feature detected at boot time based on system-wide value of a + * feature. It is safe for a late CPU to have this feature even though + * the system hasn't enabled it, although the featuer will not be used + * by Linux in this case. If the system has enabled this feature already, + * then every late CPU must have it. + */ +#define ARM64_CPUCAP_SYSTEM_FEATURE \ + (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU) +/* + * CPU feature detected at boot time based on feature of one or more CPUs. + * All possible conflicts for a late CPU are ignored. 
+ */ +#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \ + (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ + ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \ + ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU) + +/* + * CPU feature detected at boot time, on one or more CPUs. A late CPU + * is not allowed to have the capability when the system doesn't have it. + * It is Ok for a late CPU to miss the feature. + */ +#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \ + (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ + ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU) + +/* + * CPU feature used early in the boot based on the boot CPU. All secondary + * CPUs must match the state of the capability as detected by the boot CPU. + */ +#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU struct arm64_cpu_capabilities { const char *desc; u16 capability; - int def_scope; /* default scope */ + u16 type; bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); - int (*enable)(void *); /* Called on all active CPUs */ + /* + * Take the appropriate actions to enable this capability for this CPU. + * For each successfully booted CPU, this method is called for each + * globally detected capability. + */ + void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); union { struct { /* To be used for erratum handling only */ - u32 midr_model; - u32 midr_range_min, midr_range_max; + struct midr_range midr_range; }; + const struct midr_range *midr_range_list; struct { /* Feature register checking */ u32 sys_reg; u8 field_pos; @@ -116,6 +320,23 @@ struct arm64_cpu_capabilities { }; }; +static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) +{ + return cap->type & ARM64_CPUCAP_SCOPE_MASK; +} + +static inline bool +cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); +} + +static inline bool +cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); +} + extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; @@ -227,15 +448,8 @@ static inline bool id_aa64pfr0_32bit_el0(u64 pfr0) } void __init setup_cpu_features(void); - -void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, - const char *info); -void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps); void check_local_cpu_capabilities(void); -void update_cpu_errata_workarounds(void); -void __init enable_errata_workarounds(void); -void verify_local_cpu_errata_workarounds(void); u64 read_sanitised_ftr_reg(u32 id); @@ -244,6 +458,34 @@ static inline bool cpu_supports_mixed_endian_el0(void) return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); } +static inline bool supports_csv2p3(int scope) +{ + u64 pfr0; + u8 csv2_val; + + if (scope == SCOPE_LOCAL_CPU) + pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); + else + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + csv2_val = cpuid_feature_extract_unsigned_field(pfr0, + ID_AA64PFR0_CSV2_SHIFT); + return csv2_val == 3; +} + +static inline bool supports_clearbhb(int scope) +{ + u64 isar2; + + if (scope == SCOPE_LOCAL_CPU) + isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1); + else + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); + + return cpuid_feature_extract_unsigned_field(isar2, + ID_AA64ISAR2_CLEARBHB_SHIFT); +} + static inline bool system_supports_32bit_el0(void) { return cpus_have_const_cap(ARM64_HAS_32BIT_EL0); @@ 
-281,12 +523,19 @@ static inline int arm64_get_ssbd_state(void) #endif } -#ifdef CONFIG_ARM64_SSBD void arm64_set_ssbd_mitigation(bool state); -#else -static inline void arm64_set_ssbd_mitigation(bool state) {} -#endif +/* Watch out, ordering is important here. */ +enum mitigation_state { + SPECTRE_UNAFFECTED, + SPECTRE_MITIGATED, + SPECTRE_VULNERABLE, +}; + +enum mitigation_state arm64_get_spectre_bhb_state(void); +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); +u8 spectre_bhb_loop_affected(int scope); +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index d92246e2e5ef..98ee95550977 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -94,8 +94,20 @@ #define ARM_CPU_PART_ANANKE 0xD05 #define ARM_CPU_PART_MONGOOSE 0x001 #define ARM_CPU_PART_MEERKAT 0x002 -#define ARM_CPU_PART_CORTEX_A75 0xD0A #define ARM_CPU_PART_CHEETAH 0x003 +#define ARM_CPU_PART_CORTEX_A75 0xD0A +#define ARM_CPU_PART_CORTEX_A35 0xD04 +#define ARM_CPU_PART_CORTEX_A55 0xD05 +#define ARM_CPU_PART_CORTEX_A76 0xD0B +#define ARM_CPU_PART_NEOVERSE_N1 0xD0C +#define ARM_CPU_PART_CORTEX_A77 0xD0D +#define ARM_CPU_PART_NEOVERSE_V1 0xD40 +#define ARM_CPU_PART_CORTEX_A78 0xD41 +#define ARM_CPU_PART_CORTEX_X1 0xD44 +#define ARM_CPU_PART_CORTEX_A710 0xD47 +#define ARM_CPU_PART_CORTEX_X2 0xD48 +#define ARM_CPU_PART_NEOVERSE_N2 0xD49 +#define ARM_CPU_PART_CORTEX_A78C 0xD4B #define APM_CPU_PART_POTENZA 0x000 @@ -119,6 +131,18 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) +#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) +#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) +#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) +#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) +#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) +#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) +#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) +#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) +#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) +#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) +#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) @@ -140,6 +164,45 @@ #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) +/* + * Represent a range of MIDR values for a given CPU model and a + * range of variant/revision values. + * + * @model - CPU model as defined by MIDR_CPU_MODEL + * @rv_min - Minimum value for the revision/variant as defined by + * MIDR_CPU_VAR_REV + * @rv_max - Maximum value for the variant/revision for the range. 
+ */ +struct midr_range { + u32 model; + u32 rv_min; + u32 rv_max; +}; + +#define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \ + { \ + .model = m, \ + .rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \ + .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \ + } + +#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) + +static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) +{ + return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, + range->rv_min, range->rv_max); +} + +static inline bool +is_midr_in_range_list(u32 midr, struct midr_range const *ranges) +{ + while (ranges->model) + if (is_midr_in_range(midr, ranges++)) + return true; + return false; +} + /* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. Use this function to read the CPU ID diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index ec1e6d6fa14c..3c962ef081f8 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -59,9 +59,11 @@ enum fixed_addresses { #endif /* CONFIG_ACPI_APEI_GHES */ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_TEXT3, + FIX_ENTRY_TRAMP_TEXT2, + FIX_ENTRY_TRAMP_TEXT1, FIX_ENTRY_TRAMP_DATA, - FIX_ENTRY_TRAMP_TEXT, -#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1)) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 1a6d02350fc6..c59e81b65132 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -70,8 +70,6 @@ extern u32 __kvm_get_mdcr_el2(void); extern u32 __init_stage2_translation(void); -extern void __qcom_hyp_sanitize_btac_predictors(void); - /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ #define __hyp_this_cpu_ptr(sym) \ ({ \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b01ad3489bd8..b3a54b76c280 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -356,6 +356,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); void __kvm_set_tpidr_el2(u64 tpidr_el2); DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); +void __kvm_enable_ssbs(void); + static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, unsigned long vector_ptr) @@ -380,6 +382,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, - (u64)kvm_ksym_ref(kvm_host_cpu_state); kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2); + + /* + * Disabling SSBD on a non-VHE system requires us to enable SSBS + * at EL2. 
+ */ + if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) && + arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { + kvm_call_hyp(__kvm_enable_ssbs); + } } static inline void kvm_arch_hardware_unsetup(void) {} @@ -435,4 +446,9 @@ static inline int kvm_arm_have_ssbd(void) } } +static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) +{ + return arm64_get_spectre_bhb_state(); +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index e42c1f0ae6cf..720fb882bd0e 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -353,7 +353,7 @@ static inline void *kvm_get_hyp_vector(void) struct bp_hardening_data *data = arm64_get_bp_hardening_data(); void *vect = kvm_ksym_ref(__kvm_hyp_vector); - if (data->fn) { + if (data->template_start) { vect = __bp_harden_hyp_vecs_start + data->hyp_vectors_slot * SZ_2K; diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 6dd83d75b82a..5a77dc775cc3 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -35,7 +35,7 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -static inline bool arm64_kernel_unmapped_at_el0(void) +static __always_inline bool arm64_kernel_unmapped_at_el0(void) { return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); @@ -46,6 +46,12 @@ typedef void (*bp_hardening_cb_t)(void); struct bp_hardening_data { int hyp_vectors_slot; bp_hardening_cb_t fn; + + /* + * template_start is only used by the BHB mitigation to identify the + * hyp_vectors_slot sequence. + */ + const char *template_start; }; #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 907ccfd0a4ff..976972ca215c 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -37,6 +37,7 @@ #include #include +#include #include #include #include @@ -140,11 +141,25 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) regs->pc = pc; } +static inline void set_ssbs_bit(struct pt_regs *regs) +{ + regs->pstate |= PSR_SSBS_BIT; +} + +static inline void set_compat_ssbs_bit(struct pt_regs *regs) +{ + regs->pstate |= PSR_AA32_SSBS_BIT; +} + static inline void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { start_thread_common(regs, pc); regs->pstate = PSR_MODE_EL0t; + + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) + set_ssbs_bit(regs); + regs->sp = sp; } @@ -161,6 +176,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, regs->pstate |= COMPAT_PSR_E_BIT; #endif + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) + set_compat_ssbs_bit(regs); + regs->compat_sp = sp; } #endif @@ -215,8 +233,8 @@ static inline void spin_lock_prefetch(const void *ptr) #endif -int cpu_enable_pan(void *__unused); -int cpu_enable_cache_maint_trap(void *__unused); +void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused); +void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused); #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 7a18348b73b0..88d02ef26c42 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -35,7 +35,38 @@ #define COMPAT_PTRACE_GETHBPREGS 29 #define COMPAT_PTRACE_SETHBPREGS 30 -/* AArch32 CPSR bits */ +/* SPSR_ELx bits for 
exceptions taken from AArch32 */ +#define PSR_AA32_MODE_MASK 0x0000001f +#define PSR_AA32_MODE_USR 0x00000010 +#define PSR_AA32_MODE_FIQ 0x00000011 +#define PSR_AA32_MODE_IRQ 0x00000012 +#define PSR_AA32_MODE_SVC 0x00000013 +#define PSR_AA32_MODE_ABT 0x00000017 +#define PSR_AA32_MODE_HYP 0x0000001a +#define PSR_AA32_MODE_UND 0x0000001b +#define PSR_AA32_MODE_SYS 0x0000001f +#define PSR_AA32_T_BIT 0x00000020 +#define PSR_AA32_F_BIT 0x00000040 +#define PSR_AA32_I_BIT 0x00000080 +#define PSR_AA32_A_BIT 0x00000100 +#define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_SSBS_BIT 0x00800000 +#define PSR_AA32_DIT_BIT 0x01000000 +#define PSR_AA32_Q_BIT 0x08000000 +#define PSR_AA32_V_BIT 0x10000000 +#define PSR_AA32_C_BIT 0x20000000 +#define PSR_AA32_Z_BIT 0x40000000 +#define PSR_AA32_N_BIT 0x80000000 +#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */ +#define PSR_AA32_GE_MASK 0x000f0000 + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT +#else +#define PSR_AA32_ENDSTATE 0 +#endif + +/* AArch32 CPSR bits, as seen in AArch32 */ #define COMPAT_PSR_MODE_MASK 0x0000001f #define COMPAT_PSR_MODE_USR 0x00000010 #define COMPAT_PSR_MODE_FIQ 0x00000011 @@ -50,6 +81,7 @@ #define COMPAT_PSR_I_BIT 0x00000080 #define COMPAT_PSR_A_BIT 0x00000100 #define COMPAT_PSR_E_BIT 0x00000200 +#define COMPAT_PSR_DIT_BIT 0x00200000 #define COMPAT_PSR_J_BIT 0x01000000 #define COMPAT_PSR_Q_BIT 0x08000000 #define COMPAT_PSR_V_BIT 0x10000000 @@ -111,6 +143,30 @@ #define compat_sp_fiq regs[29] #define compat_lr_fiq regs[30] +static inline unsigned long compat_psr_to_pstate(const unsigned long psr) +{ + unsigned long pstate; + + pstate = psr & ~COMPAT_PSR_DIT_BIT; + + if (psr & COMPAT_PSR_DIT_BIT) + pstate |= PSR_AA32_DIT_BIT; + + return pstate; +} + +static inline unsigned long pstate_to_compat_psr(const unsigned long pstate) +{ + unsigned long psr; + + psr = pstate & ~PSR_AA32_DIT_BIT; + + if (pstate & PSR_AA32_DIT_BIT) + psr |= COMPAT_PSR_DIT_BIT; + + return psr; +} + /* * This struct defines the way the registers are stored on the stack during an * exception. 
Note that sizeof(struct pt_regs) has to be a multiple of 16 (for diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 941267caa39c..8d3f1eab58e0 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -28,5 +28,11 @@ extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; +extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; + +static inline size_t entry_tramp_text_size(void) +{ + return __entry_tramp_text_end - __entry_tramp_text_start; +} #endif /* __ASM_SECTIONS_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 3bdec2f5cbb4..d04da1abf3ca 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -20,6 +20,7 @@ #ifndef __ASM_SYSREG_H #define __ASM_SYSREG_H +#include #include /* @@ -85,11 +86,14 @@ #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4) #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3) +#define REG_PSTATE_SSBS_IMM sys_reg(0, 3, 4, 0, 1) #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \ (!!x)<<8 | 0x1f) #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \ (!!x)<<8 | 0x1f) +#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \ + (!!x)<<8 | 0x1f) #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) @@ -151,6 +155,7 @@ #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) +#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) @@ -296,28 +301,94 @@ #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) /* Common SCTLR_ELx flags. */ +#define SCTLR_ELx_DSSBS (1UL << 44) #define SCTLR_ELx_EE (1 << 25) +#define SCTLR_ELx_WXN (1 << 19) #define SCTLR_ELx_I (1 << 12) #define SCTLR_ELx_SA (1 << 3) #define SCTLR_ELx_C (1 << 2) #define SCTLR_ELx_A (1 << 1) #define SCTLR_ELx_M 1 +#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ + SCTLR_ELx_SA | SCTLR_ELx_I) + +/* SCTLR_EL2 specific flags. */ #define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \ (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \ (1 << 29)) +#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \ + (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \ + (1 << 17) | (1 << 20) | (1 << 21) | (1 << 24) | \ + (1 << 26) | (1 << 27) | (1 << 30) | (1 << 31) | \ + (0xffffefffUL << 32)) + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ENDIAN_SET_EL2 SCTLR_ELx_EE +#define ENDIAN_CLEAR_EL2 0 +#else +#define ENDIAN_SET_EL2 0 +#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE +#endif -#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ - SCTLR_ELx_SA | SCTLR_ELx_I) +/* SCTLR_EL2 value used for the hyp-stub */ +#define SCTLR_EL2_SET (ENDIAN_SET_EL2 | SCTLR_EL2_RES1) +#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ + SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ + SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) + +#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff +#error "Inconsistent SCTLR_EL2 set/clear bits" +#endif /* SCTLR_EL1 specific flags. 
*/ #define SCTLR_EL1_UCI (1 << 26) +#define SCTLR_EL1_E0E (1 << 24) #define SCTLR_EL1_SPAN (1 << 23) +#define SCTLR_EL1_NTWE (1 << 18) +#define SCTLR_EL1_NTWI (1 << 16) #define SCTLR_EL1_UCT (1 << 15) +#define SCTLR_EL1_DZE (1 << 14) +#define SCTLR_EL1_UMA (1 << 9) #define SCTLR_EL1_SED (1 << 8) +#define SCTLR_EL1_ITD (1 << 7) #define SCTLR_EL1_CP15BEN (1 << 5) +#define SCTLR_EL1_SA0 (1 << 4) + +#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \ + (1 << 29)) +#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \ + (1 << 21) | (1 << 27) | (1 << 30) | (1 << 31) | \ + (0xffffefffUL << 32)) + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) +#define ENDIAN_CLEAR_EL1 0 +#else +#define ENDIAN_SET_EL1 0 +#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) +#endif + +#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ + SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\ + SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\ + SCTLR_EL1_NTWE | SCTLR_EL1_SPAN | ENDIAN_SET_EL1 |\ + SCTLR_EL1_UCI | SCTLR_EL1_RES1) +#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\ + SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ + SCTLR_ELx_DSSBS | SCTLR_EL1_RES0) + +#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff +#error "Inconsistent SCTLR_EL1 set/clear bits" +#endif /* id_aa64isar0 */ +#define ID_AA64ISAR0_TS_SHIFT 52 +#define ID_AA64ISAR0_FHM_SHIFT 48 +#define ID_AA64ISAR0_DP_SHIFT 44 +#define ID_AA64ISAR0_SM4_SHIFT 40 +#define ID_AA64ISAR0_SM3_SHIFT 36 +#define ID_AA64ISAR0_SHA3_SHIFT 32 #define ID_AA64ISAR0_RDM_SHIFT 28 #define ID_AA64ISAR0_ATOMICS_SHIFT 20 #define ID_AA64ISAR0_CRC32_SHIFT 16 @@ -331,9 +402,13 @@ #define ID_AA64ISAR1_JSCVT_SHIFT 12 #define ID_AA64ISAR1_DPB_SHIFT 0 +/* id_aa64isar2 */ +#define ID_AA64ISAR2_CLEARBHB_SHIFT 28 + /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 +#define ID_AA64PFR0_DIT_SHIFT 48 #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_ASIMD_SHIFT 20 #define ID_AA64PFR0_FP_SHIFT 16 @@ -350,6 +425,13 @@ #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL0_32BIT_64BIT 0x2 +/* id_aa64pfr1 */ +#define ID_AA64PFR1_SSBS_SHIFT 4 + +#define ID_AA64PFR1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 + /* id_aa64mmfr0 */ #define ID_AA64MMFR0_TGRAN4_SHIFT 28 #define ID_AA64MMFR0_TGRAN64_SHIFT 24 @@ -368,6 +450,7 @@ #define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 /* id_aa64mmfr1 */ +#define ID_AA64MMFR1_ECBHB_SHIFT 60 #define ID_AA64MMFR1_PAN_SHIFT 20 #define ID_AA64MMFR1_LOR_SHIFT 16 #define ID_AA64MMFR1_HPD_SHIFT 12 @@ -379,6 +462,7 @@ #define ID_AA64MMFR1_VMIDBITS_16 2 /* id_aa64mmfr2 */ +#define ID_AA64MMFR2_AT_SHIFT 32 #define ID_AA64MMFR2_LVA_SHIFT 16 #define ID_AA64MMFR2_IESB_SHIFT 12 #define ID_AA64MMFR2_LSM_SHIFT 8 @@ -463,6 +547,7 @@ #else +#include #include #define __DEFINE_MRS_MSR_S_REGNUM \ @@ -534,6 +619,17 @@ asm volatile(__msr_s(r, __val)); \ } while (0) +/* + * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the + * set mask are set. Other bits are left as-is. 
+ */ +#define sysreg_clear_set(sysreg, clear, set) do { \ + u64 __scs_val = read_sysreg(sysreg); \ + u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ + if (__scs_new != __scs_val) \ + write_sysreg(__scs_new, sysreg); \ +} while (0) + static inline void config_sctlr_el1(u32 clear, u32 set) { u32 val; diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index d131501c6222..45e3da34bdc4 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -37,6 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook); void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr); +/* + * Move regs->pc to next instruction and do necessary setup before it + * is executed. + */ +void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size); + static inline int __in_irqentry_text(unsigned long ptr) { return ptr >= (unsigned long)&__irqentry_text_start && diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h new file mode 100644 index 000000000000..695583b9a145 --- /dev/null +++ b/arch/arm64/include/asm/vectors.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 ARM Ltd. + */ +#ifndef __ASM_VECTORS_H +#define __ASM_VECTORS_H + +#include +#include + +#include +#include + +extern char vectors[]; +extern char tramp_vectors[]; +extern char __bp_harden_el1_vectors[]; + +/* + * Note: the order of this enum corresponds to two arrays in entry.S: + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical + * 'full fat' vectors are used directly. + */ +enum arm64_bp_harden_el1_vectors { +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + /* + * Perform the BHB loop mitigation, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_LOOP, + + /* + * Make the SMC call for firmware mitigation, before branching to the + * canonical vectors. + */ + EL1_VECTOR_BHB_FW, + + /* + * Use the ClearBHB instruction, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_CLEAR_INSN, +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + + /* + * Remap the kernel before branching to the canonical vectors. + */ + EL1_VECTOR_KPTI, +}; + +#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY +#define EL1_VECTOR_BHB_LOOP -1 +#define EL1_VECTOR_BHB_FW -1 +#define EL1_VECTOR_BHB_CLEAR_INSN -1 +#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + +/* The vectors to use on return from EL0. e.g. to remap the kernel */ +DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector); + +#ifndef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_VALIAS 0 +#endif + +static inline const char * +arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot) +{ + if (arm64_kernel_unmapped_at_el0()) + return (char *)TRAMP_VALIAS + SZ_2K * slot; + + WARN_ON_ONCE(slot == EL1_VECTOR_KPTI); + + return __bp_harden_el1_vectors + SZ_2K * slot; +} + +#endif /* __ASM_VECTORS_H */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index c5f89442785c..9d1e24e030b3 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -102,12 +102,6 @@ static inline bool has_vhe(void) return false; } -#ifdef CONFIG_ARM64_VHE -extern void verify_cpu_run_el(void); -#else -static inline void verify_cpu_run_el(void) {} -#endif - #endif /* __ASSEMBLY__ */ #endif /* ! 
__ASM__VIRT_H */ diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index b3fdeee739ea..2bcd6e4f3474 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -37,5 +37,17 @@ #define HWCAP_FCMA (1 << 14) #define HWCAP_LRCPC (1 << 15) #define HWCAP_DCPOP (1 << 16) +#define HWCAP_SHA3 (1 << 17) +#define HWCAP_SM3 (1 << 18) +#define HWCAP_SM4 (1 << 19) +#define HWCAP_ASIMDDP (1 << 20) +#define HWCAP_SHA512 (1 << 21) +#define HWCAP_SVE (1 << 22) +#define HWCAP_ASIMDFHM (1 << 23) +#define HWCAP_DIT (1 << 24) +#define HWCAP_USCAT (1 << 25) +#define HWCAP_ILRCPC (1 << 26) +#define HWCAP_FLAGM (1 << 27) +#define HWCAP_SSBS (1 << 28) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 67d4c33974e8..eea58f8ec355 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -45,6 +45,7 @@ #define PSR_I_BIT 0x00000080 #define PSR_A_BIT 0x00000100 #define PSR_D_BIT 0x00000200 +#define PSR_SSBS_BIT 0x00001000 #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 #define PSR_Q_BIT 0x08000000 diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index d06fbe4cd38d..7d68c442bbe7 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -63,6 +63,7 @@ struct insn_emulation { static LIST_HEAD(insn_emulation); static int nr_insn_emulated __initdata; static DEFINE_RAW_SPINLOCK(insn_emulation_lock); +static DEFINE_MUTEX(insn_emulation_mutex); static void register_emulation_hooks(struct insn_emulation_ops *ops) { @@ -208,10 +209,10 @@ static int emulation_proc_handler(struct ctl_table *table, int write, loff_t *ppos) { int ret = 0; - struct insn_emulation *insn = (struct insn_emulation *) table->data; + struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode); enum insn_emulation_mode prev_mode = insn->current_mode; - table->data = &insn->current_mode; + mutex_lock(&insn_emulation_mutex); ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write || prev_mode == insn->current_mode) @@ -224,7 +225,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write, update_insn_emulation_mode(insn, INSN_UNDEF); } ret: - table->data = insn; + mutex_unlock(&insn_emulation_mutex); return ret; } @@ -254,7 +255,7 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) sysctl->maxlen = sizeof(int); sysctl->procname = insn->ops->name; - sysctl->data = insn; + sysctl->data = &insn->current_mode; sysctl->extra1 = &insn->min; sysctl->extra2 = &insn->max; sysctl->proc_handler = emulation_proc_handler; @@ -431,7 +432,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr) pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n", current->comm, (unsigned long)current->pid, regs->pc); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, 4); return 0; fault: @@ -512,7 +513,7 @@ static int cp15barrier_handler(struct pt_regs *regs, u32 instr) pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n", current->comm, (unsigned long)current->pid, regs->pc); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, 4); return 0; } @@ -586,14 +587,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) static int a32_setend_handler(struct pt_regs *regs, u32 instr) { int rc = compat_setend_handler(regs, (instr >> 9) 
& 1); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, 4); return rc; } static int t16_setend_handler(struct pt_regs *regs, u32 instr) { int rc = compat_setend_handler(regs, (instr >> 3) & 1); - regs->pc += 2; + arm64_skip_faulting_instruction(regs, 2); return rc; } diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 3d6d7fae45de..ed627d44746a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -19,17 +19,27 @@ #include #include #include +#include #include #include #include +#include static bool __maybe_unused is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) +{ + u32 midr = read_cpuid_id(); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &entry->midr_range); +} + +static bool __maybe_unused +is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, + int scope) { WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model, - entry->midr_range_min, - entry->midr_range_max); + return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); } static bool __maybe_unused @@ -43,7 +53,7 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | MIDR_ARCHITECTURE_MASK; - return model == entry->midr_model; + return model == entry->midr_range.model; } static bool @@ -61,26 +71,31 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, (arm64_ftr_reg_ctrel0.sys_val & mask); } -static int cpu_enable_trap_ctr_access(void *__unused) +static void +cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) { /* Clear SCTLR_EL1.UCT */ config_sctlr_el1(SCTLR_EL1_UCT, 0); - return 0; } -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR #include #include DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM -extern char __qcom_hyp_sanitize_link_stack_start[]; -extern char __qcom_hyp_sanitize_link_stack_end[]; extern char __smccc_workaround_1_smc_start[]; extern char __smccc_workaround_1_smc_end[]; -extern char __smccc_workaround_1_hvc_start[]; -extern char __smccc_workaround_1_hvc_end[]; +extern char __smccc_workaround_3_smc_start[]; +extern char __smccc_workaround_3_smc_end[]; +extern char __spectre_bhb_loop_k8_start[]; +extern char __spectre_bhb_loop_k8_end[]; +extern char __spectre_bhb_loop_k24_start[]; +extern char __spectre_bhb_loop_k24_end[]; +extern char __spectre_bhb_loop_k32_start[]; +extern char __spectre_bhb_loop_k32_end[]; +extern char __spectre_bhb_clearbhb_start[]; +extern char __spectre_bhb_clearbhb_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -94,12 +109,14 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); } -static void __install_bp_hardening_cb(bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) +static DEFINE_SPINLOCK(bp_lock); +static int last_slot = -1; + +static void install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) { - static int last_slot = -1; - static DEFINE_SPINLOCK(bp_lock); + int cpu, slot = -1; spin_lock(&bp_lock); @@ -120,17 +137,14 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); __this_cpu_write(bp_hardening_data.fn, fn); + 
__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start); spin_unlock(&bp_lock); } #else -#define __qcom_hyp_sanitize_link_stack_start NULL -#define __qcom_hyp_sanitize_link_stack_end NULL #define __smccc_workaround_1_smc_start NULL #define __smccc_workaround_1_smc_end NULL -#define __smccc_workaround_1_hvc_start NULL -#define __smccc_workaround_1_hvc_end NULL -static void __install_bp_hardening_cb(bp_hardening_cb_t fn, +static void install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, const char *hyp_vecs_end) { @@ -138,23 +152,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, } #endif /* CONFIG_KVM */ -static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, - bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) -{ - u64 pfr0; - - if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return; - - pfr0 = read_cpuid(ID_AA64PFR0_EL1); - if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) - return; - - __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); -} - #include #include #include @@ -169,77 +166,95 @@ static void call_hvc_arch_workaround_1(void) arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static int enable_smccc_arch_workaround_1(void *data) +static void qcom_link_stack_sanitization(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . + 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + +static bool __nospectre_v2; +static int __init parse_nospectre_v2(char *str) +{ + __nospectre_v2 = true; + return 0; +} +early_param("nospectre_v2", parse_nospectre_v2); + +/* + * -1: No workaround + * 0: No workaround required + * 1: Workaround installed + */ +static int detect_harden_bp_fw(void) { - const struct arm64_cpu_capabilities *entry = data; bp_hardening_cb_t cb; void *smccc_start, *smccc_end; struct arm_smccc_res res; - - if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return 0; + u32 midr = read_cpuid_id(); if (psci_ops.smccc_version == SMCCC_VERSION_1_0) - return 0; + return -1; switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 < 0) + switch ((int)res.a0) { + case 1: + /* Firmware says we're just fine */ return 0; - cb = call_hvc_arch_workaround_1; - smccc_start = __smccc_workaround_1_hvc_start; - smccc_end = __smccc_workaround_1_hvc_end; + case 0: + cb = call_hvc_arch_workaround_1; + /* This is a guest, no need to patch KVM vectors */ + smccc_start = NULL; + smccc_end = NULL; + break; + default: + return -1; + } break; case PSCI_CONDUIT_SMC: arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 < 0) + switch ((int)res.a0) { + case 1: + /* Firmware says we're just fine */ return 0; - cb = call_smc_arch_workaround_1; - smccc_start = __smccc_workaround_1_smc_start; - smccc_end = __smccc_workaround_1_smc_end; + case 0: + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; + smccc_end = __smccc_workaround_1_smc_end; + break; + default: + return -1; + } break; default: - return 0; + return -1; } - install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) + cb = qcom_link_stack_sanitization; - return 0; -} + if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) + install_bp_hardening_cb(cb, smccc_start, smccc_end); -static void 
qcom_link_stack_sanitization(void) -{ - u64 tmp; - - asm volatile("mov %0, x30 \n" - ".rept 16 \n" - "bl . + 4 \n" - ".endr \n" - "mov x30, %0 \n" - : "=&r" (tmp)); + return 1; } -static int qcom_enable_link_stack_sanitization(void *data) -{ - const struct arm64_cpu_capabilities *entry = data; - - install_bp_hardening_cb(entry, qcom_link_stack_sanitization, - __qcom_hyp_sanitize_link_stack_start, - __qcom_hyp_sanitize_link_stack_end); - - return 0; -} -#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ - -#ifdef CONFIG_ARM64_SSBD DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; +static bool __ssb_safe = true; static const struct ssbd_options { const char *str; @@ -309,6 +324,19 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt, void arm64_set_ssbd_mitigation(bool state) { + if (!IS_ENABLED(CONFIG_ARM64_SSBD)) { + pr_info_once("SSBD disabled by kernel configuration\n"); + return; + } + + if (this_cpu_has_cap(ARM64_SSBS)) { + if (state) + asm volatile(SET_PSTATE_SSBS(0)); + else + asm volatile(SET_PSTATE_SSBS(1)); + return; + } + switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); @@ -330,11 +358,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, struct arm_smccc_res res; bool required = true; s32 val; + bool this_cpu_safe = false; WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + if (cpu_mitigations_off()) + ssbd_state = ARM64_SSBD_FORCE_DISABLE; + + /* delay setting __ssb_safe until we get a firmware response */ + if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) + this_cpu_safe = true; + + if (this_cpu_has_cap(ARM64_SSBS)) { + if (!this_cpu_safe) + __ssb_safe = false; + required = false; + goto out_printmsg; + } + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; } @@ -351,6 +396,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, default: ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; } @@ -359,14 +406,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, switch (val) { case SMCCC_RET_NOT_SUPPORTED: ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; + /* machines with mixed mitigation requirements must not return this */ case SMCCC_RET_NOT_REQUIRED: pr_info_once("%s mitigation not required\n", entry->desc); ssbd_state = ARM64_SSBD_MITIGATED; return false; case SMCCC_RET_SUCCESS: + __ssb_safe = false; required = true; break; @@ -376,12 +427,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, default: WARN_ON(1); + if (!this_cpu_safe) + __ssb_safe = false; return false; } switch (ssbd_state) { case ARM64_SSBD_FORCE_DISABLE: - pr_info_once("%s disabled from command-line\n", entry->desc); arm64_set_ssbd_mitigation(false); required = false; break; @@ -394,7 +446,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, break; case ARM64_SSBD_FORCE_ENABLE: - pr_info_once("%s forced from command-line\n", entry->desc); arm64_set_ssbd_mitigation(true); required = true; break; @@ -404,23 +455,126 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, break; } +out_printmsg: + switch (ssbd_state) { + case ARM64_SSBD_FORCE_DISABLE: + pr_info_once("%s disabled from command-line\n", entry->desc); + break; + + case 
ARM64_SSBD_FORCE_ENABLE: + pr_info_once("%s forced from command-line\n", entry->desc); + break; + } + return required; } -#endif /* CONFIG_ARM64_SSBD */ -#define MIDR_RANGE(model, min, max) \ - .def_scope = SCOPE_LOCAL_CPU, \ - .matches = is_affected_midr_range, \ - .midr_model = model, \ - .midr_range_min = min, \ - .midr_range_max = max +/* known invulnerable cores */ +static const struct midr_range arm64_ssb_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + {}, +}; + +#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ + .matches = is_affected_midr_range, \ + .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) + +#define CAP_MIDR_ALL_VERSIONS(model) \ + .matches = is_affected_midr_range, \ + .midr_range = MIDR_ALL_VERSIONS(model) + +#define MIDR_FIXED(rev, revidr_mask) \ + .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}} + +#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ + CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) + +#define CAP_MIDR_RANGE_LIST(list) \ + .matches = is_affected_midr_range_list, \ + .midr_range_list = list + +/* Errata affecting a range of revisions of given model variant */ +#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \ + ERRATA_MIDR_RANGE(m, var, r_min, var, r_max) + +/* Errata affecting a single variant/revision of a model */ +#define ERRATA_MIDR_REV(model, var, rev) \ + ERRATA_MIDR_RANGE(model, var, rev, var, rev) + +/* Errata affecting all variants/revisions of a given a model */ +#define ERRATA_MIDR_ALL_VERSIONS(model) \ + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ + CAP_MIDR_ALL_VERSIONS(model) + +/* Errata affecting a list of midr ranges, with same work around */ +#define ERRATA_MIDR_RANGE_LIST(midr_list) \ + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ + CAP_MIDR_RANGE_LIST(midr_list) + +/* Track overall mitigation state. We are only mitigated if all cores are ok */ +static bool __hardenbp_enab = true; +static bool __spectrev2_safe = true; + +/* + * List of CPUs that do not need any Spectre-v2 mitigation at all. + */ +static const struct midr_range spectre_v2_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + { /* sentinel */ } +}; + +/* + * Track overall bp hardening for all heterogeneous cores in the machine. + * We are only considered "safe" if all booted cores are known safe. 
+ */ +static bool __maybe_unused +check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) +{ + int need_wa; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + /* If the CPU has CSV2 set, we're safe */ + if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1), + ID_AA64PFR0_CSV2_SHIFT)) + return false; + + /* Alternatively, we have a list of unaffected CPUs */ + if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) + return false; + + /* Fallback to firmware detection */ + need_wa = detect_harden_bp_fw(); + if (!need_wa) + return false; + + __spectrev2_safe = false; + + if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) { + pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n"); + __hardenbp_enab = false; + return false; + } + + /* forced off */ + if (__nospectre_v2 || cpu_mitigations_off()) { + pr_info_once("spectrev2 mitigation disabled by command line option\n"); + __hardenbp_enab = false; + return false; + } -#define MIDR_ALL_VERSIONS(model) \ - .def_scope = SCOPE_LOCAL_CPU, \ - .matches = is_affected_midr_range, \ - .midr_model = model, \ - .midr_range_min = 0, \ - .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK) + if (need_wa < 0) { + pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); + __hardenbp_enab = false; + } + + return (need_wa > 0); +} const struct arm64_cpu_capabilities arm64_errata[] = { #if defined(CONFIG_ARM64_ERRATUM_826319) || \ @@ -430,8 +584,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A53 r0p[012] */ .desc = "ARM errata 826319, 827319, 824069", .capability = ARM64_WORKAROUND_CLEAN_CACHE, - MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02), - .enable = cpu_enable_cache_maint_trap, + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2), + .cpu_enable = cpu_enable_cache_maint_trap, }, #endif #ifdef CONFIG_ARM64_ERRATUM_819472 @@ -439,8 +593,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A53 r0p[01] */ .desc = "ARM errata 819472", .capability = ARM64_WORKAROUND_CLEAN_CACHE, - MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01), - .enable = cpu_enable_cache_maint_trap, + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1), + .cpu_enable = cpu_enable_cache_maint_trap, }, #endif #ifdef CONFIG_ARM64_ERRATUM_832075 @@ -448,9 +602,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 832075", .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, - MIDR_RANGE(MIDR_CORTEX_A57, - MIDR_CPU_VAR_REV(0, 0), - MIDR_CPU_VAR_REV(1, 2)), + ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, + 0, 0, + 1, 2), }, #endif #ifdef CONFIG_ARM64_ERRATUM_834220 @@ -458,9 +612,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 834220", .capability = ARM64_WORKAROUND_834220, - MIDR_RANGE(MIDR_CORTEX_A57, - MIDR_CPU_VAR_REV(0, 0), - MIDR_CPU_VAR_REV(1, 2)), + ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, + 0, 0, + 1, 2), }, #endif #ifdef CONFIG_ARM64_ERRATUM_845719 @@ -468,7 +622,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A53 r0p[01234] */ .desc = "ARM erratum 845719", .capability = ARM64_WORKAROUND_845719, - MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), }, #endif #ifdef CONFIG_CAVIUM_ERRATUM_23154 @@ -476,7 +630,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cavium ThunderX, pass 1.x */ .desc = "Cavium erratum 23154", .capability = ARM64_WORKAROUND_CAVIUM_23154, - MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01), + 
ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), }, #endif #ifdef CONFIG_CAVIUM_ERRATUM_27456 @@ -484,15 +638,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cavium ThunderX, T88 pass 1.x - 2.1 */ .desc = "Cavium erratum 27456", .capability = ARM64_WORKAROUND_CAVIUM_27456, - MIDR_RANGE(MIDR_THUNDERX, - MIDR_CPU_VAR_REV(0, 0), - MIDR_CPU_VAR_REV(1, 1)), + ERRATA_MIDR_RANGE(MIDR_THUNDERX, + 0, 0, + 1, 1), }, { /* Cavium ThunderX, T81 pass 1.0 */ .desc = "Cavium erratum 27456", .capability = ARM64_WORKAROUND_CAVIUM_27456, - MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00), + ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0), }, #endif #ifdef CONFIG_CAVIUM_ERRATUM_30115 @@ -500,49 +654,48 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cavium ThunderX, T88 pass 1.x - 2.2 */ .desc = "Cavium erratum 30115", .capability = ARM64_WORKAROUND_CAVIUM_30115, - MIDR_RANGE(MIDR_THUNDERX, 0x00, - (1 << MIDR_VARIANT_SHIFT) | 2), + ERRATA_MIDR_RANGE(MIDR_THUNDERX, + 0, 0, + 1, 2), }, { /* Cavium ThunderX, T81 pass 1.0 - 1.2 */ .desc = "Cavium erratum 30115", .capability = ARM64_WORKAROUND_CAVIUM_30115, - MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02), + ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2), }, { /* Cavium ThunderX, T83 pass 1.0 */ .desc = "Cavium erratum 30115", .capability = ARM64_WORKAROUND_CAVIUM_30115, - MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00), + ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0), }, #endif { .desc = "Mismatched cache line size", .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, .matches = has_mismatched_cache_type, - .def_scope = SCOPE_LOCAL_CPU, - .enable = cpu_enable_trap_ctr_access, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .cpu_enable = cpu_enable_trap_ctr_access, }, { .desc = "Mismatched cache type", .capability = ARM64_MISMATCHED_CACHE_TYPE, .matches = has_mismatched_cache_type, - .def_scope = SCOPE_LOCAL_CPU, - .enable = cpu_enable_trap_ctr_access, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .cpu_enable = cpu_enable_trap_ctr_access, }, #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 { .desc = "Qualcomm Technologies Falkor erratum 1003", .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, - MIDR_RANGE(MIDR_QCOM_FALKOR_V1, - MIDR_CPU_VAR_REV(0, 0), - MIDR_CPU_VAR_REV(0, 0)), + ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), }, { .desc = "Qualcomm Technologies Kryo erratum 1003", .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, - .def_scope = SCOPE_LOCAL_CPU, - .midr_model = MIDR_QCOM_KRYO, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .midr_range.model = MIDR_QCOM_KRYO, .matches = is_kryo_midr, }, #endif @@ -550,9 +703,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "Qualcomm Technologies Falkor erratum 1009", .capability = ARM64_WORKAROUND_REPEAT_TLBI, - MIDR_RANGE(MIDR_QCOM_FALKOR_V1, - MIDR_CPU_VAR_REV(0, 0), - MIDR_CPU_VAR_REV(0, 0)), + ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), }, #endif #ifdef CONFIG_ARM64_ERRATUM_858921 @@ -560,100 +711,427 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A73 all versions */ .desc = "ARM erratum 858921", .capability = ARM64_WORKAROUND_858921, - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), }, #endif -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - .enable = enable_smccc_arch_workaround_1, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = check_branch_predictor, }, { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - .enable = 
enable_smccc_arch_workaround_1, - }, - { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - .enable = enable_smccc_arch_workaround_1, - }, - { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), - .enable = enable_smccc_arch_workaround_1, - }, - { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), - .enable = qcom_enable_link_stack_sanitization, - }, - { - .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), - }, - { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), - .enable = qcom_enable_link_stack_sanitization, - }, - { - .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), - }, - { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), - .enable = enable_smccc_arch_workaround_1, + .desc = "Speculative Store Bypass Disable", + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .capability = ARM64_SSBD, + .matches = has_ssbd_mitigation, + .midr_range_list = arm64_ssb_cpus, }, +#ifdef CONFIG_ARM64_ERRATUM_1188873 { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), - .enable = enable_smccc_arch_workaround_1, + /* Cortex-A76 r0p0 to r2p0 */ + .desc = "ARM erratum 1188873", + .capability = ARM64_WORKAROUND_1188873, + ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), }, #endif -#ifdef CONFIG_ARM64_SSBD { - .desc = "Speculative Store Bypass Disable", - .def_scope = SCOPE_LOCAL_CPU, - .capability = ARM64_SSBD, - .matches = has_ssbd_mitigation, + .desc = "Spectre-BHB", + .capability = ARM64_SPECTRE_BHB, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = is_spectre_bhb_affected, + .cpu_enable = spectre_bhb_enable_mitigation, }, -#endif { } }; +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +static const char *get_bhb_affected_string(enum mitigation_state bhb_state) +{ + switch (bhb_state) { + case SPECTRE_UNAFFECTED: + return ""; + default: + case SPECTRE_VULNERABLE: + return ", but not BHB"; + case SPECTRE_MITIGATED: + return ", BHB"; + } +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + enum mitigation_state bhb_state = arm64_get_spectre_bhb_state(); + const char *bhb_str = get_bhb_affected_string(bhb_state); + const char *v2_str = "Branch predictor hardening"; + + if (__spectrev2_safe) { + if (bhb_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "Not affected\n"); + + /* + * Platforms affected by Spectre-BHB can't report + * "Not affected" for Spectre-v2. + */ + v2_str = "CSV2"; + } + + if (__hardenbp_enab) + return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str); + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (__ssb_safe) + return sprintf(buf, "Not affected\n"); + + switch (ssbd_state) { + case ARM64_SSBD_KERNEL: + case ARM64_SSBD_FORCE_ENABLE: + if (IS_ENABLED(CONFIG_ARM64_SSBD)) + return sprintf(buf, + "Mitigation: Speculative Store Bypass disabled via prctl\n"); + } + + return sprintf(buf, "Vulnerable\n"); +} + +/* + * We try to ensure that the mitigation state can never change as the result of + * onlining a late CPU. 
+ */ +static void update_mitigation_state(enum mitigation_state *oldp, + enum mitigation_state new) +{ + enum mitigation_state state; + + do { + state = READ_ONCE(*oldp); + if (new <= state) + break; + } while (cmpxchg_relaxed(oldp, state, new) != state); +} + +/* + * Spectre BHB. + * + * A CPU is either: + * - Mitigated by a branchy loop a CPU specific number of times, and listed + * in our "loop mitigated list". + * - Mitigated in software by the firmware Spectre v2 call. + * - Has the ClearBHB instruction to perform the mitigation. + * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no + * software mitigation in the vectors is needed. + * - Has CSV2.3, so is unaffected. + */ +static enum mitigation_state spectre_bhb_state; + +enum mitigation_state arm64_get_spectre_bhb_state(void) +{ + return spectre_bhb_state; +} + /* - * The CPU Errata work arounds are detected and applied at boot time - * and the related information is freed soon after. If the new CPU requires - * an errata not detected at boot, fail this CPU. + * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any + * SCOPE_SYSTEM call will give the right answer. */ -void verify_local_cpu_errata_workarounds(void) -{ - const struct arm64_cpu_capabilities *caps = arm64_errata; - - for (; caps->matches; caps++) { - if (cpus_have_cap(caps->capability)) { - if (caps->enable) - caps->enable((void *)caps); - } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) { - pr_crit("CPU%d: Requires work around for %s, not detected" - " at boot time\n", - smp_processor_id(), - caps->desc ? : "an erratum"); - cpu_die_early(); +u8 spectre_bhb_loop_affected(int scope) +{ + u8 k = 0; + static u8 max_bhb_k; + + if (scope == SCOPE_LOCAL_CPU) { + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + {}, + }; + static const struct midr_range spectre_bhb_k24_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + {}, + }; + static const struct midr_range spectre_bhb_k8_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + {}, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) + k = 32; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) + k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) + k = 8; + + max_bhb_k = max(max_bhb_k, k); + } else { + k = max_bhb_k; + } + + return k; +} + +static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + return SPECTRE_VULNERABLE; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_3, &res); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_3, &res); + break; + + default: + return SPECTRE_VULNERABLE; + } + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + default: + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +static bool 
is_spectre_bhb_fw_affected(int scope) +{ + static bool system_affected; + enum mitigation_state fw_state; + bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1); + static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + {}, + }; + bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), + spectre_bhb_firmware_mitigated_list); + + if (scope != SCOPE_LOCAL_CPU) + return system_affected; + + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { + system_affected = true; + return true; + } + + return false; +} + +static bool supports_ecbhb(int scope) +{ + u64 mmfr1; + + if (scope == SCOPE_LOCAL_CPU) + mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); + else + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_ECBHB_SHIFT); +} + +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, + int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (supports_csv2p3(scope)) + return false; + + if (supports_clearbhb(scope)) + return true; + + if (spectre_bhb_loop_affected(scope)) + return true; + + if (is_spectre_bhb_fw_affected(scope)) + return true; + + return false; +} + +static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) +{ + const char *v = arm64_get_bp_hardening_vector(slot); + + if (slot < 0) + return; + + __this_cpu_write(this_cpu_vector, v); + + /* + * When KPTI is in use, the vectors are switched when exiting to + * user-space. + */ + if (arm64_kernel_unmapped_at_el0()) + return; + + write_sysreg(v, vbar_el1); + isb(); +} + +#ifdef CONFIG_KVM +static const char *kvm_bhb_get_vecs_end(const char *start) +{ + if (start == __smccc_workaround_3_smc_start) + return __smccc_workaround_3_smc_end; + else if (start == __spectre_bhb_loop_k8_start) + return __spectre_bhb_loop_k8_end; + else if (start == __spectre_bhb_loop_k24_start) + return __spectre_bhb_loop_k24_end; + else if (start == __spectre_bhb_loop_k32_start) + return __spectre_bhb_loop_k32_end; + else if (start == __spectre_bhb_clearbhb_start) + return __spectre_bhb_clearbhb_end; + + return NULL; +} + +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) +{ + int cpu, slot = -1; + const char *hyp_vecs_end; + + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) + return; + + hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start); + if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end)) + return; + + spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + break; } } + + if (slot == -1) { + last_slot++; + BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) + / SZ_2K) <= last_slot); + slot = last_slot; + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start); + spin_unlock(&bp_lock); } +#else +#define __smccc_workaround_3_smc_start NULL +#define __spectre_bhb_loop_k8_start NULL +#define __spectre_bhb_loop_k24_start NULL +#define __spectre_bhb_loop_k32_start NULL +#define __spectre_bhb_clearbhb_start NULL + +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }; +#endif -void update_cpu_errata_workarounds(void) +void spectre_bhb_enable_mitigation(const struct 
arm64_cpu_capabilities *entry) { - update_cpu_capabilities(arm64_errata, "enabling workaround for"); + enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; + + if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) + return; + + if (!__spectrev2_safe && !__hardenbp_enab) { + /* No point mitigating Spectre-BHB alone. */ + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { + pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); + } else if (cpu_mitigations_off()) { + pr_info_once("spectre-bhb mitigation disabled by command line option\n"); + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { + state = SPECTRE_MITIGATED; + } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) { + kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start); + this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); + + state = SPECTRE_MITIGATED; + } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { + switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) { + case 8: + kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start); + break; + case 24: + kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start); + break; + case 32: + kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start); + break; + default: + WARN_ON_ONCE(1); + } + this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); + + state = SPECTRE_MITIGATED; + } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (fw_state == SPECTRE_MITIGATED) { + kvm_setup_bhb_slot(__smccc_workaround_3_smc_start); + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); + + /* + * With WA3 in the vectors, the WA1 calls can be + * removed. + */ + __this_cpu_write(bp_hardening_data.fn, NULL); + + state = SPECTRE_MITIGATED; + } + } + + update_mitigation_state(&spectre_bhb_state, state); } -void __init enable_errata_workarounds(void) +/* Patched to correct the immediate */ +void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) { - enable_cpu_capabilities(arm64_errata); + u8 rd; + u32 insn; + u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); + + BUG_ON(nr_inst != 1); /* MOV -> MOV */ + + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) + return; + + insn = le32_to_cpu(*origptr); + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); + insn = aarch64_insn_gen_movewide(rd, loop_count, 0, + AARCH64_INSN_VARIANT_64BIT, + AARCH64_INSN_MOVEWIDE_ZERO); + *updptr++ = cpu_to_le32(insn); } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 623f11ff8d79..e4fc648e3627 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -20,10 +20,13 @@ #include #include +#include #include #include #include #include +#include + #include #include #include @@ -31,6 +34,7 @@ #include #include #include +#include #include unsigned long elf_hwcap __read_mostly; @@ -51,6 +55,8 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); +DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; + static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p) { /* file-wide pr_fmt adds "CPU features: " prefix */ @@ -107,8 +113,14 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) * sync with the documentation of the CPU feature register ABI. 
*/ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0), @@ -117,36 +129,47 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), /* Linux doesn't care about the EL3 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, 
FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs @@ -156,27 +179,22 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT_WITH(CONFIG_ARM64_PAN), - FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT_WITH(CONFIG_ARM64_VHE), - FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT_WITH(CONFIG_KVM), - FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 
ID_AA64MMFR1_HADBS_SHIFT, 4, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT_WITH(CONFIG_ARM64_RAS_EXTN), - FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT_WITH(CONFIG_ARM64_UAO), - FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT_WITH(CONFIG_ARM64_CNP), - FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -203,14 +221,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { }; static const struct arm64_ftr_bits ftr_id_mmfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */ ARM64_FTR_END, }; @@ -231,8 +249,8 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { }; static const struct arm64_ftr_bits ftr_mvfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */ ARM64_FTR_END, }; @@ -244,25 +262,25 @@ static const struct arm64_ftr_bits ftr_dczid[] = { static const struct arm64_ftr_bits ftr_id_isar5[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0), + 
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0), ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_mmfr4[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */ ARM64_FTR_END, }; static const struct arm64_ftr_bits ftr_id_pfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */ ARM64_FTR_END, }; @@ -343,7 +361,7 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 4 */ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), - ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz), + ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1), /* Op1 = 0, CRn = 0, CRm = 5 */ ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), @@ -352,6 +370,7 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 6 */ ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1), + ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2), /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), @@ -478,6 +497,9 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) reg->user_mask = user_mask; } +extern const struct arm64_cpu_capabilities arm64_errata[]; +static void __init setup_boot_cpu_capabilities(void); + void __init init_cpu_features(struct cpuinfo_arm64 *info) { /* Before we start using the tables, make sure it is sorted */ @@ -490,6 +512,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); @@ -515,6 +538,11 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); } + /* + * Detect and enable early CPU capabilities based on the boot CPU, + * after we have initialised the CPU feature infrastructure. 
+ */ + setup_boot_cpu_capabilities(); } static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) @@ -596,6 +624,8 @@ void update_cpu_features(int cpu, info->reg_id_aa64isar0, boot->reg_id_aa64isar0); taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, info->reg_id_aa64isar1, boot->reg_id_aa64isar1); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, + info->reg_id_aa64isar2, boot->reg_id_aa64isar2); /* * Differing PARange support is fine as long as all peripherals and @@ -611,7 +641,6 @@ void update_cpu_features(int cpu, /* * EL3 is not our concern. - * ID_AA64PFR1 is currently RES0. */ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); @@ -720,6 +749,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64MMFR2_EL1); read_sysreg_case(SYS_ID_AA64ISAR0_EL1); read_sysreg_case(SYS_ID_AA64ISAR1_EL1); + read_sysreg_case(SYS_ID_AA64ISAR2_EL1); read_sysreg_case(SYS_CNTFRQ_EL0); read_sysreg_case(SYS_CTR_EL0); @@ -780,11 +810,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _ MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); } -static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) -{ - return is_kernel_in_hyp_mode(); -} - static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry, int __unused) { @@ -806,14 +831,34 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus ID_AA64PFR0_FP_SHIFT) < 0; } -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static bool __meltdown_safe = true; static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, - int __unused) + int scope) { - char const *str = "command line option"; - u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + /* List of CPUs that are not vulnerable and don't need KPTI */ + static const struct midr_range kpti_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + }; + char const *str = "kpti command line option"; + bool meltdown_safe; + + meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); + + /* Defer to CPU feature registers */ + if (has_cpuid_feature(entry, scope)) + meltdown_safe = true; + + if (!meltdown_safe) + __meltdown_safe = false; /* * For reasons that aren't entirely clear, enabling KPTI on Cavium @@ -825,6 +870,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, __kpti_forced = -1; } + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) { + if (!__kpti_forced) { + str = "KASLR"; + __kpti_forced = 1; + } + } + + if (cpu_mitigations_off() && !__kpti_forced) { + str = "mitigations=off"; + __kpti_forced = -1; + } + + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { + pr_info_once("kernel page table isolation disabled by kernel configuration\n"); + return false; + } + /* Forced? 
*/ if (__kpti_forced) { pr_info_once("kernel page table isolation forced %s by %s\n", @@ -832,23 +895,12 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, return __kpti_forced > 0; } - /* Useful for KASLR robustness */ - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return true; - - /* Don't force KPTI for CPUs that are not vulnerable */ - switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { - case MIDR_CAVIUM_THUNDERX2: - case MIDR_BRCM_VULCAN: - return false; - } - - /* Defer to CPU feature registers */ - return !cpuid_feature_extract_unsigned_field(pfr0, - ID_AA64PFR0_CSV3_SHIFT); + return !meltdown_safe; } -static int __nocfi kpti_install_ng_mappings(void *__unused) +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static void +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) { typedef void (kpti_remap_fn)(int, int, phys_addr_t); extern kpti_remap_fn idmap_kpti_install_ng_mappings; @@ -857,8 +909,14 @@ static int __nocfi kpti_install_ng_mappings(void *__unused) static bool kpti_applied = false; int cpu = smp_processor_id(); + if (__this_cpu_read(this_cpu_vector) == vectors) { + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); + + __this_cpu_write(this_cpu_vector, v); + } + if (kpti_applied) - return 0; + return; remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); @@ -869,8 +927,14 @@ static int __nocfi kpti_install_ng_mappings(void *__unused) if (!cpu) kpti_applied = true; - return 0; + return; +} +#else +static void +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) +{ } +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ static int __init parse_kpti(char *str) { @@ -884,9 +948,14 @@ static int __init parse_kpti(char *str) return 0; } early_param("kpti", parse_kpti); -#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ -static int cpu_copy_el2regs(void *__unused) +#ifdef CONFIG_ARM64_VHE +static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) +{ + return is_kernel_in_hyp_mode(); +} + +static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) { /* * Copy register values that aren't redirected by hardware. 
@@ -898,15 +967,56 @@ static int cpu_copy_el2regs(void *__unused) */ if (!alternatives_applied) write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); +} +#endif +#ifdef CONFIG_ARM64_SSBD +static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr) +{ + if (user_mode(regs)) + return 1; + + if (instr & BIT(CRm_shift)) + regs->pstate |= PSR_SSBS_BIT; + else + regs->pstate &= ~PSR_SSBS_BIT; + + arm64_skip_faulting_instruction(regs, 4); return 0; } +static struct undef_hook ssbs_emulation_hook = { + .instr_mask = ~(1U << CRm_shift), + .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM, + .fn = ssbs_emulation_handler, +}; + +static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused) +{ + static bool undef_hook_registered = false; + static DEFINE_SPINLOCK(hook_lock); + + spin_lock(&hook_lock); + if (!undef_hook_registered) { + register_undef_hook(&ssbs_emulation_hook); + undef_hook_registered = true; + } + spin_unlock(&hook_lock); + + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); + arm64_set_ssbd_mitigation(false); + } else { + arm64_set_ssbd_mitigation(true); + } +} +#endif /* CONFIG_ARM64_SSBD */ + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, .field_pos = ID_AA64PFR0_GIC_SHIFT, @@ -917,20 +1027,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Privileged Access Never", .capability = ARM64_HAS_PAN, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR1_EL1, .field_pos = ID_AA64MMFR1_PAN_SHIFT, .sign = FTR_UNSIGNED, .min_field_value = 1, - .enable = cpu_enable_pan, + .cpu_enable = cpu_enable_pan, }, #endif /* CONFIG_ARM64_PAN */ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) { .desc = "LSE atomic instructions", .capability = ARM64_HAS_LSE_ATOMICS, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR0_EL1, .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, @@ -941,14 +1051,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Software prefetching using PRFM", .capability = ARM64_HAS_NO_HW_PREFETCH, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, .matches = has_no_hw_prefetch, }, #ifdef CONFIG_ARM64_UAO { .desc = "User Access Override", .capability = ARM64_HAS_UAO, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR2_EL1, .field_pos = ID_AA64MMFR2_UAO_SHIFT, @@ -962,21 +1072,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = { #ifdef CONFIG_ARM64_PAN { .capability = ARM64_ALT_PAN_NOT_UAO, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = cpufeature_pan_not_uao, }, #endif /* CONFIG_ARM64_PAN */ +#ifdef CONFIG_ARM64_VHE { .desc = "Virtualization Host Extensions", .capability = ARM64_HAS_VIRT_HOST_EXTN, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = runs_at_el2, - .enable = cpu_copy_el2regs, + .cpu_enable = cpu_copy_el2regs, }, +#endif /* CONFIG_ARM64_VHE */ { .desc = "32-bit EL0 Support", .capability = ARM64_HAS_32BIT_EL0, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches 
= has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, @@ -986,22 +1098,28 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Reduced HYP mapping offset", .capability = ARM64_HYP_OFFSET_LOW, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = hyp_offset_low, }, -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 { .desc = "Kernel page table isolation (KPTI)", .capability = ARM64_UNMAP_KERNEL_AT_EL0, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, + /* + * The ID feature fields below are used to indicate that + * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for + * more details. + */ + .sys_reg = SYS_ID_AA64PFR0_EL1, + .field_pos = ID_AA64PFR0_CSV3_SHIFT, + .min_field_value = 1, .matches = unmap_kernel_at_el0, - .enable = kpti_install_ng_mappings, + .cpu_enable = kpti_install_ng_mappings, }, -#endif { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .min_field_value = 0, .matches = has_no_fpsimd, }, @@ -1009,26 +1127,39 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Data cache clean to Point of Persistence", .capability = ARM64_HAS_DCPOP, - .def_scope = SCOPE_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64ISAR1_EL1, .field_pos = ID_AA64ISAR1_DPB_SHIFT, .min_field_value = 1, }, +#endif +#ifdef CONFIG_ARM64_SSBD + { + .desc = "Speculative Store Bypassing Safe (SSBS)", + .capability = ARM64_SSBS, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .field_pos = ID_AA64PFR1_SSBS_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, + .cpu_enable = cpu_enable_ssbs, + }, #endif {}, }; -#define HWCAP_CAP(reg, field, s, min_value, type, cap) \ +#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ { \ .desc = #cap, \ - .def_scope = SCOPE_SYSTEM, \ + .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ .matches = has_cpuid_feature, \ .sys_reg = reg, \ .field_pos = field, \ .sign = s, \ .min_field_value = min_value, \ - .hwcap_type = type, \ + .hwcap_type = cap_type, \ .hwcap = cap, \ } @@ -1037,17 +1168,28 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM), + 
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC), + HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS), {}, }; @@ -1112,7 +1254,7 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) /* We support emulation of accesses to CPU ID feature registers */ elf_hwcap |= HWCAP_CPUID; for (; hwcaps->matches; hwcaps++) - if (hwcaps->matches(hwcaps, hwcaps->def_scope)) + if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) cap_set_elf_hwcap(hwcaps); } @@ -1135,11 +1277,13 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, return false; } -void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, - const char *info) +static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, + u16 scope_mask, const char *info) { + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; for (; caps->matches; caps++) { - if (!caps->matches(caps, caps->def_scope)) + if (!(caps->type & scope_mask) || + !caps->matches(caps, cpucap_default_scope(caps))) continue; if (!cpus_have_cap(caps->capability) && caps->desc) @@ -1148,33 +1292,69 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, } } +static void update_cpu_capabilities(u16 scope_mask) +{ + __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); + __update_cpu_capabilities(arm64_errata, scope_mask, + "enabling workaround for"); +} + +static int __enable_cpu_capability(void *arg) +{ + const struct arm64_cpu_capabilities *cap = arg; + + cap->cpu_enable(cap); + return 0; +} + /* * Run through the enabled capabilities and enable() it on all active * CPUs */ -void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) +static void __init +__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, + u16 scope_mask) { + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; for (; caps->matches; caps++) { unsigned int num = caps->capability; - if (!cpus_have_cap(num)) + if (!(caps->type & scope_mask) || !cpus_have_cap(num)) continue; /* Ensure cpus_have_const_cap(num) works */ static_branch_enable(&cpu_hwcap_keys[num]); - if (caps->enable) { + if (caps->cpu_enable) { /* - * Use stop_machine() as it schedules the work allowing - * us to modify PSTATE, instead of on_each_cpu() which - * uses an IPI, giving us a PSTATE that disappears when - * we return. 
+ * Capabilities with SCOPE_BOOT_CPU scope are finalised + * before any secondary CPU boots. Thus, each secondary + * will enable the capability as appropriate via + * check_local_cpu_capabilities(). The only exception is + * the boot CPU, for which the capability must be + * enabled here. This approach avoids costly + * stop_machine() calls for this case. + * + * Otherwise, use stop_machine() as it schedules the + * work allowing us to modify PSTATE, instead of + * on_each_cpu() which uses an IPI, giving us a PSTATE + * that disappears when we return. */ - stop_machine(caps->enable, (void *)caps, cpu_online_mask); + if (scope_mask & SCOPE_BOOT_CPU) + caps->cpu_enable(caps); + else + stop_machine(__enable_cpu_capability, + (void *)caps, cpu_online_mask); } } } +static void __init enable_cpu_capabilities(u16 scope_mask) +{ + __enable_cpu_capabilities(arm64_features, scope_mask); + __enable_cpu_capabilities(arm64_errata, scope_mask); +} + /* * Flag to indicate if we have computed the system wide * capabilities based on the boot time active CPUs. This @@ -1190,14 +1370,83 @@ static inline void set_sys_caps_initialised(void) sys_caps_initialised = true; } +/* + * Run through the list of capabilities to check for conflicts. + * If the system has already detected a capability, take necessary + * action on this CPU. + * + * Returns "false" on conflicts. + */ +static bool +__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps_list, + u16 scope_mask) +{ + bool cpu_has_cap, system_has_cap; + const struct arm64_cpu_capabilities *caps; + + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; + + for (caps = caps_list; caps->matches; caps++) { + if (!(caps->type & scope_mask)) + continue; + + cpu_has_cap = __this_cpu_has_cap(caps_list, caps->capability); + system_has_cap = cpus_have_cap(caps->capability); + + if (system_has_cap) { + /* + * Check if the new CPU misses an advertised feature, + * which is not safe to miss. + */ + if (!cpu_has_cap && !cpucap_late_cpu_optional(caps)) + break; + /* + * We have to issue cpu_enable() irrespective of + * whether the CPU has it or not, as it is enabeld + * system wide. It is upto the call back to take + * appropriate action on this CPU. + */ + if (caps->cpu_enable) + caps->cpu_enable(caps); + } else { + /* + * Check if the CPU has this capability if it isn't + * safe to have when the system doesn't. + */ + if (cpu_has_cap && !cpucap_late_cpu_permitted(caps)) + break; + } + } + + if (caps->matches) { + pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", + smp_processor_id(), caps->capability, + caps->desc, system_has_cap, cpu_has_cap); + return false; + } + + return true; +} + +static bool verify_local_cpu_caps(u16 scope_mask) +{ + return __verify_local_cpu_caps(arm64_errata, scope_mask) && + __verify_local_cpu_caps(arm64_features, scope_mask); +} + /* * Check for CPU features that are used in early boot * based on the Boot CPU value. */ static void check_early_cpu_features(void) { - verify_cpu_run_el(); verify_cpu_asid_bits(); + /* + * Early features are used by the kernel already. If there + * is a conflict, we cannot proceed further. 
+ */ + if (!verify_local_cpu_caps(SCOPE_BOOT_CPU)) + cpu_panic_kernel(); } static void @@ -1212,26 +1461,6 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) } } -static void -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) -{ - const struct arm64_cpu_capabilities *caps = caps_list; - for (; caps->matches; caps++) { - if (!cpus_have_cap(caps->capability)) - continue; - /* - * If the new CPU misses an advertised feature, we cannot proceed - * further, park the cpu. - */ - if (!__this_cpu_has_cap(caps_list, caps->capability)) { - pr_crit("CPU%d: missing feature: %s\n", - smp_processor_id(), caps->desc); - cpu_die_early(); - } - if (caps->enable) - caps->enable((void *)caps); - } -} /* * Run through the enabled system capabilities and enable() it on this CPU. @@ -1243,8 +1472,14 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) */ static void verify_local_cpu_capabilities(void) { - verify_local_cpu_errata_workarounds(); - verify_local_cpu_features(arm64_features); + /* + * The capabilities with SCOPE_BOOT_CPU are checked from + * check_early_cpu_features(), as they need to be verified + * on all secondary CPUs. + */ + if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU)) + cpu_die_early(); + verify_local_elf_hwcaps(arm64_elf_hwcaps); if (system_supports_32bit_el0()) verify_local_elf_hwcaps(compat_elf_hwcaps); @@ -1260,20 +1495,22 @@ void check_local_cpu_capabilities(void) /* * If we haven't finalised the system capabilities, this CPU gets - * a chance to update the errata work arounds. + * a chance to update the errata work arounds and local features. * Otherwise, this CPU should verify that it has all the system * advertised capabilities. */ if (!sys_caps_initialised) - update_cpu_errata_workarounds(); + update_cpu_capabilities(SCOPE_LOCAL_CPU); else verify_local_cpu_capabilities(); } -static void __init setup_feature_capabilities(void) +static void __init setup_boot_cpu_capabilities(void) { - update_cpu_capabilities(arm64_features, "detected feature:"); - enable_cpu_capabilities(arm64_features); + /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */ + update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU); + /* Enable the SCOPE_BOOT_CPU capabilities alone right away */ + enable_cpu_capabilities(SCOPE_BOOT_CPU); } DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); @@ -1292,14 +1529,24 @@ bool this_cpu_has_cap(unsigned int cap) __this_cpu_has_cap(arm64_errata, cap)); } +static void __init setup_system_capabilities(void) +{ + /* + * We have finalised the system-wide safe feature + * registers, finalise the capabilities that depend + * on it. Also enable all the available capabilities, + * that are not enabled already. 
+ */ + update_cpu_capabilities(SCOPE_SYSTEM); + enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU); +} + void __init setup_cpu_features(void) { u32 cwg; int cls; - /* Set the CPU feature capabilies */ - setup_feature_capabilities(); - enable_errata_workarounds(); + setup_system_capabilities(); mark_const_caps_ready(); setup_elf_hwcaps(arm64_elf_hwcaps); @@ -1404,7 +1651,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn) if (!rc) { dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); pt_regs_write_reg(regs, dst, val); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } return rc; @@ -1425,3 +1672,15 @@ static int __init enable_mrs_emulation(void) } core_initcall(enable_mrs_emulation); + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, + char *buf) +{ + if (__meltdown_safe) + return sprintf(buf, "Not affected\n"); + + if (arm64_kernel_unmapped_at_el0()) + return sprintf(buf, "Mitigation: PTI\n"); + + return sprintf(buf, "Vulnerable\n"); +} diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 07ac827e9fce..8a12ebe62127 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -69,6 +69,18 @@ static const char *const hwcap_str[] = { "fcma", "lrcpc", "dcpop", + "sha3", + "sm3", + "sm4", + "asimddp", + "sha512", + "sve", + "asimdfhm", + "dit", + "uscat", + "ilrcpc", + "flagm", + "ssbs", NULL }; @@ -321,6 +333,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); + info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 9a25f2ef60f7..b961d00c6bea 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index e752b68088cf..6e2f2cc62e14 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -421,6 +421,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, if (IS_ENABLED(CONFIG_ARM64_UAO) && cpus_have_const_cap(ARM64_HAS_UAO)) childregs->pstate |= PSR_UAO_BIT; + + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) + set_ssbs_bit(childregs); + p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } @@ -460,6 +464,32 @@ void uao_thread_switch(struct task_struct *next) } } +/* + * Force SSBS state on context-switch, since it may be lost after migrating + * from a CPU which treats the bit as RES0 in a heterogeneous system. + */ +static void ssbs_thread_switch(struct task_struct *next) +{ + struct pt_regs *regs = task_pt_regs(next); + + /* + * Nothing to do for kernel threads, but 'regs' may be junk + * (e.g. idle task) so check the flags and bail early. + */ + if (unlikely(next->flags & PF_KTHREAD)) + return; + + /* If the mitigation is enabled, then we leave SSBS clear. 
*/ + if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || + test_tsk_thread_flag(next, TIF_SSBD)) + return; + + if (compat_user_mode(regs)) + set_compat_ssbs_bit(regs); + else if (user_mode(regs)) + set_ssbs_bit(regs); +} + /* * We store our current task in sp_el0, which is clobbered by userspace. Keep a * shadow copy so that we can restore this upon entry from userspace. @@ -488,6 +518,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); uao_thread_switch(next); + ssbs_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 34d915b6974b..242527f29c41 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1402,15 +1402,20 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) } /* - * Bits which are always architecturally RES0 per ARM DDI 0487A.h + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. + * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \ - GENMASK_ULL(5, 5)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) static int valid_compat_regs(struct user_pt_regs *regs) { diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 3484e985a989..31f14371e747 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -86,43 +86,6 @@ enum ipi_msg_type { IPI_CPU_CRASH_STOP }; -#ifdef CONFIG_ARM64_VHE - -/* Whether the boot CPU is running in HYP mode or not*/ -static bool boot_cpu_hyp_mode; - -static inline void save_boot_cpu_run_el(void) -{ - boot_cpu_hyp_mode = is_kernel_in_hyp_mode(); -} - -static inline bool is_boot_cpu_in_hyp_mode(void) -{ - return boot_cpu_hyp_mode; -} - -/* - * Verify that a secondary CPU is running the kernel at the same - * EL as that of the boot CPU. - */ -void verify_cpu_run_el(void) -{ - bool in_el2 = is_kernel_in_hyp_mode(); - bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode(); - - if (in_el2 ^ boot_cpu_el2) { - pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n", - smp_processor_id(), - in_el2 ? 2 : 1, - boot_cpu_el2 ? 2 : 1); - cpu_panic_kernel(); - } -} - -#else -static inline void save_boot_cpu_run_el(void) {} -#endif - #ifdef CONFIG_HOTPLUG_CPU static int op_cpu_kill(unsigned int cpu); #else @@ -451,13 +414,6 @@ void __init smp_prepare_boot_cpu(void) */ jump_label_init(); cpuinfo_store_boot_cpu(); - save_boot_cpu_run_el(); - /* - * Run the errata work around checks on the boot CPU, once we have - * initialised the cpu feature infrastructure from - * cpuinfo_store_boot_cpu() above. 
- */ - update_cpu_errata_workarounds(); } static u64 __init of_get_cpu_mpidr(struct device_node *dn) diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c index 0560738c1d5c..58de005cd756 100644 --- a/arch/arm64/kernel/ssbd.c +++ b/arch/arm64/kernel/ssbd.c @@ -3,13 +3,32 @@ * Copyright (C) 2018 ARM Ltd, All Rights Reserved. */ +#include #include #include #include +#include #include +#include #include +static void ssbd_ssbs_enable(struct task_struct *task) +{ + u64 val = is_compat_thread(task_thread_info(task)) ? + PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; + + task_pt_regs(task)->pstate |= val; +} + +static void ssbd_ssbs_disable(struct task_struct *task) +{ + u64 val = is_compat_thread(task_thread_info(task)) ? + PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; + + task_pt_regs(task)->pstate &= ~val; +} + /* * prctl interface for SSBD */ @@ -45,12 +64,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) return -EPERM; task_clear_spec_ssb_disable(task); clear_tsk_thread_flag(task, TIF_SSBD); + ssbd_ssbs_enable(task); break; case PR_SPEC_DISABLE: if (state == ARM64_SSBD_FORCE_DISABLE) return -EPERM; task_set_spec_ssb_disable(task); set_tsk_thread_flag(task, TIF_SSBD); + ssbd_ssbs_disable(task); break; case PR_SPEC_FORCE_DISABLE: if (state == ARM64_SSBD_FORCE_DISABLE) @@ -58,6 +79,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); set_tsk_thread_flag(task, TIF_SSBD); + ssbd_ssbs_disable(task); break; default: return -ERANGE; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 9d23b5eded37..ea2d5bf0d9eb 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -437,6 +438,18 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, } } +void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) +{ + regs->pc += size; + + /* + * If we were single stepping, we want to get the step exception after + * we return from the trap. 
+ */ + if (user_mode(regs)) + user_fastforward_single_step(current); +} + static LIST_HEAD(undef_hook); static DEFINE_RAW_SPINLOCK(undef_lock); @@ -575,10 +588,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs, unsigned int esr force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0, esr); } -int cpu_enable_cache_maint_trap(void *__unused) +void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) { config_sctlr_el1(SCTLR_EL1_UCI, 0); - return 0; } #define __user_cache_maint(insn, address, res) \ @@ -656,7 +668,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) if (ret) arm64_notify_segfault(regs, address); else - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) @@ -666,7 +678,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) pt_regs_write_reg(regs, rt, val); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) @@ -674,7 +686,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; pt_regs_write_reg(regs, rt, arch_counter_get_cntvct()); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) @@ -682,7 +694,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; pt_regs_write_reg(regs, rt, arch_timer_get_rate()); - regs->pc += 4; + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } struct sys64_hook { @@ -953,7 +965,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr) } /* If thread survives, skip over the BUG instruction and continue: */ - regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */ + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); return DBG_HOOK_HANDLED; } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 44845996b554..02f8830492ce 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -26,6 +26,7 @@ #include #include #include +#include static bool __hyp_text __fpsimd_enabled_nvhe(void) { @@ -106,17 +107,21 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) static void __hyp_text __deactivate_traps_vhe(void) { - extern char vectors[]; /* kernel exception vectors */ + const char *host_vectors = vectors; u64 mdcr_el2 = read_sysreg(mdcr_el2); mdcr_el2 &= MDCR_EL2_HPMN_MASK | MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT | MDCR_EL2_TPMS; + write_sysreg(mdcr_el2, mdcr_el2); write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); write_sysreg(CPACR_EL1_FPEN, cpacr_el1); - write_sysreg(vectors, vbar_el1); + + if (!arm64_kernel_unmapped_at_el0()) + host_vectors = __this_cpu_read(this_cpu_vector); + write_sysreg(host_vectors, vbar_el1); } static void __hyp_text __deactivate_traps_nvhe(void) @@ -405,16 +410,6 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __set_host_arch_workaround_state(vcpu); - if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { - u32 midr = read_cpuid_id(); - - /* Apply BTAC predictors mitigation to all Falkor chips */ - if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || - ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { - __qcom_hyp_sanitize_btac_predictors(); - } - } - fp_enabled = 
__fpsimd_enabled(); __sysreg_save_guest_state(guest_ctxt); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index e19d89cabf2a..3773311ffcd0 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -188,3 +188,14 @@ void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2) { asm("msr tpidr_el2, %0": : "r" (tpidr_el2)); } + +void __hyp_text __kvm_enable_ssbs(void) +{ + u64 tmp; + + asm volatile( + "mrs %0, sctlr_el2\n" + "orr %0, %0, %1\n" + "msr sctlr_el2, %0" + : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS)); +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index c9dd7bd24317..6b158b39af7d 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -990,7 +990,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, NOKPROBE_SYMBOL(do_debug_exception); #ifdef CONFIG_ARM64_PAN -int cpu_enable_pan(void *__unused) +void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) { /* * We modify PSTATE. This won't work from irq context as the PSTATE @@ -1000,6 +1000,5 @@ int cpu_enable_pan(void *__unused) config_sctlr_el1(SCTLR_EL1_SPAN, 0); asm(SET_PSTATE_PAN(1)); - return 0; } #endif /* CONFIG_ARM64_PAN */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 702f30ddb014..72b9e21c4991 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -587,6 +587,7 @@ early_param("rodata", parse_rodata); #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static int __init map_entry_trampoline(void) { + int i; extern char __entry_tramp_text_start[]; pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; @@ -597,11 +598,15 @@ static int __init map_entry_trampoline(void) /* Map only the text into the trampoline page table */ memset(tramp_pg_dir, 0, PGD_SIZE); - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, - prot, pgd_pgtable_alloc, 0); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, + entry_tramp_text_size(), prot, pgd_pgtable_alloc, + 0); /* Map both the text and data into the kernel page table */ - __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, + pa_start + i * PAGE_SIZE, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern char __entry_tramp_data_start[]; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index b4c61d0f9f32..ceaf989efd00 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2076,6 +2076,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, } ret = binder_inc_ref_olocked(ref, strong, target_list); *rdata = ref->data; + if (ret && ref == new_ref) { + /* + * Cleanup the failed reference here as the target + * could now be dead and have already released its + * references by now. Calling on the new reference + * with strong=0 and a tmp_refs will not decrement + * the node. The new_ref gets kfree'd below. 
+ */ + binder_cleanup_ref_olocked(new_ref); + ref = NULL; + } + binder_proc_unlock(proc); if (new_ref && ref != new_ref) /* diff --git a/drivers/battery_v2/include/sec_battery.h b/drivers/battery_v2/include/sec_battery.h index bf320a61c0aa..5baf9207c2fb 100644 --- a/drivers/battery_v2/include/sec_battery.h +++ b/drivers/battery_v2/include/sec_battery.h @@ -399,6 +399,10 @@ struct sec_battery_info { unsigned int mix_limit; unsigned int vbus_limit; + /* lrp temperature check */ + unsigned int lrp_limit; + unsigned int lrp_step; + /* temperature check */ int temperature; /* battery temperature */ #if defined(CONFIG_ENG_BATTERY_CONCEPT) @@ -427,6 +431,8 @@ struct sec_battery_info { #if defined(CONFIG_DIRECT_CHARGING) int dchg_temp; #endif + int lrp; + int lrp_test; int temp_adc; int temp_ambient_adc; diff --git a/drivers/battery_v2/include/sec_battery_sysfs.h b/drivers/battery_v2/include/sec_battery_sysfs.h index 1df8a96f2832..b5b7b611854c 100644 --- a/drivers/battery_v2/include/sec_battery_sysfs.h +++ b/drivers/battery_v2/include/sec_battery_sysfs.h @@ -184,17 +184,13 @@ enum { BATT_TUNE_CHG_TEMP_HIGH, BATT_TUNE_CHG_TEMP_REC, BATT_TUNE_CHG_LIMMIT_CUR, + BATT_TUNE_LRP_TEMP_HIGH_LCDON, + BATT_TUNE_LRP_TEMP_HIGH_LCDOFF, BATT_TUNE_COIL_TEMP_HIGH, BATT_TUNE_COIL_TEMP_REC, BATT_TUNE_COIL_LIMMIT_CUR, BATT_TUNE_WPC_TEMP_HIGH, BATT_TUNE_WPC_TEMP_HIGH_REC, - BATT_TUNE_DCHG_TEMP_HIGH, - BATT_TUNE_DCHG_TEMP_HIGH_REC, - BATT_TUNE_DCHG_BATT_TEMP_HIGH, - BATT_TUNE_DCHG_BATT_TEMP_HIGH_REC, - BATT_TUNE_DCHG_LIMMIT_INPUT_CUR, - BATT_TUNE_DCHG_LIMMIT_CHG_CUR, #endif #if defined(CONFIG_UPDATE_BATTERY_DATA) BATT_UPDATE_DATA, @@ -258,6 +254,7 @@ enum { #endif BOOT_COMPLETED, BATT_FULL_CAPACITY, + LRP, }; enum { diff --git a/drivers/battery_v2/include/sec_charging_common.h b/drivers/battery_v2/include/sec_charging_common.h index f050c0627222..7165a0825b40 100644 --- a/drivers/battery_v2/include/sec_charging_common.h +++ b/drivers/battery_v2/include/sec_charging_common.h @@ -826,6 +826,50 @@ struct sec_wireless_rx_power_info { #define sec_wireless_rx_power_info_t \ struct sec_wireless_rx_power_info +#define LRP_PROPS 12 +#define FOREACH_LRP_TYPE(GEN_LRP_TYPE) \ + GEN_LRP_TYPE(LRP_NORMAL) \ + GEN_LRP_TYPE(LRP_25W) \ + GEN_LRP_TYPE(LRP_45W) \ + GEN_LRP_TYPE(LRP_MAX) + +#define GENERATE_LRP_ENUM(ENUM) ENUM, +#define GENERATE_LRP_STRING(STRING) #STRING, + +enum LRP_TYPE_ENUM { + FOREACH_LRP_TYPE(GENERATE_LRP_ENUM) +}; + +static const char * const LRP_TYPE_STRING[] = { + FOREACH_LRP_TYPE(GENERATE_LRP_STRING) +}; + +enum { + LRP_NONE = 0, + LRP_STEP1, + LRP_STEP2, +}; + +enum { + ST1 = 0, + ST2, +}; + +enum { + LCD_ON = 0, + LCD_OFF, +}; + +struct lrp_temp_t { + int trig[2][2]; + int recov[2][2]; +}; + +struct lrp_current_t { + int st_icl[2]; + int st_fcc[2]; +}; + struct sec_battery_platform_data { /* NO NEED TO BE CHANGED */ /* callback functions */ @@ -1016,6 +1060,7 @@ struct sec_battery_platform_data { #endif sec_battery_temp_check_t temp_check_type; + int lrp_temp_check_type; unsigned int temp_check_count; sec_battery_temp_check_t usb_temp_check_type; sec_battery_temp_check_t usb_temp_check_type_backup; @@ -1058,10 +1103,15 @@ struct sec_battery_platform_data { int chg_12v_high_temp; int chg_high_temp; int chg_high_temp_recovery; - int dchg_high_temp; - int dchg_high_temp_recovery; - int dchg_high_batt_temp; - int dchg_high_batt_temp_recovery; + + int dchg_high_temp[4]; + int dchg_high_temp_recovery[4]; + int dchg_high_batt_temp[4]; + int dchg_high_batt_temp_recovery[4]; + + struct lrp_temp_t lrp_temp[LRP_MAX]; + 
struct lrp_current_t lrp_curr[LRP_MAX]; + unsigned int chg_charging_limit_current; unsigned int chg_input_limit_current; #if defined(CONFIG_DIRECT_CHARGING) diff --git a/drivers/battery_v2/sec_battery.c b/drivers/battery_v2/sec_battery.c index 8b8aa5ff1fc5..8f4b15fa3422 100644 --- a/drivers/battery_v2/sec_battery.c +++ b/drivers/battery_v2/sec_battery.c @@ -392,7 +392,8 @@ static int sec_bat_get_wireless_current(struct sec_battery_info *battery, int in static void sec_bat_get_charging_current_by_siop(struct sec_battery_info *battery, int *input_current, int *charging_current) { - if (battery->siop_level < 100) { + if (battery->siop_level < 100 && + !((battery->siop_level == 80) && is_wired_type(battery->cable_type))) { int max_charging_current; if (is_wireless_type(battery->cable_type)) { @@ -736,6 +737,275 @@ static void sec_bat_check_wpc_temp(struct sec_battery_info *battery, int *input_ } } +int get_chg_power_type(int ct, int ws, int pd_max_pw, int max_pw) +{ + if (!is_wireless_type(ct)) { + if (is_pd_wire_type(ct) && + pd_max_pw >= HV_CHARGER_STATUS_STANDARD4) + return SFC_45W; + else if (is_pd_wire_type(ct) && + pd_max_pw >= HV_CHARGER_STATUS_STANDARD3) + return SFC_25W; + else if (is_hv_wire_12v_type(ct) || + max_pw >= HV_CHARGER_STATUS_STANDARD2) /* 20000mW */ + return AFC_12V_OR_20W; + else if (is_hv_wire_type(ct) || + (is_pd_wire_type(ct) && + pd_max_pw >= HV_CHARGER_STATUS_STANDARD1) || + ws == SEC_BATTERY_CABLE_PREPARE_TA || + max_pw >= HV_CHARGER_STATUS_STANDARD1) /* 12000mW */ + return AFC_9V_OR_15W; + } + + return NORMAL_TA; +} + +int sec_bat_check_power_type( + int max_chg_pwr, int pd_max_chg_pwr, int ct, int ws, int is_apdo) +{ + if (is_pd_wire_type(ct) && is_apdo) { + if (get_chg_power_type(ct, ws, pd_max_chg_pwr, max_chg_pwr) == SFC_45W) + return SFC_45W; + else if (get_chg_power_type(ct, ws, pd_max_chg_pwr, max_chg_pwr) == SFC_25W) + return SFC_25W; + else + return NORMAL_TA; + } else + return NORMAL_TA; +} + +#if defined(CONFIG_DIRECT_CHARGING) +static int sec_bat_check_lpm_power(int lpm, int pt) +{ + int ret = 0; + + if (pt == SFC_25W) + ret |= 0x02; + + if (lpm) + ret |= 0x01; + + return ret; +} +#endif + +int sec_bat_check_lrp_temp_cond(int prev_step, + int temp, int trig, int recov) +{ + if (trig <= temp) + prev_step++; + else if (recov >= temp) + prev_step--; + + if (prev_step < LRP_NONE) + prev_step = LRP_NONE; + else if (prev_step > LRP_STEP2) + prev_step = LRP_STEP2; + + return prev_step; +} + +int sec_bat_check_lrp_step( + struct sec_battery_info *battery, int temp, int pt, bool lcd_sts) +{ + int step = LRP_NONE; + int lcd_st = LCD_OFF; + int lrp_pt = LRP_NORMAL; + int lrp_high_temp_st1 = battery->pdata->lrp_temp[LRP_NORMAL].trig[ST1][LCD_OFF]; + int lrp_high_temp_st2 = battery->pdata->lrp_temp[LRP_NORMAL].trig[ST2][LCD_OFF]; + int lrp_high_temp_recov_st1 = battery->pdata->lrp_temp[LRP_NORMAL].recov[ST1][LCD_OFF]; + int lrp_high_temp_recov_st2 = battery->pdata->lrp_temp[LRP_NORMAL].recov[ST2][LCD_OFF]; + + if (lcd_sts) + lcd_st = LCD_ON; + + if (pt == SFC_45W) + lrp_pt = LRP_45W; + else if (pt == SFC_25W) + lrp_pt = LRP_25W; + + lrp_high_temp_st1 = battery->pdata->lrp_temp[lrp_pt].trig[ST1][lcd_st]; + lrp_high_temp_st2 = battery->pdata->lrp_temp[lrp_pt].trig[ST2][lcd_st]; + lrp_high_temp_recov_st1 = battery->pdata->lrp_temp[lrp_pt].recov[ST1][lcd_st]; + lrp_high_temp_recov_st2 = battery->pdata->lrp_temp[lrp_pt].recov[ST2][lcd_st]; + + pr_info("%s: st1(%d), st2(%d), recv_st1(%d), recv_st2(%d), lrp(%d)\n", __func__, + lrp_high_temp_st1, lrp_high_temp_st2, + 
lrp_high_temp_recov_st1, lrp_high_temp_recov_st2, temp); + + switch (battery->lrp_step) { + case LRP_STEP2: + step = sec_bat_check_lrp_temp_cond(battery->lrp_step, + temp, 900, lrp_high_temp_recov_st2); + break; + case LRP_STEP1: + step = sec_bat_check_lrp_temp_cond(battery->lrp_step, + temp, lrp_high_temp_st2, lrp_high_temp_recov_st1); + break; + case LRP_NONE: + step = sec_bat_check_lrp_temp_cond(battery->lrp_step, + temp, lrp_high_temp_st1, -200); + break; + default: + break; + } + + if ((battery->lrp_step != LRP_STEP1) && (step == LRP_STEP1)) + step = sec_bat_check_lrp_temp_cond(step, + temp, lrp_high_temp_st2, lrp_high_temp_recov_st1); + + return step; +} + +int sec_bat_get_lrp_step(struct sec_battery_info *battery) +{ + bool is_apdo = false; + int power_type = NORMAL_TA; + int ct = battery->cable_type, ws = battery->wire_status; + +#if IS_ENABLED(CONFIG_DIRECT_CHARGING) + is_apdo = (is_pd_apdo_wire_type(ct) && battery->pd_list.now_isApdo) ? 1 : 0; +#endif + power_type = sec_bat_check_power_type(battery->max_charge_power, + battery->pd_max_charge_power, ct, ws, is_apdo); + + return sec_bat_check_lrp_step(battery, battery->lrp, power_type, battery->lcd_status); +} + +void sec_bat_check_lrp_temp( + struct sec_battery_info *battery, int ct, int ws, int siop_level, bool lcd_sts, int *input_current, int *charging_current) +{ + int lrp_step = LRP_NONE; + bool is_apdo = false; + int power_type = NORMAL_TA; + bool force_check = false; + int ret = 0; + int max_charging_current = 1800; + int temp_ic = *input_current, temp_cc = *charging_current; + + if (battery->pdata->lrp_temp_check_type == SEC_BATTERY_TEMP_CHECK_NONE) + return; + +#if IS_ENABLED(CONFIG_DIRECT_CHARGING) + is_apdo = (is_pd_apdo_wire_type(ct) && battery->pd_list.now_isApdo) ? 1 : 0; +#endif + power_type = sec_bat_check_power_type(battery->max_charge_power, + battery->pd_max_charge_power, ct, ws, is_apdo); + + lrp_step = sec_bat_check_lrp_step(battery, battery->lrp, power_type, lcd_sts); + + /* 15w afc or pd ta */ + if (power_type == NORMAL_TA) { + ret = battery->input_voltage; + if (((ret == SEC_INPUT_VOLTAGE_5V) && !lcd_sts) || + ((ret != SEC_INPUT_VOLTAGE_5V) && lcd_sts)) + force_check = true; + pr_info("%s: force_check(%d), ret(%d), lcd(%d)\n", __func__, + (force_check ? 1 : 0), ret, (lcd_sts ? 
1 : 0)); + } + + if ((lrp_step == LRP_STEP2) || (lrp_step == LRP_STEP1)) { + if (*charging_current > max_charging_current) + *charging_current = max_charging_current; + + if (is_pd_wire_type(ct)) { + if (power_type == SFC_45W) { + *input_current = battery->pdata->lrp_curr[LRP_45W].st_icl[lrp_step - 1]; + *charging_current = battery->pdata->lrp_curr[LRP_45W].st_fcc[lrp_step - 1]; + } else if (power_type == SFC_25W) { + *input_current = battery->pdata->lrp_curr[LRP_25W].st_icl[lrp_step - 1]; + *charging_current = battery->pdata->lrp_curr[LRP_25W].st_fcc[lrp_step - 1]; + } else { + if (*input_current > (60000 / battery->input_voltage)) + *input_current = 60000 / battery->input_voltage; + } + } else if (is_hv_wire_type(ct)) { + if (is_hv_wire_12v_type(battery->cable_type)) { + if (*input_current > battery->pdata->siop_hv_12v_input_limit_current) + *input_current = battery->pdata->siop_hv_12v_input_limit_current; + } else { + if (*input_current > battery->pdata->siop_hv_input_limit_current) + *input_current = battery->pdata->siop_hv_input_limit_current; + } + } else { + if (*input_current > battery->pdata->siop_input_limit_current) + *input_current = battery->pdata->siop_input_limit_current; + } + if ((battery->lrp_step != lrp_step) || force_check) + pr_info( + "%s:LRP:%d%%,%dmV,lrp_step(%d),lcd(%d),tlrp(%d),icl(%d),fcc(%d),ct(%d),is_apdo(%d),mcp(%d,%d)", + __func__, battery->capacity, battery->voltage_now, lrp_step, lcd_sts, + battery->lrp, *input_current, *charging_current, battery->cable_type, + is_apdo, battery->pd_max_charge_power, battery->max_charge_power); + battery->lrp_limit = true; + } else if ((battery->lrp_limit == true) && (lrp_step == LRP_NONE)) { + battery->lrp_limit = false; + pr_info( + "%s:LRP:SOC(%d),Vnow(%d),lrp_lim(%d),tlrp(%d),ct(%d)", + __func__, battery->capacity, battery->voltage_now, battery->lrp_limit, + battery->lrp, battery->cable_type); + } + battery->lrp_step = lrp_step; + + pr_info("%s: cable_type(%d), lrp_step(%d), lrp(%d)\n", __func__, + ct, battery->lrp_step, battery->lrp); + + if (temp_ic < *input_current || (power_type >= SFC_25W && temp_cc < *charging_current)) { + pr_info("%s: do not set new icl(%d) cc(%d) because of old icl(%d) cc(%d)\n", + __func__, *input_current, temp_ic, *charging_current, temp_cc); + *input_current = temp_ic; + *charging_current = temp_cc; + } +} + +#if defined(CONFIG_DIRECT_CHARGING) +void sec_bat_set_dchg_current(struct sec_battery_info *battery, int power_type, int is_apdo, int pt, int *input_current, int *charging_current) +{ + int temp_ic = *input_current, temp_cc = *charging_current; + + /* skip power_type check for non-use lrp_temp_check models */ + if (battery->pdata->lrp_temp_check_type == SEC_BATTERY_TEMP_CHECK_NONE) + power_type = NORMAL_TA; + + if (power_type == SFC_45W) { + if (pt & 0x01) { + *input_current = battery->pdata->lrp_curr[LRP_45W].st_icl[ST1]; + *charging_current = battery->pdata->lrp_curr[LRP_45W].st_fcc[ST1]; + } else { + *input_current = battery->pdata->lrp_curr[LRP_45W].st_icl[ST2]; + *charging_current = battery->pdata->lrp_curr[LRP_45W].st_fcc[ST2]; + } + } else if (power_type == SFC_25W) { + if (pt & 0x01) { + *input_current = battery->pdata->lrp_curr[LRP_25W].st_icl[ST1]; + *charging_current = battery->pdata->lrp_curr[LRP_25W].st_fcc[ST1]; + } else { + *input_current = battery->pdata->lrp_curr[LRP_25W].st_icl[ST2]; + *charging_current = battery->pdata->lrp_curr[LRP_25W].st_fcc[ST2]; + } + } else { + if (battery->input_voltage == SEC_INPUT_VOLTAGE_5V) { + *input_current = 
battery->pdata->default_input_current; + *charging_current = battery->pdata->default_charging_current; + } else { + if (is_apdo) { + *input_current = battery->pdata->dchg_input_limit_current; + *charging_current = battery->pdata->dchg_charging_limit_current; + } else { + *input_current = battery->pdata->chg_input_limit_current; + *charging_current = battery->pdata->chg_charging_limit_current; + } + } + } + if (temp_ic < *input_current || (power_type >= SFC_25W && temp_cc < *charging_current)) { + pr_info("%s: do not set new icl(%d) cc(%d) because of old icl(%d) cc(%d)\n", + __func__, *input_current, *charging_current, temp_ic, temp_cc); + *input_current = temp_ic; + *charging_current = temp_cc; + } +} +#endif + static bool sec_bat_change_vbus(struct sec_battery_info *battery, int *input_current) { if (battery->pdata->chg_temp_check_type == SEC_BATTERY_TEMP_CHECK_NONE) @@ -744,9 +1014,14 @@ static bool sec_bat_change_vbus(struct sec_battery_info *battery, int *input_cur #if defined(CONFIG_SUPPORT_HV_CTRL) union power_supply_propval value; unsigned int target_vbus = SEC_INPUT_VOLTAGE_0V; + int lrp_step = LRP_NONE; - if (battery->store_mode) + if (battery->store_mode || + ((battery->siop_level == 80) && is_wired_type(battery->cable_type))) { + pr_info("%s : store_mode(%d) siop(%d) ct(%d)\n", + __func__, battery->store_mode, battery->siop_level, battery->cable_type); return false; + } if (is_hv_wire_type(battery->cable_type) && (battery->cable_type != SEC_BATTERY_CABLE_QC30)) { @@ -757,12 +1032,15 @@ static bool sec_bat_change_vbus(struct sec_battery_info *battery, int *input_cur return false; } + lrp_step = sec_bat_get_lrp_step(battery); + pr_info("%s: lrp_step: %d\n", __func__, lrp_step); + /* check target vbus */ if (battery->vbus_limit) target_vbus = SEC_INPUT_VOLTAGE_0V; else if (battery->vbus_chg_by_full) target_vbus = SEC_INPUT_VOLTAGE_5V; - else if (battery->siop_level >= 100 && !battery->lcd_status) { + else if (battery->siop_level >= 100 && lrp_step == LRP_NONE) { if (is_hv_wire_12v_type(battery->cable_type)) target_vbus = SEC_INPUT_VOLTAGE_12V; else @@ -817,52 +1095,45 @@ static bool sec_bat_change_vbus(struct sec_battery_info *battery, int *input_cur #if defined(CONFIG_DIRECT_CHARGING) static void sec_bat_check_direct_chg_temp(struct sec_battery_info *battery, int *input_current, int *charging_current) { + int pt = 0; + int ct = battery->cable_type, ws = battery->wire_status; + bool is_apdo = false; + int power_type = NORMAL_TA; + if (battery->pdata->dchg_temp_check_type == SEC_BATTERY_TEMP_CHECK_NONE) return; - if (battery->siop_level >= 100 && !battery->lcd_status) { - if (!battery->chg_limit && battery->pd_list.now_isApdo && - ((battery->dchg_temp >= battery->pdata->dchg_high_temp) || - (battery->temperature >= battery->pdata->dchg_high_batt_temp))) { - *input_current = battery->pdata->dchg_input_limit_current; - *charging_current = battery->pdata->dchg_charging_limit_current; + is_apdo = (is_pd_apdo_wire_type(ct) && battery->pd_list.now_isApdo) ? 
1 : 0; + power_type = sec_bat_check_power_type(battery->max_charge_power, + battery->pd_max_charge_power, ct, ws, is_apdo); + + pt = sec_bat_check_lpm_power(lpcharge, power_type); + + if (battery->siop_level >= 100) { + if (!battery->chg_limit && is_apdo && + ((battery->dchg_temp >= battery->pdata->dchg_high_temp[pt]) || + (battery->temperature >= battery->pdata->dchg_high_batt_temp[pt]))) { + sec_bat_set_dchg_current(battery, power_type, pt, is_apdo, input_current, charging_current); battery->chg_limit = true; - } else if (!battery->chg_limit && (!battery->pd_list.now_isApdo) && + } else if (!battery->chg_limit && (!is_apdo) && (battery->chg_temp >= battery->pdata->chg_high_temp)) { - if (battery->input_voltage == SEC_INPUT_VOLTAGE_5V) { - *input_current = battery->pdata->default_input_current; - *charging_current = battery->pdata->default_charging_current; - } else { - *input_current = battery->pdata->chg_input_limit_current; - *charging_current = battery->pdata->chg_charging_limit_current; - } + sec_bat_set_dchg_current(battery, power_type, pt, is_apdo, input_current, charging_current); battery->chg_limit = true; } else if (battery->chg_limit) { - if (((battery->dchg_temp <= battery->pdata->dchg_high_temp_recovery) && - (battery->temperature <= battery->pdata->dchg_high_batt_temp_recovery) && - battery->pd_list.now_isApdo) || ((battery->chg_temp <= battery->pdata->chg_high_temp_recovery) && - (!battery->pd_list.now_isApdo))) { + if (((battery->dchg_temp <= battery->pdata->dchg_high_temp_recovery[pt]) && + (battery->temperature <= battery->pdata->dchg_high_batt_temp_recovery[pt]) && + is_apdo) || ((battery->chg_temp <= battery->pdata->chg_high_temp_recovery) && + (!is_apdo))) { *input_current = battery->pdata->charging_current[battery->cable_type].input_current_limit; *charging_current = battery->pdata->charging_current[battery->cable_type].fast_charging_current; battery->chg_limit = false; } else { - if (battery->pd_list.now_isApdo) { - *input_current = battery->pdata->dchg_input_limit_current; - *charging_current = battery->pdata->dchg_charging_limit_current; - } else { - if (battery->input_voltage == SEC_INPUT_VOLTAGE_5V) { - *input_current = battery->pdata->default_input_current; - *charging_current = battery->pdata->default_charging_current; - } else { - *input_current = battery->pdata->chg_input_limit_current; - *charging_current = battery->pdata->chg_charging_limit_current; - } - } + sec_bat_set_dchg_current(battery, power_type, pt, is_apdo, input_current, charging_current); battery->chg_limit = true; } } - pr_info("%s: cable_type(%d), chg_limit(%d) vbus_by_siop(%d)\n", __func__, - battery->cable_type, battery->chg_limit, battery->vbus_chg_by_siop); + pr_info("%s: cable_type(%d), chg_limit(%d) vbus_by_siop(%d) ic(%d) cc(%d)\n", __func__, + battery->cable_type, battery->chg_limit, battery->vbus_chg_by_siop, *input_current, *charging_current); } } #endif @@ -873,7 +1144,7 @@ static void sec_bat_check_afc_temp(struct sec_battery_info *battery, int *input_ return; #if defined(CONFIG_SUPPORT_HV_CTRL) - if (battery->siop_level >= 100 && !battery->lcd_status) { + if (battery->siop_level >= 100) { if (!battery->chg_limit && is_hv_wire_type(battery->cable_type) && (battery->chg_temp >= battery->pdata->chg_high_temp)) { *input_current = battery->pdata->chg_input_limit_current; *charging_current = battery->pdata->chg_charging_limit_current; @@ -926,12 +1197,17 @@ static bool sec_bat_change_vbus_pd(struct sec_battery_info *battery, int *input_ { #if defined(CONFIG_SUPPORT_HV_CTRL) int 
target_pd_index = 0; + int lrp_step = LRP_NONE; if (battery->pdata->chg_temp_check_type == SEC_BATTERY_TEMP_CHECK_NONE) return false; - if (battery->store_mode) + if (battery->store_mode || + ((battery->siop_level == 80) && is_wired_type(battery->cable_type))) { + pr_info("%s : store_mode(%d) siop(%d) ct(%d)\n", + __func__, battery->store_mode, battery->siop_level, battery->cable_type); return false; + } if (battery->cable_type == SEC_BATTERY_CABLE_PDIC) { if (battery->current_event & SEC_BAT_CURRENT_EVENT_SELECT_PDO) { @@ -940,7 +1216,10 @@ static bool sec_bat_change_vbus_pd(struct sec_battery_info *battery, int *input_ return false; } - if (battery->siop_level >= 100) { + lrp_step = sec_bat_get_lrp_step(battery); + pr_info("%s: lrp_step: %d siop(%d)\n", __func__, lrp_step, battery->siop_level); + + if (battery->siop_level >= 100 && lrp_step == LRP_NONE) { /* select PDO greater than 5V */ target_pd_index = battery->pd_list.max_pd_count - 1; } else { @@ -1220,6 +1499,8 @@ int sec_bat_set_charging_current(struct sec_battery_info *battery) } sec_bat_get_charging_current_by_siop(battery, &input_current, &charging_current); + sec_bat_check_lrp_temp(battery, battery->cable_type, battery->wire_status, + battery->siop_level, battery->lcd_status, &input_current, &charging_current); /* Calculate wireless input current under the specific conditions (wpc_sleep_mode, chg_limit)*/ if (battery->wc_status != SEC_WIRELESS_PAD_NONE) { @@ -3517,21 +3798,21 @@ void sec_bat_get_battery_info(struct sec_battery_info *battery) { #if defined(CONFIG_DIRECT_CHARGING) pr_info("%s:Vnow(%dmV),Vavg(%dmV),Inow(%dmA),Iavg(%dmA),Isysavg(%dmA),Imax(%dmA),Ichg(%dmA),SOC(%d%%)," - "Tbat(%d),Tusb(%d),Tchg(%d),Twpc(%d),Tdchg(%d)\n", __func__, + "Tbat(%d),Tusb(%d),Tchg(%d),Twpc(%d),Tdchg(%d) lrp(%d)\n", __func__, battery->voltage_now, battery->voltage_avg, battery->current_now, battery->current_avg, battery->current_sys_avg, battery->current_max, battery->charging_current, battery->capacity, battery->temperature, - battery->usb_temp, battery->chg_temp, battery->wpc_temp, battery->dchg_temp + battery->usb_temp, battery->chg_temp, battery->wpc_temp, battery->dchg_temp, battery->lrp ); #else pr_info("%s:Vnow(%dmV),Vavg(%dmV),Inow(%dmA),Iavg(%dmA),Isysavg(%dmA),Imax(%dmA),Ichg(%dmA),SOC(%d%%)," - "Tbat(%d),Tusb(%d),Tchg(%d),Twpc(%d)\n", __func__, + "Tbat(%d),Tusb(%d),Tchg(%d),Twpc(%d) lrp(%d)\n", __func__, battery->voltage_now, battery->voltage_avg, battery->current_now, battery->current_avg, battery->current_sys_avg, battery->current_max, battery->charging_current, battery->capacity, battery->temperature, - battery->usb_temp, battery->chg_temp, battery->wpc_temp + battery->usb_temp, battery->chg_temp, battery->wpc_temp, battery->lrp ); #endif dev_dbg(battery->dev, @@ -5554,6 +5835,8 @@ static void sec_bat_cable_work(struct work_struct *work) sec_bat_set_charging_status(battery, POWER_SUPPLY_STATUS_DISCHARGING); battery->chg_limit = false; + battery->lrp_limit = false; + battery->lrp_step = LRP_NONE; battery->mix_limit = false; battery->chg_limit_recovery_cable = SEC_BATTERY_CABLE_NONE; battery->wc_heating_start_time = 0; @@ -5625,6 +5908,8 @@ static void sec_bat_cable_work(struct work_struct *work) battery->cable_type == SEC_BATTERY_CABLE_POWER_SHARING) { battery->charging_mode = SEC_BATTERY_CHARGING_NONE; battery->status = POWER_SUPPLY_STATUS_DISCHARGING; + } else if (battery->misc_event & BATT_MISC_EVENT_FULL_CAPACITY) { + battery->status = POWER_SUPPLY_STATUS_NOT_CHARGING; } else if (!battery->is_sysovlo && 
!battery->is_vbatovlo && !battery->is_abnormal_temp && (!battery->charging_block || !battery->swelling_mode)) { if (battery->pdata->full_check_type != @@ -7498,6 +7783,8 @@ static int usb_typec_handle_notification(struct notifier_block *nb, if (battery->wire_status == SEC_BATTERY_CABLE_USB || battery->wire_status == SEC_BATTERY_CABLE_TA) { cable_type = battery->wire_status; battery->chg_limit = false; + battery->lrp_limit = false; + battery->lrp_step = LRP_NONE; sec_bat_set_rp_current(battery, cable_type); goto skip_cable_check; } @@ -8379,6 +8666,8 @@ static int sec_battery_probe(struct platform_device *pdev) #endif battery->charging_block = false; battery->chg_limit = false; + battery->lrp_limit = false; + battery->lrp_step = LRP_NONE; battery->mix_limit = false; battery->vbus_limit = false; battery->vbus_chg_by_siop = SEC_INPUT_VOLTAGE_0V; @@ -8390,6 +8679,8 @@ static int sec_battery_probe(struct platform_device *pdev) #if defined(CONFIG_ENG_BATTERY_CONCEPT) || defined(CONFIG_SEC_FACTORY) battery->cooldown_mode = true; #endif + battery->lrp = 0; + battery->lrp_test = 0; battery->skip_swelling = false; battery->led_cover = 0; battery->hiccup_status = 0; diff --git a/drivers/battery_v2/sec_battery_dt.c b/drivers/battery_v2/sec_battery_dt.c index 3d136a3b7fd5..0ae35a466227 100644 --- a/drivers/battery_v2/sec_battery_dt.c +++ b/drivers/battery_v2/sec_battery_dt.c @@ -14,6 +14,159 @@ #ifdef CONFIG_OF + +#define PROPERTY_NAME_SIZE 128 + +int sec_bat_parse_dt_lrp( + struct sec_battery_info *battery, struct device_node *np, int type) +{ + sec_battery_platform_data_t *pdata = battery->pdata; + int ret = 0, len = 0; + char prop_name[PROPERTY_NAME_SIZE]; + int lrp_table[LRP_PROPS]; + + snprintf(prop_name, PROPERTY_NAME_SIZE, + "battery,temp_table_%s", LRP_TYPE_STRING[type]); + len = of_property_count_u32_elems(np, prop_name); + if (len != LRP_PROPS) + return -1; + + ret = of_property_read_u32_array(np, prop_name, + (u32 *)lrp_table, LRP_PROPS); + if (ret) { + pr_info("%s: failed to parse %s!!, ret = %d\n", + __func__, LRP_TYPE_STRING[type], ret); + return ret; + } + + pdata->lrp_temp[type].trig[ST2][LCD_OFF] = lrp_table[0]; + pdata->lrp_temp[type].recov[ST2][LCD_OFF] = lrp_table[1]; + pdata->lrp_temp[type].trig[ST1][LCD_OFF] = lrp_table[2]; + pdata->lrp_temp[type].recov[ST1][LCD_OFF] = lrp_table[3]; + pdata->lrp_temp[type].trig[ST2][LCD_ON] = lrp_table[4]; + pdata->lrp_temp[type].recov[ST2][LCD_ON] = lrp_table[5]; + pdata->lrp_temp[type].trig[ST1][LCD_ON] = lrp_table[6]; + pdata->lrp_temp[type].recov[ST1][LCD_ON] = lrp_table[7]; + pdata->lrp_curr[type].st_icl[ST1] = lrp_table[8]; + pdata->lrp_curr[type].st_fcc[ST1] = lrp_table[9]; + pdata->lrp_curr[type].st_icl[ST2] = lrp_table[10]; + pdata->lrp_curr[type].st_fcc[ST2] = lrp_table[11]; + + pr_info("%s: pdata->lrp_temp[%s].trig_st1=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].trig[ST1][LCD_OFF]); + pr_info("%s: pdata->lrp_temp[%s].trig_st2=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].trig[ST2][LCD_OFF]); + pr_info("%s: pdata->lrp_temp[%s].recov_st1=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].recov[ST1][LCD_OFF]); + pr_info("%s: pdata->lrp_temp[%s].recov_st2=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].recov[ST2][LCD_OFF]); + pr_info("%s: pdata->lrp_temp[%s].trig_st1_lcdon=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].trig[ST1][LCD_ON]); + pr_info("%s: pdata->lrp_temp[%s].trig_st2_lcdon=%d\n", + __func__, LRP_TYPE_STRING[type], 
pdata->lrp_temp[type].trig[ST2][LCD_ON]); + pr_info("%s: pdata->lrp_temp[%s].recov_st1_lcdon=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].recov[ST1][LCD_ON]); + pr_info("%s: pdata->lrp_temp[%s].recov_st2_lcdon=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_temp[type].recov[ST2][LCD_ON]); + pr_info("%s: pdata->lrp_temp[%s].st1_icl=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_curr[type].st_icl[ST1]); + pr_info("%s: pdata->lrp_temp[%s].st1_fcc=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_curr[type].st_fcc[ST1]); + pr_info("%s: pdata->lrp_temp[%s].st2_icl=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_curr[type].st_icl[ST2]); + pr_info("%s: pdata->lrp_temp[%s].st2_fcc=%d\n", + __func__, LRP_TYPE_STRING[type], pdata->lrp_curr[type].st_fcc[ST2]); + + return 0; +} + +#if IS_ENABLED(CONFIG_DIRECT_CHARGING) +static void sec_bat_parse_dc_thm(struct device_node *np, sec_battery_platform_data_t *pdata) +{ + int ret = 0, len = 0, i = 0; + const u32 *p; + + /* dchg_high_temp */ + p = of_get_property(np, "battery,dchg_high_temp", &len); + if (!p) { + pr_info("%s: failed to parse dchg_high_temp!\n", __func__); + return; + } + len = len / sizeof(u32); + if (len != 4) + goto failed_dchg_high_temp; + ret = of_property_read_u32_array(np, "battery,dchg_high_temp", + pdata->dchg_high_temp, len); + if (ret) { + pr_err("%s failed to read dchg_high_temp: %d\n", __func__, ret); + goto failed_dchg_high_temp; + } + + /* dchg_high_temp_recovery */ + p = of_get_property(np, "battery,dchg_high_temp_recovery", &len); + if (!p) { + pr_info("%s: failed to parse dchg_high_temp_recovery!\n", __func__); + goto failed_dchg_high_temp; + } + len = len / sizeof(u32); + if (len != 4) + goto failed_dchg_high_temp_recovery; + ret = of_property_read_u32_array(np, "battery,dchg_high_temp_recovery", + pdata->dchg_high_temp_recovery, len); + if (ret) { + pr_err("%s failed to read dchg_high_temp_recovery: %d\n", __func__, ret); + goto failed_dchg_high_temp_recovery; + } + + /* dchg_high_batt_temp */ + p = of_get_property(np, "battery,dchg_high_batt_temp", &len); + if (!p) { + pr_info("%s: failed to parse dchg_high_batt_temp!\n", __func__); + goto failed_dchg_high_temp_recovery; + } + len = len / sizeof(u32); + if (len != 4) + goto failed_dchg_high_batt_temp; + ret = of_property_read_u32_array(np, "battery,dchg_high_batt_temp", + pdata->dchg_high_batt_temp, len); + if (ret) { + pr_err("%s failed to read dchg_high_batt_temp: %d\n", __func__, ret); + goto failed_dchg_high_batt_temp; + } + + /* dchg_high_batt_temp_recovery */ + p = of_get_property(np, "battery,dchg_high_batt_temp_recovery", &len); + if (!p) { + pr_info("%s: failed to parse dchg_high_batt_temp_recovery!\n", __func__); + goto failed_dchg_high_batt_temp; + } + len = len / sizeof(u32); + if (len != 4) + goto failed_dchg_high_batt_temp_recovery; + ret = of_property_read_u32_array(np, "battery,dchg_high_batt_temp_recovery", + pdata->dchg_high_batt_temp_recovery, len); + if (ret) { + pr_err("%s failed to read dchg_high_batt_temp_recovery: %d\n", __func__, ret); + goto failed_dchg_high_batt_temp_recovery; + } + + return; + +failed_dchg_high_temp: + for (i = 0; i < 4; i++) + pdata->dchg_high_temp[i] = 690; +failed_dchg_high_temp_recovery: + for (i = 0; i < 4; i++) + pdata->dchg_high_temp_recovery[i] = 630; +failed_dchg_high_batt_temp: + for (i = 0; i < 4; i++) + pdata->dchg_high_batt_temp[i] = 400; +failed_dchg_high_batt_temp_recovery: + for (i = 0; i < 4; i++) + pdata->dchg_high_batt_temp_recovery[i] = 380; +} +#endif + int 
sec_bat_parse_dt(struct device *dev, struct sec_battery_info *battery) { @@ -600,21 +753,6 @@ int sec_bat_parse_dt(struct device *dev, #if defined(CONFIG_DIRECT_CHARGING) if (pdata->dchg_temp_check_type) { - ret = of_property_read_u32(np, "battery,dchg_high_temp", - &temp); - pdata->dchg_high_temp = (int)temp; - if (ret) { - pr_info("%s : dchg_high_temp is Empty\n", __func__); - pdata->dchg_high_temp = pdata->chg_high_temp; - } - ret = of_property_read_u32(np, "battery,dchg_high_temp_recovery", - &temp); - pdata->dchg_high_temp_recovery = (int)temp; - if (ret) { - pr_info("%s : dchg_temp_recovery is Empty\n", __func__); - pdata->dchg_high_temp_recovery = pdata->chg_high_temp_recovery; - } - ret = of_property_read_u32(np, "battery,dchg_charging_limit_current", &pdata->dchg_charging_limit_current); if (ret) { @@ -629,20 +767,8 @@ int sec_bat_parse_dt(struct device *dev, pdata->dchg_input_limit_current = pdata->chg_input_limit_current; } - ret = of_property_read_u32(np, "battery,dchg_high_batt_temp", - &temp); - pdata->dchg_high_batt_temp = (int)temp; - if (ret) { - pr_info("%s : dchg_high_batt_temp is Empty\n", __func__); - pdata->dchg_high_batt_temp = pdata->chg_high_temp; - } - ret = of_property_read_u32(np, "battery,dchg_high_batt_temp_recovery", - &temp); - pdata->dchg_high_batt_temp_recovery = (int)temp; - if (ret) { - pr_info("%s : dchg_high_batt_temp_recovery is Empty\n", __func__); - pdata->dchg_high_batt_temp_recovery = pdata->chg_high_temp_recovery; - } + /* parse dc thm info */ + sec_bat_parse_dc_thm(np, pdata); } #endif ret = of_property_read_u32(np, "battery,mix_high_temp", @@ -664,6 +790,30 @@ int sec_bat_parse_dt(struct device *dev, pr_info("%s : mix_high_temp_recovery is Empty\n", __func__); } + ret = of_property_read_u32(np, "battery,lrp_temp_check_type", + &pdata->lrp_temp_check_type); + if (ret) + pr_info("%s : lrp_temp_check_type is Empty\n", __func__); + + if (pdata->lrp_temp_check_type) { + for (i = 0; i < LRP_MAX; i++) { + if (sec_bat_parse_dt_lrp(battery, np, i) < 0) { + pdata->lrp_temp[i].trig[ST1][LCD_OFF] = 375; + pdata->lrp_temp[i].trig[ST2][LCD_OFF] = 375; + pdata->lrp_temp[i].recov[ST1][LCD_OFF] = 365; + pdata->lrp_temp[i].recov[ST2][LCD_OFF] = 365; + pdata->lrp_temp[i].trig[ST1][LCD_ON] = 375; + pdata->lrp_temp[i].trig[ST2][LCD_ON] = 375; + pdata->lrp_temp[i].recov[ST1][LCD_ON] = 365; + pdata->lrp_temp[i].recov[ST2][LCD_ON] = 365; + pdata->lrp_curr[i].st_icl[0] = pdata->default_input_current; + pdata->lrp_curr[i].st_fcc[0] = pdata->default_charging_current; + pdata->lrp_curr[i].st_icl[1] = pdata->default_input_current; + pdata->lrp_curr[i].st_fcc[1] = pdata->default_charging_current; + } + } + } + if (pdata->wpc_temp_check_type) { ret = of_property_read_u32(np, "battery,wpc_temp_control_source", &pdata->wpc_temp_control_source); diff --git a/drivers/battery_v2/sec_battery_sysfs.c b/drivers/battery_v2/sec_battery_sysfs.c index 0a36a78448e5..93641e556883 100644 --- a/drivers/battery_v2/sec_battery_sysfs.c +++ b/drivers/battery_v2/sec_battery_sysfs.c @@ -164,17 +164,13 @@ static struct device_attribute sec_battery_attrs[] = { SEC_BATTERY_ATTR(batt_tune_chg_temp_high), SEC_BATTERY_ATTR(batt_tune_chg_temp_rec), SEC_BATTERY_ATTR(batt_tune_chg_limit_cur), + SEC_BATTERY_ATTR(batt_tune_lrp_temp_high_lcdon), + SEC_BATTERY_ATTR(batt_tune_lrp_temp_high_lcdoff), SEC_BATTERY_ATTR(batt_tune_coil_temp_high), SEC_BATTERY_ATTR(batt_tune_coil_temp_rec), SEC_BATTERY_ATTR(batt_tune_coil_limit_cur), SEC_BATTERY_ATTR(batt_tune_wpc_temp_high), 
SEC_BATTERY_ATTR(batt_tune_wpc_temp_high_rec), - SEC_BATTERY_ATTR(batt_tune_dchg_temp_high), - SEC_BATTERY_ATTR(batt_tune_dchg_temp_high_rec), - SEC_BATTERY_ATTR(batt_tune_dchg_batt_temp_high), - SEC_BATTERY_ATTR(batt_tune_dchg_batt_temp_high_rec), - SEC_BATTERY_ATTR(batt_tune_dchg_limit_input_cur), - SEC_BATTERY_ATTR(batt_tune_dchg_limit_chg_cur), #endif #if defined(CONFIG_UPDATE_BATTERY_DATA) SEC_BATTERY_ATTR(batt_update_data), @@ -237,6 +233,7 @@ static struct device_attribute sec_battery_attrs[] = { #endif SEC_BATTERY_ATTR(boot_completed), SEC_BATTERY_ATTR(batt_full_capacity), + SEC_BATTERY_ATTR(lrp), }; void update_external_temp_table(struct sec_battery_info *battery, int temp[]) @@ -1084,6 +1081,58 @@ ssize_t sec_bat_show_attrs(struct device *dev, i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", ret); break; + case BATT_TUNE_LRP_TEMP_HIGH_LCDON: + { + char temp_buf[1024] = {0,}; + int j = 0; + int size = 0; + + snprintf(temp_buf, sizeof(temp_buf), "%d %d %d %d\n", + battery->pdata->lrp_temp[LRP_NORMAL].trig[ST2][LCD_ON], + battery->pdata->lrp_temp[LRP_NORMAL].recov[ST2][LCD_ON], + battery->pdata->lrp_temp[LRP_NORMAL].trig[ST1][LCD_ON], + battery->pdata->lrp_temp[LRP_NORMAL].recov[ST1][LCD_ON]); + size = sizeof(temp_buf) - strlen(temp_buf); + + for (j = LRP_NORMAL + 1; j < LRP_MAX; j++) { + snprintf(temp_buf+strlen(temp_buf), + size, "%d %d %d %d\n", + battery->pdata->lrp_temp[j].trig[ST2][LCD_ON], + battery->pdata->lrp_temp[j].recov[ST2][LCD_ON], + battery->pdata->lrp_temp[j].trig[ST1][LCD_ON], + battery->pdata->lrp_temp[j].recov[ST1][LCD_ON]); + size = sizeof(temp_buf) - strlen(temp_buf); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, "%s\n", temp_buf); + } + break; + case BATT_TUNE_LRP_TEMP_HIGH_LCDOFF: + { + char temp_buf[1024] = {0,}; + int j = 0; + int size = 0; + + snprintf(temp_buf, sizeof(temp_buf), "%d %d %d %d\n", + battery->pdata->lrp_temp[LRP_NORMAL].trig[ST2][LCD_OFF], + battery->pdata->lrp_temp[LRP_NORMAL].recov[ST2][LCD_OFF], + battery->pdata->lrp_temp[LRP_NORMAL].trig[ST1][LCD_OFF], + battery->pdata->lrp_temp[LRP_NORMAL].recov[ST1][LCD_OFF]); + size = sizeof(temp_buf) - strlen(temp_buf); + + for (j = LRP_NORMAL + 1; j < LRP_MAX; j++) { + snprintf(temp_buf+strlen(temp_buf), + size, "%d %d %d %d\n", + battery->pdata->lrp_temp[j].trig[ST2][LCD_OFF], + battery->pdata->lrp_temp[j].recov[ST2][LCD_OFF], + battery->pdata->lrp_temp[j].trig[ST1][LCD_OFF], + battery->pdata->lrp_temp[j].recov[ST1][LCD_OFF]); + size = sizeof(temp_buf) - strlen(temp_buf); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, "%s\n", temp_buf); + } + break; case BATT_TUNE_COIL_TEMP_HIGH: break; case BATT_TUNE_COIL_TEMP_REC: @@ -1100,38 +1149,6 @@ ssize_t sec_bat_show_attrs(struct device *dev, i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", ret); break; - case BATT_TUNE_DCHG_TEMP_HIGH: - ret = battery->pdata->dchg_high_temp; - i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", - ret); - break; - case BATT_TUNE_DCHG_TEMP_HIGH_REC: - ret = battery->pdata->dchg_high_temp_recovery; - i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", - ret); - break; - case BATT_TUNE_DCHG_BATT_TEMP_HIGH: - ret = battery->pdata->dchg_high_batt_temp; - i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", - ret); - break; - case BATT_TUNE_DCHG_BATT_TEMP_HIGH_REC: - ret = battery->pdata->dchg_high_batt_temp_recovery; - i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", - ret); - break; -#if defined(CONFIG_DIRECT_CHARGING) - case BATT_TUNE_DCHG_LIMMIT_INPUT_CUR: - ret = battery->pdata->dchg_input_limit_current; - i += scnprintf(buf + i, PAGE_SIZE - i, 
"%d\n", - ret); - break; - case BATT_TUNE_DCHG_LIMMIT_CHG_CUR: - ret = battery->pdata->dchg_charging_limit_current; - i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", - ret); - break; -#endif #endif #if defined(CONFIG_UPDATE_BATTERY_DATA) case BATT_UPDATE_DATA: @@ -1542,12 +1559,13 @@ ssize_t sec_bat_show_attrs(struct device *dev, break; #if defined(CONFIG_ENG_BATTERY_CONCEPT) case BATT_TEMP_TEST: - i += scnprintf(buf + i, PAGE_SIZE - i, "%d %d %d %d %d\n", + i += scnprintf(buf + i, PAGE_SIZE - i, "%d %d %d %d %d %d\n", battery->temperature_test_battery, battery->temperature_test_usb, battery->temperature_test_wpc, battery->temperature_test_chg, - battery->temperature_test_dchg); + battery->temperature_test_dchg, + battery->lrp_test); break; #endif case BATT_CURRENT_EVENT: @@ -1676,6 +1694,9 @@ ssize_t sec_bat_show_attrs(struct device *dev, pr_info("%s: BATT_FULL_CAPACITY = %d\n", __func__, battery->batt_full_capacity); i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", battery->batt_full_capacity); break; + case LRP: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", battery->lrp); + break; default: i = -EINVAL; break; @@ -2677,6 +2698,54 @@ ssize_t sec_bat_store_attrs( battery->pdata->charging_current[SEC_BATTERY_CABLE_9V_TA].input_current_limit= x; } break; + case BATT_TUNE_LRP_TEMP_HIGH_LCDON: + { + int lrp_m = 0, lrp_t[4] = {0, }; + int lrp_pt = LRP_NORMAL; + + if (sscanf(buf, "%10d %10d %10d %10d %10d\n", + &lrp_m, &lrp_t[0], &lrp_t[1], &lrp_t[2], &lrp_t[3]) == 5) { + pr_info("%s : lrp_high_temp_lcd on lrp_m: %c, temp: %d %d %d %d\n", + __func__, lrp_m, lrp_t[0], lrp_t[1], lrp_t[2], lrp_t[3]); + + if (lrp_m == 45) + lrp_pt = LRP_45W; + else if (lrp_m == 25) + lrp_pt = LRP_25W; + + if (x < 1000 && x >= -200) { + battery->pdata->lrp_temp[lrp_pt].trig[ST2][LCD_ON] = lrp_t[0]; + battery->pdata->lrp_temp[lrp_pt].recov[ST2][LCD_ON] = lrp_t[1]; + battery->pdata->lrp_temp[lrp_pt].trig[ST1][LCD_ON] = lrp_t[2]; + battery->pdata->lrp_temp[lrp_pt].recov[ST1][LCD_ON] = lrp_t[3]; + } + } + break; + } + case BATT_TUNE_LRP_TEMP_HIGH_LCDOFF: + { + int lrp_m = 0, lrp_t[4] = {0, }; + int lrp_pt = LRP_NORMAL; + + if (sscanf(buf, "%10d %10d %10d %10d %10d\n", + &lrp_m, &lrp_t[0], &lrp_t[1], &lrp_t[2], &lrp_t[3]) == 5) { + pr_info("%s : lrp_high_temp_lcd off lrp_m: %dW, temp: %d %d %d %d\n", + __func__, lrp_m, lrp_t[0], lrp_t[1], lrp_t[2], lrp_t[3]); + + if (lrp_m == 45) + lrp_pt = LRP_45W; + else if (lrp_m == 25) + lrp_pt = LRP_25W; + + if (x < 1000 && x >= -200) { + battery->pdata->lrp_temp[lrp_pt].trig[ST2][LCD_OFF] = lrp_t[0]; + battery->pdata->lrp_temp[lrp_pt].recov[ST2][LCD_OFF] = lrp_t[1]; + battery->pdata->lrp_temp[lrp_pt].trig[ST1][LCD_OFF] = lrp_t[2]; + battery->pdata->lrp_temp[lrp_pt].recov[ST1][LCD_OFF] = lrp_t[3]; + } + } + break; + } case BATT_TUNE_COIL_TEMP_HIGH: break; case BATT_TUNE_COIL_TEMP_REC: @@ -2701,38 +2770,6 @@ ssize_t sec_bat_store_attrs( pr_info("%s wpc_high_temp_recovery = %d ",__func__, x); battery->pdata->wpc_high_temp_recovery = x; break; - case BATT_TUNE_DCHG_TEMP_HIGH: - sscanf(buf, "%10d\n", &x); - pr_info("%s dchg_high_temp = %d ",__func__, x); - battery->pdata->dchg_high_temp = x; - break; - case BATT_TUNE_DCHG_TEMP_HIGH_REC: - sscanf(buf, "%10d\n", &x); - pr_info("%s dchg_high_temp_recovery = %d ",__func__, x); - battery->pdata->dchg_high_temp_recovery = x; - break; - case BATT_TUNE_DCHG_BATT_TEMP_HIGH: - sscanf(buf, "%10d\n", &x); - pr_info("%s dchg_high_batt_temp = %d ",__func__, x); - battery->pdata->dchg_high_batt_temp = x; - break; - case 
BATT_TUNE_DCHG_BATT_TEMP_HIGH_REC: - sscanf(buf, "%10d\n", &x); - pr_info("%s dchg_high_batt_temp_recovery = %d ",__func__, x); - battery->pdata->dchg_high_batt_temp_recovery = x; - break; -#if defined(CONFIG_DIRECT_CHARGING) - case BATT_TUNE_DCHG_LIMMIT_INPUT_CUR: - sscanf(buf, "%10d\n", &x); - pr_info("%s dchg_input_limit_current = %d ",__func__, x); - battery->pdata->dchg_input_limit_current = x; - break; - case BATT_TUNE_DCHG_LIMMIT_CHG_CUR: - sscanf(buf, "%10d\n", &x); - pr_info("%s dchg_charging_limit_current = %d ",__func__, x); - battery->pdata->dchg_charging_limit_current = x; - break; -#endif #endif #if defined(CONFIG_UPDATE_BATTERY_DATA) case BATT_UPDATE_DATA: @@ -3233,6 +3270,9 @@ ssize_t sec_bat_store_attrs( else battery->temperature_test_dchg = x; #endif + } else if (tc == 'r') { + battery->lrp_test = x; + battery->lrp = x; } ret = count; } @@ -3301,6 +3341,14 @@ ssize_t sec_bat_store_attrs( ret = count; } break; + case LRP: + if (sscanf(buf, "%10d\n", &x) == 1) { + dev_info(battery->dev, "%s: LRP(%d)\n", __func__, x); + if ((x >= -200 && x <= 900) && (battery->lrp_test == 0)) + battery->lrp = x; + ret = count; + } + break; case BATT_FULL_CAPACITY: if (sscanf(buf, "%10d\n", &x) == 1) { if (x >= 0 && x <= 100) { diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index dbf5494cd10f..9f971c832d74 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -483,6 +483,7 @@ static void stop_lru_writeback(struct zram *zram) static void deinit_lru_writeback(struct zram *zram) { unsigned long flags; + u8 *wb_table_tmp = zram->wb_table; stop_lru_writeback(zram); if (zram->chunk_bitmap) { @@ -494,11 +495,9 @@ static void deinit_lru_writeback(struct zram *zram) zram->blk_bitmap = NULL; } spin_lock_irqsave(&zram->wb_table_lock, flags); - if (zram->wb_table) { - kvfree(zram->wb_table); - zram->wb_table = NULL; - } + zram->wb_table = NULL; spin_unlock_irqrestore(&zram->wb_table_lock, flags); + kvfree(wb_table_tmp); } #endif @@ -1088,10 +1087,11 @@ static bool zram_should_writeback(struct zram *zram, if (min_writtenback_ratio < writtenback_ratio) ret = false; - if (zram->disksize < SZ_4G) - min_stored_byte = SZ_512M; - else + if (zram->disksize / 4 > SZ_1G) min_stored_byte = SZ_1G; + else + min_stored_byte = zram->disksize / 4; + if ((stored << PAGE_SHIFT) < min_stored_byte) ret = false; @@ -3525,4 +3525,4 @@ MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Nitin Gupta "); -MODULE_DESCRIPTION("Compressed RAM Block Device"); \ No newline at end of file +MODULE_DESCRIPTION("Compressed RAM Block Device"); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fa4956aa7005..b72666673ccb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -299,6 +299,13 @@ static u64 notrace arm64_858921_read_cntvct_el0(void) } #endif +#ifdef CONFIG_ARM64_ERRATUM_1188873 +static u64 notrace arm64_1188873_read_cntvct_el0(void) +{ + return read_sysreg(cntvct_el0); +} +#endif + #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); @@ -382,6 +389,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .read_cntvct_el0 = arm64_858921_read_cntvct_el0, }, #endif +#ifdef CONFIG_ARM64_ERRATUM_1188873 + { + .match_type = ate_match_local_cap_id, + .id = (void *)ARM64_WORKAROUND_1188873, + .desc = "ARM erratum 1188873", + 
.read_cntvct_el0 = arm64_1188873_read_cntvct_el0, + }, +#endif }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, diff --git a/drivers/dma-buf/dma-buf-trace.h b/drivers/dma-buf/dma-buf-trace.h index b592210ca07d..d6f3edb3cbba 100644 --- a/drivers/dma-buf/dma-buf-trace.h +++ b/drivers/dma-buf/dma-buf-trace.h @@ -40,18 +40,18 @@ int dmabuf_trace_untrack_buffer(struct dma_buf *dmabuf); #else static inline int dmabuf_trace_alloc(struct dma_buf *dmabuf) { - return -EINVAL; + return 0; } static inline void dmabuf_trace_free(struct dma_buf *dmabuf) { } static inline int dmabuf_trace_track_buffer(struct dma_buf *dmabuf) { - return -ENOTTY; + return 0; } -static inline int dmabuf_trace_untrack_bufer(struct dma_buf *dmabuf) +static inline int dmabuf_trace_untrack_buffer(struct dma_buf *dmabuf) { - return -ENOTTY; + return 0; } #endif diff --git a/drivers/fingerprint/et5xx-spi.c b/drivers/fingerprint/et5xx-spi.c index 9bbf4829b723..7a06dc9e7470 100644 --- a/drivers/fingerprint/et5xx-spi.c +++ b/drivers/fingerprint/et5xx-spi.c @@ -148,6 +148,7 @@ int etspi_Interrupt_Free(struct etspi_data *etspi) void etspi_Interrupt_Abort(struct etspi_data *etspi) { + etspi->finger_on = 1; wake_up_interruptible(&interrupt_waitq); } diff --git a/drivers/media/platform/exynos/mfc/mfc_qos.c b/drivers/media/platform/exynos/mfc/mfc_qos.c index 7b3d25663768..ae07680f3ac8 100644 --- a/drivers/media/platform/exynos/mfc/mfc_qos.c +++ b/drivers/media/platform/exynos/mfc/mfc_qos.c @@ -583,7 +583,7 @@ void mfc_qos_on(struct mfc_ctx *ctx) } start_qos_step = pdata->num_qos_steps; - if (enc_found) + if (enc_found && (dev->num_inst == 1)) start_qos_step = pdata->max_qos_steps; /* search the suitable qos table */ @@ -681,7 +681,7 @@ void mfc_qos_off(struct mfc_ctx *ctx) list_del(&ctx->qos_list); start_qos_step = pdata->num_qos_steps; - if (enc_found) + if (enc_found && (dev->num_inst == 1)) start_qos_step = pdata->max_qos_steps; /* search the suitable qos table */ diff --git a/drivers/mfd/max77705-irq.c b/drivers/mfd/max77705-irq.c index df46b5a9f2c9..917e7bb47b66 100644 --- a/drivers/mfd/max77705-irq.c +++ b/drivers/mfd/max77705-irq.c @@ -263,8 +263,7 @@ static irqreturn_t max77705_irq_thread(int irq, void *data) pr_debug("[%s] fuelgauge interrupt\n", __func__); pr_debug("[%s]IRQ_BASE(%d), NESTED_IRQ(%d)\n", __func__, max77705->irq_base, max77705->irq_base + MAX77705_FG_IRQ_ALERT); - handle_nested_irq(max77705->irq_base + MAX77705_FG_IRQ_ALERT); - return IRQ_HANDLED; + irq_reg[FUEL_INT] = 1 << 1; } if (irq_src & MAX77705_IRQSRC_TOP) { diff --git a/drivers/misc/tzdev/Kconfig b/drivers/misc/tzdev/Kconfig index db68796dfbfb..44863bf4f5b9 100644 --- a/drivers/misc/tzdev/Kconfig +++ b/drivers/misc/tzdev/Kconfig @@ -129,27 +129,6 @@ config TZTUI help Enable Trusted user interface support for Samsung Secure OS. -config TZDEV_PAGE_MIGRATION - bool "Page migration" - depends on MIGRATION && CMA - default n - help - Enable Page migration functionality for CMA Migration. - -config TZ_TRANSPORT - bool "Transport module" - depends on TZDEV - default n - help - Enable Transport module. It is used for transfering data from SK to NWd. - -config TZ_TRANSPORT_PG_CNT - int "TZDEV transport module per-CPU buffer size (in pages)" - depends on TZ_TRANSPORT - default 1 - help - Select TZDEV transport buffer size in pages per CPU. 
- config TZDEV_HOTPLUG bool "Core hotplug" depends on HOTPLUG_CPU @@ -200,7 +179,7 @@ config TZDEV_DEBUG config TZ_NWFS bool "Normal world file system interface driver" - default y + default n help Normal world file system interface driver. diff --git a/drivers/misc/tzdev/Makefile b/drivers/misc/tzdev/Makefile index 3a713ff04a0e..d0f764b404f4 100644 --- a/drivers/misc/tzdev/Makefile +++ b/drivers/misc/tzdev/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_TZDEV) += tz_platform.o obj-$(CONFIG_TZDEV) += tz_ree_time.o obj-$(CONFIG_TZDEV) += tz_uiwsock.o obj-$(CONFIG_TZDEV) += tzdev.o +obj-$(CONFIG_TZDEV) += umem.o obj-$(CONFIG_TZDEV_KTHREADS_NOTIFIER) += tz_kthread_pool_notifier.o obj-$(CONFIG_TZDEV_KTHREADS_SMPBOOT) += tz_kthread_pool_smpboot.o diff --git a/drivers/misc/tzdev/startup.tzar b/drivers/misc/tzdev/startup.tzar index 35df0f917b165f3cc09dfdbd70988627c104b1fa..9efd87edccd277b32053a3b623ad64c47d764eea 100644 GIT binary patch delta 112897 zcmeEvdt4ON`~S@BETW=l9iSjBAJyHDwdVWig-ucHS;ku zMZz1It$A53D-X2^pYQq_XWr+zoaa2}dCoa= zW_DTm%64hxpWCE{IqOCy34$Q5yf$oW!*DTp+<-x|8m_qzJllMyO_lp^dU90%p4$e9 z2VV?%SO}c`QFhFP8=PiP@9&Nk7?o zck_<@2h^Lfb@H=g#d)1lL}#ZIY1u$=1(ht2S~n~>DhtM8e>}px5pW{~E|lI2ZXg_a z?AmkjeSU29&XOA8ujDy3cg=PlTPTeQfNTOuF9k(i7llKpxT)YTGzt=gRa8z=Yf!J_ z6fw+U5@s~hsSa5X_JxT;Y(pI{HVHx^C9DA3o>v87GssNIK#+@8ib62R40mfH2oq_> z3Tb%IGDQ-ijk=y`AqZ7e4(XulzllP)sB^bMaUuo0Ds>g3DdAOEl0{?KDUW8b!p*dq zA(i$41?~GAVus|-P(e6E0V}29;t(3MQhGQjrJ*E*7}Yrg1z{BJL$Or-3Uz)>>JqdW zdLC}tI_wnh|Y7?Q)#yy-Lv#StaX zNy2<6Vy&LhNDyMb5rut*bsHo>NT)rkpjoBs(A9MQ3Q;IBsNb{_gzK5q`E@BQD5^{p z(hcSNn+k%9#=MTI1)9N5@wDo7RCRr*C`>n)hN9scw@ZS5fL`@OXzDOJ3+|??28gvD zIA<1wD=E}!wbVZ%^Rg&xHpGvh->!EMh3)lq@t!yb(2Uhm>$vJaMWMIBc20%Xb|xX- zz}?VM`zFA_Ml{G!@1T7U6KBv_oRYRDiULg0+))m<#MhUERHMO3xVa{bgK^S?LClP*z-PQHzuC>xr;w0L$7E-I| zEKW_c7ny`8qfuSJb>%vBE<~?YxAb&KPjtw38dC_Vjg()A2;Z0utKlHl)Rl;g&u9;$ z!V*SZ0i7Q-DnS^NA%XAeZ_yQ@Q2)gnp zp-9RJ8ukgCZnSC)oET2~icrPGv&dV+?AGCekU-a&P&UP^1GPdi30Z~#aj0Sijadh3 zFy*sTChcLT5IW0F>7=gL`Z$4|N@&b__&J?ctw&yzC5ysoqn0hGJcP-2mA=lz>yv#Mz>7(HJ|P!T=OQlTe(r3=uxj(Dfr6y7{0agxAxX{WQu;W(vX( z!=SQ6rm+JagO!s&}9!C|n$5Fz@KD2y@i!${g?G-D%Lo?TBCWW%U( zq^Cf8pe^$HUl<+@ZhLb<$R8;Qs=;PKFrVH>e{aMHHKark3XH7&3AM*i!X|XVcAB&a z@sUkuH)+F3OfgOvy^5u$FqS^ zO=wP`8E+!wmMt|2`wXVW=$e8SqL6LihUkMaboNd3Trgt3+$i3Mu$e^aX7p$o>l1ls`lv%IKFn5jq!Jp`Q%;tHAydQf*vc z4QDYl<}GGIHPjkn^U%x{l>e4AASiE`NtkF1OePeMq;hbJ4!RDih)HV;P#%rJDJ}qG zA>v1ifQOM+*>`#dz3uQ=9y}Ip(EA}G1lkO-h(oWNgcU~3^)VLic|;QO47O&7fB-tX zMS40YESrnas;W- z9QaL`v`7$+7=zxYD6c9Ig$hG#DNG(ilN{&)40{d?bXUfkYJ<>(P#^iENjPg*is%=r zKcK%IQdr!s*F_=IC?5qa!QAw@2y(G-lza4l6mlzQ*jCtFa2^4IF^y%=FN!wTZ^aCi z!p-&XB6hDQiNa@w_I3!oEZPTYaT%QjH^vzC9qDOwN&Y)197UUff>M{GCk)?BflH$( zW}7rzs%}6pZj-to;x})TT1!_#Y5z8i!2XgXAbB-A*pg)zUEhWQI{>YBw{`)u;}G@R zF7=nLU!xbdOJSJVS8Yd?Luk)-gvMud9Vn8gZ@SKy0!Ve@luZdvNRDx? 
za!NswDcMhEbsaY?`|0dQre=4|d<_4^#rNpeHT(Igle#{g@p#v)?1@tz8JGERQrcZz z`*yuKH?3~1s4aZZ4!2hN`l7{zD@o3MN2TG7qG#m7QJ=h2IqHl2${By`IqAO6=B_8< z+S&TF6ddTh|B5tS5@X4AO?pr~>x}tJYVY46@oBSQg_?xvX2FSI4G?N-DzeSOR9xrd zT=J}0XoKrXG{a}v8Dly z#7t-WB-5Hk;vVOr=h#_wX3sO-DT`KT$s*Ik{!kRY%=EIKIMTU&rRg5Cc%A$UOdp9+ zw6DPQC5G}9YfbaSL(cF*)7xe=!?nTmasybC`HtyWeX+!uvd5IuPzrd<`SGWwrDCZ6 z23ZJz|AZl(f05n(oB#gl)0FkONf8fG{^zD2#Ldo$UzmQEF=>xFU^*c>oCz*dOHu6Y z9C^@mK*E|f>X7LrG2XfTkm+_QG-a8rb^Za2FRji(4L)A95!CX!=w}68Tq} zy0eL<($uTfL}SCo-Cy7JZrjB zsy}I_UUb?y@|RLZhE+!QlOrmrl1579PRPBCA6LdFXCx-8#cKmSQt$a+c^#yH2A<(6gyS9SyW zeo?$a`x?sQu`rDdke|lvTo51+#+Y?FK)wyV8r4XCO-5jq1j+Rz1mb7SU>HMDVPKD^kk;}KVyg3EYgv(yc!81f z8zOpZdmGfaouZ=TUS{jvFwdDDC6gqEIIpymUp6&5JyaI9y^UU&jx~&PMMrsq7+G?M zEWEY_E8u4(VboyA0N)8b3i#|Gtgh~Gj*F37pk2=Con@!kHZMVUTNa2BZ(+Q~(mMdx zF*vRaM5yC@$T_mBtcabXfsellMN=eUUy7_93xJmaX9F+8(HXcm@DN%RD?95~_0iQ= z(U>@>h@t6m@|qyoD6L4Ag*`Z2&ggFPWz(RGy>xC@5|rU6!tu~XNobBE{Ao$3PJ}@y z+m5pf$Gx~sOn*uep2GRJ^^$PqNlB0s>5-mtVg0k+bzNl?*h_x4{v-pYIBmV;6VmAF zZaR7M#hMxid@vu!L>%81N@Y1LqQ) zxA&EW=Wv~eYpZi#UvzHU5;XBigyD2d{RO!0fa`c%N8|dpHISbm2^XFI3G%NJrnQUx zO2F@$;6?8 zwmt)SkP~Bm2g!9`pd5S2>EuD9al;4%bBBwlB-X=AQ57asQ zFp!iv%N~@Quv9oSM&2gI1vi(4pYkQ)zJcheMUoJ^7z-8fZ4Q=&={UyVNW~Ebo|F{0 z$GK~)+_FBh-1VsZv>$RN@hSODlbBCuC(E-<2xRA!De^QH$ET;ttuZEEa@K!FZY*M0 z4|!I84YSMkXAy9?<8n<$b`?1{KPR`5u%vapAh&EZ@|sByQ)_D0AW|!w7hjMYN}_f7 zOj)Tv|FTIN2`ijYGv)V0Otcj_@`I9pHTAQ~gTx}rv&sXI>19?~F<}I#m@R)Oi3LhM7%XBVpz% zS&l}s-HsJ7W4kl!RXInJ2~Jw9?R8=xwjL*9_zGf4p-JIxzp z3koQFBfPcGnXpl=5y78OEWg$m1KW{ZahJPG%oBOl^0xZob!YEu z@^{Rxz<=aVWhr=)^Lxph(MS>=b@mH1KVU}9E1H`Jo6v6+A?6d9Fr8(Jd4P;~lEck! zHo&sU(b3${gvG+1&gN~hIK!FM&3wQg-JW=x`BzEu@8ay--+Z)z6d2>Ie}{RCSqcny z?!Mb>F+FflZu?$C*3&qhQx#<~RJIYs?h$hQLc@bxxS2{jjanOQrV~G0hpHL&&>DK!%REsfLZZFd$YeaA7~&2EOQR5FgM0Bi$ zvk)9sW&Tb^$3*?Y{J6|H>8!asLXir7GuNQ01?SA2M4b2iZhl)5^PMyPFxO+LoOj7Q z9y#wkecAk>A7-$P|CoOl#Zfy}_5$dXPm$4| z`Az&T$Wp)q&RM~JJ7sJJ^$ztLE(MRK{T9Em^mC|Rin)9gZagT~;&-PQPBSci!*Lg} z&*Jxn*>*ohbIMlyK9z*w)KB&M0t+PnR(|#BqoTlee(#yY)6RVz{JO#(=k<<$ze`x! 
zo$c)RtcW2jv#Z}_KQwzxPrqX#nw!wuZ-6NIcXPhj$1fugYl@0H{CYLT^2!-H#&0=m zv16>?)K*yEMa}SQ&q8rnj^ETKQoygyy-WNW`(wm$E%!q%9C5l<_|0P@*{GFFb#G_y zRen=h=x=`A4>?uh+_=WC=5s3fqG1T`X*NDqZ@hFhk{Uf;uU-8a_Xq;+3~_Ut@OZu9 zIm54Fs)Anoz+!sbz!O01u?SU^B0HitW3~)9STp z;0Tcgc2w`Bp!rf*J>vl|DL;pVI-j0aZ<`bxdVKBRgU^1jXu5Mg+ka zUgP1x`El)^bpBnEfA00vbr%=6&50UrJ34RFr}c37|+;2+0@H_d>hoh!GK0rmdv}*Hq?2ct>x7fBGKXx zEZv$GtkK*qBHU_jp%o9ZL_pNB2O#*cNBoT6MX_I5T4Hx-$XDRH=+dbpb!P1e zKjX?H8@MF3k~V#1c}BV@QN#gD3n@O8`W&!yu?#C`nTO}T`blVvrzA|QXb{ahU>RhI z8m;RaY0z8YNk$O<71-%G^x)?6$^lDPQ#New=&}sNPJNck(iL|p1un~AOCla9u}`)3 zzaVVDGeV}_wvxnyu%`M2dhVcQNw@8I(!xG$SYe-yhD)mOXI(mBB2EJ`BgnH0rQruvBp` ztvzgMVacAaS6PmRv(MdzV+wwygNGrtart+sIfg>MvpgJH!k*vavr79PPoIoHi6$H4 zDxrF5KS}4mv$P29Jy@^uy3rZzodHIVrtl+hUdm}2aRfE* zA?p!KOUsapx;FO2g?;WbS~;X8Z9Rep_+x{NeXig^8T)i9Fe2+qogEuT+9x0XGb75D9|d<*J-Xv4xaCQ-=_oqz`bYFVJ6H6k z`aeMF5K8?4JrVUiZT-R0qQlwyb>lYcx`ZQ!6a6pf<%x#vCn)aS)-9=Cxy33~NVK3F zs>;b#jtIMQo1O+n&kL;$6?wl<+>h`^I;H=J&fGPZp8L@SEq|x~wk!|VU zQT?BwEuK<;f)C{cn)MTs2TuWhvV4fmqfy5!*eZRC4jzNc9HL&=5MvB_%?QIWCrPY; z^0O!Cjta|q1Whw2L{o{zY%}4cSxcfCQTz6Ua^EBc(>JFq8$I<->Kk6=Z8I5MI z5-`t}AEo1rEr6mc;o>WcX>uhhs|ust?3|KJ;t8}U^Jj`<=aIc=+zHEI_dw~+ysbt` z>^nkxPavg+-K|$sV7Pvb(fxt`6j6ngnh~hCHQ%s?y?MZ_NhE6(9OZAKtyOTZKowP% zmZr@ZWv*3OI$N^u)%hnGYQHkZmG}ZmItdp}+@lj>4ZEfo-8JJZO$HtBsmwnKUj!V} zeX+`@{YyM`V6|VFP6xpjnzCJ|R~g;T-aTM+dHE?gzC4jaPr>o2c<{?UiAH4-UOix3 z+gH%IQNW|fcuLCXr!R}N^ps^_=$MVV6OI@R>?H)oFp8>A!3o<>Q^e1( zCV)L=#pjU0#$G~TY*tGB89kABRxeL9h6?sB0wa$h2gs&8=DVNaOFMNqZCN6g(XP|T zk?BIHj@*!&F}4XixS7=cp_ z{_S{p%*t2wrN_=dedh1<`5A;!+4B_itL2$cTaw=7?FN5iW2lOyf?qLIIcV3f=$}=c zbt}pYF82Nc)4KUC63?P3BPsT*WiTRp`dOHsPt`buUOb`;x$}U%$iRfk$@&`xtlkwm z*%&K@1S2A=W!m~1LMOJK?&S)j&NJbE?h{u=^6{wE4LhY%{D=yMozulVT=$uqpp zUSwdkWl_O7^j#`eH0RL%k!>jMcS!f{P2<=(tBUNuTUvDMeV=X(#$fHU&hTfIF(F19 zQ)Hsbp~er5Gputc}eomqPRa0c_Tm3%{XGz#$IP&Y*tGD z18KPjq4oS9+T>sI2V!c5g(5CMY(}K6-5uAz7(o<5sTbgziz{gs*epd}lm853yt7u( zR?zWq^E0qpve_F`_^?$P`y9n<49tiC7X@9!P_j?btz*yg*@wN)z~}{(brIRReFQDI zh?p5dyDq{Ljvq+;)3V-@zFXJoPO4+*zaYqMUPgQWMCF-u9%P8xj~eO&YdEx_4w1#eEW&m_aiPZ|*yQjZQ^hT)q- zbvL;*EOr_HIe_>^2 zJ47R{qt@uXGz({OJ~^%EP5_rofzv5NC*C_X2QrDnUMtQhbd46c7y+lSGmpL4}1xpL;Zrg3&LO4T^Yg%TzNWPhGL&^ukzi7qTXKR zf}tnTtGvw6GlVN=^$VvBMd=)Yl_$D`&a1rMZ@Nq8dzFVT)eX0Km5(&0hZSDsU4Jua z^$QL!f?z1x73q|pLr1y41ptFoX%@<{EHWk>##1qig4!u9LyfUmM4!y& zG+dcQNsP?iV8e%zjr%`+xg@r)TX!i( z9+FFOUSwlkq18QoF^vORTuiCC;Vq=-C1eMba?KQXs#|l(noD~*X$cL(4N>jKmP_Xi z_5&LdTZp+75$!RBabx$T_T$K<5g;?=4{uCtE|uicEMPij4{s^Ca%roVY0_xthBZueFS}*d0962_R4tkLv;>d}4BzCS__c)H6 znn!V7`!v+);!wlMV`!&ZFzLwi#(Mh7vxbym%64t z!;u|%Gy-I1OU+v>J@ROlfxB)2cIDAlFL8bhs>q|`UgWu)_Ub$e>RPw*1sqwJLrIK0 z$J2$2IC9_|n(Re>i6e*4p|xJ*r5rhS4jlwpoI~l$5lxA6NQ|vp*9wk1bq>XOkzeD; z>2qkD7kM>D&YDAZkWp7PtIIlv_UbU40Bv*Vybe#7trwY}@nh%FK`-(-PC9WOi9KP;JW5>&Q&Q(q93XGX1&%v?9*y%NU*^bJ z^T-aeG!H}EI|vl(JlbpECfk5*^XR;n_&=O@!90rSRkw1sesX^t^JoOfFg5Tsm|8NA zW_fYf$8pjH^C_Y)dVju|a(1A%9rI~~f!}7hWIoLT=Ka!( z6L-z0tsujvsjtI}74zx17k7V-yLvta-Bwqb`%#2u>jFw*WVRoK4_k=c&PfL@pvfRh z3(T};Crk@pKx++rfZ^B$bkIwD7$=^%fW!otx_|;#V^~>0aX@@sBRJ;t1vCz1R@X~T z=+0U|b}x40#!xfWx`6h2kw-}0E?+>i zfO*p%0Uh@uKgE%&7f?|Dy2>YWWMLsCG4euB7d*?60~gX{kfnuY zI>GFeHq7Iu#h4W>uMv8>{v)6yvVO|_P{N%i|8P*xQJp4Sr;rKF}bett(mu6gCBBCfw=JUc27A}{!CtV4B5tp1%wCS&v4}>1%xZg>v6n2OS-@yYHEW7%5pDI7 z{*05ZSVYGS=?~w>ec2)kN~v4dR~)yHPe~x-kxJ?t7##9xGGos7biubAb9g?j^&%UO zV6=#c&8LH2WaCMtMo!EpabR8L<(%@=e2N2E%Exs40a~4&PvZ>S^i zTBd9<#jb<0*u`{Ehv^I^E+%mZEL&`*-%DUw>SBrmZf(?s??>19RKM`yCYvPK=;c%;! 
za4ZTc-37oIz*0kx0TX~DIs706P{}bA0!xiN2CN5;yqo0G!O>#=sdIZVH^s;TFJ!9JT-$yaj%OSyv8 zV5sD9A+XfkW59agNDdbRCv$iUa0Z9p0nX*{PT)cgznk0&eZUob0ES8rrq}LPVsf(Z zCCZ?$nCcdqDH{HospjxurkcY?flE336L2MmD}kk!9(|{PBRPBqI9bCvhqVgNp&)}} z_yahX!yIeZPcl*9i3S8`ao9R`GW4DbVv%uafFn8F5jYu`4N;l_T~Lt0F?0jY<#12nLJs!@F6D54;7SfB z14|Z<0fT@eIXo0N*`g0ongMsCAcJE_1J32}eZYkregL?X!()LfIh+A3DINnR07r88 zao}V{AEGn^oD{-$vIrX5)@=` zcqwo$hnE8va(E?hDTh}BS8})zSZd`lU_Edohl_!8lDUE{D9GUOJHWXd-U(dD;dg;c zIs5@|C5KCar7({Hp8!X4_>0>yL?v?tUx6Wm!(RjEa`-TCA%~9wmvZbI=D|!8tHwaQF}4Tn=9XF68hv;8G6%16;{rDFp_EdkpXcj^uEI926vT z1&x6-INTICm%}Z93ps27F6D3-a3zN$fu#tK0quYzIh@lG1<71N7vKyIcLUDla8KYu z4)+Bv<#2!CN)9IjOOYM}1_4KUa8A%r6eN2Jg6;;+;BXpnE{E>}F68h7z@;1>3tY+J z3}C5^$AAgIk+nFdI_Pl}B-a*H2R#Lx!Qm;uxg355xRAro1DA4mCU7N(bAhEOj{$Rm zBi%R$v)@7#B)bcMmjGvQcqwo$hnE8va(E?hDTh}BS8})zSZeDrU_Ef8fpLpc9aM~h zWR77Aa0Z9p0nX*{PT)cgzYAQ-;SYc-Ia~@XweuM832>y2_4)q`6eM#DUjb)u_-o)? z4j%?C_a3rwQ!DB!> z;7H)w`M)Cyk~xMhz!@Cw2As>`p1_41?h9PX;r_st98Lz7I(iHk1RU9soBxNRAem#h z8#sf*X~4N0z7M#N!w&$La(FCoC5JPBrA{6LCIClv%JCFDj)G*4;VIw@4o?Bj)fh#$j3oONW>YEE3nZp$ z0u0F<{t7sQ!(RjEa`-TCA%~9wmvZTz@;4i2e^{M(jXWR>oLGD2L+K_K?C4q4mSqQ;BZsmTn@JYF66KUxRk?T zz?B@11eW4F2IRCuK_pku5jdH{U4Sz<+zmLF!##luIoubxl*9djD>uVJpi1nrfPb2tP;`jE@4@vsaf$_QLsg1N7*ZK65l!TcMTo?XHC=Ag80NPmy1I6;f&VySMURpJ8%bARbWl4G_@!`ZfagU8?^C>s@;mS5YyKF&_6ZQ zOt3c9%B@CuRa+YRuo98xF3bgUa(Zj~EGUd9vFO67(e}X*ZjIjyyX$%lnT5pNdF!mV zmKj6uKdgkczNOyH?djUX%CPRP@cE5)SNv7eGt8}h{l93(w`kIp+nSXK!BZy)uid|$ zoyFJAZxe!@lV4V=n$VkBN)vp&<-IJWO@u3ax_Grj5Ozp|tZ8Uib;PPDdpca&1Pz)h z;g@=AvXq#528Fs!R)*mFl`l+IT90(#aa?tuH{07^Zjp4=Bw4>M6s&2-O`*kA=%roV zg=P4up^oAB@u!!;H+6NCo%P;H(B(Ug79@r+U)pMYS>B$`O;%Ld)#q{qHJ+lhy4_o} z%D1R}+0`17hFQ~2)zq}Cn38lA=DGTO)PtUaqGWGHWnk2U;07D)GoYvv^sjhDQ)&Mc zCF}-0MR)nulLlp?;g`ltI^*l|Bp{J8~yE2FS)I*1- zDs6M{h_xE8_u5$}RiHB>S*N`dZI3{QUAncrjLQZ6xUR2bPZ(JM@3VR? zckG}AM;pQVb+Q&5E$R7eCCIB=TNbB`@Lj?6D>ak>`bl2#$`15Z_6^&BMFW%u~jvk!VILEiyA>B)(GtR|s{*4m+DpQf}M?5(S6 zqz_%??d@BkOS;$Xz2J_ne`;Dnm+SJip7hT&W#FB7U|zlKr;hd$?yfQg7u&iC%kX1e z9V_rNt92_2E-t&zXJxeH8RafNO`(O3KciR@y@gXo`w)ir(pfiv9)nho!?=*n#)jLT zRkH5X(x@QBt__bEJ?f-%W+!{u7;9QjNLM{*FU3yj-7G)b>5@LsKwyA?G z_7s_o+RchNtv11Bp$XHKA@v<{K@U5o4Hr45E4?tL{XSi3kMXJHb4sTswulsqy|kV+ z4THQ5zXLqtIi(MDhlkecmh0bK_m=0B@Ca9WQ(<@bE^#+n>S}gGxO$;s;?=)jDBW0M zdiXFaqu-xX6x7-9dF9I($M}-l?0=gC`-O(W)fBTeO|!qRq78a=Y+s>}^n#KI!P{R@ z?o72!HJGaWH1{~h3kkdn9fG5Qz29Yo9Ox{`@O8Xk{O+MkFb(cA4d-xC=u!BGpFS(? 
zYe+x6pbY6`+o5S~Ui`BljyQ>!sX|lHQ8#K&pb;~a&TTwqH1NSVgVxSa2DW$XkP^P8w5E z(h{YaR1!$yBC;ztn4FxWNa!-DLyN-Z>KrqZs3LDS01o7j(=gx!pm zJ5w3d7PM(dpPi7+W@)4u+Ke~U219?F2}5nn(6AiE0z>=cD1Ag5P0dlpw4Vw&cL$X< z)jVSxpO|1DhjKP;rQ=Jb#>gxtozgI zwyxbQ&5Z+RDLwwP^x9c&>91!giJfeRjY^sK-IW%#w5FZFvZ&J35=$ek|7`fP0R3{6 zINdE%a+OT@KGORFZ$y_Cr1{5l0sk#SknXAOMVR5AeC0lgX!xWeqX-#|bVh!~^kKq*8 zv-*-aPwCO}X8P=M6ir^CG{Y}hjGO1~jpg%{EVQ@Yd?lq$d&}Cod&ARSWQ9#A5@^bN zrB5B^DqqYMB7Hqy8OFNfKRIXM0=ILXS)g^tD+`nzFz?QVUfogF$(=m?>vqRJtkxGI zr0XQlEtta=X?>U6|G)IztVK%a)?6P1Ykd&a#hTXVLJi5)H8id-?O&vHfd8SEBUHzDmufE@LotdCy7OFpW9Mu5_d(Zy-O}v}D(^d9+<=hhK5Qts-X(-|{}K z0>5TjI=#Bo@F_Fx@KWUt-Dz!JR)%|A5tU##EpY%#B9GI?&?_%1J#|+k4`7+`Z?5?B zWzH4tmg!T=GG&5%59_P9mnnC8sx2B|R9oOx?F?%5iqcoFw(vhyJL?srT8|s`z?PG( zX}gh+dB4GpxJFs*iN70?&S=>IxWCiR-m} zY1XSshZtK3*0F3X_#Vm;$F9rT$|(e6K`6$8mc^MvjIkg;j1Ir5^rO~m^qjJ+RKk)V zl*4X~8d!JZw@rR6m4%V`Qx*TJwCoN;=@5EurP9Av=Laj5#kFDZpl1Y;^*eam_{2I9 zP%V^or>|a9dV4sdd~ue?Q^!?GtcNq@7MwFzY4c+8e+q*0t9%53jQjH+F;r~)RSSZC zuj_NqjX{ty+#Lk=*SR40@a$BwbH2L z?YHi48Ta?l;}Wo}sa&lj$}YBCZnx&JVMLI_JA&8^KQubS*e+R*BQcsa&o9 z9L7xHxXDc6!m@k8TC8idR=&Gd&$_ir|1R1z0R>1{hcQiPJBul}>Gn*t&5Jc(Wz)by zOthMk=LVogrxf|bLy<#zA zR7rk_l3rI@(17(yfbI-v^6rbghi-5N>(J-cyPdIRz1A1|andJgC_xfAjFHsA7C`(+ zS_XwihlXxY`sf{c-v%Y@E|~Nd2D%hJ$2!xCS#M5kF5NC6ILR%{Twykh#Wy=)2}Bh zqi9?)A2}($7>CgN#mYlath}i_>QS8eh@m*$OK}LTdy`k3=bKU7tn~0`Ec3+}L!;|3 zMosWhbqsCYti0IC;oeg@g}Y?7Jg)L3kV2#1axcd+M7C)7xAA88Tkc>#^A>i8(M$N! zW!mG2t{rD;2*2tTuZ=y1GGmdi4PJSV@$nw4k3HpnG;|AgQnbNm<`!j82VK%;NZQ!2 zm-+Y&k`A^XvIUvqU;}*Q+sd#`uERGG^$d2w#VJo1VU$g;zK#6QL=V5MjOygNY=}OG z_8?ESTJ_Xo>j;QWsudmWK=f;(s~t*eCs&{Gn%`{C*AWfH62zj3Mm=eW#!%>1td=#= z30swsv6@I<fZYI3dkxNUCnE!&jM*cE(myYgU^%{~1UP1SW5R!Mhtklazsk21E!?5(@o?s4`>^Ct7(KpI3HNZO zO!L9Hu{CYkiB*(qN1@)k#w=_EhG+8&V-O3)2q*4=S{~})YLT#iy^sc-1!grNbH1uPo2TgrX>FU}0Et+X`Tt%H=vk5AF{2m5h zPptW3Or(fCSP#@PR{3U}R))s%2TX9bf#0% z2g*2vVdZ}c!_Pm!;A~?f;SDjhmG4nuk*=2o^6yccKf8T0HL!p(+od+bAm zo2Fs)huYl!{)bq5lVnSe`*8M-x~M#(^RlUa38GHZFsMY+@JI>M5OPA)B)^A- z`=P;mot^2?u(br;uW7iXYiPb#iHmXcF*F>7hEdQE@)2A9xJSy6;*>eov_zB(XCrCK zUW}BQiVyZGDgQ28=9VpQLtRSUvd@=dEBasSEt+ezJ%&z}Vu;kL?e>w9603=RSye;K zWxmYLn`fAfTh#R*xvT!;Bjk-Hzvf@$Z}7yqF!dk65Bf}V{{8o6nbZ+Y`YLo08ctVPQV@zZ^&c)z>qOZ&BCg?!k*$ji%XlXdi0?&{ZnrL2?jbG6t$ zJfMvC?D15s(BjiJzCg=V7t2&FK8vF0@Byr-T<)BfU-iNHSzCK6m?XWbt*E+=I#!!` zwX#vZWofQU;rE<;b2^W@6n~L3-4`pSKEJP(aL;CD-pZSr&bCMI_!?86ZAYP&ciJ{F z(#7SScbZ4C>EPGc#~NOxn7ubMt6sOJJ%SM@_9ty0tJOE!J{FWRWyXxCZJ|f4#?L2f zTj-O&QM%OA^kODm|BaRxAAf_quw8~6GE3Xc#GtM{gWzw$Gglk^n@vH7u#ff9NqxKV z?n9cACl4_tA#Z5OrEOYbD!gISvdE)k_aUW!ttJ2HN?Lu3{Ic~ilomwRHO)drY;7 zkhji=M8U)+Mo+p;q>sN>`gpRf$QR>^ZuYe>s&E~}ty03emY3g{ZDqb?>6xRri>Wmy zbG;99kV5zUz^9PB;ijA{g%x|0hoJx?fwoJE%+_{E^WLgE3nP?Wm@7vg!`j%zR@Y08Y3cmdF{Ni$Z7~Za zOxRc+->@^6x5Ws>&n(omLg}q*xw}Fi6e^U+F8@VK=G%sr7<#_~)3#>HwF)JvyIXqn zk2Q2FJ2D-H28TjV9e3MNd|Zj^=2j8+FDlBmYAV>aZKF!e{F)JImC6ciko{Vz-0#`W zs@i5vyB!`LU!ujB!T0d*MQ>?pgG7GRSYFmZJ z=**-#_u)}Ht!aU1*AL%oQ^4dZci%&)ci(4%m%j}+`u^i8ci&&D(jO*#ht=h3C}f+d z1z6H)4ury6`9I2O_#ZoZC*3``{G_hX0foQ9O1v;vZ2gZ4%iId_{!OjIZl{z!-Sj&o zlf4lJA!gEUEL#!pMre9&`crlpCMDAAr`-Knc}iJ@{>=HA-|)xE8yFiS+$hWgO&g`%QTo zr#FAA^C0MZ!#=^xJIW>GH>1dN7+h=HT;z+>6-+bFDc#yiX3Y@ep5taw??BXldk&+U zgAGi6ziUHE+uxNwEnSBpjqtV^iyRxf?N2GE5oWa+{tCc?-x1N8&2H=z&bCRcwSUrj%lE$9LQv{~jS$H0+Q0z>;`G@kbXLqHn zQtJ=bc&pYQHe(>N;Sbn%zQAu5Rrz9^L<=tPn?>>o9~vjok98QMd^5&fRPONX6;++| zp|PkBt-gp`c-M|xEf#D>EZA-`0bJ$JoUK5W&zJ&t-qR61%?ICIucqdalN)L*)J(QdfQH;ARI60pTb#;!CzmP z5poay@@_|Dtk$*c%}d@)WGw4y`sb1okIm27W;XuWjQuB@_x@AVFNR%#^w4D`iMOkz z{yrQS`oS)LRG)$Cy|~-HjOF~lYR)`Un*)umV1(6jAnl4WtgGg%=7{=RIjrbc!>|C_ 
zf5n|b4X!E)y7;d!=T_oTzZv2~Xxde`_&ZmXA+cKRAN-5Be9jPeB~Zs}STShUPq>Ep z!9k|Kl<{1eRU08(;gx2YwBRrHLbH2*sPe_Qu`8YWONsSx%IAHk1ZUstJZID`IG0>k zQaw7WF8I)iOq2drqSL(ph~e?jP*xx-LWDRy{BOX~bPN&G|>U&GR0Uo@b0a4LTE{Q^NJy zzW9QY(oyb?)fP3WQ54lN9zAJfmCc=|5g@ay8dQUU*xQtX7A_VxQS; zHyx^>9;lT~9?ZHMa{Y>7)#+GjA*zY`mhA(gx)Co*nj|%|J##{BxL(r&#xXwK&u+Bd zuC=MEBQ25C9_@H$`7a;LA@qx+cB#V~<(s#k$%{9|H?Q5BH`6!oNs~IEPUGYnANC>} zrpRh6_B}KF?5u@s-igQgCi9LB^Umk8>VEVftL!cMP_v3{|HF`GQfv*vx!~ay5CpzbBDbE3lz0HOu~blQQ|x_k z?xQ`Xu;6*Hf|W~XagjL7of60EsU!5*>*=qC;pa>3qp>79;Hn{fA+0#kEbMpy)A7G5 zlne*N&@29Kg=hTLq;76A9zu${HQm%ClVK8eO6t2cyi;G@9AUe?qK7?ckKa{nT3=;f zC&2`46ZQZ9!S;s)xMk@#wfpZ3*~WSE_t?_ZlP+L(6M-`;~#1JsuEYeV(+ zaJaJGt(fWV0CkmuuYuIF-&CDI=0<7+7)pFFWK&Wjwa+aWV(FDe>In3ywUJ&$R$yI= z_XIkn#%h4r!#2JozIf@s$x!hihO+ML2mfww zqJ9^Lukkd}zixAnxXI0O0czZ$nyP&b?V%8hXJU5-s`0YO_H1Scs$VLoTQg77T}NmX zJr<<)l<{ka)$~S?dOujh8?%<&$QnS+o2mm5YmNpqwyCNH*v2nwqO;-7WZHZ{BEY8bYTddvaCB)?xHZh`L59X+sM`)qYYliAEf2 z)r`tR)kySaI^HB>z3FY;Bnq{tNzvH+YHi0?rl7WYvDPdIxYf&exTS?wRV&K1s4;iA zgz2I7C*hbBcpN`=X^XISu#4rwRa`qFgsyh6Lb!@Q2ew#1tGY*E{^(JC)m!aC{3%C(9!sh^_uXlx5VlC)cpg`n^w;_gipG_AAZ|L23! zrsi7MD(YZF9x~{r4Rv)VKUM80cB42|4K>*=PocY1wQY`Vd`0NZC5A=X{SL5iNDIMe zi)-0ZlbwCbw&3=pKH>QGBOAWVn0+MLSMbrmD}|`4L`vMKx)Y4Wt_(6Dzm zJ*)~-G-cJ1TC%e7_cF#_2;f_kt<_OsT9rbJn^ze~ox|0qnj#;nS!<;hT5BEndgWW; zYGmtM%9gdEbKz<-%gb&NYL@7tHzL%`#%^6`67)EmMjwFa#L-+Bek?&eL~)(tz0tbuC^FT9rS2hbyW+utCJ9P5u*fl_iMx6RA@W3 zueZ8!?bJ3oScz!8F}<68E;{>DMBCplyuXP#lzkyI{9@u&Nl3g}*b<44(Gz`J%_5k6 zQS<^FZ^hn>R)&NQ$F=5;tay9j8bSMwgO9&N597H$yD86n(3+;XxFEE)dzwJN6B1{m zscCz45`3G}UQN{dqe7`=uETqzKmF8RjRLPJT20j5>5F#^jf+;hc(?!cXw_2dFU@e* z0Jp#PN2?QVU4PX8O6#EB$Ks*5gBlJyzU-jJc-LX>sP=5<4rla5kB^?Prn&n_t8-O% zcb(%ps&{+Kzu!@{=<;x|CcnT-zIQ9P{NEkb(JUraM-a%XjHZC*vj*XGsn&YDfPby3$KLVxI@-lMw4Cu$>t z%NP-C#)#k&4t1lUUDX+2{IsijCm02-?{8$B--o)ys*YQ56eQ5KST%LLZyniv?XJ&j z$ccbYUx@)VN?0~+sc>~dWa92)lZ45ii^9l9nhN$Zs}{7r^#sscaca^nOmW>t4Z5lG zqZ}8o+GDG{ic5)C*=#2wuhu@zCR^**$U&D-9ZWmBsgW$Cj&@UHp%Y&vN3h)JfQ=of zU3WEI@y!uVYrA71>ze@`+c;hwc?%VZ^kTf)=N2l;Iy%3OSA)e{u;j&&)KiTaf=>6* zl+&H;^jQtDH4FQu1_lV`7lW3X5a+A`re! 
z9nw{wG)4V>2D3t@pEa#JCfKP{MD49C&KcFhK9T0$rY45Egp!7KwgP8s1p#+!?rS=B zo0_7JNLFK(z>hN2(NP^m$qDMm^|ZaEU)xgSe(Jq3x)sJ4c7sG)dl9}h``IToRDqQ# z#f=*lLL2(2v2A>bWp*`ogNoYGwSMYz#`NMte+t?AtKH+!iPzDo_~nJG>HpO1&?31) z6LBTOB$2H;u=ru$>C@6Zoc`>ujU0(LGHDE_+fdp7H5+$HpAJxu;j|%9oyU61H~-Ki zbz^f^L`Y|?KZ<4xcI)-lz2RjC)k{`81vuEG4R^XCR&=KR$!dI(E8=u#`+SUo7w$>g zeRZXslEV?U29gT}j+LWU9!^I5xB}Ki% zZ4pk*Bg2&ylSj6(}X+K7`zN)bZMq9&Uo5!r+QHL=)z%Yitf?z9?2rSJfYc| zuGDpAG~Iib+S$Wc<%@9)t-DM8Sg+!#yVV$Xr@eQ#Isi|B-4<@w>T!%O(duy-D+`AZ zO|6Ebo?6BtUyPIJ$>A7V9IVSECr!RvRhz!QDM_@ZX+Mv`W^kl2J({ZaigK{u;KER+ zX-(O!?@b`fJ=Io7RGO+jDQhy_N2u*R7F50GHdSRV@yI05izCzyF}0?)MFs1G)^_%U z8$Lwayae{h^flY>)+%Y8hG=rEv$nQtyos&Y`oUogKb}e;GdE3rNSFCJO&tIkAzYWi zH!gh147~?37nuXzyhpv$%SLOsJ?|sK#>B9?jY55qBh@i@%5C&inJ>;|&7FlKRkJ8w zpwwSlwWJU4Roi$~R1t2M@dRFLV8M+QHNH>H^7uLPOCOx^wDvxAS}WVQYl&Cy!_PqF zT&w#dIhf&W)ct;p6vtVEAHHAhifzG?0lIVY`_yfqs}B|5ul8m;i#|AF>F@j1w%vU* zl=QVv#ZQ-omef!=hS+g+@0 zrU?i*-1PPst@gWx-v7thmB&R@uKzPIE~se8CUA~`U<&RDDw;VEX4+zodb7-8b1OGe zbC;PBw@jVa?a9&{a9kL*Ff}DLk*I7()0SJ_&}i}A7TdL(@%ujKoMDE6@!IbXK65zd zd7u4x-sfG;dlG5&1M)bnRlrSkt*$;Gr=kMoCt>2l-=e6N6501rX+14IS?W z`IAjeS20Uxr&q_yJ)%_gXsU@4(E$1Jm^!;=X08Q!U>h`ewz<{cm-y6iQq1v?bYLE`YeC3y@YBUuZaQsDBG9Tf@^NL=H zg}29@d|2)nRK-INHHLq9SRRsH%0xf_lNV89D8z|lalnHu&~oMij?akrEU=9K5C2&E z{WUN(IoxPe?@4htiOi$5w0(&|Sm3XiHGCW=DdB5!*b@Cz`yvU-ijaaN-eboj0rOY)lm9!w5z^WtFmJNJ1Q&Na^+pY*jg1RD&V`n zvqnee$vs=~Ljx?L=Bc%L^4PAW(%nX}89?l4YM!(hv}lDKaEj8pj#bq7w6hv3Ft6XZng#!T(T(Ft-=j~N5RiGt=Oajldq zCRpqfmZPqbvy9Um(1f(b+DBB8#5g`5d^RB)u4&;BT4lX(F1@@U#_Lw3ry>I zWh?-b(iw4W1j~5%jfrwUgU--oxf24`%Xi;_HMf^(zDu`H1_@PZ(fbBB{Bev39Q>(@m78WFoN6GMfR0Q?x-pwfgf}w-zu@7Mu z5m7S;bG{CoeyUpQ{V8$+==#G2jkcaDXI=-w5k%9b${n?QLsRA9*MTi;K>^d`&gcOp z&vIN!oTR2#wQuW8gQv-(0TAcM#COu6Y3l5{24K(7+{LcPmt88niT;=-r|?ReilQMz zO)w3c4levd+)1maV@|_AfJEn~%e`9K<3U|grx++|hCKQ@L^rgiCuhi+z-jTrmQ0ss z$i3qI1Kb%bJ}bj{kCxB7htcqSxtXDq?$4JW15StP3Jpa~q4W9jK(OTxpu8FNd7SrN z#Fsar`H#z8|6#8UkRF3A>EV(13^M?V_ zJv>X5uDS^ob0Ul$+DmItw&I&e(eGb{=xL628gDb)Q5xvSBhVQ)_lKOrYx z2lilldhQ8%Ol+grz6uOaR;@Lc!so~xaq=U+r1B;9avGaytB79(MkXU4tBvci(3!^1 zk$VMVI*m5ZktgH4OfXr|TrHOPX2@JQ9v<0j9!B;f=gJd1z%C7?!9fTb{qlJ6GnLMh z6R38s>dI)i96ZM*G|)g@n@rj_PwoaHpU#t??PA|io*)v@x2x=CHEj2lo0f@+=F6#Q z_tAX0FZ)1$od%ZFJX$Wr{-~q{az}Jo!xzZCv02SFNYun>g`G+R?drjoO0;u<+*Q@d zxdoUCfldZd(}nVVm;kxlfgW5acMj!yB2cod>i`Yw$ztO|ISK5XSSa6xB!cJq{qRNd zA}p}BJ-l&|Jj~D*#4g7_HBbb!!uyjF>aDr-<6=1tg>jbxxo0#QU6y(#isAxOSvYzp zM_=ixdQYO+1#)q0AfD%%`ilVkW`M{Aps34}jsWUokyAAwiyxrsKzh<5KakuG7}_lv z1V@QHaAm+fLflh%5S#U8V_ikr5R+k$#*1O z7v@icnqV(m61lT3Q$s4Ma%u+BrX_N!#)`!c&_i@- ziQNDCFgH%{-7P|nuy;pA6eiRXdU=^V_~uX; zl---hEJhp?r|R5tYn_Xzz_(Jz<=6q|4V&o?HjCyhmpc@A!xs93wf7Kd2eB~|&p&}3 z9n(!^`(9M*$dnzp_Qv7#hvP_3(HS_3@%$8U6a-3FN8zl&=oNC5ZvI;vXjv7irWcu4 z$cu)n665S#UsGuMHHhyI)KUbFc>gjmI@vhLqaj~-6=`)pYQNI$Q;i^1fksjON;$c; z9uGo~k&=~idyQj%?LNDOzFH}#-=)Qce~zC9$INZz|Kiq;U4>7|4`@_P4H2r`+I!Q# zpOn)zKlBIGj)tz1yI)`7I5ln4DtV|LOMa-ybZwRVd=D0p70j9HjX+6thh;4+XDTs+ zpKG~Qa2DeXrgHdMANuE0@`EA1_az#Gj2(xoHZDEpy0RD_BC=R z%yc7u-diK5a#UAwu5Ync?$`+qTjdTWK5-sWROMEy!BnMlTP3~IiT<%x?)IoRaNga1 zfQ?CF5F#Q6&;JAO#u`fUfl@6(yxU_IBDvBZjuiTCtvnf%IN$_2tqW-jC zGX!t@8ixBkBd{Ga0>%HZnBDXdhoBoJlA&J`uiNF= z0<`9#s<-<2AT`z2rh%L|J(_LRQAF*>X)}1vuhsDP5x(08u3B8_8*7sCO##vq9-eP1YwrAk&7b|G+oMIEGRKk9x*gK{COHk=*&}j-fUt%r}=q~d< zSWYGxr;Og_LCMqvpEGEZJP#3N(RpkAJe}StM;74UfIgTlf+$R446KMDb21-Vk*}k_ zF6uvUay~hx=&%E;%L~;QVNy*Q2-g(S=>|kR&`ZVWZp4`lEwT&`eCl z72j{VnjK)enl2gMvNo3v{?3cK~U6|Z3~!`KN&;OP@dfSKoYKf z^kaz}W7tQ*F1d?gFQvKUPWUg|CEsO$cj0D|N+iNnq5g)urs%pTiZxx$f-d4D=`E8a z9lQfP?vY2TlqO1Lv4I+;Fs8KLAnw{nQ}@U*A!vOLUp>L>8vLSN@)Yp52bI@eI=)9` 
[base85-encoded GIT binary delta data]

delta 112912
[base85-encoded GIT binary delta data]
i!}dR)G_7dc#%E3}`1YZ!XWyC;_t7maW$(_nM*csih=$Yv diff --git a/drivers/misc/tzdev/sysdep.h b/drivers/misc/tzdev/sysdep.h index 89d25142fc59..a645ad6113cf 100644 --- a/drivers/misc/tzdev/sysdep.h +++ b/drivers/misc/tzdev/sysdep.h @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -82,25 +81,6 @@ static inline gid_t __kgid_val(kgid_t gid) #define __flush_dcache_area(s, e) __cpuc_flush_dcache_area(s, e) #endif -#if defined(CONFIG_TZDEV_PAGE_MIGRATION) -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) -#define sysdep_migrate_pages(list, alloc, free) migrate_pages((list), (alloc), (free), 0, MIGRATE_SYNC, MR_MEMORY_FAILURE) -#define sysdep_putback_isolated_pages(list) putback_movable_pages(list) -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) -#define sysdep_migrate_pages(list, alloc, free) ({(void)free; migrate_pages((list), (alloc), 0, MIGRATE_SYNC, MR_MEMORY_FAILURE);}) -#define sysdep_putback_isolated_pages(list) putback_lru_pages(list) -#else -#define sysdep_migrate_pages(list, alloc, free) ({(void)free; migrate_pages((list), (alloc), 0, false, MIGRATE_SYNC);}) -#define sysdep_putback_isolated_pages(list) putback_lru_pages(list) -#endif -#endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -#define sysdep_kfifo_put(fifo, val) kfifo_put(fifo, val) -#else -#define sysdep_kfifo_put(fifo, val) kfifo_put(fifo, &val) -#endif - #if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0) #define U8_MAX ((u8)~0U) #define S8_MAX ((s8)(U8_MAX>>1)) diff --git a/drivers/misc/tzdev/teec/shared_memory.c b/drivers/misc/tzdev/teec/shared_memory.c index a2480746c0d3..837f25d6e8b1 100644 --- a/drivers/misc/tzdev/teec/shared_memory.c +++ b/drivers/misc/tzdev/teec/shared_memory.c @@ -24,8 +24,6 @@ #include "tzlog.h" #define PTR_ALIGN_PGDN(p) ((typeof(p))(((uintptr_t)(p)) & PAGE_MASK)) -#define OFFSET_IN_PAGE(x) ((x) & (~PAGE_MASK)) -#define NUM_PAGES(size) (((size) >> PAGE_SHIFT) + !!OFFSET_IN_PAGE(size)) static void tzdev_teec_release_shared_memory(void *data) { diff --git a/drivers/misc/tzdev/tz_common.h b/drivers/misc/tzdev/tz_common.h index 2bc0069bf99a..88f7f78b913b 100644 --- a/drivers/misc/tzdev/tz_common.h +++ b/drivers/misc/tzdev/tz_common.h @@ -25,12 +25,9 @@ #include #endif -#define UINT_PTR(a) ((void *)(unsigned long)(a)) - #define TZ_IOC_MAGIC 'c' #define TZIO_MEM_REGISTER _IOW(TZ_IOC_MAGIC, 120, struct tzio_mem_register) -#define TZIO_MEM_RELEASE _IOW(TZ_IOC_MAGIC, 121, int) #define TZIO_CRYPTO_CLOCK_CONTROL _IOW(TZ_IOC_MAGIC, 123, int) #define TZIO_GET_SYSCONF _IOW(TZ_IOC_MAGIC, 124, struct tzio_sysconf) #define TZIO_BOOST_CONTROL _IOW(TZ_IOC_MAGIC, 125, int) @@ -70,7 +67,6 @@ struct tzio_sysconf { } __attribute__((__packed__)); struct tzio_mem_register { - const uint64_t ptr; /* Memory region start (in) */ uint64_t size; /* Memory region size (in) */ uint32_t write; /* 1 - rw, 0 - ro */ } __attribute__((__packed__)); diff --git a/drivers/misc/tzdev/tz_mem.c b/drivers/misc/tzdev/tz_mem.c index 2b475b145858..d45d16cb8151 100644 --- a/drivers/misc/tzdev/tz_mem.c +++ b/drivers/misc/tzdev/tz_mem.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2017, Samsung Electronics Co., Ltd. + * Copyright (C) 2012-2020, Samsung Electronics Co., Ltd. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -27,8 +26,6 @@ #include "tz_mem.h" #include "tz_iwio.h" -#define TZDEV_MIGRATION_MAX_RETRIES 40 - #define TZDEV_PFNS_PER_PAGE (PAGE_SIZE / sizeof(sk_pfn_t)) #define TZDEV_IWSHMEM_IDS_PER_PAGE (PAGE_SIZE / sizeof(uint32_t)) @@ -39,83 +36,11 @@ static void *tzdev_mem_release_buf; static DEFINE_IDR(tzdev_mem_map); static DEFINE_MUTEX(tzdev_mem_mutex); -int isolate_lru_page(struct page *page); - -static unsigned long __tzdev_get_user_pages(struct task_struct *task, - struct mm_struct *mm, unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages, - struct vm_area_struct **vmas) -{ - struct page **cur_pages = pages; - unsigned long nr_pinned = 0; - int res; - - while (nr_pinned < nr_pages) { - res = sysdep_get_user_pages(task, mm, start, nr_pages - nr_pinned, write, - force, cur_pages, vmas); - if (res < 0) - return nr_pinned; - - start += res * PAGE_SIZE; - nr_pinned += res; - cur_pages += res; - } - - return nr_pinned; -} - -/* This is the same approach to pinning user memory - * as used in Infiniband drivers. - * Refer to drivers/inifiniband/core/umem.c */ -int tzdev_get_user_pages(struct task_struct *task, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, int write, - int force, struct page **pages, struct vm_area_struct **vmas) -{ - unsigned long i, locked, nr_pinned; - - locked = nr_pages + mm->pinned_vm; - - nr_pinned = __tzdev_get_user_pages(task, mm, start, nr_pages, write, - force, pages, vmas); - if (nr_pinned != nr_pages) - goto fail; - - mm->pinned_vm = locked; - - - return 0; - -fail: - for (i = 0; i < nr_pinned; i++) - put_page(pages[i]); - - return -EFAULT; -} - -void tzdev_put_user_pages(struct page **pages, unsigned long nr_pages) -{ - unsigned long i; - - for (i = 0; i < nr_pages; i++) { - /* NULL pointers may appear here due to unsuccessful migration */ - if (pages[i]) - put_page(pages[i]); - } -} - -void tzdev_decrease_pinned_vm(struct mm_struct *mm, unsigned long nr_pages) -{ - down_write(&mm->mmap_sem); - mm->pinned_vm -= nr_pages; - up_write(&mm->mmap_sem); -} - static void tzdev_mem_free(int id, struct tzdev_mem_reg *mem, unsigned int is_user) { - struct task_struct *task; - struct mm_struct *mm; + unsigned long i; - if (!mem->pid) { + if (!mem->is_user) { if (!is_user) { if (mem->free_func) mem->free_func(mem->free_data); @@ -129,22 +54,9 @@ static void tzdev_mem_free(int id, struct tzdev_mem_reg *mem, unsigned int is_us idr_remove(&tzdev_mem_map, id); - tzdev_put_user_pages(mem->pages, mem->nr_pages); - - task = get_pid_task(mem->pid, PIDTYPE_PID); - put_pid(mem->pid); - if (!task) - goto out; - - mm = get_task_mm(task); - put_task_struct(task); - if (!mm) - goto out; - - tzdev_decrease_pinned_vm(mm, mem->nr_pages); - mmput(mm); + for (i = 0; i < mem->nr_pages; i++) + __free_page(mem->pages[i]); -out: kfree(mem->pages); kfree(mem); } @@ -178,7 +90,10 @@ static int _tzdev_mem_release(int id, unsigned int is_user) goto out; } - if (is_user != !!mem->pid) { + if (is_user != mem->is_user) { + tzdev_teec_error("Trying to release %s memory but memory belongs %s.\n", + is_user ? "user space":"kernel space", + mem->is_user ? 
"user space":"kernel space"); ret = -EPERM; goto out; } @@ -271,178 +186,6 @@ static int _tzdev_mem_register(struct tzdev_mem_reg *mem, sk_pfn_t *pfns, return ret; } -#if defined(CONFIG_TZDEV_PAGE_MIGRATION) - -static struct page *tzdev_alloc_kernel_page(struct page *page, unsigned long private, int **x) -{ - return alloc_page(GFP_KERNEL); -} - -static void tzdev_free_kernel_page(struct page *page, unsigned long private) -{ - __free_page(page); -} - -static unsigned long tzdev_get_migratetype(struct page *page) -{ - struct zone *zone; - unsigned long flags; - unsigned long migrate_type; - - /* Zone lock must be held to avoid race with - * set_pageblock_migratetype() */ - zone = page_zone(page); - spin_lock_irqsave(&zone->lock, flags); - migrate_type = get_pageblock_migratetype(page); - spin_unlock_irqrestore(&zone->lock, flags); - - return migrate_type; -} - -static void tzdev_verify_migration_page(struct page *page) -{ - unsigned long migrate_type; - - migrate_type = tzdev_get_migratetype(page); - if (migrate_type == MIGRATE_CMA || migrate_type == MIGRATE_ISOLATE) - tzdev_print(0, "%s: migrate_type == %lu\n", __func__, migrate_type); -} - -static void tzdev_verify_migration(struct page **pages, unsigned long nr_pages) -{ - unsigned long i; - - for (i = 0; i < nr_pages; i++) - tzdev_verify_migration_page(pages[i]); -} - -static int __tzdev_migrate_pages(struct task_struct *task, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, int write, - int force, struct page **pages, unsigned long *verified_bitmap) -{ - unsigned long i = 0, migrate_nr = 0, nr_pin = 0; - unsigned long cur_pages_index, cur_start, pinned, migrate_type; - int res; - struct page **cur_pages; - LIST_HEAD(pages_list); - int ret = 0; - - /* Add migrating pages to the list */ - while ((i = find_next_zero_bit(verified_bitmap, nr_pages, i)) < nr_pages) { - migrate_type = tzdev_get_migratetype(pages[i]); - /* Skip pages that is currently isolated by somebody. - * Isolated page may originally have MIGRATE_CMA type, - * so caller should repeat migration for such pages */ - if (migrate_type == MIGRATE_ISOLATE) { - tzdev_print(0, "%s: migrate_type is MIGRATE_ISOLATE\n", __func__); - ret = -EAGAIN; - i++; - continue; - } - - /* Mark non-CMA pages as verified and skip them */ - if (migrate_type != MIGRATE_CMA) { - bitmap_set(verified_bitmap, i, 1); - i++; - continue; - } - - /* Call migrate_prep() once if migration necessary */ - if (migrate_nr == 0) - migrate_prep(); - - /* Pages should be isolated from an LRU list before migration. 
- * If isolation failed skip this page and inform caller to - * repeat migrate operation */ - res = isolate_lru_page(pages[i]); - if (res < 0) { - tzdev_print(0, "%s: isolate_lru_page() failed, res=%d\n", __func__, res); - ret = -EAGAIN; - i++; - continue; - } - - list_add_tail(&pages[i]->lru, &pages_list); - put_page(pages[i]); - /* pages array will be refilled with migrated pages later */ - pages[i] = NULL; - migrate_nr++; - i++; - } - - if (!migrate_nr) - return ret; - - /* make migration */ - res = sysdep_migrate_pages(&pages_list, tzdev_alloc_kernel_page, tzdev_free_kernel_page); - if (res) { - sysdep_putback_isolated_pages(&pages_list); - return -EFAULT; - } - - /* pin migrated pages */ - i = 0; - do { - nr_pin = 0; - - /* find index of the next migrated page */ - while (i < nr_pages && pages[i]) - i++; - - cur_pages = &pages[i]; - cur_pages_index = i; - cur_start = start + i * PAGE_SIZE; - - /* find continuous migrated pages range */ - while (i < nr_pages && !pages[i]) { - nr_pin++; - i++; - } - - /* and pin it */ - pinned = __tzdev_get_user_pages(task, mm, cur_start, nr_pin, - write, force, cur_pages, NULL); - if (pinned != nr_pin) - return -EFAULT; - - /* Check that migrated pages are not MIGRATE_CMA or MIGRATE_ISOLATE */ - tzdev_verify_migration(cur_pages, nr_pin); - bitmap_set(verified_bitmap, cur_pages_index, nr_pin); - - migrate_nr -= nr_pin; - } while (migrate_nr); - - return ret; -} - -int tzdev_migrate_pages(struct task_struct *task, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, int write, - int force, struct page **pages) -{ - int ret; - unsigned int retries = 0; - unsigned long *verified_bitmap; - size_t bitmap_size = DIV_ROUND_UP(nr_pages, BITS_PER_LONG); - - verified_bitmap = kcalloc(bitmap_size, sizeof(unsigned long), GFP_KERNEL); - if (!verified_bitmap) - return -ENOMEM; - - do { - ret = __tzdev_migrate_pages(task, mm, start, nr_pages, write, - force, pages, verified_bitmap); - - if (ret != -EAGAIN || (retries++ >= TZDEV_MIGRATION_MAX_RETRIES)) - break; - msleep(1); - } while (1); - - kfree(verified_bitmap); - - return ret; -} -#endif /* CONFIG_TZDEV_PAGE_MIGRATION */ - int tzdev_mem_init(void) { struct page *page; @@ -471,40 +214,28 @@ void tzdev_mem_fini(void) __free_page(virt_to_page(tzdev_mem_release_buf)); } -int tzdev_mem_register_user(void *ptr, unsigned long size, unsigned int write) +int tzdev_mem_register_user(unsigned long size, unsigned int write) { - struct task_struct *task; - struct mm_struct *mm; struct page **pages; struct tzdev_mem_reg *mem; sk_pfn_t *pfns; - unsigned long start, end; unsigned long nr_pages = 0; - int ret, res, i, id; + unsigned long i, j; + int ret, id; unsigned int flags = 0; if (!size) return -EINVAL; - if (!access_ok(write ? 
VERIFY_WRITE : VERIFY_READ, ptr, size)) - return -EFAULT; - - start = (unsigned long)ptr >> PAGE_SHIFT; - end = ((unsigned long)ptr + size + PAGE_SIZE - 1) >> PAGE_SHIFT; - nr_pages = end - start; + nr_pages = NUM_PAGES(size); if (write) flags |= TZDEV_IWSHMEM_REG_FLAG_WRITE; - task = current; - mm = get_task_mm(task); - if (!mm) - return -ESRCH; - pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { - ret = -ENOMEM; - goto out_mm; + tzdev_teec_error("Failed to allocate pages buffer.\n"); + return -ENOMEM; } pfns = kmalloc(nr_pages * sizeof(sk_pfn_t), GFP_KERNEL); @@ -519,72 +250,43 @@ int tzdev_mem_register_user(void *ptr, unsigned long size, unsigned int write) goto out_pfns; } - mem->pid = get_task_pid(task, PIDTYPE_PID); + for (i = 0; i < nr_pages; i++) { + pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pages[i]) { + tzdev_teec_error("Failed to allocate iwshmem page.\n"); + ret = -ENOMEM; + goto out_mem; + } + + pfns[i] = page_to_pfn(pages[i]); + } + + mem->is_user = 1; mem->nr_pages = nr_pages; mem->pages = pages; mem->free_func = NULL; mem->free_data = NULL; mem->in_release = 0; - /* - * Holding 'mm->mmap_sem' is required to synchronize users who tries to register same pages simultaneously. - * Without synchronization both users would hold page refcount and so preventing migration. - */ - down_write(&mm->mmap_sem); - res = tzdev_get_user_pages(task, mm, (unsigned long)ptr, - nr_pages, 1, !write, pages, NULL); - if (res) { - up_write(&mm->mmap_sem); - tzdev_print(0, "Failed to pin user pages (%d)\n", res); - ret = res; - goto out_mem; - } - -#if defined(CONFIG_TZDEV_PAGE_MIGRATION) - /* - * In case of enabled migration it is possible that userspace pages - * will be migrated from current physical page to some other - * To avoid fails of CMA migrations we have to move pages to other - * region which can not be inside any CMA region. This is done by - * allocations with GFP_KERNEL flag to point UNMOVABLE memblock - * to be used for such allocations. 
- */ - res = tzdev_migrate_pages(task, mm, (unsigned long)ptr, nr_pages, - 1, !write, pages); - if (res < 0) { - up_write(&mm->mmap_sem); - tzdev_print(0, "Failed to migrate CMA pages (%d)\n", res); - ret = res; - goto out_pin; - } -#endif /* CONFIG_TZDEV_PAGE_MIGRATION */ - up_write(&mm->mmap_sem); - for (i = 0; i < nr_pages; i++) - pfns[i] = page_to_pfn(pages[i]); - id = _tzdev_mem_register(mem, pfns, nr_pages, flags); if (id < 0) { ret = id; - goto out_pin; + goto out_mem; } kfree(pfns); - mmput(mm); - return id; -out_pin: - tzdev_put_user_pages(pages, nr_pages); - tzdev_decrease_pinned_vm(mm, nr_pages); out_mem: kfree(mem); + + for (j = 0; j < i; j++) + __free_page(pages[j]); out_pfns: kfree(pfns); out_pages: kfree(pages); -out_mm: - mmput(mm); return ret; } @@ -624,7 +326,7 @@ int tzdev_mem_register(void *ptr, unsigned long size, unsigned int write, goto out_pfns; } - mem->pid = NULL; + mem->is_user = 0; mem->free_func = free_func; mem->free_data = free_data; mem->in_release = 0; @@ -676,3 +378,15 @@ void tzdev_mem_release_panic_handler(void) tzdev_mem_free(id, mem, 0); mutex_unlock(&tzdev_mem_mutex); } + +int tzdev_mem_find(unsigned int id, struct tzdev_mem_reg **mem) +{ + mutex_lock(&tzdev_mem_mutex); + *mem = idr_find(&tzdev_mem_map, id); + mutex_unlock(&tzdev_mem_mutex); + + if (*mem == NULL) + return -ENOENT; + + return 0; +} diff --git a/drivers/misc/tzdev/tz_mem.h b/drivers/misc/tzdev/tz_mem.h index 5b3fcaa1aed5..becfbca8732a 100644 --- a/drivers/misc/tzdev/tz_mem.h +++ b/drivers/misc/tzdev/tz_mem.h @@ -16,13 +16,17 @@ #include #include +#include #include +#define OFFSET_IN_PAGE(x) ((x) & (~PAGE_MASK)) +#define NUM_PAGES(size) (((size) >> PAGE_SHIFT) + !!OFFSET_IN_PAGE(size)) + typedef void (*tzdev_mem_free_func_t)(void *); struct tzdev_mem_reg { - struct pid *pid; + unsigned int is_user; unsigned long nr_pages; struct page **pages; tzdev_mem_free_func_t free_func; @@ -33,21 +37,11 @@ struct tzdev_mem_reg { int tzdev_mem_init(void); void tzdev_mem_fini(void); - -int tzdev_mem_register_user(void *ptr, unsigned long size, unsigned int write); +int tzdev_mem_register_user(unsigned long size, unsigned int write); int tzdev_mem_release_user(unsigned int id); - int tzdev_mem_register(void *ptr, unsigned long size, unsigned int write, tzdev_mem_free_func_t free_func, void *free_data); int tzdev_mem_release(unsigned int id); -int tzdev_get_user_pages(struct task_struct *task, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, int write, - int force, struct page **pages, struct vm_area_struct **vmas); -int tzdev_migrate_pages(struct task_struct *task, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, int write, - int force, struct page **pages); -void tzdev_put_user_pages(struct page **pages, unsigned long nr_pages); -void tzdev_decrease_pinned_vm(struct mm_struct *mm, unsigned long nr_pages); +int tzdev_mem_find(unsigned int id, struct tzdev_mem_reg **mem); void tzdev_mem_release_panic_handler(void); - #endif /* __TZ_MEM_H__ */ diff --git a/drivers/misc/tzdev/tzdev.c b/drivers/misc/tzdev/tzdev.c index 4d4a25b13ffb..d57ab3bffba6 100644 --- a/drivers/misc/tzdev/tzdev.c +++ b/drivers/misc/tzdev/tzdev.c @@ -60,6 +60,7 @@ #include "tzdev.h" #include "tzlog.h" #include "tzprofiler.h" +#include "umem.h" MODULE_AUTHOR("Jaemin Ryu "); MODULE_AUTHOR("Vasily Leonenko "); @@ -95,11 +96,6 @@ enum tzdev_swd_state { TZDEV_SWD_DEAD }; -struct tzdev_shmem { - struct list_head link; - unsigned int id; -}; - static atomic_t tzdev_nwd_state = 
ATOMIC_INIT(TZDEV_NWD_DOWN); static atomic_t tzdev_swd_state = ATOMIC_INIT(TZDEV_SWD_DOWN); @@ -357,6 +353,12 @@ int tzdev_run_init_sequence(void) ret = -ESHUTDOWN; goto out; } + + if (tzdev_umem_register()) { + tzdev_print(0, "tzdev_umem_register() failed\n"); + ret = -ESHUTDOWN; + goto out; + } } out: if (ret == -ESHUTDOWN) { @@ -378,63 +380,6 @@ static int tzdev_get_sysconf(struct file *filp, unsigned long arg) return 0; } -static int tzdev_register_shared_memory(struct file *filp, unsigned long arg) -{ - int ret; - struct tzdev_shmem *shmem; - struct tzdev_fd_data *data = filp->private_data; - struct tzio_mem_register __user *argp = (struct tzio_mem_register __user *)arg; - struct tzio_mem_register s; - - if (copy_from_user(&s, argp, sizeof(struct tzio_mem_register))) - return -EFAULT; - - shmem = kzalloc(sizeof(struct tzdev_shmem), GFP_KERNEL); - if (!shmem) { - tzdev_print(0, "Failed to allocate shmem structure\n"); - return -ENOMEM; - } - - ret = tzdev_mem_register_user(UINT_PTR(s.ptr), s.size, s.write); - if (ret < 0) { - kfree(shmem); - return ret; - } - - INIT_LIST_HEAD(&shmem->link); - shmem->id = ret; - - spin_lock(&data->shmem_list_lock); - list_add(&shmem->link, &data->shmem_list); - spin_unlock(&data->shmem_list_lock); - - return shmem->id; -} - -static int tzdev_release_shared_memory(struct file *filp, unsigned int id) -{ - struct tzdev_shmem *shmem; - struct tzdev_fd_data *data = filp->private_data; - unsigned int found = 0; - - spin_lock(&data->shmem_list_lock); - list_for_each_entry(shmem, &data->shmem_list, link) { - if (shmem->id == id) { - list_del(&shmem->link); - found = 1; - break; - } - } - spin_unlock(&data->shmem_list_lock); - - if (!found) - return -EINVAL; - - kfree(shmem); - - return tzdev_mem_release_user(id); -} - static int tzdev_boost_control(struct file *filp, unsigned int state) { struct tzdev_fd_data *data = filp->private_data; @@ -485,8 +430,6 @@ static int tzdev_open(struct inode *inode, struct file *filp) return -ENOMEM; } - INIT_LIST_HEAD(&data->shmem_list); - spin_lock_init(&data->shmem_list_lock); mutex_init(&data->mutex); filp->private_data = data; @@ -496,15 +439,8 @@ static int tzdev_open(struct inode *inode, struct file *filp) static int tzdev_release(struct inode *inode, struct file *filp) { - struct tzdev_shmem *shmem, *tmp; struct tzdev_fd_data *data = filp->private_data; - list_for_each_entry_safe(shmem, tmp, &data->shmem_list, link) { - list_del(&shmem->link); - tzdev_mem_release_user(shmem->id); - kfree(shmem); - } - if (data->boost_state) tzdev_boost_control(filp, TZIO_BOOST_OFF); @@ -522,10 +458,6 @@ static long tzdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case TZIO_GET_SYSCONF: return tzdev_get_sysconf(filp, arg); - case TZIO_MEM_REGISTER: - return tzdev_register_shared_memory(filp, arg); - case TZIO_MEM_RELEASE: - return tzdev_release_shared_memory(filp, arg); case TZIO_BOOST_CONTROL: return tzdev_boost_control(filp, arg); default: @@ -562,6 +494,7 @@ static int exit_tzdev(struct notifier_block *cb, unsigned long code, void *unuse atomic_set(&tzdev_nwd_state, TZDEV_NWD_DOWN); + tzdev_umem_unregister(); tzdev_platform_unregister(); tz_cdev_unregister(&tzdev_cdev); tzdev_cma_mem_release(tzdev_cdev.device); diff --git a/drivers/misc/tzdev/tzdev.h b/drivers/misc/tzdev/tzdev.h index 95ea5bc12d30..f5f6008e1dae 100644 --- a/drivers/misc/tzdev/tzdev.h +++ b/drivers/misc/tzdev/tzdev.h @@ -66,9 +66,6 @@ #define TZDEV_SMC_PROFILER_CONTROL TZDEV_SMC_COMMAND(17) struct tzdev_fd_data { - struct list_head 
shmem_list; - spinlock_t shmem_list_lock; - unsigned int crypto_clk_state; unsigned int boost_state; struct mutex mutex; diff --git a/drivers/misc/tzdev/umem.c b/drivers/misc/tzdev/umem.c new file mode 100644 index 000000000000..f8902b570986 --- /dev/null +++ b/drivers/misc/tzdev/umem.c @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2012-2020, Samsung Electronics Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tz_cdev.h" +#include "tz_common.h" +#include "tz_mem.h" +#include "tzdev.h" +#include "tzlog.h" + +struct tzdev_mem_priv { + struct mutex mutex; + unsigned int id; +}; + +static atomic_t tzdev_mem_device_ready = ATOMIC_INIT(0); + +static int tzdev_mem_op_open(struct inode *inode, struct file *filp) +{ + struct tzdev_mem_priv *priv; + + (void)inode; + + priv = kmalloc(sizeof(struct tzdev_mem_priv), GFP_KERNEL); + if (!priv) { + tzdev_teec_error("Failed to allocate iwshmem private data.\n"); + return -ENOMEM; + } + + mutex_init(&priv->mutex); + priv->id = 0; + + filp->private_data = priv; + + return 0; +} + +static int tzdev_mem_op_release(struct inode *inode, struct file *filp) +{ + struct tzdev_mem_priv *priv; + + (void)inode; + + priv = filp->private_data; + if (priv->id) + tzdev_mem_release_user(priv->id); + + mutex_destroy(&priv->mutex); + kfree(priv); + + return 0; +} + +static long tzdev_mem_op_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct tzdev_mem_priv *priv = filp->private_data; + struct tzio_mem_register __user *argp = (struct tzio_mem_register __user *)arg; + struct tzio_mem_register memreg; + int ret; + + if (cmd != TZIO_MEM_REGISTER) + return -ENOTTY; + + if (copy_from_user(&memreg, argp, sizeof(struct tzio_mem_register))) + return -EFAULT; + + mutex_lock(&priv->mutex); + + if (priv->id) { + ret = -EEXIST; + goto out; + } + + ret = tzdev_mem_register_user(memreg.size, memreg.write); + if (ret < 0) + goto out; + + priv->id = ret; + +out: + mutex_unlock(&priv->mutex); + + return ret; +} + +static int tzdev_mem_op_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct tzdev_mem_priv *priv = filp->private_data; + struct tzdev_mem_reg *mem; + unsigned long i; + int ret; + + if (vma->vm_pgoff) + return -EINVAL; + + mutex_lock(&priv->mutex); + + if (!priv->id) { + ret = -ENXIO; + goto out; + } + if (!(vma->vm_flags & VM_WRITE)) { + ret = -EPERM; + goto out; + } + if (vma->vm_flags & VM_EXEC) { + ret = -EPERM; + goto out; + } + + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + vma->vm_flags &= ~VM_MAYEXEC; + + BUG_ON(tzdev_mem_find(priv->id, &mem)); + + if (vma_pages(vma) != mem->nr_pages) { + ret = -EIO; + goto out; + } + + for (i = 0; i < mem->nr_pages; i++) { + ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE, mem->pages[i]); + if (ret) + goto out; + } + +out: + mutex_unlock(&priv->mutex); + + return ret; +} + +static const struct file_operations tzdev_mem_fops = { + .owner = THIS_MODULE, + .open = tzdev_mem_op_open, + .release = tzdev_mem_op_release, + .unlocked_ioctl = tzdev_mem_op_ioctl, +#ifdef CONFIG_COMPAT 
+ .compat_ioctl = tzdev_mem_op_ioctl, +#endif /* CONFIG_COMPAT */ + .mmap = tzdev_mem_op_mmap, +}; + +static struct tz_cdev tzdev_mem_cdev = { + .name = "tziwshmem", + .fops = &tzdev_mem_fops, + .owner = THIS_MODULE, +}; + +int tzdev_umem_register(void) +{ + int ret; + + ret = tz_cdev_register(&tzdev_mem_cdev); + if (ret) { + tzdev_teec_error("Failed to create iwshmem device, error=%d\n", ret); + return ret; + } + + atomic_set(&tzdev_mem_device_ready, 1); + + tzdev_teec_info("Iwshmem user interface initialization done.\n"); + + return 0; +} + +int tzdev_umem_unregister(void) +{ + if (!atomic_cmpxchg(&tzdev_mem_device_ready, 1, 0)) { + tzdev_teec_info("Iwshmem user interface was not initialized.\n"); + return -EPERM; + } + + tz_cdev_unregister(&tzdev_mem_cdev); + + tzdev_teec_info("Iwshmem user interface finalization done.\n"); + + return 0; +} diff --git a/drivers/misc/tzdev/umem.h b/drivers/misc/tzdev/umem.h new file mode 100644 index 000000000000..6a7f453051cd --- /dev/null +++ b/drivers/misc/tzdev/umem.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2012-2020, Samsung Electronics Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __TZ_UMEM_H__ +#define __TZ_UMEM_H__ + +#include + +int tzdev_umem_register(void); +int tzdev_umem_unregister(void); + +#endif /* __TZ_UMEM_H__ */ diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 0f69b77e8502..7e63c3b8c714 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1743,7 +1743,11 @@ static const struct driver_info belkin_info = { static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ +#if IS_ENABLED(CONFIG_USB_HOST_SAMSUNG_FEATURE) + USB_DEVICE_INTERFACE_CLASS(0x0b95, 0x1790, 0xff), +#else USB_DEVICE(0x0b95, 0x1790), +#endif /* CONFIG_USB_HOST_SAMSUNG_FEATURE */ .driver_info = (unsigned long)&ax88179_info, }, { /* ASIX AX88178A 10/100/1000 */ diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f5316ab68a0a..8da42f47b4c3 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1652,7 +1652,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) static const struct driver_info cdc_ncm_info = { .description = "CDC NCM", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET - | FLAG_LINK_INTR, + | FLAG_LINK_INTR | FLAG_ETHER, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .manage_power = usbnet_manage_power, diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Kconfig b/drivers/net/wireless/broadcom/bcmdhd_101_16/Kconfig index 2e05dc437759..c6be1273619e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Kconfig +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Kconfig @@ -1,5 +1,5 @@ # -# Copyright (C) 2021, Broadcom. +# Copyright (C) 2022, Broadcom. 
# # Unless you and Broadcom execute a separate written software license # agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index 9b959aeebd61..8eba343dde17 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2021, Broadcom. +# Copyright (C) 2022, Broadcom. # # Unless you and Broadcom execute a separate written software license # agreement governing use of this software, this software is licensed to you @@ -517,6 +517,8 @@ endif # NAN-Ranging BW private command DHDCFLAGS += -DSUPPORT_NAN_RANGING_TEST_BW +# NAN customer discovery cache nubmer + DHDCFLAGS += -DCUSTOM_NAN_MAX_CACHE_DISC_RESULT=160 ifeq ($(CONFIG_ARCH_QCOM),y) # Due to GKI config, Exynos has ARCH_QCOM & ARCH_EXYNOS, MSM has ARCH_QCOM only, Temporary solution @@ -806,6 +808,8 @@ endif DHDCFLAGS += -DFLOW_RING_PREALLOC # NAN-Ranging BW private command DHDCFLAGS += -DSUPPORT_NAN_RANGING_TEST_BW +# NAN customer discovery cache nubmer + DHDCFLAGS += -DCUSTOM_NAN_MAX_CACHE_DISC_RESULT=40 ifeq ($(CONFIG_ARCH_QCOM),y) # Due to GKI config, Exynos has ARCH_QCOM & ARCH_EXYNOS, MSM has ARCH_QCOM only, Temporary solution diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/aiutils.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/aiutils.c index 5f3604da480f..5f071a3cda7b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/aiutils.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/aiutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_app_utils.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_app_utils.c index f51cdddca600..b4ceab7ba2b0 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_app_utils.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_app_utils.c @@ -3,7 +3,7 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_l2_filter.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_l2_filter.c index 035dfe883a99..65ef6e224832 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_l2_filter.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcm_l2_filter.c @@ -1,7 +1,7 @@ /* * L2 Filter handling functions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmbloom.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmbloom.c index f97d61a97fa8..a23f242da42d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmbloom.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmbloom.c @@ -1,7 +1,7 @@ /* * Bloom filter support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmevent.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmevent.c index f0e470ae4290..1df313d86f5a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmevent.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmevent.c @@ -1,7 +1,7 @@ /* * bcmevent read-only data shared by kernel or app layers * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh.c index fa34a656a12d..99e21ad91986 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh.c @@ -2,7 +2,7 @@ * BCMSDH interface glue * implement bcmsdh API for SDIOH driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_linux.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_linux.c index 33de963845ff..7cfca602dde7 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_linux.c @@ -1,7 +1,7 @@ /* * SDIO access interface for drivers - linux specific (pci only) * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc.c index 7e445f9085b2..f9e3cf2eb6b4 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc.c @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc_linux.c index ab79e6c5a42e..99d49e2c8550 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdh_sdmmc_linux.c @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 2021, Broadcom. 
+ * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdstd.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdstd.h index 568cf31f5903..3b38c0184b56 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdstd.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmsdstd.h @@ -1,7 +1,7 @@ /* * 'Standard' SDIO HOST CONTROLLER driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmstdlib_s.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmstdlib_s.c index c926ddc8982e..fbe7026f6267 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmstdlib_s.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmstdlib_s.c @@ -1,7 +1,7 @@ /* * Broadcom Secure Standard Library. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmutils.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmutils.c index 239a0bf90a7a..2139911820bb 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmutils.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmutils.c @@ -1,7 +1,7 @@ /* * Driver O/S-independent utility routines * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmwifi_channels.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmwifi_channels.c index 4454af49f478..90c47de71fae 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmwifi_channels.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmwifi_channels.c @@ -3,7 +3,7 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmxtlv.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmxtlv.c index b37e45e8cbad..ad330ca6abc4 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmxtlv.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/bcmxtlv.c @@ -1,7 +1,7 @@ /* * Driver O/S-independent utility routines * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd.h index 2e2a1719e250..cc487ce36e18 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 2021, Broadcom. 
+ * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.c index 62c31daa571c..aca99c1ddc85 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.c @@ -1,7 +1,7 @@ /* * Bit packing and Base64 utils for EWP * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.h index 12813ea1a640..8332fb790a78 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bitpack.h @@ -1,7 +1,7 @@ /* * Bit packing and Base64 utils for EWP * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bus.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bus.h index ea2e73603bcb..d656b1346aeb 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bus.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_bus.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cdc.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cdc.c index 9b55a55335eb..0131b132cdd1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cdc.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cdc.c @@ -1,7 +1,7 @@ /* * DHD Protocol Module for CDC and BDC. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.c index fdc5ea4dce5e..215f0bd732a1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.h index ed6f026d7417..a362fb0e1398 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_cfg80211.h @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 2021, Broadcom. 
+ * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_common.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_common.c index 9ed543eef578..fcafea290f85 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_common.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_common.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), common DHD core. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_cis.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_cis.c index 0d00cbfe14a6..5db239aced7a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_cis.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_cis.c @@ -2,7 +2,7 @@ * Process CIS information from OTP for customer platform * (Handle the MAC address and module information) * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_exynos.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_exynos.c index e09b0a3f6ce0..c760794b4dc1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_exynos.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_exynos.c @@ -1,7 +1,7 @@ /* * Platform Dependent file for Samsung Exynos * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_gpio.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_gpio.c index 78c4506ad7b2..bd565a05d3a1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_gpio.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_gpio.c @@ -1,7 +1,7 @@ /* * Customer code to add GPIO control during WLAN start/stop * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_memprealloc.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_memprealloc.c index 35def1b5ea3a..fa06bfe16546 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_memprealloc.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_memprealloc.c @@ -1,7 +1,7 @@ /* * Platform Dependent file for usage of Preallocted Memory * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_msm.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_msm.c index 13d883ee9cd9..28e14363377f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_msm.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_msm.c @@ -1,7 +1,7 @@ /* * Platform Dependent file for Qualcomm MSM/APQ * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_sec.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_sec.c index a88749fedf57..2efe65bbdb6c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_sec.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_custom_sec.c @@ -1,7 +1,7 @@ /* * Customer HW 4 dependant file * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg.h index 0f0217fef94c..03ce58a69dfd 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg.h @@ -1,7 +1,7 @@ /* * Debug/trace/assert driver definitions for Dongle Host Driver. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.c index feef011eb39b..9f848e880525 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.c @@ -1,7 +1,7 @@ /* * DHD debug ring API and structures - implementation * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.h index 6162508c7f45..3d83d3ebae88 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_dbg_ring.h @@ -1,7 +1,7 @@ /* * DHD debug ring header file - interface * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c index 4793720b1e15..267b2a13057e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c @@ -1,7 +1,7 @@ /* * DHD debugability support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.h index 491f0725a603..f6ff93fcbafb 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.h @@ -1,7 +1,7 @@ /* * DHD debugability header file * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug_linux.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug_linux.c index 98977a0de112..63474082e1fc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug_linux.c @@ -1,7 +1,7 @@ /* * DHD debugability Linux os layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.c index 8da3d4e8c107..2e50bd8c2915 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.c @@ -1,7 +1,7 @@ /* * Wifi dongle status Filter and Report * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.h index 9e9d380b34c0..37612db18fe2 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_event_log_filter.h @@ -1,7 +1,7 @@ /* * Wifi dongle status Filter and Report * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.c index a84edf3f4183..d78cb1f60445 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.c @@ -4,7 +4,7 @@ * Flow rings are transmit traffic (=propagating towards antenna) related entities * * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.h index bd837009edaf..bd0207742762 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_flowring.h @@ -6,7 +6,7 @@ * Provides type definitions and function prototypes used to create, delete and manage flow rings at * high level. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.c index 86009c4ff5d9..ebb1af2c71ed 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.c @@ -1,7 +1,7 @@ /* * IP Packet Parser Module. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.h index a95a38965eca..bc36039c1af1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_ip.h @@ -3,7 +3,7 @@ * * Provides type definitions and function prototypes used to parse ip packet. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.c index dea9685c110f..45536c1affd6 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface. * Basically selected code segments from usb-cdc.c and usb-rndis.c * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -6002,7 +6002,7 @@ dhd_add_monitor_if(dhd_info_t *dhd) if (FW_SUPPORTED((&dhd->pub), monitor)) { #ifdef DHD_PCIE_RUNTIMEPM /* Disable RuntimePM in monitor mode */ - DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_STOP_RPM_TIMER(&dhd->pub); DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__)); #endif /* DHD_PCIE_RUNTIME_PM */ scan_suppress = TRUE; @@ -6036,7 +6036,7 @@ dhd_del_monitor_if(dhd_info_t *dhd) if (FW_SUPPORTED((&dhd->pub), monitor)) { #ifdef DHD_PCIE_RUNTIMEPM /* Enable RuntimePM */ - DHD_ENABLE_RUNTIME_PM(&dhd->pub); + DHD_START_RPM_TIMER(&dhd->pub); DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__)); #endif /* DHD_PCIE_RUNTIME_PM */ scan_suppress = FALSE; @@ -7189,6 +7189,11 @@ dhd_open(struct net_device *net) exit: mutex_unlock(&dhd->pub.ndev_op_sync); + + if (dhd_query_bus_erros(&dhd->pub)) { + ret = BCME_ERROR; + } + if (ret) { dhd_stop(net); } diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.h index a9056371e5ab..70f9c90432b9 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux.h @@ -1,7 +1,7 @@ /* * DHD Linux header file (dhd_linux exports for cfg80211 and other components) * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_exportfs.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_exportfs.c index 2a49688296b5..633fb98b1ecf 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_exportfs.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_exportfs.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface * Basically selected code segments from usb-cdc.c and usb-rndis.c * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_lb.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_lb.c index 315a96c2dd6a..be7296c64c29 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_lb.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_lb.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface * Basically selected code segments from usb-cdc.c and usb-rndis.c * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.c index 0c046e56cdce..8dbb603aac60 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.c @@ -1,7 +1,7 @@ /* * Packet dump helper functions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -923,16 +923,27 @@ typedef struct bootp_fmt { uint8 options[BOOTP_MIN_DHCP_OPT_LEN]; } PACKED_STRUCT bootp_fmt_t; +#define MAX_DHCP_OPS_STR 3 +#define MAX_DHCP_TYPES_STR 9 + static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; -static char dhcp_ops[][10] = { +static char dhcp_ops[MAX_DHCP_OPS_STR][10] = { "NA", "REQUEST", "REPLY" }; -static char dhcp_types[][10] = { +static char dhcp_types[MAX_DHCP_TYPES_STR][10] = { "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM" }; +#define DHCP_OPS_STR(ops) ((ops < MAX_DHCP_OPS_STR) ? \ + (dhcp_ops[ops]) : "UNKNOWN_DHCP_OPS") +#define DHCP_TYPES_STR(type) ((type < MAX_DHCP_TYPES_STR) ? \ + (dhcp_types[type]) : "UNKNOWN_DHCP_TYPE") + #ifdef DHD_STATUS_LOGGING -static const int dhcp_types_stat[9] = { +#define MAX_DHCP_TYPES_STAT 9 +#define DHCP_TYPES_STAT(type) ((type < MAX_DHCP_TYPES_STAT) ? \ + (dhcp_types_stat[type]) : ST(INVALID)) +static const int dhcp_types_stat[MAX_DHCP_TYPES_STAT] = { ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST), ST(DHCP_DECLINE), ST(DHCP_ACK), ST(DHCP_NAK), ST(DHCP_RELEASE), ST(DHCP_INFORM) @@ -945,7 +956,8 @@ dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, { bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN]; uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len); - int dhcp_type = 0, len, opt_len; + uint8 dhcp_type = 0; + int len, opt_len; char *ifname = NULL, *typestr = NULL, *opstr = NULL; bool cond; @@ -971,9 +983,9 @@ dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, if (*opt == DHCP_OPT_MSGTYPE) { if (opt[1]) { dhcp_type = opt[2]; - typestr = dhcp_types[dhcp_type]; - opstr = dhcp_ops[b->op]; - DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type], + typestr = DHCP_TYPES_STR(dhcp_type); + opstr = DHCP_OPS_STR(b->op); + DHD_STATLOG_DATA(dhdp, DHCP_TYPES_STAT(dhcp_type), ifidx, tx, cond); DHCP_PRINT("DHCP"); break; diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.h index a850e423311b..150ee2d67dce 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_pktdump.h @@ -1,7 +1,7 @@ /* * Header file for the Packet dump helper functions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_platdev.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_platdev.c index c9d5e7384457..fba53b93158f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_platdev.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_platdev.c @@ -1,7 +1,7 @@ /* * Linux platform device for DHD WLAN adapter * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_priv.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_priv.h index 8765f6f8e8b1..3214181ee2f0 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_priv.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_priv.h @@ -1,7 +1,7 @@ /* * DHD Linux header file - contains private structure definition of the Linux specific layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sched.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sched.c index c93423d27f3c..cd04bb2e779e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sched.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sched.c @@ -1,7 +1,7 @@ /* * Expose some of the kernel scheduler routines * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sock_qos.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sock_qos.h index f37cc030f925..f6745d80c1c1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sock_qos.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_sock_qos.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes to call into * DHD's QOS on Socket Flow module. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.c index 3187cbb0752f..72a8f30e5ff2 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), * Linux-specific network interface for transmit(tx) path * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.h index 242cc1e9af14..1484f7b5c31b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_tx.h @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), * Linux-specific network interface for transmit(tx) path * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.c index 3370a4708ede..7cc2eaf8070d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.h index 2e9d316eea9c..72ba203aa4fc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_linux_wq.h @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.c index c9c91dc8af85..bea4ba217091 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.c @@ -1,7 +1,7 @@ /* * DHD debugability support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.h index 349f56750bf3..533b4ebd5119 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_mschdbg.h @@ -1,7 +1,7 @@ /* * DHD debugability header file * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_msgbuf.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_msgbuf.c index c093f69d56c8..011ea7fce1d9 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_msgbuf.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_msgbuf.c @@ -3,7 +3,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.c index 8fc0a2a19525..969765bb67e0 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.c @@ -1,7 +1,7 @@ /* * DHD Bus Module for PCIE * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -14446,7 +14446,7 @@ dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus) uint8 dhd_d11_slices_num_get(dhd_pub_t *dhdp) { - return si_scan_core_present(dhdp->bus->sih) ? + return (dhdp->bus->sih && si_scan_core_present(dhdp->bus->sih)) ? MAX_NUM_D11_CORES_WITH_SCAN : MAX_NUM_D11CORES; } diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.h index 6dd0ed8e8231..2ff902ee3b5c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie.h @@ -1,7 +1,7 @@ /* * Linux DHD Bus Module for PCIE * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie_linux.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie_linux.c index ce83766ee897..acf1af81df90 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pcie_linux.c @@ -1,7 +1,7 @@ /* * Linux DHD Bus Module for PCIE * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.c index 5254033c1edd..3d129f23bf94 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.c @@ -1,7 +1,7 @@ /* * DHD debugability packet logging support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.h index 628097b5ea1e..1f0901761261 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pktlog.h @@ -1,7 +1,7 @@ /* * DHD debugability packet logging header file * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.c index 405702bde059..6bb5c4bf725d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD) * Prefered Network Offload and Wi-Fi Location Service(WLS) code. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -578,15 +578,7 @@ _dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t } } else #endif /* GSCAN_SUPPORT */ - { - if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) || - pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) { - DHD_ERROR(("%s pno freq(%d sec) is not valid \n", - __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); - err = BCME_BADARG; - goto exit; - } - } + #if !defined(WL_USE_RANDOMIZED_SCAN) err = dhd_set_rand_mac_oui(dhd); /* Ignore if chip doesnt support the feature */ diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.h index 25d677d40470..14bb261be62b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_pno.h @@ -2,7 +2,7 @@ * Header file of Broadcom Dongle Host Driver (DHD) * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_proto.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_proto.h index 63bf37b54617..eae8e98dca78 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_proto.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_proto.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_qos_algo.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_qos_algo.h index 6d87312ddeba..16feba68becb 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_qos_algo.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_qos_algo.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes for the QOS Algorithm * Note that this algorithm is a platform independent layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c index 290599d349cf..46f415c2387f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), RTT * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.h index d96245b973b2..fdcb719f77ef 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.h @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), RTT * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sdio.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sdio.c index 86403ff7f979..98bfa72e2ce6 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sdio.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sdio.c @@ -1,7 +1,7 @@ /* * DHD Bus Module for SDIO * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sec_feature.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sec_feature.h index 6ae4102d2965..5efa91315efc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sec_feature.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_sec_feature.h @@ -1,7 +1,7 @@ /* * Customer HW 4 dependant file * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.c index b43b720583bc..78834c7b04b9 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.c @@ -1,7 +1,7 @@ /* * DHD debugability: Status Information Logging support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.h index dd3152d15f02..04e7340fef8f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_statlog.h @@ -1,7 +1,7 @@ /* * DHD debugability: Header file for the Status Information Logging * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.c index 9e6dacda38b2..4abf2e3c9363 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.c @@ -1,7 +1,7 @@ /* * DHD PROP_TXSTATUS Module. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.h index 5325ab7b6ec8..90edb305404c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_wlfc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.c index 36fe007fdfe9..2276dca44b73 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.c @@ -2,7 +2,7 @@ * IE/TLV fragmentation/defragmentation support for * Broadcom 802.11bang Networking Device Driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.h index c5127abc0ed5..ae3b30159eba 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/frag.h @@ -2,7 +2,7 @@ * IE/TLV (de)fragmentation declarations/definitions for * Broadcom 802.11abgn Networking Device Driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktpool.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktpool.c index 872a97b87af8..d16afb90c34c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktpool.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktpool.c @@ -1,7 +1,7 @@ /* * HND generic packet pool operation primitives * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktq.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktq.c index ac51498fdc1b..4d913c059ced 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktq.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/hnd_pktq.c @@ -1,7 +1,7 @@ /* * HND generic pktq operation primitives * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/hndpmu.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/hndpmu.c index 95172213659f..df91168c19a8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/hndpmu.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/hndpmu.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing PMU corerev specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11.h index da3b6b10694d..8a2e5dfdc616 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.11 * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11ax.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11ax.h index d64bb2812710..9ad0a5ef276e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11ax.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11ax.h @@ -2,7 +2,7 @@ * Basic types and constants relating to 802.11ax/HE STA * This is a portion of 802.11ax definition. The rest are in 802.11.h. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11s.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11s.h index a2c6b5b056ad..429576537cbb 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11s.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.11s.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.11s Mesh * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.1d.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.1d.h index f9416ab6002f..45a94c0f98a8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.1d.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.1d.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.1D * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.3.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.3.h index 8ee0002c5fe8..263e9e793f9c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.3.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/802.3.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to 802.3 * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/aidmp.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/aidmp.h index b791559d35a2..4d31e56da014 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/aidmp.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/aidmp.h @@ -1,7 +1,7 @@ /* * Broadcom AMBA Interconnect definitions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_l2_filter.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_l2_filter.h index e67ecdfc5b66..22301703b305 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_l2_filter.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_l2_filter.h @@ -1,7 +1,7 @@ /* * L2 Filter handling functions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_mpool_pub.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_mpool_pub.h index 0f5498996776..dfbabe56222f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_mpool_pub.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_mpool_pub.h @@ -35,7 +35,7 @@ * and instrumentation on top of the heap, without modifying the heap * allocation implementation. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_ring.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_ring.h index 6b64cd036b30..3990f9ae8868 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_ring.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcm_ring.h @@ -6,7 +6,7 @@ * * NOTE: A ring of size N, may only hold N-1 elements. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmarp.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmarp.h index a010fcefe475..21f5e2463ec4 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmarp.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmarp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to ARP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmbloom.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmbloom.h index 1744a65ffc49..a978d4d45d01 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmbloom.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmbloom.h @@ -1,7 +1,7 @@ /* * Bloom filter support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmcdc.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmcdc.h index 5d05089d19d0..ed029055f990 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmcdc.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmcdc.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdefs.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdefs.h index a8acf15c817a..30c1411d3b6c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdefs.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdefs.h @@ -1,7 +1,7 @@ /* * Misc system wide definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs.h index 56083d16051a..7f29811513c5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs.h @@ -1,7 +1,7 @@ /* * Broadcom device-specific manifest constants. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs_legacy.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs_legacy.h index 8edf4d50a0da..63a951aac7d6 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs_legacy.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdevs_legacy.h @@ -1,7 +1,7 @@ /* * Broadcom device-specific manifest constants used by DHD, but deprecated in firmware. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdhcp.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdhcp.h index b4aed133c1d7..287c5e30add7 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdhcp.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmdhcp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to DHCP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmendian.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmendian.h index cad9c43e04ca..5f4c530d29a1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmendian.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmendian.h @@ -1,7 +1,7 @@ /* * Byte order utilities * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmerror.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmerror.h index 9c93ff4862fc..f6de52bee9a1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmerror.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmerror.h @@ -1,7 +1,7 @@ /* * Common header file for all error codes. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmeth.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmeth.h index a9c3e0594172..814691053572 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmeth.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmeth.h @@ -1,7 +1,7 @@ /* * Broadcom Ethernettype protocol definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmevent.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmevent.h index 1720031bcd53..ddd5ca638b35 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmevent.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmevent.h @@ -3,7 +3,7 @@ * * Dependencies: bcmeth.h * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmicmp.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmicmp.h index 9ca3eed2fb45..3a58a850617a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmicmp.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmicmp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to ICMP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmiov.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmiov.h index 7814b45450f1..bf5cb445b6b5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmiov.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmiov.h @@ -4,7 +4,7 @@ * To be used in firmware and host apps or dhd - reducing code size, * duplication, and maintenance overhead. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmip.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmip.h index 0d3e2969cf56..28c24d7595e8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmip.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmip.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to IP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmipv6.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmipv6.h index 6529ac3b0048..0ea9b4a98e6c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmipv6.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmipv6.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to Neighbor Discovery Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmmsgbuf.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmmsgbuf.h index 32edb6ec5b09..685a1980129f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmmsgbuf.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmmsgbuf.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmpcie.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmpcie.h index c436ac8623c1..699b423f0785 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmpcie.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmpcie.h @@ -3,7 +3,7 @@ * Software-specific definitions shared between device and host side * Explains the shared area between host and dongle * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmproto.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmproto.h index 1444f25a1d09..afc4616a30da 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmproto.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmproto.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to IP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmrand.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmrand.h index 6cde11f10caa..b2fdf820ff64 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmrand.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmrand.h @@ -1,7 +1,7 @@ /* * bcmrand.h. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdbus.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdbus.h index cc526c32a08b..e23dc7661235 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdbus.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdbus.h @@ -2,7 +2,7 @@ * Definitions for API from sdio common code (bcmsdh) to individual * host controller drivers. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh.h index f7d96b418872..4ded515710d1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh.h @@ -3,7 +3,7 @@ * export functions to client drivers * abstract OS and BUS specific details of SDIO * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh_sdmmc.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh_sdmmc.h index 0fcefe43a593..fcf27f80c41d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh_sdmmc.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdh_sdmmc.h @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdpcm.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdpcm.h index 649098d025a3..c5bdade2eb61 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdpcm.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmsdpcm.h @@ -2,7 +2,7 @@ * Broadcom SDIO/PCMCIA * Software-specific definitions shared between device and host side * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmstdlib_s.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmstdlib_s.h index 14d5a57184d5..c70493353a9c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmstdlib_s.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmstdlib_s.h @@ -1,7 +1,7 @@ /* * Broadcom Secure Standard Library. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtcp.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtcp.h index b8c4bc43649b..804026bd14f6 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtcp.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtcp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to TCP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtlv.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtlv.h index fed176fc2bc5..21d85f6764a7 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtlv.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmtlv.h @@ -1,7 +1,7 @@ /* * TLV and XTLV support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmudp.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmudp.h index 9058563d1cc5..8e4405afa85d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmudp.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmudp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to UDP Protocol * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmutils.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmutils.h index 982ecb16bccf..a21692f98e20 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmutils.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmutils.h @@ -1,7 +1,7 @@ /* * Misc useful os-independent macros and functions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_channels.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_channels.h index 75a9c2b44557..546d4b430bea 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_channels.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_channels.h @@ -3,7 +3,7 @@ * This header file housing the define and function prototype use by * both the wl driver, tools & Apps. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rates.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rates.h index 406683a45202..3554818cd896 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rates.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rates.h @@ -1,7 +1,7 @@ /* * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rspec.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rspec.h index b0d5008042dc..75a7d1b8b59d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rspec.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/bcmwifi_rspec.h @@ -1,7 +1,7 @@ /* * Common OS-independent driver header for rate management. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/brcm_nl80211.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/brcm_nl80211.h index 5719324d598d..11415aeca874 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/brcm_nl80211.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/brcm_nl80211.h @@ -1,7 +1,7 @@ /* * Definitions for nl80211 vendor command/event access to host driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhd_daemon.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhd_daemon.h index c60a6f61616e..f71d02907540 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhd_daemon.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhd_daemon.h @@ -1,7 +1,7 @@ /* * Header file for DHD daemon to handle timeouts * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhdioctl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhdioctl.h index 4540db3a3cec..be2112a2c5c1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhdioctl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dhdioctl.h @@ -5,7 +5,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dngl_stats.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dngl_stats.h index bc658c751a99..a9a7ba054db8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dngl_stats.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dngl_stats.h @@ -2,7 +2,7 @@ * Common stats definitions for clients of dongle * ports * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglevent.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglevent.h index cd976180f808..f01d89b17d84 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglevent.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglevent.h @@ -3,7 +3,7 @@ * * Dependencies: bcmeth.h * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglioctl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglioctl.h index 9213dd01af12..debf3c32cc37 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglioctl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/dnglioctl.h @@ -1,7 +1,7 @@ /* * HND Run Time Environment ioctl. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eap.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eap.h index 92d176d77e8a..03a68ea4e817 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eap.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eap.h @@ -4,7 +4,7 @@ * See * RFC 2284: PPP Extensible Authentication Protocol (EAP) * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eapol.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eapol.h index 50b8553659a4..aaac9ca4f09b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eapol.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/eapol.h @@ -5,7 +5,7 @@ * IEEE Std 802.1X-2001 * IEEE 802.1X RADIUS Usage Guidelines * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/epivers.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/epivers.h index 14a5f215a454..01cfad80a80e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/epivers.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/epivers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,25 +27,25 @@ #define EPI_MINOR_VERSION 16 -#define EPI_RC_NUMBER 89 +#define EPI_RC_NUMBER 90 #define EPI_INCREMENTAL_NUMBER 0 #define EPI_BUILD_NUMBER 0 -#define EPI_VERSION 101, 16, 89, 0 +#define EPI_VERSION 101, 16, 90, 0 -#define EPI_VERSION_NUM 0x65105900 +#define EPI_VERSION_NUM 0x65105a00 -#define EPI_VERSION_DEV 101.16.89 +#define EPI_VERSION_DEV 101.16.90 /* Driver Version String, ASCII, 32 chars max */ #if defined (WLTEST) -#define EPI_VERSION_STR "101.16.89 (wlan=r959202 WLTEST)" +#define EPI_VERSION_STR "101.16.90 (wlan=r969326 WLTEST)" #elif (defined (BCMDBG_ASSERT) && !defined (BCMDBG_ASSERT_DISABLED)) -#define EPI_VERSION_STR "101.16.89 (wlan=r959202 ASSRT)" +#define EPI_VERSION_STR "101.16.90 (wlan=r969326 ASSRT)" #else -#define EPI_VERSION_STR "101.16.89 (wlan=r959202)" +#define EPI_VERSION_STR "101.16.90 (wlan=r969326)" #endif /* BCMINTERNAL */ #endif /* _epivers_h_ */ diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/etd.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/etd.h index ad1399b4c2de..c5ec1a8695fa 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/etd.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/etd.h @@ -1,7 +1,7 @@ /* * Extended Trap data component interface file. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/ethernet.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/ethernet.h index 8ef9e7e08ea2..508529bf37a3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/ethernet.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/ethernet.h @@ -1,7 +1,7 @@ /* * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log.h index 16226fe4edf3..5ed220979ae1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_payload.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_payload.h index b5edd9157ed2..d6adae164e4c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_payload.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_payload.h @@ -4,7 +4,7 @@ * This file describes the payloads of event log entries that are data buffers * rather than formatted string entries. The contents are generally XTLVs. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_set.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_set.h index 6ced7dae9ff3..ba327f1caa6b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_set.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_set.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_tag.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_tag.h index ac0947cec915..1718add520cc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_tag.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_log_tag.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_trace.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_trace.h index 606bf043722a..e760301763ea 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_trace.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/event_trace.h @@ -1,7 +1,7 @@ /* * Trace log blocks sent over HBUS * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/fils.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/fils.h index b4eeacb3209f..03f7812b239a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/fils.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/fils.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to FILS AUTHENTICATION * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_armtrap.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_armtrap.h index e70b129c2afc..370ded15ed0a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_armtrap.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_armtrap.h @@ -1,7 +1,7 @@ /* * HND arm trap handling. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_cons.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_cons.h index f53d18e9c022..95d3b4293962 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_cons.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_cons.h @@ -1,7 +1,7 @@ /* * Console support for RTE - for host use only. 
* - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_debug.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_debug.h index 40c2e7e87d04..3f0c03090dd8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_debug.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_debug.h @@ -1,7 +1,7 @@ /* * HND Run Time Environment debug info area * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktpool.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktpool.h index 586f35339fdb..063b28c445e2 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktpool.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktpool.h @@ -1,7 +1,7 @@ /* * HND generic packet pool operation primitives * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktq.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktq.h index be39bc57eabd..7a4df00fa949 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktq.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_pktq.h @@ -1,7 +1,7 @@ /* * HND generic pktq operation primitives * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_trap.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_trap.h index 622c3729b1d7..47d6e8515702 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_trap.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hnd_trap.h @@ -1,7 +1,7 @@ /* * HND Trap handling. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndchipc.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndchipc.h index 5fc9cdb9376f..38664c9d9e37 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndchipc.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndchipc.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane chipcommon support - OS independent. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndlhl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndlhl.h index 6e80fff78d72..1ddd7b000e55 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndlhl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndlhl.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane PMU support. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndoobr.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndoobr.h index c53fc3f9d5b2..9fed7ee7bdcd 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndoobr.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndoobr.h @@ -1,7 +1,7 @@ /* * HND OOBR interface header * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndpmu.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndpmu.h index e41e83e071c4..e470488c7585 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndpmu.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndpmu.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane PMU support. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndsoc.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndsoc.h index 7a4d0c0bc4e7..38dc461a7634 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndsoc.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/hndsoc.h @@ -1,7 +1,7 @@ /* * Broadcom HND chip & on-chip-interconnect-related definitions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_osl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_osl.h index 26e6132d7347..7fe2565c1b2c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_osl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_osl.h @@ -1,7 +1,7 @@ /* * Linux OS Independent Layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_pkt.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_pkt.h index c03161c42ec4..2c71920ca4f5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_pkt.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linux_pkt.h @@ -1,7 +1,7 @@ /* * Linux Packet (skb) interface * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linuxver.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linuxver.h index bd49866243d7..ad602ef9b792 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linuxver.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/linuxver.h @@ -2,7 +2,7 @@ * Linux-specific abstractions to gain some independence from linux kernel versions. * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/lpflags.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/lpflags.h index 98897504bc7e..6ff882a14c19 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/lpflags.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/lpflags.h @@ -1,7 +1,7 @@ /* * Chip related low power flags * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/mbo.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/mbo.h index 41ab6251e3a1..d3a81e8ef7f1 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/mbo.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/mbo.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WFA MBO * (Multiband Operation) - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/msgtrace.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/msgtrace.h index 9fc7ae1b6c51..ea909bf262c3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/msgtrace.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/msgtrace.h @@ -1,7 +1,7 @@ /* * Trace messages sent over HBUS * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/nan.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/nan.h index 52b6fa48c5e1..41b57bd79db5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/nan.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/nan.h @@ -2,7 +2,7 @@ * Fundamental types and constants relating to WFA NAN * (Neighbor Awareness Networking) * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl.h index a55b3beea749..b6223dc79e0e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl.h @@ -1,7 +1,7 @@ /* * OS Abstraction Layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_decl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_decl.h index 16adf43cc39c..898fb0f9ca41 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_decl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_decl.h @@ -1,7 +1,7 @@ /* * osl forward declarations * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_ext.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_ext.h index 6a04c2b3c5f0..33084e3df4f4 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_ext.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/osl_ext.h @@ -2,7 +2,7 @@ * OS Abstraction Layer Extension - the APIs defined by the "extension" API * are only supported by a subset of all operating systems. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/p2p.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/p2p.h index 17a2505a2408..0b5b667ef570 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/p2p.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/p2p.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_end.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_end.h index 8f57273c595a..c955ddc90bdc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_end.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_end.h @@ -15,7 +15,7 @@ * #include * * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_start.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_start.h index 855bc495fca7..4a1d316805e0 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_start.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/packed_section_start.h @@ -15,7 +15,7 @@ * #include * * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcicfg.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcicfg.h index e4cac5e36169..c70db7c63980 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcicfg.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcicfg.h @@ -1,7 +1,7 @@ /* * pcicfg.h: PCI configuration constants and structures. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcie_core.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcie_core.h index 4b0e2c12e4c0..621dcf3bf956 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcie_core.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/pcie_core.h @@ -1,7 +1,7 @@ /* * BCM43XX PCIE core hardware definitions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbchipc.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbchipc.h index 933a2b8e8c7d..81b626b88cfe 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbchipc.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbchipc.h @@ -5,7 +5,7 @@ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, * GPIO interface, extbus, and support for serial and parallel flashes. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbconfig.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbconfig.h index d8786d0ff2be..424a3621cce3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbconfig.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbconfig.h @@ -1,7 +1,7 @@ /* * Broadcom SiliconBackplane hardware register definitions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbgci.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbgci.h index fec5d46f9cb0..55afc79d30bb 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbgci.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbgci.h @@ -1,7 +1,7 @@ /* * SiliconBackplane GCI core hardware definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhndarm.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhndarm.h index 92d7600eee36..db90a59a082e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhndarm.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhndarm.h @@ -1,7 +1,7 @@ /* * Broadcom SiliconBackplane ARM definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhnddma.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhnddma.h index 8e7bca0f089b..86422ebc65c2 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhnddma.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbhnddma.h @@ -2,7 +2,7 @@ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface * This supports the following chips: BCM42xx, 44xx, 47xx . * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbpcmcia.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbpcmcia.h index 87d6d09ab7c1..297898f2a68f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbpcmcia.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbpcmcia.h @@ -1,7 +1,7 @@ /* * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdio.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdio.h index 2e6ac1090d5f..18ba61233618 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdio.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdio.h @@ -4,7 +4,7 @@ * * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdpcmdev.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdpcmdev.h index bdb59b627cab..6373a1df89c3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdpcmdev.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsdpcmdev.h @@ -2,7 +2,7 @@ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific * device core support * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsocram.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsocram.h index 97eaa6f2785a..a8802bf60427 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsocram.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsocram.h @@ -1,7 +1,7 @@ /* * BCM47XX Sonics SiliconBackplane embedded ram core * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsysmem.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsysmem.h index 77147faac885..95974b083522 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsysmem.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sbsysmem.h @@ -1,7 +1,7 @@ /* * SiliconBackplane System Memory core * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdio.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdio.h index 4bb72a9ed381..70721f1bced3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdio.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdio.h @@ -2,7 +2,7 @@ * SDIO spec header file * Protocol and standard (common) device definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdioh.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdioh.h index f1e81813cea3..20200c131fdc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdioh.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdioh.h @@ -2,7 +2,7 @@ * SDIO Host Controller Spec header file * Register map and definitions for the Standard Host Controller * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdiovar.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdiovar.h index 90cfbc1d9826..7d2a7616d437 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdiovar.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/sdiovar.h @@ -2,7 +2,7 @@ * Structure used by apps whose drivers access SDIO drivers. * Pulled out separately so dhdu and wlu can both use it. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/siutils.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/siutils.h index 3ffccefe3d4b..32a227b14284 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/siutils.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/siutils.h @@ -2,7 +2,7 @@ * Misc utility routines for accessing the SOC Interconnects * of Broadcom HNBU chips. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/typedefs.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/typedefs.h index e803a1b602a8..c3c01c626d11 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/typedefs.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/typedefs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/vlan.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/vlan.h index e4c33e92827b..e22e1900e411 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/vlan.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/vlan.h @@ -1,7 +1,7 @@ /* * 802.1Q VLAN protocol definitions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bam.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bam.h index 98d126f5235f..4d372d36a03a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bam.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bam.h @@ -1,7 +1,7 @@ /* * Bad AP Manager for ADPS * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bigdata.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bigdata.h index b36b9213dfa3..029879d65575 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bigdata.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wl_bigdata.h @@ -1,7 +1,7 @@ /* * Bigdata logging and report. 
None EWP and Hang event. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wldev_common.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wldev_common.h index 39a350793bca..f078098f1336 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wldev_common.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wldev_common.h @@ -1,7 +1,7 @@ /* * Common function shared by Linux WEXT, cfg80211 and p2p drivers * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlfc_proto.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlfc_proto.h index 6a3f4a857812..2f3c5464a961 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlfc_proto.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlfc_proto.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl.h index adaf872b7762..334999d99860 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl.h @@ -6,7 +6,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_defs.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_defs.h index 13ed84aa7ea4..1f7767b45c5e 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_defs.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_defs.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_utils.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_utils.h index 93b369c226f1..9558938684bd 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_utils.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wlioctl_utils.h @@ -1,7 +1,7 @@ /* * Custom OID/ioctl related helper functions. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wpa.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wpa.h index c9c7cb2a7066..0ee7fdc899de 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wpa.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/include/wpa.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WPA * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl.c index d48a2ad38705..09ba87709f0b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl.c @@ -1,7 +1,7 @@ /* * Linux OS Independent Layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl_priv.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl_priv.h index 60c5681009ff..afa82d9e7eab 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl_priv.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_osl_priv.h @@ -1,7 +1,7 @@ /* * Private header file for Linux OS Independent Layer * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_pkt.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_pkt.c index 70fc8c18c6f4..32b9427340be 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_pkt.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/linux_pkt.c @@ -1,7 +1,7 @@ /* * Linux Packet (skb) interface * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/pcie_core.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/pcie_core.c index 8482df3038b4..35c12ead7602 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/pcie_core.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/pcie_core.c @@ -3,7 +3,7 @@ * Contains PCIe related functions that are shared between different driver models (e.g. firmware * builds, DHD builds, BMAC builds), in order to avoid code duplication. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/sbutils.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/sbutils.c index 066e4c8a174a..ef6dad3d2851 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/sbutils.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/sbutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils.c index 9cd334c103de..566bb4b55569 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils_priv.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils_priv.h index c8e6c4f0ae7f..0dda6484e916 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils_priv.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/siutils_priv.h @@ -1,7 +1,7 @@ /* * Include file private to the SOC Interconnect support files. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wb_regon_coordinator.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wb_regon_coordinator.c index f193f857d6c7..236f52b0c12b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wb_regon_coordinator.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wb_regon_coordinator.c @@ -1,7 +1,7 @@ /* * DHD BT WiFi Coex RegON Coordinator * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wifi_stats.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wifi_stats.h index 7e8eb1cc8a57..d4d79b1e153a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wifi_stats.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wifi_stats.h @@ -2,7 +2,7 @@ * Common stats definitions for clients of dongle * ports * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.c index e72299bd2142..ea72b6a4a7b7 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Android related functions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.h index 3c8af69c0f89..72c37e5e36d3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_android.h @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Android related functions * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bam.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bam.c index 014822a4edf7..222c5f9d7ccd 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bam.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bam.c @@ -1,7 +1,7 @@ /* * Bad AP Manager for ADPS * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -375,8 +375,12 @@ wl_bad_ap_mngr_add(wl_bad_ap_mngr_t *bad_ap_mngr, wl_bad_ap_info_t *bad_ap_info) if (bad_ap_mngr->num == WL_BAD_AP_MAX_ENTRY_NUM) { /* Remove the oldest entry if entry list is full */ spin_lock_irqsave(&bad_ap_mngr->lock, flags); - list_del(bad_ap_mngr->list.next); - bad_ap_mngr->num--; + entry = list_entry(bad_ap_mngr->list.next, wl_bad_ap_info_entry_t, list); + if (entry) { + list_del(&entry->list); + MFREE(bad_ap_mngr->osh, entry, sizeof(*entry)); + bad_ap_mngr->num--; + } spin_unlock_irqrestore(&bad_ap_mngr->lock, flags); } @@ -385,6 +389,7 @@ wl_bad_ap_mngr_add(wl_bad_ap_mngr_t *bad_ap_mngr, wl_bad_ap_info_t *bad_ap_info) if (entry != NULL) { spin_lock_irqsave(&bad_ap_mngr->lock, flags); list_del(&entry->list); + MFREE(bad_ap_mngr->osh, entry, sizeof(*entry)); bad_ap_mngr->num--; spin_unlock_irqrestore(&bad_ap_mngr->lock, flags); } @@ -412,7 +417,7 @@ wl_bad_ap_mngr_deinit(struct bcm_cfg80211 *cfg) while (!list_empty(&cfg->bad_ap_mngr.list)) { entry = list_entry(cfg->bad_ap_mngr.list.next, wl_bad_ap_info_entry_t, list); if (entry) { - list_del(&cfg->bad_ap_mngr.list); + list_del(&entry->list); MFREE(cfg->osh, entry, sizeof(*entry)); } } diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bigdata.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bigdata.c index 2949146150d2..55c2d97ecfa3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bigdata.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_bigdata.c @@ -1,7 +1,7 @@ /* * Bigdata logging and report. None EWP and Hang event. * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -215,9 +215,8 @@ wg_parse_ap_stadata(struct net_device *dev, struct ether_addr *sta_mac, } sta_v4 = (sta_info_v4_t *)ioctl_buf; + bzero(ap_sta_data, sizeof(wl_ap_sta_data_t)); ap_sta_data->mac = *sta_mac; - ap_sta_data->rssi = 0; - ap_sta_data->mimo = 0; rateset_adv = &sta_v4->rateset_adv; ap_sta_data->chanspec = sta_v4->chanspec; @@ -231,9 +230,12 @@ wg_parse_ap_stadata(struct net_device *dev, struct ether_addr *sta_mac, } ap_sta_data->channel = wf_chspec_ctlchan(ap_sta_data->chanspec); - ap_sta_data->rate = - (sta_v4->rateset.rates[sta_v4->rateset.count - 1] & DOT11_RATE_MASK) / 2; - + if (sta_v4->rateset.count > 0) { + ap_sta_data->rate = + (sta_v4->rateset.rates[sta_v4->rateset.count - 1] & DOT11_RATE_MASK) / 2; + } else { + WL_ERR(("get sta rateset failed due to invalid count\n")); + } ap_sta_data->mode_80211 = BIGDATA_DOT11_11BGN_BIT; if (sta_v4->vht_flags) { diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.c index 349cdeb18e9b..e8c54d629cd3 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -10990,7 +10990,7 @@ int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 * if ((interworking_ie = bcm_parse_tlvs(ie, ie_len, DOT11_MNG_INTERWORKING_ID)) != NULL) { if ((tlv_ie = bcm_parse_tlvs(ie, ie_len, DOT11_MNG_VS_ID)) != NULL) { - remained_len = ie_len; + remained_len = ie_len - ((u8*)tlv_ie - ie); while (tlv_ie) { if (count > MAX_VNDR_IE_NUMBER) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.h index 1fa6b3ce0c93..d85f29427413 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg80211.h @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c index 55a85bf4e8b7..09380fb96cfa 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgdbg.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgdbg.c index 23e0275708c0..5d21fa0d4843 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgdbg.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgdbg.c @@ -1,7 +1,7 @@ /* * Debug ability related code * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.c index fded07d6639f..3ce16aabec96 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.c @@ -1,7 +1,7 @@ /* * Neighbor Awareness Networking * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.h index 2fe96dc65756..6ba49783f63d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgnan.h @@ -1,7 +1,7 @@ /* * Neighbor Awareness Networking * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -656,7 +656,12 @@ typedef struct wl_nan_iov { #ifdef WL_NAN_DISC_CACHE -#define NAN_MAX_CACHE_DISC_RESULT 16 +#ifndef CUSTOM_NAN_MAX_CACHE_DISC_RESULT +#define NAN_MAX_CACHE_DISC_RESULT 40 +#else +#define NAN_MAX_CACHE_DISC_RESULT CUSTOM_NAN_MAX_CACHE_DISC_RESULT +#endif /* CUSTOM_NAN_MAX_CACHE_DISC_RESULT */ + typedef struct { bool valid; wl_nan_instance_id_t pub_id; diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.c index a84fb2cf6c53..6c59f8752165 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.c @@ -1,7 +1,7 @@ /* * Linux cfgp2p driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.h index ac11bc9fb811..c47466082a31 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgp2p.h @@ -1,7 +1,7 @@ /* * Linux cfgp2p driver * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.c index cfb09f979481..a5bf1752d876 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver scan related code * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -3410,6 +3410,9 @@ wl_cfg80211_scan_mac_disable(struct net_device *dev) #define PNO_REPEAT_MAX 100u #define PNO_FREQ_EXPO_MAX 2u #define PNO_ADAPTIVE_SCAN_LIMIT 60u +#define PNO_SCAN_MAX_UNASSOC_SEC PNO_SCAN_MAX_FW_SEC +#define PNO_SCAN_MAX_ASSOC_SEC 3600 + static bool is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count) { @@ -3488,6 +3491,21 @@ wl_cfg80211_sched_scan_start(struct wiphy *wiphy, pno_time = request->scan_plans->interval; } + { + uint32 max_scan_freq = PNO_SCAN_MAX_UNASSOC_SEC; + + if (wl_get_drv_status(cfg, CONNECTED, dev)) { + max_scan_freq = PNO_SCAN_MAX_ASSOC_SEC; + } + + if (pno_time < PNO_SCAN_MIN_FW_SEC || + pno_time > max_scan_freq) { + WL_ERR(("Invalid pno scan interval:%d, max pno scan interval:%d\n", + pno_time, max_scan_freq)); + return -EINVAL; + } + } + WL_INFORM_MEM(("Enter. ssids:%d match_sets:%d pno_time:%d pno_repeat:%d " "channels:%d adaptive:%d\n", request->n_ssids, request->n_match_sets, pno_time, pno_repeat, request->n_channels, adaptive_pno)); diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.h index 5d5325a0f582..d8175bfee9cf 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgscan.h @@ -1,7 +1,7 @@ /* * Header for Linux cfg80211 scan * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.c index 0c985430ed13..858d8ca5b2b8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 Vendor Extension Code * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.h index eb5b8fde4dbe..e8b13104ed2f 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvendor.h @@ -1,7 +1,7 @@ /* * Linux cfg80211 Vendor Extension Code * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.c index a1144642cc56..75083550ab26 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.c @@ -1,7 +1,7 @@ /* * Wifi Virtual Interface implementaion * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.h index db48fd91818e..4aa9d6bb466c 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfgvif.h @@ -1,7 +1,7 @@ /* * Wifi Virtual Interface implementaion * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_linux_mon.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_linux_mon.c index b028e237e993..942eaa0087e6 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_linux_mon.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_linux_mon.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), Linux monitor network interface * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_roam.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_roam.c index ac9f9b7c6905..9fd84f61a694 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_roam.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_roam.c @@ -1,7 +1,7 @@ /* * Linux roam cache * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wlc_types.h b/drivers/net/wireless/broadcom/bcmdhd_101_16/wlc_types.h index e35416e67287..675134accb78 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wlc_types.h +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wlc_types.h @@ -1,7 +1,7 @@ /* * Forward declarations for commonly used wl driver structs * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wldev_common.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wldev_common.c index 21ed7ce8b373..c1529de13866 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wldev_common.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wldev_common.c @@ -1,7 +1,7 @@ /* * Common function shared by Linux WEXT, cfg80211 and p2p drivers * - * Copyright (C) 2021, Broadcom. + * Copyright (C) 2022, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/sensorhub/brcm/ssp.h b/drivers/sensorhub/brcm/ssp.h index cdefbdbeaa06..0b679d9a8fb5 100644 --- a/drivers/sensorhub/brcm/ssp.h +++ b/drivers/sensorhub/brcm/ssp.h @@ -1213,7 +1213,7 @@ void report_move_detector_data(struct ssp_data *data, int sensor_type, struct se void report_pocket_mode_data(struct ssp_data *data, int sensor_type, struct sensor_value *pocket_data); void report_led_cover_event_data(struct ssp_data *data, int sensor_type, struct sensor_value *led_cover_event_data); void report_auto_rotation_data(struct ssp_data *data, int sensor_type, struct sensor_value *auto_rotation_data); -void report_sar_backoff_motion_data(struct ssp_data *data, int sensor_type, struct sensor_value *auto_rotation_data); +void report_sar_backoff_motion_data(struct ssp_data *data, int sensor_type, struct sensor_value *sar_backoff_motion_data); unsigned int get_module_rev(struct ssp_data *data); void reset_mcu(struct ssp_data *data); int sensors_register(struct device *dev, void *drvdata, diff --git a/drivers/sensorhub/brcm/ssp_iio.c b/drivers/sensorhub/brcm/ssp_iio.c index 6c247bc05cb6..6c8ef7a1751a 100644 --- a/drivers/sensorhub/brcm/ssp_iio.c +++ b/drivers/sensorhub/brcm/ssp_iio.c @@ -531,11 +531,13 @@ void report_auto_rotation_data(struct ssp_data *data, int sensor_type, } void report_sar_backoff_motion_data(struct ssp_data *data, int sensor_type, - struct sensor_value *auto_rotation_data) + struct sensor_value *sar_backoff_motion_data) { - report_iio_data(data, SAR_BACKOFF_MOTION, auto_rotation_data); + report_iio_data(data, SAR_BACKOFF_MOTION, sar_backoff_motion_data); wake_lock_timeout(&data->ssp_wake_lock, 0.3*HZ); - pr_err("[SSP]: %s: %d ts: %llu", __func__, auto_rotation_data->auto_rotation_event, auto_rotation_data->timestamp); + pr_err("[SSP]: %s: %d ts: %llu", __func__, + sar_backoff_motion_data->sar_backoff_motion_event, + sar_backoff_motion_data->timestamp); } #define THM_UP 0 diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index 8a556df1c294..8a1d629eb246 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -31,6 +31,7 @@ #define VENDOR_NAME "SEMTECH" #define MODEL_NAME "SX9330" #define MODULE_NAME "grip_sensor" +#define NOTI_MODULE_NAME "grip_notifier" #define I2C_M_WR 0 /* for i2c Write */ #define I2c_M_RD 1 /* for i2c Read */ @@ -55,11 +56,20 @@ | MSK_IRQSTAT_RELEASE \ | MSK_IRQSTAT_COMP) +#define UNKNOWN_ON 1 +#define UNKNOWN_OFF 2 + +#define TYPE_USB 1 +#define TYPE_HALL 2 +#define TYPE_BOOT 3 +#define TYPE_FORCE 4 + #define HALLIC_PATH "/sys/class/sec/hall_ic/hall_detect" struct sx9330_p { struct i2c_client *client; struct input_dev *input; + struct input_dev *noti_input_dev; struct device *factory_device; struct delayed_work init_work; struct delayed_work irq_work; @@ -81,6 +91,8 @@ struct sx9330_p { atomic_t enable; + + u16 detect_threshold; u16 offset; s32 capMain; s32 useful; @@ -98,6 +110,13 @@ struct sx9330_p { int debug_count; char hall_ic[6]; + + int is_unknown_mode; + int motion; + bool first_working; + + int noti_enable; + int pre_attach; }; static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[]) @@ -208,6 +227,7 @@ static u8 sx9330_read_irqstate(struct sx9330_p *data) static void sx9330_initialize_register(struct sx9330_p *data) { u32 val32 = 0; + u32 threshold = 0; int idx; for (idx = 1; idx < 
(int)(ARRAY_SIZE(setup_reg)); idx++) { @@ -220,6 +240,15 @@ static void sx9330_initialize_register(struct sx9330_p *data) __func__, setup_reg[idx].reg, val32); } + sx9330_i2c_read_16bit(data, SX9330_ADCFILTPH0_REG + + (1 << (4 + MAIN_SENSOR)), &threshold); + + threshold = (threshold & 0xFF00) >> 8; + threshold = threshold * threshold / 2; + + data->detect_threshold = threshold; + + pr_info("[SX9330]: %s - detect threshold: %u\n", __func__, data->detect_threshold); data->init_done = ON; } @@ -307,6 +336,7 @@ static void sx9330_send_event(struct sx9330_p *data, u8 state) else input_report_rel(data->input, REL_MISC, 2); + input_report_rel(data->input, REL_X, data->is_unknown_mode); input_sync(data->input); } @@ -469,6 +499,7 @@ static void sx9330_check_status(struct sx9330_p *data, int enable) if (data->skip_data == true) { input_report_rel(data->input, REL_MISC, 2); + input_report_rel(data->input, REL_X, UNKNOWN_OFF); input_sync(data->input); return; } @@ -550,6 +581,26 @@ static void sx9330_set_debug_work(struct sx9330_p *data, u8 enable, } } +static void sx9330_enter_unknown_mode(struct sx9330_p *data, int type) +{ + if (data->noti_enable && !data->skip_data) { + data->motion = 0; + data->first_working = false; + if (data->is_unknown_mode == UNKNOWN_OFF) { + data->is_unknown_mode = UNKNOWN_ON; + if (!data->skip_data) { + input_report_rel(data->input, REL_X, UNKNOWN_ON); + input_sync(data->input); + } + pr_info("[SX9330]: UNKNOWN Re-enter\n"); + } else { + pr_info("[SX9330]: already UNKNOWN\n"); + } + input_report_rel(data->noti_input_dev, REL_X, type); + input_sync(data->noti_input_dev); + } +} + static ssize_t sx9330_get_offset_calibration_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1046,8 +1097,12 @@ static ssize_t sx9330_onoff_store(struct device *dev, if (atomic_read(&data->enable) == ON) { data->state = IDLE; input_report_rel(data->input, REL_MISC, 2); + input_report_rel(data->input, REL_X, UNKNOWN_OFF); input_sync(data->input); } + data->motion = 1; + data->is_unknown_mode = UNKNOWN_OFF; + data->first_working = false; } else { data->skip_data = false; } @@ -1055,6 +1110,104 @@ static ssize_t sx9330_onoff_store(struct device *dev, return count; } +static ssize_t sx9330_motion_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sx9330_p *data = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%s\n", + data->motion == 1 ? "motion_detect" : "motion_non_detect"); +} + +static ssize_t sx9330_motion_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + u8 val; + int ret; + struct sx9330_p *data = dev_get_drvdata(dev); + + ret = kstrtou8(buf, 2, &val); + if (ret) { + pr_err("[SX9330]: %s - Invalid Argument\n", __func__); + return ret; + } + + data->motion = val; + + pr_info("[SX9330]: %s - %u\n", __func__, val); + return count; +} + +static ssize_t sx9330_unknown_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sx9330_p *data = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%s\n", + (data->is_unknown_mode == UNKNOWN_ON) ? 
"UNKNOWN" : "NORMAL"); +} + +static ssize_t sx9330_unknown_state_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + u8 val; + int ret; + struct sx9330_p *data = dev_get_drvdata(dev); + + ret = kstrtou8(buf, 2, &val); + if (ret) { + pr_err("[SX9330]: Invalid Argument\n"); + return ret; + } + + if (val == 1) + sx9330_enter_unknown_mode(data, TYPE_FORCE); + else if (val == 0) + data->is_unknown_mode = UNKNOWN_OFF; + else + pr_info("[SX9330]: Invalid Argument(%u)\n", val); + + pr_info("[SX9330]: %u\n", val); + + return count; +} + +static ssize_t sx9330_noti_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + int ret; + u8 enable; + struct sx9330_p *data = dev_get_drvdata(dev); + + ret = kstrtou8(buf, 2, &enable); + if (ret) { + pr_err("[SX9330]: %s - argument\n", __func__); + return size; + } + + pr_info("[SX9330]: %s - new_value = %d\n", __func__, (int)enable); + + data->noti_enable = enable; + + if (data->noti_enable) + sx9330_enter_unknown_mode(data, TYPE_BOOT); + else { + data->motion = 1; + data->first_working = false; + data->is_unknown_mode = UNKNOWN_OFF; + } + + return size; +} + +static ssize_t sx9330_noti_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sx9330_p *data = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", data->noti_enable); +} + static DEVICE_ATTR(menual_calibrate, S_IRUGO | S_IWUSR | S_IWGRP, sx9330_get_offset_calibration_show, sx9330_set_offset_calibration_store); @@ -1092,6 +1245,12 @@ static DEVICE_ATTR(irq_count, 0664, static DEVICE_ATTR(resolution, 0444, sx9330_resolution_show, NULL); static DEVICE_ATTR(adc_filt, 0444, sx9330_adc_filt_show, NULL); static DEVICE_ATTR(useful_filt, 0444, sx9330_useful_filt_show, NULL); +static DEVICE_ATTR(motion, S_IRUGO | S_IWUSR | S_IWGRP, + sx9330_motion_show, sx9330_motion_store); +static DEVICE_ATTR(unknown_state, S_IRUGO | S_IWUSR | S_IWGRP, + sx9330_unknown_state_show, sx9330_unknown_state_store); +static DEVICE_ATTR(noti_enable, S_IRUGO | S_IWUSR | S_IWGRP, + sx9330_noti_enable_show, sx9330_noti_enable_store); static struct device_attribute *sensor_attrs[] = { &dev_attr_menual_calibrate, @@ -1122,6 +1281,9 @@ static struct device_attribute *sensor_attrs[] = { &dev_attr_resolution, &dev_attr_adc_filt, &dev_attr_useful_filt, + &dev_attr_motion, + &dev_attr_unknown_state, + &dev_attr_noti_enable, NULL, }; @@ -1191,17 +1353,26 @@ static void sx9330_touch_process(struct sx9330_p *data) } if (data->state == IDLE) { - if (status & (CSX_STATUS_REG << MAIN_SENSOR)) + if (status & (CSX_STATUS_REG << MAIN_SENSOR)) { + if (data->is_unknown_mode == UNKNOWN_ON && data->motion) + data->first_working = true; sx9330_send_event(data, ACTIVE); - else + } else { pr_info("[SX9330]: %s - %x already released.\n", __func__, status); + } } else { // User released - if (!(status & (CSX_STATUS_REG << MAIN_SENSOR))) + if (!(status & (CSX_STATUS_REG << MAIN_SENSOR))) { + if (data->is_unknown_mode == UNKNOWN_ON && data->motion) { + pr_info("[SX9330]: %s - unknown mode off\n", + __func__); + data->is_unknown_mode = UNKNOWN_OFF; + } sx9330_send_event(data, IDLE); - else + } else { pr_info("[SX9330]: %s - %x still touched\n", __func__, status); + } } } @@ -1266,6 +1437,21 @@ static void sx9330_irq_work_func(struct work_struct *work) __func__, sx9330_get_nirq_state(data)); } +static void sx9330_check_first_working(struct sx9330_p *data) +{ + if (data->noti_enable && data->motion) { + if (data->detect_threshold < data->diff) 
{ + data->first_working = true; + pr_info("[SX9330]: first working detected %d\n", data->diff); + } else { + if (data->first_working) { + data->is_unknown_mode = UNKNOWN_OFF; + pr_info("[SX9330]: Release detected %d, unknown mode off\n", data->diff); + } + } + } +} + static void sx9330_debug_work_func(struct work_struct *work) { struct sx9330_p *data = container_of((struct delayed_work *)work, @@ -1279,10 +1465,15 @@ static void sx9330_debug_work_func(struct work_struct *work) if (hall_flag) { pr_info("[SX9330]: %s - hall IC is closed\n", __func__); sx9330_set_offset_calibration(data); + sx9330_enter_unknown_mode(data, TYPE_HALL); hall_flag = 0; } } else { - hall_flag = 1; + if (!hall_flag) { + pr_info("[SX9330]: %s - hall IC is open\n", __func__); + sx9330_enter_unknown_mode(data, TYPE_HALL); + hall_flag = 1; + } } if (atomic_read(&data->enable) == ON) { @@ -1300,6 +1491,18 @@ static void sx9330_debug_work_func(struct work_struct *work) } } + if (data->debug_count >= GRIP_LOG_TIME) { + sx9330_get_data(data); + if (data->is_unknown_mode == UNKNOWN_ON && data->motion) + sx9330_check_first_working(data); + data->debug_count = 0; + } else { + if (data->is_unknown_mode == UNKNOWN_ON && data->motion) { + sx9330_get_data(data); + sx9330_check_first_working(data); + } + data->debug_count++; + } schedule_delayed_work(&data->debug_work, msecs_to_jiffies(2000)); } @@ -1317,6 +1520,7 @@ static int sx9330_input_init(struct sx9330_p *data) { int ret = 0; struct input_dev *dev = NULL; + struct input_dev *noti_input_dev = NULL; /* Create the input device */ dev = input_allocate_device(); @@ -1327,6 +1531,7 @@ static int sx9330_input_init(struct sx9330_p *data) dev->id.bustype = BUS_I2C; input_set_capability(dev, EV_REL, REL_MISC); + input_set_capability(dev, EV_REL, REL_X); input_set_drvdata(dev, data); ret = input_register_device(dev); @@ -1351,6 +1556,30 @@ static int sx9330_input_init(struct sx9330_p *data) /* save the input pointer and finish initialization */ data->input = dev; + noti_input_dev = input_allocate_device(); + if (!noti_input_dev) { + pr_err("[SX9330]: %s - input_allocate_device failed\n", __func__); + input_unregister_device(dev); + return -ENOMEM; + } + + noti_input_dev->name = NOTI_MODULE_NAME; + noti_input_dev->id.bustype = BUS_I2C; + + input_set_capability(noti_input_dev, EV_REL, REL_X); + input_set_drvdata(noti_input_dev, data); + + ret = input_register_device(noti_input_dev); + if (ret < 0) { + pr_err("[SX9330]: %s - failed to register input dev for noti (%d)\n", + __func__, ret); + input_unregister_device(dev); + input_free_device(noti_input_dev); + return ret; + } + + data->noti_input_dev = noti_input_dev; + return 0; } @@ -1396,6 +1625,10 @@ static void sx9330_initialize_variable(struct sx9330_p *data) data->skip_data = false; data->state = IDLE; + data->is_unknown_mode = UNKNOWN_OFF; + data->motion = 1; + data->first_working = false; + atomic_set(&data->enable, OFF); } @@ -1507,6 +1740,7 @@ static int sx9330_ccic_handle_notification(struct notifier_block *nb, default: pr_info("[SX9330]: %s accept cable = %d, attach = %d\n", __func__, usb_typec_info.cable_type, usb_typec_info.attach); + sx9330_enter_unknown_mode(pdata, TYPE_USB); sx9330_set_offset_calibration(pdata); break; } @@ -1652,6 +1886,7 @@ static int sx9330_probe(struct i2c_client *client, sysfs_remove_group(&data->input->dev.kobj, &sx9330_attribute_group); sensors_remove_symlink(data->input); input_unregister_device(data->input); + input_unregister_device(data->noti_input_dev); exit_input_init: kfree(data); 
exit_kzalloc: @@ -1680,6 +1915,7 @@ static int sx9330_remove(struct i2c_client *client) sensors_remove_symlink(data->input); sysfs_remove_group(&data->input->dev.kobj, &sx9330_attribute_group); input_unregister_device(data->input); + input_unregister_device(data->noti_input_dev); mutex_destroy(&data->read_mutex); kfree(data); diff --git a/drivers/uh/rkp_test.c b/drivers/uh/rkp_test.c index da454f42117f..4214e2245b42 100644 --- a/drivers/uh/rkp_test.c +++ b/drivers/uh/rkp_test.c @@ -425,7 +425,7 @@ static int test_case_kernel_range_rwx(void) u64 ro = 0, rw = 0; u64 xn = 0, x = 0; int i; - u64 fixmap_va = __fix_to_virt(FIX_ENTRY_TRAMP_TEXT); + u64 fixmap_va = __fix_to_virt(FIX_ENTRY_TRAMP_TEXT1); struct mem_range_struct test_ranges[] = { {(u64)VMALLOC_START, ((u64)_text) - ((u64)VMALLOC_START), "VMALLOC - STEXT", false, true}, diff --git a/drivers/usb/gadget/function/f_mtp_samsung.c b/drivers/usb/gadget/function/f_mtp_samsung.c index 0a8f4f1d2fa1..a0481f7a18a8 100644 --- a/drivers/usb/gadget/function/f_mtp_samsung.c +++ b/drivers/usb/gadget/function/f_mtp_samsung.c @@ -596,23 +596,19 @@ static int mtp_send_signal(int value) info.si_signo = SIG_SETUP; info.si_code = SI_QUEUE; info.si_int = value; - rcu_read_lock(); if (!current->nsproxy) { printk(KERN_DEBUG "process has gone\n"); - rcu_read_unlock(); return -ENODEV; } - t = pid_task(find_vpid(mtp_pid), PIDTYPE_PID); + t = get_pid_task(find_vpid(mtp_pid), PIDTYPE_PID); if (t == NULL) { printk(KERN_DEBUG "no such pid\n"); - rcu_read_unlock(); return -ENODEV; } - rcu_read_unlock(); /*send the signal*/ ret = send_sig_info(SIG_SETUP, &info, t); if (ret < 0) { diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 7bce0181c2a1..9ea7dfba6d62 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -653,6 +653,7 @@ static int rndis_set_response(struct rndis_params *params, BufLength = le32_to_cpu(buf->InformationBufferLength); BufOffset = le32_to_cpu(buf->InformationBufferOffset); if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset > RNDIS_MAX_TOTAL_SIZE) || (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) return -EINVAL; diff --git a/drivers/usb/notify/usb_notify.c b/drivers/usb/notify/usb_notify.c index 55c743e2a46e..0657159660e5 100644 --- a/drivers/usb/notify/usb_notify.c +++ b/drivers/usb/notify/usb_notify.c @@ -100,6 +100,7 @@ struct usb_notify { int c_status; int sec_whitelist_enable; int reserve_vbus_booster; + int disable_state; #if defined(CONFIG_USB_HW_PARAM) unsigned long long hw_param[USB_CCIC_HW_PARAM_MAX]; #endif @@ -775,9 +776,17 @@ int set_notify_disable(struct usb_notify_dev *udev, int disable) goto skip; } - pr_info("%s disable=%s(%d)\n", __func__, + pr_info("%s prev=%s(%d) => disable=%s(%d)\n", __func__, + block_string(u_notify->disable_state), u_notify->disable_state, block_string(disable), disable); + if (u_notify->disable_state == disable) { + pr_err("%s duplicated state\n", __func__); + goto skip; + } + + u_notify->disable_state = disable; + switch (disable) { case NOTIFY_BLOCK_TYPE_ALL: send_external_notify(EXTERNAL_NOTIFY_HOSTBLOCK_EARLY, 1); diff --git a/fs/crypto/fscrypt_ddar.c b/fs/crypto/fscrypt_ddar.c index b292e9f71335..77d9d2006c3b 100644 --- a/fs/crypto/fscrypt_ddar.c +++ b/fs/crypto/fscrypt_ddar.c @@ -42,6 +42,11 @@ int update_encryption_context_with_dd_policy( inode_lock(inode); ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (ret == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + ret = sizeof(ctx); + 
} + if (ret == -ENODATA) { dd_error("failed to set dd policy. empty fscrypto context\n"); ret = -EFAULT; diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index a779eb46bd9d..7279c9ef582b 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -471,6 +471,12 @@ int fscrypt_get_encryption_info(struct inode *inode) return res; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } +#endif if (res < 0) { if (!fscrypt_dummy_context_enabled(inode) || IS_ENCRYPTED(inode)) @@ -753,6 +759,12 @@ int fscrypt_get_encryption_key(struct inode *inode, struct fscrypt_key *key) // return res; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } +#endif if (res < 0) { return res; } else if (res != sizeof(ctx)) { @@ -817,6 +829,12 @@ int fscrypt_get_encryption_key_classified(struct inode *inode, struct fscrypt_ke // return res; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } +#endif if (res < 0) { return res; } else if (res != sizeof(ctx)) { @@ -877,6 +895,12 @@ int fscrypt_get_encryption_kek(struct inode *inode, // return res; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } +#endif if (res < 0) { return res; } else if (res != sizeof(ctx)) { diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index e9c348607d90..a3f622c57c3c 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -72,10 +72,14 @@ static int create_encryption_context_from_policy(struct inode *inode, } #if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + BUILD_BUG_ON((sizeof(ctx) - sizeof(ctx.knox_flags)) + != offsetof(struct fscrypt_context, knox_flags)); ctx.knox_flags = 0; -#endif - + return inode->i_sb->s_cop->set_context( + inode, &ctx, offsetof(struct fscrypt_context, knox_flags), NULL); +#else return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL); +#endif } int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) @@ -110,6 +114,12 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) inode_lock(inode); ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (ret == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + ret = sizeof(ctx); + } +#endif if (ret == -ENODATA) { if (!S_ISDIR(inode->i_mode)) ret = -ENOTDIR; @@ -148,6 +158,12 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) return -ENODATA; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } +#endif if (res < 0 && res != -ERANGE) return res; if (res != sizeof(ctx)) @@ -242,10 +258,22 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) } res = cops->get_context(parent, &parent_ctx, 
sizeof(parent_ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + parent_ctx.knox_flags = 0; + res = sizeof(parent_ctx); + } +#endif if (res != sizeof(parent_ctx)) return 0; res = cops->get_context(child, &child_ctx, sizeof(child_ctx)); +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (res == offsetof(struct fscrypt_context, knox_flags)) { + child_ctx.knox_flags = 0; + res = sizeof(child_ctx); + } +#endif if (res != sizeof(child_ctx)) return 0; @@ -304,7 +332,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, #ifdef CONFIG_DDAR res = dd_test_and_inherit_context(&ctx, parent, child, ci, fs_data); - if(res) { + if (res) { dd_error("failed to inherit dd policy\n"); return res; } @@ -319,8 +347,18 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, } #endif +#if defined(CONFIG_FSCRYPT_SDP) || defined(CONFIG_DDAR) + if (ctx.knox_flags != 0) { + res = parent->i_sb->s_cop->set_context(child, &ctx, + sizeof(ctx), fs_data); + } else { + res = parent->i_sb->s_cop->set_context(child, &ctx, + offsetof(struct fscrypt_context, knox_flags), fs_data); + } +#else res = parent->i_sb->s_cop->set_context(child, &ctx, sizeof(ctx), fs_data); +#endif if (res) return res; return preload ? fscrypt_get_encryption_info(child): 0; diff --git a/fs/crypto/sdp/sdp_dek.c b/fs/crypto/sdp/sdp_dek.c index d68b92d0f4ac..f39720526f0d 100644 --- a/fs/crypto/sdp/sdp_dek.c +++ b/fs/crypto/sdp/sdp_dek.c @@ -154,6 +154,10 @@ int fscrypt_sdp_set_sdp_policy(struct inode *inode, int engine_id) } res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } if (res != sizeof(ctx)) { if (res >= 0) DEK_LOGE("set_policy: failed to get fscrypt ctx (err:%d)\n", res); @@ -264,6 +268,10 @@ int fscrypt_sdp_set_sensitive(struct inode *inode, int engine_id, struct fscrypt DEK_LOGE("%s: Failed to get fscrypt ctx (err:%d)\n", __func__, rc); return rc; } + if (rc == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + rc = sizeof(ctx); + } if (!is_dir) { //run setsensitive with nonce from ctx @@ -305,6 +313,10 @@ int fscrypt_sdp_set_protected(struct inode *inode, int engine_id) } rc = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (rc == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + rc = sizeof(ctx); + } if (rc != sizeof(ctx)) { DEK_LOGE("set_protected: failed to get fscrypt ctx (err:%d)\n", rc); return -EINVAL; @@ -415,6 +427,10 @@ int fscrypt_sdp_initialize(struct inode *inode, int engine_id, struct fscrypt_ke return res; res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (res == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + res = sizeof(ctx); + } if (res != sizeof(ctx)) { if (res >= 0) res = -EEXIST; @@ -487,6 +503,10 @@ int fscrypt_sdp_add_chamber_directory(int engine_id, struct inode *inode) "%s: Failed to get fscrypt ctx (err:%d)\n", __func__, rc); return rc; } + if (rc == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + rc = sizeof(ctx); + } if (!ci->ci_sdp_info) { struct sdp_info *ci_sdp_info = fscrypt_sdp_alloc_sdp_info(); @@ -548,6 +568,10 @@ int fscrypt_sdp_remove_chamber_directory(struct inode *inode) "%s: Failed to get fscrypt ctx (err:%d)\n", __func__, rc); return rc; } + if (rc == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + rc = 
sizeof(ctx); + } if (!ci->ci_sdp_info) return -EINVAL; @@ -1100,6 +1124,10 @@ inline int __fscrypt_sdp_thread_convert_sdp_key(void *arg) if (ci && ci->ci_sdp_info) { rc = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (rc == offsetof(struct fscrypt_context, knox_flags)) { + ctx.knox_flags = 0; + rc = sizeof(ctx); + } if (rc != sizeof(ctx)) { if (rc > 0 ) rc = -EINVAL; diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index b2dcc34f618a..9b5f1b6e114a 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1624,7 +1624,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) goto out; } - if (NM_I(sbi)->dirty_nat_cnt == 0 && + if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && SIT_I(sbi)->dirty_sentries == 0 && prefree_segments(sbi) == 0) { f2fs_flush_sit_entries(sbi, cpc); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 386a046f137c..41316fd4deac 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1334,9 +1334,12 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, sync_out: /* for hardware encryption, but to avoid potential issue in future */ - if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) + if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) { f2fs_wait_on_block_writeback_range(inode, map->m_pblk, map->m_len); + invalidate_mapping_pages(META_MAPPING(sbi), + map->m_pblk, map->m_pblk); + } if (flag == F2FS_GET_BLOCK_PRECACHE) { if (map->m_flags & F2FS_MAP_MAPPED) { diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 4be36e035086..ca2753b60ffd 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -96,8 +96,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->dirty_count = dirty_segments(sbi); si->node_pages = NODE_MAPPING(sbi)->nrpages; si->meta_pages = META_MAPPING(sbi)->nrpages; - si->nats = NM_I(sbi)->nat_cnt; - si->dirty_nats = NM_I(sbi)->dirty_nat_cnt; + si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT]; + si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT]; si->sits = MAIN_SEGS(sbi); si->dirty_sits = SIT_I(sbi)->dirty_sentries; si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID]; @@ -244,9 +244,10 @@ static void update_mem_info(struct f2fs_sb_info *sbi) si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] + NM_I(sbi)->nid_cnt[PREALLOC_NID]) * sizeof(struct free_nid); - si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); - si->cache_mem += NM_I(sbi)->dirty_nat_cnt * - sizeof(struct nat_entry_set); + si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] * + sizeof(struct nat_entry); + si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] * + sizeof(struct nat_entry_set); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); for (i = 0; i < MAX_INO_ENTRY; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 169ab8865724..2d27090a3fc6 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -886,6 +886,13 @@ enum nid_state { PREALLOC_NID, /* it is preallocated */ MAX_NID_STATE, }; + +enum nat_state { + TOTAL_NAT, + DIRTY_NAT, + RECLAIMABLE_NAT, + MAX_NAT_STATE, +}; struct f2fs_nm_info { block_t nat_blkaddr; /* base disk address of NAT */ @@ -902,8 +909,7 @@ struct f2fs_nm_info { struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ struct list_head nat_entries; /* cached nat entry list (clean) */ spinlock_t nat_list_lock; /* protect clean nat entry list */ - unsigned int nat_cnt; /* the # of cached nat entries */ - unsigned int dirty_nat_cnt; /* total num of nat entries in set */ + unsigned int nat_cnt[MAX_NAT_STATE]; 
/* the # of cached nat entries */ unsigned int nat_blocks; /* # of nat blocks */ /* free node ids management */ diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index fe2b1c6aa1c6..0b9d23cea208 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -63,8 +63,8 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { - mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> - PAGE_SHIFT; + mem_size = (nm_i->nat_cnt[TOTAL_NAT] * + sizeof(struct nat_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); if (excess_cached_nats(sbi)) res = false; @@ -178,7 +178,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, list_add_tail(&ne->list, &nm_i->nat_entries); spin_unlock(&nm_i->nat_list_lock); - nm_i->nat_cnt++; + nm_i->nat_cnt[TOTAL_NAT]++; + nm_i->nat_cnt[RECLAIMABLE_NAT]++; return ne; } @@ -208,7 +209,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) { radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); - nm_i->nat_cnt--; + nm_i->nat_cnt[TOTAL_NAT]--; + nm_i->nat_cnt[RECLAIMABLE_NAT]--; __free_nat_entry(e); } @@ -254,7 +256,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, if (get_nat_flag(ne, IS_DIRTY)) goto refresh_list; - nm_i->dirty_nat_cnt++; + nm_i->nat_cnt[DIRTY_NAT]++; + nm_i->nat_cnt[RECLAIMABLE_NAT]--; set_nat_flag(ne, IS_DIRTY, true); refresh_list: spin_lock(&nm_i->nat_list_lock); @@ -274,7 +277,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, set_nat_flag(ne, IS_DIRTY, false); set->entry_cnt--; - nm_i->dirty_nat_cnt--; + nm_i->nat_cnt[DIRTY_NAT]--; + nm_i->nat_cnt[RECLAIMABLE_NAT]++; } static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, @@ -2915,14 +2919,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) LIST_HEAD(sets); int err = 0; - /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */ + /* + * during unmount, let's flush nat_bits before checking + * nat_cnt[DIRTY_NAT]. + */ if (enabled_nat_bits(sbi, cpc)) { down_write(&nm_i->nat_tree_lock); remove_nats_in_journal(sbi); up_write(&nm_i->nat_tree_lock); } - if (!nm_i->dirty_nat_cnt) + if (!nm_i->nat_cnt[DIRTY_NAT]) return 0; down_write(&nm_i->nat_tree_lock); @@ -2933,7 +2940,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * into nat entry set. 
*/ if (enabled_nat_bits(sbi, cpc) || - !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) + !__has_cursum_space(journal, + nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, @@ -3057,7 +3065,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi) sbi->nquota_files - F2FS_RESERVED_NODE_NUM; nm_i->nid_cnt[FREE_NID] = 0; nm_i->nid_cnt[PREALLOC_NID] = 0; - nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; @@ -3194,7 +3201,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) __del_from_nat_cache(nm_i, natvec[idx]); } } - f2fs_bug_on(sbi, nm_i->nat_cnt); + f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); /* destroy nat set cache */ nid = 0; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 1c73d879a9bc..847f5293ab6c 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -123,13 +123,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne, static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi) { - return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid * + return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid * NM_I(sbi)->dirty_nats_ratio / 100; } static inline bool excess_cached_nats(struct f2fs_sb_info *sbi) { - return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD; + return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD; } static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 6dbbe98dbdbf..a28cf0a3dd68 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -3203,6 +3203,9 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi, GET_SEGNO(sbi, fio->new_blkaddr))->type)); + invalidate_mapping_pages(META_MAPPING(sbi), + fio->new_blkaddr, fio->new_blkaddr); + stat_inc_inplace_blocks(fio->sbi); atomic64_inc(&(sbi->sec_stat.inplace_count)); diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index 9e13db994fdf..4711567f7758 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -18,9 +18,7 @@ static unsigned int shrinker_run_no; static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) { - long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt; - - return count > 0 ? 
count : 0; + return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT]; } static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) diff --git a/fs/proc/base.c b/fs/proc/base.c index 97f9807521fa..52a7618c2579 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3016,7 +3016,7 @@ static int proc_integrity_reset_cause(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { - if (task->integrity->reset_cause) + if (task->integrity->reset_cause != CAUSE_UNSET) seq_printf(m, "%s\n", tint_reset_cause_to_string( task->integrity->reset_cause)); else diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index bb3f59bcfcf5..656f9ff63edd 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -61,7 +61,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) memset(buf, 0, info->dqi_usable_bs); return sb->s_op->quota_read(sb, info->dqi_type, buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); } static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) @@ -70,7 +70,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) ssize_t ret; ret = sb->s_op->quota_write(sb, info->dqi_type, buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); if (ret != info->dqi_usable_bs) { quota_error(sb, "dquota write failed"); if (ret >= 0) @@ -283,7 +283,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk); goto out_buf; } - dquot->dq_off = (blk << info->dqi_blocksize_bits) + + dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; kfree(buf); @@ -558,7 +558,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, ret = -EIO; goto out_buf; } else { - ret = (blk << info->dqi_blocksize_bits) + sizeof(struct + ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index a73e5b34db41..2f02474a2825 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -158,6 +158,25 @@ static int v2_read_file_info(struct super_block *sb, int type) qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk); qinfo->dqi_ops = &v2r1_qtree_ops; } + ret = -EUCLEAN; + /* Some sanity checks of the read headers... 
*/ + if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits > + i_size_read(sb_dqopt(sb)->files[type])) { + quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).", + (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits, + i_size_read(sb_dqopt(sb)->files[type])); + goto out; + } + if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { + quota_error(sb, "Free block number too big (%u >= %u).", + qinfo->dqi_free_blk, qinfo->dqi_blocks); + goto out; + } + if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { + quota_error(sb, "Block with free entry too big (%u >= %u).", + qinfo->dqi_free_entry, qinfo->dqi_blocks); + goto out; + } ret = 0; out: up_read(&dqopt->dqio_sem); diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 9f721ee6690d..1c1839ca0960 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -85,6 +85,13 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_ARCH_WORKAROUND_3 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x3fff) + +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 + #ifndef __ASSEMBLY__ #include diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6c6fc1b01aca..6fe31e070800 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -195,4 +195,28 @@ static inline void cpu_smt_disable(bool force) { } static inline void cpu_smt_check_topology(void) { } #endif +/* + * These are used for a global "mitigations=" cmdline option for toggling + * optional CPU mitigations. + */ +enum cpu_mitigations { + CPU_MITIGATIONS_OFF, + CPU_MITIGATIONS_AUTO, + CPU_MITIGATIONS_AUTO_NOSMT, +}; + +extern enum cpu_mitigations cpu_mitigations; + +/* mitigations=off */ +static inline bool cpu_mitigations_off(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_OFF; +} + +/* mitigations=auto,nosmt */ +static inline bool cpu_mitigations_auto_nosmt(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; +} + #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/defex.h b/include/linux/defex.h index a20e580ba39c..90e64100891a 100644 --- a/include/linux/defex.h +++ b/include/linux/defex.h @@ -10,7 +10,7 @@ #define __CONFIG_SECURITY_DEFEX_H /* Defex init API */ -int task_defex_enforce(struct task_struct *p, struct file *f, int syscall); +int task_defex_enforce(struct task_struct *p, struct file *f, int syscall, ...); int task_defex_zero_creds(struct task_struct *tsk); int task_defex_user_exec(const char *new_file); void __init defex_load_rules(void); diff --git a/kernel/cpu.c b/kernel/cpu.c index bf6ef18ff0d8..529b5d3bf350 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2746,3 +2746,18 @@ void __init boot_cpu_hotplug_init(void) #endif this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); } + +enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO; + +static int __init mitigations_parse_cmdline(char *arg) +{ + if (!strcmp(arg, "off")) + cpu_mitigations = CPU_MITIGATIONS_OFF; + else if (!strcmp(arg, "auto")) + cpu_mitigations = CPU_MITIGATIONS_AUTO; + else if (!strcmp(arg, "auto,nosmt")) + cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; + + return 0; +} +early_param("mitigations", mitigations_parse_cmdline); diff --git a/security/samsung/defex_lsm/Makefile b/security/samsung/defex_lsm/Makefile index 6200c3c4c402..f7752eb62fa6 100644 --- a/security/samsung/defex_lsm/Makefile +++ b/security/samsung/defex_lsm/Makefile @@ -32,13 +32,18 @@ defex-y += core/defex_lsm.o defex-y += core/defex_main.o defex-y += core/defex_get_mode.o defex-y += core/defex_rules_proc.o +defex-y += 
core/defex_tailer.o defex-y += catch_engine/defex_catch_list.o defex-y += catch_engine/defex_ht.o +defex-y += defex_rules.o defex-$(CONFIG_COMPAT) += catch_engine/defex_catch_list_compat.o # Immutable Feature is applied with permissive mode first. DEFEX_DEFINES := -DDEFEX_PERMISSIVE_IM +# Integrity Feature is applied with permissive mode first. +DEFEX_DEFINES += -DDEFEX_PERMISSIVE_INT + ifeq ($(CONFIG_DEFEX_KERNEL_ONLY), y) DEFEX_DEFINES += -DDEFEX_KERNEL_ONLY ifeq ($(CONFIG_SAMSUNG_PRODUCT_SHIP), y) @@ -47,6 +52,7 @@ ifeq ($(CONFIG_DEFEX_KERNEL_ONLY), y) $(warning [DEFEX] Kernel_only & Noship) defex-y += debug/defex_debug.o defex-y += core/defex_sysfs.o + DEFEX_DEFINES += -DDEFEX_PERMISSIVE_INT DEFEX_DEFINES += -DDEFEX_PERMISSIVE_SP DEFEX_DEFINES += -DDEFEX_PERMISSIVE_TM DEFEX_DEFINES += -DDEFEX_PERMISSIVE_IM @@ -70,6 +76,7 @@ ifeq ($(SAFEPLACE_ENABLE), true) endif ifeq ($(INTEGRITY_ENABLE), true) + defex-y += feature_safeplace/defex_integrity.o DEFEX_DEFINES += -DDEFEX_INTEGRITY_ENABLE endif @@ -108,17 +115,18 @@ ifeq ($(TRUSTED_MAP_ENABLE), true) DEFEX_DEFINES += -DDEFEX_TRUSTED_MAP_ENABLE DEFEX_DEFINES += -DDEFEX_PERMISSIVE_TM #DEFEX_DEFINES += -DDEFEX_TM_DEFAULT_POLICY_ENABLE - ###defex-y += feature_trusted_map/defex_trusted_map.o + defex-y += feature_trusted_map/defex_trusted_map.o defex-y += feature_trusted_map/dtm.o - ###defex-y += feature_trusted_map/dtm_engine.o - ###defex-y += feature_trusted_map/dtm_log.o - ###defex-y += feature_trusted_map/dtm_utils.o - ###defex-y += feature_trusted_map/ptree.o + defex-y += feature_trusted_map/dtm_engine.o + defex-y += feature_trusted_map/dtm_log.o + defex-y += feature_trusted_map/dtm_utils.o + defex-y += feature_trusted_map/ptree.o endif ifneq (,$(filter userdebug eng, $(TARGET_BUILD_VARIANT))) defex-y += debug/defex_debug.o defex-y += core/defex_sysfs.o + DEFEX_DEFINES += -DDEFEX_PERMISSIVE_INT DEFEX_DEFINES += -DDEFEX_PERMISSIVE_SP DEFEX_DEFINES += -DDEFEX_PERMISSIVE_TM DEFEX_DEFINES += -DDEFEX_PERMISSIVE_IM @@ -131,7 +139,7 @@ else endif # kunit tests options: -ifeq ($(CONFIG_KUNIT), y) +ifeq ($(CONFIG_SEC_KUNIT), y) GCOV_PROFILE := y DEFEX_DEFINES += -DDEFEX_KUNIT_ENABLED else diff --git a/security/samsung/defex_lsm/cert/defex_sign.c b/security/samsung/defex_lsm/cert/defex_sign.c index b84c6aa813fd..8a957a9fe0b6 100644 --- a/security/samsung/defex_lsm/cert/defex_sign.c +++ b/security/samsung/defex_lsm/cert/defex_sign.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "include/defex_debug.h" #include "include/defex_sign.h" @@ -71,6 +72,9 @@ __visible_for_testing int defex_keyring_init(void) const struct cred *cred = current_cred(); static const char keyring_name[] = "defex_keyring"; + if (defex_keyring) + return err; + defex_keyring = defex_keyring_alloc(keyring_name, KUIDT_INIT(0), KGIDT_INIT(0), cred, KEY_ALLOC_NOT_IN_QUOTA); if (!defex_keyring) { @@ -93,20 +97,27 @@ __visible_for_testing int defex_public_key_verify_signature(unsigned char *pub_k key_ref_t key_ref; struct key *key; struct public_key_signature pks; + static const char key_name[] = "defex_key"; if (defex_keyring_init() != 0) return ret; - key_ref = key_create_or_update(make_key_ref(defex_keyring, 1), - "asymmetric", - NULL, - pub_key, - pub_key_size, - ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ), - KEY_ALLOC_NOT_IN_QUOTA); - +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) + key_ref = keyring_search(make_key_ref(defex_keyring, 1), &key_type_asymmetric, key_name); +#else + key_ref = keyring_search(make_key_ref(defex_keyring, 1), 
&key_type_asymmetric, key_name, true); +#endif + if (IS_ERR(key_ref)) { + key_ref = key_create_or_update(make_key_ref(defex_keyring, 1), + "asymmetric", + key_name, + pub_key, + pub_key_size, + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA); + } if (IS_ERR(key_ref)) { - printk(KERN_INFO "Invalid key reference\n"); + printk(KERN_INFO "Invalid key reference (%ld)\n", PTR_ERR(key_ref)); return ret; } @@ -137,7 +148,6 @@ __visible_for_testing int defex_public_key_verify_signature(unsigned char *pub_k ret = verify_signature(key, &pks); #endif key_ref_put(key_ref); - keyring_clear(defex_keyring); return ret; } #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */ diff --git a/security/samsung/defex_lsm/core/defex_common.c b/security/samsung/defex_lsm/core/defex_common.c index 07659b274166..e9ea1bae880c 100644 --- a/security/samsung/defex_lsm/core/defex_common.c +++ b/security/samsung/defex_lsm/core/defex_common.c @@ -236,9 +236,11 @@ struct file *defex_get_source_file(struct task_struct *p) return NULL; if (self) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) - down_read(&proc_mm->mmap_sem); + if (!down_read_trylock(&proc_mm->mmap_sem)) + return NULL; #else - down_read(&proc_mm->mmap_lock); + if (!down_read_trylock(&proc_mm->mmap_lock)) + return NULL; #endif if (file_addr != proc_mm->exe_file) { file_addr = proc_mm->exe_file; diff --git a/security/samsung/defex_lsm/core/defex_get_mode.c b/security/samsung/defex_lsm/core/defex_get_mode.c index 8b6fb0eae7e9..89b9797f018a 100644 --- a/security/samsung/defex_lsm/core/defex_get_mode.c +++ b/security/samsung/defex_lsm/core/defex_get_mode.c @@ -29,6 +29,17 @@ int defex_get_features(void) #endif /* DEFEX_PERMISSIVE_PED */ #endif /* DEFEX_PED_ENABLE */ +#ifdef DEFEX_INTEGRITY_ENABLE +#if !defined(DEFEX_PERMISSIVE_INT) + features |= GLOBAL_INTEGRITY_STATUS; +#else + if (global_integrity_status != 0) + features |= FEATURE_INTEGRITY; + if (global_integrity_status == 2) + features |= FEATURE_INTEGRITY_SOFT; +#endif /* DEFEX_PERMISSIVE_INT */ +#endif /* DEFEX_INTEGRITY_ENABLE */ + #ifdef DEFEX_SAFEPLACE_ENABLE #if !defined(DEFEX_PERMISSIVE_SP) features |= GLOBAL_SAFEPLACE_STATUS; @@ -40,6 +51,17 @@ int defex_get_features(void) #endif /* DEFEX_PERMISSIVE_SP */ #endif /* DEFEX_SAFEPLACE_ENABLE */ +#ifdef DEFEX_TRUSTED_MAP_ENABLE +#if !defined(DEFEX_PERMISSIVE_TM) + features |= GLOBAL_TRUSTED_MAP_STATUS; +#else + if (global_trusted_map_status != 0) + features |= FEATURE_TRUSTED_MAP; + if (global_trusted_map_status & DEFEX_TM_PERMISSIVE_MODE) + features |= FEATURE_TRUSTED_MAP_SOFT; +#endif /* DEFEX_PERMISSIVE_TM */ +#endif /* DEFEX_TRUSTED_MAP_ENABLE */ + #ifdef DEFEX_IMMUTABLE_ENABLE #if !defined(DEFEX_PERMISSIVE_IM) features |= GLOBAL_IMMUTABLE_STATUS; diff --git a/security/samsung/defex_lsm/core/defex_main.c b/security/samsung/defex_lsm/core/defex_main.c index ca5b86f460b4..22a1583319bd 100644 --- a/security/samsung/defex_lsm/core/defex_main.c +++ b/security/samsung/defex_lsm/core/defex_main.c @@ -117,7 +117,7 @@ __visible_for_testing void defex_report_violation(const char *violation, uint64_ } #endif /* DEFEX_DSMS_ENABLE */ -#ifdef DEFEX_SAFEPLACE_ENABLE +#if defined(DEFEX_SAFEPLACE_ENABLE) || defined(DEFEX_TRUSTED_MAP_ENABLE) __visible_for_testing long kill_process(struct task_struct *p) { read_lock(&tasklist_lock); @@ -125,7 +125,7 @@ __visible_for_testing long kill_process(struct task_struct *p) read_unlock(&tasklist_lock); return 0; } -#endif /* DEFEX_SAFEPLACE_ENABLE */ +#endif /* DEFEX_SAFEPLACE_ENABLE 
|| DEFEX_TRUSTED_MAP_ENABLE */ #ifdef DEFEX_PED_ENABLE __visible_for_testing long kill_process_group(struct task_struct *p, int tgid, int pid) @@ -144,8 +144,9 @@ __visible_for_testing int task_defex_is_secured(struct defex_context *dc) { struct file *exe_file = get_dc_process_file(dc); struct task_struct *p = dc->task->group_leader; + struct task_struct *task = dc->task; char *proc_name = get_dc_process_name(dc); - int is_secured = 1; + int is_secured = 0; if (!get_dc_process_dpath(dc)) return is_secured; @@ -158,6 +159,10 @@ __visible_for_testing int task_defex_is_secured(struct defex_context *dc) return DEFEX_ALLOW; } + if (!strncmp(task->comm, "FinalizerDaemon", strlen(task->comm))) { + return DEFEX_ALLOW; + } + is_secured = !rules_lookup(proc_name, feature_ped_exception, exe_file); return is_secured; } @@ -364,6 +369,34 @@ __visible_for_testing int task_defex_check_creds(struct defex_context *dc) } #endif /* DEFEX_PED_ENABLE */ +#ifdef DEFEX_INTEGRITY_ENABLE +__visible_for_testing int task_defex_integrity(struct defex_context *dc) +{ + int ret = DEFEX_ALLOW, is_violation = 0; + char *proc_file, *new_file; + struct task_struct *p = dc->task; + + if (!get_dc_target_dpath(dc)) + goto out; + + new_file = get_dc_target_name(dc); + is_violation = rules_lookup(new_file, feature_integrity_check, dc->target_file); + + if (is_violation == DEFEX_INTEGRITY_FAIL) { + ret = -DEFEX_DENY; + proc_file = get_dc_process_name(dc); + + pr_crit("defex: integrity violation [task=%s (%s), child=%s, uid=%d]\n", + p->comm, proc_file, new_file, uid_get_value(dc->cred.uid)); +#ifdef DEFEX_DSMS_ENABLE + defex_report_violation(INTEGRITY_VIOLATION, 0, dc, 0, 0, 0, 0); +#endif /* DEFEX_DSMS_ENABLE */ + } +out: + return ret; +} +#endif /* DEFEX_INTEGRITY_ENABLE */ + #ifdef DEFEX_SAFEPLACE_ENABLE /* Safeplace feature decision function */ __visible_for_testing int task_defex_safeplace(struct defex_context *dc) @@ -379,44 +412,48 @@ __visible_for_testing int task_defex_safeplace(struct defex_context *dc) goto out; new_file = get_dc_target_name(dc); - is_violation = rules_lookup(new_file, feature_safeplace_path, dc->target_file); -#ifdef DEFEX_INTEGRITY_ENABLE - if (is_violation != DEFEX_INTEGRITY_FAIL) -#endif /* DEFEX_INTEGRITY_ENABLE */ - is_violation = !is_violation; + is_violation = !rules_lookup(new_file, feature_safeplace_path, dc->target_file); if (is_violation) { ret = -DEFEX_DENY; proc_file = get_dc_process_name(dc); -#ifdef DEFEX_INTEGRITY_ENABLE - if (is_violation == DEFEX_INTEGRITY_FAIL) { - pr_crit("defex: integrity violation [task=%s (%s), child=%s, uid=%d]\n", - p->comm, proc_file, new_file, uid_get_value(dc->cred.uid)); -#ifdef DEFEX_DSMS_ENABLE - defex_report_violation(INTEGRITY_VIOLATION, 0, dc, 0, 0, 0, 0); -#endif /* DEFEX_DSMS_ENABLE */ - - /* Temporary make permissive mode for tereble - * system image is changed as google's and defex might not work - */ - ret = DEFEX_ALLOW; - } - else -#endif /* DEFEX_INTEGRITY_ENABLE */ - { - pr_crit("defex: safeplace violation [task=%s (%s), child=%s, uid=%d]\n", - p->comm, proc_file, new_file, uid_get_value(dc->cred.uid)); + pr_crit("defex: safeplace violation [task=%s (%s), child=%s, uid=%d]\n", + p->comm, proc_file, new_file, uid_get_value(dc->cred.uid)); #ifdef DEFEX_DSMS_ENABLE defex_report_violation(SAFEPLACE_VIOLATION, 0, dc, 0, 0, 0, 0); #endif /* DEFEX_DSMS_ENABLE */ - } } out: return ret; } #endif /* DEFEX_SAFEPLACE_ENABLE */ +#ifdef DEFEX_TRUSTED_MAP_ENABLE +/* Trusted map feature decision function */ +__visible_for_testing int 
task_defex_trusted_map(struct defex_context *dc, va_list ap) +{ + int ret = DEFEX_ALLOW, argc; + void *argv; + + if (!CHECK_ROOT_CREDS(&dc->cred)) + goto out; + + argc = va_arg(ap, int); + argv = va_arg(ap, void *); +#ifdef DEFEX_DEBUG_ENABLE + if (argc <= 0) + pr_crit("[DEFEX][DTM] Invalid trusted map arguments - check integration on fs/exec.c (argc %d)", argc); +#endif + + ret = defex_trusted_map_lookup(dc, argc, argv); + if (defex_tm_mode_enabled(DEFEX_TM_PERMISSIVE_MODE)) + ret = DEFEX_ALLOW; +out: + return ret; +} +#endif /* DEFEX_TRUSTED_MAP_ENABLE */ + #ifdef DEFEX_IMMUTABLE_ENABLE /* Immutable feature decision function */ @@ -468,12 +505,15 @@ __visible_for_testing int task_defex_immutable(struct defex_context *dc, int att #endif /* DEFEX_IMMUTABLE_ENABLE */ /* Main decision function */ -int task_defex_enforce(struct task_struct *p, struct file *f, int syscall) +int task_defex_enforce(struct task_struct *p, struct file *f, int syscall, ...) { int ret = DEFEX_ALLOW; int feature_flag; const struct local_syscall_struct *item; struct defex_context dc; +#ifdef DEFEX_TRUSTED_MAP_ENABLE + va_list ap; +#endif if (boot_state_unlocked) return ret; @@ -511,6 +551,23 @@ int task_defex_enforce(struct task_struct *p, struct file *f, int syscall) } #endif /* DEFEX_PED_ENABLE */ +#ifdef DEFEX_INTEGRITY_ENABLE + /* Integrity feature */ + if (feature_flag & FEATURE_INTEGRITY) { + if (syscall == __DEFEX_execve) { + ret = task_defex_integrity(&dc); + if (ret == -DEFEX_DENY) { + if (!(feature_flag & FEATURE_INTEGRITY_SOFT)) { + release_defex_context(&dc); + put_task_struct(p); + kill_process(p); + return -DEFEX_DENY; + } + } + } + } +#endif /* DEFEX_INTEGRITY_ENABLE */ + #ifdef DEFEX_SAFEPLACE_ENABLE /* Safeplace feature */ if (feature_flag & FEATURE_SAFEPLACE) { @@ -542,6 +599,23 @@ int task_defex_enforce(struct task_struct *p, struct file *f, int syscall) } } #endif /* DEFEX_IMMUTABLE_ENABLE */ + +#ifdef DEFEX_TRUSTED_MAP_ENABLE + /* Trusted map feature */ + if (feature_flag & FEATURE_TRUSTED_MAP) { + if (syscall == __DEFEX_execve) { + va_start(ap, syscall); + ret = task_defex_trusted_map(&dc, ap); + va_end(ap); + if (ret == -DEFEX_DENY) { + if (!(feature_flag & FEATURE_TRUSTED_MAP_SOFT)) { + kill_process(p); + goto do_deny; + } + } + } + } +#endif /* DEFEX_TRUSTED_MAP_ENABLE */ do_allow: release_defex_context(&dc); put_task_struct(p); diff --git a/security/samsung/defex_lsm/core/defex_rules_proc.c b/security/samsung/defex_lsm/core/defex_rules_proc.c index 7a81175a1343..3751d97ada28 100644 --- a/security/samsung/defex_lsm/core/defex_rules_proc.c +++ b/security/samsung/defex_lsm/core/defex_rules_proc.c @@ -18,6 +18,10 @@ #include "include/defex_internal.h" #include "include/defex_rules.h" #include "include/defex_sign.h" +#ifdef DEFEX_TRUSTED_MAP_ENABLE +#include "include/defex_tailer.h" +#include "include/ptree.h" +#endif #define LOAD_FLAG_DPOLICY 0x01 #define LOAD_FLAG_DPOLICY_SYSTEM 0x02 @@ -58,9 +62,16 @@ __visible_for_testing unsigned char packed_rules_primary[DEFEX_RULES_ARRAY_SIZE] __visible_for_testing unsigned char packed_rules_primary[DEFEX_RULES_ARRAY_SIZE] __ro_after_init = {0}; #endif /* DEFEX_KERNEL_ONLY */ static unsigned char *packed_rules_secondary; +#ifdef DEFEX_TRUSTED_MAP_ENABLE +struct PPTree dtm_tree; +#endif #endif /* DEFEX_RAMDISK_ENABLE */ +#ifdef DEFEX_TRUSTED_MAP_ENABLE +/* In loaded policy, title of DTM's section; set by tailer -t in buildscript/build_external/defex. 
*/ +#define DEFEX_DTM_SECTION_NAME "dtm_rules" +#endif #ifdef DEFEX_INTEGRITY_ENABLE @@ -238,6 +249,13 @@ __visible_for_testing int defex_integrity_default(const char *file_path) #if defined(DEFEX_RAMDISK_ENABLE) +#ifdef DEFEX_TRUSTED_MAP_ENABLE +static const unsigned char *find_policy_section(const char *name, const char *data, int data_size, long *section_size) +{ + return data_size > 0 ? defex_tailerp_find(data, data_size, name, section_size) : 0; +} +#endif + __visible_for_testing int load_rules_common(struct file *f, int flags) { int res = -1, data_size, rules_size; @@ -269,20 +287,29 @@ __visible_for_testing int load_rules_common(struct file *f, int flags) #endif if (!res) { + const unsigned char *policy_data = NULL; /* where additional features like DTM could look for policy data */ if (!(load_flags & (LOAD_FLAG_DPOLICY | LOAD_FLAG_DPOLICY_SYSTEM))) { if (rules_size > sizeof(packed_rules_primary)) { res = -1; goto do_clean; } memcpy(packed_rules_primary, data_buff, rules_size); + policy_data = packed_rules_primary; if (flags & LOAD_FLAG_DPOLICY_SYSTEM) load_flags |= LOAD_FLAG_SYSTEM_FIRST; } else { if (rules_size > 0) { - packed_rules_secondary = data_buff; + policy_data = packed_rules_secondary = data_buff; data_buff = NULL; } } +#ifdef DEFEX_TRUSTED_MAP_ENABLE + if (policy_data && !dtm_tree.data) { /* DTM not yet initialized */ + const unsigned char *dtm_section = find_policy_section(DEFEX_DTM_SECTION_NAME, policy_data, rules_size, 0); + if (dtm_section) + pptree_set_data(&dtm_tree, dtm_section); + } +#endif load_flags |= flags; res = rules_size; } @@ -504,7 +531,8 @@ __visible_for_testing int lookup_tree(const char *file_path, int attribute, stru if (cur_item->feature_type & attribute) { #ifdef DEFEX_INTEGRITY_ENABLE /* Integrity acceptable only for files */ - if ((cur_item->feature_type & feature_is_file) && f) { + if ((cur_item->feature_type & feature_integrity_check) && + (cur_item->feature_type & feature_is_file) && f) { if (defex_integrity_default(file_path) && defex_check_integrity(f, cur_item->integrity)) return DEFEX_INTEGRITY_FAIL; diff --git a/security/samsung/defex_lsm/core/defex_tailer.c b/security/samsung/defex_lsm/core/defex_tailer.c new file mode 100644 index 000000000000..20799337bcb4 --- /dev/null +++ b/security/samsung/defex_lsm/core/defex_tailer.c @@ -0,0 +1,98 @@ +/* Routines for handling archival-like files where each new contents is + * appended and linked backwards - memory-only variants. + */ + +#include +#include "include/defex_tailer.h" + +/* Reads int from 4-byte big-endian */ +static int be2int(const unsigned char *p) +{ + return (*p << 24) + (p[1] << 16) + (p[2] << 8) + p[3]; +} + +long defex_tailerp_has_suffix(const unsigned char *p, long size) +{ + return !p || size < TAIL_MAGIC_LEN + 8 + 1 + 2 || + memcmp(p + size - TAIL_MAGIC_LEN, TAIL_MAGIC, TAIL_MAGIC_LEN) + ? 
0 : size - TAIL_MAGIC_LEN; +} + +int defex_tailerp_iterate(const unsigned char *p, long size, + int (*task)(const char *title, int titleLen, + const unsigned char *start, long size, + void *data), + void *data) +{ + long start, offset = defex_tailerp_has_suffix(p, size); + char buffer[TAIL_MAX_TITLE_LENGTH]; + + if (!offset) + return 0; + for (offset -= 2; ; offset = start - 2) { + int i, ttlLength; + long size; + + /* Possibly change behavior depending on version + * (p[offset] and p[offset + 1]) + */ + ttlLength = p[--offset]; + if (offset - 4 - 4 - ttlLength < 0) + return -1; + memcpy(buffer, p + (offset -= ttlLength), ttlLength); + size = be2int(p + (offset -= 4)); + start = be2int(p + (offset -= 4)); + if (task) { + i = (*task)(buffer, ttlLength, + p + start, size, data); + if (i < 0) + return i; + } + if (!start) + break; + } + return 0; +} + +/* Auxiliary data for finding an entry */ +struct find_data { + const char *title; + int titleLen; + int found; + const unsigned char *start; + long size; +}; + +static int tailerp_iteratefind(const char *title, int titleLen, + const unsigned char *start, long size, + void *data) +{ + struct find_data *fd = (struct find_data *)data; + + if (fd->titleLen == titleLen && + !memcmp(title, fd->title, titleLen)) { + fd->found = 1; + fd->start = start; + fd->size = size; + return -1; + } + return 0; +} + +const unsigned char *defex_tailerp_find(const unsigned char *p, long size, + const char *title, long *sizep) +{ + struct find_data fd; + + fd.title = title; + fd.titleLen = strlen(title); + fd.found = 0; + if (!defex_tailerp_iterate(p, size, tailerp_iteratefind, &fd)) + return 0; + if (fd.found) { + if (sizep) + *sizep = fd.size; + return fd.start; + } + return 0; +} diff --git a/security/samsung/defex_lsm/debug/defex_debug.c b/security/samsung/defex_lsm/debug/defex_debug.c index 04a6117f9a5b..f46b38f45931 100644 --- a/security/samsung/defex_lsm/debug/defex_debug.c +++ b/security/samsung/defex_lsm/debug/defex_debug.c @@ -50,7 +50,7 @@ void blob(const char *buffer, const size_t bufLen, const int lineSize) offset += snprintf(stringToPrint + offset, MAX_DATA_LEN - offset, " "); } - snprintf(stringToPrint + offset, MAX_DATA_LEN - offset, " |"); + offset += snprintf(stringToPrint + offset, MAX_DATA_LEN - offset, " |"); pr_info("%s\n", stringToPrint); memset(stringToPrint, 0, MAX_DATA_LEN); i += line; @@ -137,7 +137,8 @@ __visible_for_testing ssize_t debug_store(struct kobject *kobj, struct kobj_attr "gid=", "pe_status=", "im_status=", - "sp_status=" + "sp_status=", + "int_status=" }; if (!buf || !p) @@ -167,6 +168,9 @@ __visible_for_testing ssize_t debug_store(struct kobject *kobj, struct kobj_attr case DBG_SET_SP_STATUS: safeplace_status_store(buf + l); break; + case DBG_SET_INT_STATUS: + integrity_status_store(buf + l); + break; default: break; } diff --git a/security/samsung/defex_lsm/defex_rules.c b/security/samsung/defex_lsm/defex_rules.c index 6b3ac5a36c4a..dd99c1b69bc7 100644 --- a/security/samsung/defex_lsm/defex_rules.c +++ b/security/samsung/defex_lsm/defex_rules.c @@ -158,6 +158,7 @@ {feature_safeplace_path,"/vendor/bin/init.insmod.sh"}, {feature_safeplace_path,"/vendor/bin/hw/android.hardware.usb@1.3-service.coral"}, {feature_safeplace_path,"/vendor/bin/hw/vendor.qti.hardware.perf-hal-service"}, + {feature_safeplace_path,"/vendor/bin/iod"}, {feature_safeplace_path,"/system/bin/rdxd"}, {feature_safeplace_path,"/system/system_ext/bin/dpmd"}, {feature_safeplace_path,"/vendor/bin/init.qti.dcvs.sh"}, @@ -213,6 +214,7 @@ 
{feature_safeplace_path,"/apex/com.android.art/bin/artd"}, {feature_safeplace_path,"/apex/com.android.runtime/bin/crash_dump32"}, {feature_safeplace_path,"/apex/com.android.runtime/bin/crash_dump64"}, + {feature_safeplace_path,"/system/bin/lpdump"}, {feature_safeplace_path,"/tmp/update_binary;updater_intermediates/updater;obj/EXECUTABLES"}, {feature_safeplace_path,"/tmp/update-binary"}, {feature_safeplace_path,"/system/bin/install-recovery.sh"}, /* DEFAULT */ @@ -242,6 +244,13 @@ {feature_immutable_src_exception,"/init"}, {feature_immutable_src_exception,"/system/bin/init"}, {feature_immutable_src_exception,"/system/bin/lshal"}, + {feature_integrity_check,"/vendor/bin/hw/android.hardware.gatekeeper@1.0-service"}, + {feature_integrity_check,"/vendor/bin/hw/android.hardware.keymaster@4.0-service"}, + {feature_integrity_check,"/vendor/bin/hw/android.hardware.security.keymint-service"}, + {feature_integrity_check,"/vendor/bin/hw/vendor.samsung.hardware.tlc.kg@1.0-service"}, + {feature_integrity_check,"/vendor/bin/vendor.samsung.hardware.security.wsm@1.0-service"}, + {feature_integrity_check,"/vendor/bin/vaultkeeperd"}, + {feature_integrity_check,"/vendor/bin/hw/vendor.samsung.hardware.tlc.kg@1.1-service"}, /* Rules will be added here */ /* Never modify the above line. Rules will be added for buildtime */ #endif /* if 0 */ diff --git a/security/samsung/defex_lsm/feature_safeplace/defex_integrity.c b/security/samsung/defex_lsm/feature_safeplace/defex_integrity.c new file mode 100644 index 000000000000..aaf3ba11fdf4 --- /dev/null +++ b/security/samsung/defex_lsm/feature_safeplace/defex_integrity.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include "include/defex_internal.h" + +#ifdef DEFEX_PERMISSIVE_INT +unsigned char global_integrity_status = 2; +#else +unsigned char global_integrity_status = 1; +#endif /* DEFEX_PERMISSIVE_INT */ + +int integrity_status_store(const char *status_str) +{ + int ret; + unsigned int status; + + if (!status_str) + return -EINVAL; + + ret = kstrtouint(status_str, 10, &status); + if (ret != 0 || status > 2) + return -EINVAL; + + global_integrity_status = status; + + return 0; +} diff --git a/security/samsung/defex_lsm/feature_trusted_map/defex_trusted_map.c b/security/samsung/defex_lsm/feature_trusted_map/defex_trusted_map.c new file mode 100644 index 000000000000..c59d28eafe0b --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/defex_trusted_map.c @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#include "include/defex_internal.h" + +unsigned char global_trusted_map_status = DEFEX_TM_ENFORCING_MODE +#ifdef DEFEX_PERMISSIVE_TM + | DEFEX_TM_PERMISSIVE_MODE +#endif + | DEFEX_TM_DEBUG_VIOLATIONS + ; diff --git a/security/samsung/defex_lsm/feature_trusted_map/dtm.c b/security/samsung/defex_lsm/feature_trusted_map/dtm.c new file mode 100644 index 000000000000..cd6cd09e62ac --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/dtm.c @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif + +#include "include/dtm.h" +#include "include/dtm_engine.h" +#include "include/dtm_log.h" +#include "include/dtm_utils.h" + +/* From fs/exec.c: SM8150_Q, SM8250_Q, SM8250_R, exynos9820, exynos9830 */ +struct user_arg_ptr { +#ifdef CONFIG_COMPAT + bool is_compat; +#endif + union { + const char __user *const __user *native; +#ifdef CONFIG_COMPAT + const compat_uptr_t __user *compat; +#endif + } ptr; +}; + +/* From fs/exec.c: SM8150_Q, SM8250_Q, SM8250_R, exynos9820, exynos9830 */ +static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) +{ + const char __user *native; + +#ifdef CONFIG_COMPAT + if (unlikely(argv.is_compat)) { + compat_uptr_t compat; + + if (get_user(compat, argv.ptr.compat + nr)) + return ERR_PTR(-EFAULT); + + return compat_ptr(compat); + } +#endif + + if (get_user(native, argv.ptr.native + nr)) + return ERR_PTR(-EFAULT); + + return native; +} + +static void dtm_kfree_args(struct dtm_context *context) +{ + const char **argv; + int arg, to_free; + + if (unlikely(!is_dtm_context_valid(context))) + return; + argv = context->callee_argv; + arg = min_t(int, context->callee_argc, DTM_MAX_ARGC); + to_free = context->callee_copied_argc; + context->callee_copied_argc = 0; + while (--arg >= 0 && to_free > 0) { + if (!argv[arg]) + continue; + kfree(argv[arg]); + to_free--; + } +} + +/* + * Gets call argument value, copying from user if needed. 
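+ *
+ * A minimal usage sketch, assuming a context already initialized by
+ * defex_trusted_map_lookup(); the local variable names are illustrative:
+ *
+ *	const char *argv0 = dtm_get_callee_arg(context, 0);
+ *	const char *argv1 = dtm_get_callee_arg(context, 1);
+ *
+ *	if (argv0)
+ *		pr_info("[DEFEX] program=%s arg1=%s",
+ *			argv0, argv1 ? argv1 : "(none)");
+ *
+ * NULL is returned on any failure; successful copies are cached in
+ * callee_argv and released later by dtm_kfree_args().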
+ */ +const char *dtm_get_callee_arg(struct dtm_context *context, int arg_index) +{ + struct user_arg_ptr argv; + const char __user *user_str; + char *copy; + int max_argc, arg_len, copy_len; + + if (unlikely(!is_dtm_context_valid(context))) + return NULL; + max_argc = min_t(int, context->callee_argc, DTM_MAX_ARGC); + if (unlikely((arg_index < 0) || (arg_index >= max_argc))) + return NULL; + if (context->callee_argv[arg_index]) + return context->callee_argv[arg_index]; + + argv = *(struct user_arg_ptr *)context->callee_argv_ref; + user_str = get_user_arg_ptr(argv, arg_index); + if (IS_ERR(user_str)) + return NULL; + + arg_len = strnlen_user(user_str, MAX_ARG_STRLEN); + if (unlikely(!arg_len)) + return NULL; + + copy_len = min_t(int, arg_len, DTM_MAX_ARG_STRLEN); + copy = kzalloc(copy_len, GFP_KERNEL); + if (unlikely(!copy)) + return NULL; + + if (unlikely(copy_from_user(copy, user_str, copy_len))) + goto out_free_copy; + copy[copy_len - 1] = '\0'; + + context->callee_argv[arg_index] = copy; + context->callee_copied_argc++; + context->callee_copied_args_len += copy_len; + context->callee_total_args_len += arg_len; + return copy; + +out_free_copy: + kfree(copy); + return NULL; +} + +/* + * Initializes dtm context data structure. + */ +__visible_for_testing bool dtm_context_get(struct dtm_context *context, + struct defex_context *defex_context, + int callee_argc, + void *callee_argv_ref) +{ + memset(context, 0, sizeof(*context)); + context->defex_context = defex_context; + context->callee_argc = callee_argc; + context->callee_argv_ref = callee_argv_ref; + return true; +} + +/* + * Releases resources associated to dtm context. + */ +__visible_for_testing void dtm_context_put(struct dtm_context *context) +{ + dtm_kfree_args(context); +} + +/* + * Gets program name for current call. + */ +const char *dtm_get_program_name(struct dtm_context *context) +{ + if (unlikely(!is_dtm_context_valid(context))) + return NULL; + if (context->program_name) + return context->program_name; + context->program_name = dtm_get_callee_arg(context, 0); + if (context->program_name == NULL) + context->program_name = DTM_UNKNOWN; + return context->program_name; +} + +/** + * Gets stdin mode bit for current call. + */ +int dtm_get_stdin_mode_bit(struct dtm_context *context) +{ + if (unlikely(!context)) + return DTM_FD_MODE_ERROR; + if (!context->stdin_mode_bit) + context->stdin_mode_bit = dtm_get_fd_mode_bit(0); + return context->stdin_mode_bit; +} + +/** + * Gets stdin mode for current call. + */ +const char *dtm_get_stdin_mode(struct dtm_context *context) +{ + if (unlikely(!context)) + return NULL; + if (!context->stdin_mode) + context->stdin_mode = dtm_get_fd_mode_bit_name( + dtm_get_stdin_mode_bit(context)); + return context->stdin_mode; +} + +int defex_trusted_map_lookup(struct defex_context *defex_context, + int callee_argc, void *callee_argv_ref) +{ + int ret = DTM_DENY; + struct dtm_context context; + + if (unlikely(!defex_context || !(defex_context->task))) + goto out; + if (unlikely(!dtm_context_get(&context, defex_context, callee_argc, callee_argv_ref))) + goto out; + ret = dtm_enforce(&context); + dtm_context_put(&context); +out: + return ret; +} diff --git a/security/samsung/defex_lsm/feature_trusted_map/dtm_engine.c b/security/samsung/defex_lsm/feature_trusted_map/dtm_engine.c new file mode 100644 index 000000000000..4f125ad3d9d1 --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/dtm_engine.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "include/defex_rules.h" +#include "include/dtm.h" +#include "include/dtm_engine.h" +#include "include/dtm_log.h" +#include "include/dtm_utils.h" +#include "include/ptree.h" + +#define DTM_ANY_VALUE "*" /* wildcard value for files and arguments */ +#if defined(DEFEX_TM_DEFAULT_POLICY_ENABLE) || defined(DEFEX_KERNEL_ONLY) +/* Kernel-only builds don't use DEFEX's dynamic policy loading mechanism. */ +#define USE_EMBEDDED_POLICY +static struct PPTree embedded_header; +/* File with hardcoded policy */ +#include "dtm_engine_defaultpolicy.h" +#endif + +#ifdef DEFEX_KUNIT_ENABLED +static struct PPTree override_header, *pptree_override; + +void dtm_engine_override_data(const unsigned char *p) +{ + if (p) { + pr_warn("[DEFEX] dtm_engine data overridden."); + pptree_set_data(pptree_override = &override_header, p); + } else { + pr_warn("[DEFEX] dtm_engine override data back to normal."); + pptree_override = 0; + } +} +#endif + +static int dtm_check_stdin(struct dtm_context *context, int allowed_stdin_modes) +{ + if (unlikely(!(dtm_get_stdin_mode_bit(context) & allowed_stdin_modes))) { + dtm_report_violation(DTM_STDIN_VIOLATION, context); + return DTM_DENY; + } + return DTM_ALLOW; +} + +/* + * Enforces DTM policy for an exec system call. + */ +int dtm_enforce(struct dtm_context *context) +{ + const char *callee_path, *caller_path; + const char *program_name; + int ret, argc, call_argc; + const char *argument_value; + struct PPTree *pptree; /* effective header of policy rules */ + struct PPTreeContext pp_ctx; /* context for policy search */ + static char first_run = 1; /* flag for one-time policy actions */ + + if (first_run) { + if (dtm_tree.data) + pr_info("[DEFEX] DTM engine: policy found."); + else + pr_warn("[DEFEX] DTM engine: dynamic policy not loaded."); + first_run = 0; +#ifdef USE_EMBEDDED_POLICY + pptree_set_data(&embedded_header, dtm_engine_defaultpolicy); +#endif + } + + pr_info("[DEFEX] pid : %d %d", current->tgid, current->pid); + + if (!context || unlikely(!is_dtm_context_valid(context))) { + pr_info("(0) [DEFEX] TMED no or invalid context."); + return DTM_DENY; + } + /* Check callee */ + callee_path = dtm_get_callee_path(context); + if (unlikely(!callee_path)) { + pr_info("(1) [DEFEX] TMED null callee."); + return DTM_DENY; + } + +#ifdef DEFEX_KUNIT_ENABLED + if (pptree_override) /* test code has opportunity to use test policy instead */ + pptree = pptree_override; + else +#endif +#ifdef USE_EMBEDDED_POLICY /* try dynamic policy first, use embedded if not found */ + pptree = dtm_tree.data ? &dtm_tree : &embedded_header; +#else /* only dynamically loaded policy is acceptable */ + pptree = &dtm_tree; +#endif + if (!pptree->data) { /* Should never happen */ + pr_warn("(0) [DEFEX] DTM engine: neither dynamic nor hardcoded rules loaded."); + return DTM_ALLOW; + } + memset(&pp_ctx, 0, sizeof(pp_ctx)); + if (!(pptree_find_path(pptree, + *callee_path == '/' ? 
+ callee_path + 1 : callee_path, '/', &pp_ctx) && + (pp_ctx.types & PTREE_DATA_PATH))) { + pr_info("(2) [DEFEX] TME callee '%s' not found.", callee_path); + return DTM_ALLOW; + } + /* Check caller */ + caller_path = dtm_get_caller_path(context); + if (unlikely(!caller_path)) { + pr_info("(3) [DEFEX] TMED callee '%s': null caller.", callee_path); + return DTM_DENY; + } + if (!(pptree_find_path(pptree, caller_path + 1, '/', &pp_ctx) && + (pp_ctx.types & PTREE_DATA_PATH))) { + pr_info("(4) [DEFEX] TMED callee '%s': caller '%s' not found.", + callee_path, caller_path); + dtm_report_violation(DTM_CALLER_VIOLATION, context); + return DTM_DENY; + } + /* Check program, if any */ + program_name = dtm_get_program_name(context); + if (!program_name) { + pr_info("(5) [DEFEX] TMED callee '%s', caller '%s': null program.", + callee_path, caller_path); + return DTM_DENY; + } + pp_ctx.types |= PTREE_FIND_PEEK; + if (pptree_child_count(pptree, &pp_ctx) == 1 && + pptree_find_path(pptree, DTM_ANY_VALUE, 0, &pp_ctx)) { + pr_info("[DEFEX] TME callee '%s', caller '%s': program may be '*...'.", + callee_path, caller_path); + pp_ctx.types |= PTREE_FIND_PEEKED; + pptree_find_path(pptree, 0, 0, &pp_ctx); + } else { + pp_ctx.types &= ~PTREE_FIND_PEEK; + if (!(pptree_find_path(pptree, program_name, 0, &pp_ctx) && + (pp_ctx.types & PTREE_DATA_INT2))) { + pr_info("(6) [DEFEX] TMED callee '%s', caller '%s': program '%s' not found.", + callee_path, caller_path, program_name); + dtm_report_violation(DTM_PROGRAM_VIOLATION, context); + return DTM_DENY; + } + } + /* Check standard input mode */ + if (pp_ctx.types & PTREE_DATA_INT2) { + ret = dtm_check_stdin(context, pp_ctx.value.int2); + if (unlikely(ret != DTM_ALLOW)) { + pr_info("(7) [DEFEX] TMED callee '%s', caller '%s', program '%s': stdin mode %d, should be %d.", + callee_path, caller_path, + program_name ? program_name : "(null)", + dtm_get_stdin_mode_bit(context), pp_ctx.value.int2); + return ret; + } + } + /* Check program arguments, if any */ + pp_ctx.types |= PTREE_FIND_CONTINUE; + for (call_argc = context->callee_argc, argc = 1; + argc <= call_argc && pptree_child_count(pptree, &pp_ctx); + ++argc) { + pp_ctx.types |= PTREE_FIND_PEEK; + if (pptree_find_path(pptree, DTM_ANY_VALUE, 0, &pp_ctx)) { + pr_info("(8) [DEFEX] TME callee '%s', caller '%s', program '%s': any arguments accepted.", + callee_path, caller_path, program_name); + return DTM_ALLOW; + } + pp_ctx.types &= PTREE_FIND_PEEKED; + pp_ctx.types |= PTREE_FIND_CONTINUE; + argument_value = dtm_get_callee_arg(context, argc); + if (!pptree_find_path(pptree, argument_value, 0, &pp_ctx)) { + pr_info("(9) [DEFEX] TMED callee '%s', caller '%s', program '%s': argument '%s' (%d of %d) not found.", + callee_path, caller_path, program_name, + argument_value ? argument_value : "(null)", + argc, call_argc); + dtm_report_violation(DTM_ARGUMENTS_VIOLATION, context); + return DTM_DENY; + } + if (pp_ctx.value.bits) { + pr_info("(10) [DEFEX] TME callee '%s', caller '%s', program '%s': argument '%s' accepts '*'.", + callee_path, caller_path, program_name, + argument_value ? 
argument_value : "(null)"); + return DTM_ALLOW; + } + } + if (call_argc && argc > call_argc) + pr_info("[DEFEX] TME callee '%s', caller '%s', program '%s': all %d argument(s) checked.", + callee_path, caller_path, program_name, call_argc); + return DTM_ALLOW; +} diff --git a/security/samsung/defex_lsm/feature_trusted_map/dtm_log.c b/security/samsung/defex_lsm/feature_trusted_map/dtm_log.c new file mode 100644 index 000000000000..939808581c1d --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/dtm_log.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include "include/dtm.h" +#include "include/dtm_log.h" +#include "include/dtm_utils.h" + +#define DTM_MAX_LOG_SIZE (1024) +#define DTM_MAX_DETAIL_SIZE (1024) + +#ifdef DEFEX_DSMS_ENABLE +#include +#else +#define DSMS_SUCCESS (0) +#define dsms_send_message(feature_code, message, value) (DSMS_SUCCESS) +#endif + +static inline bool should_send_dsms_event(void) +{ +#ifdef DEFEX_DSMS_ENABLE + return IS_ENABLED(CONFIG_SECURITY_DSMS); +#else + return false; +#endif +} + +__visible_for_testing void dtm_append_argv(char *message, size_t size, + char separator, + int argc, const char **argv) +{ + const char *from; + char *to; + size_t len; + int arg, available; + + if (!message || size <= 0 || !separator) + return; + if (argc <= 0 || !argv || !argv[0]) + return; + + arg = 0; + len = strnlen(message, size); + available = size - len - 1; + to = message + len; + while (arg < argc && available > 0) { + from = argv[arg++]; + if (!from) + from = "(null)"; + len = strnlen(from, available); + strncpy(to, from, len); + to += len; + available -= len; + if (available-- > 0) + *to++ = separator; + } + *to = 0; +} + +__visible_for_testing void dtm_prepare_message(char *message, size_t size, + const char *where, const char *sep, + struct dtm_context *context) +{ + int total_argc, max_argc, arg; + + /* load all arguments to update attributes and fill arg values */ + total_argc = context->callee_argc; + max_argc = min_t(int, total_argc, ARRAY_SIZE(context->callee_argv)); + if (context->callee_copied_argc != max_argc) + for (arg = 0; arg < max_argc; arg++) + if (!context->callee_argv[arg]) + dtm_get_callee_arg(context, arg); + + snprintf(message, size, "%s%s%d:%d:%ld:%ld:%s:%s:%s:", where, sep, + context->callee_copied_argc, total_argc, + context->callee_copied_args_len, context->callee_total_args_len, + dtm_get_caller_path(context), dtm_get_callee_path(context), + dtm_get_stdin_mode(context)); + dtm_append_argv(message, size, ':', max_argc, context->callee_argv); +} + +#ifdef DEFEX_DEBUG_ENABLE +void dtm_debug_call(const char *where, struct dtm_context *context) +{ + char message[DTM_MAX_LOG_SIZE]; + + dtm_prepare_message(message, sizeof(message), where, ": ", context); + DTM_LOG_DEBUG("%s", message); +} +#endif + +noinline void dtm_report_violation(const char *feature_code, + struct dtm_context *context) +{ + char message[DTM_MAX_DETAIL_SIZE + 1]; + int ret; + + dtm_prepare_message(message, sizeof(message), "", "", context); + DTM_DEBUG(VIOLATIONS, "[%s]%s", feature_code, message); + if (should_send_dsms_event()) { + ret = dsms_send_message(feature_code, message, 0); + if (unlikely(ret != DSMS_SUCCESS)) + DTM_LOG_ERROR("Error %d while sending DSMS 
report", ret); + } +} diff --git a/security/samsung/defex_lsm/feature_trusted_map/dtm_utils.c b/security/samsung/defex_lsm/feature_trusted_map/dtm_utils.c new file mode 100644 index 000000000000..799321a41b52 --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/dtm_utils.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif + +#include "include/dtm.h" +#include "include/dtm_log.h" +#include "include/dtm_utils.h" + +const char * const DTM_UNKNOWN = ""; + +static inline int dtm_get_file_attr(struct path *path, struct kstat *stat) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) + return vfs_getattr(path, stat, STATX_BASIC_STATS, 0); +#else + return vfs_getattr(path, stat); +#endif +} + +/* + * Gets mode bit value for a file status. + */ +int dtm_get_stat_mode_bit(struct kstat *stat) +{ + int mode, mode_bit; + + if (!stat) + return DTM_FD_MODE_ERROR; + + mode = (stat->mode) & S_IFMT; + switch (mode) { + case S_IFBLK: + mode_bit = DTM_FD_MODE_BLK; + break; + case S_IFCHR: + mode_bit = DTM_FD_MODE_CHR; + break; + case S_IFDIR: + mode_bit = DTM_FD_MODE_DIR; + break; + case S_IFIFO: + mode_bit = DTM_FD_MODE_FIFO; + break; + case S_IFLNK: + mode_bit = DTM_FD_MODE_LNK; + break; + case S_IFREG: + mode_bit = DTM_FD_MODE_REG; + break; + case S_IFSOCK: + mode_bit = DTM_FD_MODE_SOCK; + break; + default: + mode_bit = DTM_FD_MODE_UNKNOWN; + DTM_LOG_ERROR("Unknown stat mode %d", mode); + } + return mode_bit; +} + +/* + * Gets mode bit value for a file descriptor. + */ +int dtm_get_fd_mode_bit(int fd) +{ + struct kstat stat; + struct fd sf; + int error; + + if (fd < 0) + return DTM_FD_MODE_ERROR; + + sf = fdget_raw(fd); + if (unlikely(!sf.file)) + return DTM_FD_MODE_CLOSED; + + error = dtm_get_file_attr(&sf.file->f_path, &stat); + fdput(sf); + if (unlikely(error < 0)) + return DTM_FD_MODE_ERROR; + + return dtm_get_stat_mode_bit(&stat); +} + +/* + * Gets printable name for a fd mode bit value. + */ +const char *dtm_get_fd_mode_bit_name(int mode_bit) +{ + switch (mode_bit) { + case DTM_FD_MODE_NONE: + return "NONE"; + case DTM_FD_MODE_BLK: + return "BLK"; + case DTM_FD_MODE_CHR: + return "CHR"; + case DTM_FD_MODE_DIR: + return "DIR"; + case DTM_FD_MODE_FIFO: + return "FIFO"; + case DTM_FD_MODE_LNK: + return "LNK"; + case DTM_FD_MODE_REG: + return "REG"; + case DTM_FD_MODE_SOCK: + return "SOCK"; + case DTM_FD_MODE_CLOSED: + return "CLOSED"; + case DTM_FD_MODE_ERROR: + return "ERROR"; + case DTM_FD_MODE_UNKNOWN: + return "UNKNOWN"; + } + DTM_LOG_ERROR("Unexpected mode bit %d", mode_bit); + return "INVALID"; +} diff --git a/security/samsung/defex_lsm/feature_trusted_map/include/dtm.h b/security/samsung/defex_lsm/feature_trusted_map/include/dtm.h new file mode 100644 index 000000000000..c4412b5c0b41 --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/include/dtm.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef _INCLUDE_DTM_H +#define _INCLUDE_DTM_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dtm_utils.h" +#include "include/defex_internal.h" + +// DTM Kernel Interface + +#define DTM_CALLER_VIOLATION "DTM1" +#define DTM_PROGRAM_VIOLATION "DTM2" +#define DTM_STDIN_VIOLATION "DTM3" +#define DTM_ARGUMENTS_VIOLATION "DTM4" + +enum dtm_result_code { + DTM_ALLOW = DEFEX_ALLOW, + DTM_DENY = -DEFEX_DENY, +}; + +enum dtm_constants { + DTM_MAX_ARGC = 5, // max args checked (incl. program name) + DTM_MAX_ARG_STRLEN = 100, // max arg len checked (including '\0') +}; + +struct dtm_context { + struct defex_context *defex_context; + void *callee_argv_ref; + const char *callee_argv[DTM_MAX_ARGC]; + int callee_argc; + int callee_copied_argc; + long callee_copied_args_len; + long callee_total_args_len; + const char *program_name; + const char *stdin_mode; + int stdin_mode_bit; +}; + +/* Verifies if a dtm_context was properly initialized */ +static inline bool is_dtm_context_valid(struct dtm_context *context) +{ + return !ZERO_OR_NULL_PTR(context) + && !ZERO_OR_NULL_PTR(context->defex_context) + && !ZERO_OR_NULL_PTR(context->defex_context->task) + && !ZERO_OR_NULL_PTR(context->callee_argv_ref); +} + +/* Gets caller path for current call */ +static inline const char *dtm_get_caller_path(struct dtm_context *context) +{ + return get_dc_process_name(context->defex_context); +} + +/* Gets callee path for current call */ +static inline const char *dtm_get_callee_path(struct dtm_context *context) +{ + return get_dc_target_name(context->defex_context); +} + +/* Gets program name for current call */ +const char *dtm_get_program_name(struct dtm_context *context); +/* Gets stdin mode bit for current call */ +int dtm_get_stdin_mode_bit(struct dtm_context *context); +/* Gets stdin mode for current call */ +const char *dtm_get_stdin_mode(struct dtm_context *context); +/* Gets call argument value, copying from user if needed */ +const char *dtm_get_callee_arg(struct dtm_context *context, int arg_index); + +#endif /* _INCLUDE_DTM_H */ diff --git a/security/samsung/defex_lsm/feature_trusted_map/include/dtm_engine.h b/security/samsung/defex_lsm/feature_trusted_map/include/dtm_engine.h new file mode 100644 index 000000000000..af9f858ccf53 --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/include/dtm_engine.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#ifndef _INCLUDE_DTM_ENGINE_H +#define _INCLUDE_DTM_ENGINE_H + +#include "dtm.h" + +/* Enforces DTM policy for an exec system call */ +extern int dtm_enforce(struct dtm_context *context); +#ifdef DEFEX_KUNIT_ENABLED +/* Replaces DTM policy (use NULL to return to normal) */ +extern void dtm_engine_override_data(const unsigned char *); +#endif + +#endif /* _INCLUDE_DTM_ENGINE_H */ diff --git a/security/samsung/defex_lsm/feature_trusted_map/include/dtm_log.h b/security/samsung/defex_lsm/feature_trusted_map/include/dtm_log.h new file mode 100644 index 000000000000..17f9d95ba115 --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/include/dtm_log.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef _INCLUDE_DTM_LOG_H +#define _INCLUDE_DTM_LOG_H + +#define DTM_TAG "[DTM] " +#define DTM_LOG_INFO(format, ...) pr_info(DTM_TAG format, ##__VA_ARGS__) +#define DTM_LOG_ERROR(format, ...) pr_err(DTM_TAG format, ##__VA_ARGS__) +#define DTM_LOG_DEBUG(format, ...) pr_debug(DTM_TAG format, ##__VA_ARGS__) + +struct dtm_context; + +extern noinline void dtm_report_violation(const char *feature_code, + struct dtm_context *context); + +#ifdef DEFEX_DEBUG_ENABLE + +#define DTM_DEBUG(mode, format, ...) \ + do { \ + if (defex_tm_mode_enabled(DEFEX_TM_DEBUG_##mode)) \ + DTM_LOG_DEBUG(format, ##__VA_ARGS__); \ + } while (0) + +#define DTM_DEBUG_CALL(message, context) \ + do { \ + if (defex_tm_mode_enabled(DEFEX_TM_DEBUG_CALLS)) \ + dtm_debug_call("dtm_enforce", context); \ + } while (0) + +extern void dtm_debug_call(const char *where, struct dtm_context *context); + +#else +#define DTM_DEBUG(mode, format, ...) (0) +#define DTM_DEBUG_CALL(message, context) (0) +#endif /* DEFEX_DEBUG_ENABLE */ + +#endif /* _INCLUDE_DTM_LOG_H */ diff --git a/security/samsung/defex_lsm/feature_trusted_map/include/dtm_utils.h b/security/samsung/defex_lsm/feature_trusted_map/include/dtm_utils.h new file mode 100644 index 000000000000..15022cbeea8b --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/include/dtm_utils.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2020-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#ifndef _INCLUDE_DTM_UTILS_H +#define _INCLUDE_DTM_UTILS_H + +#include +#include +#include +#include +#include + +#include "../../include/defex_internal.h" + +enum dtm_stdin_descriptor_allow { + DTM_FD_MODE_NONE = 0, + DTM_FD_MODE_BLK = 1, + DTM_FD_MODE_CHR = 2, + DTM_FD_MODE_DIR = 4, + DTM_FD_MODE_FIFO = 8, + DTM_FD_MODE_LNK = 16, + DTM_FD_MODE_REG = 32, + DTM_FD_MODE_SOCK = 64, + DTM_FD_MODE_CLOSED = 128, + DTM_FD_MODE_ERROR = 256, + DTM_FD_MODE_UNKNOWN = 512, +}; + +extern const char * const DTM_UNKNOWN; + +/* Gets mode bit value for a file status */ +int dtm_get_stat_mode_bit(struct kstat *stat); + +/* Gets mode bit value for a file descriptor */ +extern int dtm_get_fd_mode_bit(int fd); + +/* Gets printable name for a fd mode bit value */ +const char *dtm_get_fd_mode_bit_name(int mode_bit); + +#endif /* _INCLUDE_DTM_UTILS_H */ diff --git a/security/samsung/defex_lsm/feature_trusted_map/ptree.c b/security/samsung/defex_lsm/feature_trusted_map/ptree.c new file mode 100644 index 000000000000..6284696ffc9a --- /dev/null +++ b/security/samsung/defex_lsm/feature_trusted_map/ptree.c @@ -0,0 +1,476 @@ +/* + * Copyright (c) 2021-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include "include/ptree.h" + +/* Functions for "using" (i.e., loading and searching) p-tree in portable + * variant. + */ + +/* Big-endian uchar -> int */ +static unsigned int charp2UInt(const unsigned char *p, int size) +{ + unsigned int i = *p; + + if (size > 1) { + i = (i << 8) | p[1]; + if (size > 2) { + i = (i << 8) | p[2]; + if (size > 3) + i = (i << 8) | p[3]; + } + } + return i; +} + +/* Checks magic number, loads important constants from prologue + */ +static int pptree_set_header(struct PPTree *tree) +{ + const unsigned char *pp = tree->data; + + /* Only PPTREE_MAGIC_FIXEDSIZE bytes are mandatory, remaining + * two can encode version information for compatibility + */ + if (strncmp((char *)pp, PPTREE_MAGIC, PPTREE_MAGIC_FIXEDSIZE)) { + pr_warn("Ptree: Bad magic number\n"); + return -1; + } + pp += PPTREE_MAGIC_FIXEDSIZE + 2; + tree->sTable.fullSize = charp2UInt(pp, UPPER_COUNT_SIZE); + pp += UPPER_COUNT_SIZE; + tree->sTable.size = charp2UInt(pp, UPPER_COUNT_SIZE); + pp += UPPER_COUNT_SIZE; + tree->sTable.indexSize = charp2UInt(pp++, 1); + tree->sTable.table = pp; + pp += tree->sTable.fullSize; + + tree->bTable.fullSize = charp2UInt(pp, UPPER_COUNT_SIZE); + pp += UPPER_COUNT_SIZE; + tree->bTable.size = charp2UInt(pp, UPPER_COUNT_SIZE); + pp += UPPER_COUNT_SIZE; + tree->bTable.indexSize = charp2UInt(pp++, 1); + tree->bTable.table = pp; + pp += tree->bTable.fullSize; + + tree->nodes.childCountSize = charp2UInt(pp++, 1); + tree->nodes.offsetSize = charp2UInt(pp++, 1); + tree->nodes.root = pp; + return 0; +} + +int pptree_set_data(struct PPTree *tree, const unsigned char *data) +{ + tree->data = data; + tree->allocated = 0; + return pptree_set_header(tree); +} + +void pptree_free(struct PPTree *tree) +{ + if (tree->allocated && tree->data) { + kfree((void *)tree->data); + tree->data = 0; + tree->allocated = 0; + } +} + +/* Gets a string (either a component of a search key or data associated + * with an item) given index + */ +static const unsigned char *pptree_string(const struct PPTree *tree, int i) +{ + const unsigned char *sTable = tree->sTable.table; + int index, bcs = 
tree->sTable.indexSize; + + if (i < 0 || i >= tree->sTable.size) { + pr_warn("Ptree: bad string index: %d (max %d)\n", i, + tree->sTable.size); + return 0; + } + index = charp2UInt(sTable + i * bcs, bcs); + return sTable + (1 + tree->sTable.size) * bcs + index; +} + +/* Gets a bytearray given index */ +static const unsigned char *pptree_bytearray(const struct PPTree *tree, int i, + int *length) +{ + const unsigned char *bTable = tree->bTable.table; + int index, indexNext, bcs = tree->bTable.indexSize; + + if (i < 0 || i >= tree->bTable.size) { + pr_warn("Ptree: Bad bytearray index: %d (max %d)\n", i, + tree->bTable.size); + if (length) + *length = 0; + return ""; + } + index = charp2UInt(bTable + i * bcs, bcs); + if (length) { + indexNext = charp2UInt(bTable + (i + 1) * bcs, bcs); + *length = indexNext - index; + } + return bTable + (1 + tree->bTable.size) * bcs + index; +} + +/* Given a pointer to the start of a tree node, load important values + * and advance pointer to the start of item index. + */ +static void load_node_prologue(const struct PPTree *tree, + const unsigned char **p, + unsigned int *itemSize, + unsigned int *dataTypes, + unsigned int *childCount) +{ + /* is the |-ing of data masks of all items in this node, + * thus all possible data types associated to items. + * By extension, it determines , which is the size in bytes + * of all items in this node. + */ + int dtTypes = charp2UInt((*p)++, 1), + itSize = tree->sTable.indexSize + tree->nodes.offsetSize; + if (dtTypes && itSize) { + ++itSize; + if (dtTypes & PTREE_DATA_BYTES) + itSize += tree->bTable.indexSize; + if (dtTypes & PTREE_DATA_STRING) + itSize += tree->sTable.indexSize; + if (dtTypes & PTREE_DATA_INT1) + itSize++; + if (dtTypes & PTREE_DATA_INT2) + itSize += 2; + if (dtTypes & PTREE_DATA_INT4) + itSize += 4; + if (dtTypes & PTREE_DATA_PATH) + itSize += tree->nodes.offsetSize; + } + if (childCount) + *childCount = charp2UInt(*p, tree->nodes.childCountSize); + *p += tree->nodes.childCountSize; + if (dataTypes) + *dataTypes = dtTypes; + if (itemSize) + *itemSize = itSize; +} + +/* Calculate offset from root node. It depends on a previous search, if any */ +static int pptree_get_offset(const struct PPTree *tree, + struct PPTreeContext *ctx) +{ + return ctx ? + /* Continue from the result of a previous search? */ + (ctx->types & PTREE_FIND_CONTINUE) && ctx->last ? + ctx->last - tree->nodes.root : + /* Continue from subpath of a previous search, if any? */ + ctx->types & PTREE_DATA_PATH ? + ctx->value.childPath : + 0 : + 0; /* No context, use root itself */ +} + +/* Load item-related data.
<p>
should be pointing to data type byte */ +static const unsigned char *pptree_get_itemData(const struct PPTree *tree, + const unsigned char *p, + int dataTypes, + struct PPTreeContext *ctx) +{ + memset(&ctx->value, 0, sizeof(ctx->value)); + ctx->types = (ctx->types & ~PTREE_DATA_MASK) | charp2UInt(p++, 1); + if (dataTypes & PTREE_DATA_BYTES) { + if (ctx->types & PTREE_DATA_BYTES) + ctx->value.bytearray.bytes = + pptree_bytearray(tree, + charp2UInt(p, + tree->bTable.indexSize), + &ctx->value.bytearray.length); + p += tree->bTable.indexSize; + } + if (dataTypes & PTREE_DATA_STRING) { + if (ctx->types & PTREE_DATA_STRING) + ctx->value.string = + (const char *) + pptree_string(tree, + charp2UInt(p, + tree->sTable.indexSize)); + p += tree->sTable.indexSize; + } + if (dataTypes & PTREE_DATA_BITA) + ctx->value.bits = (ctx->types & PTREE_DATA_BITA) && + (ctx->types & PTREE_DATA_BITA_MASK) + ? 1 : 0; + if (dataTypes & PTREE_DATA_INT1) { + if (ctx->types & PTREE_DATA_INT1) + ctx->value.int1 = charp2UInt(p, 1); + ++p; + } + if (dataTypes & PTREE_DATA_INT2) { + if (ctx->types & PTREE_DATA_INT2) + ctx->value.int2 = charp2UInt(p, 2); + p += 2; + } + if (dataTypes & PTREE_DATA_INT4) { + if (ctx->types & PTREE_DATA_INT4) + ctx->value.int4 = charp2UInt(p, 4); + p += 4; + } + if (dataTypes & PTREE_DATA_PATH) { + if (ctx->types & PTREE_DATA_PATH) + ctx->value.childPath = + charp2UInt(p, tree->nodes.offsetSize); + p += tree->nodes.offsetSize; + } + return p; +} + +int pptree_find(const struct PPTree *tree, const char **path, int pathLen, + struct PPTreeContext *ctx) +{ + int depth; + unsigned int dataTypes = 0; + const unsigned char *pFound = 0, + *p = tree->nodes.root + pptree_get_offset(tree, ctx); + + if (ctx->types & PTREE_FIND_PEEKED) { + /* If a previous call used PTREE_FIND_PEEK ignore , + * only advance context's offset + */ + if (ctx->lastPeeked) { + ctx->last = ctx->lastPeeked; + ctx->lastPeeked = 0; + return 1; + } + ctx->types &= ~(PTREE_FIND_PEEK | PTREE_FIND_PEEKED); + } + if (pathLen < 1) + return 0; + for (depth = 0; depth < pathLen; ++depth) { + const char *s; + int rCmp, sIndex, i; + unsigned int itemSize, childCount; + + load_node_prologue(tree, &p, &itemSize, &dataTypes, &childCount); + rCmp = -1; + if (childCount < 5) { /* linear ordered search */ + for (i = 0; i < childCount; ++i) { + sIndex = charp2UInt(p + i * itemSize, + tree->sTable.indexSize); + rCmp = strncmp(path[depth], + (const char *) + pptree_string(tree, sIndex), + PTREE_FINDPATH_MAX); + if (!rCmp) + break; + if (rCmp < 0) + return 0; + } + if (i == childCount) + return 0; + } else { /* binary search */ + int l = 0, r = childCount - 1; + + while (l <= r) { + i = l + (r - l) / 2; + sIndex = charp2UInt(p + i * itemSize, + tree->sTable.indexSize); + s = (const char *)pptree_string(tree, sIndex); + rCmp = strncmp(path[depth], s, + PTREE_FINDPATH_MAX); + if (rCmp < 0) + r = i - 1; + else + if (rCmp) + l = i + 1; + else + break; + } + if (rCmp) + return 0; + } + pFound = p + i * itemSize + tree->sTable.indexSize; + p = tree->nodes.root + charp2UInt(pFound, + tree->nodes.offsetSize); + } + if (ctx) { + if (ctx->types & PTREE_FIND_PEEK) + /* Don't advance context, just store it here */ + ctx->lastPeeked = p; + else { + ctx->last = p; + ctx->lastPeeked = 0; + } + if (dataTypes) + pptree_get_itemData(tree, + pFound + tree->nodes.offsetSize, + dataTypes, ctx); + else + /* Clear all bits for associated data */ + ctx->types &= ~PTREE_DATA_MASK; + } + return 1; +} + +int pptree_find_path(const struct PPTree *tree, const char *path, char 
delim, + struct PPTreeContext *ctx) +{ + int i, itemCount, findRes, flags = ctx->types; + char *ppath, *p, **pathItems; + const char *q, *pathItems1[1]; + + if (!path) + return 0; + /* Convenience: split in components, invoke pptree_find */ + if (ctx->types & PTREE_FIND_PEEKED) { + /* No path array to fill, just use last result */ + pathItems = 0; + itemCount = 0; + } else { + if (!delim) { + /* Special case, consider the whole string as + * a single component + */ + pathItems = (char **)pathItems1; + pathItems1[0] = path; + itemCount = 1; + } else { + ppath = kstrndup(path, PTREE_FINDPATH_MAX, GFP_KERNEL); + if (!ppath) + return 0; + for (itemCount = *path ? 1 : 0, q = path; *q; ++q) + if (*q == delim) + ++itemCount; + pathItems = kmalloc((itemCount ? itemCount : 1) * + sizeof(const char *), + GFP_KERNEL); + if (!pathItems) { + kfree((void *)ppath); + return 0; + } + *pathItems = ppath; + for (i = 1, p = ppath; *p; ++p) + if (*p == delim) { + *p = 0; + if (i < itemCount) + pathItems[i++] = p + 1; + } + } + } + findRes = pptree_find(tree, (const char **)pathItems, itemCount, ctx); + if (!(flags & PTREE_FIND_PEEKED) && delim) { + kfree((void *) pathItems); + kfree((void *) ppath); + } + return findRes; +} + +int pptree_child_count(const struct PPTree *tree, + struct PPTreeContext *ctx) +{ + const unsigned char *p = tree->nodes.root + + pptree_get_offset(tree, ctx); + unsigned int childCount; + + load_node_prologue(tree, &p, 0, 0, &childCount); + return childCount; +} + +int pptree_iterate_children(const struct PPTree *tree, + struct PPTreeContext *ctx, + int (*f)(const struct PPTree *tree, + const char *name, + const struct PPTreeContext *itemData, + void *data), + void *data) +{ + const unsigned char *p; + unsigned int i, childCount, itemSize, dataTypes, sIndex; + int ret; + + if (!f) + return 0; + p = tree->nodes.root + pptree_get_offset(tree, ctx); + load_node_prologue(tree, &p, &itemSize, &dataTypes, &childCount); + for (ret = i = 0; i < childCount; ++i) { + struct PPTreeContext itemData; + + sIndex = charp2UInt(p, tree->sTable.indexSize); + if (dataTypes) + pptree_get_itemData(tree, + p + tree->nodes.offsetSize + + tree->nodes.offsetSize, + dataTypes, &itemData); + else + itemData.types = 0; + ret = (*f)(tree, (const char *)pptree_string(tree, sIndex), + &itemData, data); + if (ret < 0) + return ret; + p += itemSize; + } + return ret; +} + +/* Recursively traverses all children in subpath of a node given + * by +, invoking on all paths ending on a leaf. + * Returns last result of . Stops prematurely if returns nonzero. 
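+ *
+ * A sketch of a callback suitable for the public wrapper
+ * pptree_iterate_paths(); the name print_path is hypothetical:
+ *
+ *	static int print_path(const struct PPTree *tree, const char **path,
+ *			      int pathLen, void *data)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < pathLen; i++)
+ *			pr_info("%s%s", i ? "/" : "", path[i]);
+ *		return 0;	(a nonzero return stops the traversal)
+ *	}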
+ */ +static int pptree_iterate_subpaths(const struct PPTree *tree, + int offset, int pathDepth, + int (*f)(const struct PPTree *tree, + const char **path, + int pathLen, void *data), + const char **path, int maxDepth, + void *data) +{ + const unsigned char *p = tree->nodes.root + offset; + unsigned int i, childCount, itemSize, dataTypes; + + load_node_prologue(tree, &p, &itemSize, &dataTypes, &childCount); + for (i = 0; i < childCount; ++i) { + const unsigned char *pp = p + i * itemSize; + int sIndex, childIndex, j; + + sIndex = charp2UInt(pp, tree->sTable.indexSize); + pp += tree->sTable.indexSize; + childIndex = charp2UInt(pp, tree->nodes.offsetSize); + pp += tree->nodes.offsetSize; + path[pathDepth] = (const char *)pptree_string(tree, sIndex); + if (childIndex) { + if (pathDepth < maxDepth) { + j = pptree_iterate_subpaths(tree, childIndex, + pathDepth + 1, f, + path, maxDepth, + data); + if (j) + return j; + } + } else + if (f) { + int j = (*f)(tree, path, pathDepth + 1, data); + + if (j) + return j; + } + } + return 0; +} + +int pptree_iterate_paths(const struct PPTree *tree, + struct PPTreeContext *ctx, + int (*f)(const struct PPTree *tree, + const char **path, + int pathLen, void *data), + const char **path, int maxPathLen, + void *data) +{ + return pptree_iterate_subpaths(tree, pptree_get_offset(tree, ctx), + 0, f, path, maxPathLen, data); +} + diff --git a/security/samsung/defex_lsm/include/defex_config.h b/security/samsung/defex_lsm/include/defex_config.h index 6731b9b2c4ed..54df331bf360 100644 --- a/security/samsung/defex_lsm/include/defex_config.h +++ b/security/samsung/defex_lsm/include/defex_config.h @@ -20,12 +20,24 @@ #define GLOBAL_PED_STATUS FEATURE_CHECK_CREDS #endif +#ifdef DEFEX_PERMISSIVE_INT +#define GLOBAL_INTEGRITY_STATUS (FEATURE_INTEGRITY | FEATURE_INTEGRITY_SOFT) +#else +#define GLOBAL_INTEGRITY_STATUS FEATURE_INTEGRITY +#endif + #ifdef DEFEX_PERMISSIVE_SP #define GLOBAL_SAFEPLACE_STATUS (FEATURE_SAFEPLACE | FEATURE_SAFEPLACE_SOFT) #else #define GLOBAL_SAFEPLACE_STATUS FEATURE_SAFEPLACE #endif +#ifdef DEFEX_PERMISSIVE_TM +#define GLOBAL_TRUSTED_MAP_STATUS (FEATURE_TRUSTED_MAP | FEATURE_TRUSTED_MAP_SOFT) +#else +#define GLOBAL_TRUSTED_MAP_STATUS FEATURE_TRUSTED_MAP +#endif + #ifdef DEFEX_PERMISSIVE_IM #define GLOBAL_IMMUTABLE_STATUS (FEATURE_IMMUTABLE | FEATURE_IMMUTABLE_SOFT) #else @@ -35,9 +47,9 @@ /* Uncomment for Kernels, that require it */ #define STRICT_UID_TYPE_CHECKS 1 -#if defined(DEFEX_PED_ENABLE) || defined(DEFEX_SAFEPLACE_ENABLE) || defined(DEFEX_IMMUTABLE_ENABLE) +#if defined(DEFEX_PED_ENABLE) || defined(DEFEX_SAFEPLACE_ENABLE) || defined(DEFEX_TRUSTED_MAP_ENABLE) || defined(DEFEX_IMMUTABLE_ENABLE) #define DEFEX_FEATURE_ENABLE -#endif /* DEFEX_PED_ENABLE || DEFEX_SAFEPLACE_ENABLE || DEFEX_IMMUTABLE_ENABLE */ +#endif /* DEFEX_PED_ENABLE || DEFEX_SAFEPLACE_ENABLE || DEFEX_TRUSTED_MAP_ENABLE || DEFEX_IMMUTABLE_ENABLE */ int defex_get_features(void); diff --git a/security/samsung/defex_lsm/include/defex_debug.h b/security/samsung/defex_lsm/include/defex_debug.h index 2c89f7d85e36..45fbca1779c2 100644 --- a/security/samsung/defex_lsm/include/defex_debug.h +++ b/security/samsung/defex_lsm/include/defex_debug.h @@ -16,6 +16,7 @@ #define DBG_SET_PE_STATUS 3 #define DBG_SET_IM_STATUS 4 #define DBG_SET_SP_STATUS 5 +#define DBG_SET_INT_STATUS 6 #define MAX_DATA_LEN 300 diff --git a/security/samsung/defex_lsm/include/defex_internal.h b/security/samsung/defex_lsm/include/defex_internal.h index 9d0a5be410c3..eba544267e12 100644 --- 
a/security/samsung/defex_lsm/include/defex_internal.h +++ b/security/samsung/defex_lsm/include/defex_internal.h @@ -42,6 +42,10 @@ #define FEATURE_SAFEPLACE_SOFT (1 << 9) #define FEATURE_FIVE (1 << 10) /* reserved for future use */ #define FEATURE_FIVE_SOFT (1 << 11) /* reserved for future use */ +#define FEATURE_TRUSTED_MAP (1 << 12) +#define FEATURE_TRUSTED_MAP_SOFT (1 << 13) +#define FEATURE_INTEGRITY (1 << 14) +#define FEATURE_INTEGRITY_SOFT (1 << 15) #define FEATURE_CLEAR_ALL (0xFF0000) @@ -109,6 +113,12 @@ int set_task_creds(struct task_struct *p, unsigned int uid, unsigned int fsuid, void set_task_creds_tcnt(struct task_struct *p, int addition); int is_task_creds_ready(void); +/* -------------------------------------------------------------------------- */ +/* Integrity feature */ +/* -------------------------------------------------------------------------- */ + +extern unsigned char global_integrity_status; + /* -------------------------------------------------------------------------- */ /* SafePlace feature */ /* -------------------------------------------------------------------------- */ @@ -121,6 +131,28 @@ extern unsigned char global_safeplace_status; extern unsigned char global_immutable_status; +/* -------------------------------------------------------------------------- */ +/* Trusted Map feature */ +/* -------------------------------------------------------------------------- */ + +extern unsigned char global_trusted_map_status; + +enum trusted_map_status { + DEFEX_TM_ENFORCING_MODE = (1 << 0), + DEFEX_TM_PERMISSIVE_MODE = (1 << 1), + DEFEX_TM_DEBUG_VIOLATIONS = (1 << 2), + DEFEX_TM_DEBUG_CALLS = (1 << 3), + DEFEX_TM_LAST_STATUS = (1 << 4) - 1 +}; + +static inline int defex_tm_mode_enabled(int mode_flag) +{ + return global_trusted_map_status & mode_flag; +} + +struct defex_context; +int defex_trusted_map_lookup(struct defex_context *dc, int argc, void *argv); + /* -------------------------------------------------------------------------- */ /* Common Helper API */ /* -------------------------------------------------------------------------- */ @@ -184,6 +216,7 @@ int __init do_load_rules(void); int immutable_status_store(const char *status_str); int privesc_status_store(const char *status_str); int safeplace_status_store(const char *status_str); +int integrity_status_store(const char *status_str); extern bool boot_state_recovery __ro_after_init; #ifdef DEFEX_DEPENDING_ON_OEMUNLOCK diff --git a/security/samsung/defex_lsm/include/defex_rules.h b/security/samsung/defex_lsm/include/defex_rules.h index d1d2683a4696..64a8fb272c48 100644 --- a/security/samsung/defex_lsm/include/defex_rules.h +++ b/security/samsung/defex_lsm/include/defex_rules.h @@ -9,6 +9,10 @@ #ifndef __DEFEX_RULES_H #define __DEFEX_RULES_H +#ifdef DEFEX_TRUSTED_MAP_ENABLE +#include "ptree.h" +#endif + #define STATIC_RULES_MAX_STR 32 #define INTEGRITY_LENGTH 32 #define FEATURE_NAME_MAX_STR 32 @@ -28,7 +32,9 @@ enum feature_types { feature_immutable_path_write = 256, feature_immutable_src_exception = 512, feature_immutable_status = 1024, - feature_umhbin_path = 2048 + feature_umhbin_path = 2048, + feature_trusted_map_status = 4096, + feature_integrity_check = 8192 }; struct feature_match_entry { @@ -59,4 +65,9 @@ struct rule_item_struct { int check_rules_ready(void); +#ifdef DEFEX_TRUSTED_MAP_ENABLE +/* "Header" for DTM's dynamically loaded policy */ +extern struct PPTree dtm_tree; +#endif + #endif /* __DEFEX_RULES_H */ diff --git a/security/samsung/defex_lsm/include/defex_tailer.h 
b/security/samsung/defex_lsm/include/defex_tailer.h new file mode 100644 index 000000000000..9478491e28e8 --- /dev/null +++ b/security/samsung/defex_lsm/include/defex_tailer.h @@ -0,0 +1,57 @@ +#ifndef __INCLUDE_TAILER_H__ +#define __INCLUDE_TAILER_H__ + +/* Functions for managing a "tailer file", which is similar to a tar + * archive but, in order to support ordinary unadorned files + * - stores metainformation after each archived file + * - stores a single magic number at the very end. + */ + +/* Magic number, occurs only once at the end of tailer file. Should it + * change, be sure to update TAIL_MAGIC_LEN + */ +#define TAIL_MAGIC "#TAIL_GUARD#" +#define TAIL_MAGIC_LEN 12 +/* Maximum length of title associated with a stored file. Arbitrary, but + * should it increase, metainfo size (currently 1 byte) must change + * accordingly + */ +#define TAIL_MAX_TITLE_LENGTH 255 + +/* Each file's metainfo entry comprises (version 1,0): + * - offset where actual contents start: 4-byte big-endian + * - size of contents in bytes: 4-byte big-endian + * - title, up to TAIL_MAX_TITLE_LENGTH bytes, non 0-terminated + * - title length, 1 byte + * - major/minor version number, 1 byte each + * + * A tailer file is either an unadorned one or a linked list of content+ + * metainfo entries which goes backwards from the magic number. + */ + +/* Functions for handling tailer data as memory buffers */ + +/* If a memory buffer

<p> with given <size> ends with the tailer magic + * suffix, returns its offset, else returns -1 + */ +extern long defex_tailerp_has_suffix(const unsigned char *p, long size); +/* Given buffer
<p>
of given, returns address of last occurrence + * of contents of given (and if <sizep> sets *<sizep> to + * contents size), or 0 if <p> is unadorned has no such + * occurrences + */ +extern const unsigned char *defex_tailerp_find(const unsigned char *p, long size, + const char *title, long *sizep); +/* Given buffer <p> with <size> bytes, returns 0 if unadorned. Else, + * executes <task> for each entry, from last to first, passing arguments + * <title> (unterminated), title length, absolute <start> address and + * <size> in bytes, plus <data>. Terminates immediately if <task> returns + * negative. Returns last result of <task>. + */ +extern int defex_tailerp_iterate(const unsigned char *p, long size, + int (*task)(const char *title, int titleLen, + const unsigned char *start, + long size, void *data), + void *data); + +#endif diff --git a/security/samsung/defex_lsm/include/ptree.h b/security/samsung/defex_lsm/include/ptree.h new file mode 100644 index 000000000000..cfc3610e1c12 --- /dev/null +++ b/security/samsung/defex_lsm/include/ptree.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2021-2022 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef __PTREE_H__ + +/* A "P-tree" is a n-ary search tree whose keys are strings. A p-tree + * node may be associated with data like a string, a byte array or + * an entry point for an additional search (unrelated to the node's + * children). + * + * P-trees are represented in two variants: + * - a "build" data structure with traditional structs and pointers, used for + * building, converting, merging or exporting + * - a "portable" format packed with integer indices instead of pointers, + * stored as a byte array and used (read-only) by target applications. + * In both, all strings are stored at a dictionary and represented in nodes + * by indices, therefore child data have constant size; in addition, child + * lists are sorted, allowing efficient search. + */ + +/* Masks for data types. Should any be created, PTREE_DATA_MASK must + * be updated accordingly + */ +#define PTREE_DATA_BYTES 1 /* byte array */ +#define PTREE_DATA_STRING 2 /* zero-terminated string */ +#define PTREE_DATA_PATH 4 /* subpath */ +#define PTREE_DATA_INT1 8 /* byte */ +#define PTREE_DATA_INT2 16 /* 2-byte short */ +#define PTREE_DATA_INT4 32 /* 4-byte int */ +#define PTREE_DATA_BITA 64 /* bit */ +/* Bit A is stored directly in the bit below */ +#define PTREE_DATA_BITA_MASK 128 + +#define PTREE_DATA_MASK 255 /* ORs all PTREE_DATA masks */ + +/* Modifiers for search behavior */ +#define PTREE_FIND_CONTINUE 256 /* pptree_find should continue from + * previous result, if any + */ +#define PTREE_FIND_PEEK 512 /* a successful search does not advance + * the context, therefore the next search + * will start from the same point + */ +#define PTREE_FIND_PEEKED 1024 /* go to where a previous search would have + * gone had it not used PTREE_FIND_PEEKED + */ + +/* *************************************************************************** + * Declarations needed for _using_ exported P-trees + * ***************************************************************************/ + +/* Header for portable P-tree. This is not the binary format, rather after + * loading it caches the tree's details. 
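+ *
+ * A minimal lookup sketch, mirroring how dtm_engine.c consumes a loaded
+ * policy; policy_blob and the searched path are illustrative placeholders:
+ *
+ *	struct PPTree tree;
+ *	struct PPTreeContext ctx;
+ *
+ *	memset(&ctx, 0, sizeof(ctx));
+ *	if (!pptree_set_data(&tree, policy_blob) &&
+ *	    pptree_find_path(&tree, "vendor/bin/sh", '/', &ctx) &&
+ *	    (ctx.types & PTREE_DATA_PATH))
+ *		pr_info("path found, child subtree offset %d", ctx.value.childPath);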
+ */ +struct PPTree { + const unsigned char *data; + struct { + int fullSize; + int size; + char indexSize; + const unsigned char *table; + } sTable; + struct { + int fullSize; + int size; + char indexSize; + const unsigned char *table; + } bTable; + struct { + char offsetSize; + char childCountSize; + const unsigned char *root; + } nodes; + char allocated; +}; + +/* Magic number (fixed portion, plus two version bytes) */ +#define PPTREE_MAGIC "PPTree-\1\0" +#define PPTREE_MAGIC_FIXEDSIZE 7 + +/* Sets a byte array as a p-tree's data. Returns 0 if successful. */ +extern int pptree_set_data(struct PPTree *tree, const unsigned char *data); +/* Releases pptree data, if needed */ +extern void pptree_free(struct PPTree *tree); + +/* Context data for portable p-tree operations, especially searching. + * Used for both input (key data) and output (result's place and data) + * Search starts from root, or, + * if .types is PPTREE_DATA_PATH and there's a subpath, from .value.childPath + * if .types is PPTREE_FIND_CONTINUE, from latest sucessful search + * If .types contains PTREE_DATA_PEEK, context does not advance even if + * search is successful. It will advance (and no search will be done) if next + * search include PTREE_DATA_PEEKED. + */ +struct PPTreeContext { + int types; + struct { + struct { + const unsigned char *bytes; + int length; + } bytearray; + const char *string; + int childPath; + int int1; + int int2; + int int4; + unsigned char bits; + } value; + const unsigned char *last, *lastPeeked; + unsigned int childCount; +}; +/* Search for given path. Return 0 if not found, 1 otherwise + * (and fills in *ctx). + * See PPTreeContext for where the search starts. + */ +extern int pptree_find(const struct PPTree *tree, + const char **path, int pathLen, + struct PPTreeContext *ctx); +/* Maximum key length, mostly an arbitrary limit against DoS */ +#define PTREE_FINDPATH_MAX 8000 +/* Search for a given path. + * Similar to pptree_find, but splits <path> at every occurrence of <delim> + * (unless delim is 0). In kernelspace, returns 0 if <path> length exceeds + * PTREE_FINDPATH_MAX. + */ +extern int pptree_find_path(const struct PPTree *tree, const char *path, + char delim, struct PPTreeContext *ctx); +/* Returns number of children. + * See PPTreeContext for which is the parent node. + */ +extern int pptree_child_count(const struct PPTree *tree, + struct PPTreeContext *ctx); +/* Iterates on immediate children. + * See PPTreeContext for iteration root. + * Executes <f> for all children until <f> returns nonzero. Returns + * last return of <f>. + */ +extern int pptree_iterate_children(const struct PPTree *tree, + struct PPTreeContext *ctx, + int (*f)(const struct PPTree *tree, + const char *name, + const struct PPTreeContext *itemData, + void *data), + void *data); + +/* Iterate on subpaths. + * See PPTreeContext for iteration root. + * Executes <f> for all subpaths ending on a leaf, stopping if <f> + * returns nonzero. 
+ * Returns last result of <f> + */ +extern int pptree_iterate_paths(const struct PPTree *tree, + struct PPTreeContext *ctx, + int (*f)(const struct PPTree *tree, + const char **path, + int pathLen, void *data), + const char **path, int maxDepth, + void *data); +/* Dumps to stdout in human-readable format */ +extern void pptree_dump(const struct PPTree *tree); + +/* Maximum number of bytes for counters (practical reasonable limit) */ +#define UPPER_COUNT_SIZE 4 + +#define __PTREE_H__ +#endif diff --git a/security/samsung/defex_lsm/pack_rules.c b/security/samsung/defex_lsm/pack_rules.c index 13e51439e535..ab59686b26ed 100644 --- a/security/samsung/defex_lsm/pack_rules.c +++ b/security/samsung/defex_lsm/pack_rules.c @@ -21,6 +21,7 @@ const struct feature_match_entry feature_match[] = { {"feature_immutable_path_write", feature_immutable_path_write}, {"feature_immutable_src_exception", feature_immutable_src_exception}, {"feature_umhbin_path", feature_umhbin_path}, + {"feature_integrity_check", feature_integrity_check}, }; const int feature_match_size = sizeof(feature_match) / sizeof(feature_match[0]); diff --git a/security/samsung/dsms/test/security_dsms_access_control_test.c b/security/samsung/dsms/test/security_dsms_access_control_test.c deleted file mode 100644 index b34e85fdb382..000000000000 --- a/security/samsung/dsms/test/security_dsms_access_control_test.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - */ - -#include <kunit/mock.h> -#include <kunit/test.h> -#include <linux/dsms.h> -#include "dsms_access_control.h" -#include "dsms_test.h" - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void find_policy_entry_test(struct test *test) -{ - EXPECT_EQ(test, NULL, find_policy_entry("test")); -} - -static void compare_policy_entries_test(struct test *test) -{ - struct dsms_policy_entry entry; - - entry.file_path = "/path/test"; - entry.function_name = "myfunction"; - EXPECT_GT(test, compare_policy_entries("myfunction1", &entry), 0); - EXPECT_EQ(test, compare_policy_entries("myfunction", &entry), 0); - EXPECT_LT(test, compare_policy_entries("myfunct", &entry), 0); - entry.function_name = "myfunction1"; - EXPECT_EQ(test, compare_policy_entries("myfunction1", &entry), 0); - EXPECT_LT(test, compare_policy_entries("Myfunction", &entry), 0); -} - -static void should_ignore_allowlist_suffix_test(struct test *test) -{ - EXPECT_EQ(test, 1, should_ignore_allowlist_suffix()); -} - -static void dsms_policy_size_test(struct test *test) -{ - EXPECT_EQ(test, 3, dsms_policy_size()); - EXPECT_LT(test, 0, dsms_policy_size()); - EXPECT_GT(test, 4, dsms_policy_size()); -} - -static void dsms_verify_access_test(struct test *test) -{ - EXPECT_EQ(test, DSMS_DENY, dsms_verify_access(NULL)); -} - -/* - * dsms_verify_access_address_not_in_kallsyms_test - caller address not in - * kallsyms test case - * @test - struct test pointer to the running test instance context. - * - * Test the case where the address passed to dsms_verify_access is not null and - * is not in the kallsyms. It is expected to return a DSMS_DENY. 
- */ -static void dsms_verify_access_address_not_in_kallsyms_test(struct test *test) -{ - EXPECT_EQ(test, DSMS_DENY, dsms_verify_access((const void *)0x1)); -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case dsms_access_control_test_cases[] = { - TEST_CASE(compare_policy_entries_test), - TEST_CASE(should_ignore_allowlist_suffix_test), - TEST_CASE(dsms_policy_size_test), - TEST_CASE(dsms_verify_access_test), - TEST_CASE(find_policy_entry_test), - TEST_CASE(dsms_verify_access_address_not_in_kallsyms_test), - {}, -}; - -static struct test_module dsms_access_control_test_module = { - .name = "security-dsms-access-control-test", - .test_cases = dsms_access_control_test_cases, -}; -module_test(dsms_access_control_test_module); diff --git a/security/samsung/dsms/test/security_dsms_debug_test.c b/security/samsung/dsms/test/security_dsms_debug_test.c deleted file mode 100644 index a7afbfd2388b..000000000000 --- a/security/samsung/dsms/test/security_dsms_debug_test.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - */ - -#include <kunit/test.h> -#include <linux/dsms.h> -#include "dsms_kernel_api.h" -#include "dsms_test.h" - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void security_dsms_debug_test(struct test *test) -{ - DSMS_LOG_INFO("DSMS Debug unit test %x\n", 0xdeadbeef); -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case security_dsms_debug_test_cases[] = { - TEST_CASE(security_dsms_debug_test), - {}, -}; - -static struct test_module security_dsms_debug_test_module = { - .name = "security-dsms-debug-test", - .test_cases = security_dsms_debug_test_cases, -}; -module_test(security_dsms_debug_test_module); diff --git a/security/samsung/dsms/test/security_dsms_init_test.c b/security/samsung/dsms/test/security_dsms_init_test.c deleted file mode 100644 index b295d39baf1f..000000000000 --- a/security/samsung/dsms/test/security_dsms_init_test.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
- */ - -#include <kunit/mock.h> -#include <kunit/test.h> -#include "dsms_init.h" -#include "dsms_test.h" - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void dsms_is_initialized_test(struct test *test) -{ - EXPECT_TRUE(test, dsms_is_initialized()); -} - -static void dsms_init_test(struct test *test) -{ - EXPECT_EQ(test, dsms_init_kunit_helper(), 0); -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case dsms_init_test_cases[] = { - TEST_CASE(dsms_is_initialized_test), - TEST_CASE(dsms_init_test), - {}, -}; - -static struct test_module dsms_init_test_module = { - .name = "security-dsms-init-test", - .test_cases = dsms_init_test_cases, -}; -module_test(dsms_init_test_module); diff --git a/security/samsung/dsms/test/security_dsms_kernel_api_test.c b/security/samsung/dsms/test/security_dsms_kernel_api_test.c deleted file mode 100644 index 8c35298d8555..000000000000 --- a/security/samsung/dsms/test/security_dsms_kernel_api_test.c +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - */ - -#include <kunit/mock.h> -#include <kunit/test.h> -#include <linux/dsms.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/umh.h> -#include "dsms_kernel_api.h" -#include "dsms_test.h" -#include "security_dsms_test_utils.h" - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void dsms_alloc_user_test(struct test *test) -{ - static const char *const s[] = {NULL, "", "a", "Hello, world!\nBye bye world!"}; - static const struct { int64_t i; const char *s; } i_s[] = { - {0, "0"}, {1, "1"}, {12, "12"}, {123456789L, "123456789"}, {-123, "-123"}, - }; - int i; - const char *ss; - - for (i = 0; i < ARRAY_SIZE(s); ++i) { - ss = dsms_alloc_user_string(s[i]); - EXPECT_STREQ(test, s[i] == NULL ? "" : s[i], ss); - dsms_free_user_string(ss); - } - for (i = 0; i < ARRAY_SIZE(i_s); ++i) { - ss = dsms_alloc_user_value(i_s[i].i); - EXPECT_STREQ(test, i_s[i].s, ss); - dsms_free_user_string(ss); - } -} - -static void dsms_message_cleanup_test(struct test *test) -{ - static struct subprocess_info mock_subprocess_info; - char **argv; - int i; - static const int argv_size = 5; - - atomic_inc(&message_counter); - dsms_message_cleanup(NULL); - - argv = kmalloc_array(argv_size, sizeof(char *), GFP_KERNEL); - for (i = 0; i < argv_size; i++) - argv[i] = NULL; - mock_subprocess_info.argv = argv; - atomic_inc(&message_counter); - dsms_message_cleanup(&mock_subprocess_info); -} - -static void dsms_send_message_test(struct test *test) -{ - // should fail, not yet in policy. TODO success test - EXPECT_EQ(test, -1, dsms_send_message("KUNIT", "kunit test", 0)); -} - -/* - * dsms_send_allowed_message_test - deploy two tests on the - * dsms_send_allowed_message function - * @test - struct test pointer to the running test instance context. 
- * - * Test 1 - The error-free call to the function - * Test 2 - Trigger the limit error case - * Test 3 - Trigger the memory error cases - */ -static void dsms_send_allowed_message_test(struct test *test) -{ - // Test 1 - The error-free call to the function - EXPECT_NE(test, -EBUSY, dsms_send_allowed_message("KUNIT", "kunit test", 0)); - - // Test 2 - Trigger the limit error case - atomic_set(&message_counter, MESSAGE_COUNT_LIMIT); - EXPECT_EQ(test, -EBUSY, dsms_send_allowed_message("KUNIT", "kunit test", 0)); - atomic_set(&message_counter, 0); - - // Test 3 - Trigger the memory error cases - dsms_test_request_kmalloc_fail_at(1); - EXPECT_EQ(test, -ENOMEM, dsms_send_allowed_message("KUNIT", "kunit test", 0)); - dsms_test_request_kmalloc_fail_at(2); - EXPECT_EQ(test, -ENOMEM, dsms_send_allowed_message("KUNIT", "kunit test", 0)); - dsms_test_cancel_kmalloc_fail_requests(); -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case security_dsms_kernel_api_test_cases[] = { - TEST_CASE(dsms_alloc_user_test), - TEST_CASE(dsms_message_cleanup_test), - TEST_CASE(dsms_send_message_test), - TEST_CASE(dsms_send_allowed_message_test), - {}, -}; - -static struct test_module security_dsms_kernel_api_module = { - .name = "security-dsms-kernel-api", - .test_cases = security_dsms_kernel_api_test_cases, -}; -module_test(security_dsms_kernel_api_module); diff --git a/security/samsung/dsms/test/security_dsms_policy_test.c b/security/samsung/dsms/test/security_dsms_policy_test.c deleted file mode 100644 index d70e66ac8e3f..000000000000 --- a/security/samsung/dsms/test/security_dsms_policy_test.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
- */ - -#include <kunit/test.h> -#include <linux/dsms.h> -#include <linux/kallsyms.h> -#include <linux/string.h> -#include "dsms_access_control.h" -#include "dsms_test.h" - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void security_dsms_policy_test(struct test *test) -{ - size_t i; - - // Check whether policy entries are sorted by function_name - for (i = dsms_policy_size(); i > 1; --i) - EXPECT_TRUE(test, - strncmp(dsms_policy[i - 2].function_name, - dsms_policy[i - 1].function_name, - KSYM_NAME_LEN) <= 0); -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case security_dsms_policy_test_cases[] = { - TEST_CASE(security_dsms_policy_test), - {}, -}; - -static struct test_module security_dsms_policy_test_module = { - .name = "security-dsms-policy-test", - .test_cases = security_dsms_policy_test_cases, -}; -module_test(security_dsms_policy_test_module); diff --git a/security/samsung/dsms/test/security_dsms_rate_limit_test.c b/security/samsung/dsms/test/security_dsms_rate_limit_test.c deleted file mode 100644 index 15fc612fbae9..000000000000 --- a/security/samsung/dsms/test/security_dsms_rate_limit_test.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - */ - -#include <kunit/mock.h> -#include <kunit/test.h> -#include <linux/dsms.h> -#include <linux/errno.h> -#include "dsms_rate_limit.h" -#include "dsms_test.h" - -static u64 start_ms; - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void round_end_ms_test(struct test *test) -{ - EXPECT_EQ(test, start_ms + ((u64)(1000L)), round_end_ms(start_ms)); - EXPECT_NE(test, start_ms + ((u64)(1001L)), round_end_ms(start_ms)); -} - -static void is_new_round_test(struct test *test) -{ - u64 now_ms = dsms_get_time_ms(); - - EXPECT_EQ(test, 0, is_new_round(now_ms, start_ms)); -} - -static void dsms_check_message_rate_limit_deny_test(struct test *test) -{ - int failed = 0, i; - - for (i = dsms_get_max_messages_per_round(); i >= 0; --i) - if (dsms_check_message_rate_limit() == DSMS_DENY) - failed = 1; - EXPECT_TRUE(test, failed); -} - -static void dsms_check_message_rate_limit_success_test(struct test *test) -{ - EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit()); -} - -/* Test boundary cases (simulate clock wrapped, too many messages) */ -static void dsms_check_message_rate_limit_boundary_test(struct test *test) -{ - int old_count; - - dsms_round_start_ms -= 10; - EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit()); - old_count = dsms_message_count; - dsms_round_start_ms = 0; - dsms_message_count = dsms_get_max_messages_per_round() + 1; - EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit()); - EXPECT_EQ(test, dsms_message_count, 0); - dsms_message_count = old_count; -} - -/** - * dsms_check_message_rate_limit_reset_test - * - * This test sets the "dsms_round_start_ms" variable to the maximum value - * of an 
unsigned 64 bit type (2^64 - 1). Such modification triggers the - * "[rate limit] RESET" case on "dsms_check_message_rate_limit" function. - * - * @param test - struct test pointer to the running test instance context. - */ -static void dsms_check_message_rate_limit_reset_test(struct test *test) -{ - dsms_round_start_ms = -1; - EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit()); -} - -/* -------------------------------------------------------------------------- */ -/* Module initialization and exit functions */ -/* -------------------------------------------------------------------------- */ - -static int dsms_rate_test_init(struct test *test) -{ - dsms_rate_limit_init(); - start_ms = dsms_get_time_ms(); - return 0; -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case dsms_rate_test_cases[] = { - TEST_CASE(round_end_ms_test), - TEST_CASE(is_new_round_test), - TEST_CASE(dsms_check_message_rate_limit_deny_test), - TEST_CASE(dsms_check_message_rate_limit_success_test), - TEST_CASE(dsms_check_message_rate_limit_boundary_test), - TEST_CASE(dsms_check_message_rate_limit_reset_test), - {}, -}; - -static struct test_module dsms_rate_test_module = { - .name = "security-dsms-rate-limit-test", - .init = dsms_rate_test_init, - .test_cases = dsms_rate_test_cases, -}; -module_test(dsms_rate_test_module); diff --git a/security/samsung/dsms/test/security_dsms_test_utils.c b/security/samsung/dsms/test/security_dsms_test_utils.c deleted file mode 100644 index b847300252e6..000000000000 --- a/security/samsung/dsms/test/security_dsms_test_utils.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - */ - -#include <kunit/test.h> -#include <kunit/mock.h> -#include <linux/slab.h> -#include <linux/types.h> -#include "security_dsms_test_utils.h" - -/* test utils "sees" actual kmalloc */ -#undef kmalloc - -/* -------------------------------------------------------------------------- */ -/* General test functions: kmalloc mock function */ -/* -------------------------------------------------------------------------- */ - -/* each bit indicates if kmalloc mock should return fail (NULL) */ -static uint64_t dsms_test_kmalloc_fail_requests; - -void *dsms_test_kmalloc_mock(size_t size, gfp_t flags) -{ - bool fail; - - fail = dsms_test_kmalloc_fail_requests & 1ul; - dsms_test_kmalloc_fail_requests >>= 1; - return fail ? 
NULL : kmalloc(size, flags); -} - -/* Requests that kmalloc fails in the attempt given by argument (1 for next) */ -void dsms_test_request_kmalloc_fail_at(int attempt_no) -{ - if (attempt_no > 0) - dsms_test_kmalloc_fail_requests |= (1ul << (attempt_no-1)); -} - -/* Cancels all kmalloc fail requests */ -void dsms_test_cancel_kmalloc_fail_requests(void) -{ - dsms_test_kmalloc_fail_requests = 0; -} - -/* -------------------------------------------------------------------------- */ -/* Module test functions */ -/* -------------------------------------------------------------------------- */ - -static void dsms_test_kmalloc_mock_test(struct test *test) -{ - void *p; - - dsms_test_request_kmalloc_fail_at(1); - dsms_test_request_kmalloc_fail_at(3); - EXPECT_EQ(test, p = dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL); - kfree(p); - EXPECT_NE(test, p = dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL); - kfree(p); - EXPECT_EQ(test, p = dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL); - kfree(p); - EXPECT_NE(test, p = dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL); - kfree(p); -} - -/* -------------------------------------------------------------------------- */ -/* Module initialization and exit functions */ -/* -------------------------------------------------------------------------- */ - -static int security_dsms_test_utils_init(struct test *test) -{ - dsms_test_cancel_kmalloc_fail_requests(); - return 0; -} - -static void security_dsms_test_utils_exit(struct test *test) -{ - dsms_test_cancel_kmalloc_fail_requests(); -} - -/* -------------------------------------------------------------------------- */ -/* Module definition */ -/* -------------------------------------------------------------------------- */ - -static struct test_case security_dsms_test_utils_test_cases[] = { - TEST_CASE(dsms_test_kmalloc_mock_test), - {}, -}; - -static struct test_module security_dsms_test_utils_module = { - .name = "security-dsms-test-utils-test", - .init = security_dsms_test_utils_init, - .exit = security_dsms_test_utils_exit, - .test_cases = security_dsms_test_utils_test_cases, -}; -module_test(security_dsms_test_utils_module); diff --git a/security/samsung/dsms/test/security_dsms_test_utils.h b/security/samsung/dsms/test/security_dsms_test_utils.h deleted file mode 100644 index d085b7e8b122..000000000000 --- a/security/samsung/dsms/test/security_dsms_test_utils.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
- */ - -#ifndef _SECURITY_DSMS_TEST_UTILS_H -#define _SECURITY_DSMS_TEST_UTILS_H - -#include "dsms_test.h" - -/* -------------------------------------------------------------------------- */ -/* General test functions: kmalloc mock function */ -/* -------------------------------------------------------------------------- */ - -/* Requests that kmalloc fails in the attempt given by argument (1 for next) */ -void dsms_test_request_kmalloc_fail_at(int attempt_no); - -/* Cancels all kmalloc fail requests */ -void dsms_test_cancel_kmalloc_fail_requests(void); - -#endif /* _SECURITY_DSMS_TEST_UTILS_H */ diff --git a/security/samsung/five/five_appraise.c b/security/samsung/five/five_appraise.c index 93aeab3d074a..5253c6f172a3 100644 --- a/security/samsung/five/five_appraise.c +++ b/security/samsung/five/five_appraise.c @@ -243,7 +243,8 @@ static bool bad_fs(struct inode *inode) { if (inode->i_sb->s_magic == EXT4_SUPER_MAGIC || inode->i_sb->s_magic == F2FS_SUPER_MAGIC || - inode->i_sb->s_magic == OVERLAYFS_SUPER_MAGIC) + inode->i_sb->s_magic == OVERLAYFS_SUPER_MAGIC || + inode->i_sb->s_magic == EROFS_SUPER_MAGIC_V1) return false; return true; diff --git a/security/samsung/five/five_main.c b/security/samsung/five/five_main.c index b6f068b51d21..2b8af0e1282b 100644 --- a/security/samsung/five/five_main.c +++ b/security/samsung/five/five_main.c @@ -30,6 +30,7 @@ #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/shmem_fs.h> +#include <linux/version.h> #include "five.h" #include "five_audit.h" @@ -42,6 +43,12 @@ #include "five_dsms.h" #include "five_testing.h" +/* crash_dump in Android 12 uses this request even if Kernel doesn't + * support it */ +#ifndef PTRACE_PEEKMTETAGS +#define PTRACE_PEEKMTETAGS 33 +#endif + static const bool check_dex2oat_binary = true; static const bool check_memfd_file = true; @@ -706,7 +713,7 @@ int five_file_mmap(struct file *file, unsigned long prot) * * On success return 0. */ -int five_bprm_check(struct linux_binprm *bprm) +int __five_bprm_check(struct linux_binprm *bprm, int depth) { int rc = 0; struct task_struct *task = current; @@ -715,7 +722,7 @@ int five_bprm_check(struct linux_binprm *bprm) if (unlikely(task->ptrace)) return rc; - if (bprm->recursion_depth > 0) { + if (depth > 0) { rc = push_file_event_bunch(task, bprm->file, MMAP_CHECK); } else { struct task_integrity *tint = task_integrity_alloc(); @@ -733,6 +740,18 @@ int five_bprm_check(struct linux_binprm *bprm) return rc; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) +int five_bprm_check(struct linux_binprm *bprm, int depth) +{ + return __five_bprm_check(bprm, depth); +} +#else +int five_bprm_check(struct linux_binprm *bprm) +{ + return __five_bprm_check(bprm, bprm->recursion_depth); +} +#endif + /** * This function handles two situations: * 1. Device had been rebooted before five_sign finished. 
@@ -953,6 +972,7 @@ int five_ptrace(struct task_struct *task, long request) case PTRACE_PEEKSIGINFO: case PTRACE_GETSIGMASK: case PTRACE_GETEVENTMSG: + case PTRACE_PEEKMTETAGS: #if defined(CONFIG_ARM64) || defined(KUNIT_UML) case COMPAT_PTRACE_GETREGS: case COMPAT_PTRACE_GET_THREAD_AREA: diff --git a/security/samsung/five/five_porting.h b/security/samsung/five/five_porting.h index 743807a44409..3aa89e190d7e 100644 --- a/security/samsung/five/five_porting.h +++ b/security/samsung/five/five_porting.h @@ -26,6 +26,11 @@ #define OVERLAYFS_SUPER_MAGIC 0x794c7630 #endif +/* EROFS_SUPER_MAGIC_V1 is defined since v5.4 */ +#ifndef EROFS_SUPER_MAGIC_V1 +#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 +#endif + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 21) /* d_backing_inode is absent on some Linux Kernel 3.x. but it back porting for * few Samsung kernels: @@ -104,16 +109,29 @@ static inline ssize_t __vfs_getxattr(struct dentry *dentry, struct inode *inode, } #endif -#if defined(CONFIG_ANDROID) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) +#if defined(CONFIG_ANDROID) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) \ + || LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) /* * __vfs_getxattr was changed in Android Kernel v5.4 * https://android.googlesource.com/kernel/common/+/3484eba91d6b529cc606486a2db79513f3db6c67 + * and was reverted in Android Kernel v5.15 + * https://android.googlesource.com/kernel/common/+/e884438aa554219a6d0df3a18ff0b23ea678c36c */ #define XATTR_NOSECURITY 0x4 /* get value, do not involve security check */ #define __vfs_getxattr(dentry, inode, name, value, size, flags) \ __vfs_getxattr(dentry, inode, name, value, size) #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) +#define vfs_getxattr_alloc(dentry, name, xattr_value, size, flags) \ + vfs_getxattr_alloc(&init_user_ns, dentry, name, xattr_value, \ + size, flags) +#define __vfs_setxattr_noperm(dentry, name, value, size, flags) \ + __vfs_setxattr_noperm(&init_user_ns, dentry, name, value, size, flags) +#define __vfs_removexattr(dentry, name) \ + __vfs_removexattr(&init_user_ns, dentry, name) +#endif + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) /* __GFP_WAIT was changed to __GFP_RECLAIM in * https://lore.kernel.org/patchwork/patch/592262/ diff --git a/security/samsung/five/five_testing.h b/security/samsung/five/five_testing.h index abb9f0eb8913..d058cbce81ee 100644 --- a/security/samsung/five/five_testing.h +++ b/security/samsung/five/five_testing.h @@ -1,10 +1,7 @@ #ifndef __LINUX_FIVE_TESTING_H #define __LINUX_FIVE_TESTING_H -#ifndef FIVE_KUNIT_ENABLED -#define __mockable -#define __visible_for_testing static -#else // FIVE_KUNIT_ENABLED +#if defined(FIVE_KUNIT_ENABLED) || defined(PROCA_KUNIT_ENABLED) #define KUNIT_UML // this define should be used for adding UML-specific modifications #define __mockable __weak #define __visible_for_testing @@ -22,6 +19,9 @@ static inline int dsms_send_message(const char *feature_code, #define COMPAT_PTRACE_GETVFPREGS 27 #define COMPAT_PTRACE_GETHBPREGS 29 #endif -#endif // FIVE_KUNIT_ENABLED +#else +#define __mockable +#define __visible_for_testing static +#endif // FIVE_KUNIT_ENABLED || PROCA_KUNIT_ENABLED #endif // __LINUX_FIVE_TESTING_H diff --git a/security/samsung/five/gki/five_appraise.c b/security/samsung/five/gki/five_appraise.c index 370bf9f0479a..8f00e7d8492a 100644 --- a/security/samsung/five/gki/five_appraise.c +++ b/security/samsung/five/gki/five_appraise.c @@ -245,7 +245,8 @@ static bool bad_fs(struct inode *inode) { if (inode->i_sb->s_magic == EXT4_SUPER_MAGIC || 
inode->i_sb->s_magic == F2FS_SUPER_MAGIC || - inode->i_sb->s_magic == OVERLAYFS_SUPER_MAGIC) + inode->i_sb->s_magic == OVERLAYFS_SUPER_MAGIC || + inode->i_sb->s_magic == EROFS_SUPER_MAGIC_V1) return false; return true; @@ -440,7 +441,8 @@ int five_appraise_measurement(struct task_struct *task, int func, out: if (status == FIVE_FILE_FAIL || status == FIVE_FILE_UNKNOWN) { - task_integrity_set_reset_reason(TASK_INTEGRITY(task), cause, file); + task_integrity_set_reset_reason(TASK_INTEGRITY(task), + cause, file); five_audit_verbose(task, file, five_get_string_fn(func), prev_integrity, prev_integrity, tint_reset_cause_to_string(cause), rc); @@ -741,7 +743,7 @@ int five_fcntl_sign(struct file *file, struct integrity_label __user *label) } } else { enum task_integrity_value tint = - task_integrity_read(TASK_INTEGRITY(current)); + task_integrity_read(TASK_INTEGRITY(current)); five_audit_err(current, file, "fcntl_sign", tint, tint, "sign:no-perm", -EPERM); diff --git a/security/samsung/five/s_os/five_appraise.c b/security/samsung/five/s_os/five_appraise.c index 8b457cab9ccd..a731272f0fed 100644 --- a/security/samsung/five/s_os/five_appraise.c +++ b/security/samsung/five/s_os/five_appraise.c @@ -48,7 +48,8 @@ static bool bad_fs(struct inode *inode) { if (inode->i_sb->s_magic == EXT4_SUPER_MAGIC || inode->i_sb->s_magic == F2FS_SUPER_MAGIC || - inode->i_sb->s_magic == OVERLAYFS_SUPER_MAGIC) + inode->i_sb->s_magic == OVERLAYFS_SUPER_MAGIC || + inode->i_sb->s_magic == EROFS_SUPER_MAGIC_V1) return false; return true; diff --git a/security/samsung/mz/Kconfig b/security/samsung/mz/Kconfig new file mode 100644 index 000000000000..789f591d17e2 --- /dev/null +++ b/security/samsung/mz/Kconfig @@ -0,0 +1,26 @@ +# +# MZ configuration +# + +config MEMORY_ZEROISATION + bool "Support memory zeroisation" + default n + help + Support memory zeroisation feature which does zeroing of free pages. + The purpose of this is to zeroise specific memory + when target process exit. + +config MZ_PAGE_V2 + bool "Page functions version" + default n + help + MZ page management code have dependency on kernel. + This version need to be change when kernel page code is changed. 
+ +config MZ_USE_QSEECOM + tristate "MZ Crypto TEE Driver" + help + Enable MZ crypto Trusted Execution Environment support + MZ use crypto function and it will be works in tee + if it's supported + This driver is interface for MZ NWd and MZ ta diff --git a/security/samsung/mz/Makefile b/security/samsung/mz/Makefile new file mode 100644 index 000000000000..6e9ddf6d91f1 --- /dev/null +++ b/security/samsung/mz/Makefile @@ -0,0 +1,21 @@ +$(info MZ kernel makefile main $$CONFIG_MEMORY_ZEROISATION is [${CONFIG_MEMORY_ZEROISATION}]) +obj-$(CONFIG_MEMORY_ZEROISATION) += mz_ioctl.o mz.o mz_crypto.o mz_mem.o mz_log.o +ccflags-$(CONFIG_MEMORY_ZEROISATION) += -Wno-error \ + -Wno-unused \ + -DMZ_TA + +ifneq (,$(filter $(CONFIG_SOC_S5E9925) $(CONFIG_SOC_S5E8825) $(CONFIG_MZ_PAGE_V2), y)) + obj-$(CONFIG_MEMORY_ZEROISATION) += mz_page_v5_10.o +else + obj-$(CONFIG_MEMORY_ZEROISATION) += mz_page.o +endif + +ifneq (,$(filter userdebug eng, $(TARGET_BUILD_VARIANT))) + ccflags-$(CONFIG_MEMORY_ZEROISATION) += -DMZ_DEBUG -DDEBUG +endif + +ifeq ($(CONFIG_SEC_KUNIT), y) + GCOV_PROFILE := y + ccflags-y += -DMEZ_KUNIT_ENABLED + obj-$(CONFIG_MEMORY_ZEROISATION) += test/ +endif diff --git a/security/samsung/mz/include/linux/mz.h b/security/samsung/mz/include/linux/mz.h new file mode 100644 index 000000000000..c613289a1a71 --- /dev/null +++ b/security/samsung/mz/include/linux/mz.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_MZ_H +#define _LINUX_MZ_H + +typedef enum { + MZ_SUCCESS = 1, + MZ_GENERAL_ERROR = 0, + MZ_MALLOC_ERROR = -1, + MZ_IOCTL_OPEN_ERROR = -2, + MZ_INVALID_INPUT_ERROR = -3, + MZ_TA_FAIL = -4, + MZ_NO_TARGET = -5, + MZ_DRIVER_FAIL = -6, + MZ_PROC_NAME_GET_ERROR = -7, + MZ_GET_TS_ERROR = -8, + MZ_LOCK_FAIL = -9, + MZ_PAGE_FAIL = -10, + MZ_CRYPTO_FAIL = -11, +} MzResult; + +MzResult mz_exit(void); + +struct mz_tee_driver_fns { + MzResult (*encrypt)(uint8_t *pt, uint8_t *ct, uint8_t *iv); +}; +MzResult register_mz_tee_crypto_driver( + struct mz_tee_driver_fns *tee_driver_fns); +void unregister_mz_tee_crypto_driver(void); +extern MzResult (*load_trusted_app)(void); +extern void (*unload_trusted_app)(void); + +#endif /* _LINUX_MZ_H */ diff --git a/security/samsung/mz/mz.c b/security/samsung/mz/mz.c new file mode 100644 index 000000000000..432c8c66ce02 --- /dev/null +++ b/security/samsung/mz/mz.c @@ -0,0 +1,441 @@ +/* + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/page-isolation.h> +#include <linux/workqueue.h> +#include <trace/hooks/mz.h> +#include "mz_internal.h" +#include "mz_log.h" +#include "mz_page.h" + +static DEFINE_MUTEX(pid_list_lock); + +struct mztarget_t mz_pt_list[PRLIMIT]; +struct mutex crypto_list_lock; +struct mutex page_list_lock; +uint64_t *addr_list; +static int addr_list_count; +int addr_list_count_max; + +#ifndef CONFIG_SEC_KUNIT +static bool is_mz_target(pid_t tgid); +static bool is_mz_all_zero_target(pid_t tgid); +static MzResult mz_add_new_target(pid_t tgid); +static MzResult remove_target_from_all_list(pid_t tgid); +static struct task_struct *findts(pid_t tgid); +static void compact_addr_list(void); +static void update_list(void); +#endif /* CONFIG_SEC_KUNIT */ +static void unload_trusted_app_wq(struct work_struct *work); + +static DECLARE_WORK(mz_ta_unload_work, unload_trusted_app_wq); + +static int add_count; + +MzResult mz_exit(void) +{ + pid_t cur_tgid = current->pid; + + if (!is_mz_target(cur_tgid)) { + mz_pt_list[cur_tgid].is_ta_fail_target = false; + return MZ_SUCCESS; + } + + MZ_LOG(err_level_debug, "[MZ DEBUG] %s %d %d\n", __func__, current->tgid, current->pid); + + mz_pt_list[cur_tgid].target = false; + + remove_target_from_all_list(cur_tgid); +#ifdef MZ_TA + unload_trusted_app(); +#endif /* MZ_TA */ + + return MZ_SUCCESS; +} + +static void vh_mz_exit(void *data, struct task_struct *p) +{ + pid_t cur_tgid = p->pid; + bool ret; + + if (!is_mz_target(cur_tgid)) { + mz_pt_list[cur_tgid].is_ta_fail_target = false; + return; + } + + MZ_LOG(err_level_debug, "[MZ DEBUG] %s %d %d\n", __func__, p->tgid, p->pid); + + mz_pt_list[cur_tgid].target = false; + + remove_target_from_all_list(cur_tgid); +#ifdef MZ_TA +#ifdef CONFIG_MZ_USE_QSEECOM + ret = queue_work(system_long_wq, &mz_ta_unload_work); + if (!ret) + MZ_LOG(err_level_error, "%s unload_trusted_app workqueue fail %d\n", __func__, ret); +#else + unload_trusted_app(); +#endif +#endif /* MZ_TA */ +} + +static void unload_trusted_app_wq(struct work_struct *work) +{ + unload_trusted_app(); +} + +__visible_for_testing bool is_mz_target(pid_t tgid) +{ + if (tgid >= PRLIMIT || tgid <= 0) + return false; + + if (mz_pt_list[tgid].target) + return true; + return false; +} + +__visible_for_testing bool is_mz_all_zero_target(pid_t tgid) +{ + if (tgid >= PRLIMIT || tgid <= 0) + return false; + + if (mz_pt_list[tgid].is_ta_fail_target) + return true; + return false; +} + +MzResult mz_all_zero_set(pid_t tgid) +{ + MzResult mz_ret; + + mz_ret = mz_add_new_target(tgid); + if (mz_ret != MZ_SUCCESS) { + MZ_LOG(err_level_error, "MZ %s new target all zero set fail\n", __func__); + return mz_ret; + } + + mz_pt_list[tgid].is_ta_fail_target = true; + return mz_ret; +} + +static void compact_addr_list(void) +{ + uint64_t i = 1; + struct pid_node_t *pid_node, *tp; + struct pfn_node_encrypted_t *cur_encrypted, *t_e; + +#ifdef MZ_DEBUG + for (i = 3 ; i < addr_list_count ; i += 2) + MZ_LOG(err_level_debug, "[MZ DEBUG] %s start %d %llx %d\n", + __func__, addr_list[0], addr_list[i], addr_list[i+1]); +#endif /* MZ_DEBUG */ + + for (i = 3 ; i < addr_list[0] ; i += 2) { + if (addr_list[i] == 0) { + addr_list[i] = addr_list[addr_list_count - 1]; + addr_list[i + 1] = addr_list[addr_list_count]; + + mutex_lock(&pid_list_lock); + list_for_each_entry_safe(pid_node, tp, &pid_list, list) { + mutex_lock(&crypto_list_lock); + + list_for_each_entry_safe(cur_encrypted, t_e, + &(mz_pt_list[pid_node->tgid].mz_list_head_crypto), list) { + 
if (cur_encrypted->pa_index == addr_list_count - 1) + cur_encrypted->pa_index = i; + } + + mutex_unlock(&crypto_list_lock); + } + mutex_unlock(&pid_list_lock); + + memset(&(addr_list[addr_list_count - 1]), MZ_PAGE_POISON, sizeof(uint64_t) * 2); + addr_list_count -= 2; + i -= 2; + } + } + addr_list[0] = addr_list_count; + +#ifdef MZ_DEBUG + for (i = 3 ; i < addr_list_count ; i += 2) + MZ_LOG(err_level_debug, "[MZ DEBUG] %s end %d %llx %d\n", + __func__, addr_list[0], addr_list[i], addr_list[i+1]); +#endif /* MZ_DEBUG */ +} + +MzResult remove_target_from_all_list(pid_t tgid) +{ + struct pid_node_t *curp, *tp; + struct pfn_node_encrypted_t *cur_encrypted, *t_e; + page_node *cur_page, *t_p; + + if (tgid >= PRLIMIT || tgid <= 0) + return MZ_INVALID_INPUT_ERROR; + + mutex_lock(&crypto_list_lock); + list_for_each_entry_safe(cur_encrypted, t_e, &(mz_pt_list[tgid].mz_list_head_crypto), list) { + list_del(&(cur_encrypted->list)); + addr_list[cur_encrypted->pa_index] = 0; + kfree(cur_encrypted); + cur_encrypted = NULL; + } + mutex_unlock(&crypto_list_lock); + + mutex_lock(&page_list_lock); + list_for_each_entry_safe(cur_page, t_p, &(mz_pt_list[tgid].mz_list_head_page), list) { + list_del(&(cur_page->list)); + kfree(cur_page->mz_page); + kfree(cur_page); + cur_page = NULL; + } + mutex_unlock(&page_list_lock); + + mutex_lock( &pid_list_lock ); + list_for_each_entry_safe(curp, tp, &pid_list, list) { + if (curp->tgid == tgid) { + list_del(&(curp->list)); + kfree(curp); + curp = NULL; + MZ_LOG(err_level_debug, "%s %d\n", __func__, tgid); + } + } + mutex_unlock( &pid_list_lock ); + + compact_addr_list(); + + return MZ_SUCCESS; +} + +__visible_for_testing MzResult mz_add_new_target(pid_t tgid) +{ + struct pid_node_t *pid_node; + MzResult mz_ret = MZ_SUCCESS; + + if (tgid >= PRLIMIT || tgid <= 0) + return MZ_INVALID_INPUT_ERROR; + + if (is_mz_target(tgid)) { + return mz_ret; + } + + pid_node = kmalloc(sizeof(*pid_node), GFP_KERNEL); +#ifdef CONFIG_SEC_KUNIT + if (tgid == MALLOC_FAIL_PID && pid_node) { + kfree(pid_node); + pid_node = NULL; + } +#endif /* CONFIG_SEC_KUNIT */ + if (!pid_node) { + MZ_LOG(err_level_error, "%s pid_node kmalloc fail\n", __func__); + return MZ_MALLOC_ERROR; + } + + mz_pt_list[tgid].target = true; + mz_pt_list[tgid].is_ta_fail_target = false; + + pid_node->tgid = tgid; + INIT_LIST_HEAD(&(pid_node->list)); + + mutex_lock( &pid_list_lock ); + list_add(&(pid_node->list), &pid_list); + mutex_unlock( &pid_list_lock ); + + MZ_LOG(err_level_debug, "%s %d\n", __func__, tgid); + +#ifdef MZ_TA + mz_ret = load_trusted_app(); + if (mz_ret != MZ_SUCCESS) { + MZ_LOG(err_level_info, "%s ta fail %d, free all memory\n", __func__, mz_ret); + mz_pt_list[tgid].is_ta_fail_target = true; + } +#endif /* MZ_TA */ + + if (!isaddrset()) + set_mz_mem(); + + return mz_ret; +} + +static void update_list(void) +{ + uint64_t *new_addr_list; + + if (addr_list_count != addr_list_count_max - 2) + return; + + addr_list_count_max *= 2; + new_addr_list = kmalloc_array(addr_list_count_max, sizeof(uint64_t), GFP_KERNEL); + if (!new_addr_list) { + MZ_LOG(err_level_error, "%s new_addr_list kmalloc_array fail\n", __func__); + return; + } + memcpy(new_addr_list, addr_list, (sizeof(uint64_t) * addr_list_count) + sizeof(uint64_t)); + memset(addr_list, MZ_PAGE_POISON, (sizeof(uint64_t) * addr_list_count) + sizeof(uint64_t)); + kfree(addr_list); + addr_list = new_addr_list; + set_mz_mem(); +} + +MzResult mz_add_target_pfn(pid_t tgid, unsigned long pfn, unsigned long offset, + unsigned long len, unsigned long va, uint8_t __user 
*buf) +{ + struct pfn_node_encrypted_t *cur_encrypted; + MzResult mz_ret = MZ_SUCCESS; + struct page *target_page = pfn_to_page(pfn); + uint64_t pa; + uint64_t plain_pa_offset; + unsigned long new_pfn = 0; +#ifdef MZ_TA + uint64_t cipher_pa_offset; + uint8_t *plain_pa_offset_8, cipher_pa_offset_8[8]; +#endif + + if (pfn <= 0) + return MZ_INVALID_INPUT_ERROR; + + mz_ret = mz_add_new_target(tgid); + if (mz_ret != MZ_SUCCESS) { + return mz_ret; + } + + if (is_mz_all_zero_target(tgid)) { + return mz_ret; + } + + cur_encrypted = kmalloc(sizeof(*cur_encrypted), GFP_ATOMIC); +#ifdef CONFIG_SEC_KUNIT + if (tgid == MALLOC_FAIL_CUR && cur_encrypted) { + kfree(cur_encrypted); + cur_encrypted = NULL; + } +#endif /* CONFIG_SEC_KUNIT */ + if (!cur_encrypted) { + MZ_LOG(err_level_error, "%s cur_encrypted kmalloc fail\n", __func__); + return MZ_MALLOC_ERROR; + } + +#ifdef MZ_DEBUG + pa = page_to_phys(target_page); + MZ_LOG(err_level_debug, "%s original addr before migrate/gup %llx\n", __func__, pa + offset); +#endif + +#ifndef CONFIG_SEC_KUNIT + //Migration in case of CMA and pin + mz_ret = mz_migrate_and_pin(target_page, va, buf, &new_pfn, tgid); + if (mz_ret != MZ_SUCCESS) + goto free_alloc; +#endif /* CONFIG_SEC_KUNIT */ + + if (new_pfn != 0) + target_page = pfn_to_page(new_pfn); + pa = page_to_phys(target_page); + MZ_LOG(err_level_debug, "%s original addr after migrate/gup %llx\n", __func__, pa + offset); + + update_list(); + + plain_pa_offset = pa + offset; + +#ifdef MZ_TA + plain_pa_offset_8 = (uint8_t *)&plain_pa_offset; + mz_ret = mz_wb_encrypt(plain_pa_offset_8, cipher_pa_offset_8); + if (mz_ret != MZ_SUCCESS) + goto free_alloc; + cipher_pa_offset = *(uint64_t *)cipher_pa_offset_8; + addr_list[++addr_list_count] = cipher_pa_offset; + + MZ_LOG(err_level_debug, "%s cipher (0x%llx)\n", __func__, (unsigned long long)cipher_pa_offset); +#else + addr_list[++addr_list_count] = plain_pa_offset; +#endif + + addr_list[++addr_list_count] = len; + addr_list[0] = addr_list_count; + cur_encrypted->pa_index = addr_list_count - 1; + + INIT_LIST_HEAD(&(cur_encrypted->list)); + + mutex_lock(&crypto_list_lock); + list_add_tail(&(cur_encrypted->list), &(mz_pt_list[tgid].mz_list_head_crypto)); + mutex_unlock(&crypto_list_lock); + + MZ_LOG(err_level_debug, "%s %d %d %d (0x%llx) (0x%llx) %d\n", + __func__, tgid, ++add_count, addr_list_count, (unsigned long long)pa, offset, len); + + return mz_ret; + +free_alloc: + kfree(cur_encrypted); + return mz_ret; +} + +MzResult mzinit(void) +{ + int i; + for (i = 0 ; i < PRLIMIT ; i++) { + mz_pt_list[i].target = false; + mz_pt_list[i].is_ta_fail_target = false; + INIT_LIST_HEAD(&(mz_pt_list[i].mz_list_head_crypto)); + INIT_LIST_HEAD(&(mz_pt_list[i].mz_list_head_page)); + mutex_init(&crypto_list_lock); + mutex_init(&page_list_lock); + } + + add_count = 0; + addr_list_count = 2; + addr_list_count_max = 0; + + if (mz_addr_init()) + set_mz_mem(); + + register_trace_android_vh_mz_exit(vh_mz_exit, NULL); + + return MZ_SUCCESS; +} + +//Util +MzResult mz_kget_process_name(pid_t tgid, char* name) +{ + MzResult result = MZ_SUCCESS; + int cmdline_size; + struct task_struct *task; + + if (tgid >= PRLIMIT || tgid <= 0) + return MZ_INVALID_INPUT_ERROR; + if (name == NULL) + return MZ_INVALID_INPUT_ERROR; + + task = findts(tgid); + if (!task) + return MZ_GET_TS_ERROR; + if (task->mm == NULL) + return MZ_GET_TS_ERROR; + cmdline_size = get_cmdline(task, name, MAX_PROCESS_NAME); + if (cmdline_size == 0) + result = MZ_PROC_NAME_GET_ERROR; + name[cmdline_size] = 0; + + return result; +} + 
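+/* Resolve a tgid to its task_struct via find_vpid(); returns NULL if the id
+ * is out of range or no matching task exists.
+ */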
+__visible_for_testing struct task_struct *findts(pid_t tgid) +{ + struct task_struct *task; + + if (tgid >= PRLIMIT || tgid <= 0) + return NULL; + task = pid_task(find_vpid(tgid), PIDTYPE_PID); + return task; +} + +MzResult (*load_trusted_app)(void) = NULL; +EXPORT_SYMBOL(load_trusted_app); +void (*unload_trusted_app)(void) = NULL; +EXPORT_SYMBOL(unload_trusted_app); + diff --git a/security/samsung/mz/mz.h b/security/samsung/mz/mz.h new file mode 100644 index 000000000000..c613289a1a71 --- /dev/null +++ b/security/samsung/mz/mz.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_MZ_H +#define _LINUX_MZ_H + +typedef enum { + MZ_SUCCESS = 1, + MZ_GENERAL_ERROR = 0, + MZ_MALLOC_ERROR = -1, + MZ_IOCTL_OPEN_ERROR = -2, + MZ_INVALID_INPUT_ERROR = -3, + MZ_TA_FAIL = -4, + MZ_NO_TARGET = -5, + MZ_DRIVER_FAIL = -6, + MZ_PROC_NAME_GET_ERROR = -7, + MZ_GET_TS_ERROR = -8, + MZ_LOCK_FAIL = -9, + MZ_PAGE_FAIL = -10, + MZ_CRYPTO_FAIL = -11, +} MzResult; + +MzResult mz_exit(void); + +struct mz_tee_driver_fns { + MzResult (*encrypt)(uint8_t *pt, uint8_t *ct, uint8_t *iv); +}; +MzResult register_mz_tee_crypto_driver( + struct mz_tee_driver_fns *tee_driver_fns); +void unregister_mz_tee_crypto_driver(void); +extern MzResult (*load_trusted_app)(void); +extern void (*unload_trusted_app)(void); + +#endif /* _LINUX_MZ_H */ diff --git a/security/samsung/mz/mz_crypto.c b/security/samsung/mz/mz_crypto.c new file mode 100644 index 000000000000..e0e0a9b0522b --- /dev/null +++ b/security/samsung/mz/mz_crypto.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include "mz_internal.h" +#include "mz_log.h" + +struct mz_tee_driver_fns *g_tee_driver_fn; + +MzResult mz_wb_encrypt(uint8_t *pt, uint8_t *ct) +{ + MzResult mz_ret = MZ_SUCCESS; + uint8_t *iv; + + if (!addr_list) { + MZ_LOG(err_level_error, "%s addr_list null\n", __func__); + return MZ_CRYPTO_FAIL; + } + + iv = (uint8_t *)(&(addr_list[1])); + +#ifdef MZ_TA + //Get key from ta + mz_ret = g_tee_driver_fn->encrypt(pt, ct, iv); +#endif /* MZ_TA */ + + if (mz_ret != MZ_SUCCESS) + MZ_LOG(err_level_error, "%s ta encrypt fail, ta error %d\n", __func__, mz_ret); + + return mz_ret; +} + +//Crypto tee driver +static char is_registered; + +MzResult register_mz_tee_crypto_driver(struct mz_tee_driver_fns *tee_driver_fns) +{ + MzResult mz_ret = MZ_SUCCESS; + g_tee_driver_fn = kmalloc(sizeof(*g_tee_driver_fn), GFP_KERNEL); + if (!g_tee_driver_fn) { + MZ_LOG(err_level_error, "%s kmalloc fail\n", __func__); + mz_ret = MZ_DRIVER_FAIL; + goto exit; + } + + g_tee_driver_fn->encrypt = tee_driver_fns->encrypt; + is_registered = 1; + +exit: + return mz_ret; +} + +void unregister_mz_tee_crypto_driver(void) +{ + if (is_registered) { + kfree(g_tee_driver_fn); + g_tee_driver_fn = NULL; + is_registered = 0; + } +} + +EXPORT_SYMBOL(register_mz_tee_crypto_driver); +EXPORT_SYMBOL(unregister_mz_tee_crypto_driver); diff --git a/security/samsung/mz/mz_internal.h b/security/samsung/mz/mz_internal.h new file mode 100644 index 000000000000..6ef80c452085 --- /dev/null +++ b/security/samsung/mz/mz_internal.h @@ -0,0 +1,100 @@ +#ifndef _LINUX_MZ_INTERNAL_H +#define _LINUX_MZ_INTERNAL_H + +#include <asm/page.h> +#include <asm/tlb.h> + +#include <linux/list.h> +#include <linux/sched.h> + +#if defined(MEZ_KUNIT_ENABLED) +#include <kunit/mock.h> +#endif 
/* !defined(MEZ_KUNIT_ENABLED) */ + +#include "mz.h" + +#define MZ_PAGE_POISON 0x53 +#define PRLIMIT 32768 +#define MZ_APP_KEY_SIZE 32 +#define PFN_BYTE_LEN 32 +#define MAX_PROCESS_NAME 256 +#define RAND_SIZE 32 + +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +MzResult mzinit(void); +MzResult mz_add_target_pfn(pid_t tgid, unsigned long pfn, unsigned long offset, + unsigned long len, unsigned long va, uint8_t __user *buf); +MzResult mz_all_zero_set(pid_t tgid); +MzResult mz_kget_process_name(pid_t tgid, char *name); +MzResult mz_wb_encrypt(uint8_t *pt, uint8_t *ct); + +bool isaddrset(void); +int mz_addr_init(void); +int set_mz_mem(void); + +typedef struct pfn_node_encrypted_t { + u8 pfn[PFN_BYTE_LEN]; + int pa_index; + struct list_head list; +} pfn_node_encrypted; + +typedef struct page_node_t { + struct page **mz_page; + struct list_head list; +} page_node; + +typedef struct mztarget_t { + bool target; + struct list_head mz_list_head_crypto; + struct list_head mz_list_head_page; + bool is_ta_fail_target; +} mztarget_s; + +extern struct mutex crypto_list_lock; +extern struct mutex page_list_lock; + +typedef struct vainfo_t { + uint64_t va; + uint64_t len; + uint8_t __user *buf; +} vainfo; + +typedef struct pid_node_t { + pid_t tgid; + struct list_head list; +} pid_node; + +static LIST_HEAD(pid_list); + +#define IOC_MAGIC 'S' +#define IOCTL_MZ_SET_CMD _IOWR(IOC_MAGIC, 1, struct vainfo_t) +#define IOCTL_MZ_ALL_SET_CMD _IOWR(IOC_MAGIC, 2, struct vainfo_t) + +extern struct mztarget_t mz_pt_list[PRLIMIT]; +extern uint64_t *addr_list; +extern int addr_list_count_max; + +#ifdef MEZ_KUNIT_ENABLED +#define MALLOC_FAIL_CUR 2 +#define MALLOC_FAIL_PFN 3 +#define MALLOC_FAIL_PID 4 +#define PANIC_FAIL_PID 5 +__visible_for_testing MzResult mz_add_new_target(pid_t tgid); +__visible_for_testing bool is_mz_target(pid_t tgid); +__visible_for_testing bool is_mz_all_zero_target(pid_t tgid); +__visible_for_testing MzResult remove_target_from_all_list(pid_t tgid); +__visible_for_testing struct task_struct *findts(pid_t tgid); +__visible_for_testing long mz_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int mz_ioctl_init(void); +void mz_ioctl_exit(void); +#define IOCTL_MZ_FAIL_CMD _IOWR(IOC_MAGIC, 0, struct vainfo_t) +#else +#ifndef __visible_for_testing +#define __visible_for_testing static +#endif +#endif /* MEZ_KUNIT_ENABLED */ + +#endif /* _LINUX_MZ_INTERNAL_H */ diff --git a/security/samsung/mz/mz_ioctl.c b/security/samsung/mz/mz_ioctl.c new file mode 100644 index 000000000000..42acb943ded1 --- /dev/null +++ b/security/samsung/mz/mz_ioctl.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#include <linux/cdev.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include "mz_internal.h" +#include "mz_log.h" +#include "mz_page.h" + +#define MZ_DEV "mz_ioctl" + +static struct class *driver_class, *driver_class_old; +static dev_t mz_ioctl_device_no, mz_ioctl_device_no_old; +static struct cdev mz_ioctl_cdev; + +__visible_for_testing long mz_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = MZ_SUCCESS; +#ifndef CONFIG_SEC_KUNIT + vainfo mzvainfo = { 0, 0}; + + uint64_t ava, va, ptwi = 0, len; + uint64_t cur_len; + unsigned long pfn, old_pfn = 0; +#endif /* CONFIG_SEC_KUNIT */ + struct mm_struct *mm = current->mm; + pid_t cur_tid = current->pid; + uint8_t __user *buf; + uint8_t pname[MAX_PROCESS_NAME] = ""; + + if (!mm) { + MZ_LOG(err_level_error, "%s mm_struct fail\n", __func__); + ret = MZ_GENERAL_ERROR; + return ret; + } + +#ifndef CONFIG_SEC_KUNIT + ret = mz_kget_process_name(current->tgid, pname); + if (ret != MZ_SUCCESS) { + MZ_LOG(err_level_error, "%s get name fail %d %d %d\n", __func__, current->tgid, cur_tid, ret); + return ret; + } +#endif /* CONFIG_SEC_KUNIT */ + MZ_LOG(err_level_info, "%s start %s %d %d", __func__, pname, current->tgid, cur_tid); + + switch (cmd) { + case IOCTL_MZ_SET_CMD: +#if IS_ENABLED(CONFIG_MEMORY_ZEROISATION) +#ifndef CONFIG_SEC_KUNIT + ret = copy_from_user(&mzvainfo, (void *)arg, sizeof(mzvainfo)); + if (ret) + MZ_LOG(err_level_error, "%s copy from user error\n", __func__); + else ret=MZ_SUCCESS; + + va = mzvainfo.va; + len = mzvainfo.len; + buf = mzvainfo.buf; +#endif /* CONFIG_SEC_KUNIT */ + +#ifndef CONFIG_SEC_KUNIT + if (!isaddrset()) { + ret = set_mz_mem(); + if (ret != MZ_SUCCESS) { + MZ_LOG(err_level_error, "%s global list set fail %d\n", __func__, ret); + goto out; + } + } + + while (len > 0) { + ava = va + ptwi; + pfn = mz_ptw(ava, mm); + if (pfn == 0) { + MZ_LOG(err_level_error, "%s mz_ptw fail\n", __func__); + goto out; + } + + //pfn should be different from old one in normal case. It's for checking system error. 
+ if (old_pfn != pfn) { + if (PAGE_SIZE - (ava & (PAGE_SIZE - 1)) < len) + cur_len = (PAGE_SIZE - (ava & (PAGE_SIZE - 1))); + else + cur_len = len; + + MZ_LOG(err_level_debug, "%s %d %llx %d %llx %d\n", + __func__, pfn, va, len, ava, cur_len); + + ret = mz_add_target_pfn(cur_tid, pfn, ava & (PAGE_SIZE - 1), cur_len, ava, buf); + if (ret != MZ_SUCCESS) { + MZ_LOG(err_level_error, "%s fail %d\n", __func__, ret); + goto out; + } + + len -= cur_len; + ptwi += cur_len; + + old_pfn = pfn; + } else { + MZ_LOG(err_level_debug, "%s recheck %d %llx %d %llx %d\n", + __func__, pfn, va, len, ava, cur_len); + ptwi++; + len--; + } + } +#endif /* CONFIG_SEC_KUNIT */ +out: +#endif /* IS_ENABLED(CONFIG_MEMORY_ZEROISATION) */ + break; + case IOCTL_MZ_ALL_SET_CMD: +#if IS_ENABLED(CONFIG_MEMORY_ZEROISATION) + mz_all_zero_set(cur_tid); +#endif /* IS_ENABLED(CONFIG_MEMORY_ZEROISATION) */ + break; + default: + MZ_LOG(err_level_error, "%s unknown cmd\n", __func__); + ret = MZ_INVALID_INPUT_ERROR; + break; + } + MZ_LOG(err_level_info, "%s end %s %d %d\n", __func__, pname, current->tgid, cur_tid); + + return ret; +} + +static const struct file_operations mz_ioctl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = mz_ioctl, + .compat_ioctl = mz_ioctl, +}; + +#ifdef CONFIG_SEC_KUNIT +int mz_ioctl_init(void) +#else +static int __init mz_ioctl_init(void) +#endif +{ + int rc; + struct device *class_dev; + + MZ_LOG(err_level_info, "%s mz_ioctl\n", __func__); + + mz_ioctl_device_no_old = mz_ioctl_device_no; + driver_class_old = driver_class; + rc = alloc_chrdev_region(&mz_ioctl_device_no, 0, 1, MZ_DEV); + if (rc < 0) { + MZ_LOG(err_level_error, "%s alloc_chrdev_region failed %d\n", __func__, rc); + return rc; + } + + driver_class = class_create(THIS_MODULE, MZ_DEV); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + MZ_LOG(err_level_error, "%s class_create failed %d\n", __func__, rc); + goto unregister_chrdev_region; + } + + class_dev = device_create(driver_class, NULL, mz_ioctl_device_no, NULL, MZ_DEV); + if (!class_dev) { + rc = -ENOMEM; + MZ_LOG(err_level_error, "%s class_device_create failed %d\n", __func__, rc); + goto class_destroy; + } + + cdev_init(&mz_ioctl_cdev, &mz_ioctl_fops); + mz_ioctl_cdev.owner = THIS_MODULE; + + rc = cdev_add(&mz_ioctl_cdev, MKDEV(MAJOR(mz_ioctl_device_no), 0), 1); + if (rc < 0) { + MZ_LOG(err_level_error, "%s cdev_add failed %d\n", __func__, rc); + goto class_device_destroy; + } + + mzinit(); + + return 0; + +class_device_destroy: + device_destroy(driver_class, mz_ioctl_device_no); +class_destroy: + class_destroy(driver_class); +unregister_chrdev_region: + if (driver_class_old != NULL) { + device_destroy(driver_class_old, mz_ioctl_device_no_old); + class_destroy(driver_class_old); + driver_class_old = NULL; + unregister_chrdev_region(mz_ioctl_device_no_old, 1); + } + driver_class = NULL; + unregister_chrdev_region(mz_ioctl_device_no, 1); + return rc; +} + +#ifdef CONFIG_SEC_KUNIT +void mz_ioctl_exit(void) +#else +static void __exit mz_ioctl_exit(void) +#endif +{ + MZ_LOG(err_level_info, "%s mz_ioctl\n", __func__); + device_destroy(driver_class, mz_ioctl_device_no); + class_destroy(driver_class); + driver_class = NULL; + unregister_chrdev_region(mz_ioctl_device_no, 1); +} + + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Samsung MZ Driver"); +MODULE_VERSION("1.00"); + +#ifndef CONFIG_SEC_KUNIT +module_init(mz_ioctl_init); +module_exit(mz_ioctl_exit); +#endif /* CONFIG_SEC_KUNIT */ + + + diff --git a/security/samsung/mz/mz_log.c b/security/samsung/mz/mz_log.c new file mode 100644 index 
000000000000..644306c890b8 --- /dev/null +++ b/security/samsung/mz/mz_log.c @@ -0,0 +1,58 @@ +#include <linux/kernel.h> +#include "mz_log.h" + +#define LOG_BUF_SIZE (128 + 100) + +void mz_write_to_log(uint32_t prio, const char *tag, const char *fmt, ...) +{ + va_list ap; + int32_t len = 0; + char buf[LOG_BUF_SIZE]; + int written_amount; + + if (NULL == tag || NULL == fmt) + return; + + switch (prio) { + case err_level_debug: +#ifndef MZ_DEBUG + return; +#endif + len = snprintf(buf, LOG_BUF_SIZE, "%s_D ", tag); break; + + case err_level_info: + len = snprintf(buf, LOG_BUF_SIZE, "%s_I ", tag); break; + + case err_level_error: + len = snprintf(buf, LOG_BUF_SIZE, "%s_E ", tag); break; + + default: + len = snprintf(buf, LOG_BUF_SIZE, "%s ", tag); + break; + } + + if ((len < 0) || (LOG_BUF_SIZE - len < len)) + return; + + va_start(ap, fmt); + written_amount = vsnprintf(buf + len, LOG_BUF_SIZE - len, fmt, ap); + va_end(ap); + + if (written_amount < 0) + return; + + switch (prio) { + case err_level_debug: + pr_debug("%s", buf); break; + + case err_level_info: + pr_info("%s", buf); break; + + case err_level_error: + pr_err("%s", buf); break; + + default: + pr_info("%s", buf); + break; + } +} diff --git a/security/samsung/mz/mz_log.h b/security/samsung/mz/mz_log.h new file mode 100644 index 000000000000..87b3fbe0966c --- /dev/null +++ b/security/samsung/mz/mz_log.h @@ -0,0 +1,18 @@ +#ifndef MZ_LOG_H +#define MZ_LOG_H + +#ifndef LOG_TAG +#define LOG_TAG "MZ" +#endif // ifndef LOG_TAG + +typedef enum _err_reporting_level_t { + err_level_debug, + err_level_info, + err_level_error +} err_reporting_level_t; + +void mz_write_to_log(uint32_t prio, const char *tag, const char *fmt, ...); + +#define MZ_LOG(prio, format, ...) mz_write_to_log(prio, LOG_TAG, format, ## __VA_ARGS__) + +#endif // MZ_LOG_H diff --git a/security/samsung/mz/mz_mem.c b/security/samsung/mz/mz_mem.c new file mode 100644 index 000000000000..fed8c9db5700 --- /dev/null +++ b/security/samsung/mz/mz_mem.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/random.h> +#include <linux/sched/task.h> +#include "mz_internal.h" +#include "mz_log.h" + +#define ADDR_INIT_SIZE 56 + +#ifdef CONFIG_MZ_USE_TZDEV +static void *list_addr_mz; +#elif IS_ENABLED(CONFIG_MZ_USE_QSEECOM) +static void __iomem *list_addr_mz1; +static void __iomem *list_addr_mz2; +static void __iomem *list_addr_mz3; +#endif + +bool addrset; + +int mz_addr_init(void) +{ + struct device_node *np; +#ifdef CONFIG_MZ_USE_TZDEV + struct resource res; +#endif + addr_list = NULL; + +#ifdef CONFIG_MZ_USE_TZDEV + np = of_find_compatible_node(NULL, NULL, "sboot,mz"); + if (unlikely(!np)) { + MZ_LOG(err_level_error, "unable to find DT imem sboot,mz node\n"); + return -ENODEV; + } + + if (of_address_to_resource(np, 0, &res)) { + MZ_LOG(err_level_error, "%s of_address_to_resource fail\n", __func__); + return -ENODEV; + } + list_addr_mz = phys_to_virt(res.start); +#elif IS_ENABLED(CONFIG_MZ_USE_QSEECOM) + np = of_find_compatible_node(NULL, NULL, "samsung,security_mz1"); + if (unlikely(!np)) { + MZ_LOG(err_level_error, "unable to find DT imem samsung,security_mz1 node\n"); + return -ENODEV; + } + + list_addr_mz1 = of_iomap(np, 0); + if (unlikely(!list_addr_mz1)) { + MZ_LOG(err_level_error, "unable to map imem samsung,security_mz1 offset\n"); + return -ENODEV; + } + + np = of_find_compatible_node(NULL, NULL, "samsung,security_mz2"); + if (unlikely(!np)) { + MZ_LOG(err_level_error, "unable to find DT imem samsung,security_mz2 node\n"); + return -ENODEV; + } + + list_addr_mz2 = of_iomap(np, 0); + if (unlikely(!list_addr_mz2)) { + MZ_LOG(err_level_error, "unable to map imem samsung,security_mz2 offset\n"); + return -ENODEV; + } + + np = of_find_compatible_node(NULL, NULL, "samsung,security_mz3"); + if (unlikely(!np)) { + MZ_LOG(err_level_error, "unable to find DT imem samsung,security_mz3 node\n"); + return -ENODEV; + } + + list_addr_mz3 = of_iomap(np, 0); + if (unlikely(!list_addr_mz3)) { + MZ_LOG(err_level_error, "unable to map imem samsung,security_mz3 offset\n"); + return -ENODEV; + } + + MZ_LOG(err_level_debug, "list addr1 : 0x%pK(0x%llx)\n", list_addr_mz1, + (unsigned long long)virt_to_phys(list_addr_mz1)); + MZ_LOG(err_level_debug, "list addr2 : 0x%pK(0x%llx)\n", list_addr_mz2, + (unsigned long long)virt_to_phys(list_addr_mz2)); + MZ_LOG(err_level_debug, "list addr3 : 0x%pK(0x%llx)\n", list_addr_mz3, + (unsigned long long)virt_to_phys(list_addr_mz3)); +#endif + + addr_list = kmalloc(sizeof(uint64_t) * ADDR_INIT_SIZE, GFP_KERNEL); + + addr_list[0] = 0; + get_random_bytes(&(addr_list[1]), sizeof(addr_list[1]) * 2); + addr_list_count_max = ADDR_INIT_SIZE; + + MZ_LOG(err_level_debug, "%s iv %llx %llx\n", __func__, addr_list[1], addr_list[2]); + + addrset = false; + + return MZ_SUCCESS; +} + +int set_mz_mem(void) +{ + unsigned long long addr_list1, addr_list2; + uint32_t mz_magic = 0x4D5A4D5A; + + MZ_LOG(err_level_debug, "set_mz_mem start\n"); + + if (!addr_list) { + MZ_LOG(err_level_error, "%s list_addr null\n", __func__); + return MZ_GENERAL_ERROR; + } + + addr_list1 = (unsigned long long)virt_to_phys(addr_list); + addr_list2 = addr_list1 >> 32; + +#ifdef CONFIG_MZ_USE_TZDEV + memcpy(list_addr_mz, &addr_list1, 4); + memcpy(list_addr_mz+4, &addr_list2, 4); + memcpy(list_addr_mz+8, &mz_magic, 4); +#elif IS_ENABLED(CONFIG_MZ_USE_QSEECOM) + if (unlikely(!list_addr_mz1) || unlikely(!list_addr_mz2) || unlikely(!list_addr_mz3)) { + MZ_LOG(err_level_error, "list_addr address unmapped\n"); + 
return MZ_GENERAL_ERROR; + } + __raw_writel(addr_list1, list_addr_mz1); + __raw_writel(addr_list2, list_addr_mz2); + __raw_writel(mz_magic, list_addr_mz3); +#endif + + MZ_LOG(err_level_debug, "addr_list addr : (0x%llx) (0x%llx) (0x%llx)\n", + addr_list1, addr_list2, (unsigned long long)virt_to_phys(addr_list)); + + addrset = true; + + return MZ_SUCCESS; +} + +bool isaddrset(void) +{ + return addrset; +} diff --git a/security/samsung/mz/mz_page.c b/security/samsung/mz/mz_page.c new file mode 100644 index 000000000000..d0ede8ad29b0 --- /dev/null +++ b/security/samsung/mz/mz_page.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/list.h> +#include <linux/migrate.h> +#include <linux/mmzone.h> +#include <linux/mutex.h> +#include <linux/rmap.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/vmalloc.h> +#include <linux/kernel.h> +#include <linux/sched/mm.h> +#include "mz_internal.h" +#include "mz_log.h" +#include "mz_page.h" + +static inline struct page *mz_migrate_alloc(struct page *page, unsigned long private) +{ + return alloc_page(GFP_KERNEL); +} + +static void mz_migrate_free(struct page *page, unsigned long private) +{ + __free_page(page); +} + +unsigned long mz_get_migratetype(struct page *page) +{ + struct zone *zone; + unsigned long flags; + unsigned long migrate_type; + + // Zone lock must be held to avoid race with + // set_pageblock_migratetype() + zone = page_zone(page); + spin_lock_irqsave(&zone->lock, flags); + migrate_type = get_pageblock_migratetype(page); + spin_unlock_irqrestore(&zone->lock, flags); + + return migrate_type; +} + +void move_page_data(struct page *from, struct page *to) +{ + void *old_page_addr, *new_page_addr; +#ifdef MZ_DEBUG + uint64_t opa, npa; +#endif + + old_page_addr = kmap_atomic(from); + new_page_addr = kmap_atomic(to); + memcpy(new_page_addr, old_page_addr, PAGE_SIZE); + memset(old_page_addr, MZ_PAGE_POISON, PAGE_SIZE); + +#ifdef MZ_DEBUG + opa = page_to_phys(from); + npa = page_to_phys(to); + MZ_LOG(err_level_debug, "%s %llx %llx\n", __func__, opa, npa); +#endif + + kunmap_atomic(old_page_addr); + kunmap_atomic(new_page_addr); +} + + +MzResult mz_migrate_pages(struct page *page) +{ + int res; + MzResult mz_ret = MZ_SUCCESS; + LIST_HEAD(pages_list); + + MZ_LOG(err_level_debug, "%s start\n", __func__); + + migrate_prep(); + + res = isolate_lru_page(page); + if (res < 0) { + MZ_LOG(err_level_error, "%s isolate_lru_page fail %d\n", __func__, res); + return MZ_PAGE_FAIL; + } + + list_add_tail(&page->lru, &pages_list); + put_page(page); + /* page will be refilled with migrated pages later */ + page = NULL; + + res = _mz_migrate_pages(&pages_list, mz_migrate_alloc, mz_migrate_free); + if (res) { + MZ_LOG(err_level_error, "%s migrate fail %d\n", __func__, res); + putback_movable_pages(&pages_list); + mz_ret = MZ_PAGE_FAIL; + } + + MZ_LOG(err_level_debug, "%s end\n", __func__); + + return mz_ret; +} + +static __maybe_unused unsigned int gup_flags(int write, int force) +{ + unsigned int flags = 0; + + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + return flags; +} + +static int mz_verify_migration_page(struct page *page) +{ + unsigned long migrate_type; + + migrate_type = mz_get_migratetype(page); + 
if (migrate_type == MIGRATE_CMA) + return -EFAULT; + + return 0; +} + +MzResult mz_get_user_pages(struct task_struct *task, struct mm_struct *mm, unsigned long va, + struct page **target_page, int after_mig, uint8_t __user *buf) +{ + int res; + MzResult mz_ret = MZ_SUCCESS; + + MZ_LOG(err_level_debug, "%s start %llx %d %llx\n", __func__, va, after_mig, (unsigned long)buf); + + res = get_user_pages_remote(task, mm, (unsigned long)buf, 1, gup_flags(1, 0), target_page, NULL, NULL); + if (res <= 0) { + MZ_LOG(err_level_error, "%s gup fail %d\n", __func__, res); + mz_ret = MZ_PAGE_FAIL; + goto fail; + } + + if (!after_mig) + atomic64_inc(&(mm->pinned_vm)); + + MZ_LOG(err_level_debug, "%s end\n", __func__); + + return MZ_SUCCESS; + +fail: + put_page(target_page[0]); + + return mz_ret; +} + +MzResult mz_migrate_and_pin(struct page *target_page, unsigned long va, uint8_t __user *buf, + unsigned long *new_pfn, pid_t tgid) +{ + MzResult mz_ret = MZ_SUCCESS; + struct page *mig_temp = NULL; + struct page **new_page; + struct task_struct *task; + struct mm_struct *mm; + int res; + page_node *cur_page; + + task = current; + mm = get_task_mm(task); + if (!mm) { + MZ_LOG(err_level_error, "%s get_task_mm fail\n", __func__); + return MZ_PAGE_FAIL; + } + new_page = kcalloc(1, sizeof(struct page *), GFP_KERNEL); + + /* + * Holding 'mm->mmap_sem' is required to synchronize users who try to register same pages simultaneously. + * Migration is impossible without synchronization due to page refcount holding by both users. + */ + down_write(&mm->mmap_sem); + +#ifdef MZ_DEBUG + if (mz_get_migratetype(target_page) == MIGRATE_CMA) + MZ_LOG(err_level_debug, "%s target_page is CMA\n", __func__); +#endif + + mz_ret = mz_get_user_pages(task, mm, va, &new_page[0], 0, buf); + if (mz_ret != MZ_SUCCESS) + goto out_pfns; + + if (mz_get_migratetype(new_page[0]) == MIGRATE_CMA) { + mig_temp = alloc_page(GFP_KERNEL); + move_page_data(target_page, mig_temp); + + mz_ret = mz_migrate_pages(new_page[0]); + if (mz_ret != MZ_SUCCESS) + goto out_pin; + + mz_ret = mz_get_user_pages(task, mm, va, &new_page[0], 1, buf); + if (mz_ret != MZ_SUCCESS) + goto out_pin; + + res = mz_verify_migration_page(new_page[0]); + if (res != 0) { + MZ_LOG(err_level_error, "%s mz_verify_migration_page fail %d\n", __func__, res); + mz_ret = MZ_PAGE_FAIL; + goto out_pin; + } + + *new_pfn = mz_ptw(va, mm); + if (*new_pfn == 0) { + MZ_LOG(err_level_error, "%s mz_ptw fail\n", __func__); + mz_ret = MZ_PAGE_FAIL; + goto out_pin; + } + + if (mig_temp) { + move_page_data(mig_temp, pfn_to_page(*new_pfn)); + __free_page(mig_temp); + } + } + + up_write(&mm->mmap_sem); + + cur_page = kmalloc(sizeof(*cur_page), GFP_ATOMIC); + cur_page->mz_page = new_page; + INIT_LIST_HEAD(&(cur_page->list)); + + mutex_lock(&page_list_lock); + list_add_tail(&(cur_page->list), &(mz_pt_list[tgid].mz_list_head_page)); + mutex_unlock(&page_list_lock); + + return mz_ret; + +out_pin: + put_page(new_page[0]); + down_write(&mm->mmap_sem); + atomic64_dec(&(mm->pinned_vm)); + up_write(&mm->mmap_sem); +out_pfns: + up_write(&mm->mmap_sem); + kfree(new_page); + mmput(mm); + + return mz_ret; +} + +unsigned long mz_ptw(unsigned long ava, struct mm_struct *mm) +{ + pte_t pte; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep = NULL; + unsigned long pfn = 0; + + pgd = pgd_offset(mm, ava); + if (pgd_none(*pgd) || pgd_bad(*pgd)) { + MZ_LOG(err_level_error, "%s pgd_offset fail\n", __func__); + goto out; + } + pud = pud_offset(pgd, ava); + if (pud_none(*pud) || pud_bad(*pud)) { + MZ_LOG(err_level_error, 
"%s pud_offset fail\n", __func__); + goto out; + } + pmd = pmd_offset(pud, ava); + if (pmd_none(*pmd) || pmd_bad(*pmd)) { + MZ_LOG(err_level_error, "%s pmd_offset fail\n", __func__); + goto out; + } + ptep = pte_offset_map(pmd, ava); + if (!ptep) { + MZ_LOG(err_level_error, "%s pte_offset_map fail\n", __func__); + goto out; + } + + pte = *ptep; + pfn = pte_pfn(pte); + +out: + if (ptep) { + pte_unmap(ptep); + ptep = NULL; + } + + return pfn; +} + diff --git a/security/samsung/mz/mz_page.h b/security/samsung/mz/mz_page.h new file mode 100644 index 000000000000..e9f0daeb5684 --- /dev/null +++ b/security/samsung/mz/mz_page.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + + +#ifndef __MZ_PAGE_H__ +#define __MZ_PAGE_H__ + +#include <linux/kernel.h> +#include <linux/migrate.h> + +#define _mz_migrate_pages(list, alloc, free) migrate_pages((list), (alloc), (free), 0, MIGRATE_SYNC, MR_MEMORY_FAILURE) + +unsigned long mz_get_migratetype(struct page *page); +MzResult mz_migrate_pages(struct page *page); +MzResult mz_get_user_pages(struct task_struct *task, struct mm_struct *mm, unsigned long va, + struct page **target_page, int after_mig, uint8_t __user *buf); +void move_page_data(struct page *from, struct page *to); +MzResult mz_migrate_and_pin(struct page *target_page, unsigned long va, uint8_t __user *buf, + unsigned long *new_pfn, pid_t tgid); +unsigned long mz_ptw(unsigned long va, struct mm_struct *mm); + +#endif /* __MZ_PAGE_H__ */ diff --git a/security/samsung/mz/mz_page_v5_10.c b/security/samsung/mz/mz_page_v5_10.c new file mode 100644 index 000000000000..3e495dfbb270 --- /dev/null +++ b/security/samsung/mz/mz_page_v5_10.c @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#include <asm/mmu_context.h> +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/list.h> +#include <linux/migrate.h> +#include <linux/mmzone.h> +#include <linux/mutex.h> +#include <linux/rmap.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/vmalloc.h> +#include <linux/kernel.h> +#include <linux/sched/mm.h> +#include "mz_internal.h" +#include "mz_log.h" +#include "mz_page.h" + +int isolate_lru_page(struct page *page); + +static inline struct page *mz_migrate_alloc(struct page *page, unsigned long private) +{ + return alloc_page(GFP_KERNEL); +} + +static void mz_migrate_free(struct page *page, unsigned long private) +{ + __free_page(page); +} + +unsigned long mz_get_migratetype(struct page *page) +{ + struct zone *zone; + unsigned long flags; + unsigned long migrate_type; + + // Zone lock must be held to avoid race with + // set_pageblock_migratetype() + zone = page_zone(page); + spin_lock_irqsave(&zone->lock, flags); + migrate_type = get_pageblock_migratetype(page); + spin_unlock_irqrestore(&zone->lock, flags); + + return migrate_type; +} + +void move_page_data(struct page *from, struct page *to) +{ + void *old_page_addr, *new_page_addr; +#ifdef MZ_DEBUG + uint64_t opa, npa; +#endif + + old_page_addr = kmap_atomic(from); + new_page_addr = kmap_atomic(to); + memcpy(new_page_addr, old_page_addr, PAGE_SIZE); + memset(old_page_addr, MZ_PAGE_POISON, PAGE_SIZE); + +#ifdef MZ_DEBUG + opa = page_to_phys(from); + npa = page_to_phys(to); + MZ_LOG(err_level_debug, "%s %llx %llx\n", __func__, opa, npa); +#endif + + kunmap_atomic(old_page_addr); + kunmap_atomic(new_page_addr); +} + + +MzResult mz_migrate_pages(struct page *page) +{ + int res; + MzResult mz_ret = MZ_SUCCESS; + LIST_HEAD(pages_list); + + MZ_LOG(err_level_debug, "%s start\n", __func__); + + lru_add_drain_all(); + + res = isolate_lru_page(page); + if (res < 0) { + MZ_LOG(err_level_error, "%s isolate_lru_page fail %d\n", __func__, res); + return MZ_PAGE_FAIL; + } + + list_add_tail(&page->lru, &pages_list); + put_page(page); + /* page will be refilled with migrated pages later */ + page = NULL; + + res = _mz_migrate_pages(&pages_list, mz_migrate_alloc, mz_migrate_free); + if (res) { + MZ_LOG(err_level_error, "%s migrate fail %d\n", __func__, res); + putback_movable_pages(&pages_list); + mz_ret = MZ_PAGE_FAIL; + } + + MZ_LOG(err_level_debug, "%s end\n", __func__); + + return mz_ret; +} + +static __maybe_unused unsigned int gup_flags(int write, int force) +{ + unsigned int flags = 0; + + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + return flags; +} + +static int mz_verify_migration_page(struct page *page) +{ + unsigned long migrate_type; + + migrate_type = mz_get_migratetype(page); + if (migrate_type == MIGRATE_CMA) + return -EFAULT; + + return 0; +} + +MzResult mz_get_user_pages(struct task_struct *task, struct mm_struct *mm, unsigned long va, + struct page **target_page, int after_mig, uint8_t __user *buf) +{ + int res; + MzResult mz_ret = MZ_SUCCESS; + + MZ_LOG(err_level_debug, "%s start %llx %d %llx\n", __func__, va, after_mig, (unsigned long)buf); + + res = get_user_pages_remote(mm, (unsigned long)buf, 1, gup_flags(1, 0), target_page, NULL, NULL); + if (res <= 0) { + MZ_LOG(err_level_error, "%s gup fail %d\n", __func__, res); + mz_ret = MZ_PAGE_FAIL; + goto fail; + } + + if (!after_mig) + atomic64_inc(&(mm->pinned_vm)); + + MZ_LOG(err_level_debug, "%s end\n", __func__); + + return MZ_SUCCESS; + +fail: + put_page(target_page[0]); + + return mz_ret; +} + 
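+/*
+ * Pin the user page backing 'buf' for this mm. If the page currently
+ * lives in a CMA pageblock, stage the page contents in a temporary page,
+ * migrate it out of CMA, pin the migrated page, and copy the staged data
+ * back into the page found at the new PFN. The pinned page is then
+ * recorded in mz_pt_list[tgid] under page_list_lock.
+ */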
+MzResult mz_migrate_and_pin(struct page *target_page, unsigned long va, uint8_t __user *buf, + unsigned long *new_pfn, pid_t tgid) +{ + MzResult mz_ret = MZ_SUCCESS; + struct page *mig_temp = NULL; + struct page **new_page; + struct task_struct *task; + struct mm_struct *mm; + int res; + page_node *cur_page; + + task = current; + mm = get_task_mm(task); + if (!mm) { + MZ_LOG(err_level_error, "%s get_task_mm fail\n", __func__); + return MZ_PAGE_FAIL; + } + new_page = kcalloc(1, sizeof(struct page *), GFP_KERNEL); + + /* + * Holding 'mm->mmap_lock' is required to synchronize users who try to register same pages simultaneously. + * Migration is impossible without synchronization due to page refcount holding by both users. + */ + down_write(&mm->mmap_lock); + +#ifdef MZ_DEBUG + if (mz_get_migratetype(target_page) == MIGRATE_CMA) + MZ_LOG(err_level_debug, "%s target_page is CMA\n", __func__); +#endif + + mz_ret = mz_get_user_pages(task, mm, va, &new_page[0], 0, buf); + if (mz_ret != MZ_SUCCESS) + goto out_pfns; + + if (mz_get_migratetype(new_page[0]) == MIGRATE_CMA) { + mig_temp = alloc_page(GFP_KERNEL); + if (!mig_temp) { + MZ_LOG(err_level_error, "%s alloc_page fail\n", __func__); + goto out_pfns; + } + move_page_data(target_page, mig_temp); + + mz_ret = mz_migrate_pages(new_page[0]); + if (mz_ret != MZ_SUCCESS) + goto out_pin; + + mz_ret = mz_get_user_pages(task, mm, va, &new_page[0], 1, buf); + if (mz_ret != MZ_SUCCESS) + goto out_pin; + + res = mz_verify_migration_page(new_page[0]); + if (res != 0) { + MZ_LOG(err_level_error, "%s mz_verify_migration_page fail %d\n", __func__, res); + mz_ret = MZ_PAGE_FAIL; + goto out_pin; + } + + *new_pfn = mz_ptw(va, mm); + if (*new_pfn == 0) { + MZ_LOG(err_level_error, "%s mz_ptw fail\n", __func__); + mz_ret = MZ_PAGE_FAIL; + goto out_pin; + } + + if (mig_temp) { + move_page_data(mig_temp, pfn_to_page(*new_pfn)); + __free_page(mig_temp); + } + } + + up_write(&mm->mmap_lock); + + cur_page = kmalloc(sizeof(*cur_page), GFP_ATOMIC); + cur_page->mz_page = new_page; + INIT_LIST_HEAD(&(cur_page->list)); + + mutex_lock(&page_list_lock); + list_add_tail(&(cur_page->list), &(mz_pt_list[tgid].mz_list_head_page)); + mutex_unlock(&page_list_lock); + + return mz_ret; + +out_pin: + put_page(new_page[0]); + down_write(&mm->mmap_lock); + atomic64_dec(&(mm->pinned_vm)); + up_write(&mm->mmap_lock); +out_pfns: + up_write(&mm->mmap_lock); + kfree(new_page); + mmput(mm); + + return mz_ret; +} + +unsigned long mz_ptw(unsigned long ava, struct mm_struct *mm) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte = NULL; + unsigned long pfn = 0; + + pgd = pgd_offset(mm, ava); + if (!pgd_present(*pgd)) { + MZ_LOG(err_level_error, "%s pgd_offset fail\n", __func__); + goto out; + } + p4d = p4d_offset(pgd, ava); + if (!p4d_present(*p4d)) { + MZ_LOG(err_level_error, "%s p4d_offset fail\n", __func__); + goto out; + } + pud = pud_offset(p4d, ava); + if (!pud_present(*pud)) { + MZ_LOG(err_level_error, "%s pud_offset fail\n", __func__); + goto out; + } + pmd = pmd_offset(pud, ava); + if (!pmd_present(*pmd)) { + MZ_LOG(err_level_error, "%s pmd_offset fail\n", __func__); + goto out; + } + pte = pte_offset_map(pmd, ava); + + pfn = pte_pfn(*pte); + +out: + if (pte) { + pte_unmap(pte); + pte = NULL; + } + + return pfn; +} + diff --git a/security/samsung/mz/test/Makefile b/security/samsung/mz/test/Makefile new file mode 100644 index 000000000000..cf75b3c2ca4a --- /dev/null +++ b/security/samsung/mz/test/Makefile @@ -0,0 +1 @@ +obj-y = security_mz_test.o diff --git 
a/security/samsung/mz/test/security_mz_test.c b/security/samsung/mz/test/security_mz_test.c new file mode 100644 index 000000000000..b318514ebcd8 --- /dev/null +++ b/security/samsung/mz/test/security_mz_test.c @@ -0,0 +1,152 @@ +#include <kunit/test.h> +#include <kunit/mock.h> +#include <linux/printk.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/unistd.h> +#include <linux/errno.h> +#include <linux/mz.h> + +int test_pid = 10; + +static void security_mz_add_target_pfn_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mzinit()); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_add_target_pfn(test_pid, 99999)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_all_zero_set(test_pid)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_add_target_pfn(test_pid, 100000)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_add_target_pfn(0, 99999)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_add_target_pfn(PRLIMIT, 99999)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_add_target_pfn(test_pid, 0)); + KUNIT_EXPECT_EQ(test, MZ_MALLOC_ERROR, mz_add_target_pfn(MALLOC_FAIL_CUR, 99999)); + KUNIT_EXPECT_EQ(test, MZ_MALLOC_ERROR, mz_add_target_pfn(MALLOC_FAIL_PFN, 99999)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(test_pid)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(MALLOC_FAIL_CUR)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(MALLOC_FAIL_PFN)); //*/ +} + +static void security_mz_add_new_target_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_add_new_target(test_pid)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_add_new_target(0)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_add_new_target(PRLIMIT)); + KUNIT_EXPECT_EQ(test, MZ_MALLOC_ERROR, mz_add_new_target(MALLOC_FAIL_PID)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(test_pid)); //*/ +} + +static void security_mz_is_mz_target_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_add_target_pfn(test_pid, 99999)); + KUNIT_EXPECT_TRUE(test, is_mz_target(test_pid)); + KUNIT_EXPECT_FALSE(test, is_mz_target(999)); + KUNIT_EXPECT_FALSE(test, is_mz_target(0)); + KUNIT_EXPECT_FALSE(test, is_mz_target(PRLIMIT)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(test_pid)); //*/ +} + +static void security_mz_is_mz_all_zero_target_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_all_zero_set(test_pid)); + KUNIT_EXPECT_TRUE(test, is_mz_all_zero_target(test_pid)); + KUNIT_EXPECT_FALSE(test, is_mz_all_zero_target(999)); + KUNIT_EXPECT_FALSE(test, is_mz_all_zero_target(0)); + KUNIT_EXPECT_FALSE(test, is_mz_all_zero_target(PRLIMIT)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(test_pid)); //*/ +} + +static void security_mz_all_zero_set_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_all_zero_set(test_pid)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_all_zero_set(0)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_all_zero_set(PRLIMIT)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(test_pid)); //*/ +} + +static void security_mz_remove_target_from_all_list_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_SUCCESS, 
mz_add_target_pfn(test_pid, 1)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(test_pid)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, remove_target_from_all_list(0)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, remove_target_from_all_list(PRLIMIT)); //*/ +} + +static void security_mz_exit_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_exit()); +} + +static void security_mz_util_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_kget_process_name(test_pid, NULL)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_kget_process_name(0, NULL)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_kget_process_name(PRLIMIT, NULL)); + KUNIT_EXPECT_NULL(test, findts(0)); + KUNIT_EXPECT_NULL(test, findts(PRLIMIT)); //*/ +} + +static void security_mz_ioctl_test(struct kunit *test) +{ + pr_info("MZ %s start\n", __func__); + +/* KUNIT_EXPECT_EQ(test, 0, mz_ioctl_init()); + KUNIT_EXPECT_EQ(test, -ENOMEM, mz_ioctl_init()); + KUNIT_EXPECT_EQ(test, 0, mz_ioctl_init()); + KUNIT_EXPECT_EQ(test, MZ_GENERAL_ERROR, mz_ioctl(NULL, IOCTL_MZ_SET_CMD, 0)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, mz_ioctl(NULL, IOCTL_MZ_ALL_SET_CMD, 0)); + KUNIT_EXPECT_EQ(test, MZ_INVALID_INPUT_ERROR, mz_ioctl(NULL, IOCTL_MZ_FAIL_CMD, 0)); + KUNIT_EXPECT_EQ(test, MZ_SUCCESS, remove_target_from_all_list(current->tgid)); + mz_ioctl_exit(); //*/ +} + +static int security_mz_test_init(struct kunit *test) +{ + return 0; +} + +static void security_mz_test_exit(struct kunit *test) +{ + test_pid++; + if (test_pid >= PRLIMIT) + test_pid = 10; +} + +static struct kunit_case security_mz_test_cases[] = { + KUNIT_CASE(security_mz_add_target_pfn_test), + KUNIT_CASE(security_mz_add_new_target_test), + KUNIT_CASE(security_mz_is_mz_target_test), + KUNIT_CASE(security_mz_is_mz_all_zero_target_test), + KUNIT_CASE(security_mz_all_zero_set_test), + KUNIT_CASE(security_mz_remove_target_from_all_list_test), + KUNIT_CASE(security_mz_exit_test), + KUNIT_CASE(security_mz_util_test), + KUNIT_CASE(security_mz_ioctl_test), + {}, +}; + +static struct kunit_suite security_mz_test_module = { + .name = "security-mz-test", + .init = security_mz_test_init, + .exit = security_mz_test_exit, + .test_cases = security_mz_test_cases, +}; +kunit_test_suites(&security_mz_test_module); + +MODULE_LICENSE("GPL v2"); diff --git a/security/samsung/mz_tee_driver/Kconfig b/security/samsung/mz_tee_driver/Kconfig new file mode 100644 index 000000000000..dbff818f69f1 --- /dev/null +++ b/security/samsung/mz_tee_driver/Kconfig @@ -0,0 +1,48 @@ +# +# TEE Driver configuration +# + +config MZ_TEE_DRIVER + tristate "Crypto TEE Driver" + depends on MEMORY_ZEROISATION + default y + help + Enable MZ crypto Trusted Execution Environment support + MZ use crypto function and it will be works in tee + if it's supported + This driver is interface for MZ NWd and MZ ta + +choice + prompt "MZ Secure OS" + depends on MZ_TEE_DRIVER + default MZ_USE_QSEECOM if QSEECOM + default MZ_USE_TZDEV if TZDEV + default MZ_USE_TRUSTONIC if TRUSTONIC_TEE + default MZ_DEFAULT + help + Select Secure OS for MZ + + config MZ_USE_TRUSTONIC + bool "MZ based on Trustonic Secure OS" + depends on TRUSTONIC_TEE + help + Use Trustonic as base Trusted Execution Environment + + config MZ_USE_TZDEV + bool "MZ based on TEEgris Secure OS" + depends on TZDEV + help + Use TEEgris as base Trusted Execution Environment + + config MZ_USE_QSEECOM + bool "MZ based on Qualcomm Secure OS" 
+ depends on QSEECOM + help + Use Qualcomm as base Trusted Execution Environment + + config MZ_DEFAULT + bool "MZ Disabled" + help + MZ disabled +endchoice + diff --git a/security/samsung/mz_tee_driver/Makefile b/security/samsung/mz_tee_driver/Makefile new file mode 100644 index 000000000000..e8aeeb1fe7b9 --- /dev/null +++ b/security/samsung/mz_tee_driver/Makefile @@ -0,0 +1,18 @@ +# +# Crypto TEE Driver +# + +obj-$(CONFIG_MZ_TEE_DRIVER) += tee_driver.o + +tee_driver-$(CONFIG_MZ_USE_QSEECOM) += mz_tee_driver_qc.o +tee_driver-$(CONFIG_MZ_USE_TZDEV) += mz_tee_driver_teegris.o + +ccflags-$(CONFIG_MZ_TEE_DRIVER) += -I$(srctree)/drivers/security/samsung/mz +ccflags-$(CONFIG_MZ_TEE_DRIVER) += -I$(srctree)/drivers/misc +ccflags-$(CONFIG_MZ_USE_TZDEV) += -I$(srctree)/drivers/misc/tzdev/include + +subdir-ccflags-y := -Wformat + +ifneq (,$(filter userdebug eng, $(TARGET_BUILD_VARIANT))) + ccflags-y += -DMZ_DEBUG -DDEBUG +endif diff --git a/security/samsung/mz_tee_driver/mz_tee_driver.c b/security/samsung/mz_tee_driver/mz_tee_driver.c new file mode 100644 index 000000000000..d028d8ba83fc --- /dev/null +++ b/security/samsung/mz_tee_driver/mz_tee_driver.c @@ -0,0 +1,397 @@ +/* + * TEE Driver + * + * Copyright (C) 2019 Samsung Electronics, Inc. + * An Seongjin, <seongjin.an@samsung.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/firmware.h> +#include <linux/mz.h> + +#include "qseecom_kernel.h" + +/////////////////////////////////////////// TA message struct +#define MAX_SHMEM_LEN_BYTES (20 * 1024) +typedef uint32_t tciCommandId_t; +typedef uint32_t tciResponseId_t; +typedef uint32_t tciReturnCode_t; + +/** + * TCI command header. + */ +struct { + tciCommandId_t commandId; /**< Command ID */ +} tciCommandHeader_t; + +/** + * TCI response header. + */ +struct { + tciResponseId_t responseId; /**< Response ID (must be command ID | RSP_ID_MASK )*/ + tciReturnCode_t returnCode; /**< Return code of command */ +} tciResponseHeader_t; + + +struct { + tciCommandHeader_t header; /**< Command header */ + uint32_t number; /**< Length of data to process */ +} cmdTest_t; + +/** + * Response structure Trustlet -> Trustlet Connector. + */ +struct { + tciResponseHeader_t header; /**< Response header */ + int32_t status; + uint32_t value; +} rspTest_t; + +struct { + uint32_t buffer[MAX_SHMEM_LEN_BYTES]; + uint32_t bufferSize; +} msgData_t; + +/** + * TCI message data. 
+ */ +struct { + cmdTest_t command; + rspTest_t response; + msgData_t msgData; +} tciMessage_t; +/////////////////////////////////////////// TA message struct + +#define AES_BLOCK_SIZE_FOR_TZ 1024 +#define CMD_GETKEY 1 +#define CMD_REMOVEKEY 2 +#define QSEE_DEVICE_APP_DIR "/vendor/firmware_mnt/image/" +#define QSEE_APP_NAME "wsm" +#define MZ_PATH_MAX 4096 + +struct tci_msg { + uint32_t cmd; + uint32_t pid; + uint8_t encrypt[AES_BLOCK_SIZE_FOR_TZ]; + uint16_t encrypt_len; +} __packed; + +struct tee_msg { + struct completion *comp; + uint16_t cmd; + uint32_t tgid; + void *encrypt; + uint16_t encrypt_len; + uint16_t rc; + struct list_head queue; +}; + +static int send_cmd(uint16_t cmd, uint32_t tgid, + void *encrypt, + uint16_t encrypt_len); + +static DEFINE_SPINLOCK(tee_msg_lock); +static LIST_HEAD(tee_msg_queue); +struct task_struct *mz_tee_msg_task; +static struct qseecom_handle *session; + +static int tee_msg_thread(void *arg) +{ + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + break; + } + if (list_empty(&tee_msg_queue)) + schedule(); + set_current_state(TASK_RUNNING); + + spin_lock(&tee_msg_lock); + while (!list_empty(&tee_msg_queue)) { + struct tee_msg *send_cmd_args; + int rc; + + send_cmd_args = list_entry(tee_msg_queue.next, + struct tee_msg, queue); + list_del_init(&send_cmd_args->queue); + spin_unlock(&tee_msg_lock); + + rc = send_cmd(send_cmd_args->cmd, + send_cmd_args->tgid, + send_cmd_args->encrypt, + send_cmd_args->encrypt_len); + send_cmd_args->rc = rc; + // when processing tee_iovec comp is not NULL + // only for last cmd in array + if (send_cmd_args->comp) + complete(send_cmd_args->comp); + spin_lock(&tee_msg_lock); + } + spin_unlock(&tee_msg_lock); + } + + return 0; +} + +static int send_cmd(uint16_t cmd, + uint32_t tgid, + void *encrypt, + uint16_t encrypt_len) +{ + + int iResult = 0; + tciMessage_t *tci; + tciMessage_t *rsptci; + uint32_t retVal = 0; + struct tci_msg *mzbuffer; + + if (session == NULL) { + pr_err("MZ %s ta session is null\n", __func__); + return -1; + } + + if ((tciMessage_t *)(session)->sbuf == NULL) { + pr_err("MZ %s sbuf is null\n", __func__); + return -1; + } + + tci = (tciMessage_t *)((session)->sbuf); + rsptci = (tciMessage_t *)((session)->sbuf + sizeof(tciMessage_t)); + + memset(tci, 0x00, sizeof(tciMessage_t)); + memset(rsptci, 0x00, sizeof(tciMessage_t)); + + + tci->command.header.commandId = cmd; + tci->msgData.bufferSize = sizeof(struct tci_msg); + + mzbuffer = kmalloc(sizeof(struct tci_msg), GFP_KERNEL); + if (!mzbuffer) + return -1; + + mzbuffer->cmd = cmd; + mzbuffer->pid = tgid; + if (encrypt != NULL) { + memcpy(mzbuffer->encrypt, encrypt, encrypt_len); + mzbuffer->encrypt_len = encrypt_len; + } + + memcpy(tci->msgData.buffer, mzbuffer, sizeof(struct tci_msg)); + + iResult = qseecom_set_bandwidth(session, true); + if (iResult != 0) { + pr_err("MZ %s qseecom_set_bandwidth fail\n", __func__); + return -1; + } + + // send command + iResult = qseecom_send_command(session, + (char *)tci, + sizeof(tciMessage_t), + (char *)rsptci, + sizeof(tciMessage_t)); + if (iResult != 0) { + iResult = qseecom_set_bandwidth(session, false); + if (iResult != 0) { + pr_err("MZ %s qseecom_set_bandwidth fail 2\n", __func__); + return -1; + } + pr_err("MZ %s qseecom_send_command fail\n", __func__); + return -1; + } + + iResult = qseecom_set_bandwidth(session, false); + if (iResult != 0) { + pr_err("MZ %s qseecom_set_bandwidth fail 3\n", __func__); + return -1; + } + + // Check the Trustlet 
return code + if (rsptci->response.header.returnCode != 0) { + pr_err("MZ %s ta return error %d\n", __func__, rsptci->response.header.returnCode); + return -1; + } + + // Read result from TCI buffer + retVal = rsptci->response.status; + if (retVal != 1) { + pr_err("MZ %s ta return status error %d\n", __func__, retVal); + goto exit_error; + } + + if (rsptci->msgData.bufferSize > MAX_SHMEM_LEN_BYTES) { + pr_err("MZ %s ta return buffer size error\n", __func__); + goto exit_error; + } + + memcpy(mzbuffer, rsptci->msgData.buffer, sizeof(struct tci_msg)); + + if (encrypt != NULL) { + encrypt_len = mzbuffer->encrypt_len; + memcpy(encrypt, mzbuffer->encrypt, encrypt_len); + } + + kfree(mzbuffer); + + return 1; + +exit_error: + memset(tci, 0x00, sizeof(tciMessage_t)); + memset(rsptci, 0x00, sizeof(tciMessage_t)); + + return retVal; +} + +static int send_cmd_kthread(uint16_t cmd, uint32_t tgid, u8 *encrypt, uint16_t encrypt_len) +{ + struct completion cmd_sent; + struct tee_msg cmd_msg; + + init_completion(&cmd_sent); + + cmd_msg.comp = &cmd_sent; + cmd_msg.cmd = cmd; + cmd_msg.tgid = tgid; + cmd_msg.encrypt = encrypt; + cmd_msg.encrypt_len = encrypt_len; + cmd_msg.rc = -EBADMSG; + + spin_lock(&tee_msg_lock); + list_add_tail(&cmd_msg.queue, &tee_msg_queue); + spin_unlock(&tee_msg_lock); + wake_up_process(mz_tee_msg_task); + wait_for_completion(&cmd_sent); + return cmd_msg.rc; +} + +MzResult getkeyt(uint32_t tgid, u8 *key, uint16_t key_len) +{ + MzResult mzret = MZ_SUCCESS; + int taret; + + taret = send_cmd_kthread(CMD_GETKEY, tgid, key, key_len); + if (taret != MZ_TA_SUCCESS) { + pr_err("MZ %s ta return error %d\n", __func__, taret); + mzret = MZ_TA_FAIL; + } + return mzret; +} + +MzResult removekeyt(uint32_t tgid) +{ + MzResult mzret = MZ_SUCCESS; + int taret; + + taret = send_cmd_kthread(CMD_REMOVEKEY, tgid, NULL, 0); + if (taret != MZ_TA_SUCCESS) { + pr_err("MZ %s ta return error %d\n", __func__, taret); + mzret = MZ_TA_FAIL; + } + return mzret; +} + +MzResult load_trusted_app(void) +{ + int qsee_res = 0; + int tci_size = 0; + MzResult mzret = MZ_SUCCESS; + + pr_err("MZ %s target_count %d\n", __func__, target_count); //TODO remove + + if (target_count != 0 && session != NULL) + return mzret; + + if (session != NULL) { + pr_err("MZ %s ta session is not null\n", __func__); + return MZ_TA_FAIL; + } + + session = NULL; + + // Open the QSEE device + tci_size = QSEECOM_ALIGN(2 * sizeof(tciMessage_t)); + qsee_res = qseecom_start_app(&session, QSEE_APP_NAME, tci_size); + if (qsee_res != 0) { + pr_err("MZ %s qseecom_start_app fail %d\n", __func__, qsee_res); + mzret = MZ_TA_FAIL; + } else { + pr_info("MZ ta load\n"); + } + + return mzret; +} + +static int register_tee_driver(void) +{ + struct mz_tee_driver_fns fn = { + .getkey = getkeyt, + .removekey = removekeyt, + }; + + return register_mz_tee_crypto_driver(&fn); +} + +static void unregister_tee_driver(void) +{ + unregister_mz_tee_crypto_driver(); +} + +void unload_trusted_app(void) +{ + int iResult = 0; + + pr_err("MZ %s target_count %d\n", __func__, target_count); //TODO remove + if (target_count != 0 && session != NULL) + return; + + if (session == NULL) { + pr_err("MZ %s ta session is null\n", __func__); + return; + } + + iResult = qseecom_shutdown_app(&session); + session = NULL; + pr_info("MZ ta unload\n"); +} + +static int __init tee_driver_init(void) +{ + int rc = 0; + + mz_tee_msg_task = kthread_run(tee_msg_thread, NULL, "mz_tee_msg_thread"); + if (IS_ERR(mz_tee_msg_task)) { + rc = PTR_ERR(mz_tee_msg_task); + pr_err("MZ Can't create 
mz_tee_msg_task: %d\n", rc); + goto out; + } + rc = register_tee_driver(); + if (rc != MZ_SUCCESS) { + pr_err("MZ Can't register tee_driver\n"); + goto out; + } + +out: + return rc; +} + +static void __exit tee_driver_exit(void) +{ + unregister_tee_driver(); + kthread_stop(mz_tee_msg_task); +} + +module_init(tee_driver_init); +module_exit(tee_driver_exit); diff --git a/security/samsung/mz_tee_driver/mz_tee_driver_qc.c b/security/samsung/mz_tee_driver/mz_tee_driver_qc.c new file mode 100644 index 000000000000..0fe8e39ac48a --- /dev/null +++ b/security/samsung/mz_tee_driver/mz_tee_driver_qc.c @@ -0,0 +1,393 @@ +/* + * TEE Driver + * + * Copyright (C) 2019 Samsung Electronics, Inc. + * An Seongjin, <seongjin.an@samsung.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/firmware.h> +#include <linux/mz.h> + +#include "qseecom_kernel.h" + +int target_count; + +/////////////////////////////////////////// TA message struct +#define MAX_SHMEM_LEN_BYTES 1024 +typedef uint32_t tciCommandId_t; +typedef uint32_t tciResponseId_t; +typedef uint32_t tciReturnCode_t; + +/** + * TCI command header. + */ +typedef struct _tciCommandHeader_t { + tciCommandId_t commandId; /**< Command ID */ +} tciCommandHeader_t; + +/** + * TCI response header. + */ +typedef struct _tciResponseHeader_t { + tciResponseId_t responseId; /**< Response ID (must be command ID | RSP_ID_MASK )*/ + tciReturnCode_t returnCode; /**< Return code of command */ +} tciResponseHeader_t; + + +typedef struct _cmdTest_t { + tciCommandHeader_t header; /**< Command header */ + uint32_t number; /**< Length of data to process */ +} cmdTest_t; + +/** + * Response structure Trustlet -> Trustlet Connector. + */ +typedef struct _rspTest_t { + tciResponseHeader_t header; /**< Response header */ + int32_t status; + uint32_t value; +} rspTest_t; + +typedef struct _msgData_t { + uint32_t buffer[MAX_SHMEM_LEN_BYTES]; + uint32_t bufferSize; +} msgData_t; + +/** + * TCI message data. 
+ */ +typedef struct _tciMessage_t { + cmdTest_t command; + rspTest_t response; + msgData_t msgData; +} tciMessage_t; +/////////////////////////////////////////// TA message struct + +#define AES_BLOCK_SIZE_FOR_TZ 1024 +#define CMD_MZ_WB_ENCRYPT 0x900 +#define CMD_REMOVEKEY 2 +#define QSEE_DEVICE_APP_DIR "/vendor/firmware_mnt/image/" +#define QSEE_APP_NAME "mz" +#define MZ_PATH_MAX 4096 + +struct tci_msg { + uint32_t cmd; + uint32_t pid; + uint8_t encrypt[8]; + uint8_t iv[16]; +} __packed; + +struct tee_msg { + struct completion *comp; + uint16_t cmd; + uint32_t tgid; + void *encrypt; + void *iv; + uint16_t rc; + struct list_head queue; +}; + +static int send_cmd(uint16_t cmd, uint32_t tgid, + void *encrypt, + void *iv); + +static DEFINE_SPINLOCK(tee_msg_lock); +static LIST_HEAD(tee_msg_queue); +struct task_struct *mz_tee_msg_task; +static struct qseecom_handle *session; + +static int tee_msg_thread(void *arg) +{ + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + break; + } + if (list_empty(&tee_msg_queue)) + schedule(); + set_current_state(TASK_RUNNING); + + spin_lock(&tee_msg_lock); + while (!list_empty(&tee_msg_queue)) { + struct tee_msg *send_cmd_args; + int rc; + + send_cmd_args = list_entry(tee_msg_queue.next, + struct tee_msg, queue); + list_del_init(&send_cmd_args->queue); + spin_unlock(&tee_msg_lock); + + rc = send_cmd(send_cmd_args->cmd, + send_cmd_args->tgid, + send_cmd_args->encrypt, + send_cmd_args->iv); + send_cmd_args->rc = rc; + // when processing tee_iovec comp is not NULL + // only for last cmd in array + if (send_cmd_args->comp) + complete(send_cmd_args->comp); + spin_lock(&tee_msg_lock); + } + spin_unlock(&tee_msg_lock); + } + + return 0; +} + +static int send_cmd(uint16_t cmd, + uint32_t tgid, + void *encrypt, + void *iv) +{ + + int iResult = 0; + tciMessage_t *tci; + tciMessage_t *rsptci; + uint32_t retVal = 0; + struct tci_msg *mzbuffer; + + if (session == NULL) { + pr_err("MZ %s ta session is null\n", __func__); + return -1; + } + + if ((tciMessage_t *)(session)->sbuf == NULL) { + pr_err("MZ %s sbuf is null\n", __func__); + return -1; + } + + tci = (tciMessage_t *)((session)->sbuf); + rsptci = (tciMessage_t *)((session)->sbuf + sizeof(tciMessage_t)); + + memset(tci, 0x00, sizeof(tciMessage_t)); + memset(rsptci, 0x00, sizeof(tciMessage_t)); + + + tci->command.header.commandId = cmd; + tci->msgData.bufferSize = sizeof(struct tci_msg); + + mzbuffer = kmalloc(sizeof(struct tci_msg), GFP_KERNEL); + if (!mzbuffer) + return -1; + + mzbuffer->cmd = cmd; + mzbuffer->pid = tgid; + if (encrypt != NULL) { + memcpy(mzbuffer->encrypt, encrypt, 8); + memcpy(mzbuffer->iv, iv, 16); + } + + memcpy(tci->msgData.buffer, mzbuffer, sizeof(struct tci_msg)); + + iResult = qseecom_set_bandwidth(session, true); + if (iResult != 0) { + pr_err("MZ %s qseecom_set_bandwidth fail\n", __func__); + return -1; + } + + // send command + iResult = qseecom_send_command(session, + (char *)tci, + sizeof(tciMessage_t), + (char *)rsptci, + sizeof(tciMessage_t)); + if (iResult != 0) { + iResult = qseecom_set_bandwidth(session, false); + if (iResult != 0) { + pr_err("MZ %s qseecom_set_bandwidth fail 2\n", __func__); + return -1; + } + pr_err("MZ %s qseecom_send_command fail\n", __func__); + return -1; + } + + iResult = qseecom_set_bandwidth(session, false); + if (iResult != 0) { + pr_err("MZ %s qseecom_set_bandwidth fail 3\n", __func__); + return -1; + } + + // Check the Trustlet return code + if (rsptci->response.header.returnCode != 0) 
{ + pr_err("MZ %s ta return error %d\n", __func__, rsptci->response.header.returnCode); + return -1; + } + + // Read result from TCI buffer + retVal = rsptci->response.status; + if (retVal != 1) { + pr_err("MZ %s ta return status error %d\n", __func__, retVal); + goto exit_error; + } + + if (rsptci->msgData.bufferSize > MAX_SHMEM_LEN_BYTES) { + pr_err("MZ %s ta return buffer size error\n", __func__); + goto exit_error; + } + + memcpy(mzbuffer, rsptci->msgData.buffer, sizeof(struct tci_msg)); + + if (encrypt != NULL) + memcpy(encrypt, mzbuffer->encrypt, 8); + + kfree(mzbuffer); + + return 1; + +exit_error: + memset(tci, 0x00, sizeof(tciMessage_t)); + memset(rsptci, 0x00, sizeof(tciMessage_t)); + + return retVal; +} + +static int send_cmd_kthread(uint16_t cmd, uint32_t tgid, u8 *encrypt, uint8_t *iv) +{ + struct completion cmd_sent; + struct tee_msg cmd_msg; + + init_completion(&cmd_sent); + + cmd_msg.comp = &cmd_sent; + cmd_msg.cmd = cmd; + cmd_msg.tgid = tgid; + cmd_msg.encrypt = encrypt; + cmd_msg.iv = iv; + cmd_msg.rc = -EBADMSG; + + spin_lock(&tee_msg_lock); + list_add_tail(&cmd_msg.queue, &tee_msg_queue); + spin_unlock(&tee_msg_lock); + wake_up_process(mz_tee_msg_task); + wait_for_completion(&cmd_sent); + return cmd_msg.rc; +} + +MzResult encrypt_impl(uint8_t *pt, uint8_t *ct, uint8_t *iv) +{ + MzResult mzret = MZ_SUCCESS; + int taret; + + taret = send_cmd_kthread(CMD_MZ_WB_ENCRYPT, 0, pt, iv); + if (taret != MZ_TA_SUCCESS) { + pr_err("MZ %s ta return error %d\n", __func__, taret); + mzret = MZ_TA_FAIL; + } + memcpy(ct, pt, 8); + return mzret; +} + +MzResult load_trusted_app(void) +{ + int qsee_res = 0; + int tci_size = 0; + MzResult mzret = MZ_SUCCESS; + + pr_info("MZ %s target_count %d\n", __func__, target_count); + + if (target_count != 0 && session != NULL) { + target_count++; + return mzret; + } + + if (session != NULL) { + pr_err("MZ %s ta session is not null\n", __func__); + return MZ_TA_FAIL; + } + + session = NULL; + + // Open the QSEE device + tci_size = QSEECOM_ALIGN(2 * sizeof(tciMessage_t)); + qsee_res = qseecom_start_app(&session, QSEE_APP_NAME, tci_size); + if (qsee_res != 0) { + pr_err("MZ %s qseecom_start_app fail %d\n", __func__, qsee_res); + mzret = MZ_TA_FAIL; + } else { + target_count++; + pr_info("MZ ta load\n"); + } + + return mzret; +} + +static int register_tee_driver(void) +{ + struct mz_tee_driver_fns fn = { + .encrypt = encrypt_impl, + }; + + return register_mz_tee_crypto_driver(&fn); +} + +static void unregister_tee_driver(void) +{ + unregister_mz_tee_crypto_driver(); +} + +void unload_trusted_app(void) +{ + int iResult = 0; + + pr_info("MZ %s target_count %d\n", __func__, target_count); //TODO remove + if (target_count > 1 && session != NULL) + target_count--; + return; + + if (session == NULL) { + pr_err("MZ %s ta session is null\n", __func__); + return; + } + + iResult = qseecom_shutdown_app(&session); + session = NULL; + target_count--; + pr_info("MZ ta unload\n"); +} + +static int __init tee_driver_init(void) +{ + int rc = 0; + + mz_tee_msg_task = kthread_run(tee_msg_thread, NULL, "mz_tee_msg_thread"); + if (IS_ERR(mz_tee_msg_task)) { + rc = PTR_ERR(mz_tee_msg_task); + pr_err("MZ Can't create mz_tee_msg_task: %d\n", rc); + goto out; + } + rc = register_tee_driver(); + if (rc != MZ_SUCCESS) { + pr_err("MZ Can't register tee_driver\n"); + goto out; + } + +out: + return rc; +} + +static void __exit tee_driver_exit(void) +{ + unregister_tee_driver(); + kthread_stop(mz_tee_msg_task); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Samsung MZ tee 
Driver"); +MODULE_VERSION("1.00"); + +module_init(tee_driver_init); +module_exit(tee_driver_exit); diff --git a/security/samsung/mz_tee_driver/mz_tee_driver_teegris.c b/security/samsung/mz_tee_driver/mz_tee_driver_teegris.c new file mode 100644 index 000000000000..02e054928933 --- /dev/null +++ b/security/samsung/mz_tee_driver/mz_tee_driver_teegris.c @@ -0,0 +1,495 @@ +/* + * TEE Driver + * + * Copyright (C) 2021 Samsung Electronics, Inc. + * An Seongjin, <seongjin.an@samsung.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/firmware.h> +#include <linux/mz.h> +#include <soc/samsung/exynos-pmu-if.h> +#include <tzdev/tee_client_api.h> + +int target_count; + +/////////////////////////////////////////// TA message struct +#define MAX_SHMEM_LEN_BYTES 1024 +typedef uint32_t tciCommandId_t; +typedef uint32_t tciResponseId_t; +typedef uint32_t tciReturnCode_t; + +/** + * TCI command header. + */ +typedef struct _tciCommandHeader_t { + tciCommandId_t commandId; /**< Command ID */ +} tciCommandHeader_t; + +/** + * TCI response header. + */ +typedef struct _tciResponseHeader_t { + tciResponseId_t responseId; /**< Response ID (must be command ID | RSP_ID_MASK )*/ + tciReturnCode_t returnCode; /**< Return code of command */ +} tciResponseHeader_t; + + +typedef struct _cmdTest_t { + tciCommandHeader_t header; /**< Command header */ + uint32_t number; /**< Length of data to process */ +} cmdTest_t; + +/** + * Response structure Trustlet -> Trustlet Connector. + */ +typedef struct _rspTest_t { + tciResponseHeader_t header; /**< Response header */ + int32_t status; + uint32_t value; +} rspTest_t; + +typedef struct _msgData_t { + uint32_t buffer[MAX_SHMEM_LEN_BYTES]; + uint32_t bufferSize; +} msgData_t; + +/** + * TCI message data. + */ +typedef struct _tciMessage_t { + cmdTest_t command; + rspTest_t response; + msgData_t msgData; +} tciMessage_t; +/////////////////////////////////////////// TA message struct + +#define AES_BLOCK_SIZE_FOR_TZ 1024 +#define CMD_MZ_WB_ENCRYPT 0x900 +#define CMD_REMOVEKEY 2 +#define MZ_PATH_MAX 4096 + +#define MAX_SESS_NUM 1 // tlc sessions nums + +static TEEC_UUID uuid = { + 0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x4d, 0x65, 0x6d, 0x5a, 0x72, 0x67} +}; + +typedef struct tlc_struct { + TEEC_Context context; + TEEC_Session session; + TEEC_Operation operation; + TEEC_SharedMemory shmem; + TEEC_UUID uuid; + uint8_t *shmem_buff; +} tlc_struct_t; + +typedef struct tlc_struct_th { + tlc_struct_t tlc_struct_arr[MAX_SESS_NUM]; // thread unsafe struct +} tlc_struct_th_t; + +static tlc_struct_th_t tlc_struct_safe; +static uint8_t is_initialized; // to be checked + +/*! + * \brief returns pointer to first unfilled tlc_sttuct item in massive + * \return pointer to empty tlc_sttuct + */ +static tlc_struct_t *get_empty(void) +{ + return &tlc_struct_safe.tlc_struct_arr[0]; +} + +/*! 
+ * \brief returns pointer to tlc_struct owned by current thread + * \return pointer to empty tlc_sttuct + */ +static tlc_struct_t *mz_get_current(void) +{ + return &tlc_struct_safe.tlc_struct_arr[0]; +} + +/*! + * \brief Performs useful things only at once at zrtp_create + * \ where could be several processes single thread each + * \return operation result, 0 if OK + */ +static int tlc_init(void) +{ + if (is_initialized) + return 0; + + memset(&tlc_struct_safe, 0x00, sizeof(tlc_struct_safe)); + + is_initialized = 1; + return 0; +} + +struct tci_msg { + uint32_t cmd; + uint32_t pid; + uint8_t encrypt[8]; + uint8_t iv[16]; +} __packed; + +struct tee_msg { + struct completion *comp; + uint16_t cmd; + uint32_t tgid; + void *encrypt; + void *iv; + uint16_t rc; + struct list_head queue; +}; + +bool is_TA_session_opened; + +static int send_cmd(uint16_t cmd, uint32_t tgid, + void *encrypt, + void *iv); + +static DEFINE_SPINLOCK(tee_msg_lock); +static LIST_HEAD(tee_msg_queue); +struct task_struct *mz_tee_msg_task; + + +static int tee_msg_thread(void *arg) +{ + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + break; + } + if (list_empty(&tee_msg_queue)) + schedule(); + set_current_state(TASK_RUNNING); + + spin_lock(&tee_msg_lock); + while (!list_empty(&tee_msg_queue)) { + struct tee_msg *send_cmd_args; + int rc; + + send_cmd_args = list_entry(tee_msg_queue.next, + struct tee_msg, queue); + list_del_init(&send_cmd_args->queue); + spin_unlock(&tee_msg_lock); + + rc = send_cmd(send_cmd_args->cmd, + send_cmd_args->tgid, + send_cmd_args->encrypt, + send_cmd_args->iv); + send_cmd_args->rc = rc; + // when processing tee_iovec comp is not NULL + // only for last cmd in array + if (send_cmd_args->comp) + complete(send_cmd_args->comp); + spin_lock(&tee_msg_lock); + } + spin_unlock(&tee_msg_lock); + } + + return 0; +} + +static int send_cmd(uint16_t cmd, + uint32_t tgid, + void *encrypt, + void *iv) +{ + tciMessage_t *tci; + uint32_t retVal = 0; + struct tci_msg *mzbuffer; + int res = MZ_SUCCESS; + TEEC_Result result; + tlc_struct_t *tlc_struct = 0; + + if (!is_TA_session_opened) { + pr_err("MZ %s TA session not opened\n", __func__); + return -1; + } + + tlc_struct = mz_get_current(); + tci = (tciMessage_t *)tlc_struct->shmem_buff; + + memset(tci, 0x00, sizeof(tciMessage_t)); + + tci->command.header.commandId = cmd; + tci->msgData.bufferSize = sizeof(struct tci_msg); + + mzbuffer = kmalloc(sizeof(struct tci_msg), GFP_KERNEL); + if (!mzbuffer) + return -1; + + mzbuffer->cmd = cmd; + mzbuffer->pid = tgid; + if (encrypt != NULL) { + memcpy(mzbuffer->encrypt, encrypt, 8); + memcpy(mzbuffer->iv, iv, 16); + } + + memcpy(tci->msgData.buffer, mzbuffer, sizeof(struct tci_msg)); + + result = TEEC_InvokeCommand(&tlc_struct->session, cmd, &tlc_struct->operation, NULL); + if (result == TEEC_ERROR_TARGET_DEAD) { + pr_err("MZ %d, %08x\n", __LINE__, result); + res = -1; + goto exit_error; + } + + if (result != TEEC_SUCCESS) { + pr_err("MZ SendMess: TEEC_InvokeCommand failed with error: %08x\n", result); + res = -1; + goto exit_error; + } + + // Check the Trustlet return code + if (tci->response.header.returnCode != 0) { + pr_err("MZ %s ta return error %d\n", __func__, tci->response.header.returnCode); + return -1; + } + + // Read result from TCI buffer + retVal = tci->response.status; + if (retVal != 1) { + pr_err("MZ %s ta return status error %d\n", __func__, retVal); + goto exit_error; + } + + if (tci->msgData.bufferSize > MAX_SHMEM_LEN_BYTES) { + 
pr_err("MZ %s ta return buffer size error\n", __func__); + goto exit_error; + } + + memcpy(mzbuffer, tci->msgData.buffer, sizeof(struct tci_msg)); + + if (encrypt != NULL) + memcpy(encrypt, mzbuffer->encrypt, 8); + + kfree(mzbuffer); + + return 1; + +exit_error: + memset(tci, 0x00, sizeof(tciMessage_t)); + + return retVal; +} + +static int send_cmd_kthread(uint16_t cmd, uint32_t tgid, u8 *encrypt, uint8_t *iv) +{ + struct completion cmd_sent; + struct tee_msg cmd_msg; + + init_completion(&cmd_sent); + + cmd_msg.comp = &cmd_sent; + cmd_msg.cmd = cmd; + cmd_msg.tgid = tgid; + cmd_msg.encrypt = encrypt; + cmd_msg.iv = iv; + cmd_msg.rc = -EBADMSG; + + spin_lock(&tee_msg_lock); + list_add_tail(&cmd_msg.queue, &tee_msg_queue); + spin_unlock(&tee_msg_lock); + wake_up_process(mz_tee_msg_task); + wait_for_completion(&cmd_sent); + return cmd_msg.rc; +} + +MzResult encrypt_impl(uint8_t *pt, uint8_t *ct, uint8_t *iv) +{ + MzResult mzret = MZ_SUCCESS; + int taret; + + taret = send_cmd_kthread(CMD_MZ_WB_ENCRYPT, 0, pt, iv); + if (taret != MZ_TA_SUCCESS) { + pr_err("MZ %s ta return error %d\n", __func__, taret); + mzret = MZ_TA_FAIL; + } + memcpy(ct, pt, 8); + return mzret; +} + +MzResult load_trusted_app_teegris(void) +{ + MzResult mzret = MZ_SUCCESS; + TEEC_Result result = 0; + tlc_struct_t *tlc_struct = 0; + + pr_info("MZ %s target_count %d\n", __func__, target_count); + if (target_count != 0) { + target_count++; + return mzret; + } + + if (tlc_init() != 0) + return mzret; + + // Initialize Contex + tlc_struct = get_empty(); + + tlc_struct->uuid = uuid; + + // Initialize TEEC_Context + result = TEEC_InitializeContext(NULL, &tlc_struct->context); + if (result != TEEC_SUCCESS) { + pr_err("MZ TEEC_InitializeContext failed with error: %08x\n", result); + goto exit; + } + pr_debug("MZ TEEC_InitializeContext - OK\n"); + + // register shared memory + tlc_struct->shmem_buff = kmalloc(sizeof(tciMessage_t), GFP_KERNEL); + if (!tlc_struct->shmem_buff) { + result = TEEC_ERROR_OUT_OF_MEMORY; + pr_err("MZ %s shmem kmalloc fail %08x\n", __func__, result); + goto exit; + } + + tlc_struct->shmem.buffer = tlc_struct->shmem_buff; + tlc_struct->shmem.size = sizeof(tciMessage_t); + tlc_struct->shmem.flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT; + + result = TEEC_RegisterSharedMemory(&tlc_struct->context, &tlc_struct->shmem); + if (result != TEEC_SUCCESS) { + pr_err("MZ TEEC_RegisterSharedMemory failed with error: %08x\n", result); + goto close_session; + } + pr_debug("MZ TEEC_RegisterSharedMemory - OK\n"); + + // Step 3: Open session + result = TEEC_OpenSession(&tlc_struct->context, &tlc_struct->session, &tlc_struct->uuid, + TEEC_LOGIN_USER, + NULL, // No connection data needed for TEEC_LOGIN_USER. 
+ NULL, // dont use operation + NULL); + if (result != TEEC_SUCCESS) { + pr_err("MZ TEEC_OpenSession failed with error: %08x\n", result); + goto close_context; + } + pr_debug("MZ TEEC_OpenSession - OK\n"); + + tlc_struct->operation.paramTypes = TEEC_PARAM_TYPES(TEEC_MEMREF_PARTIAL_INOUT, TEEC_NONE, TEEC_NONE, TEEC_NONE); + tlc_struct->operation.params[0].memref.parent = &tlc_struct->shmem; + tlc_struct->operation.params[0].memref.size = tlc_struct->shmem.size; + tlc_struct->operation.params[0].memref.offset = 0; + + goto exit; + +close_session: + TEEC_CloseSession(&tlc_struct->session); + +close_context: + TEEC_FinalizeContext(&tlc_struct->context); + kfree(tlc_struct->shmem_buff); + +exit: + if (result == TEEC_SUCCESS) { + mzret = MZ_SUCCESS; + is_TA_session_opened = true; + target_count++; + } else + mzret = MZ_TA_FAIL; + + return mzret; +} + +static int register_tee_driver(void) +{ + struct mz_tee_driver_fns fn = { + .encrypt = encrypt_impl, + }; + + return register_mz_tee_crypto_driver(&fn); +} + +static void unregister_tee_driver(void) +{ + unregister_mz_tee_crypto_driver(); +} + +void unload_trusted_app_teegris(void) +{ + tlc_struct_t *tlc_struct = 0; + + tlc_struct = mz_get_current(); + + pr_info("MZ %s target_count %d\n", __func__, target_count); + if (target_count > 1) { + target_count--; + return; + } + if (target_count != 1) + return; + + pr_debug("MZ TA session is getting closed\n"); + + // TEEC_ReleaseSharedMemory -> void function + TEEC_ReleaseSharedMemory(&tlc_struct->shmem); + kfree(tlc_struct->shmem_buff); + + // TEEC_CloseSession -> void function + TEEC_CloseSession(&tlc_struct->session); + + // TEEC_FinalizeContext -> void function + TEEC_FinalizeContext(&tlc_struct->context); + + memset(tlc_struct, 0x00, sizeof(tlc_struct_t)); + is_TA_session_opened = false; + target_count--; + pr_info("MZ ta unload\n"); +} + +static int __init tee_driver_init(void) +{ + int rc = 0; + + is_initialized = 0; + is_TA_session_opened = false; + target_count = 0; + + mz_tee_msg_task = kthread_run(tee_msg_thread, NULL, "mz_tee_msg_thread"); + if (IS_ERR(mz_tee_msg_task)) { + rc = PTR_ERR(mz_tee_msg_task); + pr_err("MZ Can't create mz_tee_msg_task: %d\n", rc); + goto out; + } + rc = register_tee_driver(); + if (rc != MZ_SUCCESS) { + pr_err("MZ Can't register tee_driver\n"); + goto out; + } + + load_trusted_app = load_trusted_app_teegris; + unload_trusted_app = unload_trusted_app_teegris; + +out: + return rc; +} + +static void __exit tee_driver_exit(void) +{ + unregister_tee_driver(); + kthread_stop(mz_tee_msg_task); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Samsung MZ tee Driver"); +MODULE_VERSION("1.00"); + +module_init(tee_driver_init); +module_exit(tee_driver_exit); diff --git a/security/samsung/mz_tee_driver/security_mz_tee_driver.py b/security/samsung/mz_tee_driver/security_mz_tee_driver.py new file mode 100755 index 000000000000..bfcd2930e295 --- /dev/null +++ b/security/samsung/mz_tee_driver/security_mz_tee_driver.py @@ -0,0 +1,50 @@ +config = { + "header": { + "uuid": "dd9c76cb-0993-427d-b46d-7ab8b5029fcc", + "type": "SECURITY", + "vendor": "SAMSUNG", + "product": "mz_tee_driver", + "variant": "mz_tee_driver", + "name": "mz_tee_driver", + }, + "build": { + "path": "security/mz_tee_driver", + "file": "security_mz_tee_driver.py", + "location": [ + { + "src": "*.c Makefile:update Kconfig:update", + "dst": "security/samsung/mz_tee_driver/", + }, + ], + }, + "features": [ + { + "label": "BUILD TYPE", + "configs": { + "not set": [ + "# CONFIG_MZ_TEE_DRIVER is not set" + ], + 
"module": [ + "CONFIG_MZ_TEE_DRIVER=m" + ], + "built-in": [ + "CONFIG_MZ_TEE_DRIVER=y" + ], + }, + "list_value": [ + "not set", + "module", + "built-in", + ], + "type": "list", + "value": "not set", + } + ], +} + + +def load(): + return config + +if __name__ == '__main__': + print(load()) diff --git a/security/samsung/proca/Makefile b/security/samsung/proca/Makefile index 5e09944b962d..a5ffcf2f4f14 100644 --- a/security/samsung/proca/Makefile +++ b/security/samsung/proca/Makefile @@ -50,4 +50,10 @@ else clean-files += proca_certificate.asn1.c proca_certificate.asn1.h endif -subdir-ccflags-y += -Wformat \ No newline at end of file +subdir-ccflags-y += -Wformat + +# kunit tests options: +ifeq ($(CONFIG_SEC_KUNIT)$(CONFIG_UML), yy) + GCOV_PROFILE := y + ccflags-$(CONFIG_PROCA) += -DPROCA_KUNIT_ENABLED +endif diff --git a/security/samsung/proca/gaf/Makefile b/security/samsung/proca/gaf/Makefile index d20735eef478..44edbc2a9a79 100644 --- a/security/samsung/proca/gaf/Makefile +++ b/security/samsung/proca/gaf/Makefile @@ -5,3 +5,10 @@ obj-$(CONFIG_GAF_V5) += gaf_v5.o obj-$(CONFIG_GAF_V6) += gaf_v6.o ccflags-y += -I$(srctree)/security/samsung/proca + +# kunit tests options: +ifeq ($(CONFIG_SEC_KUNIT)$(CONFIG_UML), yy) + GCOV_PROFILE := y + ccflags-$(CONFIG_PROCA) += -DPROCA_KUNIT_ENABLED +endif + diff --git a/security/samsung/proca/gaf/gaf_v6.c b/security/samsung/proca/gaf/gaf_v6.c index 6ec288658a92..1615435a3a37 100644 --- a/security/samsung/proca/gaf/gaf_v6.c +++ b/security/samsung/proca/gaf/gaf_v6.c @@ -22,7 +22,7 @@ #ifdef CONFIG_PROCA_GKI_10 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) #define OFFSETOF_INTEGRITY offsetof(struct task_struct, android_oem_data1[2]) -#define OFFSETOF_F_SIGNATURE offsetof(struct file, android_oem_data1) +#define OFFSETOF_F_SIGNATURE 0 #else #define OFFSETOF_INTEGRITY offsetof(struct task_struct, android_vendor_data1[2]) #define OFFSETOF_F_SIGNATURE offsetof(struct file, android_vendor_data1) @@ -84,7 +84,11 @@ static struct GAForensicINFO { } GAFINFO = { .ver = 0x0600, /* by hryhorii tur 2019 10 21 */ .size = sizeof(GAFINFO), +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)) + .task_struct_struct_state = offsetof(struct task_struct, __state), +#else .task_struct_struct_state = offsetof(struct task_struct, state), +#endif .task_struct_struct_comm = offsetof(struct task_struct, comm), .task_struct_struct_tasks = offsetof(struct task_struct, tasks), .task_struct_struct_pid = offsetof(struct task_struct, pid), diff --git a/security/samsung/proca/proca_certificate.c b/security/samsung/proca/proca_certificate.c index f9f6e9f9c593..02aef015f2bf 100644 --- a/security/samsung/proca/proca_certificate.c +++ b/security/samsung/proca/proca_certificate.c @@ -19,13 +19,19 @@ #include <linux/err.h> #include <crypto/hash.h> #include <crypto/hash_info.h> -#include <crypto/sha.h> #include <linux/version.h> #include <linux/file.h> +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#else +#include <crypto/sha.h> +#endif #include "proca_log.h" #include "proca_certificate.h" #include "five_crypto.h" +#include "five_testing.h" #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 42) #include "proca_certificate.asn1.h" @@ -210,8 +216,9 @@ enum PaFlagBits { PaFlagBits_bitHmac = 2 }; -static bool check_native_pa_id(const struct proca_certificate *parsed_cert, - struct task_struct *task) +__visible_for_testing __mockable +bool check_native_pa_id(const struct proca_certificate *parsed_cert, + struct task_struct *task) { struct 
file *exe; char *path_buff; @@ -252,7 +259,7 @@ bool is_certificate_relevant_to_task( const char system_server_app_name[] = "/system/framework/services.jar"; const char system_server[] = "system_server"; const size_t max_app_name = 1024; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) || defined(PROCA_KUNIT_ENABLED)) char cmdline[1024 + 1]; #else char cmdline[max_app_name + 1]; diff --git a/security/samsung/proca/proca_config.c b/security/samsung/proca/proca_config.c index af01fb39b6e4..7aa66f515fcd 100644 --- a/security/samsung/proca/proca_config.c +++ b/security/samsung/proca/proca_config.c @@ -75,6 +75,7 @@ static int prepare_sys_ram_ranges(struct proca_config *conf) return ret; } +#ifndef PROCA_KUNIT_ENABLED static void prepare_kernel_constants(struct proca_config *conf) { conf->page_offset = PAGE_OFFSET; @@ -87,6 +88,9 @@ static void prepare_kernel_constants(struct proca_config *conf) conf->kimage_vaddr = get_kimage_vaddr(); conf->kimage_voffset = get_kimage_voffset(); } +#else +static void prepare_kernel_constants(struct proca_config *conf) {} +#endif static void dump_proca_config(const struct proca_config *conf) { diff --git a/security/samsung/proca/proca_porting.h b/security/samsung/proca/proca_porting.h index 3d1d92937dd8..6e8f8753f487 100644 --- a/security/samsung/proca/proca_porting.h +++ b/security/samsung/proca/proca_porting.h @@ -105,6 +105,7 @@ __vfs_getxattr(struct dentry *dentry, struct inode *inode, const char *name, /* * KASLR is backported to 4.4 kernels */ +#ifndef PROCA_KUNIT_ENABLED #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) static inline uintptr_t get_kimage_vaddr(void) @@ -129,6 +130,7 @@ static inline u64 get_kimage_voffset(void) return kimage_voffset; } #endif +#endif #ifndef OVERLAYFS_SUPER_MAGIC #define OVERLAYFS_SUPER_MAGIC 0x794c7630 @@ -193,14 +195,22 @@ static inline struct file *get_task_exe_file(struct task_struct *task) } #endif -#if defined(CONFIG_ANDROID) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) +#if defined(CONFIG_ANDROID) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) /* * __vfs_getxattr was changed in Android Kernel v5.4 * https://android.googlesource.com/kernel/common/+/3484eba91d6b529cc606486a2db79513f3db6c67 + * and was reverted in Android Kernel v5.15 + * https://android.googlesource.com/kernel/common/+/e884438aa554219a6d0df3a18ff0b23ea678c36c */ #define XATTR_NOSECURITY 0x4 /* get value, do not involve security check */ #define __vfs_getxattr(dentry, inode, name, value, size, flags) \ __vfs_getxattr(dentry, inode, name, value, size) #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) +#define __vfs_setxattr_noperm(dentry, name, value, size, flags) \ + __vfs_setxattr_noperm(&init_user_ns, dentry, name, value, size, flags) +#endif + #endif /* __LINUX_PROCA_PORTING_H */ diff --git a/security/samsung/proca/security_proca.py b/security/samsung/proca/security_proca.py new file mode 100755 index 000000000000..3d20ea4a6f5f --- /dev/null +++ b/security/samsung/proca/security_proca.py @@ -0,0 +1,95 @@ +config = { + "header": { + "uuid": "283df57d-cc04-4f9f-ae5a-9c8140dd1e53", + "type": "SECURITY", + "vendor": "SAMSUNG", + "product": "proca", + "variant": "proca", + "name": "proca", + }, + "build": { + "path": "security/proca", + "file": "security_proca.py", + "location": [ + { + "src": "*.c *.h *.asn1 Makefile Kconfig", + "dst": "security/samsung/proca/", + }, + { + "src": "gaf/*", + "dst": "security/samsung/proca/gaf/", 
+ }, + { + "src": "s_os/*.c s_os/*.h", + "dst": "security/samsung/proca/s_os/", + }, + ], + }, + "features": [ + { + "label": "General", + "type": "boolean", + "configs": { + "True": [], + "False": [], + }, + "value": True, + }, + ], + "kunit_test": { + "build": { + "location": [ + { + "src": "../five/kunit_test/test_helpers.h kunit_test/*.c kunit_test/*.h kunit_test/Makefile:cp", + "dst": "security/samsung/proca/kunit_test/", + }, + { + "src": "../five/*.c ../five/*.h ../five/*.S ../five/*.der ../five/Kconfig kunit_test/five/Makefile", + "dst": "security/samsung/five/", + }, + { + "src": "../five/s_os/*.c", + "dst": "security/samsung/five/s_os/", + }, + ], + }, + "features": [ + { + "label": "default", + "configs": { + "True": [ + "CONFIG_PROCA=y", + "CONFIG_PROCA_DEBUG=y", + "CONFIG_PROCA_S_OS=y", + "CONFIG_GAF_V6=y", + "CONFIG_FIVE=y", + "CONFIG_FIVE_GKI_10=y", + "CONFIG_CRC16=y", + "CONFIG_SECURITY=y", + "CONFIG_NET=y", + "CONFIG_AUDIT=y", + "CONFIG_AUDITSYSCALL=y", + "CONFIG_MD=y", + "CONFIG_BLK_DEV_DM=y", + "CONFIG_BLK_DEV_LOOP=y", + "CONFIG_DM_VERITY=y", + "CONFIG_CRYPTO_SHA256=y", + "CONFIG_DEBUG_KERNEL=y", + "CONFIG_DEBUG_INFO=y", + "CONFIG_GCOV=y" + ], + "False": [], + }, + "type": "boolean", + }, + ] + }, +} + + +def load(): + return config + + +if __name__ == "__main__": + print(load()) diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index c95ab4c5a475..129b755824e1 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -433,6 +433,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) break; } break; + case ARM_SMCCC_ARCH_WORKAROUND_3: + switch (kvm_arm_get_spectre_bhb_state()) { + case SPECTRE_VULNERABLE: + break; + case SPECTRE_MITIGATED: + val = SMCCC_RET_SUCCESS; + break; + case SPECTRE_UNAFFECTED: + val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; + break; + } + break; } break; default: From 1973f0e82165f7de9bdca16e8b3c2476a5584069 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Nov 2022 16:29:00 +0400 Subject: [PATCH 006/452] G975F - HVJ5 Signed-off-by: Denis Efremov <efremov@linux.com> --- .../admin-guide/kernel-parameters.txt | 47 +++- Documentation/arm64/cpu-feature-registers.txt | 26 ++- Documentation/arm64/silicon-errata.txt | 1 + arch/arm64/boot/dts/G975F.mk | 14 +- .../exynos9820-beyond2lte_eur_open_04.dts | 5 +- .../exynos9820-beyond2lte_eur_open_16.dts | 5 +- .../exynos9820-beyond2lte_eur_open_17.dts | 5 +- .../exynos9820-beyond2lte_eur_open_18.dts | 5 +- .../exynos9820-beyond2lte_eur_open_19.dts | 5 +- .../exynos9820-beyond2lte_eur_open_20.dts | 5 +- .../exynos9820-beyond2lte_eur_open_23.dts | 5 +- .../exynos9820-beyond2lte_eur_open_24.dts | 5 +- .../exynos9820-beyond2lte_eur_open_25.dts | 5 +- .../exynos9820-beyond2lte_eur_open_26.dts | 5 +- .../configs/exynos9820-beyond2lte_defconfig | 10 +- arch/arm64/kernel/bpi.S | 72 +++++-- arch/arm64/kernel/entry.S | 200 ++++++++++++++---- arch/arm64/kernel/head.S | 13 +- arch/arm64/kernel/vmlinux.lds.S | 2 +- arch/arm64/kvm/hyp/entry.S | 12 -- arch/arm64/kvm/hyp/hyp-entry.S | 4 + arch/arm64/mm/proc.S | 24 +-- 22 files changed, 330 insertions(+), 145 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 98c95029ec30..6ba2bb49642c 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2365,6 +2365,41 @@ in the "bleeding edge" mini2440 support kernel at http://repo.or.cz/w/linux-2.6/mini2440.git + mitigations= + [X86,PPC,S390,ARM64] Control 
optional mitigations for + CPU vulnerabilities. This is a set of curated, + arch-independent options, each of which is an + aggregation of existing arch-specific options. + + off + Disable all optional CPU mitigations. This + improves system performance, but it may also + expose users to several CPU vulnerabilities. + Equivalent to: nopti [X86,PPC] + kpti=0 [ARM64] + nospectre_v1 [PPC] + nobp=0 [S390] + nospectre_v1 [X86] + nospectre_v2 [X86,PPC,S390,ARM64] + spectre_v2_user=off [X86] + spec_store_bypass_disable=off [X86,PPC] + ssbd=force-off [ARM64] + l1tf=off [X86] + mds=off [X86] + + auto (default) + Mitigate all CPU vulnerabilities, but leave SMT + enabled, even if it's vulnerable. This is for + users who don't want to be surprised by SMT + getting disabled across kernel upgrades, or who + have other ways of avoiding SMT-based attacks. + This is the default behavior. + + auto,nosmt + Mitigate all CPU vulnerabilities, disabling SMT + if needed. This is for users who always want to + be fully mitigated, even if it means losing SMT. + mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this parameter allows control of the logging verbosity for @@ -2683,10 +2718,14 @@ nosmt=force: Force disable SMT, cannot be undone via the sysfs control file. - nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 - (indirect branch prediction) vulnerability. System may - allow data leaks with this option, which is equivalent - to spectre_v2=off. + nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1 + (bounds check bypass). With this option data leaks + are possible in the system. + + nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for + the Spectre variant 2 (indirect branch prediction) + vulnerability. System may allow data leaks with this + option. 
nospec_store_bypass_disable [HW] Disable all mitigations for the Speculative Store Bypass vulnerability diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt index dad411d635d8..7964f03846b1 100644 --- a/Documentation/arm64/cpu-feature-registers.txt +++ b/Documentation/arm64/cpu-feature-registers.txt @@ -110,7 +110,17 @@ infrastructure: x--------------------------------------------------x | Name | bits | visible | |--------------------------------------------------| - | RES0 | [63-32] | n | + | TS | [55-52] | y | + |--------------------------------------------------| + | FHM | [51-48] | y | + |--------------------------------------------------| + | DP | [47-44] | y | + |--------------------------------------------------| + | SM4 | [43-40] | y | + |--------------------------------------------------| + | SM3 | [39-36] | y | + |--------------------------------------------------| + | SHA3 | [35-32] | y | |--------------------------------------------------| | RDM | [31-28] | y | |--------------------------------------------------| @@ -123,8 +133,6 @@ infrastructure: | SHA1 | [11-8] | y | |--------------------------------------------------| | AES | [7-4] | y | - |--------------------------------------------------| - | RES0 | [3-0] | n | x--------------------------------------------------x @@ -132,7 +140,9 @@ infrastructure: x--------------------------------------------------x | Name | bits | visible | |--------------------------------------------------| - | RES0 | [63-28] | n | + | DIT | [51-48] | y | + |--------------------------------------------------| + | SVE | [35-32] | y | |--------------------------------------------------| | GIC | [27-24] | n | |--------------------------------------------------| @@ -183,6 +193,14 @@ infrastructure: | DPB | [3-0] | y | x--------------------------------------------------x + 5) ID_AA64MMFR2_EL1 - Memory model feature register 2 + + x--------------------------------------------------x + | Name | bits | visible | + |--------------------------------------------------| + | AT | [35-32] | y | + x--------------------------------------------------x + Appendix I: Example --------------------------- diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index e4fe6adc372b..42f5672e8917 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -56,6 +56,7 @@ stable kernels. 
| ARM | Cortex-A72 | #853709 | N/A | | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | +| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 | | ARM | MMU-500 | #841119,#826419 | N/A | | | | | | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | diff --git a/arch/arm64/boot/dts/G975F.mk b/arch/arm64/boot/dts/G975F.mk index 760cf79006f5..227f9fe4dd46 100644 --- a/arch/arm64/boot/dts/G975F.mk +++ b/arch/arm64/boot/dts/G975F.mk @@ -1,15 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 -dtb-y += exynos/exynos9820.dtb -dtbo-y += samsung/exynos9820-beyond2lte_eur_open_20.dtbo +dtbo-y += samsung/exynos9820-beyond2lte_eur_open_25.dtbo +dtbo-y += samsung/exynos9820-beyond2lte_eur_open_16.dtbo dtbo-y += samsung/exynos9820-beyond2lte_eur_open_04.dtbo -dtbo-y += samsung/exynos9820-beyond2lte_eur_open_26.dtbo +dtbo-y += samsung/exynos9820-beyond2lte_eur_open_18.dtbo dtbo-y += samsung/exynos9820-beyond2lte_eur_open_17.dtbo +dtbo-y += samsung/exynos9820-beyond2lte_eur_open_26.dtbo +dtbo-y += samsung/exynos9820-beyond2lte_eur_open_20.dtbo dtbo-y += samsung/exynos9820-beyond2lte_eur_open_23.dtbo -dtbo-y += samsung/exynos9820-beyond2lte_eur_open_25.dtbo -dtbo-y += samsung/exynos9820-beyond2lte_eur_open_18.dtbo -dtbo-y += samsung/exynos9820-beyond2lte_eur_open_24.dtbo dtbo-y += samsung/exynos9820-beyond2lte_eur_open_19.dtbo -dtbo-y += samsung/exynos9820-beyond2lte_eur_open_16.dtbo +dtbo-y += samsung/exynos9820-beyond2lte_eur_open_24.dtbo +dtb-y += exynos/exynos9820.dtb targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_04.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_04.dts index 8edff72d90c0..9f6b03ead02f 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_04.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_04.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6621,8 +6622,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_16.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_16.dts index 35c3317b6653..608a498e1c66 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_16.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_16.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6621,8 +6622,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git 
a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_17.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_17.dts index f00c078685cd..f0ff199f9f8f 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_17.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_17.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6556,8 +6557,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_18.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_18.dts index 52b43e16e353..a50124f7f52e 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_18.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_18.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6679,8 +6680,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_19.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_19.dts index 5c859fc138a3..6c7aa825f3d4 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_19.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_19.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6708,8 +6709,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_20.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_20.dts index 705e806e8bc7..d95c8d9b2f08 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_20.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_20.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6707,8 +6708,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; 
sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_23.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_23.dts index 802c1bda29ab..79c0e0147f01 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_23.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_23.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6707,8 +6708,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_24.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_24.dts index cf2f09b67c71..3205cebaedfd 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_24.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_24.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6707,8 +6708,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_25.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_25.dts index fa14c4be5e77..601d1c618d5a 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_25.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_25.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6707,8 +6708,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_26.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_26.dts index b5850e14e553..e31faa344e51 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_26.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond2lte_eur_open_26.dts @@ -865,6 +865,7 
@@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6706,8 +6707,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e07>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/configs/exynos9820-beyond2lte_defconfig b/arch/arm64/configs/exynos9820-beyond2lte_defconfig index e17dc320a080..64ac3799600c 100644 --- a/arch/arm64/configs/exynos9820-beyond2lte_defconfig +++ b/arch/arm64/configs/exynos9820-beyond2lte_defconfig @@ -513,6 +513,7 @@ CONFIG_ARM64_ERRATUM_832075=y CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1188873=y # CONFIG_CAVIUM_ERRATUM_22375 is not set # CONFIG_CAVIUM_ERRATUM_23154 is not set # CONFIG_CAVIUM_ERRATUM_27456 is not set @@ -614,6 +615,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -771,6 +773,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y @@ -1330,6 +1333,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y @@ -1563,8 +1567,6 @@ CONFIG_TZDEV_SK_MULTICORE=y CONFIG_ION_FD2PHYS=y CONFIG_TZIRS=y # CONFIG_TZTUI is not set -CONFIG_TZDEV_PAGE_MIGRATION=y -# CONFIG_TZ_TRANSPORT is not set # CONFIG_TZDEV_HOTPLUG is not set CONFIG_TZDEV_BOOST=y CONFIG_TZ_BOOT_LOG=y @@ -2017,7 +2019,7 @@ CONFIG_USB_NET_CDCETHER=y CONFIG_USB_NET_CDC_EEM=y CONFIG_USB_NET_CDC_NCM=y # CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -CONFIG_USB_NET_CDC_MBIM=y +# CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SR9700 is not set # CONFIG_USB_NET_SR9800 is not set @@ -4075,7 +4077,7 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_ACM=y CONFIG_USB_PRINTER=y -CONFIG_USB_WDM=y +# CONFIG_USB_WDM is not set # CONFIG_USB_TMC is not set # diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index e5de33513b5d..bd6ef8750f44 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -55,29 +55,69 @@ ENTRY(__bp_harden_hyp_vecs_start) .endr ENTRY(__bp_harden_hyp_vecs_end) -ENTRY(__qcom_hyp_sanitize_link_stack_start) - stp x29, x30, [sp, #-16]! - .rept 16 - bl . 
+ 4 - .endr - ldp x29, x30, [sp], #16 -ENTRY(__qcom_hyp_sanitize_link_stack_end) -.macro smccc_workaround_1 inst +ENTRY(__smccc_workaround_1_smc_start) sub sp, sp, #(8 * 4) stp x2, x3, [sp, #(8 * 0)] stp x0, x1, [sp, #(8 * 2)] mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 - \inst #0 + smc #0 ldp x2, x3, [sp, #(8 * 0)] ldp x0, x1, [sp, #(8 * 2)] add sp, sp, #(8 * 4) -.endm - -ENTRY(__smccc_workaround_1_smc_start) - smccc_workaround_1 smc ENTRY(__smccc_workaround_1_smc_end) -ENTRY(__smccc_workaround_1_hvc_start) - smccc_workaround_1 hvc -ENTRY(__smccc_workaround_1_hvc_end) +ENTRY(__smccc_workaround_3_smc_start) + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 + smc #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +ENTRY(__smccc_workaround_3_smc_end) + +ENTRY(__spectre_bhb_loop_k8_start) + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #8 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +ENTRY(__spectre_bhb_loop_k8_end) + +ENTRY(__spectre_bhb_loop_k24_start) + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #24 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +ENTRY(__spectre_bhb_loop_k24_end) + +ENTRY(__spectre_bhb_loop_k32_start) + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #32 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +ENTRY(__spectre_bhb_loop_k32_end) + +ENTRY(__spectre_bhb_clearbhb_start) + hint #22 /* aka clearbhb */ + isb +ENTRY(__spectre_bhb_clearbhb_end) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 25f3603d7731..c36918528b3f 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -74,18 +74,21 @@ .macro kernel_ventry, el, label, regsize = 64 .align 7 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -alternative_if ARM64_UNMAP_KERNEL_AT_EL0 +.Lventry_start\@: .if \el == 0 + /* + * This must be the first instruction of the EL0 vector entries. It is + * skipped by the trampoline vectors, to trigger the cleanup. + */ + b .Lskip_tramp_vectors_cleanup\@ .if \regsize == 64 mrs x30, tpidrro_el0 msr tpidrro_el0, xzr .else mov x30, xzr .endif +.Lskip_tramp_vectors_cleanup\@: .endif -alternative_else_nop_endif -#endif .if \el == 0 sub sp, sp, #S_FRAME_SIZE @@ -135,11 +138,15 @@ alternative_else_nop_endif mrs x0, tpidrro_el0 #endif b el\()\el\()_\label +.org .Lventry_start\@ + 128 // Did we overflow the ventry slot? .endm - .macro tramp_alias, dst, sym + .macro tramp_alias, dst, sym, tmp mov_q \dst, TRAMP_VALIAS - add \dst, \dst, #(\sym - .entry.tramp.text) + adr_l \tmp, \sym + add \dst, \dst, \tmp + adr_l \tmp, .entry.tramp.text + sub \dst, \dst, \tmp .endm // This macro corrupts x0-x3. 
It is the caller's duty @@ -356,25 +363,25 @@ alternative_else_nop_endif ldp x24, x25, [sp, #16 * 12] ldp x26, x27, [sp, #16 * 13] ldp x28, x29, [sp, #16 * 14] - ldr lr, [sp, #S_LR] - .if \el == 0 - add sp, sp, #S_FRAME_SIZE // restore sp - .else - add sp, sp, #(S_FRAME_SIZE+PRESERVE_STACK_SIZE) - .endif /* \el == 0 */ .if \el == 0 -alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 + ldr lr, [sp, #S_LR] + add sp, sp, #S_FRAME_SIZE // restore sp + eret +alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 bne 4f - msr far_el1, x30 - tramp_alias x30, tramp_exit_native + msr far_el1, x29 + tramp_alias x30, tramp_exit_native, x29 br x30 4: - tramp_alias x30, tramp_exit_compat + tramp_alias x30, tramp_exit_compat, x29 br x30 #endif .else + ldr lr, [sp, #S_LR] + add sp, sp, #(S_FRAME_SIZE+PRESERVE_STACK_SIZE) eret .endif .endm @@ -1071,12 +1078,7 @@ __ni_sys_trace: .popsection // .entry.text -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -/* - * Exception vectors trampoline. - */ - .pushsection ".entry.tramp.text", "ax" - + // Move from tramp_pg_dir to swapper_pg_dir .macro tramp_map_kernel, tmp mrs \tmp, ttbr1_el1 sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) @@ -1108,12 +1110,47 @@ alternative_else_nop_endif */ .endm - .macro tramp_ventry, regsize = 64 + .macro tramp_data_page dst + adr_l \dst, .entry.tramp.text + sub \dst, \dst, PAGE_SIZE + .endm + + .macro tramp_data_read_var dst, var +#ifdef CONFIG_RANDOMIZE_BASE + tramp_data_page \dst + add \dst, \dst, #:lo12:__entry_tramp_data_\var + ldr \dst, [\dst] +#else + ldr \dst, =\var +#endif + .endm + +#define BHB_MITIGATION_NONE 0 +#define BHB_MITIGATION_LOOP 1 +#define BHB_MITIGATION_FW 2 +#define BHB_MITIGATION_INSN 3 + + .macro tramp_ventry, vector_start, regsize, kpti, bhb .align 7 1: .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif + + .if \bhb == BHB_MITIGATION_LOOP + /* + * This sequence must appear before the first indirect branch. i.e. the + * ret out of tramp_ventry. It appears here because x30 is free. + */ + __mitigate_spectre_bhb_loop x30 + .endif // \bhb == BHB_MITIGATION_LOOP + + .if \bhb == BHB_MITIGATION_INSN + clearbhb + isb + .endif // \bhb == BHB_MITIGATION_INSN + + .if \kpti == 1 /* * Defend against branch aliasing attacks by pushing a dummy * entry onto the return stack and using a RET instruction to @@ -1123,43 +1160,75 @@ alternative_else_nop_endif b . 2: tramp_map_kernel x30 -#ifdef CONFIG_RANDOMIZE_BASE - adr x30, tramp_vectors + PAGE_SIZE alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 - ldr x30, [x30] -#else - ldr x30, =vectors -#endif - prfm plil1strm, [x30, #(1b - tramp_vectors)] + tramp_data_read_var x30, vectors + prfm plil1strm, [x30, #(1b - \vector_start)] msr vbar_el1, x30 - add x30, x30, #(1b - tramp_vectors) isb + .else + ldr x30, =vectors + .endif // \kpti == 1 + + .if \bhb == BHB_MITIGATION_FW + /* + * The firmware sequence must appear before the first indirect branch. + * i.e. the ret out of tramp_ventry. But it also needs the stack to be + * mapped to save/restore the registers the SMC clobbers. + */ + __mitigate_spectre_bhb_fw + .endif // \bhb == BHB_MITIGATION_FW + + add x30, x30, #(1b - \vector_start + 4) ret +.org 1b + 128 // Did we overflow the ventry slot? 
.endm .macro tramp_exit, regsize = 64 - adr x30, tramp_vectors + tramp_data_read_var x30, this_cpu_vector +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + mrs x29, tpidr_el1 +alternative_else + mrs x29, tpidr_el2 +alternative_endif + ldr x30, [x30, x29] + msr vbar_el1, x30 - tramp_unmap_kernel x30 + ldr lr, [sp, #S_LR] + tramp_unmap_kernel x29 .if \regsize == 64 - mrs x30, far_el1 + mrs x29, far_el1 .endif + add sp, sp, #S_FRAME_SIZE // restore sp eret .endm - .align 11 -ENTRY(tramp_vectors) + .macro generate_tramp_vector, kpti, bhb +.Lvector_start\@: .space 0x400 - tramp_ventry - tramp_ventry - tramp_ventry - tramp_ventry + .rept 4 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb + .endr + .endm - tramp_ventry 32 - tramp_ventry 32 - tramp_ventry 32 - tramp_ventry 32 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + * The order must match __bp_harden_el1_vectors and the + * arm64_bp_harden_el1_vectors enum. + */ + .pushsection ".entry.tramp.text", "ax" + .align 11 +ENTRY(tramp_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE END(tramp_vectors) ENTRY(tramp_exit_native) @@ -1177,11 +1246,54 @@ END(tramp_exit_compat) .align PAGE_SHIFT .globl __entry_tramp_data_start __entry_tramp_data_start: +__entry_tramp_data_vectors: .quad vectors +#ifdef CONFIG_ARM_SDE_INTERFACE +__entry_tramp_data___sdei_asm_trampoline_next_handler: + .quad __sdei_asm_handler +#endif /* CONFIG_ARM_SDE_INTERFACE */ +__entry_tramp_data_this_cpu_vector: + .quad this_cpu_vector .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ +/* + * Exception vectors for spectre mitigations on entry from EL1 when + * kpti is not in use. + */ + .macro generate_el1_vector, bhb +.Lvector_start\@: + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t + + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error_invalid // Error EL1h + + .rept 4 + tramp_ventry .Lvector_start\@, 64, 0, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, 0, \bhb + .endr + .endm + +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ + .pushsection ".entry.text", "ax" + .align 11 +ENTRY(__bp_harden_el1_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_el1_vector bhb=BHB_MITIGATION_LOOP + generate_el1_vector bhb=BHB_MITIGATION_FW + generate_el1_vector bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ +END(__bp_harden_el1_vectors) + .popsection + /* * Special system call wrappers. 
*/ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 6dac3f1952bf..e05889e6ef23 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -401,17 +401,13 @@ ENTRY(el2_setup) mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq 1f - mrs x0, sctlr_el1 -CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1 -CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 + mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) msr sctlr_el1, x0 mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 isb ret -1: mrs x0, sctlr_el2 -CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2 -CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2 +1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) msr sctlr_el2, x0 #ifdef CONFIG_ARM64_VHE @@ -518,10 +514,7 @@ install_el2_stub: * requires no configuration, and all non-hyp-specific EL2 setup * will be done via the _EL1 system register aliases in __cpu_setup. */ - /* sctlr_el1 */ - mov x0, #0x0800 // Set/clear RES{1,0} bits -CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems -CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems + mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) msr sctlr_el1, x0 /* Coprocessor traps. */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e08998cd25a3..e0b071913f44 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -255,7 +255,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, "Entry trampoline text too big") #endif /* diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index a7b3c198d4de..a360ac6e89e9 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -196,15 +196,3 @@ alternative_endif eret ENDPROC(__fpsimd_guest_restore) - -ENTRY(__qcom_hyp_sanitize_btac_predictors) - /** - * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700) - * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls - * b15-b0: contains SiP functionID - */ - movz x0, #0x1700 - movk x0, #0xc200, lsl #16 - smc #0 - ret -ENDPROC(__qcom_hyp_sanitize_btac_predictors) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 3c283fd8c8f5..94007edacae3 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -111,6 +111,10 @@ el1_hvc_guest: /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ ARM_SMCCC_ARCH_WORKAROUND_2) + cbz w1, wa_epilogue + + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ + ARM_SMCCC_ARCH_WORKAROUND_3) cbnz w1, el1_trap #ifdef CONFIG_ARM64_SSBD diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 63f3e8e9a940..7ca1cc6f6b39 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -439,11 +439,7 @@ ENTRY(__cpu_setup) /* * Prepare SCTLR */ - adr x5, crval - ldp w5, w6, [x5] - mrs x0, sctlr_el1 - bic x0, x0, x5 // clear bits - orr x0, x0, x6 // set bits + mov_q x0, SCTLR_EL1_SET /* * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for * both user and kernel. @@ -479,21 +475,3 @@ ENTRY(__cpu_setup) msr tcr_el1, x10 ret // return to head.S ENDPROC(__cpu_setup) - - /* - * We set the desired value explicitly, including those of the - * reserved bits. 
The values of bits EE & E0E were set early in - * el2_setup, which are left untouched below. - * - * n n T - * U E WT T UD US IHBS - * CE0 XWHW CZ ME TEEA S - * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM - * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved - * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings - */ - .type crval, #object -crval: - .word 0xfcffffff // clear - .word 0x34d5d91d // set - .popsection From 26dc11c2af27de5ca266cd52af967d5a3e2583ee Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Nov 2022 16:31:32 +0400 Subject: [PATCH 007/452] G970F - HVJ5 Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/boot/dts/G970F.mk | 8 ++++---- .../dts/samsung/exynos9820-beyond0lte_eur_open_17.dts | 5 +++-- .../dts/samsung/exynos9820-beyond0lte_eur_open_18.dts | 5 +++-- .../dts/samsung/exynos9820-beyond0lte_eur_open_19.dts | 5 +++-- .../dts/samsung/exynos9820-beyond0lte_eur_open_20.dts | 5 +++-- .../dts/samsung/exynos9820-beyond0lte_eur_open_22.dts | 5 +++-- .../dts/samsung/exynos9820-beyond0lte_eur_open_24.dts | 5 +++-- .../dts/samsung/exynos9820-beyond0lte_eur_open_25.dts | 5 +++-- arch/arm64/configs/exynos9820-beyond0lte_defconfig | 10 ++++++---- 9 files changed, 31 insertions(+), 22 deletions(-) diff --git a/arch/arm64/boot/dts/G970F.mk b/arch/arm64/boot/dts/G970F.mk index 7f9437bfbbe5..55d4b7ee168f 100644 --- a/arch/arm64/boot/dts/G970F.mk +++ b/arch/arm64/boot/dts/G970F.mk @@ -1,12 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9820.dtb -dtbo-y += samsung/exynos9820-beyond0lte_eur_open_24.dtbo -dtbo-y += samsung/exynos9820-beyond0lte_eur_open_22.dtbo -dtbo-y += samsung/exynos9820-beyond0lte_eur_open_19.dtbo +dtbo-y += samsung/exynos9820-beyond0lte_eur_open_25.dtbo dtbo-y += samsung/exynos9820-beyond0lte_eur_open_18.dtbo +dtbo-y += samsung/exynos9820-beyond0lte_eur_open_24.dtbo dtbo-y += samsung/exynos9820-beyond0lte_eur_open_17.dtbo +dtbo-y += samsung/exynos9820-beyond0lte_eur_open_19.dtbo dtbo-y += samsung/exynos9820-beyond0lte_eur_open_20.dtbo -dtbo-y += samsung/exynos9820-beyond0lte_eur_open_25.dtbo +dtbo-y += samsung/exynos9820-beyond0lte_eur_open_22.dtbo targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_17.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_17.dts index 7a06d703e66b..9d5c720462fa 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_17.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_17.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6400,8 +6401,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_18.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_18.dts index 5c70b8837250..012affbfa5a3 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_18.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_18.dts @@ -865,6 +865,7 @@ 
battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6401,8 +6402,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_19.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_19.dts index 3cd9980ab2b0..23d25ef5cee9 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_19.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_19.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6430,8 +6431,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_20.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_20.dts index 5e782d5634dd..f7aaa78099bd 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_20.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_20.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6429,8 +6430,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_22.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_22.dts index 407a89226a3b..701ab9847051 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_22.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_22.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6429,8 +6430,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git 
a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_24.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_24.dts index 008ecb3ce572..f9efc23389c2 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_24.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_24.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6429,8 +6430,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_25.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_25.dts index 0a19b7eefb10..4e38eed059e5 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_25.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyond0lte_eur_open_25.dts @@ -865,6 +865,7 @@ battery,usb_temp_check_type = <0x2>; battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -6428,8 +6429,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x105715>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/configs/exynos9820-beyond0lte_defconfig b/arch/arm64/configs/exynos9820-beyond0lte_defconfig index 77f4e6e84f85..daff2edcbe33 100644 --- a/arch/arm64/configs/exynos9820-beyond0lte_defconfig +++ b/arch/arm64/configs/exynos9820-beyond0lte_defconfig @@ -513,6 +513,7 @@ CONFIG_ARM64_ERRATUM_832075=y CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1188873=y # CONFIG_CAVIUM_ERRATUM_22375 is not set # CONFIG_CAVIUM_ERRATUM_23154 is not set # CONFIG_CAVIUM_ERRATUM_27456 is not set @@ -614,6 +615,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -771,6 +773,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y @@ -1330,6 +1333,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y @@ -1563,8 +1567,6 @@ CONFIG_TZDEV_SK_MULTICORE=y CONFIG_ION_FD2PHYS=y CONFIG_TZIRS=y # CONFIG_TZTUI is not set -CONFIG_TZDEV_PAGE_MIGRATION=y -# CONFIG_TZ_TRANSPORT is not set # CONFIG_TZDEV_HOTPLUG is not set CONFIG_TZDEV_BOOST=y CONFIG_TZ_BOOT_LOG=y @@ -2017,7 +2019,7 @@ CONFIG_USB_NET_CDCETHER=y CONFIG_USB_NET_CDC_EEM=y CONFIG_USB_NET_CDC_NCM=y # 
CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -CONFIG_USB_NET_CDC_MBIM=y +# CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SR9700 is not set # CONFIG_USB_NET_SR9800 is not set @@ -4075,7 +4077,7 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_ACM=y CONFIG_USB_PRINTER=y -CONFIG_USB_WDM=y +# CONFIG_USB_WDM is not set # CONFIG_USB_TMC is not set # From 491c8618926ecf13d21c1900c44b32bd9f4efde7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Nov 2022 17:45:03 +0400 Subject: [PATCH 008/452] G977B - HVJ5 Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/boot/dts/G977B.mk | 8 ++++---- .../dts/samsung/exynos9820-beyondx_eur_open_00.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_01.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_02.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_03.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_04.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_05.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_06.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_07.dts | 5 +++-- .../dts/samsung/exynos9820-beyondx_eur_open_08.dts | 5 +++-- arch/arm64/configs/exynos9820-beyondx_defconfig | 11 +++++++---- 11 files changed, 38 insertions(+), 26 deletions(-) diff --git a/arch/arm64/boot/dts/G977B.mk b/arch/arm64/boot/dts/G977B.mk index ea514c3398f8..081a7798ee24 100644 --- a/arch/arm64/boot/dts/G977B.mk +++ b/arch/arm64/boot/dts/G977B.mk @@ -1,14 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 -dtbo-y += samsung/exynos9820-beyondx_eur_open_08.dtbo -dtbo-y += samsung/exynos9820-beyondx_eur_open_04.dtbo +dtb-y += exynos/exynos9820.dtb +dtbo-y += samsung/exynos9820-beyondx_eur_open_07.dtbo dtbo-y += samsung/exynos9820-beyondx_eur_open_02.dtbo dtbo-y += samsung/exynos9820-beyondx_eur_open_00.dtbo +dtbo-y += samsung/exynos9820-beyondx_eur_open_08.dtbo dtbo-y += samsung/exynos9820-beyondx_eur_open_01.dtbo +dtbo-y += samsung/exynos9820-beyondx_eur_open_04.dtbo dtbo-y += samsung/exynos9820-beyondx_eur_open_06.dtbo -dtbo-y += samsung/exynos9820-beyondx_eur_open_07.dtbo dtbo-y += samsung/exynos9820-beyondx_eur_open_05.dtbo dtbo-y += samsung/exynos9820-beyondx_eur_open_03.dtbo -dtb-y += exynos/exynos9820.dtb targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_00.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_00.dts index 60959d8598f1..fb17ceb71929 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_00.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_00.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7351,8 +7352,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_01.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_01.dts index b2dbd34f2d40..c02e2d960807 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_01.dts +++ 
b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_01.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7351,8 +7352,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_02.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_02.dts index c03e9194ac6b..4792d06c793a 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_02.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_02.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7361,8 +7362,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_03.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_03.dts index 0735df3e719f..e518b82a96c6 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_03.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_03.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_04.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_04.dts index d0e293c33282..7bef253d5110 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_04.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_04.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; 
sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_05.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_05.dts index 79e2e1160000..9e29297ccfa8 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_05.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_05.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_06.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_06.dts index bf61762bedda..2323a77805af 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_06.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_06.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_07.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_07.dts index d6bd051617ba..0aab114b8545 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_07.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_07.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7378,8 +7379,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_08.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_08.dts index dc8985462259..04c01df4a548 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_08.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_eur_open_08.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7378,8 +7379,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; 
sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/configs/exynos9820-beyondx_defconfig b/arch/arm64/configs/exynos9820-beyondx_defconfig index 20510bd122d9..65a3416a2f85 100644 --- a/arch/arm64/configs/exynos9820-beyondx_defconfig +++ b/arch/arm64/configs/exynos9820-beyondx_defconfig @@ -513,6 +513,7 @@ CONFIG_ARM64_ERRATUM_832075=y CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1188873=y # CONFIG_CAVIUM_ERRATUM_22375 is not set # CONFIG_CAVIUM_ERRATUM_23154 is not set # CONFIG_CAVIUM_ERRATUM_27456 is not set @@ -614,6 +615,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -771,6 +773,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y @@ -1330,6 +1333,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y @@ -1350,6 +1354,7 @@ CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_MAX is not set CONFIG_CMA_ALIGNMENT=8 CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_CPU_CAPACITY_FIXUP=y # # Bus devices @@ -1564,8 +1569,6 @@ CONFIG_TZDEV_SK_MULTICORE=y CONFIG_ION_FD2PHYS=y CONFIG_TZIRS=y # CONFIG_TZTUI is not set -CONFIG_TZDEV_PAGE_MIGRATION=y -# CONFIG_TZ_TRANSPORT is not set # CONFIG_TZDEV_HOTPLUG is not set CONFIG_TZDEV_BOOST=y CONFIG_TZ_BOOT_LOG=y @@ -2018,7 +2021,7 @@ CONFIG_USB_NET_CDCETHER=y CONFIG_USB_NET_CDC_EEM=y CONFIG_USB_NET_CDC_NCM=y # CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -CONFIG_USB_NET_CDC_MBIM=y +# CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SR9700 is not set # CONFIG_USB_NET_SR9800 is not set @@ -4055,7 +4058,7 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_ACM=y CONFIG_USB_PRINTER=y -CONFIG_USB_WDM=y +# CONFIG_USB_WDM is not set # CONFIG_USB_TMC is not set # From 70064c16a478d6e5d873bc1e534070bdb66d4072 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 22 Nov 2022 12:35:52 +0400 Subject: [PATCH 009/452] N976N - HVJ4 Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/boot/dts/N976N.mk | 10 +++--- .../dts/samsung/exynos9820-d2x_kor_16.dts | 28 +++++++++------- .../dts/samsung/exynos9820-d2x_kor_17.dts | 28 +++++++++------- .../dts/samsung/exynos9820-d2x_kor_18.dts | 28 +++++++++------- .../dts/samsung/exynos9820-d2x_kor_19.dts | 28 +++++++++------- .../dts/samsung/exynos9820-d2x_kor_21.dts | 32 +++++++++++-------- .../dts/samsung/exynos9820-d2x_kor_22.dts | 32 +++++++++++-------- .../dts/samsung/exynos9820-d2x_kor_23.dts | 32 +++++++++++-------- .../dts/samsung/exynos9820-d2x_kor_24.dts | 32 +++++++++++-------- arch/arm64/configs/exynos9820-d2xks_defconfig | 19 ++++++----- 10 files changed, 150 insertions(+), 119 deletions(-) diff --git a/arch/arm64/boot/dts/N976N.mk b/arch/arm64/boot/dts/N976N.mk index 6b22fa3534b4..60eab0079c30 100644 --- a/arch/arm64/boot/dts/N976N.mk +++ 
b/arch/arm64/boot/dts/N976N.mk @@ -1,14 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9825.dtb -dtbo-y += samsung/exynos9820-d2x_kor_24.dtbo -dtbo-y += samsung/exynos9820-d2x_kor_17.dtbo -dtbo-y += samsung/exynos9820-d2x_kor_19.dtbo -dtbo-y += samsung/exynos9820-d2x_kor_02.dtbo dtbo-y += samsung/exynos9820-d2x_kor_16.dtbo +dtbo-y += samsung/exynos9820-d2x_kor_02.dtbo dtbo-y += samsung/exynos9820-d2x_kor_21.dtbo +dtbo-y += samsung/exynos9820-d2x_kor_24.dtbo dtbo-y += samsung/exynos9820-d2x_kor_22.dtbo -dtbo-y += samsung/exynos9820-d2x_kor_18.dtbo dtbo-y += samsung/exynos9820-d2x_kor_23.dtbo +dtbo-y += samsung/exynos9820-d2x_kor_19.dtbo +dtbo-y += samsung/exynos9820-d2x_kor_17.dtbo +dtbo-y += samsung/exynos9820-d2x_kor_18.dtbo targets += dtbs DTB_LIST := $(dtb-y) $(dtbo-y) diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_16.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_16.dts index 4b8c099738b1..4f1697b01610 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_16.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_16.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xb4>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; }; battery { @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x125 0x157 0x194 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -964,7 +965,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; 
battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -975,6 +976,9 @@ battery,mix_high_temp = <0x1ae>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x190>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x109a>; @@ -1024,9 +1028,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xc4e>; battery,ttf_hv_wireless_charge_current = <0x60e>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_17.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_17.dts index f58422ece64d..351d03e7aa17 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_17.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_17.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xb4>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; }; battery { @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x125 0x157 0x194 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; 
battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -964,7 +965,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -975,6 +976,9 @@ battery,mix_high_temp = <0x1ae>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x190>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x10b8>; @@ -1024,9 +1028,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xc4e>; battery,ttf_hv_wireless_charge_current = <0x60e>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_18.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_18.dts index c0380b67acf9..c38716ef3957 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_18.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_18.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xb4>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; }; battery { @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x125 0x157 0x194 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - 
battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -964,7 +965,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -975,6 +976,9 @@ battery,mix_high_temp = <0x1ae>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x190>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x10b8>; @@ -1024,9 +1028,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xc4e>; battery,ttf_hv_wireless_charge_current = <0x60e>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_19.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_19.dts index ee242903b1ac..f48b618fc9b7 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_19.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_19.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xb4>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; }; battery { @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x125 0x157 0x194 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; 
battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -964,7 +965,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -975,6 +976,9 @@ battery,mix_high_temp = <0x1ae>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x190>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x109a>; @@ -1024,9 +1028,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xc4e>; battery,ttf_hv_wireless_charge_current = <0x60e>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_21.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_21.dts index 1f6e7860df75..af13310cba66 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_21.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_21.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xdc>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; charger,ta_alert_wa; }; @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x149 0x170 0x1aa 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 
0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -966,7 +967,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -978,6 +979,9 @@ battery,mix_high_temp = <0x1a4>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x186>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x109a>; @@ -994,8 +998,8 @@ battery,chg_float_voltage = <0x10fe>; battery,chg_ocp_current = <0x1838>; battery,chg_ocp_dtc = <0x64>; - battery,swelling_high_temp_block = <0x19a>; - battery,swelling_high_temp_recov = <0x186>; + battery,swelling_high_temp_block = <0x1a4>; + battery,swelling_high_temp_recov = <0x190>; battery,swelling_wc_high_temp_block = <0x1a4>; battery,swelling_wc_high_temp_recov = <0x19a>; battery,swelling_low_temp_block_1st = <0x96>; @@ -1038,9 +1042,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xb22>; battery,ttf_hv_wireless_charge_current = <0x5aa>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_22.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_22.dts index 15b9ee636bd2..77575668c0c8 100644 --- 
a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_22.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_22.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xdc>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; charger,ta_alert_wa; }; @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x149 0x170 0x1aa 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -966,7 +967,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -978,6 +979,9 @@ battery,mix_high_temp = <0x1a4>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x186>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x109a>; @@ -994,8 +998,8 @@ battery,chg_float_voltage = <0x10fe>; battery,chg_ocp_current = <0x1838>; battery,chg_ocp_dtc = <0x64>; - battery,swelling_high_temp_block = <0x19a>; - battery,swelling_high_temp_recov = <0x186>; + battery,swelling_high_temp_block = <0x1a4>; + battery,swelling_high_temp_recov = <0x190>; battery,swelling_wc_high_temp_block = <0x1a4>; 
battery,swelling_wc_high_temp_recov = <0x19a>; battery,swelling_low_temp_block_1st = <0x96>; @@ -1038,9 +1042,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xb22>; battery,ttf_hv_wireless_charge_current = <0x5aa>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_23.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_23.dts index 817e2f5bb86b..9f455c8e1f4b 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_23.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_23.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xdc>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; charger,ta_alert_wa; }; @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x149 0x170 0x1aa 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -966,7 +967,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -978,6 +979,9 @@ battery,mix_high_temp = <0x1a4>; 
battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x186>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x109a>; @@ -994,8 +998,8 @@ battery,chg_float_voltage = <0x10fe>; battery,chg_ocp_current = <0x1838>; battery,chg_ocp_dtc = <0x64>; - battery,swelling_high_temp_block = <0x19a>; - battery,swelling_high_temp_recov = <0x186>; + battery,swelling_high_temp_block = <0x1a4>; + battery,swelling_high_temp_recov = <0x190>; battery,swelling_wc_high_temp_block = <0x1a4>; battery,swelling_wc_high_temp_recov = <0x19a>; battery,swelling_low_temp_block_1st = <0x96>; @@ -1038,9 +1042,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xb22>; battery,ttf_hv_wireless_charge_current = <0x5aa>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_24.dts b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_24.dts index b684a57d7b22..9f09220a989c 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_24.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-d2x_kor_24.dts @@ -857,7 +857,7 @@ charger,direct_charger = "pca9468-charger"; charger,dchg_min_current = <0x7d0>; charger,dchg_temp_low_threshold = <0xdc>; - charger,dchg_temp_high_threshold = <0x19a>; + charger,dchg_temp_high_threshold = <0x1a4>; charger,ta_alert_wa; }; @@ -884,6 +884,7 @@ battery,chg_thermal_source = <0x2>; battery,wpc_thermal_source = <0x2>; battery,dchg_thermal_source = <0x3>; + battery,lrp_temp_check_type = <0x3>; battery,polling_time = <0xa 0x1e 0x1e 0x1e 0xe10>; battery,temp_table_adc = <0x149 0x170 0x1aa 0x1da 0x22d 0x28c 0x2fc 0x37d 0x410 0x4b7 0x56e 0x63a 0x714 0x7f9 0x8e3 0x9cf 0xab2 0xb87 0xc3f 0xcf9 0xd67 0xdda 0xe25>; battery,temp_table_data = <0x384 0x352 0x320 0x2ee 0x2bc 0x28a 0x258 0x226 0x1f4 0x1c2 0x190 0x15e 0x12c 0xfa 0xc8 0x96 0x64 0x32 0x0 0xffffffce 0xffffff9c 0xffffff6a 0xffffff38>; @@ -933,8 +934,8 @@ battery,temp_high_recovery_lpm = <0x1e0>; battery,temp_low_threshold_lpm = <0x0>; battery,temp_low_recovery_lpm = <0x14>; - battery,wpc_high_threshold_normal = <0x1c2>; - battery,wpc_high_recovery_normal = <0x1ae>; + battery,wpc_high_threshold_normal = <0x1f4>; + battery,wpc_high_recovery_normal = <0x1e0>; battery,wpc_low_threshold_normal = <0x0>; battery,wpc_low_recovery_normal = <0x14>; battery,tx_high_threshold = <0x1c2>; @@ -946,12 +947,12 @@ battery,full_check_count = <0x1>; battery,chg_gpio_full_check = <0x0>; battery,chg_polarity_full_check = <0x1>; - battery,chg_high_temp = <0x1e0>; - battery,chg_high_temp_recovery = <0x1cc>; - battery,dchg_high_temp = <0x21c>; - battery,dchg_high_temp_recovery = <0x190>; - battery,dchg_high_batt_temp = <0x190>; - battery,dchg_high_batt_temp_recovery = <0x17c>; + battery,chg_high_temp = <0x258>; + 
battery,chg_high_temp_recovery = <0x244>; + battery,dchg_high_temp = <0x3e8 0x23a 0x23a 0x23a>; + battery,dchg_high_temp_recovery = <0x3e8 0x212 0x212 0x212>; + battery,dchg_high_batt_temp = <0x3e8 0x19a 0x3e8 0x190>; + battery,dchg_high_batt_temp_recovery = <0x3e8 0x190 0x3e8 0x186>; battery,chg_input_limit_current = <0x3e8>; battery,chg_charging_limit_current = <0x76c>; battery,dchg_input_limit_current = <0x3e8>; @@ -966,7 +967,7 @@ battery,wpc_temp_lcd_on_control_source = <0x1>; battery,wpc_lcd_on_high_temp = <0x19a>; battery,wpc_lcd_on_high_temp_rec = <0x186>; - battery,wpc_lcd_on_input_limit_current = <0x1c2>; + battery,wpc_lcd_on_input_limit_current = <0x258>; battery,wpc_store_high_temp = <0x168>; battery,wpc_store_high_temp_recovery = <0x154>; battery,wpc_store_lcd_on_high_temp = <0x168>; @@ -978,6 +979,9 @@ battery,mix_high_temp = <0x1a4>; battery,mix_high_chg_temp = <0x1f4>; battery,mix_high_temp_recovery = <0x186>; + battery,temp_table_LRP_NORMAL = <0x384 0x17c 0x17c 0x172 0x384 0x17c 0x17c 0x168 0x0 0x0 0x0 0x0>; + battery,temp_table_LRP_25W = <0x190 0x186 0x17c 0x172 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; + battery,temp_table_LRP_45W = <0x19a 0x190 0x186 0x17c 0x190 0x17c 0x17c 0x168 0x6a4 0xd48 0x44c 0x898>; battery,full_condition_type = <0x9>; battery,full_condition_soc = <0x5d>; battery,full_condition_vcell = <0x109a>; @@ -994,8 +998,8 @@ battery,chg_float_voltage = <0x10fe>; battery,chg_ocp_current = <0x1838>; battery,chg_ocp_dtc = <0x64>; - battery,swelling_high_temp_block = <0x19a>; - battery,swelling_high_temp_recov = <0x186>; + battery,swelling_high_temp_block = <0x1a4>; + battery,swelling_high_temp_recov = <0x190>; battery,swelling_wc_high_temp_block = <0x1a4>; battery,swelling_wc_high_temp_recov = <0x19a>; battery,swelling_low_temp_block_1st = <0x96>; @@ -1038,9 +1042,9 @@ battery,siop_apdo_input_limit_current = <0x4b0>; battery,siop_apdo_charging_limit_current = <0x7d0>; battery,siop_wireless_input_limit_current = <0x258>; - battery,siop_wireless_charging_limit_current = <0x352>; + battery,siop_wireless_charging_limit_current = <0x3e8>; battery,siop_hv_wireless_input_limit_current = <0x2bc>; - battery,siop_hv_wireless_charging_limit_current = <0x352>; + battery,siop_hv_wireless_charging_limit_current = <0x3e8>; battery,siop_store_hv_wireless_input_limit_current = <0x1c2>; battery,ttf_hv_charge_current = <0xb22>; battery,ttf_hv_wireless_charge_current = <0x5aa>; diff --git a/arch/arm64/configs/exynos9820-d2xks_defconfig b/arch/arm64/configs/exynos9820-d2xks_defconfig index 2857e2063d47..8890e944ba55 100644 --- a/arch/arm64/configs/exynos9820-d2xks_defconfig +++ b/arch/arm64/configs/exynos9820-d2xks_defconfig @@ -513,6 +513,7 @@ CONFIG_ARM64_ERRATUM_832075=y CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1188873=y # CONFIG_CAVIUM_ERRATUM_22375 is not set # CONFIG_CAVIUM_ERRATUM_23154 is not set # CONFIG_CAVIUM_ERRATUM_27456 is not set @@ -614,6 +615,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -771,6 +773,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y @@ -1330,6 +1333,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set 
CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y @@ -1350,6 +1354,7 @@ CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_MAX is not set CONFIG_CMA_ALIGNMENT=8 CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_CPU_CAPACITY_FIXUP=y # # Bus devices @@ -1564,8 +1569,6 @@ CONFIG_TZDEV_SK_MULTICORE=y CONFIG_ION_FD2PHYS=y CONFIG_TZIRS=y # CONFIG_TZTUI is not set -CONFIG_TZDEV_PAGE_MIGRATION=y -# CONFIG_TZ_TRANSPORT is not set # CONFIG_TZDEV_HOTPLUG is not set CONFIG_TZDEV_BOOST=y CONFIG_TZ_BOOT_LOG=y @@ -2018,7 +2021,7 @@ CONFIG_USB_NET_CDCETHER=y CONFIG_USB_NET_CDC_EEM=y CONFIG_USB_NET_CDC_NCM=y # CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -CONFIG_USB_NET_CDC_MBIM=y +# CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SR9700 is not set # CONFIG_USB_NET_SR9800 is not set @@ -3987,6 +3990,8 @@ CONFIG_HID_PICOLCD=y # CONFIG_HID_PICOLCD_LEDS is not set # CONFIG_HID_PICOLCD_CIR is not set CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PLAYSTATION=y +CONFIG_PLAYSTATION_FF=y CONFIG_HID_PRIMAX=y # CONFIG_HID_RETRODE is not set CONFIG_HID_ROCCAT=y @@ -4074,7 +4079,7 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_ACM=y CONFIG_USB_PRINTER=y -CONFIG_USB_WDM=y +# CONFIG_USB_WDM is not set # CONFIG_USB_TMC is not set # @@ -5542,9 +5547,6 @@ CONFIG_SENSORS_FINGERPRINT=y # CONFIG_SENSORS_FPRINT_SECURE is not set CONFIG_SENSORS_QBT2000=y # CONFIG_SENSORS_ET5XX is not set -# CONFIG_FIVE_TEE_DRIVER is not set -CONFIG_ICD=y -CONFIG_ICD_USE_TZDEV=y CONFIG_SEC_EXT=y CONFIG_SEC_REBOOT=y @@ -5645,9 +5647,6 @@ CONFIG_SEC_HEAVY_TASK_CPU=y # CONFIG_VBUS_NOTIFIER=y CONFIG_KPERFMON=y -CONFIG_TZIC=y -CONFIG_TZIC_USE_TZDEV=y -# CONFIG_TZIC_DEFAULT is not set CONFIG_SPU_VERIFY=y # From bd7370fcccb42b7dc20ce9d578b4dcb07734bf2a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 15 Dec 2022 19:14:40 +0400 Subject: [PATCH 010/452] G977N - HVJ5 Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/boot/dts/G977N.mk | 6 +- .../dts/samsung/exynos9820-beyondx_kor_00.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_01.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_02.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_03.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_04.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_05.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_06.dts | 5 +- .../dts/samsung/exynos9820-beyondx_kor_07.dts | 56 +++++++++++++++++-- .../dts/samsung/exynos9820-beyondx_kor_08.dts | 56 +++++++++++++++++-- .../configs/exynos9820-beyondxks_defconfig | 10 ++-- 11 files changed, 132 insertions(+), 31 deletions(-) diff --git a/arch/arm64/boot/dts/G977N.mk b/arch/arm64/boot/dts/G977N.mk index 94efe66ca5f6..097dad71d4de 100644 --- a/arch/arm64/boot/dts/G977N.mk +++ b/arch/arm64/boot/dts/G977N.mk @@ -1,14 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 dtb-y += exynos/exynos9820.dtb -dtbo-y += samsung/exynos9820-beyondx_kor_03.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_08.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_06.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_00.dtbo +dtbo-y += samsung/exynos9820-beyondx_kor_03.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_04.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_07.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_01.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_02.dtbo -dtbo-y += samsung/exynos9820-beyondx_kor_08.dtbo dtbo-y += samsung/exynos9820-beyondx_kor_05.dtbo -dtbo-y += samsung/exynos9820-beyondx_kor_00.dtbo targets += dtbs DTB_LIST := $(dtb-y) 
$(dtbo-y) diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_00.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_00.dts index 5bafcd97890d..fe19b937ec08 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_00.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_00.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7351,8 +7352,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_01.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_01.dts index 7d489023ea1f..43cb4db28bca 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_01.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_01.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7351,8 +7352,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_02.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_02.dts index 55821f551d4f..1205ee0f73c8 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_02.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_02.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7361,8 +7362,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_03.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_03.dts index 2f879307e0c3..2ce6ad3be662 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_03.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_03.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = 
<0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_04.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_04.dts index 70752999c370..018eca418f8f 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_04.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_04.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_05.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_05.dts index da7754b7e27b..5378b051e58e 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_05.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_05.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_06.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_06.dts index d8e990ab467c..2101a6b764fc 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_06.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_06.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7418,8 +7419,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_07.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_07.dts index 5aa1e0a0b746..28dcf15dfa74 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_07.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_07.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; 
battery,chg_thermal_source = <0x2>; @@ -7378,8 +7379,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; @@ -8839,6 +8840,41 @@ }; }; + fragment@133 { + target = <0xffffffff>; + + __overlay__ { + gpios = <0xffffffff 0x4 0x0 0xffffffff 0x5 0x0>; + clock-frequency = <0x61a80>; + status = "okay"; + #address-cells = <0x1>; + #size-cells = <0x0>; + + sx9330-i2c@28 { + compatible = "sx9330"; + reg = <0x28>; + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <0x59>; + interrupt-parent = <0xffffffff>; + interrupts = <0x0 0x0 0x0>; + sx9330,nirq-gpio = <0xffffffff 0x0 0x0>; + sx9330,scanperiod_reg = <0x32>; + sx9330,gnrlctrl2_reg = <0xff0002>; + sx9330,afeparamsph0_reg = <0x21f>; + sx9330,afephph0_reg = <0x3fbb8548>; + sx9330,adcfiltph0_reg = <0x510101d>; + sx9330,afeparamsph1_reg = <0x1e0f>; + sx9330,adcfiltph1_reg = <0x104d15>; + sx9330,avgbfilt_reg = "``\f"; + sx9330,avgafilt_reg = <0x0>; + sx9330,advdig3_reg = <0x0>; + sx9330,advdig4_reg = <0x0>; + sx9330,refcorra_reg = <0x0>; + }; + }; + }; + __symbols__ { ssp_batch_wake_irq = "/fragment@0/__overlay__/ssp-batch-wake-irq"; ssp_host_req = "/fragment@0/__overlay__/ssp-host-req"; @@ -9090,7 +9126,7 @@ gpa4 = "/fragment@2/__overlay__/BCM4773@0:ssp-shub-int:0"; gph0 = "/fragment@2/__overlay__/BCM4773@0/controller-data:cs-gpio:0", "/fragment@22/__overlay__:gpios:0", "/fragment@22/__overlay__:gpios:12"; gpa2 = "/fragment@battery/__overlay__/battery:battery,wpc_det:0", "/fragment@5/__overlay__/mfc-charger@3b:battery,wpc_det:0", "/fragment@model/__overlay__/gpio_keys/button@4:gpios:0", "/fragment@model/__overlay__/bluetooth:gpios:24", "/fragment@76/__overlay__/s2mps20mfd@00:s2mps20,irq-gpio:0", "/fragment@76/__overlay__/s2mps20mfd@00:interrupt-parent:0", "/fragment@82/__overlay__/cs47l93@0:interrupt-parent:0", "/fragment@124/__overlay__/sec-nfc@27:sec-nfc,clk_req-gpio:0", "/fragment@130/__overlay__/rtcfmradio@64:interrupt-parent:0", "/fragment@130/__overlay__/rtcfmradio@64:fmint-gpio:0"; - gpa3 = "/fragment@battery/__overlay__/battery:battery,wpc_int:0", "/fragment@5/__overlay__/mfc-charger@3b:battery,wpc_int:0", "/fragment@model/__overlay__/sec_detect_conn:sec,det_conn_gpios:12", "/fragment@model/__overlay__/hall:hall,gpio_flip_cover:0", "/fragment@model/__overlay__/abc_hub/cond:cond,det_conn_gpios:12", "/fragment@46/__overlay__:gpios:0", "/fragment@90/__overlay__/gpios/disp-reset:gpios:0", "/fragment@91/__overlay__/sx9330-i2c@28:interrupt-parent:0", "/fragment@91/__overlay__/sx9330-i2c@28:sx9330,nirq-gpio:0"; + gpa3 = "/fragment@battery/__overlay__/battery:battery,wpc_int:0", "/fragment@5/__overlay__/mfc-charger@3b:battery,wpc_int:0", "/fragment@model/__overlay__/sec_detect_conn:sec,det_conn_gpios:12", "/fragment@model/__overlay__/hall:hall,gpio_flip_cover:0", "/fragment@model/__overlay__/abc_hub/cond:cond,det_conn_gpios:12", "/fragment@46/__overlay__:gpios:0", "/fragment@90/__overlay__/gpios/disp-reset:gpios:0", "/fragment@91/__overlay__/sx9330-i2c@28:interrupt-parent:0", "/fragment@91/__overlay__/sx9330-i2c@28:sx9330,nirq-gpio:0", "/fragment@133/__overlay__/sx9330-i2c@28:interrupt-parent:0", "/fragment@133/__overlay__/sx9330-i2c@28:sx9330,nirq-gpio:0"; exynos_adc = "/fragment@battery/__overlay__/battery:io-channels:0", 
"/fragment@battery/__overlay__/battery:io-channels:8", "/fragment@battery/__overlay__/battery:io-channels:16", "/fragment@battery/__overlay__/battery:io-channels:24", "/fragment@battery/__overlay__/battery:io-channels:32", "/fragment@model/__overlay__/sec_thermistor@0:io-channels:0", "/fragment@model/__overlay__/sec_thermistor@2:io-channels:0", "/fragment@model/__overlay__/sec_thermistor@5:io-channels:0", "/fragment@model/__overlay__/sec_thermistor@7:io-channels:0"; hsi2c_46 = "/fragment@5:target:0"; gpg3 = "/fragment@5/__overlay__/mfc-charger@3b:battery,mst_pwr_en:0", "/fragment@camera/__overlay__/fimc-is_sensor_3m3@5A:gpio_reset:0", "/fragment@camera/__overlay__/fimc-is-flash-gpio@0:flash-gpio:0", "/fragment@model/__overlay__/sec-mst:sec-mst,mst-pwr-gpio:0", "/fragment@modemif/__overlay__/mif_pdata:gpios:0", "/fragment@modemif/__overlay__/mif_pdata:gpios:12", "/fragment@modemif/__overlay__/mif_pdata:gpios:24", "/fragment@modemif/__overlay__/mif_pdata:gpios:36", "/fragment@77/__overlay__/s2mpb02_pmic@59/torch:flash1-gpio:0"; @@ -9251,8 +9287,8 @@ pcie0_perst = "/fragment@89/__overlay__:pinctrl-0:4", "/fragment@89/__overlay__:pinctrl-1:0"; pcie_wake = "/fragment@89/__overlay__:pinctrl-0:8", "/fragment@89/__overlay__:pinctrl-1:4"; panel_0 = "/fragment@90:target:0"; - hsi2c_14 = "/fragment@91:target:0"; - gpp2 = "/fragment@91/__overlay__:gpios:0", "/fragment@91/__overlay__:gpios:12", "/fragment@116/__overlay__/tdmb-spi@0/controller-data:cs-gpio:0"; + hsi2c_14 = "/fragment@91:target:0", "/fragment@133:target:0"; + gpp2 = "/fragment@91/__overlay__:gpios:0", "/fragment@91/__overlay__:gpios:12", "/fragment@116/__overlay__/tdmb-spi@0/controller-data:cs-gpio:0", "/fragment@133/__overlay__:gpios:0", "/fragment@133/__overlay__:gpios:12"; spi_0 = "/fragment@92:target:0"; spi0_bus = "/fragment@92/__overlay__:pinctrl-0:0"; spi0_cs_func = "/fragment@92/__overlay__:pinctrl-0:4"; @@ -10086,5 +10122,15 @@ }; }; }; + + fragment@133 { + + __overlay__ { + + sx9330-i2c@28 { + pinctrl-0 = <0x0>; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_08.dts b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_08.dts index 42603d93c49f..de9d3b2f1fc0 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_08.dts +++ b/arch/arm64/boot/dts/samsung/exynos9820-beyondx_kor_08.dts @@ -879,6 +879,7 @@ battery,chg_temp_check_type = <0x2>; battery,wpc_temp_check_type = <0x2>; battery,dchg_temp_check_type = <0x0>; + battery,lrp_temp_check_type = <0x0>; battery,thermal_source = <0x2>; battery,usb_thermal_source = <0x2>; battery,chg_thermal_source = <0x2>; @@ -7378,8 +7379,8 @@ sx9330,afephph0_reg = <0x3fbb8548>; sx9330,adcfiltph0_reg = <0x510101d>; sx9330,afeparamsph1_reg = <0x1e0f>; - sx9330,adcfiltph1_reg = <0x104d15>; - sx9330,avgbfilt_reg = "``\f"; + sx9330,adcfiltph1_reg = <0x104b25>; + sx9330,avgbfilt_reg = <0x60600e00>; sx9330,avgafilt_reg = <0x0>; sx9330,advdig3_reg = <0x0>; sx9330,advdig4_reg = <0x0>; @@ -8817,6 +8818,41 @@ }; }; + fragment@131 { + target = <0xffffffff>; + + __overlay__ { + gpios = <0xffffffff 0x4 0x0 0xffffffff 0x5 0x0>; + clock-frequency = <0x61a80>; + status = "okay"; + #address-cells = <0x1>; + #size-cells = <0x0>; + + sx9330-i2c@28 { + compatible = "sx9330"; + reg = <0x28>; + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <0x59>; + interrupt-parent = <0xffffffff>; + interrupts = <0x0 0x0 0x0>; + sx9330,nirq-gpio = <0xffffffff 0x0 0x0>; + sx9330,scanperiod_reg = <0x32>; + sx9330,gnrlctrl2_reg = <0xff0002>; + sx9330,afeparamsph0_reg = <0x21f>; + 
sx9330,afephph0_reg = <0x3fbb8548>; + sx9330,adcfiltph0_reg = <0x510101d>; + sx9330,afeparamsph1_reg = <0x1e0f>; + sx9330,adcfiltph1_reg = <0x104d15>; + sx9330,avgbfilt_reg = "``\f"; + sx9330,avgafilt_reg = <0x0>; + sx9330,advdig3_reg = <0x0>; + sx9330,advdig4_reg = <0x0>; + sx9330,refcorra_reg = <0x0>; + }; + }; + }; + __symbols__ { ssp_batch_wake_irq = "/fragment@0/__overlay__/ssp-batch-wake-irq"; ssp_host_req = "/fragment@0/__overlay__/ssp-host-req"; @@ -9068,7 +9104,7 @@ gpa4 = "/fragment@2/__overlay__/BCM4773@0:ssp-shub-int:0"; gph0 = "/fragment@2/__overlay__/BCM4773@0/controller-data:cs-gpio:0", "/fragment@22/__overlay__:gpios:0", "/fragment@22/__overlay__:gpios:12"; gpa2 = "/fragment@battery/__overlay__/battery:battery,wpc_det:0", "/fragment@5/__overlay__/mfc-charger@3b:battery,wpc_det:0", "/fragment@model/__overlay__/gpio_keys/button@4:gpios:0", "/fragment@model/__overlay__/bluetooth:gpios:24", "/fragment@76/__overlay__/s2mps20mfd@00:s2mps20,irq-gpio:0", "/fragment@76/__overlay__/s2mps20mfd@00:interrupt-parent:0", "/fragment@82/__overlay__/cs47l93@0:interrupt-parent:0", "/fragment@122/__overlay__/sec-nfc@27:sec-nfc,clk_req-gpio:0", "/fragment@128/__overlay__/rtcfmradio@64:interrupt-parent:0", "/fragment@128/__overlay__/rtcfmradio@64:fmint-gpio:0"; - gpa3 = "/fragment@battery/__overlay__/battery:battery,wpc_int:0", "/fragment@5/__overlay__/mfc-charger@3b:battery,wpc_int:0", "/fragment@model/__overlay__/sec_detect_conn:sec,det_conn_gpios:12", "/fragment@model/__overlay__/hall:hall,gpio_flip_cover:0", "/fragment@model/__overlay__/abc_hub/cond:cond,det_conn_gpios:12", "/fragment@46/__overlay__:gpios:0", "/fragment@90/__overlay__/gpios/disp-reset:gpios:0", "/fragment@91/__overlay__/sx9330-i2c@28:interrupt-parent:0", "/fragment@91/__overlay__/sx9330-i2c@28:sx9330,nirq-gpio:0"; + gpa3 = "/fragment@battery/__overlay__/battery:battery,wpc_int:0", "/fragment@5/__overlay__/mfc-charger@3b:battery,wpc_int:0", "/fragment@model/__overlay__/sec_detect_conn:sec,det_conn_gpios:12", "/fragment@model/__overlay__/hall:hall,gpio_flip_cover:0", "/fragment@model/__overlay__/abc_hub/cond:cond,det_conn_gpios:12", "/fragment@46/__overlay__:gpios:0", "/fragment@90/__overlay__/gpios/disp-reset:gpios:0", "/fragment@91/__overlay__/sx9330-i2c@28:interrupt-parent:0", "/fragment@91/__overlay__/sx9330-i2c@28:sx9330,nirq-gpio:0", "/fragment@131/__overlay__/sx9330-i2c@28:interrupt-parent:0", "/fragment@131/__overlay__/sx9330-i2c@28:sx9330,nirq-gpio:0"; exynos_adc = "/fragment@battery/__overlay__/battery:io-channels:0", "/fragment@battery/__overlay__/battery:io-channels:8", "/fragment@battery/__overlay__/battery:io-channels:16", "/fragment@battery/__overlay__/battery:io-channels:24", "/fragment@battery/__overlay__/battery:io-channels:32", "/fragment@model/__overlay__/sec_thermistor@0:io-channels:0", "/fragment@model/__overlay__/sec_thermistor@2:io-channels:0", "/fragment@model/__overlay__/sec_thermistor@5:io-channels:0", "/fragment@model/__overlay__/sec_thermistor@7:io-channels:0"; hsi2c_46 = "/fragment@5:target:0"; gpg3 = "/fragment@5/__overlay__/mfc-charger@3b:battery,mst_pwr_en:0", "/fragment@camera/__overlay__/fimc-is_sensor_3m3@5A:gpio_reset:0", "/fragment@camera/__overlay__/fimc-is-flash-gpio@0:flash-gpio:0", "/fragment@model/__overlay__/sec-mst:sec-mst,mst-pwr-gpio:0", "/fragment@modemif/__overlay__/mif_pdata:gpios:0", "/fragment@modemif/__overlay__/mif_pdata:gpios:12", "/fragment@modemif/__overlay__/mif_pdata:gpios:24", "/fragment@modemif/__overlay__/mif_pdata:gpios:36", 
"/fragment@77/__overlay__/s2mpb02_pmic@59/torch:flash1-gpio:0"; @@ -9229,8 +9265,8 @@ pcie0_perst = "/fragment@89/__overlay__:pinctrl-0:4", "/fragment@89/__overlay__:pinctrl-1:0"; pcie_wake = "/fragment@89/__overlay__:pinctrl-0:8", "/fragment@89/__overlay__:pinctrl-1:4"; panel_0 = "/fragment@90:target:0"; - hsi2c_14 = "/fragment@91:target:0"; - gpp2 = "/fragment@91/__overlay__:gpios:0", "/fragment@91/__overlay__:gpios:12", "/fragment@116/__overlay__/tdmb-spi@0/controller-data:cs-gpio:0"; + hsi2c_14 = "/fragment@91:target:0", "/fragment@131:target:0"; + gpp2 = "/fragment@91/__overlay__:gpios:0", "/fragment@91/__overlay__:gpios:12", "/fragment@116/__overlay__/tdmb-spi@0/controller-data:cs-gpio:0", "/fragment@131/__overlay__:gpios:0", "/fragment@131/__overlay__:gpios:12"; spi_0 = "/fragment@92:target:0"; spi0_bus = "/fragment@92/__overlay__:pinctrl-0:0"; spi0_cs_func = "/fragment@92/__overlay__:pinctrl-0:4"; @@ -10064,5 +10100,15 @@ }; }; }; + + fragment@131 { + + __overlay__ { + + sx9330-i2c@28 { + pinctrl-0 = <0x0>; + }; + }; + }; }; }; diff --git a/arch/arm64/configs/exynos9820-beyondxks_defconfig b/arch/arm64/configs/exynos9820-beyondxks_defconfig index 0d0d503e78d6..2289d11fee17 100644 --- a/arch/arm64/configs/exynos9820-beyondxks_defconfig +++ b/arch/arm64/configs/exynos9820-beyondxks_defconfig @@ -513,6 +513,7 @@ CONFIG_ARM64_ERRATUM_832075=y CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1188873=y # CONFIG_CAVIUM_ERRATUM_22375 is not set # CONFIG_CAVIUM_ERRATUM_23154 is not set # CONFIG_CAVIUM_ERRATUM_27456 is not set @@ -614,6 +615,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -771,6 +773,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y @@ -1330,6 +1333,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y @@ -1565,8 +1569,6 @@ CONFIG_TZDEV_SK_MULTICORE=y CONFIG_ION_FD2PHYS=y CONFIG_TZIRS=y # CONFIG_TZTUI is not set -CONFIG_TZDEV_PAGE_MIGRATION=y -# CONFIG_TZ_TRANSPORT is not set # CONFIG_TZDEV_HOTPLUG is not set CONFIG_TZDEV_BOOST=y CONFIG_TZ_BOOT_LOG=y @@ -2019,7 +2021,7 @@ CONFIG_USB_NET_CDCETHER=y CONFIG_USB_NET_CDC_EEM=y CONFIG_USB_NET_CDC_NCM=y # CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -CONFIG_USB_NET_CDC_MBIM=y +# CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SR9700 is not set # CONFIG_USB_NET_SR9800 is not set @@ -4091,7 +4093,7 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_ACM=y CONFIG_USB_PRINTER=y -CONFIG_USB_WDM=y +# CONFIG_USB_WDM is not set # CONFIG_USB_TMC is not set # From c4f51cf84bc3aae04b4889b3cad2f0b70c079246 Mon Sep 17 00:00:00 2001 From: Petr Vorel <petr.vorel@gmail.com> Date: Mon, 29 Oct 2018 22:10:58 +0100 Subject: [PATCH 011/452] merge_config.sh: Allow to define config prefix with CONFIG_ environment variable. merge_config.sh uses CONFIG_ which is used in kernel and other projects. There are some projects which use kconfig with different prefixes (e.g. buildroot: BR2_ prefix). 
CONFIG_ variable is already used for this purpose in kconfig binary (scripts/kconfig/lkc.h), let's use the same rule for in merge_config.sh. Suggested-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Petr Vorel <petr.vorel@gmail.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- scripts/kconfig/merge_config.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 67d131447631..da66e7742282 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -33,12 +33,15 @@ usage() { echo " -n use allnoconfig instead of alldefconfig" echo " -r list redundant entries when merging fragments" echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead." + echo + echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable." } RUNMAKE=true ALLTARGET=alldefconfig WARNREDUN=false OUTPUT=. +CONFIG_PREFIX=${CONFIG_-CONFIG_} while true; do case $1 in @@ -99,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then fi MERGE_LIST=$* -SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p" +SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p" + TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" From 900545e91e7d54c59b3eda61d734486fb499a24b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Mon, 5 Nov 2018 17:19:36 +0900 Subject: [PATCH 012/452] kconfig: merge_config: avoid false positive matches from comment lines The current SED_CONFIG_EXP could match to comment lines in config fragment files, especially when CONFIG_PREFIX_ is empty. For example, Buildroot uses empty prefixing; starting symbols with BR2_ is just convention. Make the sed expression more robust against false positives from comment lines. The new sed expression matches to only valid patterns. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Reviewed-by: Petr Vorel <petr.vorel@gmail.com> Reviewed-by: Arnout Vandecappelle (Essensium/Mind) <arnout@mind.be> --- scripts/kconfig/merge_config.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index da66e7742282..0ef906499646 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -102,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then fi MERGE_LIST=$* -SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p" +SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p" +SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p" TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) @@ -116,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do echo "The merge file '$MERGE_FILE' does not exist. Exit." 
>&2 exit 1 fi - CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE) + CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE) for CFG in $CFG_LIST ; do grep -q -w $CFG $TMP_FILE || continue @@ -159,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET # Check all specified config values took (might have missed-dependency issues) -for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do +for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE) ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG") From e8ee7737fd27e673a7e2f846afdb4c7b6f4fc501 Mon Sep 17 00:00:00 2001 From: Anders Roxell <anders.roxell@linaro.org> Date: Mon, 12 Nov 2018 09:38:55 +0100 Subject: [PATCH 013/452] scripts/kconfig/merge_config: don't redefine 'y' to 'm' In today's merge_config.sh the order of the config fragment files dictates the output of a config option. With this approach we will get different .config files depending on the order of the config fragment files. So doing something like: $ ./merge/kconfig/merge_config.sh selftest.config drm.config Where selftest.config defines DRM=y and drm.config defines DRM=m, the result will be "DRM=m". Rework to add a switch to get builtin '=y' precedence over modules '=m', this will result in "DRM=y". If we do something like this: $ ./merge/kconfig/merge_config.sh -y selftest.config drm.config Suggested-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Anders Roxell <anders.roxell@linaro.org> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- scripts/kconfig/merge_config.sh | 37 ++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 0ef906499646..9b89791b202c 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -22,6 +22,7 @@ clean_up() { rm -f $TMP_FILE + rm -f $MERGE_FILE exit } trap clean_up HUP INT TERM @@ -32,6 +33,7 @@ usage() { echo " -m only merge the fragments, do not execute the make command" echo " -n use allnoconfig instead of alldefconfig" echo " -r list redundant entries when merging fragments" + echo " -y make builtin have precedence over modules" echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead." echo echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable." @@ -40,6 +42,7 @@ usage() { RUNMAKE=true ALLTARGET=alldefconfig WARNREDUN=false +BUILTIN=false OUTPUT=. CONFIG_PREFIX=${CONFIG_-CONFIG_} @@ -64,6 +67,11 @@ while true; do shift continue ;; + "-y") + BUILTIN=true + shift + continue + ;; "-O") if [ -d $2 ];then OUTPUT=$(echo $2 | sed 's/\/*$//') @@ -106,32 +114,45 @@ SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p" SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p" TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) +MERGE_FILE=$(mktemp ./.merge_tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" cat $INITFILE > $TMP_FILE # Merge files, printing warnings on overridden values -for MERGE_FILE in $MERGE_LIST ; do - echo "Merging $MERGE_FILE" - if [ ! -r "$MERGE_FILE" ]; then - echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2 +for ORIG_MERGE_FILE in $MERGE_LIST ; do + echo "Merging $ORIG_MERGE_FILE" + if [ ! -r "$ORIG_MERGE_FILE" ]; then + echo "The merge file '$ORIG_MERGE_FILE' does not exist. Exit." 
>&2 exit 1 fi + cat $ORIG_MERGE_FILE > $MERGE_FILE CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE) for CFG in $CFG_LIST ; do grep -q -w $CFG $TMP_FILE || continue PREV_VAL=$(grep -w $CFG $TMP_FILE) NEW_VAL=$(grep -w $CFG $MERGE_FILE) - if [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then - echo Value of $CFG is redefined by fragment $MERGE_FILE: + BUILTIN_FLAG=false + if [ "$BUILTIN" = "true" ] && [ "${NEW_VAL#CONFIG_*=}" = "m" ] && [ "${PREV_VAL#CONFIG_*=}" = "y" ]; then + echo Previous value: $PREV_VAL + echo New value: $NEW_VAL + echo -y passed, will not demote y to m + echo + BUILTIN_FLAG=true + elif [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then + echo Value of $CFG is redefined by fragment $ORIG_MERGE_FILE: echo Previous value: $PREV_VAL echo New value: $NEW_VAL echo elif [ "$WARNREDUN" = "true" ]; then - echo Value of $CFG is redundant by fragment $MERGE_FILE: + echo Value of $CFG is redundant by fragment $ORIG_MERGE_FILE: + fi + if [ "$BUILTIN_FLAG" = "false" ]; then + sed -i "/$CFG[ =]/d" $TMP_FILE + else + sed -i "/$CFG[ =]/d" $MERGE_FILE fi - sed -i "/$CFG[ =]/d" $TMP_FILE done cat $MERGE_FILE >> $TMP_FILE done From a2226b8481b337f45d405777d3f2d94479951052 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Mon, 24 Jun 2019 07:22:55 -0700 Subject: [PATCH 014/452] simple_lmk: Introduce Simple Low Memory Killer for Android This is a complete low memory killer solution for Android that is small and simple. Processes are killed according to the priorities that Android gives them, so that the least important processes are always killed first. Processes are killed until memory deficits are satisfied, as observed from kswapd struggling to free up pages. Simple LMK stops killing processes when kswapd finally goes back to sleep. The only tunables are the desired amount of memory to be freed per reclaim event and desired frequency of reclaim events. Simple LMK tries to free at least the desired amount of memory per reclaim and waits until all of its victims' memory is freed before proceeding to kill more processes. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/Kconfig | 44 +++++ drivers/android/Makefile | 1 + drivers/android/simple_lmk.c | 332 +++++++++++++++++++++++++++++++++++ include/linux/simple_lmk.h | 26 +++ kernel/fork.c | 2 + mm/vmscan.c | 4 + 6 files changed, 409 insertions(+) create mode 100644 drivers/android/simple_lmk.c create mode 100644 include/linux/simple_lmk.h diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index ee4880bfdcdc..b572f685374f 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -42,6 +42,50 @@ config ANDROID_BINDER_IPC_SELFTEST exhaustively with combinations of various buffer sizes and alignments. +config ANDROID_SIMPLE_LMK + bool "Simple Android Low Memory Killer" + depends on !ANDROID_LOW_MEMORY_KILLER && !MEMCG + ---help--- + This is a complete low memory killer solution for Android that is + small and simple. Processes are killed according to the priorities + that Android gives them, so that the least important processes are + always killed first. Processes are killed until memory deficits are + satisfied, as observed from kswapd struggling to free up pages. Simple + LMK stops killing processes when kswapd finally goes back to sleep. + +if ANDROID_SIMPLE_LMK + +config ANDROID_SIMPLE_LMK_AGGRESSION + int "Reclaim frequency selection" + range 1 3 + default 1 + help + This value determines how frequently Simple LMK will perform memory + reclaims. 
A lower value corresponds to less frequent reclaims, which + maximizes memory usage. The range of values has a logarithmic + correlation; 2 is twice as aggressive as 1, and 3 is twice as + aggressive as 2, which makes 3 four times as aggressive as 1. + + The aggression is set as a factor of kswapd's scan depth. This means + that a system with more memory will have a more expensive aggression + factor compared to a system with less memory. For example, setting an + aggression factor of 1 with 4 GiB of memory would be like setting a + factor of 2 with 8 GiB of memory; the more memory a system has, the + more expensive it is to use a lower value. + + Choosing a value of 1 here works well with systems that have 4 GiB of + memory. If the default doesn't work well, then this value should be + tweaked based on empirical results using different values. + +config ANDROID_SIMPLE_LMK_MINFREE + int "Minimum MiB of memory to free per reclaim" + range 8 512 + default 100 + help + Simple LMK will try to free at least this much memory per reclaim. + +endif + endif # if ANDROID endmenu diff --git a/drivers/android/Makefile b/drivers/android/Makefile index a01254c43ee3..81cc79664cf9 100644 --- a/drivers/android/Makefile +++ b/drivers/android/Makefile @@ -2,3 +2,4 @@ ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o +obj-$(CONFIG_ANDROID_SIMPLE_LMK) += simple_lmk.o diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c new file mode 100644 index 000000000000..29637ecc5be5 --- /dev/null +++ b/drivers/android/simple_lmk.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Sultan Alsawaf <sultan@kerneltoast.com>. + */ + +#define pr_fmt(fmt) "simple_lmk: " fmt + +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/moduleparam.h> +#include <linux/oom.h> +#include <linux/sort.h> +#include <linux/version.h> + +/* The sched_param struct is located elsewhere in newer kernels */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +#include <uapi/linux/sched/types.h> +#endif + +/* SEND_SIG_FORCED isn't present in newer kernels */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) +#define SIG_INFO_TYPE SEND_SIG_FORCED +#else +#define SIG_INFO_TYPE SEND_SIG_PRIV +#endif + +/* The group argument to do_send_sig_info is different in newer kernels */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +#define KILL_GROUP_TYPE true +#else +#define KILL_GROUP_TYPE PIDTYPE_TGID +#endif + +/* The minimum number of pages to free per reclaim */ +#define MIN_FREE_PAGES (CONFIG_ANDROID_SIMPLE_LMK_MINFREE * SZ_1M / PAGE_SIZE) + +/* Kill up to this many victims per reclaim */ +#define MAX_VICTIMS 1024 + +struct victim_info { + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long size; +}; + +/* Pulled from the Android framework. Lower adj means higher priority. 
*/ +static const short adj_prio[] = { + 906, /* CACHED_APP_MAX_ADJ */ + 905, /* Cached app */ + 904, /* Cached app */ + 903, /* Cached app */ + 902, /* Cached app */ + 901, /* Cached app */ + 900, /* CACHED_APP_MIN_ADJ */ + 800, /* SERVICE_B_ADJ */ + 700, /* PREVIOUS_APP_ADJ */ + 600, /* HOME_APP_ADJ */ + 500, /* SERVICE_ADJ */ + 400, /* HEAVY_WEIGHT_APP_ADJ */ + 300, /* BACKUP_APP_ADJ */ + 200, /* PERCEPTIBLE_APP_ADJ */ + 100, /* VISIBLE_APP_ADJ */ + 0 /* FOREGROUND_APP_ADJ */ +}; + +static struct victim_info victims[MAX_VICTIMS]; +static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); +static DECLARE_COMPLETION(reclaim_done); +static int victims_to_kill; +static bool needs_reclaim; + +static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) +{ + const struct victim_info *lhs = (typeof(lhs))lhs_ptr; + const struct victim_info *rhs = (typeof(rhs))rhs_ptr; + + return rhs->size - lhs->size; +} + +static bool vtsk_is_duplicate(struct victim_info *varr, int vlen, + struct task_struct *vtsk) +{ + int i; + + for (i = 0; i < vlen; i++) { + if (same_thread_group(varr[i].tsk, vtsk)) + return true; + } + + return false; +} + +static unsigned long find_victims(struct victim_info *varr, int *vindex, + int vmaxlen, short target_adj) +{ + unsigned long pages_found = 0; + int old_vindex = *vindex; + struct task_struct *tsk; + + for_each_process(tsk) { + struct task_struct *vtsk; + unsigned long tasksize; + + /* + * Search for tasks with the targeted importance (adj). Since + * only tasks with a positive adj can be targeted, that + * naturally excludes tasks which shouldn't be killed, like init + * and kthreads. Although oom_score_adj can still be changed + * while this code runs, it doesn't really matter. We just need + * to make sure that if the adj changes, we won't deadlock + * trying to lock a task that we locked earlier. + */ + if (READ_ONCE(tsk->signal->oom_score_adj) != target_adj || + vtsk_is_duplicate(varr, *vindex, tsk)) + continue; + + vtsk = find_lock_task_mm(tsk); + if (!vtsk) + continue; + + /* Store this potential victim away for later */ + varr[*vindex].tsk = vtsk; + varr[*vindex].mm = vtsk->mm; + varr[*vindex].size = get_mm_rss(vtsk->mm); + + /* Keep track of the number of pages that have been found */ + pages_found += tasksize; + + /* Make sure there's space left in the victim array */ + if (++*vindex == vmaxlen) + break; + } + + /* + * Sort the victims in descending order of size to prioritize killing + * the larger ones first. + */ + if (pages_found) + sort(&varr[old_vindex], *vindex - old_vindex, sizeof(*varr), + victim_size_cmp, NULL); + + return pages_found; +} + +static int process_victims(struct victim_info *varr, int vlen, + unsigned long pages_needed) +{ + unsigned long pages_found = 0; + int i, nr_to_kill = 0; + + /* + * Calculate the number of tasks that need to be killed and quickly + * release the references to those that'll live. + */ + for (i = 0; i < vlen; i++) { + struct victim_info *victim = &victims[i]; + struct task_struct *vtsk = victim->tsk; + + /* The victim's mm lock is taken in find_victims; release it */ + if (pages_found >= pages_needed) { + task_unlock(vtsk); + continue; + } + + pages_found += victim->size; + nr_to_kill++; + } + + return nr_to_kill; +} + +static void scan_and_kill(unsigned long pages_needed) +{ + int i, nr_to_kill = 0, nr_victims = 0; + unsigned long pages_found = 0; + + /* + * Hold the tasklist lock so tasks don't disappear while scanning. 
This + * is preferred to holding an RCU read lock so that the list of tasks + * is guaranteed to be up to date. + */ + read_lock(&tasklist_lock); + for (i = 0; i < ARRAY_SIZE(adj_prio); i++) { + pages_found += find_victims(victims, &nr_victims, MAX_VICTIMS, + adj_prio[i]); + if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) + break; + } + read_unlock(&tasklist_lock); + + /* Pretty unlikely but it can happen */ + if (unlikely(!nr_victims)) + return; + + /* First round of victim processing to weed out unneeded victims */ + nr_to_kill = process_victims(victims, nr_victims, pages_needed); + + /* + * Try to kill as few of the chosen victims as possible by sorting the + * chosen victims by size, which means larger victims that have a lower + * adj can be killed in place of smaller victims with a high adj. + */ + sort(victims, nr_to_kill, sizeof(*victims), victim_size_cmp, NULL); + + /* Second round of victim processing to finally select the victims */ + nr_to_kill = process_victims(victims, nr_to_kill, pages_needed); + + /* Kill the victims */ + WRITE_ONCE(victims_to_kill, nr_to_kill); + for (i = 0; i < nr_to_kill; i++) { + struct victim_info *victim = &victims[i]; + struct task_struct *vtsk = victim->tsk; + + pr_info("Killing %s with adj %d to free %lu KiB\n", vtsk->comm, + vtsk->signal->oom_score_adj, + victim->size << (PAGE_SHIFT - 10)); + + /* Accelerate the victim's death by forcing the kill signal */ + do_send_sig_info(SIGKILL, SIG_INFO_TYPE, vtsk, KILL_GROUP_TYPE); + + /* Grab a reference to the victim for later before unlocking */ + get_task_struct(vtsk); + task_unlock(vtsk); + } + + /* Try to speed up the death process now that we can schedule again */ + for (i = 0; i < nr_to_kill; i++) { + struct task_struct *vtsk = victims[i].tsk; + + /* Increase the victim's priority to make it die faster */ + set_user_nice(vtsk, MIN_NICE); + + /* Allow the victim to run on any CPU */ + set_cpus_allowed_ptr(vtsk, cpu_all_mask); + + /* Finally release the victim reference acquired earlier */ + put_task_struct(vtsk); + } + + /* Wait until all the victims die */ + wait_for_completion(&reclaim_done); +} + +static int simple_lmk_reclaim_thread(void *data) +{ + static const struct sched_param sched_max_rt_prio = { + .sched_priority = MAX_RT_PRIO - 1 + }; + + sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); + + while (1) { + bool should_stop; + + wait_event(oom_waitq, (should_stop = kthread_should_stop()) || + READ_ONCE(needs_reclaim)); + + if (should_stop) + break; + + /* + * Kill a batch of processes and wait for their memory to be + * freed. After their memory is freed, sleep for 20 ms to give + * OOM'd allocations a chance to scavenge for the newly-freed + * pages. Rinse and repeat while there are still OOM'd + * allocations. 
+ */ + do { + scan_and_kill(MIN_FREE_PAGES); + msleep(20); + } while (READ_ONCE(needs_reclaim)); + } + + return 0; +} + +void simple_lmk_decide_reclaim(int kswapd_priority) +{ + if (kswapd_priority != CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) + return; + + if (!cmpxchg(&needs_reclaim, false, true)) + wake_up(&oom_waitq); +} + +void simple_lmk_stop_reclaim(void) +{ + WRITE_ONCE(needs_reclaim, false); +} + +void simple_lmk_mm_freed(struct mm_struct *mm) +{ + static atomic_t nr_killed = ATOMIC_INIT(0); + int i, nr_to_kill; + + nr_to_kill = READ_ONCE(victims_to_kill); + for (i = 0; i < nr_to_kill; i++) { + if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { + if (atomic_inc_return(&nr_killed) == nr_to_kill) { + WRITE_ONCE(victims_to_kill, 0); + nr_killed = (atomic_t)ATOMIC_INIT(0); + complete(&reclaim_done); + } + break; + } + } +} + +/* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ +static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) +{ + static bool init_done; + struct task_struct *thread; + + if (cmpxchg(&init_done, false, true)) + return 0; + + thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); + BUG_ON(IS_ERR(thread)); + + return 0; +} + +static const struct kernel_param_ops simple_lmk_init_ops = { + .set = simple_lmk_init_set +}; + +/* Needed to prevent Android from thinking there's no LMK and thus rebooting */ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "lowmemorykiller." +module_param_cb(minfree, &simple_lmk_init_ops, NULL, 0200); diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h new file mode 100644 index 000000000000..b0c247f2f2a5 --- /dev/null +++ b/include/linux/simple_lmk.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Sultan Alsawaf <sultan@kerneltoast.com>. + */ +#ifndef _SIMPLE_LMK_H_ +#define _SIMPLE_LMK_H_ + +struct mm_struct; + +#ifdef CONFIG_ANDROID_SIMPLE_LMK +void simple_lmk_decide_reclaim(int kswapd_priority); +void simple_lmk_stop_reclaim(void); +void simple_lmk_mm_freed(struct mm_struct *mm); +#else +static inline void simple_lmk_decide_reclaim(int kswapd_priority) +{ +} +static inline void simple_lmk_stop_reclaim(void) +{ +} +static inline void simple_lmk_mm_freed(struct mm_struct *mm) +{ +} +#endif + +#endif /* _SIMPLE_LMK_H_ */ diff --git a/kernel/fork.c b/kernel/fork.c index 40cebd5b7163..11a1af672858 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -91,6 +91,7 @@ #include <linux/livepatch.h> #include <linux/thread_info.h> #include <linux/cpufreq_times.h> +#include <linux/simple_lmk.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -941,6 +942,7 @@ static inline void __mmput(struct mm_struct *mm) } if (mm->binfmt) module_put(mm->binfmt->module); + simple_lmk_mm_freed(mm); mmdrop(mm); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 665d622ee4ff..1fa8a9d117c9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -49,6 +49,7 @@ #include <linux/prefetch.h> #include <linux/printk.h> #include <linux/dax.h> +#include <linux/simple_lmk.h> #include <asm/tlbflush.h> #include <asm/div64.h> @@ -3655,6 +3656,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) unsigned long nr_reclaimed = sc.nr_reclaimed; bool raise_priority = true; + simple_lmk_decide_reclaim(sc.priority); sc.reclaim_idx = classzone_idx; /* @@ -3787,6 +3789,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * succeed. 
*/ if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { + simple_lmk_stop_reclaim(); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. @@ -3823,6 +3826,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o */ if (!remaining && prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { + simple_lmk_stop_reclaim(); trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* From e9c3c9af12674849fc9cda334a442f6145d27b72 Mon Sep 17 00:00:00 2001 From: Mark Brown <broonie@kernel.org> Date: Mon, 19 Aug 2019 21:06:50 +0100 Subject: [PATCH 015/452] merge_config.sh: Check error codes from make When we execute make after merging the configurations we ignore any errors it produces causing whatever is running merge_config.sh to be unaware of any failures. This issue was noticed by Guillaume Tucker while looking at problems with testing of clang only builds in KernelCI which caused Kbuild to be unable to find a working host compiler. This implementation was suggested by Yamada-san. Suggested-by: Masahiro Yamada <yamada.masahiro@socionext.com> Reported-by: Guillaume Tucker <guillaume.tucker@collabora.com> Signed-off-by: Mark Brown <broonie@kernel.org> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- scripts/kconfig/merge_config.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 9b89791b202c..784db50a894e 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -20,12 +20,12 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. +set -e + clean_up() { rm -f $TMP_FILE rm -f $MERGE_FILE - exit } -trap clean_up HUP INT TERM usage() { echo "Usage: $0 [OPTIONS] [CONFIG [...]]" @@ -117,6 +117,9 @@ TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) MERGE_FILE=$(mktemp ./.merge_tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" + +trap clean_up EXIT + cat $INITFILE > $TMP_FILE # Merge files, printing warnings on overridden values @@ -162,7 +165,6 @@ if [ "$RUNMAKE" = "false" ]; then echo "#" echo "# merged configuration written to $KCONFIG_CONFIG (needs make)" echo "#" - clean_up exit fi @@ -192,5 +194,3 @@ for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do echo "" fi done - -clean_up From 098f643ee8a54d9fa9450e091ecea8504b235227 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 20 Jul 2019 09:54:58 -0700 Subject: [PATCH 016/452] simple_lmk: Fix pages_found calculation Previously, pages_found would be calculated using an uninitialized variable. Fix it. Reported-by: Julian Liu <wlootlxt123@gmail.com> Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 29637ecc5be5..e7e91b7b4c7b 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -100,7 +100,6 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, for_each_process(tsk) { struct task_struct *vtsk; - unsigned long tasksize; /* * Search for tasks with the targeted importance (adj). 
Since @@ -125,7 +124,7 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, varr[*vindex].size = get_mm_rss(vtsk->mm); /* Keep track of the number of pages that have been found */ - pages_found += tasksize; + pages_found += varr[*vindex].size; /* Make sure there's space left in the victim array */ if (++*vindex == vmaxlen) From d1450383bdf3489f60e4fc523b8ea4a35ae197ed Mon Sep 17 00:00:00 2001 From: Guillaume Tucker <guillaume.tucker@collabora.com> Date: Mon, 2 Sep 2019 16:18:36 +0100 Subject: [PATCH 017/452] merge_config.sh: ignore unwanted grep errors The merge_config.sh script verifies that all the config options have their expected value in the resulting file and prints any issues as warnings. These checks aren't intended to be treated as errors given the current implementation. However, since "set -e" was added, if the grep command to look for a config option does not find it the script will then abort prematurely. Handle the case where the grep exit status is non-zero by setting ACTUAL_VAL to an empty string to restore previous functionality. Fixes: cdfca821571d ("merge_config.sh: Check error codes from make") Signed-off-by: Guillaume Tucker <guillaume.tucker@collabora.com> Acked-by: Jon Hunter <jonathanh@nvidia.com> Tested-by: Jon Hunter <jonathanh@nvidia.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- scripts/kconfig/merge_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 784db50a894e..20a776cb4dfe 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -186,7 +186,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE) - ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG") + ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG" || true) if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then echo "Value requested for $CFG not in final .config" echo "Requested value: $REQUESTED_VAL" From a35191e841c9835cbf7c2612e0716735ea4cfe96 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 21 Aug 2019 08:30:55 -0700 Subject: [PATCH 018/452] simple_lmk: Remove kthread_should_stop() exit condition Simple LMK's reclaim thread should never stop; there's no need to have this check. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index e7e91b7b4c7b..845679a4cbed 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -250,13 +250,7 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - bool should_stop; - - wait_event(oom_waitq, (should_stop = kthread_should_stop()) || - READ_ONCE(needs_reclaim)); - - if (should_stop) - break; + wait_event(oom_waitq, READ_ONCE(needs_reclaim)); /* * Kill a batch of processes and wait for their memory to be From fc376f8839682aae9c2130bdafc6831b203eb7b6 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 21 Aug 2019 08:37:04 -0700 Subject: [PATCH 019/452] simple_lmk: Use proper atomic_* operations where needed cmpxchg() is only atomic with respect to the local CPU, so it cannot be relied on with how it's used in Simple LMK. 
Switch to fully atomic operations instead for full atomic guarantees. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 845679a4cbed..d1bc94027e5d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -68,7 +68,7 @@ static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); static int victims_to_kill; -static bool needs_reclaim; +static atomic_t needs_reclaim = ATOMIC_INIT(0); static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) { @@ -250,7 +250,7 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, READ_ONCE(needs_reclaim)); + wait_event(oom_waitq, atomic_read(&needs_reclaim)); /* * Kill a batch of processes and wait for their memory to be @@ -262,7 +262,7 @@ static int simple_lmk_reclaim_thread(void *data) do { scan_and_kill(MIN_FREE_PAGES); msleep(20); - } while (READ_ONCE(needs_reclaim)); + } while (atomic_read(&needs_reclaim)); } return 0; @@ -273,13 +273,13 @@ void simple_lmk_decide_reclaim(int kswapd_priority) if (kswapd_priority != CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) return; - if (!cmpxchg(&needs_reclaim, false, true)) + if (!atomic_cmpxchg(&needs_reclaim, 0, 1)) wake_up(&oom_waitq); } void simple_lmk_stop_reclaim(void) { - WRITE_ONCE(needs_reclaim, false); + atomic_set(&needs_reclaim, 0); } void simple_lmk_mm_freed(struct mm_struct *mm) @@ -303,10 +303,10 @@ void simple_lmk_mm_freed(struct mm_struct *mm) /* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) { - static bool init_done; + static atomic_t init_done = ATOMIC_INIT(0); struct task_struct *thread; - if (cmpxchg(&init_done, false, true)) + if (atomic_cmpxchg(&init_done, 0, 1)) return 0; thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); From bb79bf1e616f3520d3f277bd205ee06e33010ec9 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Mon, 4 Nov 2019 10:56:15 -0800 Subject: [PATCH 020/452] simple_lmk: Fix broken multicopy atomicity for victims_to_kill When the reclaim thread writes to victims_to_kill on one CPU, it expects the updated value to be immediately reflected on all CPUs in order for simple_lmk_mm_freed() to work correctly. Due to the lack of memory barriers to guarantee multicopy atomicity, simple_lmk_mm_freed() can be given a victim's mm without knowing the correct victims_to_kill value, which can cause the reclaim thread to remain stuck waiting forever for all victims to be freed. This scenario, despite being rare, has been observed. Fix this by using proper atomic helpers with memory barriers. 
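As a minimal standalone sketch of the release/acquire publication pattern the fix relies on (written against C11 atomics purely for illustration; it is not the driver code, and the names are simplified), the writer must fully populate the victim array before publishing the count with a release store, and the reader must load the count with an acquire load before walking the array:

	/* Illustration only: mirrors atomic_set_release()/atomic_read_acquire(). */
	#include <stdatomic.h>

	#define MAX_VICTIMS 4

	struct victim { void *mm; };

	static struct victim victims[MAX_VICTIMS];
	static atomic_int victims_to_kill;

	/* Writer: fill the array first, then publish the count with release
	 * semantics so the array stores cannot be reordered past the count. */
	static void publish_victims(int n)
	{
		for (int i = 0; i < n; i++)
			victims[i].mm = &victims[i];	/* populate entry */
		atomic_store_explicit(&victims_to_kill, n, memory_order_release);
	}

	/* Reader: acquire-load the count before touching any entry; the acquire
	 * pairs with the writer's release, so every published entry is visible. */
	static int walk_victims(void)
	{
		int n = atomic_load_explicit(&victims_to_kill, memory_order_acquire);

		for (int i = 0; i < n; i++)
			(void)victims[i].mm;	/* guaranteed initialized here */
		return n;
	}

Without that pairing, a reader can observe the updated count while still seeing stale array entries, which is exactly the stuck-waiter scenario described above.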
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index d1bc94027e5d..079281fdcb57 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -67,7 +67,7 @@ static const short adj_prio[] = { static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); -static int victims_to_kill; +static atomic_t victims_to_kill = ATOMIC_INIT(0); static atomic_t needs_reclaim = ATOMIC_INIT(0); static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) @@ -206,7 +206,7 @@ static void scan_and_kill(unsigned long pages_needed) nr_to_kill = process_victims(victims, nr_to_kill, pages_needed); /* Kill the victims */ - WRITE_ONCE(victims_to_kill, nr_to_kill); + atomic_set_release(&victims_to_kill, nr_to_kill); for (i = 0; i < nr_to_kill; i++) { struct victim_info *victim = &victims[i]; struct task_struct *vtsk = victim->tsk; @@ -287,11 +287,11 @@ void simple_lmk_mm_freed(struct mm_struct *mm) static atomic_t nr_killed = ATOMIC_INIT(0); int i, nr_to_kill; - nr_to_kill = READ_ONCE(victims_to_kill); + nr_to_kill = atomic_read_acquire(&victims_to_kill); for (i = 0; i < nr_to_kill; i++) { if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { if (atomic_inc_return(&nr_killed) == nr_to_kill) { - WRITE_ONCE(victims_to_kill, 0); + atomic_set(&victims_to_kill, 0); nr_killed = (atomic_t)ATOMIC_INIT(0); complete(&reclaim_done); } From 8b5e8dad7c0c00c3b1593dcd89b5e79ed0970025 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Mon, 4 Nov 2019 11:06:13 -0800 Subject: [PATCH 021/452] simple_lmk: Make reclaim deterministic The 20 ms delay in the reclaim thread is a hacky fudge factor that can cause Simple LMK to behave wildly differently depending on the circumstances of when it is invoked. When kswapd doesn't get enough CPU time to finish up and go back to sleep within 20 ms, Simple LMK performs superfluous reclaims. This is suboptimal, so make Simple LMK more deterministic by eliminating the delay and instead queuing up reclaim requests from kswapd. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 38 +++++++++++++----------------------- include/linux/simple_lmk.h | 4 ---- mm/vmscan.c | 2 -- 3 files changed, 14 insertions(+), 30 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 079281fdcb57..bce8cf651b81 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -5,7 +5,6 @@ #define pr_fmt(fmt) "simple_lmk: " fmt -#include <linux/delay.h> #include <linux/kthread.h> #include <linux/mm.h> #include <linux/moduleparam.h> @@ -250,19 +249,8 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, atomic_read(&needs_reclaim)); - - /* - * Kill a batch of processes and wait for their memory to be - * freed. After their memory is freed, sleep for 20 ms to give - * OOM'd allocations a chance to scavenge for the newly-freed - * pages. Rinse and repeat while there are still OOM'd - * allocations. 
- */ - do { - scan_and_kill(MIN_FREE_PAGES); - msleep(20); - } while (atomic_read(&needs_reclaim)); + wait_event(oom_waitq, atomic_add_unless(&needs_reclaim, -1, 0)); + scan_and_kill(MIN_FREE_PAGES); } return 0; @@ -270,16 +258,18 @@ static int simple_lmk_reclaim_thread(void *data) void simple_lmk_decide_reclaim(int kswapd_priority) { - if (kswapd_priority != CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) - return; - - if (!atomic_cmpxchg(&needs_reclaim, 0, 1)) - wake_up(&oom_waitq); -} - -void simple_lmk_stop_reclaim(void) -{ - atomic_set(&needs_reclaim, 0); + if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) { + int v, v1; + + for (v = 0;; v = v1) { + v1 = atomic_cmpxchg(&needs_reclaim, v, v + 1); + if (likely(v1 == v)) { + if (!v) + wake_up(&oom_waitq); + break; + } + } + } } void simple_lmk_mm_freed(struct mm_struct *mm) diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h index b0c247f2f2a5..46cdb389be51 100644 --- a/include/linux/simple_lmk.h +++ b/include/linux/simple_lmk.h @@ -9,15 +9,11 @@ struct mm_struct; #ifdef CONFIG_ANDROID_SIMPLE_LMK void simple_lmk_decide_reclaim(int kswapd_priority); -void simple_lmk_stop_reclaim(void); void simple_lmk_mm_freed(struct mm_struct *mm); #else static inline void simple_lmk_decide_reclaim(int kswapd_priority) { } -static inline void simple_lmk_stop_reclaim(void) -{ -} static inline void simple_lmk_mm_freed(struct mm_struct *mm) { } diff --git a/mm/vmscan.c b/mm/vmscan.c index 1fa8a9d117c9..f8e0ea574421 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3789,7 +3789,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * succeed. */ if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { - simple_lmk_stop_reclaim(); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. @@ -3826,7 +3825,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o */ if (!remaining && prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { - simple_lmk_stop_reclaim(); trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* From 50af602760680bd414c35ca2756b839a834da4e4 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Mon, 4 Nov 2019 11:27:29 -0800 Subject: [PATCH 022/452] simple_lmk: Clean up some code style nitpicks Using a parameter to pass around a unmodified pointer to a global variable is crufty; just use the `victims` variable directly instead. Also, compress the code in simple_lmk_init_set() a bit to make it look cleaner. 
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 45 ++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index bce8cf651b81..44f7319defc0 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -77,21 +77,19 @@ static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) return rhs->size - lhs->size; } -static bool vtsk_is_duplicate(struct victim_info *varr, int vlen, - struct task_struct *vtsk) +static bool vtsk_is_duplicate(int vlen, struct task_struct *vtsk) { int i; for (i = 0; i < vlen; i++) { - if (same_thread_group(varr[i].tsk, vtsk)) + if (same_thread_group(victims[i].tsk, vtsk)) return true; } return false; } -static unsigned long find_victims(struct victim_info *varr, int *vindex, - int vmaxlen, short target_adj) +static unsigned long find_victims(int *vindex, short target_adj) { unsigned long pages_found = 0; int old_vindex = *vindex; @@ -110,7 +108,7 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, * trying to lock a task that we locked earlier. */ if (READ_ONCE(tsk->signal->oom_score_adj) != target_adj || - vtsk_is_duplicate(varr, *vindex, tsk)) + vtsk_is_duplicate(*vindex, tsk)) continue; vtsk = find_lock_task_mm(tsk); @@ -118,15 +116,15 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, continue; /* Store this potential victim away for later */ - varr[*vindex].tsk = vtsk; - varr[*vindex].mm = vtsk->mm; - varr[*vindex].size = get_mm_rss(vtsk->mm); + victims[*vindex].tsk = vtsk; + victims[*vindex].mm = vtsk->mm; + victims[*vindex].size = get_mm_rss(vtsk->mm); /* Keep track of the number of pages that have been found */ - pages_found += varr[*vindex].size; + pages_found += victims[*vindex].size; /* Make sure there's space left in the victim array */ - if (++*vindex == vmaxlen) + if (++*vindex == MAX_VICTIMS) break; } @@ -135,14 +133,13 @@ static unsigned long find_victims(struct victim_info *varr, int *vindex, * the larger ones first. 
*/ if (pages_found) - sort(&varr[old_vindex], *vindex - old_vindex, sizeof(*varr), - victim_size_cmp, NULL); + sort(&victims[old_vindex], *vindex - old_vindex, + sizeof(*victims), victim_size_cmp, NULL); return pages_found; } -static int process_victims(struct victim_info *varr, int vlen, - unsigned long pages_needed) +static int process_victims(int vlen, unsigned long pages_needed) { unsigned long pages_found = 0; int i, nr_to_kill = 0; @@ -180,8 +177,7 @@ static void scan_and_kill(unsigned long pages_needed) */ read_lock(&tasklist_lock); for (i = 0; i < ARRAY_SIZE(adj_prio); i++) { - pages_found += find_victims(victims, &nr_victims, MAX_VICTIMS, - adj_prio[i]); + pages_found += find_victims(&nr_victims, adj_prio[i]); if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) break; } @@ -192,7 +188,7 @@ static void scan_and_kill(unsigned long pages_needed) return; /* First round of victim processing to weed out unneeded victims */ - nr_to_kill = process_victims(victims, nr_victims, pages_needed); + nr_to_kill = process_victims(nr_victims, pages_needed); /* * Try to kill as few of the chosen victims as possible by sorting the @@ -202,7 +198,7 @@ static void scan_and_kill(unsigned long pages_needed) sort(victims, nr_to_kill, sizeof(*victims), victim_size_cmp, NULL); /* Second round of victim processing to finally select the victims */ - nr_to_kill = process_victims(victims, nr_to_kill, pages_needed); + nr_to_kill = process_victims(nr_to_kill, pages_needed); /* Kill the victims */ atomic_set_release(&victims_to_kill, nr_to_kill); @@ -296,12 +292,11 @@ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) static atomic_t init_done = ATOMIC_INIT(0); struct task_struct *thread; - if (atomic_cmpxchg(&init_done, 0, 1)) - return 0; - - thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); - BUG_ON(IS_ERR(thread)); - + if (!atomic_cmpxchg(&init_done, 0, 1)) { + thread = kthread_run(simple_lmk_reclaim_thread, NULL, + "simple_lmkd"); + BUG_ON(IS_ERR(thread)); + } return 0; } From 99706dd578d87a7e5f0cdd88ddc6d749cd78d589 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 6 Nov 2019 10:02:57 -0800 Subject: [PATCH 023/452] simple_lmk: Increase default minfree value After commit "simple_lmk: Make reclaim deterministic", Simple LMK's behavior changed and thus requires some slight re-tuning to make it work well again. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index b572f685374f..f126cf569529 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -80,7 +80,7 @@ config ANDROID_SIMPLE_LMK_AGGRESSION config ANDROID_SIMPLE_LMK_MINFREE int "Minimum MiB of memory to free per reclaim" range 8 512 - default 100 + default 128 help Simple LMK will try to free at least this much memory per reclaim. From c818642bce7690db496aca0fb05b30291762cf9c Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Mon, 20 Jan 2020 16:03:44 -0800 Subject: [PATCH 024/452] simple_lmk: Don't queue up new reclaim requests during reclaim Queuing up reclaim requests while a reclaim is in progress doesn't make sense, since the additional reclaims may not be needed after the existing reclaim completes. This would cause Simple LMK to go berserk during periods of high memory pressure where kswapd would fire off reclaim requests nonstop. 
Make Simple LMK ignore new reclaim requests until an existing reclaim is finished to prevent a slaughter-fest. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 44f7319defc0..28b808a40a5a 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -245,8 +245,9 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, atomic_add_unless(&needs_reclaim, -1, 0)); + wait_event(oom_waitq, atomic_read_acquire(&needs_reclaim)); scan_and_kill(MIN_FREE_PAGES); + atomic_set_release(&needs_reclaim, 0); } return 0; @@ -254,18 +255,9 @@ static int simple_lmk_reclaim_thread(void *data) void simple_lmk_decide_reclaim(int kswapd_priority) { - if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION) { - int v, v1; - - for (v = 0;; v = v1) { - v1 = atomic_cmpxchg(&needs_reclaim, v, v + 1); - if (likely(v1 == v)) { - if (!v) - wake_up(&oom_waitq); - break; - } - } - } + if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION && + !atomic_cmpxchg(&needs_reclaim, 0, 1)) + wake_up(&oom_waitq); } void simple_lmk_mm_freed(struct mm_struct *mm) From e8f0319dd7af11b04f3f04c46edd447f549b0673 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 6 Feb 2020 20:57:53 -0800 Subject: [PATCH 025/452] simple_lmk: Update copyright to 2020 Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 2 +- include/linux/simple_lmk.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 28b808a40a5a..2884030276ea 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2019 Sultan Alsawaf <sultan@kerneltoast.com>. + * Copyright (C) 2019-2020 Sultan Alsawaf <sultan@kerneltoast.com>. */ #define pr_fmt(fmt) "simple_lmk: " fmt diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h index 46cdb389be51..28103c1b1d4c 100644 --- a/include/linux/simple_lmk.h +++ b/include/linux/simple_lmk.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Sultan Alsawaf <sultan@kerneltoast.com>. + * Copyright (C) 2019-2020 Sultan Alsawaf <sultan@kerneltoast.com>. 
*/ #ifndef _SIMPLE_LMK_H_ #define _SIMPLE_LMK_H_ From d6e7f890ecfd65b53b8b221e5e31ce301bf628d1 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 6 Feb 2020 20:59:22 -0800 Subject: [PATCH 026/452] simple_lmk: Remove compat cruft not specific to 4.14 Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 2884030276ea..3816f8bf3946 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -10,26 +10,7 @@ #include <linux/moduleparam.h> #include <linux/oom.h> #include <linux/sort.h> -#include <linux/version.h> - -/* The sched_param struct is located elsewhere in newer kernels */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) #include <uapi/linux/sched/types.h> -#endif - -/* SEND_SIG_FORCED isn't present in newer kernels */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) -#define SIG_INFO_TYPE SEND_SIG_FORCED -#else -#define SIG_INFO_TYPE SEND_SIG_PRIV -#endif - -/* The group argument to do_send_sig_info is different in newer kernels */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) -#define KILL_GROUP_TYPE true -#else -#define KILL_GROUP_TYPE PIDTYPE_TGID -#endif /* The minimum number of pages to free per reclaim */ #define MIN_FREE_PAGES (CONFIG_ANDROID_SIMPLE_LMK_MINFREE * SZ_1M / PAGE_SIZE) @@ -211,7 +192,7 @@ static void scan_and_kill(unsigned long pages_needed) victim->size << (PAGE_SHIFT - 10)); /* Accelerate the victim's death by forcing the kill signal */ - do_send_sig_info(SIGKILL, SIG_INFO_TYPE, vtsk, KILL_GROUP_TYPE); + do_send_sig_info(SIGKILL, SEND_SIG_FORCED, vtsk, true); /* Grab a reference to the victim for later before unlocking */ get_task_struct(vtsk); From 295206fa250ab7a9507a1ac9483c0f2c464c8b5d Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 6 Feb 2020 21:03:24 -0800 Subject: [PATCH 027/452] simple_lmk: Print a message when there are no processes to kill Makes it clear that Simple LMK tried its best but there was nothing it could do. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 3816f8bf3946..76f40e99b80d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -165,8 +165,10 @@ static void scan_and_kill(unsigned long pages_needed) read_unlock(&tasklist_lock); /* Pretty unlikely but it can happen */ - if (unlikely(!nr_victims)) + if (unlikely(!nr_victims)) { + pr_err("No processes available to kill!\n"); return; + } /* First round of victim processing to weed out unneeded victims */ nr_to_kill = process_victims(nr_victims, pages_needed); From 576d9aeab2a68be95c961c902dc293c63972bbe2 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Fri, 7 Feb 2020 23:36:58 -0800 Subject: [PATCH 028/452] simple_lmk: Disable OOM killer when Simple LMK is enabled The OOM killer only serves to be a liability when Simple LMK is used. 
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- mm/oom_kill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index cb2c8f527e67..05375bc8135e 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1061,7 +1061,7 @@ bool out_of_memory(struct oom_control *oc) unsigned long freed = 0; enum oom_constraint constraint = CONSTRAINT_NONE; - if (oom_killer_disabled) + if (oom_killer_disabled || IS_ENABLED(CONFIG_ANDROID_SIMPLE_LMK)) return false; if (!is_memcg_oom(oc)) { From 9e28ad3b2a62fa09f03cb8e803d63c14d7b77904 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 8 Feb 2020 17:03:35 -0800 Subject: [PATCH 029/452] simple_lmk: Mark victim thread group with TIF_MEMDIE The OOM killer sets the TIF_MEMDIE thread flag for its victims to alert other kernel code that the current process was killed due to memory pressure, and needs to finish whatever it's doing quickly. In the page allocator this allows victim processes to quickly allocate memory using emergency reserves. This is especially important when memory pressure is high; if all processes are taking a while to allocate memory, then our victim processes will face the same problem and can potentially get stuck in the page allocator for a while rather than die expeditiously. To ensure that victim processes die quickly, set TIF_MEMDIE for the entire victim thread group. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 8 +++++++- kernel/exit.c | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 76f40e99b80d..77172da82701 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -187,7 +187,7 @@ static void scan_and_kill(unsigned long pages_needed) atomic_set_release(&victims_to_kill, nr_to_kill); for (i = 0; i < nr_to_kill; i++) { struct victim_info *victim = &victims[i]; - struct task_struct *vtsk = victim->tsk; + struct task_struct *t, *vtsk = victim->tsk; pr_info("Killing %s with adj %d to free %lu KiB\n", vtsk->comm, vtsk->signal->oom_score_adj, @@ -196,6 +196,12 @@ static void scan_and_kill(unsigned long pages_needed) /* Accelerate the victim's death by forcing the kill signal */ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, vtsk, true); + /* Mark the thread group dead so that other kernel code knows */ + rcu_read_lock(); + for_each_thread(vtsk, t) + set_tsk_thread_flag(t, TIF_MEMDIE); + rcu_read_unlock(); + /* Grab a reference to the victim for later before unlocking */ get_task_struct(vtsk); task_unlock(vtsk); diff --git a/kernel/exit.c b/kernel/exit.c index 8ebc7dfc244a..bc46817e96c0 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -549,8 +549,12 @@ static void exit_mm(void) task_unlock(current); mm_update_next_owner(mm); mmput(mm); +#ifdef CONFIG_ANDROID_SIMPLE_LMK + clear_thread_flag(TIF_MEMDIE); +#else if (test_thread_flag(TIF_MEMDIE)) exit_oom_victim(); +#endif } static struct task_struct *find_alive_thread(struct task_struct *p) From d005a3da40ad5a06668196bdb8a4fd5cf719d822 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sun, 9 Feb 2020 16:24:29 -0800 Subject: [PATCH 030/452] simple_lmk: Report mm as freed as soon as exit_mmap() finishes exit_mmap() is responsible for freeing the vast majority of an mm's memory; in order to unblock Simple LMK faster, report an mm as freed as soon as exit_mmap() finishes. 
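To make the ordering concrete, here is a hedged standalone analogue (plain pthreads, not kernel code; the helper names are invented for illustration): the completion is signalled right after the expensive freeing step rather than after the trailing bookkeeping, so the waiter, which stands in for Simple LMK's reclaim thread, unblocks as early as possible:

	/* Illustration only; invented names, not part of this patch. */
	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
	static bool memory_freed;

	static void report_freed(void)	/* plays the role of simple_lmk_mm_freed() */
	{
		pthread_mutex_lock(&lock);
		memory_freed = true;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}

	static void teardown(void)	/* plays the role of __mmput() */
	{
		/* expensive part first: the bulk of the memory is returned here,
		 * as exit_mmap() does for an mm */
		report_freed();		/* report immediately afterwards */
		/* cheap bookkeeping can follow; the waiter no longer cares */
	}

	static void *waiter(void *arg)	/* plays the role of the reclaim thread */
	{
		pthread_mutex_lock(&lock);
		while (!memory_freed)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		return arg;
	}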
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- kernel/fork.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index 11a1af672858..0e06ac6c9c39 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -933,6 +933,7 @@ static inline void __mmput(struct mm_struct *mm) ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); + simple_lmk_mm_freed(mm); mm_put_huge_zero_page(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { @@ -942,7 +943,6 @@ static inline void __mmput(struct mm_struct *mm) } if (mm->binfmt) module_put(mm->binfmt->module); - simple_lmk_mm_freed(mm); mmdrop(mm); } From 5b6a233f64eef3a6101573d8e6f9303330b1e6b9 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 8 Feb 2020 00:00:48 -0800 Subject: [PATCH 031/452] simple_lmk: Simplify tricks used to speed up the death process set_user_nice() doesn't schedule, and although set_cpus_allowed_ptr() can schedule, it will only do so when the specified task cannot run on the new set of allowed CPUs. Since cpu_all_mask is used, set_cpus_allowed_ptr() will never schedule. Therefore, both the priority elevation and cpus_allowed change can be moved to inside the task lock to simplify and speed things up. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 77172da82701..224299997dd4 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -202,23 +202,14 @@ static void scan_and_kill(unsigned long pages_needed) set_tsk_thread_flag(t, TIF_MEMDIE); rcu_read_unlock(); - /* Grab a reference to the victim for later before unlocking */ - get_task_struct(vtsk); - task_unlock(vtsk); - } - - /* Try to speed up the death process now that we can schedule again */ - for (i = 0; i < nr_to_kill; i++) { - struct task_struct *vtsk = victims[i].tsk; - /* Increase the victim's priority to make it die faster */ set_user_nice(vtsk, MIN_NICE); - /* Allow the victim to run on any CPU */ + /* Allow the victim to run on any CPU. This won't schedule. */ set_cpus_allowed_ptr(vtsk, cpu_all_mask); - /* Finally release the victim reference acquired earlier */ - put_task_struct(vtsk); + /* Finally release the victim's task lock acquired earlier */ + task_unlock(vtsk); } /* Wait until all the victims die */ From 623034b03135a4bb2cf7f0db7576fd69cae5a967 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 8 Feb 2020 03:21:01 -0800 Subject: [PATCH 032/452] simple_lmk: Ignore tasks that won't free memory Dying processes aren't going to help free memory, so ignore them. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 224299997dd4..77695a763a05 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -77,18 +77,22 @@ static unsigned long find_victims(int *vindex, short target_adj) struct task_struct *tsk; for_each_process(tsk) { + struct signal_struct *sig; struct task_struct *vtsk; /* - * Search for tasks with the targeted importance (adj). Since - * only tasks with a positive adj can be targeted, that + * Search for suitable tasks with the targeted importance (adj). 
+ * Since only tasks with a positive adj can be targeted, that * naturally excludes tasks which shouldn't be killed, like init * and kthreads. Although oom_score_adj can still be changed * while this code runs, it doesn't really matter. We just need * to make sure that if the adj changes, we won't deadlock * trying to lock a task that we locked earlier. */ - if (READ_ONCE(tsk->signal->oom_score_adj) != target_adj || + sig = tsk->signal; + if (READ_ONCE(sig->oom_score_adj) != target_adj || + sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP) || + (thread_group_empty(tsk) && tsk->flags & PF_EXITING) || vtsk_is_duplicate(*vindex, tsk)) continue; From 463c2f47758218973943c5be39095364797ff310 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 8 Feb 2020 03:22:44 -0800 Subject: [PATCH 033/452] simple_lmk: Add a timeout to stop waiting for victims to die Simple LMK tries to wait until all of the victims it kills have their memory freed; however, sometimes victims can take a while to die, which can block Simple LMK from killing more processes in time when needed. After the specified timeout elapses, Simple LMK will stop waiting and make itself available to kill more processes. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/Kconfig | 11 ++++++++++ drivers/android/simple_lmk.c | 42 +++++++++++++++++++++++++----------- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index f126cf569529..7f65391e5e73 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -84,6 +84,17 @@ config ANDROID_SIMPLE_LMK_MINFREE help Simple LMK will try to free at least this much memory per reclaim. +config ANDROID_SIMPLE_LMK_TIMEOUT_MSEC + int "Reclaim timeout in milliseconds" + range 50 1000 + default 200 + help + Simple LMK tries to wait until all of the victims it kills have their + memory freed; however, sometimes victims can take a while to die, + which can block Simple LMK from killing more processes in time when + needed. After the specified timeout elapses, Simple LMK will stop + waiting and make itself available to kill more processes. 
+ endif endif # if ANDROID diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 77695a763a05..21895409ff8f 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -18,6 +18,9 @@ /* Kill up to this many victims per reclaim */ #define MAX_VICTIMS 1024 +/* Timeout in jiffies for each reclaim */ +#define RECLAIM_EXPIRES msecs_to_jiffies(CONFIG_ANDROID_SIMPLE_LMK_TIMEOUT_MSEC) + struct victim_info { struct task_struct *tsk; struct mm_struct *mm; @@ -47,8 +50,10 @@ static const short adj_prio[] = { static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); -static atomic_t victims_to_kill = ATOMIC_INIT(0); +static DEFINE_RWLOCK(mm_free_lock); +static int victims_to_kill; static atomic_t needs_reclaim = ATOMIC_INIT(0); +static atomic_t nr_killed = ATOMIC_INIT(0); static int victim_size_cmp(const void *lhs_ptr, const void *rhs_ptr) { @@ -152,7 +157,7 @@ static int process_victims(int vlen, unsigned long pages_needed) static void scan_and_kill(unsigned long pages_needed) { - int i, nr_to_kill = 0, nr_victims = 0; + int i, nr_to_kill = 0, nr_victims = 0, ret; unsigned long pages_found = 0; /* @@ -187,8 +192,12 @@ static void scan_and_kill(unsigned long pages_needed) /* Second round of victim processing to finally select the victims */ nr_to_kill = process_victims(nr_to_kill, pages_needed); + /* Store the final number of victims for simple_lmk_mm_freed() */ + write_lock(&mm_free_lock); + victims_to_kill = nr_to_kill; + write_unlock(&mm_free_lock); + /* Kill the victims */ - atomic_set_release(&victims_to_kill, nr_to_kill); for (i = 0; i < nr_to_kill; i++) { struct victim_info *victim = &victims[i]; struct task_struct *t, *vtsk = victim->tsk; @@ -216,8 +225,18 @@ static void scan_and_kill(unsigned long pages_needed) task_unlock(vtsk); } - /* Wait until all the victims die */ - wait_for_completion(&reclaim_done); + /* Wait until all the victims die or until the timeout is reached */ + ret = wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); + write_lock(&mm_free_lock); + if (!ret) { + /* Extra clean-up is needed when the timeout is hit */ + reinit_completion(&reclaim_done); + for (i = 0; i < nr_to_kill; i++) + victims[i].mm = NULL; + } + victims_to_kill = 0; + nr_killed = (atomic_t)ATOMIC_INIT(0); + write_unlock(&mm_free_lock); } static int simple_lmk_reclaim_thread(void *data) @@ -246,20 +265,17 @@ void simple_lmk_decide_reclaim(int kswapd_priority) void simple_lmk_mm_freed(struct mm_struct *mm) { - static atomic_t nr_killed = ATOMIC_INIT(0); - int i, nr_to_kill; + int i; - nr_to_kill = atomic_read_acquire(&victims_to_kill); - for (i = 0; i < nr_to_kill; i++) { + read_lock(&mm_free_lock); + for (i = 0; i < victims_to_kill; i++) { if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { - if (atomic_inc_return(&nr_killed) == nr_to_kill) { - atomic_set(&victims_to_kill, 0); - nr_killed = (atomic_t)ATOMIC_INIT(0); + if (atomic_inc_return(&nr_killed) == victims_to_kill) complete(&reclaim_done); - } break; } } + read_unlock(&mm_free_lock); } /* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ From cb4a047e72d70268a96fb968f76e56a3a46ad8aa Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 8 Feb 2020 14:26:31 -0800 Subject: [PATCH 034/452] simple_lmk: Place victims onto SCHED_RR Just increasing the victim's priority to the maximum niceness isn't enough to make it totally preempt everything in SCHED_FAIR, which is important to 
make sure victims die quickly. Resource-wise, this isn't very burdensome since the RT priority is just set to zero, and because dying victims don't have much to do: they only need to finish whatever they're doing quickly. SCHED_RR is used over SCHED_FIFO so that CPU time between the victims is divided evenly to help them all finish at around the same time, as fast as possible. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 21895409ff8f..ce2d1872c2d3 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -199,6 +199,7 @@ static void scan_and_kill(unsigned long pages_needed) /* Kill the victims */ for (i = 0; i < nr_to_kill; i++) { + static const struct sched_param sched_zero_prio; struct victim_info *victim = &victims[i]; struct task_struct *t, *vtsk = victim->tsk; @@ -215,8 +216,8 @@ static void scan_and_kill(unsigned long pages_needed) set_tsk_thread_flag(t, TIF_MEMDIE); rcu_read_unlock(); - /* Increase the victim's priority to make it die faster */ - set_user_nice(vtsk, MIN_NICE); + /* Elevate the victim to SCHED_RR with zero RT priority */ + sched_setscheduler_nocheck(vtsk, SCHED_RR, &sched_zero_prio); /* Allow the victim to run on any CPU. This won't schedule. */ set_cpus_allowed_ptr(vtsk, cpu_all_mask); From 36c388d7f0656658410411f8d3ddc4e17d208df7 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Tue, 18 Feb 2020 22:37:48 -0800 Subject: [PATCH 035/452] simple_lmk: Relax memory barriers and clean up some styling wake_up() executes a full memory barrier when waking a process up, so there's no need for the acquire in the wait event. Additionally, because of this, the atomic_cmpxchg() only needs a read barrier. The cmpxchg() in simple_lmk_mm_freed() is atomic when it doesn't need to be, so replace it with an extra line of code. The atomic_inc_return() in simple_lmk_mm_freed() lies within a lock, so it doesn't need explicit memory barriers. 
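Condensed, the waker/waiter pairing this leaves behind looks roughly like the sketch below (not a literal hunk, just a restatement of the ordering argument; the identifiers are the ones from simple_lmk.c):

    /* Waker (simple_lmk_decide_reclaim): the acquire on the cmpxchg is
     * sufficient, and wake_up() executes a full barrier when it actually
     * wakes the reclaim thread. */
    if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION &&
        !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1))
            wake_up(&oom_waitq);

    /* Waiter (simple_lmk_reclaim_thread): a plain atomic_read() is enough,
     * since wake_up()'s barrier already ordered the waker's prior stores. */
    wait_event(oom_waitq, atomic_read(&needs_reclaim));
    scan_and_kill(MIN_FREE_PAGES);
    atomic_set_release(&needs_reclaim, 0);
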
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index ce2d1872c2d3..3372fe21962d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -145,11 +145,10 @@ static int process_victims(int vlen, unsigned long pages_needed) /* The victim's mm lock is taken in find_victims; release it */ if (pages_found >= pages_needed) { task_unlock(vtsk); - continue; + } else { + pages_found += victim->size; + nr_to_kill++; } - - pages_found += victim->size; - nr_to_kill++; } return nr_to_kill; @@ -249,7 +248,7 @@ static int simple_lmk_reclaim_thread(void *data) sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio); while (1) { - wait_event(oom_waitq, atomic_read_acquire(&needs_reclaim)); + wait_event(oom_waitq, atomic_read(&needs_reclaim)); scan_and_kill(MIN_FREE_PAGES); atomic_set_release(&needs_reclaim, 0); } @@ -260,7 +259,7 @@ static int simple_lmk_reclaim_thread(void *data) void simple_lmk_decide_reclaim(int kswapd_priority) { if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION && - !atomic_cmpxchg(&needs_reclaim, 0, 1)) + !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1)) wake_up(&oom_waitq); } @@ -270,11 +269,13 @@ void simple_lmk_mm_freed(struct mm_struct *mm) read_lock(&mm_free_lock); for (i = 0; i < victims_to_kill; i++) { - if (cmpxchg(&victims[i].mm, mm, NULL) == mm) { - if (atomic_inc_return(&nr_killed) == victims_to_kill) - complete(&reclaim_done); - break; - } + if (victims[i].mm != mm) + continue; + + victims[i].mm = NULL; + if (atomic_inc_return_relaxed(&nr_killed) == victims_to_kill) + complete(&reclaim_done); + break; } read_unlock(&mm_free_lock); } From 428aebb5f33d4aec00d65d30c4af588a42d6983b Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Tue, 18 Feb 2020 22:39:41 -0800 Subject: [PATCH 036/452] simple_lmk: Include swap memory usage in the size of victims Swap memory usage is important when determining what to kill, so include it in the victim size calculation. 
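As a rough sketch of the difference (assuming the usual mm counters on this kernel: MM_FILEPAGES, MM_ANONPAGES, MM_SHMEMPAGES and MM_SWAPENTS), only the swapped-out pages are new to the accounting:

    /* Before: resident pages only (file + anon + shmem) */
    victims[*vindex].size = get_mm_rss(vtsk->mm);

    /* After: every mm counter, which additionally folds in MM_SWAPENTS,
     * so heavily swapped processes are weighed by their real footprint */
    victims[*vindex].size = get_total_mm_pages(vtsk->mm);
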
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 3372fe21962d..215ee674d82d 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -75,6 +75,17 @@ static bool vtsk_is_duplicate(int vlen, struct task_struct *vtsk) return false; } +static unsigned long get_total_mm_pages(struct mm_struct *mm) +{ + unsigned long pages = 0; + int i; + + for (i = 0; i < NR_MM_COUNTERS; i++) + pages += get_mm_counter(mm, i); + + return pages; +} + static unsigned long find_victims(int *vindex, short target_adj) { unsigned long pages_found = 0; @@ -108,7 +119,7 @@ static unsigned long find_victims(int *vindex, short target_adj) /* Store this potential victim away for later */ victims[*vindex].tsk = vtsk; victims[*vindex].mm = vtsk->mm; - victims[*vindex].size = get_mm_rss(vtsk->mm); + victims[*vindex].size = get_total_mm_pages(vtsk->mm); /* Keep track of the number of pages that have been found */ pages_found += victims[*vindex].size; From 0ef04d9b3b171ebe36b0d652fe26b89e1276ee61 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 19 Feb 2020 14:47:13 -0800 Subject: [PATCH 037/452] mm: Stop kswapd early when nothing's waiting for it to free pages Keeping kswapd running when all the failed allocations that invoked it are satisfied incurs a high overhead due to unnecessary page eviction and writeback, as well as spurious VM pressure events to various registered shrinkers. When kswapd doesn't need to work to make an allocation succeed anymore, stop it prematurely to save resources. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- include/linux/mmzone.h | 1 + mm/page_alloc.c | 17 ++++++++++++++--- mm/vmscan.c | 3 ++- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index cecf0a58cf66..619ccfdf47d7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -656,6 +656,7 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; + atomic_t kswapd_waiters; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5de05dc023a5..1018bc303bdf 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3913,6 +3913,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int reserve_flags; unsigned long pages_reclaimed = 0; int retry_loop_count = 0; + pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat; + bool woke_kswapd = false; unsigned long jiffies_s = jiffies; u64 utime, stime_s, stime_e, stime_d; @@ -3950,8 +3952,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (!ac->preferred_zoneref->zone) goto nopage; - if (gfp_mask & __GFP_KSWAPD_RECLAIM) + if (gfp_mask & __GFP_KSWAPD_RECLAIM) { + if (!woke_kswapd) { + atomic_inc(&pgdat->kswapd_waiters); + woke_kswapd = true; + } wake_all_kswapds(order, ac); + } /* * The adjusted alloc_flags might result in immediate success, so try @@ -4147,9 +4154,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, goto retry; } fail: - warn_alloc(gfp_mask, ac->nodemask, - "page allocation failure: order:%u", order); got_pg: + if (woke_kswapd) + atomic_dec(&pgdat->kswapd_waiters); + if (!page) + warn_alloc(gfp_mask, ac->nodemask, + "page allocation failure: 
order:%u", order); task_cputime(current, &utime, &stime_e); stime_d = stime_e - stime_s; if (stime_d / NSEC_PER_MSEC > 256) { @@ -6119,6 +6129,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) pgdat_page_ext_init(pgdat); spin_lock_init(&pgdat->lru_lock); lruvec_init(node_lruvec(pgdat)); + pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0); pgdat->per_cpu_nodestats = &boot_nodestats; diff --git a/mm/vmscan.c b/mm/vmscan.c index f8e0ea574421..3d2ee7d76f90 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3728,7 +3728,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) wake_up_all(&pgdat->pfmemalloc_wait); /* Check if kswapd should be suspending */ - if (try_to_freeze() || kthread_should_stop()) + if (try_to_freeze() || kthread_should_stop() || + !atomic_read(&pgdat->kswapd_waiters)) break; /* From 80558e06d39445368059893388d2ec15d35b5764 Mon Sep 17 00:00:00 2001 From: David Ng <dave@codeaurora.org> Date: Mon, 26 Mar 2018 12:46:49 -0700 Subject: [PATCH 038/452] mm, vmpressure: int cast vmpressure level/model for -1 comparison Resolve -Wenum-compare issue when comparing vmpressure level/model against -1 (invalid state). Change-Id: I1c76667ee8390e2d396c96e5ed73f30d0700ffa8 Signed-off-by: David Ng <dave@codeaurora.org> --- mm/vmpressure.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index f5ae7ed17a5d..5a881c46ac4e 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -401,7 +401,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg, /* Find required level */ token = strsep(&spec, ","); level = str_to_level(token); - if (level == -1) { + if ((int)level == -1) { ret = -EINVAL; goto out; } @@ -410,7 +410,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg, token = strsep(&spec, ","); if (token) { mode = str_to_mode(token); - if (mode == -1) { + if ((int)mode == -1) { ret = -EINVAL; goto out; } From 0862f53816bbc1ad97aa3a4e673b6a76506f2c24 Mon Sep 17 00:00:00 2001 From: Vinayak Menon <vinmenon@codeaurora.org> Date: Wed, 4 Mar 2015 16:38:28 +0530 Subject: [PATCH 039/452] mm: vmpressure: allow in-kernel clients to subscribe for events Currently, vmpressure is tied to memcg and its events are available only to userspace clients. This patch removes the dependency on CONFIG_MEMCG and adds a mechanism for in-kernel clients to subscribe for vmpressure events (in fact raw vmpressure values are delivered instead of vmpressure levels, to provide clients more flexibility to take actions on custom pressure levels which are not currently defined by vmpressure module). 
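For illustration, a minimal in-kernel consumer might look like the sketch below; the client name and the threshold are made up, but the registration call and callback signature follow the notifier chain added here, which passes the raw 0-100 pressure value as the notifier action:

    static int my_vmpressure_cb(struct notifier_block *nb,
                                unsigned long pressure, void *data)
    {
            /* 'pressure' is the raw value computed by vmpressure_global() */
            if (pressure >= 95)
                    pr_info("critical vmpressure: %lu\n", pressure);
            return NOTIFY_OK;
    }

    static struct notifier_block my_vmpressure_nb = {
            .notifier_call = my_vmpressure_cb,
    };

    /* typically from an __init function */
    vmpressure_notifier_register(&my_vmpressure_nb);
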
Change-Id: I38010f166546e8d7f12f5f355b5dbfd6ba04d587 Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org> --- include/linux/vmpressure.h | 12 ++-- mm/Makefile | 4 +- mm/vmpressure.c | 138 ++++++++++++++++++++++++++++++------- 3 files changed, 121 insertions(+), 33 deletions(-) diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index ed471d595891..5719c9437458 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -30,11 +30,13 @@ struct vmpressure { struct mem_cgroup; -#ifdef CONFIG_MEMCG +extern int vmpressure_notifier_register(struct notifier_block *nb); +extern int vmpressure_notifier_unregister(struct notifier_block *nb); extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); +#ifdef CONFIG_MEMCG extern void vmpressure_init(struct vmpressure *vmpr); extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); @@ -45,9 +47,9 @@ extern int vmpressure_register_event(struct mem_cgroup *memcg, extern void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); #else -static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) {} -static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, - int prio) {} +static inline struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) +{ + return NULL; +} #endif /* CONFIG_MEMCG */ #endif /* __LINUX_VMPRESSURE_H */ diff --git a/mm/Makefile b/mm/Makefile index 28644484d033..1b890be7d4f1 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o vmacache.o swap_slots.o \ interval_tree.o list_lru.o workingset.o \ - debug.o $(mmu-y) showmem_extra.o + debug.o $(mmu-y) showmem_extra.o vmpressure.o obj-y += init-mm.o @@ -80,7 +80,7 @@ obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG) += memcontrol.o obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 5a881c46ac4e..ee6515e9a4e5 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -22,6 +22,8 @@ #include <linux/slab.h> #include <linux/swap.h> #include <linux/printk.h> +#include <linux/notifier.h> +#include <linux/init.h> #include <linux/vmpressure.h> /* @@ -49,6 +51,24 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; static const unsigned int vmpressure_level_med = CONFIG_VMPRESSURE_LEVEL_MED; static const unsigned int vmpressure_level_critical = 95; +static struct vmpressure global_vmpressure; +static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); + +int vmpressure_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmpressure_notifier, nb); +} + +int vmpressure_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&vmpressure_notifier, nb); +} + +static void vmpressure_notify(unsigned long pressure) +{ + blocking_notifier_call_chain(&vmpressure_notifier, pressure, NULL); +} + /* * When there are too little 
pages left to scan, vmpressure() may miss the * critical pressure as number of pages will be less than "window size". @@ -75,6 +95,7 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work) return container_of(work, struct vmpressure, work); } +#ifdef CONFIG_MEMCG static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) { struct cgroup_subsys_state *css = vmpressure_to_css(vmpr); @@ -85,6 +106,12 @@ static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) return NULL; return memcg_to_vmpressure(memcg); } +#else +static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) +{ + return NULL; +} +#endif enum vmpressure_levels { VMPRESSURE_LOW = 0, @@ -121,7 +148,7 @@ static enum vmpressure_levels vmpressure_level(unsigned long pressure) return VMPRESSURE_LOW; } -static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, +static unsigned long vmpressure_calc_pressure(unsigned long scanned, unsigned long reclaimed, struct vmpressure *vmpr) { unsigned long scale = scanned + reclaimed; @@ -149,7 +176,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, scanned, reclaimed); - return vmpressure_level(pressure); + return pressure; } struct vmpressure_event { @@ -187,6 +214,7 @@ static void vmpressure_work_fn(struct work_struct *work) struct vmpressure *vmpr = work_to_vmpressure(work); unsigned long scanned; unsigned long reclaimed; + unsigned long pressure; enum vmpressure_levels level; bool ancestor = false; bool signalled = false; @@ -211,7 +239,8 @@ static void vmpressure_work_fn(struct work_struct *work) vmpr->tree_reclaimed = 0; spin_unlock(&vmpr->sr_lock); - level = vmpressure_calc_level(scanned, reclaimed, vmpr); + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + level = vmpressure_level(pressure); do { if (vmpressure_event(vmpr, level, ancestor, signalled)) @@ -220,28 +249,8 @@ static void vmpressure_work_fn(struct work_struct *work) } while ((vmpr = vmpressure_parent(vmpr))); } -/** - * vmpressure() - Account memory pressure through scanned/reclaimed ratio - * @gfp: reclaimer's gfp mask - * @memcg: cgroup memory controller handle - * @tree: legacy subtree mode - * @scanned: number of pages scanned - * @reclaimed: number of pages reclaimed - * - * This function should be called from the vmscan reclaim path to account - * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw - * pressure index is then further refined and averaged over time. - * - * If @tree is set, vmpressure is in traditional userspace reporting - * mode: @memcg is considered the pressure root and userspace is - * notified of the entire subtree's reclaim efficiency. - * - * If @tree is not set, reclaim efficiency is recorded for @memcg, and - * only in-kernel users are notified. - * - * This function does not return any value. 
- */ -void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, +#ifdef CONFIG_MEMCG +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); @@ -282,6 +291,7 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, schedule_work(&vmpr->work); } else { enum vmpressure_levels level; + unsigned long pressure; /* For now, no users for root-level efficiency */ if (!memcg || memcg == root_mem_cgroup) @@ -297,7 +307,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, vmpr->scanned = vmpr->reclaimed = 0; spin_unlock(&vmpr->sr_lock); - level = vmpressure_calc_level(scanned, reclaimed, vmpr); + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + level = vmpressure_level(pressure); if (level > VMPRESSURE_LOW) { /* @@ -312,6 +323,74 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } } } +#else +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed) +{ +} +#endif + +static void vmpressure_global(gfp_t gfp, unsigned long scanned, + unsigned long reclaimed) +{ + struct vmpressure *vmpr = &global_vmpressure; + unsigned long pressure; + + if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) + return; + + if (!scanned) + return; + + spin_lock(&vmpr->sr_lock); + vmpr->scanned += scanned; + vmpr->reclaimed += reclaimed; + scanned = vmpr->scanned; + reclaimed = vmpr->reclaimed; + spin_unlock(&vmpr->sr_lock); + + if (scanned < vmpressure_win) + return; + + spin_lock(&vmpr->sr_lock); + vmpr->scanned = 0; + vmpr->reclaimed = 0; + spin_unlock(&vmpr->sr_lock); + + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + vmpressure_notify(pressure); +} + +/** + * vmpressure() - Account memory pressure through scanned/reclaimed ratio + * @gfp: reclaimer's gfp mask + * @memcg: cgroup memory controller handle + * @tree: legacy subtree mode + * @scanned: number of pages scanned + * @reclaimed: number of pages reclaimed + * + * This function should be called from the vmscan reclaim path to account + * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw + * pressure index is then further refined and averaged over time. + * + * If @tree is set, vmpressure is in traditional userspace reporting + * mode: @memcg is considered the pressure root and userspace is + * notified of the entire subtree's reclaim efficiency. + * + * If @tree is not set, reclaim efficiency is recorded for @memcg, and + * only in-kernel users are notified. + * + * This function does not return any value. 
+ */ +void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed) +{ + if (!memcg && tree) + vmpressure_global(gfp, scanned, reclaimed); + + if (IS_ENABLED(CONFIG_MEMCG)) + vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed); +} /** * vmpressure_prio() - Account memory pressure through reclaimer priority level @@ -492,3 +571,10 @@ void vmpressure_cleanup(struct vmpressure *vmpr) */ flush_work(&vmpr->work); } + +static int vmpressure_global_init(void) +{ + vmpressure_init(&global_vmpressure); + return 0; +} +late_initcall(vmpressure_global_init); From dd43649a68ff13b9b2345adc5bad23c60bb201e0 Mon Sep 17 00:00:00 2001 From: Vinayak Menon <vinmenon@codeaurora.org> Date: Tue, 31 Mar 2015 11:06:29 +0530 Subject: [PATCH 040/452] mm: vmpressure: scale pressure based on reclaim context The existing calculation of vmpressure takes into account only the ratio of reclaimed to scanned pages, but not the time spent or the difficulty in reclaiming those pages. For e.g. when there are quite a number of file pages in the system, an allocation request can be satisfied by reclaiming the file pages alone. If such a reclaim is successful, the vmpressure value will remain low irrespective of the time spent by the reclaim code to free up the file pages. With a feature like lowmemorykiller, killing a task can be faster than reclaiming the file pages alone. So if the vmpressure values reflect the reclaim difficulty level, clients can make a decision based on that, for e.g. to kill a task early. This patch monitors the number of pages scanned in the direct reclaim path and scales the vmpressure level according to that. Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org> Change-Id: I6e643d29a9a1aa0814309253a8b690ad86ec0b13 --- include/linux/vmpressure.h | 1 + mm/vmpressure.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 5719c9437458..8f33ef96dd5d 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -17,6 +17,7 @@ struct vmpressure { unsigned long tree_scanned; unsigned long tree_reclaimed; + unsigned long stall; /* The lock is used to keep the scanned/reclaimed above in sync. 
*/ struct spinlock sr_lock; diff --git a/mm/vmpressure.c b/mm/vmpressure.c index ee6515e9a4e5..1d085bb6c2a7 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -24,6 +24,7 @@ #include <linux/printk.h> #include <linux/notifier.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/vmpressure.h> /* @@ -51,6 +52,10 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; static const unsigned int vmpressure_level_med = CONFIG_VMPRESSURE_LEVEL_MED; static const unsigned int vmpressure_level_critical = 95; +static unsigned long vmpressure_scale_max = 100; +module_param_named(vmpressure_scale_max, vmpressure_scale_max, + ulong, 0644); + static struct vmpressure global_vmpressure; static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); @@ -179,6 +184,15 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned, return pressure; } +static unsigned long vmpressure_account_stall(unsigned long pressure, + unsigned long stall, unsigned long scanned) +{ + unsigned long scale = + ((vmpressure_scale_max - pressure) * stall) / scanned; + + return pressure + scale; +} + struct vmpressure_event { struct eventfd_ctx *efd; enum vmpressure_levels level; @@ -335,6 +349,7 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, { struct vmpressure *vmpr = &global_vmpressure; unsigned long pressure; + unsigned long stall; if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) return; @@ -345,6 +360,11 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, spin_lock(&vmpr->sr_lock); vmpr->scanned += scanned; vmpr->reclaimed += reclaimed; + + if (!current_is_kswapd()) + vmpr->stall += scanned; + + stall = vmpr->stall; scanned = vmpr->scanned; reclaimed = vmpr->reclaimed; spin_unlock(&vmpr->sr_lock); @@ -355,9 +375,11 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, spin_lock(&vmpr->sr_lock); vmpr->scanned = 0; vmpr->reclaimed = 0; + vmpr->stall = 0; spin_unlock(&vmpr->sr_lock); pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + pressure = vmpressure_account_stall(pressure, stall, scanned); vmpressure_notify(pressure); } From 3da238eec2e2ac03590767db62b6e64302b70161 Mon Sep 17 00:00:00 2001 From: Vinayak Menon <vinmenon@codeaurora.org> Date: Wed, 19 Aug 2015 16:16:39 +0530 Subject: [PATCH 041/452] mm: vmpressure: account allocstalls only on higher pressures At present any vmpressure value is scaled up if the pages are reclaimed through direct reclaim. This can result in false vmpressure values. Consider a case where a device is booted up and most of the memory is occuppied by file pages. kswapd will make sure that high watermark is maintained. Now when a sudden huge allocation request comes in, the system will definitely have to get into direct reclaims. The vmpressures can be very low, but because of allocstall accounting logic even these low values will be scaled to values nearing 100. This can result in unnecessary LMK kills for example. So define a tunable threshold for vmpressure above which the allocstalls will be accounted. 
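A quick worked example with the defaults introduced here (vmpressure_scale_max = 100, allocstall_threshold = 70): if the raw pressure is 80 and half of the window's scanned pages came from direct reclaim (stall = scanned / 2), the reported value becomes 80 + ((100 - 80) * stall) / scanned = 90; a raw pressure of 60 stays 60 because it is below the threshold. The resulting helper is simply:

    if (pressure < allocstall_threshold)
            return pressure;        /* low pressure: ignore allocstalls */

    scale = ((vmpressure_scale_max - pressure) * stall) / scanned;
    return pressure + scale;        /* e.g. 80 -> 90 when stall == scanned / 2 */
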
Change-Id: Idd7c6724264ac89f1f68f2e9d70a32390ffca3e5 Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org> --- mm/vmpressure.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 1d085bb6c2a7..36fd0b53aa1b 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -56,6 +56,11 @@ static unsigned long vmpressure_scale_max = 100; module_param_named(vmpressure_scale_max, vmpressure_scale_max, ulong, 0644); +/* vmpressure values >= this will be scaled based on allocstalls */ +static unsigned long allocstall_threshold = 70; +module_param_named(allocstall_threshold, allocstall_threshold, + ulong, 0644); + static struct vmpressure global_vmpressure; static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); @@ -187,8 +192,12 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned, static unsigned long vmpressure_account_stall(unsigned long pressure, unsigned long stall, unsigned long scanned) { - unsigned long scale = - ((vmpressure_scale_max - pressure) * stall) / scanned; + unsigned long scale; + + if (pressure < allocstall_threshold) + return pressure; + + scale = ((vmpressure_scale_max - pressure) * stall) / scanned; return pressure + scale; } From c96536a7211ec09a0df7013f9dbe8cc9537d96c3 Mon Sep 17 00:00:00 2001 From: Vinayak Menon <vinmenon@codeaurora.org> Date: Mon, 19 Sep 2016 12:44:15 +0530 Subject: [PATCH 042/452] mm: vmpressure: make vmpressure window variable Right now the vmpressure window is of constant size 2MB, which works well with the following exceptions. 1) False vmpressure triggers are seen when the RAM size is greater than 3GB. This results in lowmemorykiller, which uses vmpressure events, killing tasks unnecessarily. 2) Vmpressure events are received late under memory pressure. This behaviour is seen prominently in <=2GB RAM targets. This results in lowmemorykiller kicking in late to kill tasks resulting in avoidable page cache reclaim. The problem analysis shows that the issue is with the constant size of the vmpressure window which does not adapt to the varying memory conditions. This patch recalculates the vmpressure window size at the end of each window. The chosen window size is proportional to the total of free and cached memory at that point. Change-Id: I7e9ef4ddd82e2c2dd04ce09ec8d58a8829cfb64d Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org> --- mm/vmpressure.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 36fd0b53aa1b..16ed04650270 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -41,7 +41,7 @@ * TODO: Make the window size depend on machine size, as we do for vmstat * thresholds. Currently we set it to 512 pages (2MB for 4KB pages). */ -static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; +static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; /* * These thresholds are used when we account memory pressure through @@ -353,6 +353,29 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } #endif +static void calculate_vmpressure_win(void) +{ + long x; + + x = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages() + + global_zone_page_state(NR_FREE_PAGES); + if (x < 1) + x = 1; + /* + * For low (free + cached), vmpressure window should be + * small, and high for higher values of (free + cached). + * But it should not be linear as well. 
This ensures + * timely vmpressure notifications when system is under + * memory pressure, and optimal number of events when + * cached is high. The sqaure root function is empirically + * found to serve the purpose. + */ + x = int_sqrt(x); + vmpressure_win = x; +} + static void vmpressure_global(gfp_t gfp, unsigned long scanned, unsigned long reclaimed) { @@ -367,6 +390,9 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, return; spin_lock(&vmpr->sr_lock); + if (!vmpr->scanned) + calculate_vmpressure_win(); + vmpr->scanned += scanned; vmpr->reclaimed += reclaimed; From 5a2931da86527525a24b46ca331ffc24754c27cd Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Mon, 24 Feb 2020 19:03:04 -0800 Subject: [PATCH 043/452] simple_lmk: Use vmpressure notifier to trigger kills Using kswapd's scan depth to trigger task kills is inconsistent and unreliable. When memory pressure quickly spikes, the kswapd scan depth trigger fails to kick off Simple LMK fast enough, causing severe lag. Additionally, kswapd could stop scanning prematurely before reaching the desired scan depth to trigger Simple LMK, which could also cause stalls. To remedy this, use the vmpressure framework instead, since it provides more consistent and accurate readings on memory pressure. This is not very tunable though, so remove CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION. Triggering Simple LMK to kill when the reported memory pressure is 100 should yield good results on all setups. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/Kconfig | 22 ---------------------- drivers/android/simple_lmk.c | 24 +++++++++++++++++------- include/linux/simple_lmk.h | 4 ---- mm/vmscan.c | 2 -- 4 files changed, 17 insertions(+), 35 deletions(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 7f65391e5e73..6a87d1298c5e 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -55,28 +55,6 @@ config ANDROID_SIMPLE_LMK if ANDROID_SIMPLE_LMK -config ANDROID_SIMPLE_LMK_AGGRESSION - int "Reclaim frequency selection" - range 1 3 - default 1 - help - This value determines how frequently Simple LMK will perform memory - reclaims. A lower value corresponds to less frequent reclaims, which - maximizes memory usage. The range of values has a logarithmic - correlation; 2 is twice as aggressive as 1, and 3 is twice as - aggressive as 2, which makes 3 four times as aggressive as 1. - - The aggression is set as a factor of kswapd's scan depth. This means - that a system with more memory will have a more expensive aggression - factor compared to a system with less memory. For example, setting an - aggression factor of 1 with 4 GiB of memory would be like setting a - factor of 2 with 8 GiB of memory; the more memory a system has, the - more expensive it is to use a lower value. - - Choosing a value of 1 here works well with systems that have 4 GiB of - memory. If the default doesn't work well, then this value should be - tweaked based on empirical results using different values. 
- config ANDROID_SIMPLE_LMK_MINFREE int "Minimum MiB of memory to free per reclaim" range 8 512 diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 215ee674d82d..2a3316100c79 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -10,6 +10,7 @@ #include <linux/moduleparam.h> #include <linux/oom.h> #include <linux/sort.h> +#include <linux/vmpressure.h> #include <uapi/linux/sched/types.h> /* The minimum number of pages to free per reclaim */ @@ -267,13 +268,6 @@ static int simple_lmk_reclaim_thread(void *data) return 0; } -void simple_lmk_decide_reclaim(int kswapd_priority) -{ - if (kswapd_priority == CONFIG_ANDROID_SIMPLE_LMK_AGGRESSION && - !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1)) - wake_up(&oom_waitq); -} - void simple_lmk_mm_freed(struct mm_struct *mm) { int i; @@ -291,6 +285,20 @@ void simple_lmk_mm_freed(struct mm_struct *mm) read_unlock(&mm_free_lock); } +static int simple_lmk_vmpressure_cb(struct notifier_block *nb, + unsigned long pressure, void *data) +{ + if (pressure == 100 && !atomic_cmpxchg_acquire(&needs_reclaim, 0, 1)) + wake_up(&oom_waitq); + + return NOTIFY_OK; +} + +static struct notifier_block vmpressure_notif = { + .notifier_call = simple_lmk_vmpressure_cb, + .priority = INT_MAX +}; + /* Initialize Simple LMK when lmkd in Android writes to the minfree parameter */ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) { @@ -301,7 +309,9 @@ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp) thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd"); BUG_ON(IS_ERR(thread)); + BUG_ON(vmpressure_notifier_register(&vmpressure_notif)); } + return 0; } diff --git a/include/linux/simple_lmk.h b/include/linux/simple_lmk.h index 28103c1b1d4c..b02d1bec9731 100644 --- a/include/linux/simple_lmk.h +++ b/include/linux/simple_lmk.h @@ -8,12 +8,8 @@ struct mm_struct; #ifdef CONFIG_ANDROID_SIMPLE_LMK -void simple_lmk_decide_reclaim(int kswapd_priority); void simple_lmk_mm_freed(struct mm_struct *mm); #else -static inline void simple_lmk_decide_reclaim(int kswapd_priority) -{ -} static inline void simple_lmk_mm_freed(struct mm_struct *mm) { } diff --git a/mm/vmscan.c b/mm/vmscan.c index 3d2ee7d76f90..d1cb6dfa3800 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -49,7 +49,6 @@ #include <linux/prefetch.h> #include <linux/printk.h> #include <linux/dax.h> -#include <linux/simple_lmk.h> #include <asm/tlbflush.h> #include <asm/div64.h> @@ -3656,7 +3655,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) unsigned long nr_reclaimed = sc.nr_reclaimed; bool raise_priority = true; - simple_lmk_decide_reclaim(sc.priority); sc.reclaim_idx = classzone_idx; /* From 3026a8a35260c109fa4d19f54766df9869b4393d Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 26 Feb 2020 10:14:18 -0800 Subject: [PATCH 044/452] simple_lmk: Update adj targeting for Android 10 Android 10 changed its adj assignments. Update Simple LMK to use the new adjs, which also requires looking at each pair of adjs as a range. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 43 ++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index 2a3316100c79..f502eb5da8d7 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -29,23 +29,21 @@ struct victim_info { }; /* Pulled from the Android framework. 
Lower adj means higher priority. */ -static const short adj_prio[] = { - 906, /* CACHED_APP_MAX_ADJ */ - 905, /* Cached app */ - 904, /* Cached app */ - 903, /* Cached app */ - 902, /* Cached app */ - 901, /* Cached app */ - 900, /* CACHED_APP_MIN_ADJ */ - 800, /* SERVICE_B_ADJ */ - 700, /* PREVIOUS_APP_ADJ */ - 600, /* HOME_APP_ADJ */ - 500, /* SERVICE_ADJ */ - 400, /* HEAVY_WEIGHT_APP_ADJ */ - 300, /* BACKUP_APP_ADJ */ - 200, /* PERCEPTIBLE_APP_ADJ */ - 100, /* VISIBLE_APP_ADJ */ - 0 /* FOREGROUND_APP_ADJ */ +static const short adjs[] = { + 1000, /* CACHED_APP_MAX_ADJ + 1 */ + 950, /* CACHED_APP_LMK_FIRST_ADJ */ + 900, /* CACHED_APP_MIN_ADJ */ + 800, /* SERVICE_B_ADJ */ + 700, /* PREVIOUS_APP_ADJ */ + 600, /* HOME_APP_ADJ */ + 500, /* SERVICE_ADJ */ + 400, /* HEAVY_WEIGHT_APP_ADJ */ + 300, /* BACKUP_APP_ADJ */ + 250, /* PERCEPTIBLE_LOW_APP_ADJ */ + 200, /* PERCEPTIBLE_APP_ADJ */ + 100, /* VISIBLE_APP_ADJ */ + 50, /* PERCEPTIBLE_RECENT_FOREGROUND_APP_ADJ */ + 0 /* FOREGROUND_APP_ADJ */ }; static struct victim_info victims[MAX_VICTIMS]; @@ -87,7 +85,8 @@ static unsigned long get_total_mm_pages(struct mm_struct *mm) return pages; } -static unsigned long find_victims(int *vindex, short target_adj) +static unsigned long find_victims(int *vindex, short target_adj_min, + short target_adj_max) { unsigned long pages_found = 0; int old_vindex = *vindex; @@ -96,6 +95,7 @@ static unsigned long find_victims(int *vindex, short target_adj) for_each_process(tsk) { struct signal_struct *sig; struct task_struct *vtsk; + short adj; /* * Search for suitable tasks with the targeted importance (adj). @@ -107,7 +107,8 @@ static unsigned long find_victims(int *vindex, short target_adj) * trying to lock a task that we locked earlier. */ sig = tsk->signal; - if (READ_ONCE(sig->oom_score_adj) != target_adj || + adj = READ_ONCE(sig->oom_score_adj); + if (adj < target_adj_min || adj > target_adj_max - 1 || sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP) || (thread_group_empty(tsk) && tsk->flags & PF_EXITING) || vtsk_is_duplicate(*vindex, tsk)) @@ -177,8 +178,8 @@ static void scan_and_kill(unsigned long pages_needed) * is guaranteed to be up to date. */ read_lock(&tasklist_lock); - for (i = 0; i < ARRAY_SIZE(adj_prio); i++) { - pages_found += find_victims(&nr_victims, adj_prio[i]); + for (i = 1; i < ARRAY_SIZE(adjs); i++) { + pages_found += find_victims(&nr_victims, adjs[i], adjs[i - 1]); if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) break; } From b7cbabdea4b3ad13dbe48c97ae5117af81a86396 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Fri, 28 Feb 2020 12:43:54 -0800 Subject: [PATCH 045/452] mm: vmpressure: Don't exclude any allocation types Although userspace processes can't directly help with kernel memory pressure, killing userspace processes can relieve kernel memory if they are responsible for that pressure in the first place. It doesn't make sense to exclude any allocation types knowing that userspace can indeed affect all memory pressure, so don't exclude any allocation types from the pressure calculations. 
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- mm/vmpressure.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 16ed04650270..63bf9c7fb754 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -278,20 +278,6 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); - /* - * Here we only want to account pressure that userland is able to - * help us with. For example, suppose that DMA zone is under - * pressure; if we notify userland about that kind of pressure, - * then it will be mostly a waste as it will trigger unnecessary - * freeing of memory by userland (since userland is more likely to - * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That - * is why we include only movable, highmem and FS/IO pages. - * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so - * we account it too. - */ - if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) - return; - /* * If we got here with no pages scanned, then that is an indicator * that reclaimer was unable to find any shrinkable LRUs at the @@ -383,9 +369,6 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, unsigned long pressure; unsigned long stall; - if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) - return; - if (!scanned) return; From 4ac14468ab9502d7ad86363c22a19a29d96abae2 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Fri, 28 Feb 2020 12:38:10 -0800 Subject: [PATCH 046/452] mm: vmpressure: Interpret zero scanned pages as 100% pressure When no pages are scanned, it usually means no zones were reclaimable and nothing could be done. In this case, the reported pressure should be 100 to elicit help from any listeners. This fixes the vmpressure framework not working when memory pressure is very high. 
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- mm/vmpressure.c | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 63bf9c7fb754..fb486f79f55a 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -369,26 +369,25 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, unsigned long pressure; unsigned long stall; - if (!scanned) - return; - - spin_lock(&vmpr->sr_lock); - if (!vmpr->scanned) - calculate_vmpressure_win(); + if (scanned) { + spin_lock(&vmpr->sr_lock); + if (!vmpr->scanned) + calculate_vmpressure_win(); - vmpr->scanned += scanned; - vmpr->reclaimed += reclaimed; + vmpr->scanned += scanned; + vmpr->reclaimed += reclaimed; - if (!current_is_kswapd()) - vmpr->stall += scanned; + if (!current_is_kswapd()) + vmpr->stall += scanned; - stall = vmpr->stall; - scanned = vmpr->scanned; - reclaimed = vmpr->reclaimed; - spin_unlock(&vmpr->sr_lock); + stall = vmpr->stall; + scanned = vmpr->scanned; + reclaimed = vmpr->reclaimed; + spin_unlock(&vmpr->sr_lock); - if (scanned < vmpressure_win) - return; + if (scanned < vmpressure_win) + return; + } spin_lock(&vmpr->sr_lock); vmpr->scanned = 0; @@ -396,8 +395,12 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, vmpr->stall = 0; spin_unlock(&vmpr->sr_lock); - pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); - pressure = vmpressure_account_stall(pressure, stall, scanned); + if (scanned) { + pressure = vmpressure_calc_pressure(scanned, reclaimed, vmpr); + pressure = vmpressure_account_stall(pressure, stall, scanned); + } else { + pressure = 100; + } vmpressure_notify(pressure); } From 5cf043ddbdaee104a968209a2d85746e5fdc9467 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Fri, 28 Feb 2020 12:57:20 -0800 Subject: [PATCH 047/452] mm: vmpressure: Don't cache the window size Caching the window size can result in delayed or inaccurate pressure reports. Since calculating a fresh window size is cheap, do so all the time instead of relying on a stale, cached value. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- mm/vmpressure.c | 112 ++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index fb486f79f55a..58a8ad031c5e 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -27,22 +27,6 @@ #include <linux/module.h> #include <linux/vmpressure.h> -/* - * The window size (vmpressure_win) is the number of scanned pages before - * we try to analyze scanned/reclaimed ratio. So the window is used as a - * rate-limit tunable for the "low" level notification, and also for - * averaging the ratio for medium/critical levels. Using small window - * sizes can cause lot of false positives, but too big window size will - * delay the notifications. - * - * As the vmscan reclaimer logic works with chunks which are multiple of - * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well. - * - * TODO: Make the window size depend on machine size, as we do for vmstat - * thresholds. Currently we set it to 512 pages (2MB for 4KB pages). - */ -static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; - /* * These thresholds are used when we account memory pressure through * scanned/reclaimed ratio. The current values were chosen empirically. 
In @@ -272,9 +256,32 @@ static void vmpressure_work_fn(struct work_struct *work) } while ((vmpr = vmpressure_parent(vmpr))); } +static unsigned long calculate_vmpressure_win(void) +{ + long x; + + x = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages() + + global_zone_page_state(NR_FREE_PAGES); + if (x < 1) + return 1; + /* + * For low (free + cached), vmpressure window should be + * small, and high for higher values of (free + cached). + * But it should not be linear as well. This ensures + * timely vmpressure notifications when system is under + * memory pressure, and optimal number of events when + * cached is high. The sqaure root function is empirically + * found to serve the purpose. + */ + return int_sqrt(x); +} + #ifdef CONFIG_MEMCG -static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool critical, + bool tree, unsigned long scanned, + unsigned long reclaimed) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); @@ -286,7 +293,9 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, * (scanning depth) goes too high (deep), we will be notified * through vmpressure_prio(). But so far, keep calm. */ - if (!scanned) + if (critical) + scanned = calculate_vmpressure_win(); + else if (!scanned) return; if (tree) { @@ -295,7 +304,7 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, vmpr->tree_reclaimed += reclaimed; spin_unlock(&vmpr->sr_lock); - if (scanned < vmpressure_win) + if (!critical && scanned < calculate_vmpressure_win()) return; schedule_work(&vmpr->work); } else { @@ -309,7 +318,7 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, spin_lock(&vmpr->sr_lock); scanned = vmpr->scanned += scanned; reclaimed = vmpr->reclaimed += reclaimed; - if (scanned < vmpressure_win) { + if (!critical && scanned < calculate_vmpressure_win()) { spin_unlock(&vmpr->sr_lock); return; } @@ -333,47 +342,23 @@ static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } } #else -static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) -{ -} +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool critical, + bool tree, unsigned long scanned, + unsigned long reclaimed) { } #endif -static void calculate_vmpressure_win(void) -{ - long x; - - x = global_node_page_state(NR_FILE_PAGES) - - global_node_page_state(NR_SHMEM) - - total_swapcache_pages() + - global_zone_page_state(NR_FREE_PAGES); - if (x < 1) - x = 1; - /* - * For low (free + cached), vmpressure window should be - * small, and high for higher values of (free + cached). - * But it should not be linear as well. This ensures - * timely vmpressure notifications when system is under - * memory pressure, and optimal number of events when - * cached is high. The sqaure root function is empirically - * found to serve the purpose. 
- */ - x = int_sqrt(x); - vmpressure_win = x; -} - -static void vmpressure_global(gfp_t gfp, unsigned long scanned, - unsigned long reclaimed) +static void vmpressure_global(gfp_t gfp, unsigned long scanned, bool critical, + unsigned long reclaimed) { struct vmpressure *vmpr = &global_vmpressure; unsigned long pressure; unsigned long stall; + if (critical) + scanned = calculate_vmpressure_win(); + if (scanned) { spin_lock(&vmpr->sr_lock); - if (!vmpr->scanned) - calculate_vmpressure_win(); - vmpr->scanned += scanned; vmpr->reclaimed += reclaimed; @@ -385,7 +370,7 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, reclaimed = vmpr->reclaimed; spin_unlock(&vmpr->sr_lock); - if (scanned < vmpressure_win) + if (!critical && scanned < calculate_vmpressure_win()) return; } @@ -404,6 +389,17 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, vmpressure_notify(pressure); } +static void __vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool critical, + bool tree, unsigned long scanned, + unsigned long reclaimed) +{ + if (!memcg && tree) + vmpressure_global(gfp, scanned, critical, reclaimed); + + if (IS_ENABLED(CONFIG_MEMCG)) + vmpressure_memcg(gfp, memcg, critical, tree, scanned, reclaimed); +} + /** * vmpressure() - Account memory pressure through scanned/reclaimed ratio * @gfp: reclaimer's gfp mask @@ -428,11 +424,7 @@ static void vmpressure_global(gfp_t gfp, unsigned long scanned, void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) { - if (!memcg && tree) - vmpressure_global(gfp, scanned, reclaimed); - - if (IS_ENABLED(CONFIG_MEMCG)) - vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed); + __vmpressure(gfp, memcg, false, tree, scanned, reclaimed); } /** @@ -462,7 +454,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) * to the vmpressure() basically means that we signal 'critical' * level. */ - vmpressure(gfp, memcg, true, vmpressure_win, 0); + __vmpressure(gfp, memcg, true, true, 0, 0); } static enum vmpressure_levels str_to_level(const char *arg) From 790936c73fb632ffba9a2aa300c23afeae75bca4 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 26 Mar 2020 21:42:01 -0700 Subject: [PATCH 048/452] mm: Adjust tsk_is_oom_victim() for Simple LMK The page allocator uses tsk_is_oom_victim() to determine when to fast-path memory allocations in order to get an allocating process out of the page allocator and into do_exit() quickly. Unfortunately, tsk_is_oom_victim()'s check to see if a process is killed for OOM purposes is to look for the presence of an OOM reaper artifact that only the OOM killer sets. This means that for processes killed by Simple LMK, there is no fast-pathing done in the page allocator to get them to die faster. Remedy this by changing tsk_is_oom_victim() to look for the existence of the TIF_MEMDIE flag, which Simple LMK sets for its victims. 
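Condensed, the two halves of the contract (both visible in this series) are: Simple LMK already sets TIF_MEMDIE on every victim thread in scan_and_kill(), and tsk_is_oom_victim() now keys off that same flag, so the allocator's existing fast paths apply to LMK victims as well:

    /* Killer side, already in scan_and_kill(): */
    set_tsk_thread_flag(t, TIF_MEMDIE);

    /* Allocator side, after this patch: */
    static inline bool tsk_is_oom_victim(struct task_struct *tsk)
    {
    #ifdef CONFIG_ANDROID_SIMPLE_LMK
            return test_ti_thread_flag(task_thread_info(tsk), TIF_MEMDIE);
    #else
            return tsk->signal->oom_mm;
    #endif
    }
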
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- include/linux/oom.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/oom.h b/include/linux/oom.h index 9502b0b057a2..c4d85b912941 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -63,7 +63,11 @@ static inline bool oom_task_origin(const struct task_struct *p) static inline bool tsk_is_oom_victim(struct task_struct * tsk) { +#ifdef CONFIG_ANDROID_SIMPLE_LMK + return test_ti_thread_flag(task_thread_info(tsk), TIF_MEMDIE); +#else return tsk->signal->oom_mm; +#endif } /* From 58257757b069dd185756f4ad79a16a75d75242ee Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 26 Mar 2020 21:44:28 -0700 Subject: [PATCH 049/452] mm: Don't warn on page allocation failures for OOM-killed processes It can be normal for a dying process to have its page allocation request fail when it has an OOM or LMK kill pending. In this case, it's actually detrimental to print out a massive allocation failure message because this means the running process needs to die quickly and release its memory, which is slowed down slightly by the massive kmsg splat. The allocation failure message is also a false positive in this case, since the failure is intentional rather than being the result of an inability to allocate memory. Suppress the allocation failure warning for processes that are killed to release memory in order to expedite their death and remedy the kmsg confusion from seeing spurious allocation failure messages. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- mm/page_alloc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1018bc303bdf..2dce6a289597 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4099,8 +4099,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, /* Avoid allocations with no watermarks from looping endlessly */ if (tsk_is_oom_victim(current) && (alloc_flags == ALLOC_OOM || - (gfp_mask & __GFP_NOMEMALLOC))) + (gfp_mask & __GFP_NOMEMALLOC))) { + gfp_mask |= __GFP_NOWARN; goto nopage; + } /* Retry as long as the OOM killer is making progress */ if (did_some_progress) { From 64fff6862e57b6bc27cd0e5e7498bde9cfa6228e Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Sat, 4 Apr 2020 17:48:40 -0700 Subject: [PATCH 050/452] mm: vmpressure: Ignore allocation orders above PAGE_ALLOC_COSTLY_ORDER PAGE_ALLOC_COSTLY_ORDER allocations can cause vmpressure to incorrectly think that memory pressure is high, when it's really just that the allocation's high order is difficult to satisfy. When this rare scenario occurs, ignore the input to vmpressure to avoid sending out a spurious high-pressure signal. 
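Concretely, PAGE_ALLOC_COSTLY_ORDER is 3, so with 4 KiB pages this drops the scan/reclaim statistics of any allocation larger than 8 contiguous pages (32 KiB); the guard is just an early return at the top of vmpressure():

    if (order > PAGE_ALLOC_COSTLY_ORDER)
            return;         /* order > 3: too rare and too hard to satisfy
                               to be a meaningful pressure signal */
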
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- include/linux/vmpressure.h | 3 ++- mm/vmpressure.c | 5 ++++- mm/vmscan.c | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 8f33ef96dd5d..c8b5cf51d652 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -34,7 +34,8 @@ struct mem_cgroup; extern int vmpressure_notifier_register(struct notifier_block *nb); extern int vmpressure_notifier_unregister(struct notifier_block *nb); extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed); + unsigned long scanned, unsigned long reclaimed, + int order); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); #ifdef CONFIG_MEMCG diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 58a8ad031c5e..15c95c01bb44 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -422,8 +422,11 @@ static void __vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool critical, * This function does not return any value. */ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) + unsigned long scanned, unsigned long reclaimed, int order) { + if (order > PAGE_ALLOC_COSTLY_ORDER) + return; + __vmpressure(gfp, memcg, false, tree, scanned, reclaimed); } diff --git a/mm/vmscan.c b/mm/vmscan.c index d1cb6dfa3800..2a53e3240d04 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2942,7 +2942,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) /* Record the group's reclaim efficiency */ vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, - sc->nr_reclaimed - reclaimed); + sc->nr_reclaimed - reclaimed, sc->order); /* * Direct reclaim and kswapd have to scan all memory @@ -2978,7 +2978,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) /* Record the subtree's reclaim efficiency */ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, sc->nr_scanned - nr_scanned, - sc->nr_reclaimed - nr_reclaimed); + sc->nr_reclaimed - nr_reclaimed, sc->order); if (sc->nr_reclaimed - nr_reclaimed) reclaimable = true; From 169fee49bbfed711371713a236df472ec4ab4cc6 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 14 May 2020 15:16:48 -0700 Subject: [PATCH 051/452] simple_lmk: Consider all positive adjs when finding victims We are allowed to kill any process with a positive adj, so we shouldn't exclude any processes with adjs greater than 999. This would present a problem with quirky applications that set their own adj score, such as stress-ng. In the case of stress-ng, it would set its adj score to 1000 and thus exempt itself from being killed by Simple LMK. This shouldn't be allowed; any process with a positive adj, up to the highest positive adj possible (32767) should be killable. Reported-by: Danny Lin <danny@kdrag0n.dev> Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index f502eb5da8d7..d89e5b1ce363 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -29,21 +29,21 @@ struct victim_info { }; /* Pulled from the Android framework. Lower adj means higher priority. 
*/ -static const short adjs[] = { - 1000, /* CACHED_APP_MAX_ADJ + 1 */ - 950, /* CACHED_APP_LMK_FIRST_ADJ */ - 900, /* CACHED_APP_MIN_ADJ */ - 800, /* SERVICE_B_ADJ */ - 700, /* PREVIOUS_APP_ADJ */ - 600, /* HOME_APP_ADJ */ - 500, /* SERVICE_ADJ */ - 400, /* HEAVY_WEIGHT_APP_ADJ */ - 300, /* BACKUP_APP_ADJ */ - 250, /* PERCEPTIBLE_LOW_APP_ADJ */ - 200, /* PERCEPTIBLE_APP_ADJ */ - 100, /* VISIBLE_APP_ADJ */ - 50, /* PERCEPTIBLE_RECENT_FOREGROUND_APP_ADJ */ - 0 /* FOREGROUND_APP_ADJ */ +static const unsigned short adjs[] = { + SHRT_MAX + 1, /* Include all positive adjs in the final range */ + 950, /* CACHED_APP_LMK_FIRST_ADJ */ + 900, /* CACHED_APP_MIN_ADJ */ + 800, /* SERVICE_B_ADJ */ + 700, /* PREVIOUS_APP_ADJ */ + 600, /* HOME_APP_ADJ */ + 500, /* SERVICE_ADJ */ + 400, /* HEAVY_WEIGHT_APP_ADJ */ + 300, /* BACKUP_APP_ADJ */ + 250, /* PERCEPTIBLE_LOW_APP_ADJ */ + 200, /* PERCEPTIBLE_APP_ADJ */ + 100, /* VISIBLE_APP_ADJ */ + 50, /* PERCEPTIBLE_RECENT_FOREGROUND_APP_ADJ */ + 0 /* FOREGROUND_APP_ADJ */ }; static struct victim_info victims[MAX_VICTIMS]; @@ -85,8 +85,8 @@ static unsigned long get_total_mm_pages(struct mm_struct *mm) return pages; } -static unsigned long find_victims(int *vindex, short target_adj_min, - short target_adj_max) +static unsigned long find_victims(int *vindex, unsigned short target_adj_min, + unsigned short target_adj_max) { unsigned long pages_found = 0; int old_vindex = *vindex; From 3feb9373c92bb0dbd944d40d2396aba6b36dfa2f Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 20 May 2020 09:55:17 -0700 Subject: [PATCH 052/452] mm: Don't stop kswapd on a per-node basis when there are no waiters The page allocator wakes all kswapds in an allocation context's allowed nodemask in the slow path, so it doesn't make sense to have the kswapd- waiter count per each NUMA node. Instead, it should be a global counter to stop all kswapds when there are no failed allocation requests. 
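Condensed, the two halves of the change below fit together like this (an illustrative paraphrase of the diff, not new code):

	/* allocator slow path, mm/page_alloc.c */
	atomic_long_inc(&kswapd_waiters);	/* announce that someone is waiting on reclaim */
	wake_all_kswapds(order, ac);
	/* ... allocation retries ... */
	atomic_long_dec(&kswapd_waiters);	/* done, whether or not a page was obtained */

	/* kswapd balance loop, mm/vmscan.c */
	if (try_to_freeze() || kthread_should_stop() ||
	    !atomic_long_read(&kswapd_waiters))
		break;				/* no waiter anywhere, so kswapd can stop */

With a single global counter, kswapd on any node keeps reclaiming as long as at least one allocation is still stuck in the slow path, rather than stopping because its own node happens to show zero waiters.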
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- include/linux/mmzone.h | 1 - mm/internal.h | 1 + mm/page_alloc.c | 8 ++++---- mm/vmscan.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 619ccfdf47d7..cecf0a58cf66 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -656,7 +656,6 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; - atomic_t kswapd_waiters; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by diff --git a/mm/internal.h b/mm/internal.h index b0302094d4bb..9284225c09f0 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -167,6 +167,7 @@ extern void prep_compound_page(struct page *page, unsigned int order); extern void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern int user_min_free_kbytes; +extern atomic_long_t kswapd_waiters; #if defined CONFIG_COMPACTION || defined CONFIG_CMA diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2dce6a289597..5fb2136b90b8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -75,6 +75,8 @@ #include <asm/div64.h> #include "internal.h" +atomic_long_t kswapd_waiters = ATOMIC_LONG_INIT(0); + /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_FRACTION (8) @@ -3913,7 +3915,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int reserve_flags; unsigned long pages_reclaimed = 0; int retry_loop_count = 0; - pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat; bool woke_kswapd = false; unsigned long jiffies_s = jiffies; u64 utime, stime_s, stime_e, stime_d; @@ -3954,7 +3955,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (gfp_mask & __GFP_KSWAPD_RECLAIM) { if (!woke_kswapd) { - atomic_inc(&pgdat->kswapd_waiters); + atomic_long_inc(&kswapd_waiters); woke_kswapd = true; } wake_all_kswapds(order, ac); @@ -4158,7 +4159,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, fail: got_pg: if (woke_kswapd) - atomic_dec(&pgdat->kswapd_waiters); + atomic_long_dec(&kswapd_waiters); if (!page) warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); @@ -6131,7 +6132,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) pgdat_page_ext_init(pgdat); spin_lock_init(&pgdat->lru_lock); lruvec_init(node_lruvec(pgdat)); - pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0); pgdat->per_cpu_nodestats = &boot_nodestats; diff --git a/mm/vmscan.c b/mm/vmscan.c index 2a53e3240d04..3ccf3b12ab4f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3727,7 +3727,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) /* Check if kswapd should be suspending */ if (try_to_freeze() || kthread_should_stop() || - !atomic_read(&pgdat->kswapd_waiters)) + !atomic_long_read(&kswapd_waiters)) break; /* From 9a9e0060d6752fce75a736c670639524dc33c517 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 20 May 2020 19:52:05 -0700 Subject: [PATCH 053/452] simple_lmk: Hold an RCU read lock instead of the tasklist read lock We already check to see if each eligible process isn't already dying, so an RCU read lock can be used to speed things up instead of holding the tasklist read lock. 
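Illustratively, assuming the scan walks the global task list with for_each_process() (which is safe under an RCU read lock), the locking change amounts to the following sketch; the dying-task test shown is only a stand-in for the driver's existing check:

	struct task_struct *tsk;

	rcu_read_lock();
	for_each_process(tsk) {
		if (tsk->flags & PF_EXITING)	/* stand-in: skip tasks already on their way out */
			continue;
		/* consider tsk as a victim candidate */
	}
	rcu_read_unlock();

The trade-off is that RCU may expose a slightly stale view of the list, but since exiting tasks are skipped anyway, the cheaper lock wins.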
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index d89e5b1ce363..a08287e4fcfe 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -172,18 +172,14 @@ static void scan_and_kill(unsigned long pages_needed) int i, nr_to_kill = 0, nr_victims = 0, ret; unsigned long pages_found = 0; - /* - * Hold the tasklist lock so tasks don't disappear while scanning. This - * is preferred to holding an RCU read lock so that the list of tasks - * is guaranteed to be up to date. - */ - read_lock(&tasklist_lock); + /* Hold an RCU read lock while traversing the global process list */ + rcu_read_lock(); for (i = 1; i < ARRAY_SIZE(adjs); i++) { pages_found += find_victims(&nr_victims, adjs[i], adjs[i - 1]); if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) break; } - read_unlock(&tasklist_lock); + rcu_read_unlock(); /* Pretty unlikely but it can happen */ if (unlikely(!nr_victims)) { From 9a2144a08767b3fe5100b39947f3023fb8321005 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Wed, 10 Jun 2020 17:55:04 -0700 Subject: [PATCH 054/452] simple_lmk: Remove unnecessary clean-up when timeout is reached Zeroing out the mm struct pointers when the timeout is hit isn't needed because mm_free_lock prevents any readers from accessing the mm struct pointers while clean-up occurs, and since the simple_lmk_mm_freed() loop bound is set to zero during clean-up, there is no possibility of dying processes ever reading stale mm struct pointers. Therefore, it is unnecessary to clear out the mm struct pointers when the timeout is reached. Now the only step to do when the timeout is reached is to re-init the completion, but since reinit_completion() just sets a struct member to zero, call reinit_completion() unconditionally as it is faster than encapsulating it within a conditional statement. Also take this opportunity to rename some variables and tidy up some code indentation. 
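Spelled out, the reason stale pointers cannot leak is that the reader's loop bound is zeroed inside the same write-locked section that previously cleared the pointers (paraphrasing the code in the diff below):

	/* writer: scan_and_kill(), after the wait */
	write_lock(&mm_free_lock);
	reinit_completion(&reclaim_done);	/* cheap: just resets the done count */
	nr_victims = 0;				/* this is simple_lmk_mm_freed()'s loop bound */
	write_unlock(&mm_free_lock);

simple_lmk_mm_freed() takes mm_free_lock for reading, so it runs either before this block (while victims[] is still valid) or after it (when nr_victims == 0 and its loop body never executes); in neither case can it dereference a stale victims[i].mm.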
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 40 +++++++++++++++--------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index a08287e4fcfe..b0bffb991aa3 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -50,7 +50,7 @@ static struct victim_info victims[MAX_VICTIMS]; static DECLARE_WAIT_QUEUE_HEAD(oom_waitq); static DECLARE_COMPLETION(reclaim_done); static DEFINE_RWLOCK(mm_free_lock); -static int victims_to_kill; +static int nr_victims; static atomic_t needs_reclaim = ATOMIC_INIT(0); static atomic_t nr_killed = ATOMIC_INIT(0); @@ -169,26 +169,26 @@ static int process_victims(int vlen, unsigned long pages_needed) static void scan_and_kill(unsigned long pages_needed) { - int i, nr_to_kill = 0, nr_victims = 0, ret; + int i, nr_to_kill = 0, nr_found = 0; unsigned long pages_found = 0; /* Hold an RCU read lock while traversing the global process list */ rcu_read_lock(); for (i = 1; i < ARRAY_SIZE(adjs); i++) { - pages_found += find_victims(&nr_victims, adjs[i], adjs[i - 1]); - if (pages_found >= pages_needed || nr_victims == MAX_VICTIMS) + pages_found += find_victims(&nr_found, adjs[i], adjs[i - 1]); + if (pages_found >= pages_needed || nr_found == MAX_VICTIMS) break; } rcu_read_unlock(); /* Pretty unlikely but it can happen */ - if (unlikely(!nr_victims)) { + if (unlikely(!nr_found)) { pr_err("No processes available to kill!\n"); return; } /* First round of victim processing to weed out unneeded victims */ - nr_to_kill = process_victims(nr_victims, pages_needed); + nr_to_kill = process_victims(nr_found, pages_needed); /* * Try to kill as few of the chosen victims as possible by sorting the @@ -202,7 +202,7 @@ static void scan_and_kill(unsigned long pages_needed) /* Store the final number of victims for simple_lmk_mm_freed() */ write_lock(&mm_free_lock); - victims_to_kill = nr_to_kill; + nr_victims = nr_to_kill; write_unlock(&mm_free_lock); /* Kill the victims */ @@ -235,15 +235,10 @@ static void scan_and_kill(unsigned long pages_needed) } /* Wait until all the victims die or until the timeout is reached */ - ret = wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); + wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); write_lock(&mm_free_lock); - if (!ret) { - /* Extra clean-up is needed when the timeout is hit */ - reinit_completion(&reclaim_done); - for (i = 0; i < nr_to_kill; i++) - victims[i].mm = NULL; - } - victims_to_kill = 0; + reinit_completion(&reclaim_done); + nr_victims = 0; nr_killed = (atomic_t)ATOMIC_INIT(0); write_unlock(&mm_free_lock); } @@ -270,14 +265,13 @@ void simple_lmk_mm_freed(struct mm_struct *mm) int i; read_lock(&mm_free_lock); - for (i = 0; i < victims_to_kill; i++) { - if (victims[i].mm != mm) - continue; - - victims[i].mm = NULL; - if (atomic_inc_return_relaxed(&nr_killed) == victims_to_kill) - complete(&reclaim_done); - break; + for (i = 0; i < nr_victims; i++) { + if (victims[i].mm == mm) { + victims[i].mm = NULL; + if (atomic_inc_return_relaxed(&nr_killed) == nr_victims) + complete(&reclaim_done); + break; + } } read_unlock(&mm_free_lock); } From 768d4ad43779ffa78d0c8431832c715cb513482f Mon Sep 17 00:00:00 2001 From: NeilBrown <neilb@suse.com> Date: Fri, 30 Nov 2018 10:33:18 +1100 Subject: [PATCH 055/452] VFS: use synchronize_rcu_expedited() in namespace_unlock() The synchronize_rcu() in namespace_unlock() is called every time a filesystem is unmounted. 
If a great many filesystems are mounted, this can cause a noticeable slow-down in, for example, system shutdown. The sequence: mkdir -p /tmp/Mtest/{0..5000} time for i in /tmp/Mtest/*; do mount -t tmpfs tmpfs $i ; done time umount /tmp/Mtest/* on a 4-cpu VM can report 8 seconds to mount the tmpfs filesystems, and 100 seconds to unmount them. Boot the same VM with 1 CPU and it takes 18 seconds to mount the tmpfs filesystems, but only 36 to unmount. If we change the synchronize_rcu() to synchronize_rcu_expedited(), the umount time on a 4-cpu VM drops to 0.6 seconds. I think this 200-fold speed up is worth the slightly higher system impact of using synchronize_rcu_expedited(). Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> (from general rcu perspective) Signed-off-by: NeilBrown <neilb@suse.com> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/namespace.c b/fs/namespace.c index eaa138384686..c06a0954a9d2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1842,7 +1842,7 @@ static void namespace_unlock(void) if (likely(hlist_empty(&head))) return; - synchronize_rcu(); + synchronize_rcu_expedited(); group_pin_kill(&head); } From 7f4766b69893f4e3000a4d16a71100ee3dde6b87 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 10 Sep 2020 15:41:57 -0700 Subject: [PATCH 056/452] simple_lmk: Print a message when the timeout is reached This aids in selecting an adequate timeout. If the timeout is hit often and Simple LMK is killing too much, then the timeout should be lengthened. If the timeout is rarely hit and Simple LMK is not killing fast enough under pressure, then the timeout should be shortened. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/simple_lmk.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/android/simple_lmk.c b/drivers/android/simple_lmk.c index b0bffb991aa3..0ce3bb924220 100644 --- a/drivers/android/simple_lmk.c +++ b/drivers/android/simple_lmk.c @@ -235,7 +235,10 @@ static void scan_and_kill(unsigned long pages_needed) } /* Wait until all the victims die or until the timeout is reached */ - wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES); + if (!wait_for_completion_timeout(&reclaim_done, RECLAIM_EXPIRES)) + pr_info("Timeout hit waiting for victims to die, proceeding\n"); + + /* Clean up for future reclaim invocations */ write_lock(&mm_free_lock); reinit_completion(&reclaim_done); nr_victims = 0; From 82454907cd25359d79e15fda5a5a5599aa3745c9 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 10 Sep 2020 15:43:46 -0700 Subject: [PATCH 057/452] simple_lmk: Add !PSI dependency When PSI is enabled, lmkd in userspace will use PSI notifications to perform low memory kills. Therefore, to ensure that Simple LMK is the only active LMK implementation, add a !PSI dependency.
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 6a87d1298c5e..6e3fce05d861 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -44,7 +44,7 @@ config ANDROID_BINDER_IPC_SELFTEST config ANDROID_SIMPLE_LMK bool "Simple Android Low Memory Killer" - depends on !ANDROID_LOW_MEMORY_KILLER && !MEMCG + depends on !ANDROID_LOW_MEMORY_KILLER && !MEMCG && !PSI ---help--- This is a complete low memory killer solution for Android that is small and simple. Processes are killed according to the priorities From ec8a6d0526cf6a24e2a7c58cd9328d5fb19f96ba Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf <sultan@kerneltoast.com> Date: Thu, 10 Sep 2020 15:51:17 -0700 Subject: [PATCH 058/452] simple_lmk: Update Kconfig description for VM pressure change Simple LMK uses VM pressure now, not a kswapd hook like before. Update the Kconfig description to reflect such. Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> --- drivers/android/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 6e3fce05d861..c46fbd79e7fa 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -50,8 +50,8 @@ config ANDROID_SIMPLE_LMK small and simple. Processes are killed according to the priorities that Android gives them, so that the least important processes are always killed first. Processes are killed until memory deficits are - satisfied, as observed from kswapd struggling to free up pages. Simple - LMK stops killing processes when kswapd finally goes back to sleep. + satisfied, as observed from direct reclaim and kswapd reclaim + struggling to free up pages, via VM pressure notifications. if ANDROID_SIMPLE_LMK From b96c62bd6894ab9a85322d9fefd2d2dc2588d85f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 12 Feb 2020 19:43:01 +0300 Subject: [PATCH 059/452] Makefile: ignore toolchain directory Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 6f8e18d17152..294d53d3bc88 100644 --- a/Makefile +++ b/Makefile @@ -460,10 +460,12 @@ export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_ve # Files to ignore in find ... 
statements export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o \ - -name CVS -o -name .pc -o -name .hg -o -name .git \) \ + -name CVS -o -name .pc -o -name .hg -o -name .git -o \ + -name toolchain \) \ -prune -o export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ - --exclude CVS --exclude .pc --exclude .hg --exclude .git + --exclude CVS --exclude .pc --exclude .hg --exclude .git \ + --exclude toolchain # =========================================================================== # Rules shared between *config targets and build targets From cbf46f02e1131120f5466553444c2b82d22a0e70 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 10 Sep 2020 19:12:14 +0300 Subject: [PATCH 060/452] Makefile: revert back to original compiler variables Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 294d53d3bc88..ba827944994e 100644 --- a/Makefile +++ b/Makefile @@ -310,10 +310,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ # "make" in the configured kernel build directory always uses that. # Default value for CROSS_COMPILE is not to prefix executables # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile -#ARCH ?= $(SUBARCH) -#CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) -ARCH ?= arm64 -CROSS_COMPILE ?= $(srctree)/toolchain/gcc-cfp/gcc-cfp-jopp-only/aarch64-linux-android-4.9/bin/aarch64-linux-android- +ARCH ?= $(SUBARCH) +CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) # Architecture as present in compile.h UTS_MACHINE := $(ARCH) @@ -375,14 +373,14 @@ HOST_LOADLIBES := $(HOST_LFS_LIBS) AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld LDGOLD = $(CROSS_COMPILE)ld.gold -#CC = $(CROSS_COMPILE)gcc -CC = $(srctree)/toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/clang +CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump +READELF = $(CROSS_COMPILE)readelf AWK = awk GENKSYMS = scripts/genksyms/genksyms INSTALLKERNEL := installkernel @@ -391,11 +389,6 @@ PERL = perl PYTHON = python CHECK = sparse -ifeq ($(CONFIG_EXYNOS_FMP_FIPS),) -READELF = $(CROSS_COMPILE)readelf -export READELF -endif - CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void $(CF) NOSTDINC_FLAGS = @@ -440,7 +433,7 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds GCC_PLUGINS_CFLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES +export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS @@ -513,8 +506,7 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -#CLANG_TRIPLE ?= $(CROSS_COMPILE) -CLANG_TRIPLE ?= $(srctree)/toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/aarch64-linux-gnu- +CLANG_TRIPLE ?= $(CROSS_COMPILE) CLANG_FLAGS := --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) From 4c35769553c9cae0a331eb4ea1dcf83bb51af394 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 20 Oct 2020 01:01:46 +0300 Subject: [PATCH 061/452] 
firmware/Makefile: fix out-of-tree build Signed-off-by: Denis Efremov <efremov@linux.com> --- firmware/Makefile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/firmware/Makefile b/firmware/Makefile index 5bcb1eb32452..84711ec93fe4 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -8,7 +8,7 @@ CONFIG_EXTRA_FIRMWARE_DIR="firmware" # Create $(fwabs) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a # leading /, it's relative to $(srctree). fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR)) -fwabs := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) +fwabs := $(filter-out /%,$(fwdir))$(filter /%,$(fwdir)) fw-external-y := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)) @@ -209,13 +209,13 @@ wordsize_deps := $(wildcard include/config/64bit.h include/config/32bit.h \ firmware/Makefile) $(patsubst %,$(obj)/%.gen.S, $(fw-shipped-y)): %: $(wordsize_deps) - $(call cmd,fwbin,$(fwabs)/$(patsubst $(obj)/%.gen.S,%,$@)) + $(call cmd,fwbin,$(patsubst %.gen.S,%,$@)) $(patsubst %,$(obj)/%.gen.S, $(fw-external-y)): %: $(wordsize_deps) - $(call cmd,fwbin,$(fwabs)/$(patsubst $(obj)/%.gen.S,%,$@)) + $(call cmd,fwbin,$(patsubst %.gen.S,%,$@)) # The .o files depend on the binaries directly; the .S files don't. -$(patsubst %,$(obj)/%.gen.o, $(fw-shipped-y)): $(obj)/%.gen.o: $(fwdir)/% -$(patsubst %,$(obj)/%.gen.o, $(fw-external-y)): $(obj)/%.gen.o: $(fwdir)/% +$(patsubst %,$(obj)/%.gen.o, $(fw-shipped-y)): %.gen.o: % +$(patsubst %,$(obj)/%.gen.o, $(fw-external-y)): %.gen.o: % # .ihex is used just as a simple way to hold binary files in a source tree # where binaries are frowned upon. They are directly converted with objcopy. From 1a68e803ac2d92f1da884f723cf7c6802fe39641 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 18:48:58 +0300 Subject: [PATCH 062/452] wholetree: .gitignore update Signed-off-by: Denis Efremov <efremov@linux.com> --- .gitignore | 6 ++++++ firmware/five/.gitignore | 1 + lib/.gitignore | 1 + scripts/rkp_cfp/.gitignore | 1 + security/proca/.gitignore | 2 ++ security/samsung/defex_lsm/.gitignore | 3 +++ 6 files changed, 14 insertions(+) create mode 100644 firmware/five/.gitignore create mode 100644 scripts/rkp_cfp/.gitignore create mode 100644 security/proca/.gitignore create mode 100644 security/samsung/defex_lsm/.gitignore diff --git a/.gitignore b/.gitignore index be92dfa89957..880734d33f8e 100644 --- a/.gitignore +++ b/.gitignore @@ -125,3 +125,9 @@ all.config # fetched Android config fragments kernel/configs/android-*.cfg + +# samsung +*.dtbo +*.reverse.dts +__pycache__/ +*.pyc diff --git a/firmware/five/.gitignore b/firmware/five/.gitignore new file mode 100644 index 000000000000..0994479aa88a --- /dev/null +++ b/firmware/five/.gitignore @@ -0,0 +1 @@ +*.tlbin diff --git a/lib/.gitignore b/lib/.gitignore index 09aae85418ab..5d4f9603e477 100644 --- a/lib/.gitignore +++ b/lib/.gitignore @@ -4,3 +4,4 @@ gen_crc32table crc32table.h oid_registry_data.c +libdss.c diff --git a/scripts/rkp_cfp/.gitignore b/scripts/rkp_cfp/.gitignore new file mode 100644 index 000000000000..82520ca123af --- /dev/null +++ b/scripts/rkp_cfp/.gitignore @@ -0,0 +1 @@ +/tmp/ diff --git a/security/proca/.gitignore b/security/proca/.gitignore new file mode 100644 index 000000000000..d95557d705a3 --- /dev/null +++ b/security/proca/.gitignore @@ -0,0 +1,2 @@ +proca_certificate-asn1.h +proca_certificate-asn1.c diff --git a/security/samsung/defex_lsm/.gitignore b/security/samsung/defex_lsm/.gitignore new file mode 100644 
index 000000000000..16ed5d7821b2 --- /dev/null +++ b/security/samsung/defex_lsm/.gitignore @@ -0,0 +1,3 @@ +*.der +defex_packed_rules.inc +pack_rules From 39368cb4688ef649b259f9d1053a2543ae024b56 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 20:43:04 +0300 Subject: [PATCH 063/452] kernel: Add CC_WERROR config to turn warnings into errors Add configuration option CONFIG_CC_WERROR to prevent warnings from creeping in. Signed-off-by: Chris Fries <cfries@google.com> Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 6 +++++- lib/Kconfig.debug | 8 ++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ba827944994e..cce28dd381d6 100644 --- a/Makefile +++ b/Makefile @@ -421,9 +421,9 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -Werror \ -Xassembler -march=armv8-a+lse \ -std=gnu89 + KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := @@ -647,6 +647,10 @@ else include/config/auto.conf: ; endif # $(dot-config) +ifdef CONFIG_CC_WERROR + KBUILD_CFLAGS += -Werror +endif + # For the kernel to actually contain only the needed exported symbols, # we have to build modules as well to determine what those symbols are. # (this can be evaluated only once include/config/auto.conf has been included) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 85b2e9c7aa23..ccd0e952f182 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1960,6 +1960,14 @@ config BUG_ON_DATA_CORRUPTION If unsure, say N. +config CC_WERROR + bool "Treat all compile warnings as errors" + default y + help + Select this option to set compiler warnings as errors, + to prevent easily-fixable problems from creeping into + the codebase. 
+ source "samples/Kconfig" source "lib/Kconfig.kgdb" From cdf5b07c387f636a89b471b0818fe9ff1ec152e1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 10 Jan 2021 18:19:33 +0300 Subject: [PATCH 064/452] bcmdhd_101_16: add __linux__ define Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index 8eba343dde17..c311e7a849c8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -24,7 +24,8 @@ ##################### DHDCFLAGS += -DBCMUTILS_ERR_CODES -DUSE_NEW_RSPEC_DEFS -DHDCFLAGS += -Wall -Werror -Wstrict-prototypes -Dlinux -DLINUX -DBCMDRIVER \ +DHDCFLAGS += -Dlinux -D__linux__ -DLINUX +DHDCFLAGS += -Wall -Werror -Wstrict-prototypes -DBCMDRIVER \ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ -DDHDTHREAD -DDHD_BCMEVENTS -DSHOW_EVENTS -DWLP2P \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DKEEP_ALIVE -DCSCAN \ From 93ee9a581292b5188cb351c31f5b90efc4a498ab Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 27 Mar 2020 01:40:39 +0300 Subject: [PATCH 065/452] rkp_cfp: fix python version Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/rkp_cfp/instrument.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/rkp_cfp/instrument.py b/scripts/rkp_cfp/instrument.py index 1895ba96b8bd..d19ea7e01031 100755 --- a/scripts/rkp_cfp/instrument.py +++ b/scripts/rkp_cfp/instrument.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # Copyright (c) 2016 Samsung Electronics Co., Ltd. 
# Authors: James Gleeson <jagleeso@gmail.com> From 2a51a87bc40c2d254ef7efa31432d75f94a1301c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 9 Sep 2020 18:32:37 +0300 Subject: [PATCH 066/452] lib/libdss-build.sh: fix python version Signed-off-by: Denis Efremov <efremov@linux.com> --- lib/libdss-build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/libdss-build.sh b/lib/libdss-build.sh index 30d3685c53f1..60c71ab9aa4e 100755 --- a/lib/libdss-build.sh +++ b/lib/libdss-build.sh @@ -3,7 +3,7 @@ if [[ ${CC} = *"clang" ]]; then CC_DIR=$(dirname "${CC}") export PATH=$PATH:${CC_DIR} rm -rf lib/libdss.c -python lib/make_libdss.py &> lib/libdss.c +python2 lib/make_libdss.py &> lib/libdss.c ${CC} \ --target=aarch64-linux-gnu \ -Ilib/libdss-include \ From 5f33f371a1acaf0055d7f994da9ae2ea9f3cce96 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 28 Sep 2020 01:10:39 +0300 Subject: [PATCH 067/452] security/samsung/five: fix include paths Signed-off-by: Denis Efremov <efremov@linux.com> --- security/samsung/five/Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/security/samsung/five/Makefile b/security/samsung/five/Makefile index 8c548f526579..74e6d169463d 100644 --- a/security/samsung/five/Makefile +++ b/security/samsung/five/Makefile @@ -2,10 +2,10 @@ obj-$(CONFIG_FIVE) += five.o obj-$(CONFIG_FIVE_PA_FEATURE) += five_pa.o -EXTRA_CFLAGS += -I$(src) -asflags-y += -Isecurity/integrity/five -asflags-y += -Isecurity/samsung/five -ccflags-y += -I$(srctree) +asflags-y += -Wa,-I$(srctree)/security/integrity/five +asflags-y += -Wa,-I$(srctree)/$(src) +ccflags-y += -I$(srctree)/ +ccflags-y += -I$(srctree)/$(src) ccflags-y += -Wformat five-y := five_lv.o five_cert.o five_keyring.o five_init.o \ From 49f02d58d418875457442e42b9cd8e2e01ffc344 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 28 Sep 2020 01:11:20 +0300 Subject: [PATCH 068/452] drivers/misc/tzdev/tz_deploy_tzar: fix startup.tzar inclusion Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/misc/tzdev/Makefile | 6 +++--- drivers/misc/tzdev/tz_deploy_tzar.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/misc/tzdev/Makefile b/drivers/misc/tzdev/Makefile index d0f764b404f4..ac199989c8bc 100644 --- a/drivers/misc/tzdev/Makefile +++ b/drivers/misc/tzdev/Makefile @@ -1,6 +1,6 @@ -subdir-ccflags-y += -I$(srctree)/drivers/misc/tzdev/include -subdir-ccflags-y += -I$(srctree)/drivers/misc/tzdev -ccflags-$(CONFIG_TZDEV_DEPLOY_TZAR) += -D"_STR(s)=\#s" -D"KBUILD_SRC=_STR($(KBUILD_SRC))" +subdir-ccflags-y += -I$(srctree)/$(src)/include +subdir-ccflags-y += -I$(srctree)/$(src) +ccflags-$(CONFIG_TZDEV_DEPLOY_TZAR) += -Wa,-I$(srctree)/$(src) obj-$(CONFIG_TZDEV) += lib/ obj-$(CONFIG_TZDEV) += teec/ diff --git a/drivers/misc/tzdev/tz_deploy_tzar.c b/drivers/misc/tzdev/tz_deploy_tzar.c index fc16e191cddf..5a280e4ad0fc 100644 --- a/drivers/misc/tzdev/tz_deploy_tzar.c +++ b/drivers/misc/tzdev/tz_deploy_tzar.c @@ -33,7 +33,7 @@ __asm__ ( ".section .init.data,\"aw\"\n" "tzdev_tzar_begin:\n" - ".incbin \"" KBUILD_SRC "/drivers/misc/tzdev/startup.tzar\"\n" + ".incbin \"startup.tzar\"\n" "tzdev_tzar_end:\n" ".previous\n" ); From d5494eda274e24fd0b53754ed1d492c0ed5a97d4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 16:33:20 +0300 Subject: [PATCH 069/452] drivers: leds: fix double definition of LEDS_KTD2692 Signed-off-by: Denis Efremov <efremov@linux.com> --- 
drivers/leds/Kconfig | 6 ------ drivers/leds/Makefile | 1 - 2 files changed, 7 deletions(-) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 2ff5ee5ef6b9..6c4a2adf6eac 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -731,12 +731,6 @@ config LEDS_S2MPB02 help This option enables support for the LEDs on the S2MPB02. -config LEDS_KTD2692 - bool "LED support for the KTD2692" - help - If you say yes here you will get support for - for the KTD2692 FLASH led chip. - config LEDS_S2MPB02_MULTI_TORCH_REAR2 bool "LED support for second multi flash" help diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 5f19157ef445..76a3f8a3addb 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -78,7 +78,6 @@ obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o obj-$(CONFIG_LEDS_S2MPB02) += leds-s2mpb02.o -obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o From 10f7b50bb79dcf2d3d6d2029613957efe53cf114 Mon Sep 17 00:00:00 2001 From: Paulo Alcantara <paulo@paulo.ac> Date: Sun, 24 Feb 2019 21:55:28 -0300 Subject: [PATCH 070/452] selinux: use kernel linux/socket.h for genheaders and mdp commit dfbd199a7cfe3e3cd8531e1353cdbd7175bfbc5e upstream. When compiling genheaders and mdp from a newer host kernel, the following error happens: In file included from scripts/selinux/genheaders/genheaders.c:18: ./security/selinux/include/classmap.h:238:2: error: #error New address family defined, please update secclass_map. #error New address family defined, please update secclass_map. ^~~~~ make[3]: *** [scripts/Makefile.host:107: scripts/selinux/genheaders/genheaders] Error 1 make[2]: *** [scripts/Makefile.build:599: scripts/selinux/genheaders] Error 2 make[1]: *** [scripts/Makefile.build:599: scripts/selinux] Error 2 make[1]: *** Waiting for unfinished jobs.... Instead of relying on the host definition, include linux/socket.h in classmap.h to have PF_MAX. 
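The check that trips is the compile-time guard in classmap.h, which (give or take the exact bound used in this tree) reads roughly:

	#include <linux/socket.h>	/* the include this patch adds, so PF_MAX comes from the kernel */

	#if PF_MAX > 44			/* bound at the time this guard was written; may differ here */
	#error New address family defined, please update secclass_map.
	#endif

Previously the host tools picked up PF_MAX from the host's own sys/socket.h, so a newer glibc that defines extra address families would trigger the #error even though the kernel sources were unchanged.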
Cc: stable@vger.kernel.org Signed-off-by: Paulo Alcantara <paulo@paulo.ac> Acked-by: Stephen Smalley <sds@tycho.nsa.gov> [PM: manually merge in mdp.c, subject line tweaks] Signed-off-by: Paul Moore <paul@paul-moore.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/selinux/genheaders/genheaders.c | 1 - scripts/selinux/mdp/mdp.c | 1 - security/selinux/include/classmap.h | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c index fa48fabcb330..3cc4893d98cc 100644 --- a/scripts/selinux/genheaders/genheaders.c +++ b/scripts/selinux/genheaders/genheaders.c @@ -9,7 +9,6 @@ #include <string.h> #include <errno.h> #include <ctype.h> -#include <sys/socket.h> struct security_class_mapping { const char *name; diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c index ffe8179f5d41..c29fa4a6228d 100644 --- a/scripts/selinux/mdp/mdp.c +++ b/scripts/selinux/mdp/mdp.c @@ -32,7 +32,6 @@ #include <stdlib.h> #include <unistd.h> #include <string.h> -#include <sys/socket.h> static void usage(char *name) { diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 05ecb689f8e4..21e523cc32ce 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/capability.h> +#include <linux/socket.h> #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" From 12c6658a115a9d4b97de496be3d7df19a962138d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 16:59:03 +0300 Subject: [PATCH 071/452] drivers/samsung/misc/Kconfig: remove leading whitespace Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/misc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/samsung/misc/Kconfig b/drivers/samsung/misc/Kconfig index f0fa141f3d06..6bc5d951efb9 100644 --- a/drivers/samsung/misc/Kconfig +++ b/drivers/samsung/misc/Kconfig @@ -107,7 +107,7 @@ config SEC_NAD_C Samsung TN NAD Feature, Enable NAD C config SEC_NAD_BPS_CLASSIFIER - bool " Samsung TN NAD BPS Classifier Feature" + bool "Samsung TN NAD BPS Classifier Feature" depends on SEC_NAD default n help From f21f370c1b4656ce89e16593c0efd21b22b37008 Mon Sep 17 00:00:00 2001 From: Brian Norris <briannorris@chromium.org> Date: Wed, 14 Nov 2018 18:11:18 -0800 Subject: [PATCH 072/452] scripts/setlocalversion: Improve -dirty check with git-status --no-optional-locks [ Upstream commit ff64dd4857303dd5550faed9fd598ac90f0f2238 ] git-diff-index does not refresh the index for you, so using it for a "-dirty" check can give misleading results. Commit 6147b1cf19651 ("scripts/setlocalversion: git: Make -dirty check more robust") tried to fix this by switching to git-status, but it overlooked the fact that git-status also writes to the .git directory of the source tree, which is definitely not kosher for an out-of-tree (O=) build. That is getting reverted. Fortunately, git-status now supports avoiding writing to the index via the --no-optional-locks flag, as of git 2.14. It still calculates an up-to-date index, but it avoids writing it out to the .git directory. 
So, let's retry the solution from commit 6147b1cf19651 using this new flag first, and if it fails, we assume this is an older version of git and just use the old git-diff-index method. It's hairy to get the 'grep -vq' (inverted matching) correct by stashing the output of git-status (you have to be careful about the difference between "empty stdin" and "blank line on stdin"), so just pipe the output directly to grep and use a regex that's good enough for both the git-status and git-diff-index version. Cc: Christian Kujau <lists@nerdbynature.de> Cc: Guenter Roeck <linux@roeck-us.net> Suggested-by: Alexander Kapshuk <alexander.kapshuk@gmail.com> Signed-off-by: Brian Norris <briannorris@chromium.org> Tested-by: Genki Sky <sky@genki.is> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Sasha Levin <sashal@kernel.org> Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/setlocalversion | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 71f39410691b..365b3c2b8f43 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -73,8 +73,16 @@ scm_version() printf -- '-svn%s' "`git svn find-rev $head`" fi - # Check for uncommitted changes - if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then + # Check for uncommitted changes. + # First, with git-status, but --no-optional-locks is only + # supported in git >= 2.14, so fall back to git-diff-index if + # it fails. Note that git-diff-index does not refresh the + # index, so it may give misleading results. See + # git-update-index(1), git-diff-index(1), and git-status(1). + if { + git --no-optional-locks status -uno --porcelain 2>/dev/null || + git diff-index --name-only HEAD + } | grep -qvE '^(.. )?scripts/package'; then printf '%s' -dirty fi From 1a17d1e466eb191d0bb65dd8ddecb01cafec8b55 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 19:35:22 +0300 Subject: [PATCH 073/452] vmlinux.lds.S: fix undefined reference to `idmap_pg_dir' Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e0b071913f44..ca8ceaac4730 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -217,7 +217,7 @@ SECTIONS BSS_SECTION(0, 0, 0) . = ALIGN(PAGE_SIZE); -#ifndef CONFIG_UH +#ifndef CONFIG_UH_RKP idmap_pg_dir = .; .
+= IDMAP_DIR_SIZE; swapper_pg_dir = .; From b259a1f1ee7fafe4954b54ca13cd22470c28aaa4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 18:07:53 +0300 Subject: [PATCH 074/452] exynos/fimc-is2: fix section mismatch for fimc_is_resourcemgr_probe() Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c | 2 +- drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c index a594b5dee75b..519d81fdd441 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c @@ -1260,7 +1260,7 @@ static struct notifier_block notify_reboot_block = { }; #endif -int fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, +int __init fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, void *private_data, struct platform_device *pdev) { int ret = 0; diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h index 709561a0bfea..2284fccbb30a 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.h @@ -158,7 +158,7 @@ struct fimc_is_resourcemgr { u32 streaming_cnt; }; -int fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, void *private_data, struct platform_device *pdev); +int __init fimc_is_resourcemgr_probe(struct fimc_is_resourcemgr *resourcemgr, void *private_data, struct platform_device *pdev); int fimc_is_resource_open(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type, void **device); int fimc_is_resource_get(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type); int fimc_is_resource_put(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type); From 038b0f99ac210322c81bd178cd0586b578e97816 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 28 Feb 2020 22:05:29 +0300 Subject: [PATCH 075/452] modem_v1: fix section mismatch warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/misc/modem_v1/boot_device_spi.c | 2 +- drivers/misc/modem_v1_dual/boot_device_spi.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/misc/modem_v1/boot_device_spi.c b/drivers/misc/modem_v1/boot_device_spi.c index 64c872599e96..4de02d55c90c 100644 --- a/drivers/misc/modem_v1/boot_device_spi.c +++ b/drivers/misc/modem_v1/boot_device_spi.c @@ -231,7 +231,7 @@ static const struct file_operations modem_spi_boot_fops = { .unlocked_ioctl = spi_boot_ioctl, }; -static int __init modem_spi_boot_probe(struct spi_device *spi) +static int modem_spi_boot_probe(struct spi_device *spi) { int ret; struct device *dev = &spi->dev; diff --git a/drivers/misc/modem_v1_dual/boot_device_spi.c b/drivers/misc/modem_v1_dual/boot_device_spi.c index 48f82b761c0c..dac41c30a17e 100644 --- a/drivers/misc/modem_v1_dual/boot_device_spi.c +++ b/drivers/misc/modem_v1_dual/boot_device_spi.c @@ -232,7 +232,7 @@ static const struct file_operations modem_spi_boot_fops = { .unlocked_ioctl = spi_boot_ioctl, }; -static int __init modem_spi_boot_probe(struct spi_device *spi) +static int modem_spi_boot_probe(struct spi_device *spi) { int ret; struct device *dev = &spi->dev; From 06f1adc7444a546c21f38ec4ad3b5dc9e8ce443c Mon Sep 17 00:00:00 2001 From: 
Denis Efremov <efremov@linux.com> Date: Thu, 10 Sep 2020 17:44:16 +0300 Subject: [PATCH 076/452] drivers: fingerprint: unexport fpsensor_goto_suspend() Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/fingerprint/fingerprint.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/fingerprint/fingerprint.h b/drivers/fingerprint/fingerprint.h index 42cc3cf72f17..13c764c1e0c9 100644 --- a/drivers/fingerprint/fingerprint.h +++ b/drivers/fingerprint/fingerprint.h @@ -73,7 +73,6 @@ EXPORT_SYMBOL(fp_lockscreen_mode); #endif extern int fpsensor_goto_suspend; -EXPORT_SYMBOL(fpsensor_goto_suspend); #endif #endif From 3238f6fa07a5b8489b9c0a42b7efcdb0908dc409 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 10 Sep 2020 18:56:07 +0300 Subject: [PATCH 077/452] spu-verify: add stub for spu_firmware_signature_verify() Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/spu_verify/Makefile | 2 +- include/linux/spu-verify.h | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/drivers/spu_verify/Makefile b/drivers/spu_verify/Makefile index ce595a158d33..09fd2a27cece 100644 --- a/drivers/spu_verify/Makefile +++ b/drivers/spu_verify/Makefile @@ -1 +1 @@ -obj-y += spu-sign-verify.o \ No newline at end of file +obj-$(CONFIG_SPU_VERIFY) += spu-sign-verify.o diff --git a/include/linux/spu-verify.h b/include/linux/spu-verify.h index 3fac19d8d558..cada9ad77987 100644 --- a/include/linux/spu-verify.h +++ b/include/linux/spu-verify.h @@ -26,6 +26,40 @@ /* TOTAL METADATA SIZE */ #define SPU_METADATA_SIZE(FW) ( (TAG_LEN(FW)) + (DIGEST_LEN) + (SIGN_LEN) ) +#ifdef CONFIG_SPU_VERIFY extern long spu_firmware_signature_verify(const char* fw_name, const u8* fw_data, const long fw_size); +#else +static inline long spu_firmware_signature_verify(const char* fw_name, const u8* fw_data, const long fw_size) { + const static struct { + const char *tag; + int len; + int metadata_size; + } tags[] = { + { TSP_TAG, TAG_LEN(TSP), SPU_METADATA_SIZE(TSP) }, + { MFC_TAG, TAG_LEN(MFC), SPU_METADATA_SIZE(MFC) }, + { WACOM_TAG, TAG_LEN(WACOM), SPU_METADATA_SIZE(WACOM) }, + { PDIC_TAG, TAG_LEN(PDIC), SPU_METADATA_SIZE(PDIC) }, + { SENSORHUB_TAG, TAG_LEN(SENSORHUB), SPU_METADATA_SIZE(SENSORHUB) }, + }; + int i; -#endif //end _SPU_VERIFY_H_ \ No newline at end of file + if (!fw_name || !fw_data || fw_size < 0) { + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(tags); ++i) { + if(!strncmp(fw_name, tags[i].tag, tags[i].len)) { + long offset = fw_size - tags[i].metadata_size; + if (!strncmp(fw_name, fw_data + offset, tags[i].len)) { + return offset; + } else { + return -EINVAL; + } + } + } + + return -EINVAL; +} +#endif + +#endif //end _SPU_VERIFY_H_ From a3b52bde11ffda5c6ca67d83a659d13248341969 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 21:05:53 +0300 Subject: [PATCH 078/452] samsung: debug_test: fix linking error for simulate_WRITE_RO() Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/debug/sec_debug_test.c | 2 +- drivers/soc/samsung/debug/exynos-debug-test.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/samsung/debug/sec_debug_test.c b/drivers/samsung/debug/sec_debug_test.c index 3168d1f764a3..552449bcb020 100644 --- a/drivers/samsung/debug/sec_debug_test.c +++ b/drivers/samsung/debug/sec_debug_test.c @@ -768,7 +768,7 @@ static void simulate_WRITE_RO(char *arg) #ifdef CONFIG_RKP_CFP_JOPP ptr = (unsigned long *)__start_rodata; #else - ptr = 
(unsigned long *)simulate_WRITE_RO; + ptr = NULL; #endif *ptr ^= 0x12345678; } diff --git a/drivers/soc/samsung/debug/exynos-debug-test.c b/drivers/soc/samsung/debug/exynos-debug-test.c index 2dd874fd0479..f90bfc164193 100644 --- a/drivers/soc/samsung/debug/exynos-debug-test.c +++ b/drivers/soc/samsung/debug/exynos-debug-test.c @@ -787,7 +787,7 @@ static void simulate_WRITE_RO(char *arg) #ifdef CONFIG_RKP_CFP_JOPP ptr = (unsigned long *)__start_rodata; #else - ptr = (unsigned long *)simulate_WRITE_RO; + ptr = NULL; #endif *ptr ^= 0x12345678; } From cadc2c456aa6b9b8cc0dae277539f81777118a6b Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 18:43:13 +0300 Subject: [PATCH 079/452] security: samsung: defex: DEFEX_KERNEL_ONLY=y by default Signed-off-by: Denis Efremov <efremov@linux.com> --- security/samsung/defex_lsm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/samsung/defex_lsm/Kconfig b/security/samsung/defex_lsm/Kconfig index 1ed27b1f0a23..f2638458f598 100644 --- a/security/samsung/defex_lsm/Kconfig +++ b/security/samsung/defex_lsm/Kconfig @@ -16,7 +16,7 @@ config SECURITY_DEFEX config DEFEX_KERNEL_ONLY bool "Defex Kernel Only" depends on SECURITY_DEFEX - default n + default y help This lets defex know whether kernel-only build or not. Default value will be set to "y" if the build is kernel-only. From 80e24f166f66bc6b2e1c705ac42fde0cd4f899b7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 11 Mar 2020 17:18:51 +0300 Subject: [PATCH 080/452] security: selinux: make audit optional Signed-off-by: Denis Efremov <efremov@linux.com> --- security/selinux/Kconfig | 2 +- security/selinux/avc.c | 46 +++++++++++++++++++++++----------- security/selinux/hooks.c | 29 +++++++++++++-------- security/selinux/include/avc.h | 2 ++ security/selinux/ss/services.c | 19 ++++++++++++-- 5 files changed, 71 insertions(+), 27 deletions(-) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index 8af7a690eb40..8609ed8f5fc5 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -1,6 +1,6 @@ config SECURITY_SELINUX bool "NSA SELinux Support" - depends on SECURITY_NETWORK && AUDIT && NET && INET + depends on SECURITY_NETWORK && NET && INET select NETWORK_SECMARK default n help diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 7134ac3af173..da4a3d7f56d5 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -107,6 +107,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); } +#ifdef CONFIG_AUDIT /** * avc_dump_av - Display an access vector in human-readable form. * @tclass: target security class @@ -174,6 +175,7 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)); audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); } +#endif /** * avc_init - Initialize the AVC. 
@@ -438,6 +440,7 @@ static int avc_xperms_populate(struct avc_node *node, } +#ifdef CONFIG_AUDIT static inline u32 avc_xperms_audit_required(u32 requested, struct av_decision *avd, struct extended_perms_decision *xpd, @@ -483,6 +486,7 @@ static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass, return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied, result, ad, 0); } +#endif static void avc_node_free(struct rcu_head *rhead) { @@ -708,6 +712,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, return node; } +#ifdef CONFIG_AUDIT /** * avc_audit_pre_callback - SELinux specific information * will be called by generic audit code @@ -781,6 +786,7 @@ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); return 0; } +#endif /** * avc_add_callback - Register a callback for security events. @@ -1079,7 +1085,7 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct extended_perms_data dontaudit; struct avc_xperms_node local_xp_node; struct avc_xperms_node *xp_node; - int rc = 0, rc2; + int rc = 0; xp_node = &local_xp_node; BUG_ON(!requested); @@ -1133,10 +1139,14 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, rcu_read_unlock(); - rc2 = avc_xperms_audit(ssid, tsid, tclass, requested, - &avd, xpd, xperm, rc, ad); - if (rc2) - return rc2; +#ifdef CONFIG_AUDIT + { + int rc2 = avc_xperms_audit(ssid, tsid, tclass, requested, + &avd, xpd, xperm, rc, ad); + if (rc2) + return rc2; + } +#endif return rc; } @@ -1208,13 +1218,17 @@ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata) { struct av_decision avd; - int rc, rc2; + int rc; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); - rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 0); - if (rc2) - return rc2; +#ifdef CONFIG_AUDIT + { + int rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 0); + if (rc2) + return rc2; + } +#endif return rc; } @@ -1223,14 +1237,18 @@ int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, int flags) { struct av_decision avd; - int rc, rc2; + int rc; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); - rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, - auditdata, flags); - if (rc2) - return rc2; +#ifdef CONFIG_AUDIT + { + int rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, + auditdata, flags); + if (rc2) + return rc2; + } +#endif return rc; } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 40c7112e5875..634f718d6e39 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1897,11 +1897,13 @@ static int cred_has_capability(const struct cred *cred, } rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); +#ifdef CONFIG_AUDIT if (audit == SECURITY_CAP_AUDIT) { int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0); if (rc2) return rc2; } +#endif return rc; } @@ -3213,6 +3215,7 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode, rcu ? 
MAY_NOT_BLOCK : 0); } +#ifdef CONFIG_AUDIT static noinline int audit_inode_permission(struct inode *inode, u32 perms, u32 audited, u32 denied, int result, @@ -3231,6 +3234,7 @@ static noinline int audit_inode_permission(struct inode *inode, return rc; return 0; } +#endif static int selinux_inode_permission(struct inode *inode, int mask) { @@ -3241,8 +3245,7 @@ static int selinux_inode_permission(struct inode *inode, int mask) struct inode_security_struct *isec; u32 sid; struct av_decision avd; - int rc, rc2; - u32 audited, denied; + int rc; from_access = mask & MAY_ACCESS; mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); @@ -3282,15 +3285,21 @@ static int selinux_inode_permission(struct inode *inode, int mask) // ] SEC_SELINUX_PORTING_COMMON rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd); - audited = avc_audit_required(perms, &avd, rc, - from_access ? FILE__AUDIT_ACCESS : 0, - &denied); - if (likely(!audited)) - return rc; +#ifdef CONFIG_AUDIT + { + int rc2; + u32 audited, denied; + audited = avc_audit_required(perms, &avd, rc, + from_access ? FILE__AUDIT_ACCESS : 0, + &denied); + if (likely(!audited)) + return rc; - rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags); - if (rc2) - return rc2; + rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags); + if (rc2) + return rc2; + } +#endif return rc; } diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index 57d61cf36500..f5bc76c36b53 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h @@ -66,6 +66,7 @@ struct selinux_audit_data { void __init avc_init(void); +#ifdef CONFIG_AUDIT static inline u32 avc_audit_required(u32 requested, struct av_decision *avd, int result, @@ -142,6 +143,7 @@ static inline int avc_audit(u32 ssid, u32 tsid, requested, audited, denied, result, a, flags); } +#endif #define AVC_STRICT 1 /* Ignore permissive mode. */ #define AVC_EXTENDED_PERMS 2 /* update extended permissions */ diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 21301d6248c9..8e923c76950c 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -467,6 +467,7 @@ static int constraint_expr_eval(struct context *scontext, return s[0]; } +#ifdef CONFIG_AUDIT /* * security_dump_masked_av - dumps masked permissions during * security_compute_av due to RBAC, MLS/Constraint and Type bounds. 
@@ -556,6 +557,7 @@ static void security_dump_masked_av(struct context *scontext, return; } +#endif /* * security_boundary_permission - drops violated permissions @@ -609,9 +611,11 @@ static void type_attribute_bounds_av(struct context *scontext, /* mask violated permissions */ avd->allowed &= ~masked; +#ifdef CONFIG_AUDIT /* audit masked permissions */ security_dump_masked_av(scontext, tcontext, tclass, masked, "bounds"); +#endif } /* @@ -753,6 +757,7 @@ static int security_validtrans_handle_fail(struct context *ocontext, struct context *tcontext, u16 tclass) { +#ifdef CONFIG_AUDIT char *o = NULL, *n = NULL, *t = NULL; u32 olen, nlen, tlen; @@ -770,6 +775,7 @@ static int security_validtrans_handle_fail(struct context *ocontext, kfree(o); kfree(n); kfree(t); +#endif // [ SEC_SELINUX_PORTING_COMMON #ifdef CONFIG_ALWAYS_ENFORCE @@ -930,6 +936,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) index = type->bounds; } +#ifdef CONFIG_AUDIT if (rc) { char *old_name = NULL; char *new_name = NULL; @@ -949,6 +956,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) kfree(new_name); kfree(old_name); } +#endif out: read_unlock(&policy_rwlock); @@ -1542,6 +1550,7 @@ static int compute_sid_handle_invalid_context( u16 tclass, struct context *newcontext) { +#ifdef CONFIG_AUDIT char *s = NULL, *t = NULL, *n = NULL; u32 slen, tlen, nlen; @@ -1561,6 +1570,7 @@ static int compute_sid_handle_invalid_context( kfree(s); kfree(t); kfree(n); +#endif // [ SEC_SELINUX_PORTING_COMMON #ifdef CONFIG_ALWAYS_ENFORCE @@ -2886,8 +2896,6 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) struct context *context1; struct context *context2; struct context newcon; - char *s; - u32 len; int rc; rc = 0; @@ -2927,6 +2935,10 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) if (!policydb_context_isvalid(&policydb, &newcon)) { rc = convert_context_handle_invalid_context(&newcon); if (rc) { +#ifdef CONFIG_AUDIT + char *s; + u32 len; + if (!context_struct_to_string(&newcon, &s, &len)) { audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, @@ -2934,6 +2946,7 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) "invalid_context=%s", s); kfree(s); } +#endif goto out_unlock; } } @@ -3156,6 +3169,7 @@ int security_policycap_supported(unsigned int req_cap) return rc; } +#ifdef CONFIG_AUDIT struct selinux_audit_rule { u32 au_seqno; struct context au_ctxt; @@ -3422,6 +3436,7 @@ static int __init aurule_init(void) return err; } __initcall(aurule_init); +#endif #ifdef CONFIG_NETLABEL /** From d20f1837913eac0fdf99f7178d52a0f5dd4e519b Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 19:03:23 +0300 Subject: [PATCH 081/452] security: selinux: Add CONFIG_ALWAYS_ENFORCE Signed-off-by: Denis Efremov <efremov@linux.com> --- security/selinux/Kconfig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index 8609ed8f5fc5..c529054fe423 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -8,6 +8,23 @@ config SECURITY_SELINUX You will also need a policy configuration and a labeled filesystem. If you are unsure how to answer this question, answer N. +choice + prompt "NSA SELinux mode" + default ALWAYS_ENFORCE + depends on SECURITY_SELINUX + +config SECURITY_SELINUX_SWITCH + bool "Dynamically switch between permissive and enforcing" + help + Allow to switch dynamically between permissive and enforcing modes. 
+ +config ALWAYS_ENFORCE + bool "Always Enforce mode" + help + Pin enforcing mode. + +endchoice + config SECURITY_SELINUX_BOOTPARAM bool "NSA SELinux boot parameter" depends on SECURITY_SELINUX From e774bf917d91158535375e310a836005289add9f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 25 Mar 2020 00:30:06 +0300 Subject: [PATCH 082/452] security: selinux: Add CONFIG_ALWAYS_PERMIT Signed-off-by: Denis Efremov <efremov@linux.com> --- security/selinux/Kconfig | 5 +++++ security/selinux/selinuxfs.c | 3 +++ 2 files changed, 8 insertions(+) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index c529054fe423..9b278a40e4ee 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -23,6 +23,11 @@ config ALWAYS_ENFORCE help Pin enforcing mode. +config ALWAYS_PERMIT + bool "Always Permit mode" + help + Pin permissive mode. + endchoice config SECURITY_SELINUX_BOOTPARAM diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 72abcdab314b..934da25e52aa 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -135,6 +135,9 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; +#ifdef CONFIG_ALWAYS_PERMIT + new_value = 0; +#endif // [ SEC_SELINUX_PORTING_COMMON #ifdef CONFIG_ALWAYS_ENFORCE From 075b019c5819851d34c8b7c642dc323798404bf6 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 00:04:54 +0300 Subject: [PATCH 083/452] drivers: kperfmon: suppress missing files warnings Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/kperfmon/Makefile | 30 ++++++++---------------------- include/linux/ologk.h | 8 +++----- 2 files changed, 11 insertions(+), 27 deletions(-) diff --git a/drivers/kperfmon/Makefile b/drivers/kperfmon/Makefile index e5abca82ff2b..057bf4e92de4 100644 --- a/drivers/kperfmon/Makefile +++ b/drivers/kperfmon/Makefile @@ -5,40 +5,26 @@ # Rewritten to use lists instead of if-statements. # -FLAG=1 - -#$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/include/linux/olog.pb.h $(srctree)/drivers/kperfmon/)") - -ifneq ($(CONFIG_KPERFMON), y) FLAG=0 -$(info kperfmon_DUMMY="CONFIG_KPERFMON is off.") -endif - -ifneq ($(shell [ -e $(srctree)/include/linux/olog.pb.h ] && echo exist), exist) -$(info kperfmon_DUMMY="olog.pb.h file is missing... retrying") -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../frameworks/base/proto/src/olog.proto $(srctree)/drivers/kperfmon/)") -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../vendor/samsung/system/libperflog/aprotoc $(srctree)/drivers/kperfmon/)") -$(info kperfmon_DUMMY="$(shell $(srctree)/drivers/kperfmon/aprotoc --perflog_out=./ $(srctree)/drivers/kperfmon/olog.proto)") -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/drivers/kperfmon/olog.pb.h $(srctree)/include/linux/)") -#$(info kperfmon_DUMMY="$(shell ls $(srctree)/drivers/kperfmon/*)") -#$(info kperfmon_DUMMY="$(shell ls $(srctree)/include/linux/olog*)") +ifeq ($(CONFIG_KPERFMON), y) +FLAG=1 +endif +ifeq ($(FLAG), 1) ifneq ($(shell [ -e $(srctree)/include/linux/olog.pb.h ] && echo exist), exist) -$(info kperfmon_DUMMY="olog.pb.h file is missing... 
again") FLAG=0 -endif +$(warning "kperfmon: olog.pb.h file is missing.") endif -ifneq ($(shell [ -e $(srctree)/../../system/core/liblog/include/log/perflog.h ] && echo exist), exist) +ifneq ($(shell [ -e $(srctree)/include/linux/perflog.h ] && echo exist), exist) FLAG=0 -$(info kperfmon_DUMMY="perflog.h file is missing.") +$(warning "kperfmon: perflog.h file is missing.") +endif endif -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/../../system/core/liblog/include/log/perflog.h $(srctree)/include/linux/)") ifeq ($(FLAG), 1) obj-y += kperfmon.o else obj-y += ologk.o -$(info kperfmon_DUMMY="$(shell cp -f $(srctree)/drivers/kperfmon/ologk.h $(srctree)/include/linux/)") endif diff --git a/include/linux/ologk.h b/include/linux/ologk.h index c3c1b8be0b1a..58456d98d3a1 100644 --- a/include/linux/ologk.h +++ b/include/linux/ologk.h @@ -2,13 +2,11 @@ #define _OLOG_KERNEL_H_ #include <linux/unistd.h> -#include "olog.pb.h" #define OLOG_CPU_FREQ_FILTER 1500000 -#define ologk(...) _perflog(PERFLOG_LOG, PERFLOG_UNKNOWN, __VA_ARGS__) -#define perflog(...) _perflog(PERFLOG_LOG, __VA_ARGS__) -extern void _perflog(int type, int logid, const char *fmt, ...); -extern void perflog_evt(int logid, int arg1); +#define ologk(...) +#define perflog(...) +#define perflog_evt(...) #endif From 1b2632b241eb3dfd9e71596a9456487a41529c17 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 15:04:22 +0300 Subject: [PATCH 084/452] net/sch_generic.h: fix "unused" qcb variable warning Signed-off-by: Denis Efremov <efremov@linux.com> --- include/net/sch_generic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f59acacaa265..cf44dc4d4b7e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -277,7 +277,7 @@ struct tcf_block { static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { - struct qdisc_skb_cb *qcb; + struct qdisc_skb_cb *qcb __maybe_unused; BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); BUILD_BUG_ON(sizeof(qcb->data) < sz); From d6468526cb91ff927afa2dbd16d831130b672216 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 23:45:11 +0300 Subject: [PATCH 085/452] block: blk-crypt: fix blk_crypt_initialize() defn Signed-off-by: Denis Efremov <efremov@linux.com> --- block/blk-crypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-crypt.c b/block/blk-crypt.c index dd783891555f..399135cfbe03 100644 --- a/block/blk-crypt.c +++ b/block/blk-crypt.c @@ -210,7 +210,7 @@ void blk_crypt_put_context(blk_crypt_t *bc_ctx) } /* H/W algorithm APIs */ -static int blk_crypt_initialize() +static int blk_crypt_initialize(void) { if (likely(blk_crypt_cachep)) return 0; From a577425324b114586aece893cdecc1ba58f835a2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 23:46:40 +0300 Subject: [PATCH 086/452] samsung: debug: fix sec_platform_watchdog_start_timer() defn Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/debug/sec_debug_platform_watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/samsung/debug/sec_debug_platform_watchdog.c b/drivers/samsung/debug/sec_debug_platform_watchdog.c index 6eaaaae91929..e0de88d72b8b 100644 --- a/drivers/samsung/debug/sec_debug_platform_watchdog.c +++ b/drivers/samsung/debug/sec_debug_platform_watchdog.c @@ -169,7 +169,7 @@ static void sec_platform_watchdog_timer_fn(unsigned 
long data) mod_timer(&sec_platform_watchdog_timer, jiffies + sample_period * HZ); } -static void sec_platform_watchdog_start_timer() +static void sec_platform_watchdog_start_timer(void) { del_timer_sync(&sec_platform_watchdog_timer); mod_timer(&sec_platform_watchdog_timer, jiffies + sample_period * HZ); From 54ec5b3cfb5ed2665f8b55ddd82e7ae16b56ce4e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 23:52:14 +0300 Subject: [PATCH 087/452] drivers: exynos: fimc-is2: fix loop initial declarations Signed-off-by: Denis Efremov <efremov@linux.com> --- .../exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c index 24c102d16680..5923832296cd 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sec-define.c @@ -2952,20 +2952,21 @@ int fimc_is_sec_sensor_find_rear_tof_uid(struct fimc_is_core *core, char *buf) #ifdef CAMERA_REAR_TOF struct fimc_is_vender_specific *specific = core->vender.private_data; struct fimc_is_rom_info *finfo = NULL; + int i; fimc_is_sec_get_sysfs_finfo(&finfo, REAR_TOF_ROM_ID); if (finfo->cal_map_ver[3] >= REAR_TOF_CHECK_MAP_VERSION) { char uid_list[256] = {0, }; char uid_temp[10] = {0, }; - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->rear_tof_uid[i] = *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[i]]); sprintf(uid_temp, "0x%x ", specific->rear_tof_uid[i]); strcat(uid_list, uid_temp); } info("rear_tof_uid: %s\n", uid_list); } else { - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->rear_tof_uid[i] = REAR_TOF_DEFAULT_UID; } info("rear_tof_uid: 0x%x, use default 0x%x", *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[0]]), @@ -2980,20 +2981,21 @@ int fimc_is_sec_sensor_find_front_tof_uid(struct fimc_is_core *core, char *buf) #ifdef CAMERA_FRONT_TOF struct fimc_is_vender_specific *specific = core->vender.private_data; struct fimc_is_rom_info *finfo = NULL; + int i; fimc_is_sec_get_sysfs_finfo(&finfo, FRONT_TOF_ROM_ID); if (finfo->cal_map_ver[3] >= FRONT_TOF_CHECK_MAP_VERSION) { char uid_list[256] = {0, }; char uid_temp[10] = {0, }; - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->front_tof_uid[i] = *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[i]]); sprintf(uid_temp, "0x%x ", specific->front_tof_uid[i]); strcat(uid_list, uid_temp); } info("front_tof_uid: %s\n", uid_list); } else { - for (int i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { + for (i = 0; i < finfo->rom_tof_cal_uid_addr_len; i++) { specific->front_tof_uid[i] = FRONT_TOF_DEFAULT_UID; } info("front_tof_uid: 0x%x, use default 0x%x", *((int32_t*)&buf[finfo->rom_tof_cal_uid_addr[0]]), From 254c9d0f338f3b8535a7ca0689479a1397d2b3a8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 15:58:52 +0300 Subject: [PATCH 088/452] npu: generated: fix update_ver_info.sh script Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/vision/npu/generated/update_ver_info.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/vision/npu/generated/update_ver_info.sh 
b/drivers/vision/npu/generated/update_ver_info.sh index e93245cd45d3..11548bc1f5a1 100755 --- a/drivers/vision/npu/generated/update_ver_info.sh +++ b/drivers/vision/npu/generated/update_ver_info.sh @@ -9,17 +9,17 @@ if [ -z "$NPU_GIT_LOCAL_CHANGE" ] then NPU_GIT_LOCAL_CHANGE="No local change" fi -STASH_DEPTH=`git stash list | wc -l` -USER_INFO=whoami|sed 's/\\/\-/g' +STASH_DEPTH="$(git stash list | wc -l)" +USER_INFO="$(whoami | sed 's/\\/\-/g')" # Error checking -if [ ( -z $NPU_GIT_LOG ) -o ( -z $NPU_GIT_HASH ) -o ( -z $USER_INFO ) -o ( -z $NPU_GIT_LOCAL_CHANGE ) -o ( -z $STASH_DEPTH ) ] +if [ \( -z "$NPU_GIT_LOG" \) -o \( -z "$NPU_GIT_HASH" \) -o \( -z "$USER_INFO" \) -o \( -z "$NPU_GIT_LOCAL_CHANGE" \) -o \( -z "$STASH_DEPTH" \) ] then echo "An error occured during build info gathering." >&2 exit 16 fi -BUILD_INFO="$(USER_INFO)@$(hostname) / Build on $(date --rfc-3339='seconds')" +BUILD_INFO="$USER_INFO@$(hostname) / Build on $(date --rfc-3339='seconds')" cat > $TARGET_FILE << ENDL const char *npu_git_log_str = From 1443cc6d18e2e53325bb440ed496263d1ae4e7ea Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 2 Oct 2020 21:51:26 +0300 Subject: [PATCH 089/452] arch/arm64/Makefile: don't gzip kernel Image file Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 145408d9df99..a221b9f8f98c 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -161,7 +161,7 @@ endif KBUILD_DTBS := dtbs -all: Image.gz $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) +all: Image $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) Image: vmlinux From 4c32eca58b8593e81f4018c9846c3eec793df25b Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 15:17:36 +0300 Subject: [PATCH 090/452] drivers/of/Kconfig: unbind OF_FLATTREE from CONFIG_DTC Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/of/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index ba7b034b2b91..a6539f152951 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -1,5 +1,5 @@ config DTC - bool + bool "Build DTC compiler" menuconfig OF bool "Device Tree and Open Firmware support" @@ -35,7 +35,6 @@ config OF_ALL_DTBS config OF_FLATTREE bool - select DTC select LIBFDT select CRC32 From 36506453bd4c59c2eecc9cf3e52e4d3ec7588c56 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 24 Jan 2020 18:29:50 +0300 Subject: [PATCH 091/452] include/linux/nmi: fix sl_softirq_entry() definition Signed-off-by: Denis Efremov <efremov@linux.com> --- include/linux/nmi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 892cf046ca33..69d4e1acfde9 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -123,7 +123,7 @@ extern void sl_softirq_entry(const char *, void *); extern void sl_softirq_exit(void); unsigned long long get_dss_softlockup_thresh(void); #else -static inline void void sl_softirq_entry(const char *, void *) { } +static inline void sl_softirq_entry(const char *softirq_type, void *fn) { } static inline void sl_softirq_exit(void) { } #endif From 0653016b3afafeabff2d3515edfcc4dd791a8ef1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 24 Jan 2020 18:42:22 +0300 Subject: [PATCH 092/452] wholetree: fix sec_debug 
missing declarations Signed-off-by: Denis Efremov <efremov@linux.com> --- include/linux/debug-snapshot.h | 7 ++++++- include/linux/sec_debug.h | 3 +++ kernel/power/process.c | 2 +- kernel/printk/printk.c | 4 ++-- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/include/linux/debug-snapshot.h b/include/linux/debug-snapshot.h index 227371b68d20..d09c89a20dc0 100644 --- a/include/linux/debug-snapshot.h +++ b/include/linux/debug-snapshot.h @@ -206,6 +206,7 @@ extern void dbg_snapshot_get_softlockup_info(unsigned int cpu, void *info); #define dbg_snapshot_printk(...) do { } while(0) #define dbg_snapshot_printkl(a,b) do { } while(0) #define dbg_snapshot_save_context(a) do { } while(0) +#define dbg_snapshot_print_notifier_call(a,b,c) do { } while(0) #define dbg_snapshot_try_enable(a,b) do { } while(0) #define dbg_snapshot_set_enable(a,b) do { } while(0) #define dbg_snapshot_get_enable(a) do { } while(0) @@ -255,12 +256,16 @@ static inline bool dbg_snapshot_dumper_one(void *v_dumper, { return false; } -static int dbg_snapshot_add_bl_item_info(const char *name, +static inline int dbg_snapshot_add_bl_item_info(const char *name, unsigned int paddr, unsigned int size) { return 0; } +static inline void dbg_snapshot_save_log(int cpu, unsigned long where) +{ +} + #endif /* CONFIG_DEBUG_SNAPSHOT */ extern void dbg_snapshot_soc_helper_init(void); diff --git a/include/linux/sec_debug.h b/include/linux/sec_debug.h index 8b7a8d00b7b5..401bbe2a78d9 100644 --- a/include/linux/sec_debug.h +++ b/include/linux/sec_debug.h @@ -521,6 +521,7 @@ extern void sec_debug_set_extra_info_epd(char *str); #define sec_debug_set_extra_info_panic(a) do { } while (0) #define sec_debug_set_extra_info_backtrace(a) do { } while (0) #define sec_debug_set_extra_info_backtrace_cpu(a, b) do { } while (0) +#define sec_debug_set_extra_info_backtrace_task(a) do { } while (0) #define sec_debug_set_extra_info_evt_version() do { } while (0) #define sec_debug_set_extra_info_sysmmu(a) do { } while (0) #define sec_debug_set_extra_info_busmon(a) do { } while (0) @@ -660,7 +661,9 @@ struct sec_debug_next { struct sec_debug_spinlock_info rlock; struct sec_debug_kernel_data kernd; +#ifdef CONFIG_SEC_DEBUG_AUTO_COMMENT struct sec_debug_auto_comment auto_comment; +#endif struct sec_debug_shared_buffer extra_info; }; diff --git a/kernel/power/process.c b/kernel/power/process.c index 98001a7303f7..fd1e7a5c5858 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -44,7 +44,7 @@ static int try_to_freeze_tasks(bool user_only) #ifdef CONFIG_PM_SLEEP char suspend_abort[MAX_SUSPEND_ABORT_LEN]; #endif - char *sys_state[SYSTEM_END] = { + char *sys_state[SYSTEM_END] __maybe_unused = { "BOOTING", "SCHEDULING", "RUNNING", diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a5bf81671436..13dc0c6581df 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -658,7 +658,7 @@ void register_hook_logbuf(void (*func)(const char *buf, size_t size)) EXPORT_SYMBOL(register_hook_logbuf); #endif -#if CONFIG_SEC_DEBUG_FIRST_KMSG +#ifdef CONFIG_SEC_DEBUG_FIRST_KMSG static void (*func_hook_first_kmsg)(const char *buf, size_t size); void register_first_kmsg_hook_func(void (*func)(const char *buf, size_t size)) { @@ -801,7 +801,7 @@ static int log_store(int facility, int level, } #endif -#if CONFIG_SEC_DEBUG_FIRST_KMSG +#ifdef CONFIG_SEC_DEBUG_FIRST_KMSG if (func_hook_first_kmsg) func_hook_first_kmsg(hook_text, hook_size); #endif From df3dbf6abd3abf6d687b22f67d65d1edec09fbd1 Mon Sep 17 00:00:00 2001 From: Denis Efremov 
<efremov@linux.com> Date: Fri, 20 Mar 2020 02:09:12 +0300 Subject: [PATCH 093/452] sec_debug: fix build errors for SEC_DEBUG_TSP_LOG Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/input/sec_cmd.c | 4 ++++ include/linux/sec_debug.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/input/sec_cmd.c b/drivers/input/sec_cmd.c index 20ff8df8ccde..21e8caf35776 100644 --- a/drivers/input/sec_cmd.c +++ b/drivers/input/sec_cmd.c @@ -310,6 +310,7 @@ static void sec_cmd_store_function(struct sec_cmd_data *data) sec_cmd_ptr->cmd_func(data); +#ifdef SEC_DEBUG_TSP_LOG if (cmd_found && sec_cmd_ptr->cmd_log) { char tbuf[32]; unsigned long long t; @@ -324,6 +325,7 @@ static void sec_cmd_store_function(struct sec_cmd_data *data) sec_debug_tsp_command_history(tbuf); } +#endif } static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devattr, @@ -355,6 +357,7 @@ static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devatt list_for_each_entry(sec_cmd_ptr, &data->cmd_list_head, list) { if (!strncmp(cmd.cmd, sec_cmd_ptr->cmd_name, strlen(sec_cmd_ptr->cmd_name))) { +#ifdef SEC_DEBUG_TSP_LOG if (sec_cmd_ptr->cmd_log) { char task_info[40]; char tbuf[32]; @@ -374,6 +377,7 @@ static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devatt sec_debug_tsp_command_history(tbuf); } +#endif break; } } diff --git a/include/linux/sec_debug.h b/include/linux/sec_debug.h index 401bbe2a78d9..c543dbab0b7a 100644 --- a/include/linux/sec_debug.h +++ b/include/linux/sec_debug.h @@ -690,7 +690,7 @@ extern void sec_debug_tsp_command_history(char *buf); #define sec_debug_tsp_raw_data(a, ...) do { } while (0) #define sec_debug_tsp_raw_data_msg(a, b, ...) do { } while (0) #define sec_tsp_raw_data_clear() do { } while (0) -#define sec_debug_tsp_command_history() do { } while (0) +#define sec_debug_tsp_command_history(buf) do { } while (0) #endif /* CONFIG_SEC_DEBUG_TSP_LOG */ #ifdef CONFIG_TOUCHSCREEN_DUMP_MODE From 557d7e41fb5c886b32ef8a7a75d4729c8297ccb6 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 10:34:07 +0300 Subject: [PATCH 094/452] sec_debug: fix build error for CONFIG_KALLSYMS Signed-off-by: Denis Efremov <efremov@linux.com> --- include/linux/sec_debug.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/linux/sec_debug.h b/include/linux/sec_debug.h index c543dbab0b7a..a8a25d01ca01 100644 --- a/include/linux/sec_debug.h +++ b/include/linux/sec_debug.h @@ -345,7 +345,17 @@ extern void sec_debug_task_sched_log(int cpu, struct task_struct *task); extern void sec_debug_irq_sched_log(unsigned int irq, void *fn, int en); extern void sec_debug_irq_enterexit_log(unsigned int irq, unsigned long long start_time); +#ifdef CONFIG_KALLSYMS extern void sec_debug_set_kallsyms_info(struct sec_debug_ksyms *ksyms, int magic); +#else +static inline void sec_debug_set_kallsyms_info(struct sec_debug_ksyms *ksyms, int magic) +{ + if (ksyms) { + memset(ksyms, 0, sizeof(*ksyms)); + ksyms->magic = magic; + } +} +#endif extern int sec_debug_check_sj(void); extern unsigned int sec_debug_get_kevent_paddr(int type); From bbf76cc93e799bd394043d2004d997840ed0f965 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Mar 2020 02:11:24 +0300 Subject: [PATCH 095/452] lib/debug-snapshot-utils.c: fix build errors Signed-off-by: Denis Efremov <efremov@linux.com> --- lib/debug-snapshot-utils.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/debug-snapshot-utils.c 
b/lib/debug-snapshot-utils.c index da8b1249ec7f..d3e380aa57bc 100644 --- a/lib/debug-snapshot-utils.c +++ b/lib/debug-snapshot-utils.c @@ -33,6 +33,8 @@ #include <asm/cacheflush.h> #include <linux/irqflags.h> +#include <linux/sec_debug.h> + #include "debug-snapshot-local.h" DEFINE_PER_CPU(struct pt_regs *, dss_core_reg); From 392ba1017daeab2fcd906fc592d8db6c5e84ba8e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 25 Jan 2020 00:04:57 +0300 Subject: [PATCH 096/452] phy-exynos-usbdrd: add argument names to definitions Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/phy/samsung/phy-exynos-debug.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/phy/samsung/phy-exynos-debug.h b/drivers/phy/samsung/phy-exynos-debug.h index a04b52ebe4ad..9e587bbbdf05 100644 --- a/drivers/phy/samsung/phy-exynos-debug.h +++ b/drivers/phy/samsung/phy-exynos-debug.h @@ -17,9 +17,9 @@ extern int exynos_usbdrd_debugfs_init(struct exynos_usbdrd_phy *phy_drd); extern int exynos_usbdrd_dp_debugfs_init(struct exynos_usbdrd_phy *phy_drd); extern void exynos_usbdrd_debugfs_exit(struct exynos_usbdrd_phy *phy_drd); #else -static inline int exynos_usbdrd_debugfs_init(struct exynos_usbdrd_phy *) +static inline int exynos_usbdrd_debugfs_init(struct exynos_usbdrd_phy *phy_drd) { return 0; } -static inline void exynos_usbdrd_debugfs_exit(struct exynos_usbdrd_phy *) +static inline void exynos_usbdrd_debugfs_exit(struct exynos_usbdrd_phy *phy_drd) { } #endif From 3ed500247e2c231ae09cd94062ec6451f552a1f5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 25 Jan 2020 00:05:51 +0300 Subject: [PATCH 097/452] s3c2410_wdt: move variable declaration Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/watchdog/s3c2410_wdt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 0c885c52d05b..b561edafda56 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -429,7 +429,6 @@ static int s3c2410wdt_keepalive(struct watchdog_device *wdd) { struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd); unsigned long flags, wtcnt = 0; - time64_t sec; s3c2410wdt_multistage_wdt_keepalive(); @@ -442,6 +441,8 @@ static int s3c2410wdt_keepalive(struct watchdog_device *wdd) #ifdef SEC_WATCHDOGD_FOOTPRINT if (wdt->cluster == 0) { + time64_t sec; + wdd_info->last_ping_cpu = raw_smp_processor_id(); wdd_info->last_ping_time = sched_clock(); From b4d71fc2f09967495d89ca88844c7d9b3814e6b8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 25 Jan 2020 00:06:51 +0300 Subject: [PATCH 098/452] f2fs: fix CONFIG_F2FS_STAT_FS compilation Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/f2fs/sysfs.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 6755dba3d2ee..53d791de0f62 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -183,7 +183,7 @@ static ssize_t current_reserved_blocks_show(struct f2fs_attr *a, return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks); } -#ifdef CONFIG_F2FS_SEC_BLOCK_OPERATIONS_DEBUG +#if defined(CONFIG_F2FS_SEC_BLOCK_OPERATIONS_DEBUG) && defined(CONFIG_F2FS_STAT_FS) static int f2fs_sec_blockops_dbg(struct f2fs_sb_info *sbi, char *buf, int src_len) { int len = src_len; int i, j; @@ -227,6 +227,7 @@ static int f2fs_sec_blockops_dbg(struct f2fs_sb_info *sbi, char *buf, int src_le } #endif +#ifdef CONFIG_F2FS_STAT_FS /* Copy 
from debug.c stat_show */ static ssize_t f2fs_sec_stats_show(struct f2fs_sb_info *sbi, char *buf) { @@ -410,6 +411,7 @@ static ssize_t f2fs_sec_stats_show(struct f2fs_sb_info *sbi, char *buf) #endif return len; } +#endif static void __sec_bigdata_init_value(struct f2fs_sb_info *sbi, const char *attr_name) @@ -614,8 +616,10 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a, } return len; +#ifdef CONFIG_F2FS_STAT_FS } else if (!strcmp(a->attr.name, "sec_stats")) { return f2fs_sec_stats_show(sbi, buf); +#endif } ui = (unsigned int *)(ptr + a->offset); @@ -908,7 +912,9 @@ F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif F2FS_RW_ATTR_640(F2FS_SBI, f2fs_sb_info, sec_gc_stat, sec_stat); F2FS_RW_ATTR_640(F2FS_SBI, f2fs_sb_info, sec_io_stat, sec_stat); +#ifdef CONFIG_F2FS_STAT_FS F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_stats, stat_info); +#endif F2FS_RW_ATTR_640(F2FS_SBI, f2fs_sb_info, sec_fsck_stat, sec_fsck_stat); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_best_extents, s_sec_part_best_extents); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_current_extents, s_sec_part_current_extents); @@ -975,7 +981,9 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(extension_list), ATTR_LIST(sec_gc_stat), ATTR_LIST(sec_io_stat), +#ifdef CONFIG_F2FS_STAT_FS ATTR_LIST(sec_stats), +#endif ATTR_LIST(sec_fsck_stat), ATTR_LIST(sec_part_best_extents), ATTR_LIST(sec_part_current_extents), From 596168967cfce68272e276c3b828467aa330f0db Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 23 Oct 2020 23:46:36 +0300 Subject: [PATCH 099/452] fs: sdfat: add missing sdfat_debug_warn_on() define Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/sdfat/sdfat.h | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/sdfat/sdfat.h b/fs/sdfat/sdfat.h index 8824d10ef058..8a18aa5b798e 100644 --- a/fs/sdfat/sdfat.h +++ b/fs/sdfat/sdfat.h @@ -447,6 +447,7 @@ void sdfat_debug_check_clusters(struct inode *inode); #define sdfat_debug_check_clusters(inode) #define sdfat_debug_bug_on(expr) +#define sdfat_debug_warn_on(expr) #endif /* CONFIG_SDFAT_DEBUG */ From da34c0bae8d680209ac0a8b51455ffd76ac09bae Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 6 Jan 2022 15:45:04 +0300 Subject: [PATCH 100/452] drivers/gpu/arm/exynos: fix functions arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- .../arm/exynos/backend/gpexbe_qos_internal.h | 2 +- drivers/gpu/arm/exynos/frontend/gpex_clock.c | 16 +++++++------- drivers/gpu/arm/exynos/frontend/gpex_dvfs.c | 22 +++++++++---------- .../gpu/arm/exynos/frontend/gpex_thermal.c | 6 ++--- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h b/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h index a85fedfcc595..89b5b40546f6 100644 --- a/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h +++ b/drivers/gpu/arm/exynos/backend/gpexbe_qos_internal.h @@ -18,7 +18,7 @@ * http://www.gnu.org/licenses/gpl-2.0.html. 
*/ -static inline pmqos_flag_check(mali_pmqos_flags type, mali_pmqos_flags in) +static inline int pmqos_flag_check(mali_pmqos_flags type, mali_pmqos_flags in) { return (type & in) == in; } diff --git a/drivers/gpu/arm/exynos/frontend/gpex_clock.c b/drivers/gpu/arm/exynos/frontend/gpex_clock.c index 1d021f6ce7e0..a2c700ba56a6 100644 --- a/drivers/gpu/arm/exynos/frontend/gpex_clock.c +++ b/drivers/gpu/arm/exynos/frontend/gpex_clock.c @@ -37,31 +37,31 @@ static struct _clock_info clk_info; -int gpex_clock_get_boot_clock() +int gpex_clock_get_boot_clock(void) { return clk_info.boot_clock; } -int gpex_clock_get_max_clock() +int gpex_clock_get_max_clock(void) { return clk_info.gpu_max_clock; } -int gpex_clock_get_max_clock_limit() +int gpex_clock_get_max_clock_limit(void) { return clk_info.gpu_max_clock_limit; } -int gpex_clock_get_min_clock() +int gpex_clock_get_min_clock(void) { return clk_info.gpu_min_clock; } -int gpex_clock_get_cur_clock() +int gpex_clock_get_cur_clock(void) { return clk_info.cur_clock; } -int gpex_clock_get_max_lock() +int gpex_clock_get_max_lock(void) { return clk_info.max_lock; } -int gpex_clock_get_min_lock() +int gpex_clock_get_min_lock(void) { return clk_info.min_lock; } @@ -80,7 +80,7 @@ u64 gpex_clock_get_time_busy(int level) /******************************************* * static helper functions ******************************************/ -static int gpex_clock_update_config_data_from_dt() +static int gpex_clock_update_config_data_from_dt(void) { int ret = 0; struct freq_volt *fv_array; diff --git a/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c b/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c index 60c911fcbef9..ea36eb872189 100644 --- a/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c +++ b/drivers/gpu/arm/exynos/frontend/gpex_dvfs.c @@ -85,7 +85,7 @@ static void gpex_dvfs_context_init(struct device **dev) dvfs.polling_speed = gpexbe_devicetree_get_int(gpu_dvfs_polling_time); } -static int gpu_dvfs_calculate_env_data() +static int gpu_dvfs_calculate_env_data(void) { unsigned long flags; static int polling_period; @@ -172,12 +172,12 @@ static void gpu_dvfs_timer_control(bool timer_state) spin_unlock_irqrestore(&dvfs.spinlock, flags); } -void gpex_dvfs_start() +void gpex_dvfs_start(void) { gpu_dvfs_timer_control(true); } -void gpex_dvfs_stop() +void gpex_dvfs_stop(void) { gpu_dvfs_timer_control(false); } @@ -220,17 +220,17 @@ static int gpu_dvfs_on_off(bool enable) return 0; } -int gpex_dvfs_enable() +int gpex_dvfs_enable(void) { return gpu_dvfs_on_off(true); } -int gpex_dvfs_disable() +int gpex_dvfs_disable(void) { return gpu_dvfs_on_off(false); } -static int gpu_dvfs_handler_init() +static int gpu_dvfs_handler_init(void) { if (!dvfs.status) dvfs.status = true; @@ -243,7 +243,7 @@ static int gpu_dvfs_handler_init() return 0; } -static int gpu_dvfs_handler_deinit() +static int gpu_dvfs_handler_deinit(void) { if (dvfs.status) dvfs.status = false; @@ -254,7 +254,7 @@ static int gpu_dvfs_handler_deinit() return 0; } -static int gpu_pm_metrics_init() +static int gpu_pm_metrics_init(void) { INIT_DELAYED_WORK(&dvfs.dvfs_work, dvfs_callback); dvfs.dvfs_wq = create_workqueue("g3d_dvfs"); @@ -265,7 +265,7 @@ static int gpu_pm_metrics_init() return 0; } -static void gpu_pm_metrics_term() +static void gpu_pm_metrics_term(void) { cancel_delayed_work(&dvfs.dvfs_work); flush_workqueue(dvfs.dvfs_wq); @@ -293,7 +293,7 @@ int gpex_dvfs_init(struct device **dev) return 0; } -void gpex_dvfs_term() +void gpex_dvfs_term(void) { /* DVFS stuff */ gpu_pm_metrics_term(); @@ -301,7 +301,7 @@ void 
gpex_dvfs_term() dvfs.kbdev = NULL; } -int gpex_dvfs_get_status() +int gpex_dvfs_get_status(void) { return dvfs.status; } diff --git a/drivers/gpu/arm/exynos/frontend/gpex_thermal.c b/drivers/gpu/arm/exynos/frontend/gpex_thermal.c index 62f701bf17c0..7fcf6a55fd5a 100644 --- a/drivers/gpu/arm/exynos/frontend/gpex_thermal.c +++ b/drivers/gpu/arm/exynos/frontend/gpex_thermal.c @@ -149,7 +149,7 @@ static ssize_t show_kernel_sysfs_gpu_temp(char *buf) } CREATE_SYSFS_KOBJECT_READ_FUNCTION(show_kernel_sysfs_gpu_temp); -static void gpex_thermal_create_sysfs_file() +static void gpex_thermal_create_sysfs_file(void) { GPEX_UTILS_SYSFS_DEVICE_FILE_ADD(tmu, show_tmu, set_tmu_control); GPEX_UTILS_SYSFS_KOBJECT_FILE_ADD_RO(gpu_tmu, show_kernel_sysfs_gpu_temp); @@ -158,14 +158,14 @@ static void gpex_thermal_create_sysfs_file() /*********************************************************************** * INIT, TERM FUNCTIONS ***********************************************************************/ -int gpex_thermal_init() +int gpex_thermal_init(void) { gpex_thermal_create_sysfs_file(); return 0; } -void gpex_thermal_term() +void gpex_thermal_term(void) { thermal.tmu_enabled = false; From f188d4b7f4ab721861718781f17639934948e28d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 26 Nov 2019 15:22:48 +0300 Subject: [PATCH 101/452] drivers/gpu/exynos/g2d/g2d_task: fix kcalloc check Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/gpu/exynos/g2d/g2d_task.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/exynos/g2d/g2d_task.c b/drivers/gpu/exynos/g2d/g2d_task.c index b0ccfbe699f7..5008d9f89c50 100644 --- a/drivers/gpu/exynos/g2d/g2d_task.c +++ b/drivers/gpu/exynos/g2d/g2d_task.c @@ -465,7 +465,7 @@ static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev, int id) task->source = kcalloc(g2d_dev->max_layers, sizeof(*task->source), GFP_KERNEL); - if (!task) + if (!task->source) goto err_alloc; INIT_LIST_HEAD(&task->node); From a58195747107249448992c24bf8897bd5a4087db Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 15:51:44 +0300 Subject: [PATCH 102/452] drivers/video/fbdev/exynos/panel/aod/aod_drv: add return type int Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/aod/aod_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/aod/aod_drv.c b/drivers/video/fbdev/exynos/panel/aod/aod_drv.c index cda91fd33ce9..52591fe3321c 100644 --- a/drivers/video/fbdev/exynos/panel/aod/aod_drv.c +++ b/drivers/video/fbdev/exynos/panel/aod/aod_drv.c @@ -836,7 +836,7 @@ static int __aod_ioctl_set_digital_clk(struct aod_dev_info *aod, unsigned long a } -static __aod_ictl_set_parial_scan(struct aod_dev_info *aod, unsigned long arg) +static int __aod_ictl_set_parial_scan(struct aod_dev_info *aod, unsigned long arg) { int ret = 0; From e8c4217378b7885abe50ba7d7da7c8dff4976fbc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Nov 2022 20:55:01 +0400 Subject: [PATCH 103/452] drivers/soc/samsung/exynos-cpuhp.c: fix toupper definition Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos-cpuhp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-cpuhp.c b/drivers/soc/samsung/exynos-cpuhp.c index 09da3bfc1438..66eb93310af1 100644 --- a/drivers/soc/samsung/exynos-cpuhp.c +++ b/drivers/soc/samsung/exynos-cpuhp.c @@ -419,7 +419,7 @@ static int 
cpuhp_control(bool enable) * #echo mask > /sys/power/cpuhp/set_online_cpu */ #define STR_LEN 6 -static inline toupper(char ch) +static inline int toupper(int ch) { if ('a' <= ch && ch <= 'z') ch += 'A' - 'a'; From a187edff1f188bb700cb306d9b933eb3fa92170e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:53:41 +0300 Subject: [PATCH 104/452] drivers/media/platform/exynos/fimc-is2/fimc-is-binary: fix void * dereference Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c index 6e487be65613..ef3ff6c8de85 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c @@ -354,7 +354,7 @@ int carve_binary_version(enum is_bin_type type, unsigned int hint, void *data, s } buf = bin_ver_info[type].get_buf(&bin_ver_info[type], hint); - memcpy(buf, &data[ofs], len); + memcpy(buf, &((char *)data)[ofs], len); buf[len] = '\0'; info("%s version: %s\n", bin_names[bin_ver_info[type].get_name_idx(hint)], From 7b09ff3930cca71f59e76ad83cc45685cbd62d2e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:51:55 +0300 Subject: [PATCH 105/452] drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux: remove unused timeleft variable Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c b/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c index ea5ff1cc27b8..57583fc24cdd 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/dhd_linux.c @@ -2811,15 +2811,13 @@ static int dhd_wait_for_file_dump(dhd_pub_t *dhdp) DHD_OS_WAKE_LOCK(dhdp); /* check for hal started and only then send event if not clear dump state here */ if (wl_cfg80211_is_hal_started(cfg)) { - int timeleft = 0; - DHD_ERROR(("[DUMP] %s: HAL started. 
send urgent event\n", __FUNCTION__)); dhd_dbg_send_urgent_evt(dhdp, NULL, 0); DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n", __FUNCTION__, dhdp->dhd_bus_busy_state)); - timeleft = dhd_os_busbusy_wait_bitmask(dhdp, - &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0); + dhd_os_busbusy_wait_bitmask(dhdp, + &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0); if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) { DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n", __FUNCTION__, dhdp->dhd_bus_busy_state)); From 0c2964be5ed1e0c91fbf9a91928ded84e7947a4c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 07:57:32 +0300 Subject: [PATCH 106/452] drivers/media/platform/exynos/scaler/scaler-core: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/scaler/scaler-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/scaler/scaler-core.c b/drivers/media/platform/exynos/scaler/scaler-core.c index 05eab727dbfe..6ede627f14c1 100644 --- a/drivers/media/platform/exynos/scaler/scaler-core.c +++ b/drivers/media/platform/exynos/scaler/scaler-core.c @@ -3588,7 +3588,7 @@ static int sc_m2m1shot_prepare_buffer(struct m2m1shot_context *m21ctx, &buf_dma->plane[plane], dir, min_size); if (ret) { dev_err(ctx->sc_dev->dev, - "plane%d size %d is smaller than %d\n", + "plane%d size %zu is smaller than %u\n", plane, buf_dma->plane[plane].bytes_used, min_size); m2m1shot_unmap_dma_buf(m21ctx->m21dev->dev, From 2238c2592797db73011b9f4fe8e307c11352cac9 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 08:10:40 +0300 Subject: [PATCH 107/452] drivers/optics/max86915: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/optics/max86915.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/optics/max86915.c b/drivers/optics/max86915.c index e273cb0a4a56..93400af3878e 100644 --- a/drivers/optics/max86915.c +++ b/drivers/optics/max86915.c @@ -4658,7 +4658,7 @@ static int max86915_power_ctrl(struct max86915_device_data *data, int onoff) if (data->i2c_1p8 != NULL) { regulator_i2c_1p8 = regulator_get(NULL, data->i2c_1p8); if (IS_ERR(regulator_i2c_1p8) || regulator_i2c_1p8 == NULL) { - HRM_err("%s - get i2c_1p8 regulator failed, %d\n", __func__, PTR_ERR(regulator_i2c_1p8)); + HRM_err("%s - get i2c_1p8 regulator failed, %ld\n", __func__, PTR_ERR(regulator_i2c_1p8)); rc = -EINVAL; regulator_i2c_1p8 = NULL; goto get_i2c_1p8_failed; @@ -4669,7 +4669,7 @@ static int max86915_power_ctrl(struct max86915_device_data *data, int onoff) regulator_vdd_1p8 = regulator_get(&data->client->dev, data->vdd_1p8); if (IS_ERR(regulator_vdd_1p8) || regulator_vdd_1p8 == NULL) { - HRM_dbg("%s - get vdd_1p8 regulator failed, %d\n", __func__, PTR_ERR(regulator_vdd_1p8)); + HRM_dbg("%s - get vdd_1p8 regulator failed, %ld\n", __func__, PTR_ERR(regulator_vdd_1p8)); regulator_vdd_1p8 = NULL; goto get_vdd_1p8_failed; } From 4f9d5844ab07908d821010149791eafd1af68054 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:46:34 +0300 Subject: [PATCH 108/452] drivers/sensorhub/brcm/factory/magnetic_common: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/factory/magnetic_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/factory/magnetic_common.c 
b/drivers/sensorhub/brcm/factory/magnetic_common.c index 94ab8f709bdf..799123258396 100644 --- a/drivers/sensorhub/brcm/factory/magnetic_common.c +++ b/drivers/sensorhub/brcm/factory/magnetic_common.c @@ -821,7 +821,7 @@ int load_magnetic_cal_param_from_nvm(u8 *data, u8 length) cal_filp = filp_open(MAG_CAL_PARAM_FILE_PATH, O_CREAT | O_RDONLY | O_NOFOLLOW | O_NONBLOCK, 0660); if (IS_ERR(cal_filp)) { - pr_err("[SSP] %s: filp_open failed, errno = %d\n", __func__, PTR_ERR(cal_filp)); + pr_err("[SSP] %s: filp_open failed, errno = %ld\n", __func__, PTR_ERR(cal_filp)); set_fs(old_fs); iRet = PTR_ERR(cal_filp); From a7ba4306f618d0c83c2168bfe743e5dca242bec1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:48:04 +0300 Subject: [PATCH 109/452] drivers/sensorhub/brcm/sx9330: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/sx9330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index 8a1d629eb246..f83bab018699 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -850,7 +850,7 @@ static ssize_t sx9330_avgthresh_show(struct device *dev, (1 << (4 + MAIN_SENSOR)), &avgthresh); avgthresh = (avgthresh & 0x3F000000) >> 24; - return snprintf(buf, PAGE_SIZE, "%ld\n", 16384 * avgthresh); + return snprintf(buf, PAGE_SIZE, "%u\n", 16384 * avgthresh); } static ssize_t sx9330_rawfilt_show(struct device *dev, From 21114648d3ddb482c5f5e0356da99c89261a3cbb Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 08:11:30 +0300 Subject: [PATCH 110/452] drivers/sensorhub/brcm/ssp_dev: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/ssp_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp_dev.c b/drivers/sensorhub/brcm/ssp_dev.c index 61cf2cdeb6bc..80d33ab39108 100644 --- a/drivers/sensorhub/brcm/ssp_dev.c +++ b/drivers/sensorhub/brcm/ssp_dev.c @@ -805,7 +805,7 @@ static int panel_notifier_callback(struct notifier_block *self, unsigned long ev pr_info("[SSP] %s PANEL_EVENT_BL_CHANGED %d %d\n", __func__, evdata->brightness, evdata->aor_ratio); } else { - pr_info("[SSP] %s unknown event %d\n", __func__, event); + pr_info("[SSP] %s unknown event %lu\n", __func__, event); } // store these values for reset From 1a247fa4efaf4143a117d0ba52d1d620836ed23a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 08:11:55 +0300 Subject: [PATCH 111/452] drivers/video/fbdev/exynos/dpu20/decon: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/decon.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon.h b/drivers/video/fbdev/exynos/dpu20/decon.h index d7a71fa26f36..da5f47fa244b 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon.h +++ b/drivers/video/fbdev/exynos/dpu20/decon.h @@ -1621,7 +1621,7 @@ static inline int decon_doze_wake_lock(struct decon_device *decon, usleep_range(1000, 1100); if (time_is_before_jiffies(timeout_jiffies)) { - decon_err("%s timeout(elapsed %d msec)\n", + decon_err("%s timeout(elapsed %lu msec)\n", __func__, timeout); } } From 7a1cbee9b53e886786a2ec327a029d3008fdb479 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 07:54:20 +0300 Subject: [PATCH 112/452] 
drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- .../exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c b/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c index ccc4ce4242a6..e17503576a8f 100644 --- a/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c +++ b/drivers/media/platform/exynos/fimc-is2/sensor/module_framework/cis/fimc-is-cis.c @@ -645,7 +645,7 @@ int sensor_cis_set_initial_exposure(struct v4l2_subdev *subdev) if (cis->use_initial_ae) { cis->init_ae_setting = cis->last_ae_setting; - dbg_sensor(1, "[MOD:D:%d] %s short(exp:%d/again:%d/dgain:%d), long(exp:%d/again:%d/dgain:%d)\n", + dbg_sensor(1, "[MOD:D:%d] %s short(exp:%llu/again:%d/dgain:%d), long(exp:%llu/again:%d/dgain:%d)\n", cis->id, __func__, cis->init_ae_setting.exposure, cis->init_ae_setting.analog_gain, cis->init_ae_setting.digital_gain, cis->init_ae_setting.long_exposure, cis->init_ae_setting.long_analog_gain, cis->init_ae_setting.long_digital_gain); From 24a8bd945258f57f0318c1ce0c97f57651030a6a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 08:13:13 +0300 Subject: [PATCH 113/452] drivers/rtc/class: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/rtc/class.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 65a4e9840646..fdd38df38d38 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -142,7 +142,7 @@ static int rtc_resume(struct device *dev) if (sleep_time.tv_sec >= 0) timekeeping_inject_sleeptime64(&sleep_time); else - pm_deferred_pr_dbg("rtc: suspended for 0.000 seconds (%lld)\n", + pm_deferred_pr_dbg("rtc: suspended for 0.000 seconds (%ld)\n", sleep_time.tv_sec); rtc_hctosys_ret = 0; return 0; From 9236620276cb17270c0b7508c2e54aa4e66df140 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 00:17:48 +0300 Subject: [PATCH 114/452] block/blk-crypt: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- block/blk-crypt-fmp.c | 4 ++-- block/blk-crypt.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/block/blk-crypt-fmp.c b/block/blk-crypt-fmp.c index c4653d8ee861..07032ad19792 100644 --- a/block/blk-crypt-fmp.c +++ b/block/blk-crypt-fmp.c @@ -31,7 +31,7 @@ static void *blk_crypt_fmp_alloc_aes_xts(void) } if (IS_ERR(bctx)) { - pr_debug("error allocating diskciher '%s' transform: %d", + pr_debug("error allocating diskciher '%s' transform: %ld", cipher_str, PTR_ERR(bctx)); return bctx; } @@ -68,7 +68,7 @@ static int __init blk_crypt_alg_fmp_init(void) blk_crypt_handle = blk_crypt_alg_register(NULL, "xts(aes)", BLK_CRYPT_MODE_INLINE_PRIVATE, &fmp_hw_xts_cbs); if (IS_ERR(blk_crypt_handle)) { - pr_err("%s: failed to register alg(xts(aes)), err:%d\n", + pr_err("%s: failed to register alg(xts(aes)), err:%ld\n", __func__, PTR_ERR(blk_crypt_handle) ); blk_crypt_handle = NULL; } diff --git a/block/blk-crypt.c b/block/blk-crypt.c index 399135cfbe03..94fd159a88a7 100644 --- a/block/blk-crypt.c +++ b/block/blk-crypt.c @@ -163,7 +163,7 @@ blk_crypt_t *blk_crypt_get_context(struct block_device *bdev, const char *cipher struct blk_crypt_t *bctx; bctx = blk_crypt_alloc_context(bdev, 
cipher_str); if (IS_ERR(bctx)) { - pr_debug("error allocating diskciher '%s' err: %d", + pr_debug("error allocating diskciher '%s' err: %ld", cipher_str, PTR_ERR(bctx)); return bctx; } From 41ec61fdff93f03bce3ff34adfa5902247dcc5fc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 00:20:01 +0300 Subject: [PATCH 115/452] fs/crypto/keyinfo: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/crypto/keyinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 7279c9ef582b..fd763579eb21 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -319,7 +319,7 @@ static int prepare_inline_encryption(struct super_block *sb, struct fscrypt_info cipher_str = mode->cipher_str + INLINE_PREFIX_LEN; bctx = blk_crypt_get_context(bdev, cipher_str); if (IS_ERR(bctx)) { - pr_err("%s : failed to get blk_crypt context (transform: %s, err: %d)", + pr_err("%s : failed to get blk_crypt context (transform: %s, err: %ld)", __func__, cipher_str, PTR_ERR(bctx)); return PTR_ERR(bctx); } From 31a0cf5a87866a53130b15f84fe3511deabae396 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 00:21:23 +0300 Subject: [PATCH 116/452] security/sdp/dek: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- security/sdp/dek.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/security/sdp/dek.c b/security/sdp/dek.c index ceb619511d08..b5bc7429e148 100644 --- a/security/sdp/dek.c +++ b/security/sdp/dek.c @@ -1093,7 +1093,7 @@ static long dek_do_ioctl_req(unsigned int minor, unsigned int cmd, memset(tempPlain_dek->buf, 0, DEK_MAXLEN); if (ret < 0) { - DEK_LOGE("DEK_ENCRYPT_DEK: failed to encrypt dek! (err:%d)\n", ret); + DEK_LOGE("DEK_ENCRYPT_DEK: failed to encrypt dek! (err:%ld)\n", ret); zero_out((char *)&req, sizeof(dek_arg_encrypt_dek)); kzfree(tempPlain_dek); kzfree(tempEnc_dek); @@ -1162,7 +1162,7 @@ static long dek_do_ioctl_req(unsigned int minor, unsigned int cmd, tempEnc_dek, tempPlain_dek); if (ret < 0) { - DEK_LOGE("DEK_DECRYPT_DEK: failed to decrypt dek! (err:%d)\n", ret); + DEK_LOGE("DEK_DECRYPT_DEK: failed to decrypt dek! 
(err:%ld)\n", ret); zero_out((char *)&req, sizeof(dek_arg_decrypt_dek)); kzfree(tempPlain_dek); kzfree(tempEnc_dek); From 0b1feabc1985d202c1b702661a47a1e8d44d3acb Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 00:21:51 +0300 Subject: [PATCH 117/452] security/sdp/dd_kernel_crypto: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- security/sdp/dd_kernel_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/sdp/dd_kernel_crypto.c b/security/sdp/dd_kernel_crypto.c index eeea5d21f541..c6aa2a066bbd 100644 --- a/security/sdp/dd_kernel_crypto.c +++ b/security/sdp/dd_kernel_crypto.c @@ -670,6 +670,6 @@ void dd_hex_key_dump(const char* tag, uint8_t *data, size_t data_len) } buf[buf_len - 1] = '\0'; printk(KERN_ERR - "[%s] %s(len=%d) : %s\n", "DEK_DBG", tag, data_len, buf); + "[%s] %s(len=%zu) : %s\n", "DEK_DBG", tag, data_len, buf); kfree(buf); } From f9d39fd170a8761b8fdac84db47fa0b733d9b8fc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 00:23:05 +0300 Subject: [PATCH 118/452] fs/crypto/sdp/sdp_dek: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/crypto/sdp/sdp_dek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/crypto/sdp/sdp_dek.c b/fs/crypto/sdp/sdp_dek.c index f39720526f0d..a3b436cec497 100644 --- a/fs/crypto/sdp/sdp_dek.c +++ b/fs/crypto/sdp/sdp_dek.c @@ -85,7 +85,7 @@ void dump_file_key_hex(const char* tag, uint8_t *data, size_t data_len) } buf[buf_len - 1] = '\0'; printk(KERN_ERR - "[%s] %s(len=%d) : %s\n", "DEK_DBG", tag, data_len, buf); + "[%s] %s(len=%zu) : %s\n", "DEK_DBG", tag, data_len, buf); kfree(buf); } From 305fbfd2b8e1ad3752dd8e290726bed28cf9b039 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 00:24:01 +0300 Subject: [PATCH 119/452] mm/page_alloc: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5fb2136b90b8..c7ae28b60071 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4178,14 +4178,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, a_file += node_page_state(pgdat, NR_ACTIVE_FILE); in_file += node_page_state(pgdat, NR_INACTIVE_FILE); } - pr_info("alloc stall: timeJS(ms):%u|%u rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB\n", + pr_info("alloc stall: timeJS(ms):%u|%llu rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB\n", jiffies_to_msecs(jiffies - jiffies_s), stime_d / NSEC_PER_MSEC, did_some_progress, pages_reclaimed, retry_loop_count, order, gfp_mask, &gfp_mask, a_anon << (PAGE_SHIFT-10), in_anon << (PAGE_SHIFT-10), a_file << (PAGE_SHIFT-10), in_file << (PAGE_SHIFT-10)); - ologk("alloc stall: timeJS(ms):%u|%u rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB", + ologk("alloc stall: timeJS(ms):%u|%llu rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB", jiffies_to_msecs(jiffies - jiffies_s), stime_d / NSEC_PER_MSEC, did_some_progress, pages_reclaimed, retry_loop_count, From f185e458c2f14bf26324c314c5b6ab46beef45b2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 02:19:46 +0300 Subject: [PATCH 120/452] drivers/media/platform/exynos/fimc-is2/fimc-is-core: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- 
drivers/media/platform/exynos/fimc-is2/fimc-is-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c index ae57fd041af1..a03d6269c6be 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-core.c @@ -236,7 +236,7 @@ static int fimc_is_secure_face(struct fimc_is_core *core, if (ret != 0) { err("[SMC] SMC_SECCAM_PREPARE fail(%d)", ret); } else { - info("[SMC] Call SMC_SECCAM_PREPARE ret(%d) / state(%d->%d)\n", + info("[SMC] Call SMC_SECCAM_PREPARE ret(%d) / state(%lu->%d)\n", ret, core->secure_state, FIMC_IS_STATE_SECURED); core->secure_state = FIMC_IS_STATE_SECURED; } @@ -252,7 +252,7 @@ static int fimc_is_secure_face(struct fimc_is_core *core, if (ret != 0) { err("[SMC] SMC_SECCAM_UNPREPARE fail(%d)\n", ret); } else { - info("[SMC] Call SMC_SECCAM_UNPREPARE ret(%d) / smc_state(%d->%d)\n", + info("[SMC] Call SMC_SECCAM_UNPREPARE ret(%d) / smc_state(%lu->%d)\n", ret, core->secure_state, FIMC_IS_STATE_UNSECURE); core->secure_state = FIMC_IS_STATE_UNSECURE; } From 841b62e673f8f817ecad16664f6f209c2d0155ce Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 02:30:22 +0300 Subject: [PATCH 121/452] drivers/media/platform/exynos/fimc-is2/fimc-is-binary: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c index ef3ff6c8de85..f69bf1673bb2 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-binary.c @@ -341,14 +341,14 @@ int carve_binary_version(enum is_bin_type type, unsigned int hint, void *data, s ofs = size - bin_ver_info[type].offset; if (ofs <= 0) { - pr_warn("out of range offset(size: %d <= offset: %d)\n", size, + pr_warn("out of range offset(size: %zu <= offset: %d)\n", size, bin_ver_info[type].offset); return -EINVAL; } len = bin_ver_info[type].length; if ((ofs + len) > size) { - pr_warn("too long version length (binary: %d < version: %d)\n", + pr_warn("too long version length (binary: %zu < version: %d)\n", size, (ofs + len)); len -= ((ofs + len) - size); } From b627bfddbcd1e60e48d7c559891475a206adb593 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 02:31:12 +0300 Subject: [PATCH 122/452] drivers/media/platform/exynos/jsqz/jsqz-core: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/jsqz/jsqz-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/jsqz/jsqz-core.c b/drivers/media/platform/exynos/jsqz/jsqz-core.c index e2ab043a497e..9cd6a77334f4 100644 --- a/drivers/media/platform/exynos/jsqz/jsqz-core.c +++ b/drivers/media/platform/exynos/jsqz/jsqz-core.c @@ -1390,7 +1390,7 @@ static long jsqz_ioctl(struct file *filp, return ret; } default: - dev_err(jsqz_device->dev, "%s: Unknown ioctl cmd %x, %x\n", + dev_err(jsqz_device->dev, "%s: Unknown ioctl cmd %x, %lx\n", __func__, cmd, HWJSQZ_IOC_PROCESS); return -EINVAL; } From b90fdb65d4c6252d2bcf6343cea333bd48f85fa5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 02:32:06 +0300 Subject: [PATCH 123/452] 
drivers/media/platform/exynos/mfc/mfc_qos: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/mfc/mfc_qos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/mfc/mfc_qos.c b/drivers/media/platform/exynos/mfc/mfc_qos.c index ae07680f3ac8..d2a82ff2f49d 100644 --- a/drivers/media/platform/exynos/mfc/mfc_qos.c +++ b/drivers/media/platform/exynos/mfc/mfc_qos.c @@ -521,7 +521,7 @@ static int __mfc_qos_get_freq_by_bps(struct mfc_dev *dev, unsigned long total_bp int i; if (total_bps > dev->pdata->max_Kbps[0]) { - mfc_debug(4, "[QoS] overspec bps %d > %d\n", + mfc_debug(4, "[QoS] overspec bps %lu > %d\n", total_bps, dev->pdata->max_Kbps[0]); return dev->bitrate_table[dev->pdata->num_mfc_freq - 1].mfc_freq; } @@ -992,7 +992,7 @@ static int __mfc_qos_get_bps_section(struct mfc_ctx *ctx, u32 bytesused) /* Standardization to high bitrate spec */ if (!CODEC_HIGH_PERF(ctx)) ctx->Kbps = dev->bps_ratio * ctx->Kbps; - mfc_debug(3, "[QoS] %d Kbps, average %lld Kbits per frame\n", ctx->Kbps, avg_Kbits); + mfc_debug(3, "[QoS] %d Kbps, average %lu Kbits per frame\n", ctx->Kbps, avg_Kbits); ctx->bitrate_index++; if (ctx->bitrate_index == MAX_TIME_INDEX) { From f1f862b0de4377b230fc6ca3897a59344fcb70dd Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:49:51 +0300 Subject: [PATCH 124/452] drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- .../media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c index 519d81fdd441..af057519206d 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-resourcemgr.c @@ -343,7 +343,7 @@ static int fimc_is_resourcemgr_allocmem(struct fimc_is_resourcemgr *resourcemgr) } minfo->total_size += minfo->pb_taaisp->size; - info("[RSC] TAAISP_DMA memory size (aligned) : %08lx\n", TAAISP_DMA_SIZE); + info("[RSC] TAAISP_DMA memory size (aligned) : %08lx\n", (unsigned long) TAAISP_DMA_SIZE); /* ME/DRC buffer */ #if (MEDRC_DMA_SIZE > 0) @@ -354,7 +354,7 @@ static int fimc_is_resourcemgr_allocmem(struct fimc_is_resourcemgr *resourcemgr) return -ENOMEM; } - info("[RSC] ME_DRC memory size (aligned) : %08lx\n", MEDRC_DMA_SIZE); + info("[RSC] ME_DRC memory size (aligned) : %08lx\n", (unsigned long) MEDRC_DMA_SIZE); minfo->total_size += minfo->pb_medrc->size; #endif @@ -623,7 +623,7 @@ static int fimc_is_resourcemgr_alloc_secure_mem(struct fimc_is_resourcemgr *reso return -ENOMEM; } - info("[RSC] TAAISP_DMA_S memory size (aligned) : %08lx\n", TAAISP_DMA_SIZE); + info("[RSC] TAAISP_DMA_S memory size (aligned) : %08lx\n", (unsigned long) TAAISP_DMA_SIZE); /* ME/DRC buffer */ #if (MEDRC_DMA_SIZE > 0) @@ -636,7 +636,7 @@ static int fimc_is_resourcemgr_alloc_secure_mem(struct fimc_is_resourcemgr *reso return -ENOMEM; } - info("[RSC] ME_DRC_S memory size (aligned) : %08lx\n", MEDRC_DMA_SIZE); + info("[RSC] ME_DRC_S memory size (aligned) : %08lx\n", (unsigned long) MEDRC_DMA_SIZE); #endif return 0; From 5a74a13c78fb5578f6384b903bc82f9ed3b1cc3d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 18:22:34 +0300 Subject: [PATCH 125/452] lib/debug-snapshot: fix printk format errors 
Signed-off-by: Denis Efremov <efremov@linux.com> --- lib/debug-snapshot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/debug-snapshot.c b/lib/debug-snapshot.c index 3078480e4a13..bb5faf1869f1 100644 --- a/lib/debug-snapshot.c +++ b/lib/debug-snapshot.c @@ -493,7 +493,7 @@ static int dbg_snapshot_sfr_dump_init(struct device_node *np) static int __init dbg_snapshot_remap(void) { - unsigned long i, j; + size_t i, j; unsigned long flags = VM_NO_GUARD | VM_MAP; unsigned int enabled_count = 0; pgprot_t prot = __pgprot(PROT_NORMAL_NC); @@ -508,7 +508,7 @@ static int __init dbg_snapshot_remap(void) page_size = dss_items[i].entry.size / PAGE_SIZE; pages = kzalloc(sizeof(struct page *) * page_size, GFP_KERNEL); page = phys_to_page(dss_items[i].entry.paddr); - pr_info("%s: %2d: paddr: 0x%x\n", __func__, i, dss_items[i].entry.paddr); + pr_info("%s: %2zu: paddr: 0x%lx\n", __func__, i, dss_items[i].entry.paddr); for (j = 0; j < page_size; j++) pages[j] = page++; From 0e192775beb0510dafee01c9da28e515c786744e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:47:30 +0300 Subject: [PATCH 126/452] drivers/ccic/max77705_pd: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/ccic/max77705_pd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/ccic/max77705_pd.c b/drivers/ccic/max77705_pd.c index 9323e3462322..4af6bc1dd884 100644 --- a/drivers/ccic/max77705_pd.c +++ b/drivers/ccic/max77705_pd.c @@ -611,7 +611,7 @@ void max77705_pdo_list(struct max77705_usbc_platform_data *usbc_data, unsigned c } if (usbc_data->pd_data->pdo_list && do_power_nego) { - pr_info("%s : PDO list is changed, so power negotiation is need\n", + pr_info("%s : PDO list is changed selected_pdo_num(%d), so power negotiation is need\n", __func__, pd_noti.sink_status.selected_pdo_num); pd_noti.sink_status.selected_pdo_num = 0; pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP; @@ -706,7 +706,7 @@ void max77705_current_pdo(struct max77705_usbc_platform_data *usbc_data, unsigne pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK; if (usbc_data->pd_data->pdo_list && do_power_nego) { - pr_info("%s : PDO list is changed, so power negotiation is need\n", + pr_info("%s : PDO list is changed selected_pdo_num(%d), so power negotiation is need\n", __func__, pd_noti.sink_status.selected_pdo_num); pd_noti.sink_status.selected_pdo_num = 0; pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP; @@ -795,7 +795,7 @@ void max77705_current_pdo(struct max77705_usbc_platform_data *usbc_data, unsigne } if (usbc_data->pd_data->pdo_list && do_power_nego) { - pr_info("%s : PDO list is changed, so power negotiation is need\n", + pr_info("%s : PDO list is changed selected_pdo_num(%d), so power negotiation is need\n", __func__, pd_noti.sink_status.selected_pdo_num); pd_noti.sink_status.selected_pdo_num = 0; pd_noti.event = PDIC_NOTIFY_EVENT_PD_SINK_CAP; From 13d8a045e15caff9539ebbdc293d50946cb0487e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:49:58 +0300 Subject: [PATCH 127/452] drivers/motor/cs40l2x: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/motor/cs40l2x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/motor/cs40l2x.c b/drivers/motor/cs40l2x.c index 58873f078b12..092b05e61d28 100644 --- a/drivers/motor/cs40l2x.c +++ b/drivers/motor/cs40l2x.c @@ -3353,7 +3353,7 @@ static ssize_t cs40l2x_motor_type_show(struct device *dev, static ssize_t 
cs40l2x_event_cmd_show(struct device *dev, struct device_attribute *attr, char *buf) { - pr_info("%s: [%d] %s\n", __func__, sec_prev_event_cmd); + pr_info("%s: %s\n", __func__, sec_prev_event_cmd); return snprintf(buf, MAX_STR_LEN_EVENT_CMD, "%s\n", sec_prev_event_cmd); } From 450530cde48f68bacf67ef163cd00351fcec53cf Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 13 Sep 2020 23:54:40 +0300 Subject: [PATCH 128/452] drivers/samsung/sec_dump_sink: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/sec_dump_sink.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/samsung/sec_dump_sink.c b/drivers/samsung/sec_dump_sink.c index 8ccde8d8a7fd..167e0822e598 100644 --- a/drivers/samsung/sec_dump_sink.c +++ b/drivers/samsung/sec_dump_sink.c @@ -58,7 +58,7 @@ static void sec_free_rdx_bootdev(phys_addr_t paddr, u64 size) unsigned long pfn_start, pfn_end, pfn_idx; int ret; - pr_info("start (0x%p, 0x%llx)\n", paddr, size); + pr_info("start (0x%p, 0x%llx)\n", (void *)paddr, size); if (!sec_rdx_bootdev_paddr) { pr_err("reserved addr is null\n"); @@ -128,7 +128,7 @@ static ssize_t sec_rdx_bootdev_proc_write(struct file *file, err = -ENODEV; } else { if (count > sec_rdx_bootdev_size) { - pr_err("size is wrong %llu > %llu\n", count, sec_rdx_bootdev_size); + pr_err("size is wrong %lu > %u\n", count, sec_rdx_bootdev_size); err = -EINVAL; goto out; } From f2281a245b3e3bf70fbd99bf23ac115bbd3ba94e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:43:19 +0300 Subject: [PATCH 129/452] drivers/scsi/scsi_lib: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/scsi/scsi_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b622637ac9df..ea5d94ce63df 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1462,7 +1462,7 @@ void scsi_alloc_tw(struct scsi_device *sdev) blk_alloc_turbo_write(sdev->request_queue); blk_register_tw_try_on_fn(sdev->request_queue, scsi_tw_try_on_fn); blk_register_tw_try_off_fn(sdev->request_queue, scsi_tw_try_off_fn); - printk(KERN_INFO "%s: register scsi ufs tw interface for LU %d\n", + printk(KERN_INFO "%s: register scsi ufs tw interface for LU %llu\n", __func__, sdev->lun); } } From 6a54b7a202e146416a61eb86fb8c67e0ef6fb38d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:43:54 +0300 Subject: [PATCH 130/452] drivers/scsi/ufs/ufshcd: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 93a440212360..21af2b4bce2f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5549,7 +5549,7 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev) if (dLUNumTurboWriteBufferAllocUnits) { sdev->support_tw_lu = true; - dev_info(hba->dev, "%s: LU %d supports tw, twbuf unit : 0x%x\n", + dev_info(hba->dev, "%s: LU %llu supports tw, twbuf unit : 0x%x\n", __func__, sdev->lun, dLUNumTurboWriteBufferAllocUnits); } else sdev->support_tw_lu = false; From acb895160e4c2600eb3fe9320aa3af2851ce111c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:45:54 +0300 Subject: [PATCH 131/452] drivers/sensorhub/brcm/bbdpl/bbd: fix printk format errors Signed-off-by: Denis Efremov 
<efremov@linux.com> --- drivers/sensorhub/brcm/bbdpl/bbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/bbdpl/bbd.c b/drivers/sensorhub/brcm/bbdpl/bbd.c index a103214c9a04..d29857429971 100644 --- a/drivers/sensorhub/brcm/bbdpl/bbd.c +++ b/drivers/sensorhub/brcm/bbdpl/bbd.c @@ -924,7 +924,7 @@ ssize_t bbd_urgent_patch_read(struct file *user_filp, char __user *buf, size_t s rd_size = urgent_patch_size - offset; // 02-3. read requested size of urget_patch - pr_info("[SSPBBD] %s : download in progress (%d/%d)", __func__, offset + rd_size, urgent_patch_size); + pr_info("[SSPBBD] %s : download in progress (%lu/%d)", __func__, offset + rd_size, urgent_patch_size); if(copy_to_user(buf, (void *)(urgent_buffer + offset), rd_size)) { pr_info("[SSPBBD] %s : copy to user from urgent_buffer", __func__); From 7d77f554443023efab1b5e1776a55bafd851cb81 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:50:59 +0300 Subject: [PATCH 132/452] drivers/soc/samsung/debug: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/debug/exynos-ehld.c | 14 +++++++------- drivers/soc/samsung/debug/exynos-helper.c | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/soc/samsung/debug/exynos-ehld.c b/drivers/soc/samsung/debug/exynos-ehld.c index 8da790bfe836..1dc2a1fc8b31 100644 --- a/drivers/soc/samsung/debug/exynos-ehld.c +++ b/drivers/soc/samsung/debug/exynos-ehld.c @@ -98,8 +98,8 @@ static int exynos_ehld_start_cpu(unsigned int cpu) event = perf_event_create_kernel_counter(&exynos_ehld_attr, cpu, NULL, exynos_ehld_callback, NULL); if (IS_ERR(event)) { - ehld_printk(0, "@%s: cpu%d event make failed err:%d\n", - __func__, cpu, (int)event); + ehld_printk(0, "@%s: cpu%d event make failed err: %ld\n", + __func__, cpu, PTR_ERR(event)); return PTR_ERR(event); } else { ehld_printk(0, "@%s: cpu%d event make success\n", __func__, cpu); @@ -142,7 +142,7 @@ unsigned long long exynos_ehld_event_read_cpu(int cpu) if (!in_irq() && event) { total = perf_event_read_value(event, &enabled, &running); - ehld_printk(0, "%s: cpu%d - enabled: %zx, running: %zx, total: %zx\n", + ehld_printk(0, "%s: cpu%d - enabled: %llx, running: %llx, total: %llx\n", __func__, cpu, enabled, running, total); } return total; @@ -170,12 +170,12 @@ void exynos_ehld_event_raw_update_allcpu(void) data->time[count] = cpu_clock(cpu); if (cpu_is_offline(cpu) || !exynos_cpu.power_state(cpu) || !ctrl->ehld_running) { - ehld_printk(0, "%s: cpu%d is turned off : running:%x, power:%x, offline:%x\n", + ehld_printk(0, "%s: cpu%d is turned off : running:%x, power:%x, offline:%lx\n", __func__, cpu, ctrl->ehld_running, exynos_cpu.power_state(cpu), cpu_is_offline(cpu)); data->event[count] = 0xC2; data->pmpcsr[count] = 0; } else { - ehld_printk(0, "%s: cpu%d is turned on : running:%x, power:%x, offline:%x\n", + ehld_printk(0, "%s: cpu%d is turned on : running:%x, power:%x, offline:%lx\n", __func__, cpu, ctrl->ehld_running, exynos_cpu.power_state(cpu), cpu_is_offline(cpu)); DBG_UNLOCK(ctrl->dbg_base + PMU_OFFSET); val = __raw_readq(ctrl->dbg_base + PMU_OFFSET + PMUPCSR); @@ -186,7 +186,7 @@ void exynos_ehld_event_raw_update_allcpu(void) DBG_LOCK(ctrl->dbg_base + PMU_OFFSET); } raw_spin_unlock_irqrestore(&ctrl->lock, flags); - ehld_printk(0, "%s: cpu%d - time:%llu, event:0x%x\n", + ehld_printk(0, "%s: cpu%d - time:%llu, event:0x%llx\n", __func__, cpu, data->time[count], data->event[count]); } } @@ -444,7 +444,7 @@ static int 
exynos_ehld_init_dt_parse(struct device_node *np) return -ENOMEM; } - ehld_printk(1, "exynos-ehld: cpu#%d, cs_base:0x%x, dbg_base:0x%x, total:0x%x, ioremap:0x%x\n", + ehld_printk(1, "exynos-ehld: cpu#%d, cs_base:0x%x, dbg_base:0x%x, total:0x%x, ioremap:0x%lx\n", cpu, base, offset, ehld_main.cs_base + offset, (unsigned long)ctrl->dbg_base); } diff --git a/drivers/soc/samsung/debug/exynos-helper.c b/drivers/soc/samsung/debug/exynos-helper.c index dd2dcac9c64b..db7b58d147a3 100644 --- a/drivers/soc/samsung/debug/exynos-helper.c +++ b/drivers/soc/samsung/debug/exynos-helper.c @@ -200,7 +200,7 @@ void exynos_err_parse(u32 reg_idx, u64 reg, struct err_variant_data *exynos_cpu_ valid = reg & BIT(exynos_cpu_err->valid_bit); if (!valid) { - pr_emerg("%s valid_bit(%d) is NOT set (0x%lx)\n", + pr_emerg("%s valid_bit(%d) is NOT set (0x%llx)\n", exynos_cpu_err->reg_name, exynos_cpu_err->valid_bit, valid); return; } @@ -216,7 +216,7 @@ void exynos_err_parse(u32 reg_idx, u64 reg, struct err_variant_data *exynos_cpu_ field = (reg & GENMASK_ULL(fld_end, fld_offset)) >> fld_offset; if (field != 0) - pr_emerg("%s (%d:%d) %s 0x%lx\n", + pr_emerg("%s (%d:%d) %s 0x%x\n", exynos_cpu_err->reg_name, fld_end, fld_offset, variant[i].fld_name, field); From eadb2f7ea71fd0d6db3e2a280be10ad8b40d4fef Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 11 Sep 2020 22:58:25 +0300 Subject: [PATCH 133/452] arch/arm64/mm/fault: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6b158b39af7d..90052a10f9b9 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -702,7 +702,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) inf = esr_to_fault_info(esr); - pr_auto(ASL1, "%s (0x%08x) at 0x%016lx[0x%09lx]\n", + pr_auto(ASL1, "%s (0x%08x) at 0x%016lx[0x%09llx]\n", inf->name, esr, addr, show_virt_to_phys(addr)); /* * Synchronous aborts may interrupt code which had interrupts masked. 
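The printk format patches above all apply the same few kernel conventions for matching an argument's width to its specifier. A minimal, hypothetical sketch of those rules follows — the function and values are illustrative only, not taken from any of the drivers touched here:

#include <linux/kernel.h>
#include <linux/err.h>

/* Conventions applied by the patches above (illustrative sketch):
 *   size_t                    -> %zu / %zx
 *   u64 / unsigned long long  -> %llu / %llx
 *   unsigned long             -> %lu / %lx
 *   error from an ERR_PTR     -> print PTR_ERR(p) with %ld
 */
static void printk_format_sketch(void)
{
	size_t len = 4096;
	u64 total = 123456789ULL;
	unsigned long flags = 0x10UL;
	void *p = ERR_PTR(-ENOMEM);

	pr_info("len=%zu total=%llx flags=%lx err=%ld\n",
		len, total, flags, PTR_ERR(p));
}
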
From 349f97a008091de8d9adef693052724642ce23f8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 11 Sep 2020 23:02:25 +0300 Subject: [PATCH 134/452] drivers/media/platform/exynos/mfc/mfc_enc_ctrl: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c b/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c index 6412c57f920a..b95520c935fc 100644 --- a/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c +++ b/drivers/media/platform/exynos/mfc/mfc_enc_ctrl.c @@ -1157,7 +1157,7 @@ static void __mfc_enc_set_buf_ctrls_exception(struct mfc_ctx *ctx, value &= ~(0xFFFF); value |= (p->rc_frame_delta & 0xFFFF); MFC_WRITEL(value, MFC_REG_E_RC_FRAME_RATE); - mfc_debug(3, "[DROPCTRL] fps %d -> %d, delta: %d, reg: %#x\n", + mfc_debug(3, "[DROPCTRL] fps %d -> %ld, delta: %d, reg: %#x\n", p->rc_framerate, USEC_PER_SEC / ctx->ts_last_interval, p->rc_frame_delta, value); } @@ -1478,7 +1478,7 @@ static int mfc_enc_set_buf_ctrls_val_nal_q(struct mfc_ctx *ctx, pInStr->RcFrameRate &= ~(buf_ctrl->mask << buf_ctrl->shft); pInStr->RcFrameRate |= (p->rc_frame_delta & buf_ctrl->mask) << buf_ctrl->shft; - mfc_debug(3, "[NALQ][DROPCTRL] fps %d -> %d, delta: %d, reg: %#x\n", + mfc_debug(3, "[NALQ][DROPCTRL] fps %d -> %ld, delta: %d, reg: %#x\n", p->rc_framerate, USEC_PER_SEC / ctx->ts_last_interval, p->rc_frame_delta, pInStr->RcFrameRate); break; From 61e44991942c2c570f7eebf5605506a2665da12a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 11 Sep 2020 23:04:52 +0300 Subject: [PATCH 135/452] drivers/media/platform/exynos/tsmux/tsmux_reg: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/platform/exynos/tsmux/tsmux_reg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/tsmux/tsmux_reg.c b/drivers/media/platform/exynos/tsmux/tsmux_reg.c index 5b04d6657a1a..e1f2fbdcab06 100644 --- a/drivers/media/platform/exynos/tsmux/tsmux_reg.c +++ b/drivers/media/platform/exynos/tsmux/tsmux_reg.c @@ -287,7 +287,7 @@ void tsmux_print_cmu_mfc_sfr(struct tsmux_device *tsmux_dev) { for (i = 0; i < tsmux_cmu_mfc_sfr_list_size; i++) { cmu_mfc_sfr = TSMUX_CMU_MFC_READL(tsmux_cmu_mfc_sfr_list[i].offset); - print_tsmux(TSMUX_SFR, "%.8x: %.8x: %.8x, %s\n", + print_tsmux(TSMUX_SFR, "%.8llx: %.8x: %.8x, %s\n", tsmux_cmu_mfc_sfr_list[i].base_pa, tsmux_cmu_mfc_sfr_list[i].offset, cmu_mfc_sfr, From 4d2beda76d9eee21425efaa63bd32f8e6a13016f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 11 Sep 2020 23:06:47 +0300 Subject: [PATCH 136/452] fs/f2fs/inode: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/f2fs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 07c99ecb87dc..c2ea7d70060c 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -430,7 +430,7 @@ static int do_read_inode(struct inode *inode) corrupted_inode: printk_ratelimited(KERN_ERR "F2FS-fs: On-disk inode is corrupted: " - "err: %ld, inode: %u, first Non-zero: %lu\n", + "err: %d, inode: %lu, first Non-zero: %lu\n", err, inode->i_ino, find_first_bit(page_address(node_page), F2FS_BLKSIZE)); print_block_data(sbi->sb, node_page->index, @@ -439,7 +439,7 @@ static int do_read_inode(struct inode *inode) if (unlikely(!ignore_fs_panic)) { 
f2fs_set_sb_extra_flag(sbi, F2FS_SEC_EXTRA_FSCK_MAGIC); #ifdef CONFIG_F2FS_STRICT_BUG_ON - panic("F2FS 0x%p %x", + panic("F2FS 0x%p %lx", page_address(node_page), find_first_bit(page_address(node_page), F2FS_BLKSIZE*8)); #else From 4458e28ef96cd49d9e70aa1d1352b9cf61152a56 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:33:45 +0300 Subject: [PATCH 137/452] drivers/samsung/debug: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/debug/sec_debug.c | 2 +- drivers/samsung/debug/sec_debug_dtask.c | 2 +- drivers/samsung/debug/sec_debug_hist.c | 4 ++-- drivers/samsung/debug/sec_debug_init_log.c | 2 +- drivers/samsung/debug/sec_debug_test.c | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/samsung/debug/sec_debug.c b/drivers/samsung/debug/sec_debug.c index 6d644157da12..5be4a0afe015 100644 --- a/drivers/samsung/debug/sec_debug.c +++ b/drivers/samsung/debug/sec_debug.c @@ -891,7 +891,7 @@ static void sec_debug_set_essinfo(void) init_ess_info(index++, "empty"); for (index = 0; index < SD_NR_ESSINFO_ITEMS; index++) - printk("%s: key: %s offset: %llx nr: %x\n", __func__, + printk("%s: key: %s offset: %lx nr: %x\n", __func__, sdn->ss_info.item[index].key, sdn->ss_info.item[index].base, sdn->ss_info.item[index].nr); diff --git a/drivers/samsung/debug/sec_debug_dtask.c b/drivers/samsung/debug/sec_debug_dtask.c index 577519d8ab60..37767cfca0c4 100644 --- a/drivers/samsung/debug/sec_debug_dtask.c +++ b/drivers/samsung/debug/sec_debug_dtask.c @@ -25,7 +25,7 @@ static void sec_debug_print_mutex_info(struct task_struct *task, struct sec_debu pr_info("Mutex: %pS", wmutex); if (owner_task) { if (raw) - pr_cont(": owner[0x%lx %s :%d]", owner_task, owner_task->comm, owner_task->pid); + pr_cont(": owner[%p %s :%d]", owner_task, owner_task->comm, owner_task->pid); else pr_cont(": owner[%s :%d]", owner_task->comm, owner_task->pid); } diff --git a/drivers/samsung/debug/sec_debug_hist.c b/drivers/samsung/debug/sec_debug_hist.c index 3e88f40b7654..2fb02db8136e 100644 --- a/drivers/samsung/debug/sec_debug_hist.c +++ b/drivers/samsung/debug/sec_debug_hist.c @@ -42,7 +42,7 @@ static ssize_t sec_dhist_read(struct file *file, char __user *buf, } if (pos >= dhist_size) { - pr_crit("%s: pos %x , dhist: %x\n", __func__, pos, dhist_size); + pr_crit("%s: pos %lld, dhist: %x\n", __func__, pos, dhist_size); ret = 0; @@ -53,7 +53,7 @@ static ssize_t sec_dhist_read(struct file *file, char __user *buf, base = (char *)phys_to_virt((phys_addr_t)dhist_base); if (!base) { - pr_crit("%s: fail to get va (%llx)\n", __func__, dhist_base); + pr_crit("%s: fail to get va (%lx)\n", __func__, dhist_base); ret = -EFAULT; diff --git a/drivers/samsung/debug/sec_debug_init_log.c b/drivers/samsung/debug/sec_debug_init_log.c index 7b7c2553f3de..e99899ebdfef 100644 --- a/drivers/samsung/debug/sec_debug_init_log.c +++ b/drivers/samsung/debug/sec_debug_init_log.c @@ -50,7 +50,7 @@ static int __init sec_debug_init_init_log(void) buf_ptr = (char *)phys_to_virt((sec_debug_get_buf_base(SDN_MAP_INITTASK_LOG))); buf_size = sec_debug_get_buf_size(SDN_MAP_INITTASK_LOG); - pr_err("%s: buffer size 0x%llx at addr 0x%llx\n", __func__, buf_size ,buf_ptr); + pr_err("%s: buffer size 0x%lx at addr %p\n", __func__, buf_size ,buf_ptr); if (!buf_ptr || !buf_size) return 0; diff --git a/drivers/samsung/debug/sec_debug_test.c b/drivers/samsung/debug/sec_debug_test.c index 552449bcb020..96245ce6bf18 100644 --- a/drivers/samsung/debug/sec_debug_test.c +++ 
b/drivers/samsung/debug/sec_debug_test.c @@ -664,7 +664,7 @@ static void simulate_SAFEFAULT(char *arg) smp_call_function(simulate_ALLSPIN_LOCKUP_handler, NULL, 0); - pr_info("%s %p %s %d %p %p %llx\n", + pr_info("%s %p %s %d %p %p %lx\n", __func__, current, current->comm, current->pid, current_thread_info(), current->stack, current_stack_pointer); From 31ca81e1aee99f63febd117e408a4e8660ffdcd3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 10:56:50 +0300 Subject: [PATCH 138/452] drivers/soc/samsung: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos-ppmpu.c | 4 ++-- drivers/soc/samsung/exynos-sci.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/soc/samsung/exynos-ppmpu.c b/drivers/soc/samsung/exynos-ppmpu.c index d16b31f32282..533ae3db658b 100644 --- a/drivers/soc/samsung/exynos-ppmpu.c +++ b/drivers/soc/samsung/exynos-ppmpu.c @@ -231,7 +231,7 @@ static int exynos_ppmpu_probe(struct platform_device *pdev) break; case PPMPU_ERROR_INVALID_FAIL_INFO_SIZE: dev_err(data->dev, - "The size of struct ppmpu_fail_info(%#x) is invalid\n", + "The size of struct ppmpu_fail_info(%#lx) is invalid\n", sizeof(struct ppmpu_fail_info)); break; case SMC_CMD_CHECK_PPMPU_CH_NUM: @@ -277,7 +277,7 @@ static int exynos_ppmpu_probe(struct platform_device *pdev) "VA of ppmpu_fail_info : %lx\n", (unsigned long)data->fail_info); dev_dbg(data->dev, - "PA of ppmpu_fail_info : %lx\n", + "PA of ppmpu_fail_info : %llx\n", data->fail_info_pa); ret = of_property_read_u32(data->dev->of_node, "irqcnt", &data->irqcnt); diff --git a/drivers/soc/samsung/exynos-sci.c b/drivers/soc/samsung/exynos-sci.c index 9f3a7b2bd870..e5eeb5d6afd6 100644 --- a/drivers/soc/samsung/exynos-sci.c +++ b/drivers/soc/samsung/exynos-sci.c @@ -585,14 +585,14 @@ void sci_error_dump(void) exynos_sci_err_parse(SCI_ERRSTATHI, sci_reg); pr_info("SCI_ErrStatLo : %08x\n", sci_reg = __raw_readl(sci_base + SCI_ErrStatLo)); exynos_sci_err_parse(SCI_ERRSTATLO, sci_reg); - pr_info("SCI_ErrAddr(Hi,Lo): %08x %08x\n", + pr_info("SCI_ErrAddr(Hi,Lo): %08lx %08x\n", sci_reg_hi = __raw_readl(sci_base + SCI_ErrAddrHi), sci_reg = __raw_readl(sci_base + SCI_ErrAddrLo)); sci_reg_addr = sci_reg + (MSB_MASKING & (sci_reg_hi << 32L)); sci_ns = (ERR_NS & sci_reg_hi) >> 8; sci_err_inj = (ERR_INJ_DONE & sci_reg_hi) >> 31; - pr_info("SCI_ErrAddr : %016lx (NS:%d, ERR_INJ:%d)\n", sci_reg_addr, sci_err_inj); + pr_info("SCI_ErrAddr : %016lx (NS:%u, ERR_INJ:%u)\n", sci_reg_addr, sci_ns, sci_err_inj); exynos_dump_common_cpu_reg(); pr_info("============================================================\n"); } From 3dbcfe12249c79cde4d578f42ddef7118402b003 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 11:46:38 +0300 Subject: [PATCH 139/452] drivers/video/fbdev/exynos/panel/panel_drv: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/panel_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/exynos/panel/panel_drv.c b/drivers/video/fbdev/exynos/panel/panel_drv.c index 3c4465693b8e..e36ec272c3ba 100644 --- a/drivers/video/fbdev/exynos/panel/panel_drv.c +++ b/drivers/video/fbdev/exynos/panel/panel_drv.c @@ -2480,14 +2480,14 @@ static int of_get_panel_gpio(struct device_node *np, struct panel_gpio *gpio) if ((gpio->dir & GPIOF_DIR_IN) == GPIOF_DIR_OUT) { ret = gpio_request(gpio->num, gpio->name); if (ret < 0) { - 
panel_err("PANEL:ERR:%s:failed to request gpio(%s:%d)\n", + panel_err("PANEL:ERR:%s:failed to request gpio(%d:%s)\n", __func__, gpio->num, gpio->name); return ret; } } else { ret = gpio_request_one(gpio->num, GPIOF_IN, gpio->name); if (ret < 0) { - panel_err("PANEL:ERR:%s:failed to request gpio(%s:%d)\n", + panel_err("PANEL:ERR:%s:failed to request gpio(%d:%s)\n", __func__, gpio->num, gpio->name); return ret; } From c7d4d5d740a109d79a008aa7242d241cf2c498db Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 12:14:04 +0300 Subject: [PATCH 140/452] sound/soc/samsung/abox/abox_cmpnt_v20: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/samsung/abox/abox_cmpnt_v20.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox_cmpnt_v20.c b/sound/soc/samsung/abox/abox_cmpnt_v20.c index c6203af533c4..064357a837be 100644 --- a/sound/soc/samsung/abox/abox_cmpnt_v20.c +++ b/sound/soc/samsung/abox/abox_cmpnt_v20.c @@ -2612,7 +2612,7 @@ static int asrc_update_tick(struct abox_data *data, int stream, int id) int ticknum, tickdiv; int i, res, ret = 0; - dev_dbg(dev, "%s(%d, %d, %ulHz)\n", __func__, stream, id, aclk); + dev_dbg(dev, "%s(%d, %d, %luHz)\n", __func__, stream, id, aclk); if (idx < 0) { dev_err(dev, "%s(%d, %d): invalid idx: %d\n", __func__, From c54a7084aebc030919d3cc344804ed5b5702a9c1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 12:14:31 +0300 Subject: [PATCH 141/452] sound/soc/samsung/sec_audio_sysfs: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/samsung/sec_audio_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/samsung/sec_audio_sysfs.c b/sound/soc/samsung/sec_audio_sysfs.c index 814b037e1551..28994d5f0b1b 100644 --- a/sound/soc/samsung/sec_audio_sysfs.c +++ b/sound/soc/samsung/sec_audio_sysfs.c @@ -249,7 +249,7 @@ static int __init sec_audio_sysfs_init(void) audio_data->jack_dev = NULL; --dev_id; } else { - pr_info("%s: create earjack device id(%lu)\n", + pr_info("%s: create earjack device id(%u)\n", __func__, dev_id); audio_data->jack_dev_id = dev_id; } @@ -275,7 +275,7 @@ static int __init sec_audio_sysfs_init(void) audio_data->codec_dev = NULL; --dev_id; } else { - pr_info("%s: create codec device id(%lu)\n", + pr_info("%s: create codec device id(%u)\n", __func__, dev_id); audio_data->codec_dev_id = dev_id; } From a40160d858fd233c8dab11696aa83ae1dc05adb7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 09:24:08 +0300 Subject: [PATCH 142/452] drivers/optics/tcs3407: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/optics/tcs3407.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/optics/tcs3407.c b/drivers/optics/tcs3407.c index c09280954711..08f87c5368f1 100644 --- a/drivers/optics/tcs3407.c +++ b/drivers/optics/tcs3407.c @@ -2392,7 +2392,7 @@ static int tcs3407_eol_mode(struct tcs3407_device_data *data) s2mpb02_led_en(S2MPB02_TORCH_LED_1, 0, S2MPB02_LED_TURN_WAY_GPIO); gpio_free(data->pin_led_en); } else { - ALS_dbg("%s - PWM torch set 0x%x 0x%x\n", __func__, data->pinctrl_pwm, data->pinctrl_out); + ALS_dbg("%s - PWM torch set 0x%p 0x%p\n", __func__, data->pinctrl_pwm, data->pinctrl_out); pinctrl_select_state(data->als_pinctrl, data->pinctrl_pwm); pwm_get_state(data->pwm, &state); @@ -2440,7 +2440,7 @@ static int tcs3407_eol_mode(struct 
tcs3407_device_data *data) pwm_apply_state(data->pwm, &state); - ALS_dbg("%s - pinctrl out = 0x%x\n", __func__, data->pinctrl_out); + ALS_dbg("%s - pinctrl out = 0x%p\n", __func__, data->pinctrl_out); pinctrl_select_state(data->als_pinctrl, data->pinctrl_out); } @@ -3054,14 +3054,14 @@ static int tcs3407_parse_dt(struct tcs3407_device_data *data) data->pinctrl_out = pinctrl_lookup_state(data->als_pinctrl, "torch_out"); if (IS_ERR(data->pinctrl_pwm) || IS_ERR(data->pinctrl_out)) { - ALS_err("%s - Failed to get pinctrl for pwm, %d %d\n", + ALS_err("%s - Failed to get pinctrl for pwm, %ld %ld\n", __func__, PTR_ERR(data->pinctrl_pwm), PTR_ERR(data->pinctrl_out)); data->pinctrl_pwm = NULL; data->pinctrl_out = NULL; } else { data->pwm = devm_of_pwm_get(dev, dNode, NULL); if (IS_ERR(data->pwm)) { - ALS_err("%s - unable to request PWM %d\n", __func__, PTR_ERR(data->pwm)); + ALS_err("%s - unable to request PWM %ld\n", __func__, PTR_ERR(data->pwm)); data->pwm = NULL; } } From a89de7d8a6aa80697b0fcb8023b427c9ef27deec Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:17:56 +0300 Subject: [PATCH 143/452] drivers/misc/mcu_ipc/shm_ipc: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/misc/mcu_ipc/shm_ipc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/misc/mcu_ipc/shm_ipc.c b/drivers/misc/mcu_ipc/shm_ipc.c index d427e987d433..641749bfa9ca 100644 --- a/drivers/misc/mcu_ipc/shm_ipc.c +++ b/drivers/misc/mcu_ipc/shm_ipc.c @@ -1031,16 +1031,16 @@ static int shm_probe(struct platform_device *pdev) pdata.p_acpm_addr, pdata.acpm_size); #ifdef CONFIG_LINK_DEVICE_PCIE - dev_info(dev, "msi_base=0x%08X msi_size=0x%08X\n", + dev_info(dev, "msi_base=0x%08lX msi_size=0x%08X\n", pdata.p_msi_addr, pdata.t_msi_size); #endif #ifdef CONFIG_SEC_SIPC_DUAL_MODEM_IF - dev_info(dev, "s5100_ipc_base=0x%08X s5100_ipc_size=0x%08X\n", + dev_info(dev, "s5100_ipc_base=0x%08lX s5100_ipc_size=0x%08X\n", pdata.p_s5100_ipc_addr, pdata.t_s5100_ipc_size); - dev_info(dev, "s5100_cp2cp_addr=0x%08X s5100_cp2cp_size=0x%08X s5100_cp2cp_offset=0x%08X\n", - pdata.p_s5100_ipc_addr, pdata.t_s5100_ipc_size, + dev_info(dev, "s5100_cp2cp_addr=0x%08lX s5100_cp2cp_size=0x%08X s5100_cp2cp_offset=0x%08X\n", + pdata.p_s5100_cp2cp_addr, pdata.t_s5100_cp2cp_size, pdata.s5100_cp2cp_off); #endif From f193e0a4ab3e585bb4b738f3a98b990fa03ee6b2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:20:05 +0300 Subject: [PATCH 144/452] drivers/misc/modem_v1_dual: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/misc/modem_v1_dual/link_device_shmem.c | 4 ++-- drivers/misc/modem_v1_dual/modem_ctrl_s5100.c | 2 +- drivers/misc/modem_v1_dual/s5100_pcie.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/misc/modem_v1_dual/link_device_shmem.c b/drivers/misc/modem_v1_dual/link_device_shmem.c index d90f212ba311..6ed6239ee8df 100644 --- a/drivers/misc/modem_v1_dual/link_device_shmem.c +++ b/drivers/misc/modem_v1_dual/link_device_shmem.c @@ -2453,7 +2453,7 @@ static int shmem_security_request(struct link_device *ld, struct io_device *iod, cp_init_done = 1; } - mif_err("mode=%lx, param2=0x%lx, param3=0x%lx, cp_base_addr=0x%lx\n", + mif_err("mode=%x, param2=0x%lx, param3=0x%lx, cp_base_addr=0x%lx\n", msr.mode, param2, param3, shm_get_phys_base()); err = exynos_smc(SMC_ID, msr.mode, param2, param3); @@ -2489,7 +2489,7 @@ static int 
shmem_security_cp2cp_baaw_request(struct link_device *ld, unsigned int cp2cp_size = shm_get_s5100_cp2cp_size(); unsigned int cp2cp_offset = shm_get_s5100_cp2cp_offset(); - mif_info("cp2cp_addr=0x%08X cp2cp_size=0x%08X cp2cp_offset=0x%08X\n", + mif_info("cp2cp_addr=0x%08lX cp2cp_size=0x%08X cp2cp_offset=0x%08X\n", cp2cp_base, cp2cp_size, cp2cp_offset); #if defined(CONFIG_CP_SECURE_BOOT) diff --git a/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c b/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c index c7742a2c20a1..95b10d948f7d 100644 --- a/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c +++ b/drivers/misc/modem_v1_dual/modem_ctrl_s5100.c @@ -1220,7 +1220,7 @@ static int s5100_pm_notifier(struct notifier_block *notifier, break; default: - mif_info("pm_event %d\n", pm_event); + mif_info("pm_event %lu\n", pm_event); break; } diff --git a/drivers/misc/modem_v1_dual/s5100_pcie.c b/drivers/misc/modem_v1_dual/s5100_pcie.c index fb8aa5a14884..bdcfa8af7f72 100644 --- a/drivers/misc/modem_v1_dual/s5100_pcie.c +++ b/drivers/misc/modem_v1_dual/s5100_pcie.c @@ -116,7 +116,7 @@ inline int s5100pcie_send_doorbell_int(int int_num) reg = ioread32(s5100pcie.doorbell_addr); /* debugging: */ - mif_debug("DBG: s5100pcie.doorbell_addr = 0x%x - written(int_num=0x%x) read(reg=0x%x)\n", \ + mif_debug("DBG: s5100pcie.doorbell_addr = 0x%p - written(int_num=0x%x) read(reg=0x%x)\n", \ s5100pcie.doorbell_addr, int_num, reg); if (reg == 0xffffffff) { @@ -400,7 +400,7 @@ static int s5100pcie_probe(struct pci_dev *pdev, s5100pcie.doorbell_addr = devm_ioremap_wc(&pdev->dev, 0x11000d20, SZ_4); - pr_info("s5100pcie.doorbell_addr = 0x%x (CONFIG_SOC_EXYNOS9820: 0x11000d20)\n", \ + pr_info("s5100pcie.doorbell_addr = 0x%p (CONFIG_SOC_EXYNOS9820: 0x11000d20)\n", \ s5100pcie.doorbell_addr); #else #error "Can't set Doorbell interrupt register!" 
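Several of the preceding patches (sec_dump_sink, shm_ipc, s5100_pcie) deal with the narrower case of printing addresses rather than plain integers. A hypothetical sketch of the convention they converge on — %p for ioremapped cookies, and either %pa (by reference) or an explicit widening cast for phys_addr_t; the names below are made up for illustration:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void addr_print_sketch(void __iomem *doorbell, phys_addr_t base)
{
	u32 val = ioread32(doorbell);

	/* ioremapped cookie: %p (hashed); register value: plain %x */
	pr_info("doorbell=%p val=0x%x\n", doorbell, val);

	/* phys_addr_t is 32 or 64 bit depending on the config;
	 * print it by reference with %pa ... */
	pr_info("base=%pa\n", &base);

	/* ... or cast it explicitly, as the patches above do */
	pr_info("base=0x%llx\n", (unsigned long long)base);
}
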
From 90e23690b4120836283a48a8d7924cdddeb3b825 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:29:59 +0300 Subject: [PATCH 145/452] drivers/input/wacom: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/input/wacom/wacom_i2c_elec.c | 26 +++++++++++----------- drivers/input/wacom/wacom_i2c_sec.c | 32 ++++++++++++++-------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/drivers/input/wacom/wacom_i2c_elec.c b/drivers/input/wacom/wacom_i2c_elec.c index 4c29498ee991..2781959c9f37 100644 --- a/drivers/input/wacom/wacom_i2c_elec.c +++ b/drivers/input/wacom/wacom_i2c_elec.c @@ -497,7 +497,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->xx_xx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xx_xx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -510,7 +510,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->xy_xy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xy_xy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -523,7 +523,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->yx_yx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yx_yx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -536,7 +536,7 @@ void print_cal_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->yy_yy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yy_yy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -568,7 +568,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->rxx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -581,7 +581,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->rxy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -594,7 +594,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->ryx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -607,7 +607,7 @@ void print_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->ryy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryy[i]); 
strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -639,7 +639,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->drxx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -652,7 +652,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->drxy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -665,7 +665,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->dryx[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryx[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -678,7 +678,7 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", edata->dryy[i]); + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryy[i]); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); } @@ -687,4 +687,4 @@ void print_difference_ratio_trx_data(struct wacom_i2c *wac_i2c) memset(buff, 0x00, buff_size); kfree(buff); -} \ No newline at end of file +} diff --git a/drivers/input/wacom/wacom_i2c_sec.c b/drivers/input/wacom/wacom_i2c_sec.c index 6729f07500cb..a70935be8052 100644 --- a/drivers/input/wacom/wacom_i2c_sec.c +++ b/drivers/input/wacom/wacom_i2c_sec.c @@ -1673,7 +1673,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xx_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1687,7 +1687,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xy_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1701,7 +1701,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yx_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1715,7 +1715,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yy_ref[i] * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1729,7 +1729,7 @@ static 
void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xx_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1743,7 +1743,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->xy_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1757,7 +1757,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yx_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1771,7 +1771,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->yy_spec[i] / POWER_OFFSET * power(edata->shift_value)); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1785,7 +1785,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxx_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1799,7 +1799,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->rxy_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1813,7 +1813,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryx_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1827,7 +1827,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->ryy_ref[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1841,7 +1841,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxx_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); 
memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1855,7 +1855,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_x_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->drxy_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1869,7 +1869,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryx_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); @@ -1883,7 +1883,7 @@ static void print_spec_data(struct wacom_i2c *wac_i2c) memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); for (i = 0; i < edata->max_y_ch; i++) { - snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%ld ", + snprintf(tmp_buf, CMD_RESULT_WORD_LEN, "%lld ", edata->dryy_spec[i] * power(edata->shift_value) / POWER_OFFSET); strlcat(buff, tmp_buf, buff_size); memset(tmp_buf, 0x00, CMD_RESULT_WORD_LEN); From a34949698432f13114e2f8c70cdeb5164f8d01dd Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:35:24 +0300 Subject: [PATCH 146/452] drivers/sensorhub/brcm/sx9360: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/sx9360.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/sx9360.c b/drivers/sensorhub/brcm/sx9360.c index 0889c58d3110..835f3cdac3a6 100644 --- a/drivers/sensorhub/brcm/sx9360.c +++ b/drivers/sensorhub/brcm/sx9360.c @@ -973,7 +973,7 @@ static ssize_t sx9360_normal_threshold_show(struct device *dev, break; } - return snprintf(buf, PAGE_SIZE, "%lu,%lu\n", + return snprintf(buf, PAGE_SIZE, "%u,%u\n", (u32)threshold + (u32)hyst, (u32)threshold - (u32)hyst); } From 31e082fc2f1b447ad0dde59958b8aaa475d0769a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 16:04:38 +0300 Subject: [PATCH 147/452] drivers/video/fbdev/exynos/dpu20/bts: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/bts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/bts.c b/drivers/video/fbdev/exynos/dpu20/bts.c index ed1c6d319762..dc91a8dc98cd 100644 --- a/drivers/video/fbdev/exynos/dpu20/bts.c +++ b/drivers/video/fbdev/exynos/dpu20/bts.c @@ -365,7 +365,7 @@ u64 dpu_bts_calc_aclk_disp(struct decon_device *decon, if ((aclk_disp > TSP_INTER_MIN) && (aclk_disp < TSP_INTER_MAX)) { - decon_dbg("aclk : %d -> %d\n", aclk_disp, ACLK_AVOID_INTER); + decon_dbg("aclk : %lld -> %d\n", aclk_disp, ACLK_AVOID_INTER); aclk_disp = ACLK_AVOID_INTER; } } From eb19cc3da10deff3527eb2c3745a094031f16be3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 3 Dec 2019 01:03:10 +0300 Subject: [PATCH 148/452] drivers/soc/samsung/exynos-seclog: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos-seclog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-seclog.c b/drivers/soc/samsung/exynos-seclog.c index 32b563fa85c2..157d86df4609 100644 --- a/drivers/soc/samsung/exynos-seclog.c +++ 
b/drivers/soc/samsung/exynos-seclog.c @@ -354,7 +354,7 @@ static int exynos_seclog_probe(struct platform_device *pdev) } dev_info(&pdev->dev, - "Message buffer address[PA : %#lx, VA : %#lx], Message buffer size[%#lx]\n", + "Message buffer address[PA : %#lx, VA : %p], Message buffer size[%#lx]\n", ldata.phys_addr, ldata.virt_addr, ldata.size); dev_info(&pdev->dev, "Exynos Secure Log driver probe done!\n"); From 799d5aa3b9fcbcb523bebb96c6aa2f4b67dd2c0e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 16:09:30 +0300 Subject: [PATCH 149/452] sound/soc/samsung/abox/abox_mmapfd: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/samsung/abox/abox_mmapfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox_mmapfd.c b/sound/soc/samsung/abox/abox_mmapfd.c index 2b048b7d39a2..1cd5cb9c9de4 100644 --- a/sound/soc/samsung/abox/abox_mmapfd.c +++ b/sound/soc/samsung/abox/abox_mmapfd.c @@ -120,7 +120,7 @@ int abox_ion_alloc(struct abox_platform_data *data, buf->kva); if (ret < 0) { - dev_err(dev, "Failed to iommu_map(%#lx): %d\n", + dev_err(dev, "Failed to iommu_map(%#llx): %d\n", buf->iova, ret); goto error_iommu_map_sg; From ccfa3124b81e628ab0baa724e909604de5f6b9cb Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 16:10:44 +0300 Subject: [PATCH 150/452] sound/soc/samsung/abox/abox_rdma: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/samsung/abox/abox_rdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox_rdma.c b/sound/soc/samsung/abox/abox_rdma.c index f02f667fdd9f..2e2af4591674 100644 --- a/sound/soc/samsung/abox/abox_rdma.c +++ b/sound/soc/samsung/abox/abox_rdma.c @@ -1425,7 +1425,7 @@ static int abox_rdma_hw_params(struct snd_pcm_substream *substream, dev_info(dev, "dma buffer changed\n"); } } else if (data->buf_type == BUFFER_TYPE_ION) { - dev_info(dev, "ion_buffer %s bytes(%d) size(%d)\n", + dev_info(dev, "ion_buffer %s bytes(%zu) size(%zu)\n", __func__, buffer_bytes, data->ion_buf.size); } else { From 53e362c746da896f52ded0e45d0647ee6c3fb1bc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 01:54:04 +0300 Subject: [PATCH 151/452] drivers/video/fbdev/exynos/dpu20/displayport_drv: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/displayport_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c index dc3f6efb739c..bf924a06d258 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c @@ -142,7 +142,7 @@ static u64 displayport_find_edid_max_pixelclock(void) supported_videos[i].dv_timings.bt.pixelclock > max_pclk) max_pclk = supported_videos[i].dv_timings.bt.pixelclock; } - displayport_info("find max pclk : %ld\n", max_pclk); + displayport_info("find max pclk : %lld\n", max_pclk); return max_pclk; } @@ -167,7 +167,7 @@ static int displayport_check_edid_max_clock(struct displayport_device *displaypo if (displayport->rx_edid_data.max_support_clk != 0) { if (calc_pixel_clock > displayport->rx_edid_data.max_support_clk * MHZ) { displayport_info("RX support Max TMDS Clock = %llu, but pixel clock = %llu\n", - displayport->rx_edid_data.max_support_clk * MHZ, calc_pixel_clock); + (u64) 
displayport->rx_edid_data.max_support_clk * MHZ, calc_pixel_clock); ret_val = false; } } else From 1b60df131f65f37f03a62ee84c2d13f6a7fb577f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 01:59:13 +0300 Subject: [PATCH 152/452] drivers/video/fbdev/exynos/dpu20/decon_core: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/decon_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 9d5d15e5c204..66dfd7ab1118 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -3513,7 +3513,7 @@ static int decon_ioctl(struct fb_info *info, unsigned int cmd, v4l2_subdev_call(decon->dpp_sd[i], core, ioctl, DPP_GET_RESTRICTION, &disp_res.dpp_ch[i]); - decon_info("DECON:INFO:%s:DPP_RESTRICTIONS:0x%x\n", + decon_info("DECON:INFO:%s:DPP_RESTRICTIONS:0x%lx\n", __func__, disp_res.dpp_ch[i].attr); } From b0ffd212f3b0bad1773e829ba41e386df33b7e99 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 09:12:05 +0300 Subject: [PATCH 153/452] drivers/video/fbdev/exynos/dpu20/event_log: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/event_log.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/event_log.c b/drivers/video/fbdev/exynos/dpu20/event_log.c index ffe5cb80d14e..029574147004 100644 --- a/drivers/video/fbdev/exynos/dpu20/event_log.c +++ b/drivers/video/fbdev/exynos/dpu20/event_log.c @@ -1987,7 +1987,7 @@ static int __init decon_event_log_setup(char *str) #else if (reserve_bootmem(base - 8, size + 8, BOOTMEM_EXCLUSIVE)) { #endif - pr_err("%s: failed reserving size %d at base 0x%lx\n", + pr_err("%s: failed reserving size %zu at base 0x%lx\n", __func__, size, base); goto setup_exit; } @@ -1997,7 +1997,7 @@ static int __init decon_event_log_setup(char *str) rdx_mem_size = size; pr_info("%s: *disp_rdx_log_ptr:%x\n", __func__, *rdx_mem_ptr); - pr_info("%s: disp_rdx_log_buf:%p disp_rdx_log_size:0x%llx\n", + pr_info("%s: disp_rdx_log_buf:%p disp_rdx_log_size:%zu\n", __func__, rdx_mem_buf, rdx_mem_size); return 1; @@ -2024,14 +2024,14 @@ int decon_create_debugfs(struct decon_device *decon) if (decon->id == 0) { decon->d.event_log_header = rdx_mem_alloc(sizeof(struct dpu_log_header)); if (IS_ERR_OR_NULL(decon->d.event_log_header)) { - decon_warn("failed to alloc event log header buf[%d]. retry\n", + decon_warn("failed to alloc event log header buf[%zu]. retry\n", sizeof(struct dpu_log_header)); continue; } real_size = sizeof(struct dpu_log_header) + sizeof(struct dpu_log) * event_cnt; - pr_info("%s alloc total size %llx\n", __func__, real_size); + pr_info("%s alloc total size %zu\n", __func__, real_size); if (real_size >= rdx_mem_size) { - decon_warn("failed to alloc because over size[%d]. retry\n", + decon_warn("failed to alloc because over size[%zu]. 
retry\n", real_size); continue; } From 17879e67dc56cdab63992d19c93bb2eaedbde668 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 16:00:41 +0300 Subject: [PATCH 154/452] drivers/vision/score/hardware/v3/score_scq: fix printk format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/vision/score/hardware/v3/score_packet.h | 2 +- drivers/vision/score/hardware/v3/score_scq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/vision/score/hardware/v3/score_packet.h b/drivers/vision/score/hardware/v3/score_packet.h index fa91a8a1d383..166865f5b65b 100644 --- a/drivers/vision/score/hardware/v3/score_packet.h +++ b/drivers/vision/score/hardware/v3/score_packet.h @@ -18,7 +18,7 @@ #define MIN_PACKET_SIZE (sizeof(struct score_host_packet) + \ sizeof(struct score_host_packet_info)) -#define MAX_PACKET_SIZE (2048) +#define MAX_PACKET_SIZE (2048UL) enum score_host_packet_version { HOST_PKT_V1 = 0x1, diff --git a/drivers/vision/score/hardware/v3/score_scq.c b/drivers/vision/score/hardware/v3/score_scq.c index 118c62e5ff41..d22bd41bc49e 100644 --- a/drivers/vision/score/hardware/v3/score_scq.c +++ b/drivers/vision/score/hardware/v3/score_scq.c @@ -472,7 +472,7 @@ static int __score_scq_translate_packet(struct score_frame *frame) if (packet_size < MIN_PACKET_SIZE || packet_size > MAX_PACKET_SIZE) { ret = -EINVAL; - score_err("packet size is invalid (%u/MIN:%zu/MAX:%zu)\n", + score_err("packet size is invalid (%u/MIN:%lu/MAX:%lu)\n", packet_size, MIN_PACKET_SIZE, MAX_PACKET_SIZE); goto p_err; } From 944bd0d8ec18bd5cc0060585a603162711149bd1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Nov 2022 20:55:36 +0400 Subject: [PATCH 155/452] drivers/video/fbdev/exynos/dpu20/displayport.h: fix missing brackets Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/displayport.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/displayport.h b/drivers/video/fbdev/exynos/dpu20/displayport.h index 3cd59c783af4..33bedbefed2e 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport.h +++ b/drivers/video/fbdev/exynos/dpu20/displayport.h @@ -74,9 +74,10 @@ extern int forced_resolution; #define displayport_info(fmt, ...) \ do { \ - if (displayport_log_level >= 6) \ + if (displayport_log_level >= 6) { \ pr_info("Displayport: " pr_fmt(fmt), ##__VA_ARGS__); \ dp_logger_print(fmt, ##__VA_ARGS__); \ + } \ } while (0) #define displayport_dbg(fmt, ...) 
\ From 2a17470681865febda73744e858455d231798b22 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:57:57 +0300 Subject: [PATCH 156/452] drivers/battery_v2/sec_battery: fix power_supply_propval init Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/battery_v2/sec_battery.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/battery_v2/sec_battery.c b/drivers/battery_v2/sec_battery.c index 8f4b15fa3422..5eb41950e4c8 100644 --- a/drivers/battery_v2/sec_battery.c +++ b/drivers/battery_v2/sec_battery.c @@ -4203,9 +4203,11 @@ static void sec_bat_wireless_minduty_cntl(struct sec_battery_info *battery, unsi static void sec_bat_wireless_uno_cntl(struct sec_battery_info *battery, bool en) { - union power_supply_propval value = {0, }; + union power_supply_propval value = { + .intval = en + }; - battery->uno_en = value.intval = en; + battery->uno_en = en; pr_info("@Tx_Mode %s : Uno control %d\n", __func__, battery->uno_en); if (value.intval) { From b0aba8d22e300adce51c944222202e918d9c5b7a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 29 Nov 2019 20:28:00 +0300 Subject: [PATCH 157/452] drivers/soc/samsung/exynos-hdcp/exynos-hdcp2: move global vars to .c file Signed-off-by: Denis Efremov <efremov@linux.com> --- .../soc/samsung/exynos-hdcp/exynos-hdcp2.c | 26 +++++++++++++++++++ .../soc/samsung/exynos-hdcp/exynos-hdcp2.h | 26 ++----------------- 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c index 738073446146..45958a0088d5 100644 --- a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c +++ b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.c @@ -55,6 +55,32 @@ uint32_t func_test_mode = 1; uint32_t func_test_mode; #endif +char *hdcp_session_st_str[] = { + "ST_INIT", + "ST_LINK_SETUP", + "ST_END", + NULL +}; + +char *hdcp_link_st_str[] = { + "ST_INIT", + "ST_H0_NO_RX_ATTACHED", + "ST_H1_TX_LOW_VALUE_CONTENT", + "ST_A0_DETERMINE_RX_HDCP_CAP", + "ST_A1_EXCHANGE_MASTER_KEY", + "ST_A2_LOCALITY_CHECK", + "ST_A3_EXCHANGE_SESSION_KEY", + "ST_A4_TEST_REPEATER", + "ST_A5_AUTHENTICATED", + "ST_A6_WAIT_RECEIVER_ID_LIST", + "ST_A7_VERIFY_RECEIVER_ID_LIST", + "ST_A8_SEND_RECEIVER_ID_LIST_ACK", + "ST_A9_CONTENT_STREAM_MGT", + "ST_END", + NULL +}; + + static long hdcp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int rval; diff --git a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h index fe497b8c2913..9c9aab5985d4 100644 --- a/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h +++ b/drivers/soc/samsung/exynos-hdcp/exynos-hdcp2.h @@ -41,30 +41,8 @@ #define TEMP_ERROR -ENOTTY -static char *hdcp_session_st_str[] = { - "ST_INIT", - "ST_LINK_SETUP", - "ST_END", - NULL -}; - -static char *hdcp_link_st_str[] = { - "ST_INIT", - "ST_H0_NO_RX_ATTACHED", - "ST_H1_TX_LOW_VALUE_CONTENT", - "ST_A0_DETERMINE_RX_HDCP_CAP", - "ST_A1_EXCHANGE_MASTER_KEY", - "ST_A2_LOCALITY_CHECK", - "ST_A3_EXCHANGE_SESSION_KEY", - "ST_A4_TEST_REPEATER", - "ST_A5_AUTHENTICATED", - "ST_A6_WAIT_RECEIVER_ID_LIST", - "ST_A7_VERIFY_RECEIVER_ID_LIST", - "ST_A8_SEND_RECEIVER_ID_LIST_ACK", - "ST_A9_CONTENT_STREAM_MGT", - "ST_END", - NULL -}; +extern char *hdcp_session_st_str[]; +extern char *hdcp_link_st_str[]; #define UPDATE_SESSION_STATE(sess, st) do { \ printk("[HDCP2]HDCP Session(%d): %s -> %s\n", sess->id, hdcp_session_st_str[sess->state], hdcp_session_st_str[st]); \ From 
3b7445dfc88318bee7018997669028db67408641 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 01:10:16 +0300 Subject: [PATCH 158/452] drivers/usb/core/config: fix print format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/usb/core/config.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 61086eb4eb43..caaa5d0ae022 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -225,7 +225,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if (d->bEndpointAddress != to_usb_device(ddev)->hwinfo.fb_in_ep) { to_usb_device(ddev)->hwinfo.in_ep = d->bEndpointAddress; - dev_info(ddev, " This is IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, " This is IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } else dev_info(ddev, "IN ISO endpoint is same with FB #0%x\n", @@ -233,19 +233,19 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if ((d->bLength > 7) && (d->bSynchAddress != 0x0)) { to_usb_device(ddev)->hwinfo.fb_out_ep = d->bSynchAddress; - dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } } else { to_usb_device(ddev)->hwinfo.out_ep = d->bEndpointAddress; - dev_info(ddev, " This is OUT ISO endpoint #0%x 0x%p\n", + dev_info(ddev, " This is OUT ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); if ((d->bLength > 7) && (d->bSynchAddress != 0x0)) { to_usb_device(ddev)->hwinfo.fb_in_ep = d->bSynchAddress; - dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } } @@ -254,12 +254,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if (d->bEndpointAddress & USB_ENDPOINT_DIR_MASK) { to_usb_device(ddev)->hwinfo.fb_in_ep = d->bEndpointAddress; - dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback IN ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } else { to_usb_device(ddev)->hwinfo.fb_out_ep = d->bEndpointAddress; - dev_info(ddev, "Feedback OUT ISO endpoint #0%x 0x%p\n", + dev_info(ddev, "Feedback OUT ISO endpoint #0%x 0x%x\n", d->bEndpointAddress, d->bSynchAddress); } } From fba427b99a90ea762531c12be97e8f4cdab31f99 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 01:40:53 +0300 Subject: [PATCH 159/452] drivers/video/fbdev/exynos/dpu20/dpp_drv: fix printf format errors Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/dpp_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c index d0e3ff92a1cb..d26d2f94d5bb 100644 --- a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c @@ -1185,7 +1185,7 @@ static int dpp_probe(struct platform_device *pdev) if (IS_SUPPORT_WCG(attr)) dpp->attr |= (1 << DPP_ATTR_WCG); - dpp_info("DPP:INFO:%s:%x attr : %x", __func__, dpp->id, dpp->attr); + dpp_info("DPP:INFO:%s:%x attr : %lx", __func__, dpp->id, dpp->attr); #if 0 print_dpp_restrict(dpp->attr); #endif From 624e61c014ab37011224b71f7f2e9cbbee471a96 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 10 Jan 2021 18:55:57 +0300 Subject: [PATCH 160/452] 
b_r26p0/.../gpu_custom_interface.c: fix scnprintf() format Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c b/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c index 29ac5d5f2a21..4ba994a38636 100644 --- a/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c +++ b/drivers/gpu/arm/b_r26p0/platform/exynos/gpu_custom_interface.c @@ -1916,7 +1916,7 @@ static ssize_t show_kernel_sysfs_gpu_memory(struct kobject *kobj, struct kobj_at kbase_device_put_list(kbdev_list); if (buffer_full) - ret += scnprintf(buf + ret, buf_size - ret, "error: buffer is full\n", ret); + ret += scnprintf(buf + ret, buf_size - ret, "error: %zi buffer is full\n", ret); return ret; } From edecd29f06b4f05cc6e2b500e48b1a48eb7f8ee7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 09:24:48 +0300 Subject: [PATCH 161/452] drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut: mark global vars as unused Signed-off-by: Denis Efremov <efremov@linux.com> --- .../video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h b/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h index 70ed0351def0..cfe2788d7675 100644 --- a/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h +++ b/drivers/video/fbdev/exynos/dpu20/mcd_hdr/mcd_cm_lut.h @@ -480,12 +480,12 @@ static unsigned int TABLE_TMS_PQ[INDEX_TMAX][2] = { //*****************************************************************************************************// //***************************************** Bypass table **********************************************// //*****************************************************************************************************// -static unsigned int TABLE_TMS_BYPASS = 0x00000100; -static unsigned int * TABLE_SC_BYPASS = 0; -static unsigned int * TABLE_TM_BYPASS = 0; -static unsigned int * TABLE_GM_BYPASS = 0; -static unsigned int * TABLE_OETF_BYPASS = 0; -static unsigned int * TABLE_EOTF_BYPASS = 0; +static unsigned int TABLE_TMS_BYPASS __maybe_unused = 0x00000100; +static unsigned int * TABLE_SC_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_TM_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_GM_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_OETF_BYPASS __maybe_unused = 0; +static unsigned int * TABLE_EOTF_BYPASS __maybe_unused = 0; //*****************************************************************************************************// //************************************** Indexed Gamut table ******************************************// From be090d7e4f3398d303ed058ecdab8aa2d1c8b69a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 15:49:56 +0300 Subject: [PATCH 162/452] drivers/video/fbdev/exynos/panel/Makefile: disable unused-variable warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/exynos/panel/Makefile b/drivers/video/fbdev/exynos/panel/Makefile index f7e1f63136d8..006002a2a718 100644 --- a/drivers/video/fbdev/exynos/panel/Makefile +++ b/drivers/video/fbdev/exynos/panel/Makefile @@ -5,6 +5,8 @@ # Licensed under GPLv2 # +ccflags-y += $(call cc-disable-warning, unused-variable) + 
obj-$(CONFIG_EXYNOS_MIPI_DSIM) += timenval.o panel.o panel_bl.o dimming.o panel_drv.o panel_irc.o obj-$(CONFIG_EXYNOS_DECON_LCD_S6E3HF3) += s6e3hf3/s6e3hf3.o obj-$(CONFIG_EXYNOS_DECON_LCD_S6E3HF4) += s6e3hf4/s6e3hf4.o From 07eb2aa13358a9c7d52a749bd23f82767f1d5bdf Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 01:55:28 +0300 Subject: [PATCH 163/452] drivers/video/fbdev/exynos/dpu20/decon_core: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/decon_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 66dfd7ab1118..42286b86b139 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -4701,7 +4701,7 @@ static int decon_probe(struct platform_device *pdev) ret = create_wcg_sysfs(decon); if (ret) - decon_err("DECON:ERR:%s:faield to create sysfs for wcg\n"); + decon_err("DECON:ERR:%s:failed to create sysfs for wcg\n", __func__); #endif dpu_init_win_update(decon); decon_init_low_persistence_mode(decon); From 6e1a02d5c5530580431a9b6cdf7fa518f4b5838a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 16:02:00 +0300 Subject: [PATCH 164/452] sound/soc/samsung/abox/abox: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/samsung/abox/abox.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/samsung/abox/abox.c b/sound/soc/samsung/abox/abox.c index 50b43be81227..6683393df577 100644 --- a/sound/soc/samsung/abox/abox.c +++ b/sound/soc/samsung/abox/abox.c @@ -987,7 +987,7 @@ void abox_request_dram_on(struct device *dev, unsigned int id, bool on) ret = regmap_write(regmap, ABOX_SYSPOWER_CTRL, val); if (ret < 0) { - dev_err(dev, "syspower write failed\n", ret); + dev_err(dev, "syspower write failed (%d)\n", ret); return; } From 35eefc90ffc617cae6e06a9a8147fa2839706c0f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 22 Nov 2019 17:18:46 +0300 Subject: [PATCH 165/452] init/uh_fault_handler: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- init/uh_fault_handler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/uh_fault_handler.c b/init/uh_fault_handler.c index 4c5fee09fa4d..76d6497626ed 100644 --- a/init/uh_fault_handler.c +++ b/init/uh_fault_handler.c @@ -110,7 +110,7 @@ void uh_fault_handler(void) exception_class = esr_ec_unknown_reason; pr_alert("=============uH fault handler logging=============\n"); pr_alert("%s",exception_class_string[exception_class]); - pr_alert("[System registers]\n", cpu); + pr_alert("[System registers CPU: %u]\n", cpu); pr_alert("ESR_EL2: %x\tHCR_EL2: %llx\tHPFAR_EL2: %llx\n", uh_handler_data->esr_el2.bits, uh_handler_data->hcr_el2, uh_handler_data->hpfar_el2); From 2251d5eb12a5eeb7bcf299093b41b4ec5a33930c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 22 Nov 2019 17:19:30 +0300 Subject: [PATCH 166/452] include/trace/events/ems: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- include/trace/events/ems.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/trace/events/ems.h b/include/trace/events/ems.h index ca0cf7320d61..0239b4f00ed5 100644 --- a/include/trace/events/ems.h +++ b/include/trace/events/ems.h @@ -689,7 +689,7 @@ 
TRACE_EVENT(ems_select_service_cpu, __entry->backup_cpu = backup_cpu; ), - TP_printk("comm=%s pid=%d best_cpu=%d backup_cpu", + TP_printk("comm=%s pid=%d best_cpu=%d backup_cpu=%d", __entry->comm, __entry->pid, __entry->best_cpu, __entry->backup_cpu) ); From 4631c3cfe34a6640b0717f1c8f140f89b9379307 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 07:56:12 +0300 Subject: [PATCH 167/452] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- .../exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c index 877a4d7437f1..53a8b9fd125b 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c @@ -666,7 +666,7 @@ int fimc_is_mcu_erase(struct v4l2_subdev *subdev, u32 address, size_t len) xmit_bytes = 2 * erase.count; *((uint8_t *)&pbuf[ix]) = fimc_is_mcu_checksum(xmit, xmit_bytes); xmit_bytes++; - info("mcu xmit_bytess = %d, erase.count = %d"); + info("mcu xmit_bytes = %d, erase.count = %d", xmit_bytes, erase.count); /* transmit parameter */ ret = i2c_master_send(client, xmit, xmit_bytes); @@ -1596,7 +1596,7 @@ int fimc_is_mcu_set_aperture(struct v4l2_subdev *subdev, int onoff) return true; exit: - info("% Do not set aperture. onoff = %d", __FUNCTION__, onoff); + info("%s Do not set aperture. onoff = %d", __FUNCTION__, onoff); return false; } From 1e8f7d22b6059d0b1c9c296d3266c87b4e3b56a7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 16:16:14 +0300 Subject: [PATCH 168/452] sound/usb/card: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/usb/card.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/card.c b/sound/usb/card.c index 5f4cf0688d37..afb4ab62dada 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -209,7 +209,7 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); } - dev_info(&dev->dev, "usb_host : %s %u:%d \n", __func__); + dev_info(&dev->dev, "usb_host : %s %u:%d \n", __func__, ctrlif, interface); return 0; } From d6f7541a355f37efef39cc14764c313335628e10 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 11 Sep 2020 23:07:59 +0300 Subject: [PATCH 169/452] drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- .../platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c b/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c index 4d8139ef861f..37e337e5767a 100644 --- a/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c +++ b/drivers/media/platform/exynos/fimc-is2/fimc-is-device-sensor_v2.c @@ -3333,7 +3333,7 @@ int fimc_is_sensor_front_start(struct fimc_is_device_sensor *device, mutex_lock(&camif_path_lock); ret = fimc_is_hw_camif_fix_up(device); if (ret) { - merr("failed to fix up CAM I/F", device, ret); + merr("failed to fix up CAM I/F(%d)", device, 
ret); ret = -EINVAL; mutex_unlock(&camif_path_lock); goto p_err; @@ -3341,7 +3341,7 @@ int fimc_is_sensor_front_start(struct fimc_is_device_sensor *device, ret = fimc_is_hw_camif_pdp_in_enable(device, true); if (ret) { - merr("failed to enable PDP IN", device, ret); + merr("failed to enable PDP IN(%d)", device, ret); ret = -EINVAL; mutex_unlock(&camif_path_lock); goto p_err; @@ -3451,7 +3451,7 @@ int fimc_is_sensor_front_stop(struct fimc_is_device_sensor *device) if (IS_ENABLED(USE_CAMIF_FIX_UP)) { ret = fimc_is_hw_camif_pdp_in_enable(device, false); if (ret) - merr("failed to enable PDP IN", device, ret); + merr("failed to enable PDP IN(%d)", device, ret); } reset_the_others: From 9ecb5eeebcda89e364d5445eae24773648b8894c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 11 Sep 2020 23:09:51 +0300 Subject: [PATCH 170/452] sound/usb/exynos_usb_audio: fix missing printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/usb/exynos_usb_audio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/exynos_usb_audio.c b/sound/usb/exynos_usb_audio.c index ae95e2bc7d64..ff77fc6d0a1a 100644 --- a/sound/usb/exynos_usb_audio.c +++ b/sound/usb/exynos_usb_audio.c @@ -432,7 +432,7 @@ int exynos_usb_audio_hcd(struct usb_device *udev) */ if (ret == -EADDRINUSE) { cancel_work_sync(&usb_audio->usb_work); - pr_err("iommu unmapping not done. unmap here\n", ret); + pr_err("iommu unmapping not done. unmap here %d\n", ret); exynos_usb_audio_unmap_all(); ret = abox_iommu_map(dev, USB_AUDIO_XHCI_BASE, USB_AUDIO_XHCI_BASE, PAGE_SIZE * 16, 0); From e2f877bc0e62f08388e52076876678356c595df1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 12:02:09 +0300 Subject: [PATCH 171/452] drivers/vision/npu/npu-memory: fix printk format arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/vision/npu/npu-memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/vision/npu/npu-memory.c b/drivers/vision/npu/npu-memory.c index da587f67faf7..02f3cdb95180 100644 --- a/drivers/vision/npu/npu-memory.c +++ b/drivers/vision/npu/npu-memory.c @@ -99,7 +99,7 @@ int npu_memory_map(struct npu_memory *memory, struct npu_memory_buffer *buffer) buffer->dma_buf = dma_buf_get(buffer->fd); if (IS_ERR_OR_NULL(buffer->dma_buf)) { - npu_err("dma_buf_get is fail(0x%08x)\n", buffer->dma_buf); + npu_err("dma_buf_get is fail(0x%p)\n", buffer->dma_buf); ret = -EINVAL; goto p_err; } @@ -231,7 +231,7 @@ int npu_memory_alloc(struct npu_memory *memory, struct npu_memory_buffer *buffer dma_buf = ion_alloc_dmabuf(heapname, size, flag); if (IS_ERR_OR_NULL(dma_buf)) { - npu_err("ion_alloc_dmabuf is fail(0x%08x)\n", dma_buf); + npu_err("ion_alloc_dmabuf is fail(0x%p)\n", dma_buf); ret = -EINVAL; goto p_err; } From c1a00c7952fe3a0eba53df93c434c66eb31abed8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:45:25 +0300 Subject: [PATCH 172/452] drivers/battery_v2/mfc_charger: fix printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/battery_v2/mfc_charger.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/battery_v2/mfc_charger.c b/drivers/battery_v2/mfc_charger.c index 372e844bc141..3664a6399c1a 100644 --- a/drivers/battery_v2/mfc_charger.c +++ b/drivers/battery_v2/mfc_charger.c @@ -2468,7 +2468,7 @@ static void mfc_wpc_rx_power_work(struct work_struct *work) union power_supply_propval value; - pr_info("%s: rx power = %d (0x%x), 
This W/A is only for Factory\n", __func__, charger->max_power_by_txid); + pr_info("%s: rx power = %d, This W/A is only for Factory\n", __func__, charger->max_power_by_txid); value.intval = charger->max_power_by_txid; psy_do_property("wireless", set, POWER_SUPPLY_PROP_WIRELESS_RX_POWER, value); @@ -3902,7 +3902,7 @@ static int mfc_chg_set_property(struct power_supply *psy, case POWER_SUPPLY_EXT_PROP_PAD_VOLT_CTRL: if(charger->pdata->wpc_vout_ctrl_lcd_on) { if (delayed_work_pending(&charger->wpc_vout_mode_work)) { - pr_info("%s : Already vout change. skip pad control\n"); + pr_info("%s : Already vout change. skip pad control\n", __func__); return 0; } if (val->intval && charger->is_afc_tx && From f58305ae7d654ca5d71eb8b8eacbf9e125e81780 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 11:50:31 +0300 Subject: [PATCH 173/452] drivers/vision/npu/npu-log: fix printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/vision/npu/npu-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vision/npu/npu-log.c b/drivers/vision/npu/npu-log.c index ac6bddb875ed..9dc54b384cc6 100644 --- a/drivers/vision/npu/npu-log.c +++ b/drivers/vision/npu/npu-log.c @@ -655,7 +655,7 @@ static int npu_store_log_dump(const size_t dump_size) total = 0; ret = spin_lock_safe_isr(&npu_log_lock); if (ret) { - pr_err("NPU log dump is not available - in interrupt context\n", total); + pr_err("NPU log dump is not available - in interrupt context\n"); goto err_exit; } From 16e977a5774cd34cdf937151499b2532a007b960 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:20:29 +0300 Subject: [PATCH 174/452] drivers/redriver/ptn36502: fix printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/redriver/ptn36502.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/redriver/ptn36502.c b/drivers/redriver/ptn36502.c index 1165c22233aa..29dc495e144e 100644 --- a/drivers/redriver/ptn36502.c +++ b/drivers/redriver/ptn36502.c @@ -135,7 +135,7 @@ int ptn36502_config(int config, int is_DFP) break; case CHECK_EXIST: - pr_err("%s: dummy\n"); + pr_err("%s: dummy\n", __func__); break; default: From 9d7b9857f62d9c91db3d06ccacb29e414375ed1c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:36:18 +0300 Subject: [PATCH 175/452] drivers/video/fbdev/exynos/dpu20/decon_dsi: fix printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/decon_dsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_dsi.c b/drivers/video/fbdev/exynos/dpu20/decon_dsi.c index e0cfd52ddd3c..b35c34f1e866 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_dsi.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_dsi.c @@ -1827,7 +1827,7 @@ static int dpu_set_pre_df_dsim(struct decon_device *decon) return -EINVAL; } if (df_set->hs == 0) { - decon_err("[DYN_FREQ]:ERR:%s:df index : %d hs is 0 : %d\n", + decon_err("[DYN_FREQ]:ERR:%s:df index : %d hs is 0\n", __func__, status->target_df); return -EINVAL; } From be1a54b35f910c960a14a03262bd2e8d74b83b93 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:36:37 +0300 Subject: [PATCH 176/452] drivers/video/fbdev/exynos/panel/panel_spi: fix printk arguments Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/panel_spi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/drivers/video/fbdev/exynos/panel/panel_spi.c b/drivers/video/fbdev/exynos/panel/panel_spi.c index 2b6c0c860368..bb5ed4fa86b4 100644 --- a/drivers/video/fbdev/exynos/panel/panel_spi.c +++ b/drivers/video/fbdev/exynos/panel/panel_spi.c @@ -199,7 +199,7 @@ static int panel_spi_read_id(struct panel_spi_dev *spi_dev, u32 *id) return -EIO; *id = (rbuf[0] << 16) | (rbuf[1] << 8) | rbuf[2]; - pr_debug("%s: 0x06X\n", __func__, *id); + pr_debug("%s: 0x%06X\n", __func__, *id); return 0; } From d31a211ad10dbf86e5e005b33d602d33b6b7e8aa Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 29 Nov 2019 20:06:17 +0300 Subject: [PATCH 177/452] drivers/soc/samsung/acpm/acpm: fix uninit pointer Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/acpm/acpm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/acpm/acpm.c b/drivers/soc/samsung/acpm/acpm.c index 2fbdf183f89a..54d769ad7c9e 100644 --- a/drivers/soc/samsung/acpm/acpm.c +++ b/drivers/soc/samsung/acpm/acpm.c @@ -99,7 +99,7 @@ static int plugins_init(void) unsigned int plugin_id; char name[50]; const char *fw_name = NULL; - void __iomem *fw_base_addr; + void __iomem *fw_base_addr = NULL; struct device_node *node, *child; const __be32 *prop; unsigned int offset; From 41f5cd06265d578838a40aafdf98d28d87b8facf Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 3 Dec 2019 23:44:20 +0300 Subject: [PATCH 178/452] drivers/staging/android/freecess_pkg: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/staging/android/freecess_pkg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/android/freecess_pkg.c b/drivers/staging/android/freecess_pkg.c index 91046602d9ac..14738a5fb6d8 100644 --- a/drivers/staging/android/freecess_pkg.c +++ b/drivers/staging/android/freecess_pkg.c @@ -240,7 +240,7 @@ static struct nf_hook_ops freecess_nf_ops[] = { static int __init kfreecess_pkg_init(void) { - int ret; + int ret = 0; int i; struct net *net; From 0320b186b1a59518cb17a340b9c2c050b9d0ebd4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 02:00:26 +0300 Subject: [PATCH 179/452] drivers/video/fbdev/exynos/dpu20/decon_core: fix uninit pointer Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/decon_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/decon_core.c b/drivers/video/fbdev/exynos/dpu20/decon_core.c index 42286b86b139..9e69a218219e 100644 --- a/drivers/video/fbdev/exynos/dpu20/decon_core.c +++ b/drivers/video/fbdev/exynos/dpu20/decon_core.c @@ -2276,7 +2276,7 @@ static void decon_release_old_bufs(struct decon_device *decon, static int decon_set_hdr_info(struct decon_device *decon, struct decon_reg_data *regs, int win_num, bool on) { - struct exynos_video_meta *video_meta; + struct exynos_video_meta *video_meta = NULL; #if defined(CONFIG_EXYNOS_DISPLAYPORT) int ret = 0; #endif From 2c4baf03b74dcfba717af8ae79fba8cd8ca373c3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 4 Dec 2019 09:09:56 +0300 Subject: [PATCH 180/452] drivers/video/fbdev/exynos/dpu20/event_log: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/event_log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/dpu20/event_log.c b/drivers/video/fbdev/exynos/dpu20/event_log.c index 
029574147004..0a8ec2638253 100644 --- a/drivers/video/fbdev/exynos/dpu20/event_log.c +++ b/drivers/video/fbdev/exynos/dpu20/event_log.c @@ -841,7 +841,7 @@ void DPU_EVENT_SHOW(struct seq_file *s, struct decon_device *decon) int latest = idx; struct timeval tv; ktime_t prev_ktime; - struct dsim_device *dsim; + struct dsim_device *dsim = NULL; if (IS_ERR_OR_NULL(decon->d.event_log)) return; From 216b52949fa5cce95823f2a4e50fabcbdd9ee2ae Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 3 Dec 2019 01:06:47 +0300 Subject: [PATCH 181/452] drivers/soc/samsung/exynos-bcm_dbg: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos-bcm_dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-bcm_dbg.c b/drivers/soc/samsung/exynos-bcm_dbg.c index 0ca36d233d13..64df919a454b 100644 --- a/drivers/soc/samsung/exynos-bcm_dbg.c +++ b/drivers/soc/samsung/exynos-bcm_dbg.c @@ -683,7 +683,7 @@ static int exynos_bcm_dbg_run_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_inf struct exynos_bcm_dbg_data *data) { unsigned int cmd[4] = {0, 0, 0, 0}; - unsigned int run, low_ktime, high_ktime; + unsigned int run = 0, low_ktime, high_ktime; int ret = 0; u64 ktime; unsigned long flags; From d5c37e89c074f2a838523a9776329b6797ba7eb7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 3 Dec 2019 01:08:15 +0300 Subject: [PATCH 182/452] drivers/soc/samsung/exynos-hiu: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos-hiu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos-hiu.c b/drivers/soc/samsung/exynos-hiu.c index 20eca9d64ce2..05770f1d5132 100644 --- a/drivers/soc/samsung/exynos-hiu.c +++ b/drivers/soc/samsung/exynos-hiu.c @@ -662,7 +662,7 @@ static struct attribute_group exynos_hiu_attr_group = { static int hiu_dt_parsing(struct device_node *dn) { const char *buf; - unsigned int val; + unsigned int val = 0; int ret = 0; ret |= of_property_read_u32(dn, "operation-mode", &data->operation_mode); From b924346b3be971b946d3a23784e1fe47770145fd Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 3 Dec 2019 01:08:31 +0300 Subject: [PATCH 183/452] drivers/spi/spi-s3c64xx: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/spi/spi-s3c64xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 44be84ecc467..16ddbe978da6 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -960,7 +960,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master, list_for_each_entry(xfer, &msg->transfers, transfer_list) { unsigned long flags; - int use_dma; + int use_dma = 0; reinit_completion(&sdd->xfer_completion); From 847faa8575f9bd4f9157f54498eeb21188508d9f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 18:13:10 +0300 Subject: [PATCH 184/452] net/unix/af_unix: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 94d33f541b7d..9936a1334e5f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -995,7 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; char *sun_path = sunaddr->sun_path; int 
err; - unsigned int hash; + unsigned int hash = 0; struct unix_address *addr; struct hlist_head *list; struct path path = { }; From 4aedc48fb1b5ae298362e8831439cf4fdb474f63 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 25 Dec 2020 15:21:19 +0300 Subject: [PATCH 185/452] bcmdhd_100_15/wl_cfg_btcoex: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c index 2d59331cc591..d9934e0a1eb8 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_cfg_btcoex.c @@ -146,13 +146,13 @@ static bool btcoex_is_sco_active(struct net_device *dev) ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27); - WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); - if (ioc_res < 0) { WL_ERR(("ioc read btc params error\n")); break; } + WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); + if ((param27 & 0x6) == 2) { /* count both sco & esco */ sco_id_cnt++; } From 1be05a8a0d9765636b644cb355a1f1b4eef48ede Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 20:30:03 +0300 Subject: [PATCH 186/452] drivers/gpu/exynos/g2d/g2d_task: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/gpu/exynos/g2d/g2d_task.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/exynos/g2d/g2d_task.c b/drivers/gpu/exynos/g2d/g2d_task.c index 5008d9f89c50..661f38f77179 100644 --- a/drivers/gpu/exynos/g2d/g2d_task.c +++ b/drivers/gpu/exynos/g2d/g2d_task.c @@ -457,7 +457,7 @@ void g2d_destroy_tasks(struct g2d_device *g2d_dev) static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev, int id) { struct g2d_task *task; - int i, ret; + int i, ret = 0; task = kzalloc(sizeof(*task), GFP_KERNEL); if (!task) From 23cde6a3535cec8caed9d9e32f91714748e9df27 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 10 Jan 2021 18:20:10 +0300 Subject: [PATCH 187/452] bcmdhd_101_16/wl_cfg_btcoex.c: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c index 09380fb96cfa..0798e739f914 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/wl_cfg_btcoex.c @@ -148,13 +148,13 @@ static bool btcoex_is_sco_active(struct net_device *dev) ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27); - WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); - if (ioc_res < 0) { WL_ERR(("ioc read btc params error\n")); break; } + WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); + if ((param27 & 0x6) == 2) { /* count both sco & esco */ sco_id_cnt++; } From e3d5462e3bb954f8700ff84ab2b9ba1ef319d81d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 10 Jan 2021 18:21:33 +0300 Subject: [PATCH 188/452] bcmdhd_101_16/dhd_debug.c: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git
a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c index 267b2a13057e..d0941e8de807 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_debug.c @@ -1289,6 +1289,7 @@ dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_b { dhd_dbg_ring_status_t ring_status; uint32 rlen = 0; + int ret = 0; rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE); @@ -1299,9 +1300,9 @@ dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_b return; } - __dhd_dbg_get_ring_status(ring, &ring_status); + ret = __dhd_dbg_get_ring_status(ring, &ring_status); - if (ring_status.written_bytes != ring_status.read_bytes) { + if (ret == BCME_OK && ring_status.written_bytes != ring_status.read_bytes) { trace_buf_info->availability = NEXT_BUF_AVAIL; } } From dc67b875ab316c12c79a5f0aa31c2a9df81aef5f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 10 Jan 2021 18:56:49 +0300 Subject: [PATCH 189/452] drivers/soc/samsung/exynos-sci.c: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos-sci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/samsung/exynos-sci.c b/drivers/soc/samsung/exynos-sci.c index e5eeb5d6afd6..e86832081a61 100644 --- a/drivers/soc/samsung/exynos-sci.c +++ b/drivers/soc/samsung/exynos-sci.c @@ -696,7 +696,7 @@ static ssize_t show_llc_region_alloc(struct device *dev, struct platform_device, dev); struct exynos_sci_data *data = platform_get_drvdata(pdev); ssize_t count = 0; - unsigned int region_index; + unsigned int region_index = 0; int ret; ret = exynos_sci_llc_region_alloc(data, SCI_IPC_GET, &region_index, 0); @@ -745,7 +745,7 @@ static ssize_t show_llc_enable(struct device *dev, struct platform_device, dev); struct exynos_sci_data *data = platform_get_drvdata(pdev); ssize_t count = 0; - unsigned int enable; + unsigned int enable = 0; int ret; ret = exynos_sci_llc_enable(data, SCI_IPC_GET, &enable); From ef5a6ff4f84f5aa174e4fc347e0114f37f3c6b67 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:55:01 +0300 Subject: [PATCH 190/452] drivers/battery_v2/mfc_charger: fix sscanf Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/battery_v2/mfc_charger.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/battery_v2/mfc_charger.c b/drivers/battery_v2/mfc_charger.c index 3664a6399c1a..a7bb1835992f 100644 --- a/drivers/battery_v2/mfc_charger.c +++ b/drivers/battery_v2/mfc_charger.c @@ -5370,8 +5370,8 @@ ssize_t mfc_store_attrs(struct device *dev, struct power_supply *psy = dev_get_drvdata(dev); struct mfc_charger_data *charger = power_supply_get_drvdata(psy); const ptrdiff_t offset = attr - mfc_attrs; + unsigned int header, data_com, data_val; int x, ret; - u8 header, data_com, data_val; dev_info(charger->dev, "%s \n", __func__); @@ -5399,8 +5399,9 @@ ssize_t mfc_store_attrs(struct device *dev, break; case MFC_PACKET: if (sscanf(buf, "0x%4x 0x%4x 0x%4x\n", &header, &data_com, &data_val) == 3) { - dev_info(charger->dev, "%s 0x%x, 0x%x, 0x%x \n", __func__, header, data_com, data_val); - mfc_send_packet(charger, header, data_com, &data_val, 1); + u8 u8header = header, u8data_com = data_com, u8data_val = data_val; + dev_info(charger->dev, "%s 0x%x, 0x%x, 0x%x \n", __func__, u8header, u8data_com, u8data_val); +
mfc_send_packet(charger, u8header, u8data_com, &u8data_val, 1); } ret = count; break; From 03317bfacf58aafc37075dbac2ffbe835ac6be8a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Dec 2019 15:52:17 +0300 Subject: [PATCH 191/452] drivers/video/fbdev/exynos/panel/sysfs: fix sscanf input format Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/sysfs.c b/drivers/video/fbdev/exynos/panel/sysfs.c index bea486939931..9508cbedad86 100644 --- a/drivers/video/fbdev/exynos/panel/sysfs.c +++ b/drivers/video/fbdev/exynos/panel/sysfs.c @@ -363,7 +363,7 @@ static ssize_t gamma_interpolation_test_store(struct device *dev, return -EINVAL; } - ret = sscanf(buf, "%x %x %x %x %x %x", + ret = sscanf(buf, "%hhx %hhx %hhx %hhx %hhx %hhx", &write_buf[0], &write_buf[1], &write_buf[2], &write_buf[3], &write_buf[4], &write_buf[5]); if (ret != 6) { From 10b8a26f265d14a5f313f3381c890fea44148fc8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 23 Nov 2019 13:50:50 +0300 Subject: [PATCH 192/452] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu: fix missing '=' Signed-off-by: Denis Efremov <efremov@linux.com> --- .../exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c index 53a8b9fd125b..cd16da8c1728 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-device-ois_mcu.c @@ -3627,7 +3627,7 @@ int fimc_is_mcu_probe(struct i2c_client *client, mcu = kzalloc(sizeof(struct fimc_is_mcu) * sensor_id_len, GFP_KERNEL); if (!mcu) { err("fimc_is_mcu is NULL"); - ret -ENOMEM; + ret = -ENOMEM; goto p_err; } @@ -3641,7 +3641,7 @@ int fimc_is_mcu_probe(struct i2c_client *client, ois = kzalloc(sizeof(struct fimc_is_ois) * sensor_id_len, GFP_KERNEL); if (!ois) { err("fimc_is_ois is NULL"); - ret -ENOMEM; + ret = -ENOMEM; goto p_err; } @@ -3655,7 +3655,7 @@ int fimc_is_mcu_probe(struct i2c_client *client, ois_device = kzalloc(sizeof(struct fimc_is_device_ois), GFP_KERNEL); if (!ois_device) { err("fimc_is_device_ois is NULL"); - ret -ENOMEM; + ret = -ENOMEM; goto p_err; } From 31ae90b12194dd312bf7bbd019305514f70066f4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 3 Dec 2019 01:05:18 +0300 Subject: [PATCH 193/452] drivers/staging/android/ion/ion_debug: fix wrong argument in printk Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/staging/android/ion/ion_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/android/ion/ion_debug.c b/drivers/staging/android/ion/ion_debug.c index d9ccabe00cfa..0eb25a87ba14 100644 --- a/drivers/staging/android/ion/ion_debug.c +++ b/drivers/staging/android/ion/ion_debug.c @@ -372,7 +372,7 @@ void ion_debug_heap_init(struct ion_heap *heap) path = dentry_path(heap->dev->heaps_debug_root, buf, 256); - perrfn("failed to create %s/%s", path, heap_file); + perrfn("failed to create %s/%s", path, heap->name); } } From d081149f5afc84a0a62d4c9f3ad3802b5c78b702 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:00:39 +0300 Subject: [PATCH 194/452] net/ncm/ncm: check 
copy_from_user result Signed-off-by: Denis Efremov <efremov@linux.com> --- net/ncm/ncm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ncm/ncm.c b/net/ncm/ncm.c index 796ee5ec610f..971551aa760f 100644 --- a/net/ncm/ncm.c +++ b/net/ncm/ncm.c @@ -855,7 +855,9 @@ static ssize_t ncm_write(struct file *file, const char __user *buf, size_t count return -EACCES; } memset(intermediate_string,'\0',sizeof(intermediate_string)); - copy_from_user(intermediate_string,buf,sizeof(intermediate_string)-1); + if (copy_from_user(intermediate_string,buf,sizeof(intermediate_string)-1)) { + return -EINVAL; + } intermediate_value = simple_strtol(intermediate_string, NULL, 10); if (intermediate_value > 0) { update_intermediate_timeout(intermediate_value); From 9ba90520e7a31e93125da8cb95cd8602e5b7da39 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:02:41 +0300 Subject: [PATCH 195/452] kernel/sched/ems/core: check sysfs_create_file() return values Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/sched/ems/core.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c index e6baed836569..c26a6ca4822f 100644 --- a/kernel/sched/ems/core.c +++ b/kernel/sched/ems/core.c @@ -305,11 +305,22 @@ struct kobject *ems_kobj; static int __init init_sysfs(void) { ems_kobj = kobject_create_and_add("ems", kernel_kobj); + if (!ems_kobj) + return -ENOMEM; - sysfs_create_file(ems_kobj, &sched_topology_attr.attr); - sysfs_create_file(ems_kobj, &eff_mode_attr.attr); + if (sysfs_create_file(ems_kobj, &sched_topology_attr.attr) < 0) + goto topology_err; + if (sysfs_create_file(ems_kobj, &eff_mode_attr.attr) < 0) + goto mode_err; return 0; + +mode_err: + sysfs_remove_file(ems_kobj, &sched_topology_attr.attr); +topology_err: + kobject_put(ems_kobj); + ems_kobj = NULL; + return -ENOMEM; } core_initcall(init_sysfs); From d97405cc183f191472ed3a0501bc2e8e77f17595 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:20:49 +0300 Subject: [PATCH 196/452] drivers/soc/samsung/debug/exynos9820-itmon: check strtoul results Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/debug/exynos9820-itmon.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/soc/samsung/debug/exynos9820-itmon.c b/drivers/soc/samsung/debug/exynos9820-itmon.c index 33029c5371f5..d015294ca7af 100644 --- a/drivers/soc/samsung/debug/exynos9820-itmon.c +++ b/drivers/soc/samsung/debug/exynos9820-itmon.c @@ -1747,7 +1747,8 @@ static ssize_t itmon_scandump_store(struct kobject *kobj, { unsigned long val = 0; - kstrtoul(buf, 16, &val); + if (kstrtoul(buf, 16, &val) < 0) + return -EINVAL; g_itmon->pdata->sysfs_scandump = !!val; return count; @@ -1772,7 +1773,8 @@ static ssize_t itmon_s2d_store(struct kobject *kobj, { unsigned long val = 0; - kstrtoul(buf, 16, &val); + if (kstrtoul(buf, 16, &val) < 0) + return -EINVAL; g_itmon->pdata->sysfs_s2d = !!val; return count; From 5ad385345c524b473925b6f62ac46e794bccb510 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:29:04 +0300 Subject: [PATCH 197/452] security/samsung/defex_lsm/pack_rules: fix strncpy() warning Signed-off-by: Denis Efremov <efremov@linux.com> --- security/samsung/defex_lsm/pack_rules.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/samsung/defex_lsm/pack_rules.c b/security/samsung/defex_lsm/pack_rules.c index 
ab59686b26ed..1deb8d533849 100644 --- a/security/samsung/defex_lsm/pack_rules.c +++ b/security/samsung/defex_lsm/pack_rules.c @@ -12,7 +12,7 @@ #include <string.h> #include "include/defex_rules.h" -#define SAFE_STRCOPY(dst, src) do { strncpy(dst, src, sizeof(dst)); dst[sizeof(dst) - 1] = 0; } while(0) +#define SAFE_STRCOPY(dst, src) do { strncpy(dst, src, sizeof(dst) - 1); dst[sizeof(dst) - 1] = 0; } while(0) const struct feature_match_entry feature_match[] = { {"feature_safeplace_path", feature_safeplace_path}, From 1094ccdba6cf5c4a808234500470cebc5253139f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:30:28 +0300 Subject: [PATCH 198/452] drivers/video/fbdev/exynos/panel: move str_stm_fied to c file Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/panel.h | 14 -------------- drivers/video/fbdev/exynos/panel/sysfs.c | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/video/fbdev/exynos/panel/panel.h b/drivers/video/fbdev/exynos/panel/panel.h index 6469dd16a338..0e90f55206f2 100644 --- a/drivers/video/fbdev/exynos/panel/panel.h +++ b/drivers/video/fbdev/exynos/panel/panel.h @@ -807,20 +807,6 @@ enum stm_field_num { STM_V_THRES, STM_FIELD_MAX }; - -static const char *str_stm_fied[STM_FIELD_MAX] = { - "stm_ctrl_en=", - "stm_max_opt=", - "stm_default_opt=", - "stm_dim_step=", - "stm_frame_period=", - "stm_min_sect=", - "stm_pixel_period=", - "stm_line_period=", - "stm_min_move=", - "stm_m_thres=", - "stm_v_thres=" -}; #endif enum { diff --git a/drivers/video/fbdev/exynos/panel/sysfs.c b/drivers/video/fbdev/exynos/panel/sysfs.c index 9508cbedad86..bdf14b146b6e 100644 --- a/drivers/video/fbdev/exynos/panel/sysfs.c +++ b/drivers/video/fbdev/exynos/panel/sysfs.c @@ -33,6 +33,23 @@ #ifdef CONFIG_DYNAMIC_FREQ #include "dynamic_freq.h" #endif + +#ifdef CONFIG_SUPPORT_ISC_TUNE_TEST +static const char *str_stm_fied[STM_FIELD_MAX] = { + "stm_ctrl_en=", + "stm_max_opt=", + "stm_default_opt=", + "stm_dim_step=", + "stm_frame_period=", + "stm_min_sect=", + "stm_pixel_period=", + "stm_line_period=", + "stm_min_move=", + "stm_m_thres=", + "stm_v_thres=" +}; +#endif + static DEFINE_MUTEX(sysfs_lock); char *mcd_rs_name[MAX_MCD_RS] = { From 25708df4784f5f9818e4f3d8e88b8a9cb5988b6c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:34:04 +0300 Subject: [PATCH 199/452] drivers/samsung/debug: check kstrtol results Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/debug/sec_debug_reset_reason.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/samsung/debug/sec_debug_reset_reason.c b/drivers/samsung/debug/sec_debug_reset_reason.c index 0d00ffe2745a..6565de491dda 100644 --- a/drivers/samsung/debug/sec_debug_reset_reason.c +++ b/drivers/samsung/debug/sec_debug_reset_reason.c @@ -159,7 +159,8 @@ static void parse_pwrsrc_rs(struct outbuf *buf) unsigned long tmp; long long_pwrsrc_rs; - kstrtol(pwrsrc_rs, 16, &long_pwrsrc_rs); + if (kstrtol(pwrsrc_rs, 16, &long_pwrsrc_rs)) + return; secdbg_write_buf(buf, 0, "OFFSRC::"); tmp = long_pwrsrc_rs & 0xff0000000000; From 165a20bdfc6b9201c52286d8c89e23fd1bdd5fb2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 10 Jan 2021 18:57:34 +0300 Subject: [PATCH 200/452] .../dpu20/displayport_drv.c: check kstrtouint() results Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/displayport_drv.c | 14 ++++++++++---- 1 file 
changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c index bf924a06d258..111bbc9a4535 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c @@ -3728,8 +3728,8 @@ static int displayport_update_hmd_list(struct displayport_device *displayport, c ret = -EPERM; goto exit; } - kstrtouint(tok, 10, &num_hmd); - if (num_hmd > MAX_NUM_HMD) { + ret = kstrtouint(tok, 10, &num_hmd); + if (ret || num_hmd > MAX_NUM_HMD) { displayport_err("invalid list num %d\n", num_hmd); num_hmd = 0; ret = -EPERM; @@ -3747,14 +3747,20 @@ static int displayport_update_hmd_list(struct displayport_device *displayport, c tok = strsep(&p, ","); if (tok == NULL || *tok == 0xa/*LF*/) break; - kstrtouint(tok, 16, &val); + if (kstrtouint(tok, 16, &val)) { + ret = -EINVAL; + break; + } displayport->hmd_list[j].ven_id = val; /* PID */ tok = strsep(&p, ","); if (tok == NULL || *tok == 0xa/*LF*/) break; - kstrtouint(tok, 16, &val); + if (kstrtouint(tok, 16, &val)) { + ret = -EINVAL; + break; + } displayport->hmd_list[j].prod_id = val; displayport_info("HMD%02d: %s, 0x%04x, 0x%04x\n", j, From a409b7c49b670c7c93349ff816ed5612924dca93 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:37:13 +0300 Subject: [PATCH 201/452] drivers/fingerprint: move sensor_status from header file Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/fingerprint/et5xx-spi.c | 3 +++ drivers/fingerprint/fingerprint.h | 2 -- drivers/fingerprint/qbt2000_common.c | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/fingerprint/et5xx-spi.c b/drivers/fingerprint/et5xx-spi.c index 7a06dc9e7470..8ccfa1786f1b 100644 --- a/drivers/fingerprint/et5xx-spi.c +++ b/drivers/fingerprint/et5xx-spi.c @@ -48,6 +48,9 @@ static DEFINE_MUTEX(device_list_lock); int fpsensor_goto_suspend =0; #endif +static char sensor_status[SENSOR_STATUS_SIZE][10] = {"ooo", "unknown", "failed", + "viper", "raptor", "egis", "viper_wog", "namsan", "goodix", "qbt2000", "et7xx", "goodixopt"}; + static int gpio_irq; static struct etspi_data *g_data; static DECLARE_WAIT_QUEUE_HEAD(interrupt_waitq); diff --git a/drivers/fingerprint/fingerprint.h b/drivers/fingerprint/fingerprint.h index 13c764c1e0c9..f0d5f4ae444a 100644 --- a/drivers/fingerprint/fingerprint.h +++ b/drivers/fingerprint/fingerprint.h @@ -41,8 +41,6 @@ enum { }; #define SENSOR_STATUS_SIZE 12 -static char sensor_status[SENSOR_STATUS_SIZE][10] = {"ooo", "unknown", "failed", - "viper", "raptor", "egis", "viper_wog", "namsan", "goodix", "qbt2000", "et7xx", "goodixopt"}; /* For Finger Detect Mode */ enum { diff --git a/drivers/fingerprint/qbt2000_common.c b/drivers/fingerprint/qbt2000_common.c index 195299974ff1..3178910c9609 100644 --- a/drivers/fingerprint/qbt2000_common.c +++ b/drivers/fingerprint/qbt2000_common.c @@ -16,6 +16,10 @@ static struct qbt2000_drvdata *g_data = NULL; +static char sensor_status[SENSOR_STATUS_SIZE][10] = {"ooo", "unknown", "failed", + "viper", "raptor", "egis", "viper_wog", "namsan", "goodix", "qbt2000", "et7xx", "goodixopt"}; + + /* * struct ipc_msg_type_to_fw_event - * entry in mapping between an IPC message type to a firmware event From 2dd19ea76ce088533f232e851841d1c8ede369d8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:38:03 +0300 Subject: [PATCH 202/452] drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c: fix argv 
check Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c index ba4a2fa77810..c9d1a86f8157 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/dhd_common.c @@ -4120,7 +4120,7 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) htod16(WL_PKT_FILTER_MFLAG_NEG); (argv[i])++; } - if (argv[i] == '\0') { + if (*argv[i] == '\0') { printf("Pattern not provided\n"); goto fail; } From 71ecb0668450522b501d051f67631d584e4440a5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:38:29 +0300 Subject: [PATCH 203/452] drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c index 0a4ab598d818..6dc91e1f74c5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c @@ -272,7 +272,7 @@ wl_gather_ap_stadata(void *handle, void *event_info, u8 event) wl_event_msg_t *e; wl_ap_sta_data_t *sta_data; - wl_ap_sta_data_t temp_sta_data; + wl_ap_sta_data_t temp_sta_data = {0}; void *data = NULL; int i; int ret; From 65e98861a486635f5abe9f8c144e50195fd0c60f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 22 Feb 2021 17:30:20 +0300 Subject: [PATCH 204/452] bcmdhd_101_16/dhd_rtt.c: fix uninit var Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c index 46f415c2387f..7f6c5d5adbe5 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/dhd_rtt.c @@ -1571,7 +1571,7 @@ static int dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version) { int ret; - ftm_subcmd_info_t subcmd_info; + ftm_subcmd_info_t subcmd_info = {}; subcmd_info.name = "ver"; subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION; subcmd_info.handler = NULL; From 1a201041bfc67c30f2864d2bba1eec0a6cbbbeec Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:39:56 +0300 Subject: [PATCH 205/452] drivers/ccic/max77705_usbc: fix multiple assignments warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/ccic/max77705_usbc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/ccic/max77705_usbc.c b/drivers/ccic/max77705_usbc.c index ba9893f534f4..aeb15c951e15 100644 --- a/drivers/ccic/max77705_usbc.c +++ b/drivers/ccic/max77705_usbc.c @@ -2117,9 +2117,10 @@ void max77705_usbc_clear_queue(struct max77705_usbc_platform_data *usbc_data) while (!is_empty_usbc_cmd_queue(cmd_queue)) { init_usbc_cmd_data(&cmd_data); dequeue_usbc_cmd(cmd_queue, &cmd_data); - if (max77705_check_recover_opcode(cmd_data.opcode)) - usbc_data->recover_opcode_list[cmd_data.opcode] - = usbc_data->need_recover = true; + if (max77705_check_recover_opcode(cmd_data.opcode)) { + 
usbc_data->recover_opcode_list[cmd_data.opcode] = true; + usbc_data->need_recover = true; + } } usbc_data->opcode_stamp = 0; msg_maxim("OUT"); From 95deeb0d2d4954bcb4427e95de80d45dd144fe0d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:40:40 +0300 Subject: [PATCH 206/452] drivers/hid/hid-samsung: fix set_bit(EV_REP, hi->input->evbit) call Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/hid/hid-samsung.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index f8746ddadf77..977f50de5b2e 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -142,8 +142,8 @@ static int samsung_kbd_input_mapping(struct hid_device *hdev, usage->hid & HID_USAGE); if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) { - switch (usage->hid & HID_USAGE) { set_bit(EV_REP, hi->input->evbit); + switch (usage->hid & HID_USAGE) { /* Only for UK keyboard */ /* key found */ #ifdef CONFIG_HID_KK_UPGRADE @@ -356,8 +356,8 @@ static int samsung_universal_kbd_input_mapping(struct hid_device *hdev, usage->hid & HID_USAGE); if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) { - switch (usage->hid & HID_USAGE) { set_bit(EV_REP, hi->input->evbit); + switch (usage->hid & HID_USAGE) { /* Only for UK keyboard */ /* key found */ #ifdef CONFIG_HID_KK_UPGRADE From 624639c2b7bd05d12a70149cbc8203c37a7bb114 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 13 Sep 2020 23:14:22 +0300 Subject: [PATCH 207/452] drivers/samsung/sec_dump_sink: check kstrtouint() result Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/sec_dump_sink.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/samsung/sec_dump_sink.c b/drivers/samsung/sec_dump_sink.c index 167e0822e598..04d400248854 100644 --- a/drivers/samsung/sec_dump_sink.c +++ b/drivers/samsung/sec_dump_sink.c @@ -27,7 +27,9 @@ static int initialized; static int sec_sdcard_ramdump(const char *val, const struct kernel_param *kp) { - kstrtouint(val, 16, &dump_sink); + if (kstrtouint(val, 16, &dump_sink)) + return 0; + pr_crit("%s: %s %x\n", __func__, val, dump_sink); if (!initialized) From 7cb4f65a48fe5c2909ebcf85c33712e559fcbd17 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:52:19 +0300 Subject: [PATCH 208/452] drivers/sensorhub/brcm/ssp: use int instead of bool Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/ssp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp.h b/drivers/sensorhub/brcm/ssp.h index 0b679d9a8fb5..30a86de66fe3 100644 --- a/drivers/sensorhub/brcm/ssp.h +++ b/drivers/sensorhub/brcm/ssp.h @@ -1026,7 +1026,7 @@ struct ssp_data { /* AP suspend check flag*/ bool IsAPsuspend; /* no ack about mcu_resp pin*/ - bool IsNoRespCnt; + int IsNoRespCnt; /* hall ic */ bool hall_ic_status; // 0: open 1: close }; From 9bf884952c453d36933f703cc1c73e60c23ab9dd Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:53:56 +0300 Subject: [PATCH 209/452] drivers/soc/samsung/exynos_cpu_perf: drop redundant sprintf() args Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c | 2 +- drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c 
b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c index 462c55f75669..7beb28db6ab7 100644 --- a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c +++ b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c @@ -144,7 +144,7 @@ static int cpufreq_log_thread(void *data) } } // mif, gpu, task - ret += snprintf(buf + ret, buf_size - ret, "05-mif_cur 06-gpu_util 06-gpu_cur 07-task_cpu\n", grp_num, cpu); + ret += snprintf(buf + ret, buf_size - ret, "05-mif_cur 06-gpu_util 06-gpu_cur 07-task_cpu\n"); //--------------------- // body diff --git a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c index 0ffff700f4c2..4c047272e3e8 100644 --- a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c +++ b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpuidle.c @@ -362,7 +362,7 @@ static ssize_t show_result(char *buf) if (cpu == cluster_first_cpu[cluster_index]) { /* header: cpufreq */ - ret += snprintf(buf + ret, PAGE_SIZE - ret, "#freq ", cpu); + ret += snprintf(buf + ret, PAGE_SIZE - ret, "#freq "); for (freq = 0; freq < MAX_FREQ; freq++) { freq_value = cpufreq_list[cluster_index][freq]; if (freq_value == 0) { From 73c7023a51623d556345f6a9d86aba2766512a17 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:55:18 +0300 Subject: [PATCH 210/452] drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq: cast buf to char * Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c index 7beb28db6ab7..786fc6dcd5ee 100644 --- a/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c +++ b/drivers/soc/samsung/exynos_cpu_perf/exynos_perf_cpufreq.c @@ -268,7 +268,7 @@ static int run_seq_show(struct seq_file *file, void *iter) if (is_running) { seq_printf(file, "NO RESULT\n"); } else { - seq_printf(file, "%s", buf); // PRINT RESULT + seq_printf(file, "%s", (char *)buf); // PRINT RESULT } return 0; } From a1e43fcc2f34f79782665d8b0d1eafbcbfa2b7cc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 10:57:26 +0300 Subject: [PATCH 211/452] drivers/sensorhub/brcm/sx9330: fix regist, val type to unsigned Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/sx9330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index f83bab018699..267c43700639 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -632,7 +632,7 @@ static ssize_t sx9330_set_offset_calibration_store(struct device *dev, static ssize_t sx9330_register_write_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int regist = 0, val = 0; + uint32_t regist, val = 0; struct sx9330_p *data = dev_get_drvdata(dev); if (sscanf(buf, "%6x,%10x", &regist, &val) != 2) { From b2640aafa9d9a5944fd5a1d971e5430551734e66 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 11:38:45 +0300 Subject: [PATCH 212/452] drivers/video/fbdev/exynos/dpu20/dpp_drv: fix redundant arg check Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/dpp_drv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git
a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c index d26d2f94d5bb..24554dbdad42 100644 --- a/drivers/video/fbdev/exynos/dpu20/dpp_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/dpp_drv.c @@ -654,8 +654,7 @@ static long dpp_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg break; case DPP_STOP: - if (&arg != NULL) - reset = (bool)arg; + reset = (bool)arg; #ifdef CONFIG_EXYNOS_MCD_HDR ret = dpp_mcd_stop(dpp); #endif From 0c21a3b7e3ae2f54976a66bbe554b0c6114b9cc7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 11:47:59 +0300 Subject: [PATCH 213/452] drivers/video/fbdev/exynos/panel/sysfs: drop redundant snprintf() arg Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/panel/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/sysfs.c b/drivers/video/fbdev/exynos/panel/sysfs.c index bdf14b146b6e..9cba2004219e 100644 --- a/drivers/video/fbdev/exynos/panel/sysfs.c +++ b/drivers/video/fbdev/exynos/panel/sysfs.c @@ -1279,7 +1279,7 @@ static ssize_t self_mask_check_show(struct device *dev, len = snprintf(buf, PAGE_SIZE, "%d", success_check); for (i = 0; i < aod->props.self_mask_checksum_len; i++) len += snprintf(buf + len, PAGE_SIZE - len, " %02x", recv_checksum[i]); - len += snprintf(buf + len, PAGE_SIZE - len, "\n", recv_checksum[i]); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); kfree(recv_checksum); } else { snprintf(buf, PAGE_SIZE, "-1\n"); From 34af7cf73f03ba2a70382929740ee346f41d8dc1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 12:28:48 +0300 Subject: [PATCH 214/452] drivers/sensorhub/brcm/sx9330: fix strncpy() call warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/sx9330.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/sensorhub/brcm/sx9330.c b/drivers/sensorhub/brcm/sx9330.c index 267c43700639..6ba8bcbfe308 100644 --- a/drivers/sensorhub/brcm/sx9330.c +++ b/drivers/sensorhub/brcm/sx9330.c @@ -109,7 +109,8 @@ struct sx9330_p { s32 max_normal_diff; int debug_count; - char hall_ic[6]; +#define HALL_IC_LEN 6 + char hall_ic[HALL_IC_LEN]; int is_unknown_mode; int motion; @@ -119,12 +120,12 @@ struct sx9330_p { int pre_attach; }; -static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[]) +static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[HALL_IC_LEN]) { int iRet = 0; mm_segment_t old_fs; struct file *filep; - char hall_sysfs[5]; + char hall_sysfs[HALL_IC_LEN]; old_fs = get_fs(); set_fs(KERNEL_DS); @@ -146,7 +147,7 @@ static int sx9330_check_hallic_state(char *file_path, char hall_ic_status[]) set_fs(old_fs); return -EIO; } else { - strncpy(hall_ic_status, hall_sysfs, sizeof(hall_sysfs)); + strncpy(hall_ic_status, hall_sysfs, HALL_IC_LEN); } filp_close(filep, current->files); From d08a1add360cfe36d5996fa2d3233025f8f819f5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 21:48:28 +0300 Subject: [PATCH 215/452] drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android: fix adps_mode check Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c index b31a85c011c5..45e3616b6515 100644 --- 
a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_android.c @@ -8986,7 +8986,7 @@ wl_android_set_adps_mode(struct net_device *dev, const char* string_num) adps_mode = bcm_atoi(string_num); WL_ERR(("%s: SET_ADPS %d\n", __FUNCTION__, adps_mode)); - if ((adps_mode < 0) && (1 < adps_mode)) { + if ((adps_mode < 0) || (1 < adps_mode)) { WL_ERR(("wl_android_set_adps_mode: Invalid value %d.\n", adps_mode)); return -EINVAL; } From 836374d50809e20400ff12bc64c5c7a9c716ae16 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 21:49:49 +0300 Subject: [PATCH 216/452] mmap/rmap: fix pointer cast to enum warning Signed-off-by: Denis Efremov <efremov@linux.com> --- mm/rmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/rmap.c b/mm/rmap.c index a9ff86a08143..11308147e7d6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1342,7 +1342,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, struct page *subpage; bool ret = true; unsigned long start = address, end; - enum ttu_flags flags = (enum ttu_flags)arg; + enum ttu_flags flags = (uintptr_t)arg; /* munlock has nothing to gain from examining un-locked vmas */ if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) From d3fc220aa735fe8491436f6d0f90eba474562ba4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 21:50:37 +0300 Subject: [PATCH 217/452] drivers/misc/modem_v1/modem_main: fix pointer to enum cast warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/misc/modem_v1/modem_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/modem_v1/modem_main.c b/drivers/misc/modem_v1/modem_main.c index 6fc81df4d485..c7de1b23d2cf 100644 --- a/drivers/misc/modem_v1/modem_main.c +++ b/drivers/misc/modem_v1/modem_main.c @@ -662,7 +662,7 @@ enum mif_sim_mode { static int simslot_count(struct seq_file *m, void *v) { - enum mif_sim_mode mode = (enum mif_sim_mode)m->private; + enum mif_sim_mode mode = (uintptr_t)m->private; seq_printf(m, "%u\n", mode); return 0; From 9a13795398e360da18b052fb8a93a1bc5a9d69bf Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 21:51:07 +0300 Subject: [PATCH 218/452] drivers/scsi/ufs/ufshcd: fix out-of-bounds buffer write Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 21af2b4bce2f..aea5e05d157c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -10122,7 +10122,7 @@ static void ufs_sec_send_errinfo(void *data) { static struct ufs_hba *hba; struct SEC_UFS_counting *err_info; - char buf[22]; + char buf[23]; if (data) { hba = (struct ufs_hba *)data; From 007ee1c6424e4548e1f2c0db738cc9e025d83f74 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 22:10:40 +0300 Subject: [PATCH 219/452] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs: suppress clang warning Signed-off-by: Denis Efremov <efremov@linux.com> --- .../media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c index 8a65ef35bfec..6665f71ca897 100644 --- 
a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c @@ -1283,7 +1283,7 @@ static ssize_t camera_ssrm_camera_info_store(struct device *dev, ret_count = sscanf(buf, "%d%d%d%d%d%d%d", &temp.operation, &temp.cameraID, &temp.previewMinFPS, &temp.previewMaxFPS, &temp.previewSizeWidth, &temp.previewSizeHeight, &temp.sensorOn); - if (ret_count > sizeof(SsrmCameraInfo)/sizeof(int)) { + if (ret_count > sizeof(SsrmCameraInfo)/(sizeof(int))) { return -EINVAL; } From c2bece8af223016d4c5b758e64783ab9a42add48 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 07:53:43 +0300 Subject: [PATCH 220/452] drivers/samsung/debug/sec_debug_test: fix simulate_SYNC_IRQ_LOCKUP() Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/samsung/debug/sec_debug_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/samsung/debug/sec_debug_test.c b/drivers/samsung/debug/sec_debug_test.c index 96245ce6bf18..8aabe2eb09d6 100644 --- a/drivers/samsung/debug/sec_debug_test.c +++ b/drivers/samsung/debug/sec_debug_test.c @@ -893,7 +893,7 @@ static void simulate_SYNC_IRQ_LOCKUP(char *arg) if (arg) { if (!kstrtol(arg, 10, &irq)) { - struct irq_desc *desc = irq_to_desc(i); + struct irq_desc *desc = irq_to_desc(irq); if (desc && desc->action && desc->action->thread_fn) desc->action->thread_fn = dummy_wait_for_completion_irq_handler; From 36212daf67840d5651c7f17c01f4988db377e276 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 09:28:24 +0300 Subject: [PATCH 221/452] drivers/media/tdmb/fc8080/ficdecoder: use unsigned 0x7fU constant to suppress the warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/media/tdmb/fc8080/ficdecoder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/tdmb/fc8080/ficdecoder.c b/drivers/media/tdmb/fc8080/ficdecoder.c index 337bb60c5aa8..b9fae7bc349b 100644 --- a/drivers/media/tdmb/fc8080/ficdecoder.c +++ b/drivers/media/tdmb/fc8080/ficdecoder.c @@ -861,7 +861,7 @@ int fig0_ext10_decoder(u8 *fibBuffer, int figLength) u8 hour = 0; /*minutes = 0, seconds = 0*/ u16 milliseconds = 0; - MJD = (fibBuffer[0] & 0x7f) << 10; + MJD = (fibBuffer[0] & 0x7fU) << 10; MJD |= (fibBuffer[1] << 2); MJD |= (fibBuffer[2] & 0xc0) >> 6; /*LSI = (fibBuffer[2] & 0x20) >> 5; */ From 59eedffcd7ddb8233f174fb0df2875be84e8ee33 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 10:07:18 +0300 Subject: [PATCH 222/452] drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs: fix sscanf format Signed-off-by: Denis Efremov <efremov@linux.com> --- .../media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c index 6665f71ca897..3d3e75b38d8d 100644 --- a/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c +++ b/drivers/media/platform/exynos/fimc-is2/vendor/mcd/fimc-is-sysfs.c @@ -2761,7 +2761,7 @@ static ssize_t camera_front_tof_check_pd_store(struct device *dev, return -ENODEV; } - ret_count = sscanf(buf, "%d", &value); + ret_count = sscanf(buf, "%hhd", &value); camera_tof_set_laser_current(SENSOR_POSITION_FRONT_TOF, value); return count; } From 8de904e1e190e3cfbdfe279e0dedc97bdd13c3a6 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> 
Date: Sun, 20 Sep 2020 16:02:14 +0300 Subject: [PATCH 223/452] drivers/battery_v2/mfc_s2miw04_charger: fix scanf format Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/battery_v2/mfc_s2miw04_charger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/battery_v2/mfc_s2miw04_charger.c b/drivers/battery_v2/mfc_s2miw04_charger.c index 9ccc1f29062a..21cabb16aa93 100644 --- a/drivers/battery_v2/mfc_s2miw04_charger.c +++ b/drivers/battery_v2/mfc_s2miw04_charger.c @@ -4440,7 +4440,7 @@ ssize_t mfc_s2miw04_store_attrs(struct device *dev, ret = count; break; case MFC_PACKET: - if (sscanf(buf, "0x%4x 0x%4x 0x%4x\n", &header, &data_com, &data_val) == 3) { + if (sscanf(buf, "0x%4hhx 0x%4hhx 0x%4hhx\n", &header, &data_com, &data_val) == 3) { dev_info(charger->dev, "%s 0x%x, 0x%x, 0x%x \n", __func__, header, data_com, data_val); mfc_send_packet(charger, header, data_com, &data_val, 1); } From 7113bfa9c2fa3f3b1fed67009c82dcb246861dfc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 16:02:47 +0300 Subject: [PATCH 224/452] drivers/input/keyboard/stm/fsr1ad04: move G1,G2 definitions to .c file Signed-off-by: Denis Efremov <efremov@linux.com> --- .../input/keyboard/stm/fsr1ad04/stm_fsr_functions.c | 8 ++++++++ drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h | 10 +++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c index c5d49a34b193..f01c90c296bc 100644 --- a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c +++ b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_functions.c @@ -29,6 +29,14 @@ #include "stm_fsr_sidekey.h" +int G1[4] = { -2400, -2400, -4800, -4800 }; +int G2[4][4] = { + {-250, -250, -500, -500}, + {-125, -125, -250, -250}, + {-500, -500, -1000, -1000}, + {-250, -250, -500, -500} +}; + static void fw_update(void *device_data); static void get_chip_vendor(void *device_data); static void get_chip_name(void *device_data); diff --git a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h index ebfff8b2f944..f3329f60949b 100644 --- a/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h +++ b/drivers/input/keyboard/stm/fsr1ad04/stm_fsr_sidekey.h @@ -144,13 +144,9 @@ struct fsr_sidekey_plat_data { #define BUFFER_MAX ((256 * 1024) - 16) -static int G1[4] = { -2400, -2400, -4800, -4800 }; -static int G2[4][4] = { - {-250, -250, -500, -500}, - {-125, -125, -250, -250}, - {-500, -500, -1000, -1000}, - {-250, -250, -500, -500} -}; +extern int G1[4]; +extern int G2[4][4]; + enum { TYPE_RAW_DATA, From 9ee804ebb2058e4d102153fe882114f96317bdce Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 16:03:24 +0300 Subject: [PATCH 225/452] drivers/sensorhub/brcm/sx9360: fix strncpy() call warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/sx9360.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/sensorhub/brcm/sx9360.c b/drivers/sensorhub/brcm/sx9360.c index 835f3cdac3a6..de1c7e6a43dd 100644 --- a/drivers/sensorhub/brcm/sx9360.c +++ b/drivers/sensorhub/brcm/sx9360.c @@ -133,7 +133,8 @@ struct sx9360_p { s16 max_normal_diff; int debug_count; - char hall_ic[6]; +#define HALL_IC_LEN 6 + char hall_ic[HALL_IC_LEN]; int is_unknown_mode; int motion; bool first_working; @@ -148,12 +149,12 @@ struct sx9360_p { #endif }; -static int 
sx9360_check_hallic_state(char *file_path, char hall_ic_status[]) +static int sx9360_check_hallic_state(char *file_path, char hall_ic_status[HALL_IC_LEN]) { int iRet = 0; mm_segment_t old_fs; struct file *filep; - char hall_sysfs[5]; + char hall_sysfs[HALL_IC_LEN]; old_fs = get_fs(); set_fs(KERNEL_DS); @@ -175,7 +176,7 @@ static int sx9360_check_hallic_state(char *file_path, char hall_ic_status[]) set_fs(old_fs); return -EIO; } else { - strncpy(hall_ic_status, hall_sysfs, sizeof(hall_sysfs)); + strncpy(hall_ic_status, hall_sysfs, HALL_IC_LEN); } filp_close(filep, current->files); From 24e64d8dc4377008cdeb5f1402a0907a872c7996 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 16:05:15 +0300 Subject: [PATCH 226/452] drivers/input/touchscreen/sec_ts/y771_d/sec_ts: check regulator_enable() result Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c b/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c index 51b608b11356..534601f073a5 100644 --- a/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c +++ b/drivers/input/touchscreen/sec_ts/y771_d/sec_ts.c @@ -2100,8 +2100,14 @@ int sec_ts_power(void *data, bool on) if (regulator_is_enabled(regulator_dvdd)) { ret = regulator_disable(regulator_dvdd); if (ret) { + int ret; + input_err(true, &ts->client->dev, "%s: failed to disable dvdd: %d\n", __func__, ret); - regulator_enable(regulator_avdd); + ret = regulator_enable(regulator_avdd); + if (ret < 0) { + input_err(true, &ts->client->dev, "%s: failed to reenable dvdd: %d\n", __func__, ret); + } + goto out; } } else { From cf97dce5cb39d82579abaaaca43a06dd139b76b9 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 23 Sep 2020 18:20:22 +0300 Subject: [PATCH 227/452] drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata: fix initialization warning Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c index 6dc91e1f74c5..b62966916486 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/wl_bigdata.c @@ -272,7 +272,7 @@ wl_gather_ap_stadata(void *handle, void *event_info, u8 event) wl_event_msg_t *e; wl_ap_sta_data_t *sta_data; - wl_ap_sta_data_t temp_sta_data = {0}; + wl_ap_sta_data_t temp_sta_data = {}; void *data = NULL; int i; int ret; From 251111639d56869e6b033627838ac954a7cc9fe5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 20 Sep 2020 16:20:37 +0300 Subject: [PATCH 228/452] init/main: use __initdata_or_module for initcall_sec_debug Signed-off-by: Denis Efremov <efremov@linux.com> --- init/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/main.c b/init/main.c index c1326c2b549e..2366627a67e2 100644 --- a/init/main.c +++ b/init/main.c @@ -1017,7 +1017,7 @@ __setup("initcall_blacklist=", initcall_blacklist); #ifdef CONFIG_SEC_BOOTSTAT -static bool __init_or_module initcall_sec_debug = true; +static bool __initdata_or_module initcall_sec_debug = true; static int __init_or_module do_one_initcall_sec_debug(initcall_t fn) { From 9b7858f924f38a46d6df5c845917b60061b70521 Mon Sep 17 00:00:00 2001 From: Denis Efremov 
<efremov@linux.com> Date: Sat, 12 Sep 2020 01:28:36 +0300 Subject: [PATCH 229/452] security/sdp/dd_kernel_crypto: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- security/sdp/dd_kernel_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/sdp/dd_kernel_crypto.c b/security/sdp/dd_kernel_crypto.c index c6aa2a066bbd..14f90f613853 100644 --- a/security/sdp/dd_kernel_crypto.c +++ b/security/sdp/dd_kernel_crypto.c @@ -643,7 +643,7 @@ int dd_sec_crypt_bio_pages(struct dd_info *info, struct bio *orig, if (rw == DD_ENCRYPT) memcpy(&clone->bi_iter, &iter_backup, sizeof(struct bvec_iter)); - return 0; + return 0; } void dd_hex_key_dump(const char* tag, uint8_t *data, size_t data_len) From 15d50c55a44a4fe72c907e0a707cd0e21c3e2642 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 12 Sep 2020 01:29:41 +0300 Subject: [PATCH 230/452] net/mptcp/mptcp_fullmesh: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- net/mptcp/mptcp_fullmesh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/mptcp_fullmesh.c b/net/mptcp/mptcp_fullmesh.c index ce8343de9add..56a066ed4ef0 100644 --- a/net/mptcp/mptcp_fullmesh.c +++ b/net/mptcp/mptcp_fullmesh.c @@ -1169,7 +1169,7 @@ static int inet6_addr_event(struct notifier_block *this, unsigned long event, event == NETDEV_CHANGE)) return NOTIFY_DONE; - addr6_event_handler(ifa6, event, net); + addr6_event_handler(ifa6, event, net); return NOTIFY_DONE; } From ce6d321140a20d2c2d9d1f1cb9010b0ca29855e2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:45:05 +0300 Subject: [PATCH 231/452] drivers/sensorhub/brcm/bbdpl/bbd: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/bbdpl/bbd.c | 32 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/sensorhub/brcm/bbdpl/bbd.c b/drivers/sensorhub/brcm/bbdpl/bbd.c index d29857429971..5627b24d89b1 100644 --- a/drivers/sensorhub/brcm/bbdpl/bbd.c +++ b/drivers/sensorhub/brcm/bbdpl/bbd.c @@ -903,22 +903,22 @@ ssize_t bbd_urgent_patch_read(struct file *user_filp, char __user *buf, size_t s } else is_signed = true; - if (is_signed == false) { - pr_err("[SSPBBD] %s : urgent_patch is not signed", __func__); - kfree(urgent_buffer); - return 0; - } - - urgent_patch_size = ret; - pr_err("[SSPBBD] %s : total: %d patch size: %d", __func__, fsize, urgent_patch_size); - - if (offset >= urgent_patch_size) { // signal EOF - pr_err("[SSPBBD] %s : signal EOF", __func__); - - *ppos = 0; - kfree(urgent_buffer); - return 0; - } + if (is_signed == false) { + pr_err("[SSPBBD] %s : urgent_patch is not signed", __func__); + kfree(urgent_buffer); + return 0; + } + + urgent_patch_size = ret; + pr_err("[SSPBBD] %s : total: %lld patch size: %d", __func__, fsize, urgent_patch_size); + + if (offset >= urgent_patch_size) { // signal EOF + pr_err("[SSPBBD] %s : signal EOF", __func__); + + *ppos = 0; + kfree(urgent_buffer); + return 0; + } if (offset + size > urgent_patch_size) rd_size = urgent_patch_size - offset; From bbf4016d1d69a272a198346fd3667bf6ddc70efe Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 00:46:18 +0300 Subject: [PATCH 232/452] drivers/sensorhub/brcm/ssp_bbd: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/ssp_bbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/drivers/sensorhub/brcm/ssp_bbd.c b/drivers/sensorhub/brcm/ssp_bbd.c index 1c9d641ed869..8987fc69f563 100644 --- a/drivers/sensorhub/brcm/ssp_bbd.c +++ b/drivers/sensorhub/brcm/ssp_bbd.c @@ -304,7 +304,7 @@ int callback_bbd_on_mcu_ready(void *ssh_data, bool ready) continue; if(src[i] == ';') break; - dst[idx++] = src[i]; + dst[idx++] = src[i]; } } int callback_bbd_on_control(void *ssh_data, const char *str_ctrl) From fb16e95f894d0f4c34d669a292aac8d2c6354404 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 11:36:51 +0300 Subject: [PATCH 233/452] drivers/usb/gadget/function/f_conn_gadget: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/usb/gadget/function/f_conn_gadget.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/usb/gadget/function/f_conn_gadget.c b/drivers/usb/gadget/function/f_conn_gadget.c index e0bafd0ca278..e6e02f7b819b 100644 --- a/drivers/usb/gadget/function/f_conn_gadget.c +++ b/drivers/usb/gadget/function/f_conn_gadget.c @@ -1284,8 +1284,8 @@ static int conn_gadget_setup(struct conn_gadget_instance *fi_conn_gadget) return 0; err_: - if (dev->rd_queue_buf) - vfree(dev->rd_queue_buf); + if (dev->rd_queue_buf) + vfree(dev->rd_queue_buf); _conn_gadget_dev = NULL; kfree(dev); @@ -1304,8 +1304,8 @@ static void conn_gadget_cleanup(struct kref *kref) misc_deregister(&conn_gadget_device); - if (_conn_gadget_dev->rd_queue_buf) - vfree(_conn_gadget_dev->rd_queue_buf); + if (_conn_gadget_dev->rd_queue_buf) + vfree(_conn_gadget_dev->rd_queue_buf); kfree(_conn_gadget_dev); _conn_gadget_dev = NULL; From b3edfa79e34fe5931a3f0212028851cdc5a259eb Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 11:39:30 +0300 Subject: [PATCH 234/452] drivers/usb/core/devio: fix misleading indentation Move dev_info() out of switch. 
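In the old code the dev_info() call sat between the opening brace of the switch and its first case label. A statement in that position never runs: on entry to a switch, control transfers directly to the matching case (or default) label, so anything placed before the first label is dead code, and the indentation suggested behaviour the function never had. A minimal sketch of the problematic shape (illustrative only, not the driver code):

	switch (code) {
		pr_info("never printed\n");	/* unreachable: precedes the first case label */
	case 1:
		/* ... */
		break;
	default:
		/* ... */
		break;
	}

Wrapping the switch in an else block and hoisting the dev_info() in front of it means the message is actually emitted for every ioctl that reaches this point, and the indentation now follows the real control flow.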
Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/usb/core/devio.c | 56 +++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index f230da5ac6de..d4f4c040ad65 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -2189,36 +2189,38 @@ static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl) retval = -EHOSTUNREACH; else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno))) retval = -EINVAL; - else switch (ctl->ioctl_code) { - dev_info(&ps->dev->dev,"%s ioctl_code %d\n", __func__, ctl->ioctl_code); - /* disconnect kernel driver from interface */ - case USBDEVFS_DISCONNECT: - if (intf->dev.driver) { - driver = to_usb_driver(intf->dev.driver); - dev_dbg(&intf->dev, "disconnect by usbfs\n"); - usb_driver_release_interface(driver, intf); - } else - retval = -ENODATA; - break; + else { + dev_info(&ps->dev->dev,"%s ioctl_code %d\n", __func__, ctl->ioctl_code); + switch (ctl->ioctl_code) { + /* disconnect kernel driver from interface */ + case USBDEVFS_DISCONNECT: + if (intf->dev.driver) { + driver = to_usb_driver(intf->dev.driver); + dev_dbg(&intf->dev, "disconnect by usbfs\n"); + usb_driver_release_interface(driver, intf); + } else + retval = -ENODATA; + break; - /* let kernel drivers try to (re)bind to the interface */ - case USBDEVFS_CONNECT: - if (!intf->dev.driver) - retval = device_attach(&intf->dev); - else - retval = -EBUSY; - break; + /* let kernel drivers try to (re)bind to the interface */ + case USBDEVFS_CONNECT: + if (!intf->dev.driver) + retval = device_attach(&intf->dev); + else + retval = -EBUSY; + break; - /* talk directly to the interface's driver */ - default: - if (intf->dev.driver) - driver = to_usb_driver(intf->dev.driver); - if (driver == NULL || driver->unlocked_ioctl == NULL) { - retval = -ENOTTY; - } else { - retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); - if (retval == -ENOIOCTLCMD) + /* talk directly to the interface's driver */ + default: + if (intf->dev.driver) + driver = to_usb_driver(intf->dev.driver); + if (driver == NULL || driver->unlocked_ioctl == NULL) { retval = -ENOTTY; + } else { + retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); + if (retval == -ENOIOCTLCMD) + retval = -ENOTTY; + } } } From 5d5cf97b706d63811eac4669eaf0c1181700cc25 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 12:44:32 +0300 Subject: [PATCH 235/452] net/netfilter/linkforward: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- net/netfilter/linkforward.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/net/netfilter/linkforward.c b/net/netfilter/linkforward.c index 07dfa50c3cda..8f90816d7463 100644 --- a/net/netfilter/linkforward.c +++ b/net/netfilter/linkforward.c @@ -224,18 +224,20 @@ int linkforward_add(__be16 dst_port, struct nf_conntrack_tuple *t_rpl, struct nf if (!room_found) { last_conn_idx++; + if (last_conn_idx == MAX_CONNECTION_CNT) last_conn_idx = 0; - i = last_conn_idx; - conn[i].enabled = true; - conn[i].dst_port = dst_port; - conn[i].netdev = netdev; - memcpy(&conn[i].t[0], t_org, sizeof(struct nf_conntrack_tuple)); - memcpy(&conn[i].t[1], t_rpl, sizeof(struct nf_conntrack_tuple)); + + i = last_conn_idx; + conn[i].enabled = true; + conn[i].dst_port = dst_port; + conn[i].netdev = netdev; + memcpy(&conn[i].t[0], t_org, sizeof(struct nf_conntrack_tuple)); + memcpy(&conn[i].t[1], t_rpl, sizeof(struct 
nf_conntrack_tuple)); #ifdef CONFIG_CP_DIT - dit_set_nat_local_addr(t_org->src.u3.ip); - dit_set_nat_filter(i, IPPROTO_TCP, 0xffffffff, 0xffff, dst_port); + dit_set_nat_local_addr(t_org->src.u3.ip); + dit_set_nat_filter(i, IPPROTO_TCP, 0xffffffff, 0xffff, dst_port); #endif } From faefc77e50fa4153fcd2a74fb8fb1fced1507588 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 14 Sep 2020 22:14:20 +0300 Subject: [PATCH 236/452] drivers/sensorhub/brcm/ssp_i2c: fix misleading indentation Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/sensorhub/brcm/ssp_i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/sensorhub/brcm/ssp_i2c.c b/drivers/sensorhub/brcm/ssp_i2c.c index 37e1522ee2d2..4c59c007e98c 100644 --- a/drivers/sensorhub/brcm/ssp_i2c.c +++ b/drivers/sensorhub/brcm/ssp_i2c.c @@ -315,7 +315,7 @@ int send_instruction(struct ssp_data *data, u8 uInst, if (uLength >= 9) BatchTimeforReset = *(unsigned int *)(&uSendBuf[4]);// Add / change normal case, not factory. //pr_info("[SSP] %s timeForRest %d", __func__, BatchTimeforReset); - data->IsBypassMode[uSensorType] = (BatchTimeforReset == 0); + data->IsBypassMode[uSensorType] = (BatchTimeforReset == 0); //pr_info("[SSP] sensor%d mode%d Time %lld\n", uSensorType, data->IsBypassMode[uSensorType], current_Ts); } return iRet; From 3d883f51f366bd26d48ad08c0200e58fcc2a04b2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 23 Mar 2020 21:05:20 +0300 Subject: [PATCH 237/452] net: ipv4: lock the initial TCP window size to 64K Signed-off-by: Denis Efremov <efremov@linux.com> --- net/ipv4/Kconfig | 7 +++++++ net/ipv4/tcp_output.c | 9 +++++++++ 2 files changed, 16 insertions(+) diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 93def62ca73e..73a6b13e33ba 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -463,6 +463,13 @@ config INET_DIAG_DESTROY had been disconnected. If unsure, say N. +config LARGE_TCP_INITIAL_BUFFER + bool "TCP: lock the initial window size to 64K" + default n + ---help--- + Lock the initial TCP window size to 64K. + If unsure, say N. + menuconfig TCP_CONG_ADVANCED bool "TCP: advanced congestion control" ---help--- diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2b6e5715844f..17ace4982a1e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -272,6 +272,15 @@ void tcp_select_initial_window(int __space, __u32 mss, *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); } +#ifdef CONFIG_LARGE_TCP_INITIAL_BUFFER + pr_info("TCP: default window size: %u\n", *rcv_wnd); + /* Lock the initial TCP window size to 64K. + * Assuming 1500 packet size, 64240 is the largest multiple + * of MSS (44 * 1460) under 65535 (2 << 15). 
+ */ + *rcv_wnd = 64240; +#endif + /* Set the clamp no higher than max representable value */ (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); } From 6f9ed02e44fc296a5ded2b048a4d8124bcfce474 Mon Sep 17 00:00:00 2001 From: franciscofranco <franciscofranco.1990@gmail.com> Date: Sun, 17 Sep 2017 02:48:54 +0200 Subject: [PATCH 238/452] fs: fsync on/off support [efremov: change permissions from 0755 to 0644] Signed-off-by: djb77 <dwayne.bakewell@gmail.com> Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/sync.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/fs/sync.c b/fs/sync.c index afb091ba9ecc..2f6aca5f0cdd 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -8,6 +8,7 @@ #include <linux/fs.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/module.h> #include <linux/namei.h> #include <linux/sched/xacct.h> #include <linux/writeback.h> @@ -18,6 +19,9 @@ #include <linux/backing-dev.h> #include "internal.h" +bool fsync_enabled = true; +module_param(fsync_enabled, bool, 0644); + #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) @@ -406,10 +410,15 @@ void emergency_sync(void) */ SYSCALL_DEFINE1(syncfs, int, fd) { - struct fd f = fdget(fd); + struct fd f; struct super_block *sb; int ret; + if (!fsync_enabled) + return 0; + + f = fdget(fd); + if (!f.file) return -EBADF; sb = f.file->f_path.dentry->d_sb; @@ -437,6 +446,9 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; + if (!fsync_enabled) + return 0; + if (!file->f_op->fsync) return -EINVAL; if (!datasync && (inode->i_state & I_DIRTY_TIME)) { @@ -459,6 +471,9 @@ EXPORT_SYMBOL(vfs_fsync_range); */ int vfs_fsync(struct file *file, int datasync) { + if (!fsync_enabled) + return 0; + return vfs_fsync_range(file, 0, LLONG_MAX, datasync); } EXPORT_SYMBOL(vfs_fsync); @@ -487,10 +502,15 @@ static void inc_fsync_time_cnt(unsigned long end, unsigned long start) static int do_fsync(unsigned int fd, int datasync) { - struct fd f = fdget(fd); + struct fd f; int ret = -EBADF; unsigned long stamp = jiffies; + if (!fsync_enabled) + return 0; + + f = fdget(fd); + if (f.file) { ret = vfs_fsync(f.file, datasync); fdput(f); @@ -502,11 +522,17 @@ static int do_fsync(unsigned int fd, int datasync) SYSCALL_DEFINE1(fsync, unsigned int, fd) { + if (!fsync_enabled) + return 0; + return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { + if (!fsync_enabled) + return 0; + return do_fsync(fd, 1); } @@ -566,6 +592,9 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, loff_t endbyte; /* inclusive */ umode_t i_mode; + if (!fsync_enabled) + return 0; + ret = -EINVAL; if (flags & ~VALID_FLAGS) goto out; From 1a6cb2b6cf8ce18e1abc4645d292d4d3c280ff46 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 12 Apr 2020 09:20:35 +0300 Subject: [PATCH 239/452] fs: add NOATIME/RELATIME default mount option Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/Kconfig | 14 ++++++++++++++ fs/namespace.c | 5 +++++ 2 files changed, 19 insertions(+) diff --git a/fs/Kconfig b/fs/Kconfig index 6f97b4d448d1..b42d356344ab 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -12,6 +12,20 @@ config INTERRUPTIBLE_SYNC bool "Support interruptible sync for Samsung Mobile Device" default y +choice + prompt "Default mount option (RELATIME/NOATIME)" + default DEFAULT_MNT_RELATIME + help + Select default mount option. 
+ + config DEFAULT_MNT_RELATIME + bool "RELATIME" + + config DEFAULT_MNT_NOATIME + bool "NOATIME" +endchoice + + if BLOCK config FS_IOMAP diff --git a/fs/namespace.c b/fs/namespace.c index c06a0954a9d2..ff787d3fb622 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3469,9 +3469,14 @@ long do_mount(const char *dev_name, const char __user *dir_name, if (retval) goto dput_out; +#ifdef CONFIG_DEFAULT_MNT_NOATIME + if (!(flags & MS_RELATIME)) + mnt_flags |= MNT_NOATIME; +#else /* Default to relatime unless overriden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; +#endif /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) From 40868d67f7c67f2929cd56b17b3ce60a6aab947c Mon Sep 17 00:00:00 2001 From: Sultanxda <sultanxda@gmail.com> Date: Fri, 21 Oct 2016 01:37:16 -0700 Subject: [PATCH 240/452] proc: Remove SafetyNet flags from /proc/cmdline Userspace parses this and sets the ro.boot.verifiedbootstate prop according to the value that this flag has. When ro.boot.verifiedbootstate is not 'green', SafetyNet is tripped and fails the CTS test. Hide verifiedbootstate from /proc/cmdline in order to fix the failed SafetyNet CTS check. SafetyNet checks androidboot.veritymode in Nougat, so remove it. Additionally, remove androidboot.enable_dm_verity and androidboot.secboot in case SafetyNet will check them in the future. Signed-off-by: Sultanxda <sultanxda@gmail.com> Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/proc/cmdline.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c index 403cbb12a6e9..bb4ff145fc6a 100644 --- a/fs/proc/cmdline.c +++ b/fs/proc/cmdline.c @@ -3,10 +3,13 @@ #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <asm/setup.h> + +static char new_command_line[COMMAND_LINE_SIZE]; static int cmdline_proc_show(struct seq_file *m, void *v) { - seq_printf(m, "%s\n", saved_command_line); + seq_printf(m, "%s\n", new_command_line); return 0; } @@ -22,8 +25,38 @@ static const struct file_operations cmdline_proc_fops = { .release = single_release, }; +static void remove_flag(char *cmd, const char *flag) +{ + char *start_addr, *end_addr; + + /* Ensure all instances of a flag are removed */ + while ((start_addr = strstr(cmd, flag))) { + end_addr = strchr(start_addr, ' '); + if (end_addr) + memmove(start_addr, end_addr + 1, strlen(end_addr)); + else + *(start_addr - 1) = '\0'; + } +} + +static void remove_safetynet_flags(char *cmd) +{ + remove_flag(cmd, "androidboot.enable_dm_verity="); + remove_flag(cmd, "androidboot.secboot="); + remove_flag(cmd, "androidboot.verifiedbootstate="); + remove_flag(cmd, "androidboot.veritymode="); +} + static int __init proc_cmdline_init(void) { + strcpy(new_command_line, saved_command_line); + + /* + * Remove various flags from command line seen by userspace in order to + * pass SafetyNet CTS check. 
+ */ + remove_safetynet_flags(new_command_line); + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); return 0; } From adf023686351e8f8d2f59d688e369b3ada150d87 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 6 Feb 2020 14:29:34 +0300 Subject: [PATCH 241/452] proc: add CONFIG_PROC_REMOVE_SAFETYNET_FLAGS Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/proc/Kconfig | 9 +++++++++ fs/proc/cmdline.c | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 54e04a53bd70..5e14d454c865 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -62,6 +62,15 @@ config PROC_SYSCTL building a kernel for install/rescue disks or your system is very limited in memory. +config PROC_REMOVE_SAFETYNET_FLAGS + bool "Hide SafetyNet flags in cmdline" if EXPERT + depends on PROC_FS + default n + ---help--- + Remove "androidboot.enable_dm_verity", "androidboot.secboot", + "androidboot.verifiedbootstate", "androidboot.veritymode" from + cmdline. This will help to bypass SafetyNet checks. + config PROC_PAGE_MONITOR default y depends on PROC_FS && MMU diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c index bb4ff145fc6a..50bdc4052aea 100644 --- a/fs/proc/cmdline.c +++ b/fs/proc/cmdline.c @@ -25,6 +25,8 @@ static const struct file_operations cmdline_proc_fops = { .release = single_release, }; + +#ifdef CONFIG_PROC_REMOVE_SAFETYNET_FLAGS static void remove_flag(char *cmd, const char *flag) { char *start_addr, *end_addr; @@ -46,16 +48,19 @@ static void remove_safetynet_flags(char *cmd) remove_flag(cmd, "androidboot.verifiedbootstate="); remove_flag(cmd, "androidboot.veritymode="); } +#endif static int __init proc_cmdline_init(void) { strcpy(new_command_line, saved_command_line); +#ifdef CONFIG_PROC_REMOVE_SAFETYNET_FLAGS /* * Remove various flags from command line seen by userspace in order to * pass SafetyNet CTS check. */ remove_safetynet_flags(new_command_line); +#endif proc_create("cmdline", 0, NULL, &cmdline_proc_fops); return 0; From 2779b5f1fae38d04360be358341df0fa9e53ae04 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 19:10:21 +0300 Subject: [PATCH 242/452] android: Add superuser driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Run `su` to get root. HELLO NAÏVE TECH JOURNALISTS: This commit here introduces a driver used during development, because having access to scary debugging facilities is useful during kernel development. This driver is disabled by default, and when it is enabled, it spews warnings all over the place to encourage people who turn it on by accident to turn it off. It's exceedingly unlikely that somebody turns this on without intending to do so. You really have to fish around and make a concerted effort to get it enabled and working, and the warning messages it gives during the build and boot processes are really an eyesore. So, if you've found this commit because some idiot shipped a kernel to their users, you have every reason to blame that idiot, and not this commit. And if the fool insists it was an 'accident', he's not telling the truth. 
This is from: https://git.zx2c4.com/kernel-assisted-superuser/about Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/Kconfig | 9 +++ drivers/base/Makefile | 1 + drivers/base/superuser.c | 143 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 153 insertions(+) create mode 100644 drivers/base/superuser.c diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 65db84f77442..0ae31e75040d 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -357,3 +357,12 @@ config CPU_CAPACITY_FIXUP Enable to avoid that unity set cpuset of its client task to big core only. endmenu +config ASSISTED_SUPERUSER + bool "Kernel-assisted superuser" + select SECURITY_SELINUX_DEVELOP if SECURITY_SELINUX + ---help--- + This driver gives trivial root access by typing `su` in a + shell. It is a security disaster, and nobody should enable + this catastrophe of a driver. + + Say N here unless you have a vendetta against kittens. diff --git a/drivers/base/Makefile b/drivers/base/Makefile index e32a52490051..465e8de0d7da 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -30,3 +30,4 @@ obj-y += test/ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +obj-$(CONFIG_ASSISTED_SUPERUSER) += superuser.o diff --git a/drivers/base/superuser.c b/drivers/base/superuser.c new file mode 100644 index 000000000000..0e70b55c0c7e --- /dev/null +++ b/drivers/base/superuser.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +/* Hello. If this is enabled in your kernel for some reason, whoever is + * distributing your kernel to you is a complete moron, and you shouldn't + * use their kernel anymore. But it's not my fault! People: don't enable + * this driver! (Note that the existence of this file does not imply the + * driver is actually in use. Look in your .config to see whether this is + * enabled.) -Jason + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/mman.h> +#include <linux/ptrace.h> +#include <linux/syscalls.h> + +static bool is_su(const char __user *filename) +{ + static const char su_path[] = "/system/bin/su"; + char ufn[sizeof(su_path)]; + + return likely(!copy_from_user(ufn, filename, sizeof(ufn))) && + unlikely(!memcmp(ufn, su_path, sizeof(ufn))); +} + +static void __user *userspace_stack_buffer(const void *d, size_t len) +{ + /* To avoid having to mmap a page in userspace, just write below the stack pointer. */ + char __user *p = (void __user *)current_user_stack_pointer() - len; + + return copy_to_user(p, d, len) ? 
NULL : p; +} + +static char __user *sh_user_path(void) +{ + static const char sh_path[] = "/system/bin/sh"; + + return userspace_stack_buffer(sh_path, sizeof(sh_path)); +} + +static long(*old_newfstatat)(int dfd, const char __user *filename, + struct stat *statbuf, int flag); +static long new_newfstatat(int dfd, const char __user *filename, + struct stat __user *statbuf, int flag) +{ + if (!is_su(filename)) + return old_newfstatat(dfd, filename, statbuf, flag); + return old_newfstatat(dfd, sh_user_path(), statbuf, flag); +} + +static long(*old_faccessat)(int dfd, const char __user *filename, int mode); +static long new_faccessat(int dfd, const char __user *filename, int mode) +{ + if (!is_su(filename)) + return old_faccessat(dfd, filename, mode); + return old_faccessat(dfd, sh_user_path(), mode); +} + +extern int selinux_enforcing; +static long (*old_execve)(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp); +static long new_execve(const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp) +{ + static const char now_root[] = "You are now root.\n"; + struct cred *cred; + + if (!is_su(filename)) + return old_execve(filename, argv, envp); + + if (!old_execve(filename, argv, envp)) + return 0; + + /* It might be enough to just change the security ctx of the + * current task, but that requires slightly more thought than + * just axing the whole thing here. + */ + selinux_enforcing = 0; + + /* Rather than the usual commit_creds(prepare_kernel_cred(NULL)) idiom, + * we manually zero out the fields in our existing one, so that we + * don't have to futz with the task's key ring for disk access. + */ + cred = (struct cred *)__task_cred(current); + memset(&cred->uid, 0, sizeof(cred->uid)); + memset(&cred->gid, 0, sizeof(cred->gid)); + memset(&cred->suid, 0, sizeof(cred->suid)); + memset(&cred->euid, 0, sizeof(cred->euid)); + memset(&cred->egid, 0, sizeof(cred->egid)); + memset(&cred->fsuid, 0, sizeof(cred->fsuid)); + memset(&cred->fsgid, 0, sizeof(cred->fsgid)); + memset(&cred->cap_inheritable, 0xff, sizeof(cred->cap_inheritable)); + memset(&cred->cap_permitted, 0xff, sizeof(cred->cap_permitted)); + memset(&cred->cap_effective, 0xff, sizeof(cred->cap_effective)); + memset(&cred->cap_bset, 0xff, sizeof(cred->cap_bset)); + memset(&cred->cap_ambient, 0xff, sizeof(cred->cap_ambient)); + + sys_write(2, userspace_stack_buffer(now_root, sizeof(now_root)), + sizeof(now_root) - 1); + return old_execve(sh_user_path(), argv, envp); +} + +extern const unsigned long sys_call_table[]; +static void read_syscall(void **ptr, unsigned int syscall) +{ + *ptr = READ_ONCE(*((void **)sys_call_table + syscall)); +} +static void replace_syscall(unsigned int syscall, void *ptr) +{ + WRITE_ONCE(*((void **)sys_call_table + syscall), ptr); +} +#define read_and_replace_syscall(name) do { \ + read_syscall((void **)&old_ ## name, __NR_ ## name); \ + replace_syscall(__NR_ ## name, &new_ ## name); \ +} while (0) + +static int superuser_init(void) +{ + pr_err("WARNING WARNING WARNING WARNING WARNING\n"); + pr_err("This kernel has kernel-assisted superuser and contains a\n"); + pr_err("trivial way to get root. 
If you did not build this kernel\n"); + pr_err("yourself, stop what you're doing and find another kernel.\n"); + pr_err("This one is not safe to use.\n"); + pr_err("WARNING WARNING WARNING WARNING WARNING\n"); + + read_and_replace_syscall(newfstatat); + read_and_replace_syscall(faccessat); + read_and_replace_syscall(execve); + + return 0; +} + +module_init(superuser_init); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Kernel-assisted superuser for Android"); +MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); From 7753539397bd81c1f7ecb7445b179c60e1636563 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 21 Jan 2020 21:16:55 +0300 Subject: [PATCH 243/452] drivers/base/Kconfig: CONFIG_ASSISTED_SUPERUSER default n Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 0ae31e75040d..117492e7265d 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -360,6 +360,7 @@ endmenu config ASSISTED_SUPERUSER bool "Kernel-assisted superuser" select SECURITY_SELINUX_DEVELOP if SECURITY_SELINUX + default n ---help--- This driver gives trivial root access by typing `su` in a shell. It is a security disaster, and nobody should enable From 5d9cfe558f150c1c1cca0cffea741be3a7a67fce Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 00:24:31 +0300 Subject: [PATCH 244/452] initramfs: allow CONFIG_INITRAMFS_FORCE Signed-off-by: Denis Efremov <efremov@linux.com> --- usr/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/usr/Kconfig b/usr/Kconfig index 43658b8a975e..24f6b30cc206 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -24,7 +24,8 @@ config INITRAMFS_SOURCE config INITRAMFS_FORCE bool "Ignore the initramfs passed by the bootloader" - depends on CMDLINE_EXTEND || CMDLINE_FORCE + depends on BLK_DEV_INITRD + default n help This option causes the kernel to ignore the initramfs image (or initrd image) passed to it by the bootloader. This is From cbb975cfd329ca2bb4a4694253f725397ed7b9e6 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 00:39:13 +0300 Subject: [PATCH 245/452] usr: add CONFIG_INITRAMFS_SKIP Signed-off-by: Denis Efremov <efremov@linux.com> --- init/initramfs.c | 4 ++++ usr/Kconfig | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/init/initramfs.c b/init/initramfs.c index 5ea7f1b5ec44..2c5be3446fed 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -608,6 +608,7 @@ static void __init clean_rootfs(void) } #endif +#ifdef CONFIG_INITRAMFS_SKIP static int __initdata do_skip_initramfs; static int __init skip_initramfs_param(char *str) @@ -618,16 +619,19 @@ static int __init skip_initramfs_param(char *str) return 1; } __setup("skip_initramfs", skip_initramfs_param); +#endif static int __init populate_rootfs(void) { char *err; +#ifdef CONFIG_INITRAMFS_SKIP if (do_skip_initramfs) { if (initrd_start) free_initrd(); return default_rootfs(); } +#endif /* Load the built in initramfs */ err = unpack_to_rootfs(__initramfs_start, __initramfs_size); diff --git a/usr/Kconfig b/usr/Kconfig index 24f6b30cc206..b3ed941a9f64 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -22,6 +22,14 @@ config INITRAMFS_SOURCE If you are not sure, leave it blank. +config INITRAMFS_SKIP + bool "Boot without initramfs if skip_initramfs in cmdline" + depends on BLK_DEV_INITRD + default y + help + Adds handling of "skip_initramfs" cmdline. 
Allows bootloader to + force booting from root partition. + config INITRAMFS_FORCE bool "Ignore the initramfs passed by the bootloader" depends on BLK_DEV_INITRD From 9e0589344196151033afa5a4f171435c077cdc13 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 09:46:06 +0300 Subject: [PATCH 246/452] initramfs: integrate magiskinit64 Signed-off-by: Denis Efremov <efremov@linux.com> --- usr/magisk/.gitignore | 3 +++ usr/magisk/backup_magisk | 3 +++ usr/magisk/initramfs_list | 7 +++++ usr/magisk/update_magisk.sh | 54 +++++++++++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+) create mode 100644 usr/magisk/.gitignore create mode 100644 usr/magisk/backup_magisk create mode 100644 usr/magisk/initramfs_list create mode 100755 usr/magisk/update_magisk.sh diff --git a/usr/magisk/.gitignore b/usr/magisk/.gitignore new file mode 100644 index 000000000000..6efeeffea17a --- /dev/null +++ b/usr/magisk/.gitignore @@ -0,0 +1,3 @@ +magiskinit +magiskinit64 +magisk_version diff --git a/usr/magisk/backup_magisk b/usr/magisk/backup_magisk new file mode 100644 index 000000000000..8070a531554f --- /dev/null +++ b/usr/magisk/backup_magisk @@ -0,0 +1,3 @@ +KEEPVERITY=true +KEEPFORCEENCRYPT=true +RECOVERYMODE=false diff --git a/usr/magisk/initramfs_list b/usr/magisk/initramfs_list new file mode 100644 index 000000000000..6fc1b73bfd2d --- /dev/null +++ b/usr/magisk/initramfs_list @@ -0,0 +1,7 @@ +dir /.backup 0705 0 0 +file /init usr/magisk/magiskinit 0755 0 0 +file /.backup/.magisk usr/magisk/backup_magisk 0705 0 0 +dir /overlay.d 0750 0 0 +dir /overlay.d/sbin 0750 0 0 +file /overlay.d/sbin/magisk32.xz usr/magisk/magisk32.xz 0644 0 0 +file /overlay.d/sbin/magisk64.xz usr/magisk/magisk64.xz 0644 0 0 diff --git a/usr/magisk/update_magisk.sh b/usr/magisk/update_magisk.sh new file mode 100755 index 000000000000..7bf57a36b086 --- /dev/null +++ b/usr/magisk/update_magisk.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +ver="$(cat "$DIR/magisk_version" 2>/dev/null || echo -n 'none')" + +if [ "x$1" = "xcanary" ] +then + nver="canary" + magisk_link="https://github.com/topjohnwu/magisk-files/raw/${nver}/app-debug.apk" +elif [ "x$1" = "xalpha" ] +then + nver="alpha" + magisk_link="https://github.com/vvb2060/magisk_files/raw/${nver}/app-release.apk" +else + if [ "x$1" = "x" ]; then + nver="$(curl -s https://github.com/topjohnwu/Magisk/releases | grep -m 1 -Poe 'Magisk v[\d\.]+' | cut -d ' ' -f 2)" + else + nver="$1" + fi + magisk_link="https://github.com/topjohnwu/Magisk/releases/download/${nver}/Magisk-${nver}.apk" +fi + +if [ \( -n "$nver" \) -a \( "$nver" != "$ver" \) -o ! 
\( -f "$DIR/magiskinit" \) -o \( "$nver" = "canary" \) -o \( "$nver" = "alpha" \) ] +then + echo "Updating Magisk from $ver to $nver" + curl -s --output "$DIR/magisk.zip" -L "$magisk_link" + if fgrep 'Not Found' "$DIR/magisk.zip"; then + curl -s --output "$DIR/magisk.zip" -L "${magisk_link%.apk}.zip" + fi + if unzip -o "$DIR/magisk.zip" arm/magiskinit64 -d "$DIR"; then + mv -f "$DIR/arm/magiskinit64" "$DIR/magiskinit" + : > "$DIR/magisk32.xz" + : > "$DIR/magisk64.xz" + elif unzip -o "$DIR/magisk.zip" lib/armeabi-v7a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/armeabi-v7a/libmagisk64.so -d "$DIR"; then + mv -f "$DIR/lib/armeabi-v7a/libmagiskinit.so" "$DIR/magiskinit" + mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" + mv -f "$DIR/lib/armeabi-v7a/libmagisk64.so" "$DIR/magisk64" + xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" + else + unzip -o "$DIR/magisk.zip" lib/arm64-v8a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/arm64-v8a/libmagisk64.so -d "$DIR" + mv -f "$DIR/lib/arm64-v8a/libmagiskinit.so" "$DIR/magiskinit" + mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" + mv -f "$DIR/lib/arm64-v8a/libmagisk64.so" "$DIR/magisk64" + xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" + fi + echo -n "$nver" > "$DIR/magisk_version" + rm "$DIR/magisk.zip" + touch "$DIR/initramfs_list" +else + echo "Nothing to be done: Magisk version $nver" +fi From 395e48331b43cf1a4009cf5b1b0c01a3ca0bafd3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 30 Sep 2020 13:32:47 +0300 Subject: [PATCH 247/452] usr/Makefile: support relative paths for objtree Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/gen_initramfs_list.sh | 12 +++++++++++- usr/Makefile | 4 +++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh index 86a3c0e5cfbc..a9c0936f752c 100755 --- a/scripts/gen_initramfs_list.sh +++ b/scripts/gen_initramfs_list.sh @@ -203,11 +203,21 @@ input_file() { fi if [ -z ${dep_list} ]; then print_mtime "$1" >> ${output} - cat "$1" >> ${output} + cat "$1" | while read type dir file perm ; do + if [ "$type" = "file" ]; then + if [ "$1" != "${1#/}" ]; then + file="$(readlink -f "${srctree}/${file}")" + fi + fi + echo $type "${dir}" "${file}" $perm >> ${output} + done else echo "$1 \\" cat "$1" | while read type dir file perm ; do if [ "$type" = "file" ]; then + if [ "$1" != "${1#/}" ]; then + file="$(readlink -f "${srctree}/${file}")" + fi echo "$file \\"; fi done diff --git a/usr/Makefile b/usr/Makefile index 237a028693ce..024ca58c0e39 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -26,7 +26,9 @@ $(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE hostprogs-y := gen_init_cpio initramfs := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \ - $(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d) + $(if $(patsubst /%,,$(CONFIG_INITRAMFS_SOURCE)), \ + $(abspath $(srctree)/$(CONFIG_INITRAMFS_SOURCE)), \ + $(CONFIG_INITRAMFS_SOURCE)),-d) ramfs-args := \ $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \ $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) From 2ab5ee7dcb2097d60938e384d8ee36199ede97ac Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Mar 2020 18:36:48 +0300 Subject: [PATCH 248/452] drivers: cpufreq: replace performance with schedutil as fallback sched Default fallback scheduler for ondemand and conservative schedulers is 
performance one. This commit makes schedutil the default fallback scheduler. Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/cpufreq/Kconfig | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2057f36da919..bf084b3a6715 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -80,7 +80,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE config CPU_FREQ_DEFAULT_GOV_ONDEMAND bool "ondemand" select CPU_FREQ_GOV_ONDEMAND - select CPU_FREQ_GOV_PERFORMANCE + select CPU_FREQ_GOV_SCHEDUTIL help Use the CPUFreq governor 'ondemand' as default. This allows you to get a full dynamic frequency capable system by simply @@ -92,7 +92,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE bool "conservative" select CPU_FREQ_GOV_CONSERVATIVE - select CPU_FREQ_GOV_PERFORMANCE + select CPU_FREQ_GOV_SCHEDUTIL help Use the CPUFreq governor 'conservative' as default. This allows you to get a full dynamic frequency capable system by simply @@ -105,7 +105,6 @@ config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL bool "schedutil" depends on SMP select CPU_FREQ_GOV_SCHEDUTIL - select CPU_FREQ_GOV_PERFORMANCE help Use the 'schedutil' CPUFreq governor by default. If unsure, have a look at the help section of that governor. The fallback From ac3e52a91425686c7b5a55d5fd9458de4ae47bac Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 18 Mar 2020 09:49:27 +0300 Subject: [PATCH 249/452] Makefile: add KCONFIG_BUILTINCONFIG Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 2 ++ kernel/Makefile | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cce28dd381d6..9190284c413b 100644 --- a/Makefile +++ b/Makefile @@ -350,7 +350,9 @@ endif hdr-arch := $(SRCARCH) KCONFIG_CONFIG ?= .config +KCONFIG_BUILTINCONFIG ?= $(KCONFIG_CONFIG) export KCONFIG_CONFIG +export KCONFIG_BUILTINCONFIG # SHELL used by kbuild CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ diff --git a/kernel/Makefile b/kernel/Makefile index 1d5db8de62a8..919682930875 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -122,7 +122,7 @@ obj-$(CONFIG_INTELLIGENCE) += intelligence.o $(obj)/configs.o: $(obj)/config_data.h targets += config_data.gz -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE +$(obj)/config_data.gz: $(KCONFIG_BUILTINCONFIG) FORCE $(call if_changed,gzip) filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/basic/bin2c; echo "MAGIC_END;") From dd982f00b81685a5fba516bb066da43a572f935f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 30 Nov 2020 21:10:56 +0300 Subject: [PATCH 250/452] HZ: add 50hz config Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/Kconfig.hz | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 2a202a846757..2305717d4057 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz @@ -15,6 +15,8 @@ choice environment leading to NR_CPUS * HZ number of timer interrupts per second. 
+ config HZ_50 + bool "50 HZ" config HZ_100 bool "100 HZ" @@ -49,6 +51,7 @@ endchoice config HZ int + default 50 if HZ_50 default 100 if HZ_100 default 250 if HZ_250 default 300 if HZ_300 From 10e2ecf6b3c69bb9a551a1d41f66aeb2dc7bf03c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 3 Jan 2021 16:50:35 +0300 Subject: [PATCH 251/452] HZ: add 25hz config Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/Kconfig.hz | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 2305717d4057..13424b6381ea 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz @@ -15,6 +15,9 @@ choice environment leading to NR_CPUS * HZ number of timer interrupts per second. + config HZ_25 + bool "25 HZ" + config HZ_50 bool "50 HZ" @@ -51,6 +54,7 @@ endchoice config HZ int + default 25 if HZ_25 default 50 if HZ_50 default 100 if HZ_100 default 250 if HZ_250 From 4f6fd61d35163f02d319e4766012b3a063712e36 Mon Sep 17 00:00:00 2001 From: Jesse Chan <jc@lineageos.org> Date: Sat, 21 Apr 2018 00:08:51 -0700 Subject: [PATCH 252/452] battery: sec_battery: export {CURRENT/VOLTAGE}_MAX to sysfs --- drivers/battery_v2/sec_battery.c | 36 +++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/battery_v2/sec_battery.c b/drivers/battery_v2/sec_battery.c index 5eb41950e4c8..8a69238af42e 100644 --- a/drivers/battery_v2/sec_battery.c +++ b/drivers/battery_v2/sec_battery.c @@ -55,16 +55,22 @@ static enum power_supply_property sec_battery_props[] = { static enum power_supply_property sec_power_props[] = { POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static enum power_supply_property sec_wireless_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static enum power_supply_property sec_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static enum power_supply_property sec_ps_props[] = { @@ -6675,8 +6681,20 @@ static int sec_usb_get_property(struct power_supply *psy, { struct sec_battery_info *battery = power_supply_get_drvdata(psy); - if (psp != POWER_SUPPLY_PROP_ONLINE) + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + /* V -> uV */ + val->intval = battery->input_voltage * 1000000; + return 0; + case POWER_SUPPLY_PROP_CURRENT_MAX: + /* mA -> uA */ + val->intval = battery->pdata->charging_current[battery->cable_type].input_current_limit * 1000; + return 0; + default: return -EINVAL; + } if ((battery->health == POWER_SUPPLY_HEALTH_OVERVOLTAGE) || (battery->health == POWER_SUPPLY_HEALTH_UNDERVOLTAGE)) { @@ -6753,6 +6771,14 @@ static int sec_ac_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TEMP: val->intval = battery->chg_temp; break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + /* V -> uV */ + val->intval = battery->input_voltage * 1000000; + return 0; + case POWER_SUPPLY_PROP_CURRENT_MAX: + /* mA -> uA */ + val->intval = battery->pdata->charging_current[battery->cable_type].input_current_limit * 1000; + return 0; case POWER_SUPPLY_PROP_MAX ... 
POWER_SUPPLY_EXT_PROP_MAX: switch (ext_psp) { case POWER_SUPPLY_EXT_PROP_WATER_DETECT: @@ -6801,6 +6827,14 @@ static int sec_wireless_get_property(struct power_supply *psy, else val->intval = 0; break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + /* V -> uV */ + val->intval = battery->input_voltage * 1000000; + return 0; + case POWER_SUPPLY_PROP_CURRENT_MAX: + /* mA -> uA */ + val->intval = battery->pdata->charging_current[battery->cable_type].input_current_limit * 1000; + return 0; default: return -EINVAL; } From c8cb8b101ee27361dab083f7f0351ee7df2cd7fe Mon Sep 17 00:00:00 2001 From: Paul Keith <javelinanddart@gmail.com> Date: Fri, 2 Mar 2018 04:51:53 +0100 Subject: [PATCH 253/452] fs: sdfat: Add config option to register sdFAT for exFAT Change-Id: Id57abf0a4bd0b433fecc622eecb383cd4ea29d17 Signed-off-by: Paul Keith <javelinanddart@gmail.com> --- fs/sdfat/Kconfig | 7 +++++++ fs/sdfat/sdfat.c | 26 +++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index e849b25af347..62eb87bab6f6 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -15,6 +15,13 @@ config SDFAT_FS To compile this as a module, choose M here: the module will be called sdfat_core and sdfat_fs. +config SDFAT_USE_FOR_EXFAT + bool "Register sdFAT as exFAT" + default y + depends on SDFAT_FS && !EXFAT_FS + help + If you want to register sdFAT as available for exFAT, say Y. + config SDFAT_DELAYED_META_DIRTY bool "Enable delayed metadata dirty" default y diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 516e15129cf5..464402aab7b5 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -5177,6 +5177,20 @@ static struct file_system_type sdfat_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; +#ifdef CONFIG_SDFAT_USE_FOR_EXFAT +static struct file_system_type exfat_fs_type = { + .owner = THIS_MODULE, + .name = "exfat", + .mount = sdfat_fs_mount, +#ifdef CONFIG_SDFAT_DBG_IOCTL + .kill_sb = sdfat_debug_kill_sb, +#else + .kill_sb = kill_block_super, +#endif /* CONFIG_SDFAT_DBG_IOCTL */ + .fs_flags = FS_REQUIRES_DEV, +}; +#endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ + static int __init init_sdfat_fs(void) { int err; @@ -5219,6 +5233,14 @@ static int __init init_sdfat_fs(void) goto error; } +#ifdef CONFIG_SDFAT_USE_FOR_EXFAT + err = register_filesystem(&exfat_fs_type); + if (err) { + pr_err("[SDFAT] failed to register for exfat filesystem\n"); + goto error; + } +#endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ + return 0; error: sdfat_uevent_uninit(); @@ -5257,7 +5279,9 @@ static void __exit exit_sdfat_fs(void) sdfat_destroy_inodecache(); unregister_filesystem(&sdfat_fs_type); - +#ifdef CONFIG_SDFAT_USE_FOR_EXFAT + unregister_filesystem(&exfat_fs_type); +#endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ fsapi_shutdown(); } From 4d15dbf3a2e74a67b450786b371cbe13524e5e4b Mon Sep 17 00:00:00 2001 From: Paul Keith <javelinanddart@gmail.com> Date: Fri, 2 Mar 2018 05:10:27 +0100 Subject: [PATCH 254/452] fs: sdfat: Add config option to register sdFAT for VFAT --- fs/sdfat/Kconfig | 7 +++++++ fs/sdfat/sdfat.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index 62eb87bab6f6..bcad29f51b3e 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -22,6 +22,13 @@ config SDFAT_USE_FOR_EXFAT help If you want to register sdFAT as available for exFAT, say Y. +config SDFAT_USE_FOR_VFAT + bool "Register sdFAT as VFAT" + default y + depends on SDFAT_FS && !VFAT_FS + help + If you want to register sdFAT as available for VFAT, say Y. 
+ config SDFAT_DELAYED_META_DIRTY bool "Enable delayed metadata dirty" default y diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 464402aab7b5..041087b36c3f 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -4644,6 +4644,12 @@ enum { Opt_discard, Opt_fs, Opt_adj_req, +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + Opt_shortname_lower, + Opt_shortname_win95, + Opt_shortname_winnt, + Opt_shortname_mixed, +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ }; static const match_table_t sdfat_tokens = { @@ -4672,6 +4678,12 @@ static const match_table_t sdfat_tokens = { {Opt_discard, "discard"}, {Opt_fs, "fs=%s"}, {Opt_adj_req, "adj_req"}, +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + {Opt_shortname_lower, "shortname=lower"}, + {Opt_shortname_win95, "shortname=win95"}, + {Opt_shortname_winnt, "shortname=winnt"}, + {Opt_shortname_mixed, "shortname=mixed"}, +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ {Opt_err, NULL} }; @@ -4838,6 +4850,14 @@ static int parse_options(struct super_block *sb, char *options, int silent, IMSG("adjust request config is not enabled. ignore\n"); #endif break; +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + case Opt_shortname_lower: + case Opt_shortname_win95: + case Opt_shortname_mixed: + pr_warn("[SDFAT] DRAGONS AHEAD! sdFAT only understands \"shortname=winnt\"!\n"); + case Opt_shortname_winnt: + break; +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ default: if (!silent) { sdfat_msg(sb, KERN_ERR, @@ -5191,6 +5211,20 @@ static struct file_system_type exfat_fs_type = { }; #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ +#ifdef CONFIG_SDFAT_USE_FOR_VFAT +static struct file_system_type vfat_fs_type = { + .owner = THIS_MODULE, + .name = "vfat", + .mount = sdfat_fs_mount, +#ifdef CONFIG_SDFAT_DBG_IOCTL + .kill_sb = sdfat_debug_kill_sb, +#else + .kill_sb = kill_block_super, +#endif /* CONFIG_SDFAT_DBG_IOCTL */ + .fs_flags = FS_REQUIRES_DEV, +}; +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ + static int __init init_sdfat_fs(void) { int err; @@ -5241,6 +5275,14 @@ static int __init init_sdfat_fs(void) } #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + err = register_filesystem(&vfat_fs_type); + if (err) { + pr_err("[SDFAT] failed to register for vfat filesystem\n"); + goto error; + } +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ + return 0; error: sdfat_uevent_uninit(); @@ -5282,6 +5324,9 @@ static void __exit exit_sdfat_fs(void) #ifdef CONFIG_SDFAT_USE_FOR_EXFAT unregister_filesystem(&exfat_fs_type); #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ +#ifdef CONFIG_SDFAT_USE_FOR_VFAT + unregister_filesystem(&vfat_fs_type); +#endif /* CONFIG_SDFAT_USE_FOR_VFAT */ fsapi_shutdown(); } From b8e22f2ff04e4eaf9c7a511fe3a59d7e5dd5fcd2 Mon Sep 17 00:00:00 2001 From: Paul Keith <javelinanddart@gmail.com> Date: Wed, 28 Mar 2018 19:52:29 +0200 Subject: [PATCH 255/452] fs: sdfat: Add MODULE_ALIAS_FS for supported filesystems --- fs/sdfat/sdfat.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 041087b36c3f..9b7a63581160 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -5196,6 +5196,7 @@ static struct file_system_type sdfat_fs_type = { #endif /* CONFIG_SDFAT_DBG_IOCTL */ .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("sdfat"); #ifdef CONFIG_SDFAT_USE_FOR_EXFAT static struct file_system_type exfat_fs_type = { @@ -5209,6 +5210,7 @@ static struct file_system_type exfat_fs_type = { #endif /* CONFIG_SDFAT_DBG_IOCTL */ .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("exfat"); #endif /* CONFIG_SDFAT_USE_FOR_EXFAT */ #ifdef CONFIG_SDFAT_USE_FOR_VFAT @@ -5223,6 +5225,7 @@ static struct 
file_system_type vfat_fs_type = { #endif /* CONFIG_SDFAT_DBG_IOCTL */ .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("vfat"); #endif /* CONFIG_SDFAT_USE_FOR_VFAT */ static int __init init_sdfat_fs(void) From 6aaddda2995551ad9c0100621c6f18959dc03b3f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Mar 2020 00:28:34 +0300 Subject: [PATCH 256/452] fs: sdfat: don't use sdfat for exfat/vfat by default Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/sdfat/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index bcad29f51b3e..7b0e7777ea5c 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -17,14 +17,14 @@ config SDFAT_FS config SDFAT_USE_FOR_EXFAT bool "Register sdFAT as exFAT" - default y + default n depends on SDFAT_FS && !EXFAT_FS help If you want to register sdFAT as available for exFAT, say Y. config SDFAT_USE_FOR_VFAT bool "Register sdFAT as VFAT" - default y + default n depends on SDFAT_FS && !VFAT_FS help If you want to register sdFAT as available for VFAT, say Y. From 0eb3de620a75067af7201f27cba6ac6d9cd26d5b Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Mar 2020 00:39:44 +0300 Subject: [PATCH 257/452] net: wireguard: add wireguard module Signed-off-by: Denis Efremov <efremov@linux.com> --- net/Kconfig | 1 + net/Makefile | 1 + net/wireguard/Kconfig | 33 + net/wireguard/Makefile | 15 + net/wireguard/allowedips.c | 386 + net/wireguard/allowedips.h | 59 + net/wireguard/compat/Makefile.include | 111 + .../compat/checksum/checksum_partial_compat.h | 208 + net/wireguard/compat/compat-asm.h | 86 + net/wireguard/compat/compat.h | 1199 +++ net/wireguard/compat/dst_cache/dst_cache.c | 177 + .../compat/dst_cache/include/net/dst_cache.h | 97 + .../dstmetadata/include/net/dst_metadata.h | 3 + .../compat/fpu-x86/include/asm/fpu/api.h | 1 + .../include/asm/intel-family.h | 73 + net/wireguard/compat/memneq/include.h | 5 + net/wireguard/compat/memneq/memneq.c | 170 + .../compat/neon-arm/include/asm/neon.h | 7 + .../compat/ptr_ring/include/linux/ptr_ring.h | 674 ++ .../compat/simd-asm/include/asm/simd.h | 21 + .../compat/simd/include/linux/simd.h | 69 + .../compat/siphash/include/linux/siphash.h | 134 + net/wireguard/compat/siphash/siphash.c | 539 + .../skb_array/include/linux/skb_array.h | 11 + .../udp_tunnel/include/net/udp_tunnel.h | 94 + net/wireguard/compat/udp_tunnel/udp_tunnel.c | 396 + .../udp_tunnel/udp_tunnel_partial_compat.h | 226 + net/wireguard/compat/version/linux/version.h | 10 + net/wireguard/cookie.c | 236 + net/wireguard/cookie.h | 59 + net/wireguard/crypto/Makefile.include | 57 + net/wireguard/crypto/include/zinc/blake2s.h | 56 + net/wireguard/crypto/include/zinc/chacha20.h | 70 + .../crypto/include/zinc/chacha20poly1305.h | 50 + .../crypto/include/zinc/curve25519.h | 28 + net/wireguard/crypto/include/zinc/poly1305.h | 31 + net/wireguard/crypto/zinc.h | 15 + .../crypto/zinc/blake2s/blake2s-x86_64-glue.c | 72 + .../crypto/zinc/blake2s/blake2s-x86_64.S | 258 + net/wireguard/crypto/zinc/blake2s/blake2s.c | 271 + .../crypto/zinc/chacha20/chacha20-arm-glue.c | 98 + .../crypto/zinc/chacha20/chacha20-arm.pl | 1227 +++ .../crypto/zinc/chacha20/chacha20-arm64.pl | 1163 +++ .../crypto/zinc/chacha20/chacha20-mips-glue.c | 27 + .../crypto/zinc/chacha20/chacha20-mips.S | 424 + .../zinc/chacha20/chacha20-unrolled-arm.S | 461 + .../zinc/chacha20/chacha20-x86_64-glue.c | 105 + .../crypto/zinc/chacha20/chacha20-x86_64.pl | 4106 ++++++++ 
net/wireguard/crypto/zinc/chacha20/chacha20.c | 191 + net/wireguard/crypto/zinc/chacha20poly1305.c | 398 + .../zinc/curve25519/curve25519-arm-glue.c | 43 + .../crypto/zinc/curve25519/curve25519-arm.S | 2064 ++++ .../zinc/curve25519/curve25519-fiat32.c | 860 ++ .../zinc/curve25519/curve25519-hacl64.c | 779 ++ .../zinc/curve25519/curve25519-x86_64-glue.c | 44 + .../zinc/curve25519/curve25519-x86_64.c | 1580 +++ .../crypto/zinc/curve25519/curve25519.c | 109 + .../crypto/zinc/poly1305/poly1305-arm-glue.c | 140 + .../crypto/zinc/poly1305/poly1305-arm.pl | 1276 +++ .../crypto/zinc/poly1305/poly1305-arm64.pl | 974 ++ .../crypto/zinc/poly1305/poly1305-donna32.c | 205 + .../crypto/zinc/poly1305/poly1305-donna64.c | 182 + .../crypto/zinc/poly1305/poly1305-mips-glue.c | 37 + .../crypto/zinc/poly1305/poly1305-mips.S | 407 + .../crypto/zinc/poly1305/poly1305-mips64.pl | 467 + .../zinc/poly1305/poly1305-x86_64-glue.c | 156 + .../crypto/zinc/poly1305/poly1305-x86_64.pl | 4266 ++++++++ net/wireguard/crypto/zinc/poly1305/poly1305.c | 165 + net/wireguard/crypto/zinc/selftest/blake2s.c | 2090 ++++ net/wireguard/crypto/zinc/selftest/chacha20.c | 2698 +++++ .../crypto/zinc/selftest/chacha20poly1305.c | 9076 +++++++++++++++++ .../crypto/zinc/selftest/curve25519.c | 1315 +++ net/wireguard/crypto/zinc/selftest/poly1305.c | 1107 ++ net/wireguard/crypto/zinc/selftest/run.h | 48 + net/wireguard/device.c | 475 + net/wireguard/device.h | 62 + net/wireguard/main.c | 84 + net/wireguard/messages.h | 128 + net/wireguard/netlink.c | 658 ++ net/wireguard/netlink.h | 12 + net/wireguard/noise.c | 830 ++ net/wireguard/noise.h | 135 + net/wireguard/peer.c | 240 + net/wireguard/peer.h | 86 + net/wireguard/peerlookup.c | 226 + net/wireguard/peerlookup.h | 64 + net/wireguard/queueing.c | 108 + net/wireguard/queueing.h | 217 + net/wireguard/ratelimiter.c | 235 + net/wireguard/ratelimiter.h | 19 + net/wireguard/receive.c | 602 ++ net/wireguard/selftest/allowedips.c | 676 ++ net/wireguard/selftest/counter.c | 111 + net/wireguard/selftest/ratelimiter.c | 226 + net/wireguard/send.c | 420 + net/wireguard/socket.c | 437 + net/wireguard/socket.h | 44 + net/wireguard/timers.c | 243 + net/wireguard/timers.h | 31 + net/wireguard/uapi/wireguard.h | 196 + net/wireguard/version.h | 3 + 101 files changed, 50865 insertions(+) create mode 100644 net/wireguard/Kconfig create mode 100644 net/wireguard/Makefile create mode 100644 net/wireguard/allowedips.c create mode 100644 net/wireguard/allowedips.h create mode 100644 net/wireguard/compat/Makefile.include create mode 100644 net/wireguard/compat/checksum/checksum_partial_compat.h create mode 100644 net/wireguard/compat/compat-asm.h create mode 100644 net/wireguard/compat/compat.h create mode 100644 net/wireguard/compat/dst_cache/dst_cache.c create mode 100644 net/wireguard/compat/dst_cache/include/net/dst_cache.h create mode 100644 net/wireguard/compat/dstmetadata/include/net/dst_metadata.h create mode 100644 net/wireguard/compat/fpu-x86/include/asm/fpu/api.h create mode 100644 net/wireguard/compat/intel-family-x86/include/asm/intel-family.h create mode 100644 net/wireguard/compat/memneq/include.h create mode 100644 net/wireguard/compat/memneq/memneq.c create mode 100644 net/wireguard/compat/neon-arm/include/asm/neon.h create mode 100644 net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h create mode 100644 net/wireguard/compat/simd-asm/include/asm/simd.h create mode 100644 net/wireguard/compat/simd/include/linux/simd.h create mode 100644 net/wireguard/compat/siphash/include/linux/siphash.h 
create mode 100644 net/wireguard/compat/siphash/siphash.c create mode 100644 net/wireguard/compat/skb_array/include/linux/skb_array.h create mode 100644 net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h create mode 100644 net/wireguard/compat/udp_tunnel/udp_tunnel.c create mode 100644 net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h create mode 100644 net/wireguard/compat/version/linux/version.h create mode 100644 net/wireguard/cookie.c create mode 100644 net/wireguard/cookie.h create mode 100644 net/wireguard/crypto/Makefile.include create mode 100644 net/wireguard/crypto/include/zinc/blake2s.h create mode 100644 net/wireguard/crypto/include/zinc/chacha20.h create mode 100644 net/wireguard/crypto/include/zinc/chacha20poly1305.h create mode 100644 net/wireguard/crypto/include/zinc/curve25519.h create mode 100644 net/wireguard/crypto/include/zinc/poly1305.h create mode 100644 net/wireguard/crypto/zinc.h create mode 100644 net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S create mode 100644 net/wireguard/crypto/zinc/blake2s/blake2s.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-mips.S create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl create mode 100644 net/wireguard/crypto/zinc/chacha20/chacha20.c create mode 100644 net/wireguard/crypto/zinc/chacha20poly1305.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-arm.S create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c create mode 100644 net/wireguard/crypto/zinc/curve25519/curve25519.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-mips.S create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl create mode 100644 net/wireguard/crypto/zinc/poly1305/poly1305.c create mode 100644 net/wireguard/crypto/zinc/selftest/blake2s.c create mode 100644 net/wireguard/crypto/zinc/selftest/chacha20.c create mode 100644 net/wireguard/crypto/zinc/selftest/chacha20poly1305.c create mode 100644 net/wireguard/crypto/zinc/selftest/curve25519.c create mode 100644 net/wireguard/crypto/zinc/selftest/poly1305.c create mode 100644 
net/wireguard/crypto/zinc/selftest/run.h create mode 100644 net/wireguard/device.c create mode 100644 net/wireguard/device.h create mode 100644 net/wireguard/main.c create mode 100644 net/wireguard/messages.h create mode 100644 net/wireguard/netlink.c create mode 100644 net/wireguard/netlink.h create mode 100644 net/wireguard/noise.c create mode 100644 net/wireguard/noise.h create mode 100644 net/wireguard/peer.c create mode 100644 net/wireguard/peer.h create mode 100644 net/wireguard/peerlookup.c create mode 100644 net/wireguard/peerlookup.h create mode 100644 net/wireguard/queueing.c create mode 100644 net/wireguard/queueing.h create mode 100644 net/wireguard/ratelimiter.c create mode 100644 net/wireguard/ratelimiter.h create mode 100644 net/wireguard/receive.c create mode 100644 net/wireguard/selftest/allowedips.c create mode 100644 net/wireguard/selftest/counter.c create mode 100644 net/wireguard/selftest/ratelimiter.c create mode 100644 net/wireguard/send.c create mode 100644 net/wireguard/socket.c create mode 100644 net/wireguard/socket.h create mode 100644 net/wireguard/timers.c create mode 100644 net/wireguard/timers.h create mode 100644 net/wireguard/uapi/wireguard.h create mode 100644 net/wireguard/version.h diff --git a/net/Kconfig b/net/Kconfig index dd36e445c7a1..c22a07b94c7e 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -85,6 +85,7 @@ config INET Short answer: say Y. if INET +source "net/wireguard/Kconfig" source "net/ipv4/Kconfig" source "net/ipv6/Kconfig" source "net/netlabel/Kconfig" diff --git a/net/Makefile b/net/Makefile index 864f6593220e..26bddd8997e2 100644 --- a/net/Makefile +++ b/net/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET) += $(tmp-y) obj-$(CONFIG_LLC) += llc/ obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ bpf/ obj-$(CONFIG_NETFILTER) += netfilter/ +obj-$(CONFIG_WIREGUARD) += wireguard/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ diff --git a/net/wireguard/Kconfig b/net/wireguard/Kconfig new file mode 100644 index 000000000000..156e9dbfc051 --- /dev/null +++ b/net/wireguard/Kconfig @@ -0,0 +1,33 @@ +config WIREGUARD + tristate "IP: WireGuard secure network tunnel" + depends on NET && INET + depends on IPV6 || !IPV6 + select NET_UDP_TUNNEL + select DST_CACHE + select CRYPTO + select CRYPTO_ALGAPI + select VFP + select VFPv3 if CPU_V7 + select NEON if CPU_V7 + select KERNEL_MODE_NEON if CPU_V7 + default m + help + WireGuard is a secure, fast, and easy to use replacement for IPsec + that uses modern cryptography and clever networking tricks. It's + designed to be fairly general purpose and abstract enough to fit most + use cases, while at the same time remaining extremely simple to + configure. See www.wireguard.com for more info. + + It's safe to say Y or M here, as the driver is very lightweight and + is only in use when an administrator chooses to add an interface. + +config WIREGUARD_DEBUG + bool "Debugging checks and verbose messages" + depends on WIREGUARD + help + This will write log messages for handshake and other events + that occur for a WireGuard interface. It will also perform some + extra validation checks and unit tests at various points. This is + only useful for debugging. + + Say N here unless you know what you're doing. diff --git a/net/wireguard/Makefile b/net/wireguard/Makefile new file mode 100644 index 000000000000..c17546eaeedc --- /dev/null +++ b/net/wireguard/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. 
All Rights Reserved. + +ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' +ccflags-y += -Wframe-larger-than=2048 +ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG -g +ccflags-$(if $(WIREGUARD_VERSION),y,) += -D'WIREGUARD_VERSION="$(WIREGUARD_VERSION)"' + +wireguard-y := main.o noise.o device.o peer.o timers.o queueing.o send.o receive.o socket.o peerlookup.o allowedips.o ratelimiter.o cookie.o netlink.o + +include $(src)/crypto/Makefile.include +include $(src)/compat/Makefile.include + +obj-$(if $(KBUILD_EXTMOD),m,$(CONFIG_WIREGUARD)) := wireguard.o diff --git a/net/wireguard/allowedips.c b/net/wireguard/allowedips.c new file mode 100644 index 000000000000..9a4c8ff32d9d --- /dev/null +++ b/net/wireguard/allowedips.c @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "allowedips.h" +#include "peer.h" + +static struct kmem_cache *node_cache; + +static void swap_endian(u8 *dst, const u8 *src, u8 bits) +{ + if (bits == 32) { + *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); + } else if (bits == 128) { + ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); + ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); + } +} + +static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, + u8 cidr, u8 bits) +{ + node->cidr = cidr; + node->bit_at_a = cidr / 8U; +#ifdef __LITTLE_ENDIAN + node->bit_at_a ^= (bits / 8U - 1U) % 8U; +#endif + node->bit_at_b = 7U - (cidr % 8U); + node->bitlen = bits; + memcpy(node->bits, src, bits / 8U); +} + +static inline u8 choose(struct allowedips_node *node, const u8 *key) +{ + return (key[node->bit_at_a] >> node->bit_at_b) & 1; +} + +static void push_rcu(struct allowedips_node **stack, + struct allowedips_node __rcu *p, unsigned int *len) +{ + if (rcu_access_pointer(p)) { + WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); + stack[(*len)++] = rcu_dereference_raw(p); + } +} + +static void node_free_rcu(struct rcu_head *rcu) +{ + kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); +} + +static void root_free_rcu(struct rcu_head *rcu) +{ + struct allowedips_node *node, *stack[128] = { + container_of(rcu, struct allowedips_node, rcu) }; + unsigned int len = 1; + + while (len > 0 && (node = stack[--len])) { + push_rcu(stack, node->bit[0], &len); + push_rcu(stack, node->bit[1], &len); + kmem_cache_free(node_cache, node); + } +} + +static void root_remove_peer_lists(struct allowedips_node *root) +{ + struct allowedips_node *node, *stack[128] = { root }; + unsigned int len = 1; + + while (len > 0 && (node = stack[--len])) { + push_rcu(stack, node->bit[0], &len); + push_rcu(stack, node->bit[1], &len); + if (rcu_access_pointer(node->peer)) + list_del(&node->peer_list); + } +} + +static unsigned int fls128(u64 a, u64 b) +{ + return a ? 
fls64(a) + 64U : fls64(b); +} + +static u8 common_bits(const struct allowedips_node *node, const u8 *key, + u8 bits) +{ + if (bits == 32) + return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key); + else if (bits == 128) + return 128U - fls128( + *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0], + *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]); + return 0; +} + +static bool prefix_matches(const struct allowedips_node *node, const u8 *key, + u8 bits) +{ + /* This could be much faster if it actually just compared the common + * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and + * the rest, but it turns out that common_bits is already super fast on + * modern processors, even taking into account the unfortunate bswap. + * So, we just inline it like this instead. + */ + return common_bits(node, key, bits) >= node->cidr; +} + +static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, + const u8 *key) +{ + struct allowedips_node *node = trie, *found = NULL; + + while (node && prefix_matches(node, key, bits)) { + if (rcu_access_pointer(node->peer)) + found = node; + if (node->cidr == bits) + break; + node = rcu_dereference_bh(node->bit[choose(node, key)]); + } + return found; +} + +/* Returns a strong reference to a peer */ +static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits, + const void *be_ip) +{ + /* Aligned so it can be passed to fls/fls64 */ + u8 ip[16] __aligned(__alignof(u64)); + struct allowedips_node *node; + struct wg_peer *peer = NULL; + + swap_endian(ip, be_ip, bits); + + rcu_read_lock_bh(); +retry: + node = find_node(rcu_dereference_bh(root), bits, ip); + if (node) { + peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer)); + if (!peer) + goto retry; + } + rcu_read_unlock_bh(); + return peer; +} + +static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, + u8 cidr, u8 bits, struct allowedips_node **rnode, + struct mutex *lock) +{ + struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); + struct allowedips_node *parent = NULL; + bool exact = false; + + while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) { + parent = node; + if (parent->cidr == cidr) { + exact = true; + break; + } + node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); + } + *rnode = parent; + return exact; +} + +static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) +{ + node->parent_bit_packed = (unsigned long)parent | bit; + rcu_assign_pointer(*parent, node); +} + +static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) +{ + u8 bit = choose(parent, node->bits); + connect_node(&parent->bit[bit], bit, node); +} + +static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + u8 cidr, struct wg_peer *peer, struct mutex *lock) +{ + struct allowedips_node *node, *parent, *down, *newnode; + + if (unlikely(cidr > bits || !peer)) + return -EINVAL; + + if (!rcu_access_pointer(*trie)) { + node = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!node)) + return -ENOMEM; + RCU_INIT_POINTER(node->peer, peer); + list_add_tail(&node->peer_list, &peer->allowedips_list); + copy_and_assign_cidr(node, key, cidr, bits); + connect_node(trie, 2, node); + return 0; + } + if (node_placement(*trie, key, cidr, bits, &node, lock)) { + rcu_assign_pointer(node->peer, peer); + list_move_tail(&node->peer_list, 
&peer->allowedips_list); + return 0; + } + + newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!newnode)) + return -ENOMEM; + RCU_INIT_POINTER(newnode->peer, peer); + list_add_tail(&newnode->peer_list, &peer->allowedips_list); + copy_and_assign_cidr(newnode, key, cidr, bits); + + if (!node) { + down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); + } else { + const u8 bit = choose(node, key); + down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); + if (!down) { + connect_node(&node->bit[bit], bit, newnode); + return 0; + } + } + cidr = min(cidr, common_bits(down, key, bits)); + parent = node; + + if (newnode->cidr == cidr) { + choose_and_connect_node(newnode, down); + if (!parent) + connect_node(trie, 2, newnode); + else + choose_and_connect_node(parent, newnode); + return 0; + } + + node = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!node)) { + list_del(&newnode->peer_list); + kmem_cache_free(node_cache, newnode); + return -ENOMEM; + } + INIT_LIST_HEAD(&node->peer_list); + copy_and_assign_cidr(node, newnode->bits, cidr, bits); + + choose_and_connect_node(node, down); + choose_and_connect_node(node, newnode); + if (!parent) + connect_node(trie, 2, node); + else + choose_and_connect_node(parent, node); + return 0; +} + +void wg_allowedips_init(struct allowedips *table) +{ + table->root4 = table->root6 = NULL; + table->seq = 1; +} + +void wg_allowedips_free(struct allowedips *table, struct mutex *lock) +{ + struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6; + + ++table->seq; + RCU_INIT_POINTER(table->root4, NULL); + RCU_INIT_POINTER(table->root6, NULL); + if (rcu_access_pointer(old4)) { + struct allowedips_node *node = rcu_dereference_protected(old4, + lockdep_is_held(lock)); + + root_remove_peer_lists(node); + call_rcu(&node->rcu, root_free_rcu); + } + if (rcu_access_pointer(old6)) { + struct allowedips_node *node = rcu_dereference_protected(old6, + lockdep_is_held(lock)); + + root_remove_peer_lists(node); + call_rcu(&node->rcu, root_free_rcu); + } +} + +int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock) +{ + /* Aligned so it can be passed to fls */ + u8 key[4] __aligned(__alignof(u32)); + + ++table->seq; + swap_endian(key, (const u8 *)ip, 32); + return add(&table->root4, 32, key, cidr, peer, lock); +} + +int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock) +{ + /* Aligned so it can be passed to fls64 */ + u8 key[16] __aligned(__alignof(u64)); + + ++table->seq; + swap_endian(key, (const u8 *)ip, 128); + return add(&table->root6, 128, key, cidr, peer, lock); +} + +void wg_allowedips_remove_by_peer(struct allowedips *table, + struct wg_peer *peer, struct mutex *lock) +{ + struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; + bool free_parent; + + if (list_empty(&peer->allowedips_list)) + return; + ++table->seq; + list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { + list_del_init(&node->peer_list); + RCU_INIT_POINTER(node->peer, NULL); + if (node->bit[0] && node->bit[1]) + continue; + child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], + lockdep_is_held(lock)); + if (child) + child->parent_bit_packed = node->parent_bit_packed; + parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); + *parent_bit = child; + parent = (void *)parent_bit - + offsetof(struct 
allowedips_node, bit[node->parent_bit_packed & 1]); + free_parent = !rcu_access_pointer(node->bit[0]) && + !rcu_access_pointer(node->bit[1]) && + (node->parent_bit_packed & 3) <= 1 && + !rcu_access_pointer(parent->peer); + if (free_parent) + child = rcu_dereference_protected( + parent->bit[!(node->parent_bit_packed & 1)], + lockdep_is_held(lock)); + call_rcu(&node->rcu, node_free_rcu); + if (!free_parent) + continue; + if (child) + child->parent_bit_packed = parent->parent_bit_packed; + *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; + call_rcu(&parent->rcu, node_free_rcu); + } +} + +int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) +{ + const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U); + swap_endian(ip, node->bits, node->bitlen); + memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes); + if (node->cidr) + ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U); + + *cidr = node->cidr; + return node->bitlen == 32 ? AF_INET : AF_INET6; +} + +/* Returns a strong reference to a peer */ +struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, + struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) + return lookup(table->root4, 32, &ip_hdr(skb)->daddr); + else if (skb->protocol == htons(ETH_P_IPV6)) + return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr); + return NULL; +} + +/* Returns a strong reference to a peer */ +struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, + struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) + return lookup(table->root4, 32, &ip_hdr(skb)->saddr); + else if (skb->protocol == htons(ETH_P_IPV6)) + return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr); + return NULL; +} + +int __init wg_allowedips_slab_init(void) +{ + node_cache = KMEM_CACHE(allowedips_node, 0); + return node_cache ? 0 : -ENOMEM; +} + +void wg_allowedips_slab_uninit(void) +{ + rcu_barrier(); + kmem_cache_destroy(node_cache); +} + +#include "selftest/allowedips.c" diff --git a/net/wireguard/allowedips.h b/net/wireguard/allowedips.h new file mode 100644 index 000000000000..2346c797eb4d --- /dev/null +++ b/net/wireguard/allowedips.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_ALLOWEDIPS_H +#define _WG_ALLOWEDIPS_H + +#include <linux/mutex.h> +#include <linux/ip.h> +#include <linux/ipv6.h> + +struct wg_peer; + +struct allowedips_node { + struct wg_peer __rcu *peer; + struct allowedips_node __rcu *bit[2]; + u8 cidr, bit_at_a, bit_at_b, bitlen; + u8 bits[16] __aligned(__alignof(u64)); + + /* Keep rarely used members at bottom to be beyond cache line. */ + unsigned long parent_bit_packed; + union { + struct list_head peer_list; + struct rcu_head rcu; + }; +}; + +struct allowedips { + struct allowedips_node __rcu *root4; + struct allowedips_node __rcu *root6; + u64 seq; +} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. 
*/ + +void wg_allowedips_init(struct allowedips *table); +void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); +int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock); +int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, + u8 cidr, struct wg_peer *peer, struct mutex *lock); +void wg_allowedips_remove_by_peer(struct allowedips *table, + struct wg_peer *peer, struct mutex *lock); +/* The ip input pointer should be __aligned(__alignof(u64))) */ +int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr); + +/* These return a strong reference to a peer: */ +struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, + struct sk_buff *skb); +struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, + struct sk_buff *skb); + +#ifdef DEBUG +bool wg_allowedips_selftest(void); +#endif + +int wg_allowedips_slab_init(void); +void wg_allowedips_slab_uninit(void); + +#endif /* _WG_ALLOWEDIPS_H */ diff --git a/net/wireguard/compat/Makefile.include b/net/wireguard/compat/Makefile.include new file mode 100644 index 000000000000..df7670ae8d6c --- /dev/null +++ b/net/wireguard/compat/Makefile.include @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + +kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) + +ccflags-y += -include $(kbuild-dir)/compat/compat.h +asflags-y += -include $(kbuild-dir)/compat/compat-asm.h +LINUXINCLUDE := -DCOMPAT_VERSION=$(VERSION) -DCOMPAT_PATCHLEVEL=$(PATCHLEVEL) -DCOMPAT_SUBLEVEL=$(SUBLEVEL) -I$(kbuild-dir)/compat/version $(LINUXINCLUDE) + +ifeq ($(wildcard $(srctree)/include/linux/ptr_ring.h),) +ccflags-y += -I$(kbuild-dir)/compat/ptr_ring/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/skb_array.h),) +ccflags-y += -I$(kbuild-dir)/compat/skb_array/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/siphash.h),) +ccflags-y += -I$(kbuild-dir)/compat/siphash/include +wireguard-y += compat/siphash/siphash.o +endif + +ifeq ($(wildcard $(srctree)/include/net/dst_cache.h),) +ccflags-y += -I$(kbuild-dir)/compat/dst_cache/include +wireguard-y += compat/dst_cache/dst_cache.o +endif + +ifeq ($(wildcard $(srctree)/arch/x86/include/asm/intel-family.h)$(CONFIG_X86),y) +ccflags-y += -I$(kbuild-dir)/compat/intel-family-x86/include +endif + +ifeq ($(wildcard $(srctree)/arch/x86/include/asm/fpu/api.h)$(CONFIG_X86),y) +ccflags-y += -I$(kbuild-dir)/compat/fpu-x86/include +endif + +ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/simd.h)$(shell grep -s -F "generic-y += simd.h" "$(srctree)/arch/$(SRCARCH)/Makefile" "$(srctree)/arch/$(SRCARCH)/Makefile"),) +ccflags-y += -I$(kbuild-dir)/compat/simd-asm/include +endif + +ifeq ($(wildcard $(srctree)/include/linux/simd.h),) +ccflags-y += -I$(kbuild-dir)/compat/simd/include +endif + +ifeq ($(wildcard $(srctree)/include/net/udp_tunnel.h),) +ccflags-y += -I$(kbuild-dir)/compat/udp_tunnel/include +wireguard-y += compat/udp_tunnel/udp_tunnel.o +endif + +ifeq ($(shell grep -s -F "int crypto_memneq" "$(srctree)/include/crypto/algapi.h"),) +ccflags-y += -include $(kbuild-dir)/compat/memneq/include.h +wireguard-y += compat/memneq/memneq.o +endif + +ifeq ($(shell grep -s -F "addr_gen_mode" "$(srctree)/include/linux/ipv6.h"),) +ccflags-y += -DCOMPAT_CANNOT_USE_DEV_CNF +endif + +ifdef CONFIG_HZ +ifeq ($(wildcard $(CURDIR)/include/generated/timeconst.h),) +ccflags-y += $(shell 
bash -c '((a=$(CONFIG_HZ), b=1000000)); while ((b > 0)); do ((t=b, b=a%b, a=t)); done; echo "-DHZ_TO_USEC_NUM=$$((1000000/a)) -DHZ_TO_USEC_DEN=$$(($(CONFIG_HZ)/a))";') +endif +endif + +ifeq ($(wildcard $(srctree)/arch/arm/include/asm/neon.h)$(CONFIG_ARM),y) +ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include +endif +ifeq ($(wildcard $(srctree)/arch/arm64/include/asm/neon.h)$(CONFIG_ARM64),y) +ccflags-y += -I$(kbuild-dir)/compat/neon-arm/include +endif + +ifeq ($(wildcard $(srctree)/include/net/dst_metadata.h),) +ccflags-y += -I$(kbuild-dir)/compat/dstmetadata/include +endif + +ifeq ($(CONFIG_X86_64),y) + ifeq ($(ssse3_instr),) + ssse3_instr := $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) + ccflags-y += $(ssse3_instr) + asflags-y += $(ssse3_instr) + endif + ifeq ($(avx_instr),) + avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) + ccflags-y += $(avx_instr) + asflags-y += $(avx_instr) + endif + ifeq ($(avx2_instr),) + avx2_instr := $(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) + ccflags-y += $(avx2_instr) + asflags-y += $(avx2_instr) + endif + ifeq ($(avx512_instr),) + avx512_instr := $(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) + ccflags-y += $(avx512_instr) + asflags-y += $(avx512_instr) + endif + ifeq ($(bmi2_instr),) + bmi2_instr :=$(call as-instr,mulx %rax$(comma)%rax$(comma)%rax,-DCONFIG_AS_BMI2=1) + ccflags-y += $(bmi2_instr) + asflags-y += $(bmi2_instr) + endif + ifeq ($(adx_instr),) + adx_instr :=$(call as-instr,adcx %rax$(comma)%rax,-DCONFIG_AS_ADX=1) + ccflags-y += $(adx_instr) + asflags-y += $(adx_instr) + endif +endif + +ifneq ($(shell grep -s -F "\#define LINUX_PACKAGE_ID \" Debian " "$(CURDIR)/include/generated/package.h"),) +ccflags-y += -DISDEBIAN +endif diff --git a/net/wireguard/compat/checksum/checksum_partial_compat.h b/net/wireguard/compat/checksum/checksum_partial_compat.h new file mode 100644 index 000000000000..3dfe5397a94f --- /dev/null +++ b/net/wireguard/compat/checksum/checksum_partial_compat.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#include <net/route.h> +#include <net/esp.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/ip6_checksum.h> + +#define IP6_MF 0x0001 +#define IP6_OFFSET 0xFFF8 +static inline int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, unsigned int max) +{ + if (skb_headlen(skb) >= len) + return 0; + if (max > skb->len) + max = skb->len; + if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) + return -ENOMEM; + if (skb_headlen(skb) < len) + return -EPROTO; + return 0; +} +#define MAX_IP_HDR_LEN 128 +static inline int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate) +{ + unsigned int off; + bool fragment; + int err; + fragment = false; + err = skb_maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) + fragment = true; + off = ip_hdrlen(skb); + err = -EPROTO; + if (fragment) + goto out; + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct tcphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); + break; + case IPPROTO_UDP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct udphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + udp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); + break; + default: + goto out; + } + err = 0; +out: + return err; +} +#define MAX_IPV6_HDR_LEN 256 +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) +static inline int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) +{ + int err; + u8 nexthdr; + unsigned int off; + unsigned int len; + bool fragment; + bool done; + fragment = false; + done = false; + off = sizeof(struct ipv6hdr); + err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + nexthdr = ipv6_hdr(skb)->nexthdr; + len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); + while (off <= len && !done) { + switch (nexthdr) { + case IPPROTO_DSTOPTS: + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: { + struct ipv6_opt_hdr *hp; + + err = skb_maybe_pull_tail(skb, off + sizeof(struct ipv6_opt_hdr), MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); + nexthdr = hp->nexthdr; + off += ipv6_optlen(hp); + break; + } + case IPPROTO_FRAGMENT: { + struct frag_hdr *hp; + err = skb_maybe_pull_tail(skb, off + sizeof(struct frag_hdr), MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct frag_hdr, skb, off); + if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) + fragment = true; + nexthdr = hp->nexthdr; + off += sizeof(struct frag_hdr); + break; + } + default: + done = true; + break; + } + } + err = -EPROTO; + if (!done || fragment) + goto out; + switch (nexthdr) { + case IPPROTO_TCP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct tcphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 
skb->len - off, + IPPROTO_TCP, 0); + break; + case IPPROTO_UDP: + err = skb_maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + if (!skb_partial_csum_set(skb, off, + offsetof(struct udphdr, check))) { + err = -EPROTO; + goto out; + } + + if (recalculate) + udp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); + break; + default: + goto out; + } + err = 0; +out: + return err; +} +static inline int skb_checksum_setup(struct sk_buff *skb, bool recalculate) +{ + int err; + switch (skb->protocol) { + case htons(ETH_P_IP): + err = skb_checksum_setup_ip(skb, recalculate); + break; + + case htons(ETH_P_IPV6): + err = skb_checksum_setup_ipv6(skb, recalculate); + break; + default: + err = -EPROTO; + break; + } + return err; +} diff --git a/net/wireguard/compat/compat-asm.h b/net/wireguard/compat/compat-asm.h new file mode 100644 index 000000000000..345087bf0de8 --- /dev/null +++ b/net/wireguard/compat/compat-asm.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_COMPATASM_H +#define _WG_COMPATASM_H + +#include <linux/linkage.h> +#include <linux/kconfig.h> +#include <linux/version.h> + +#ifdef RHEL_MAJOR +#if RHEL_MAJOR == 7 +#define ISRHEL7 +#elif RHEL_MAJOR == 8 +#define ISRHEL8 +#endif +#endif + +/* PaX compatibility */ +#if defined(RAP_PLUGIN) && defined(RAP_ENTRY) +#undef ENTRY +#define ENTRY RAP_ENTRY +#endif + +#if defined(__LINUX_ARM_ARCH__) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro ret\c, reg +#if __LINUX_ARM_ARCH__ < 6 + mov\c pc, \reg +#else + .ifeqs "\reg", "lr" + bx\c \reg + .else + mov\c pc, \reg + .endif +#endif + .endm + .endr +#endif + +#if defined(__LINUX_ARM_ARCH__) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) +#include <asm/assembler.h> +#define lspush push +#define lspull pull +#undef push +#undef pull +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISRHEL8) && !defined(SYM_FUNC_START) +#define SYM_FUNC_START ENTRY +#define SYM_FUNC_END ENDPROC +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) +#define blake2s_compress_ssse3 zinc_blake2s_compress_ssse3 +#define blake2s_compress_avx512 zinc_blake2s_compress_avx512 +#define poly1305_init_arm zinc_poly1305_init_arm +#define poly1305_blocks_arm zinc_poly1305_blocks_arm +#define poly1305_emit_arm zinc_poly1305_emit_arm +#define poly1305_blocks_neon zinc_poly1305_blocks_neon +#define poly1305_emit_neon zinc_poly1305_emit_neon +#define poly1305_init_mips zinc_poly1305_init_mips +#define poly1305_blocks_mips zinc_poly1305_blocks_mips +#define poly1305_emit_mips zinc_poly1305_emit_mips +#define poly1305_init_x86_64 zinc_poly1305_init_x86_64 +#define poly1305_blocks_x86_64 zinc_poly1305_blocks_x86_64 +#define poly1305_emit_x86_64 zinc_poly1305_emit_x86_64 +#define poly1305_emit_avx zinc_poly1305_emit_avx +#define poly1305_blocks_avx zinc_poly1305_blocks_avx +#define poly1305_blocks_avx2 zinc_poly1305_blocks_avx2 +#define poly1305_blocks_avx512 zinc_poly1305_blocks_avx512 +#define curve25519_neon zinc_curve25519_neon +#define hchacha20_ssse3 zinc_hchacha20_ssse3 +#define chacha20_ssse3 zinc_chacha20_ssse3 +#define chacha20_avx2 zinc_chacha20_avx2 +#define chacha20_avx512 zinc_chacha20_avx512 +#define chacha20_avx512vl zinc_chacha20_avx512vl +#define chacha20_mips zinc_chacha20_mips +#define 
chacha20_arm zinc_chacha20_arm +#define hchacha20_arm zinc_hchacha20_arm +#define chacha20_neon zinc_chacha20_neon +#endif + +#endif /* _WG_COMPATASM_H */ diff --git a/net/wireguard/compat/compat.h b/net/wireguard/compat/compat.h new file mode 100644 index 000000000000..69dada89494f --- /dev/null +++ b/net/wireguard/compat/compat.h @@ -0,0 +1,1199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_COMPAT_H +#define _WG_COMPAT_H + +#include <linux/kconfig.h> +#include <linux/version.h> +#include <linux/types.h> +#include <generated/utsrelease.h> + +#ifdef RHEL_MAJOR +#if RHEL_MAJOR == 7 +#define ISRHEL7 +#elif RHEL_MAJOR == 8 +#define ISRHEL8 +#endif +#endif +#ifdef UTS_UBUNTU_RELEASE_ABI +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +#define ISUBUNTU1604 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +#define ISUBUNTU1804 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define ISUBUNTU1904 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) +#define ISUBUNTU1910 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) +#error "WireGuard requires Linux >= 3.10" +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +#error "WireGuard has been merged into Linux >= 5.6 and therefore this compatibility module is no longer required." +#endif + +#if defined(ISRHEL7) +#include <linux/skbuff.h> +#define headers_end headers_start +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) +#define headers_start data +#define headers_end data +#endif + +#include <linux/cache.h> +#include <linux/init.h> +#ifndef __ro_after_init +#define __ro_after_init __read_mostly +#endif + +#include <linux/compiler.h> +#ifndef READ_ONCE +#define READ_ONCE ACCESS_ONCE +#endif +#ifndef WRITE_ONCE +#ifdef ACCESS_ONCE_RW +#define WRITE_ONCE(p, v) (ACCESS_ONCE_RW(p) = (v)) +#else +#define WRITE_ONCE(p, v) (ACCESS_ONCE(p) = (v)) +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include "udp_tunnel/udp_tunnel_partial_compat.h" +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(DEBUG) && defined(net_dbg_ratelimited) +#undef net_dbg_ratelimited +#define net_dbg_ratelimited(fmt, ...) 
do { if (0) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#include <linux/rcupdate.h> +#ifndef RCU_LOCKDEP_WARN +#define RCU_LOCKDEP_WARN(cond, message) rcu_lockdep_assert(!(cond), message) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(ISRHEL7) +#define ipv6_dst_lookup(a, b, c, d) ipv6_dst_lookup(b, c, d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 83) +#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup_flow(b, c, d) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) && !defined(ISUBUNTU1904)) || (!defined(ISRHEL8) && !defined(ISDEBIAN) && !defined(ISUBUNTU1804) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224) && !defined(ISUBUNTU1604) && !defined(ISRHEL7)) +#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void *)0 ?: dst +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7) +#include <net/ipv6.h> +struct ipv6_stub_type { + void *udpv6_encap_enable; + int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); +}; +static const struct ipv6_stub_type ipv6_stub_impl = { + .udpv6_encap_enable = (void *)1, + .ipv6_dst_lookup = ip6_dst_lookup +}; +static const struct ipv6_stub_type *ipv6_stub = &ipv6_stub_impl; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7) +#include <net/addrconf.h> +static inline bool ipv6_mod_enabled(void) +{ + return ipv6_stub != NULL && ipv6_stub->udpv6_encap_enable != NULL; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) && !defined(ISRHEL7) +#include <linux/skbuff.h> +static inline void skb_reset_tc(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + skb->tc_verd = 0; +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#include <linux/random.h> +#include <linux/siphash.h> +static inline u32 __compat_get_random_u32(void) +{ + static siphash_key_t key; + static u32 counter = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + static bool has_seeded = false; + if (unlikely(!has_seeded)) { + get_random_bytes(&key, sizeof(key)); + has_seeded = true; + } +#else + get_random_once(&key, sizeof(key)); +#endif + return siphash_2u32(counter++, get_random_int(), &key); +} +#define get_random_u32 __compat_get_random_u32 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(ISRHEL7) +static inline void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; +} +#define COMPAT_CANNOT_USE_CSUM_LEVEL +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include <linux/netdevice.h> +#ifndef netdev_alloc_pcpu_stats +#define pcpu_sw_netstats pcpu_tstats +#endif +#ifndef netdev_alloc_pcpu_stats +#define netdev_alloc_pcpu_stats alloc_percpu +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(ISRHEL7) +#include <linux/netdevice.h> +#ifndef netdev_alloc_pcpu_stats 
+#define netdev_alloc_pcpu_stats(type) \ +({ \ + typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ + if (pcpu_stats) { \ + int __cpu; \ + for_each_possible_cpu(__cpu) { \ + typeof(type) *stat; \ + stat = per_cpu_ptr(pcpu_stats, __cpu); \ + u64_stats_init(&stat->syncp); \ + } \ + } \ + pcpu_stats; \ +}) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include "checksum/checksum_partial_compat.h" +static inline void *__compat_pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) +{ + if (tail != skb) { + skb->data_len += len; + skb->len += len; + } + return skb_put(tail, len); +} +#define pskb_put __compat_pskb_put +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7) +#include <net/xfrm.h> +static inline void skb_scrub_packet(struct sk_buff *skb, bool xnet) +{ +#ifdef CONFIG_CAVIUM_OCTEON_IPFWD_OFFLOAD + memset(&skb->cvm_info, 0, sizeof(skb->cvm_info)); + skb->cvm_reserved = 0; +#endif + skb->tstamp.tv64 = 0; + skb->pkt_type = PACKET_HOST; + skb->skb_iif = 0; + skb_dst_drop(skb); + secpath_reset(skb); + nf_reset(skb); + nf_reset_trace(skb); + if (!xnet) + return; + skb_orphan(skb); + skb->mark = 0; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) +#define skb_scrub_packet(a, b) skb_scrub_packet(a) +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 63)) && !defined(ISRHEL7) +#include <linux/random.h> +static inline u32 __compat_prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64)prandom_u32() * ep_ro) >> 32); +} +#define prandom_u32_max __compat_prandom_u32_max +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include <linux/kernel.h> +#ifndef U8_MAX +#define U8_MAX ((u8)~0U) +#endif +#ifndef S8_MAX +#define S8_MAX ((s8)(U8_MAX >> 1)) +#endif +#ifndef S8_MIN +#define S8_MIN ((s8)(-S8_MAX - 1)) +#endif +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif +#ifndef S16_MAX +#define S16_MAX ((s16)(U16_MAX >> 1)) +#endif +#ifndef S16_MIN +#define S16_MIN ((s16)(-S16_MAX - 1)) +#endif +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif +#ifndef S32_MAX +#define S32_MAX ((s32)(U32_MAX >> 1)) +#endif +#ifndef S32_MIN +#define S32_MIN ((s32)(-S32_MAX - 1)) +#endif +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif +#ifndef S64_MAX +#define S64_MAX ((s64)(U64_MAX >> 1)) +#endif +#ifndef S64_MIN +#define S64_MIN ((s64)(-S64_MAX - 1)) +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 3) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 35) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 24) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 33) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 60) && !defined(ISRHEL7)) +static inline void memzero_explicit(void *s, size_t count) +{ + memset(s, 0, count); + barrier(); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(ISRHEL7) +static const struct in6_addr __compat_in6addr_any = IN6ADDR_ANY_INIT; +#define in6addr_any __compat_in6addr_any +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) +#include 
<linux/completion.h> +#include <linux/random.h> +#include <linux/errno.h> +struct rng_initializer { + struct completion done; + struct random_ready_callback cb; +}; +static inline void rng_initialized_callback(struct random_ready_callback *cb) +{ + complete(&container_of(cb, struct rng_initializer, cb)->done); +} +static inline int wait_for_random_bytes(void) +{ + static bool rng_is_initialized = false; + int ret; + if (unlikely(!rng_is_initialized)) { + struct rng_initializer rng = { + .done = COMPLETION_INITIALIZER(rng.done), + .cb = { .owner = THIS_MODULE, .func = rng_initialized_callback } + }; + ret = add_random_ready_callback(&rng.cb); + if (!ret) { + ret = wait_for_completion_interruptible(&rng.done); + if (ret) { + del_random_ready_callback(&rng.cb); + return ret; + } + } else if (ret != -EALREADY) + return ret; + rng_is_initialized = true; + } + return 0; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +/* This is a disaster. Without this API, we really have no way of + * knowing if it's initialized. We just return that it has and hope + * for the best... */ +static inline int wait_for_random_bytes(void) +{ + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 285)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) && !defined(ISRHEL8) +#include <linux/random.h> +#include <linux/slab.h> +struct rng_is_initialized_callback { + struct random_ready_callback cb; + atomic_t *rng_state; +}; +static inline void rng_is_initialized_callback(struct random_ready_callback *cb) +{ + struct rng_is_initialized_callback *rdy = container_of(cb, struct rng_is_initialized_callback, cb); + atomic_set(rdy->rng_state, 2); + kfree(rdy); +} +static inline bool rng_is_initialized(void) +{ + static atomic_t rng_state = ATOMIC_INIT(0); + + if (atomic_read(&rng_state) == 2) + return true; + + if (atomic_cmpxchg(&rng_state, 0, 1) == 0) { + int ret; + struct rng_is_initialized_callback *rdy = kmalloc(sizeof(*rdy), GFP_ATOMIC); + if (!rdy) { + atomic_set(&rng_state, 0); + return false; + } + rdy->cb.owner = THIS_MODULE; + rdy->cb.func = rng_is_initialized_callback; + rdy->rng_state = &rng_state; + ret = add_random_ready_callback(&rdy->cb); + if (ret) + kfree(rdy); + if (ret == -EALREADY) { + atomic_set(&rng_state, 2); + return true; + } else if (ret) + atomic_set(&rng_state, 0); + return false; + } + return false; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +/* This is a disaster. Without this API, we really have no way of + * knowing if it's initialized. We just return that it has and hope + * for the best... 
*/ +static inline bool rng_is_initialized(void) +{ + return true; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) +static inline int get_random_bytes_wait(void *buf, int nbytes) +{ + int ret = wait_for_random_bytes(); + if (unlikely(ret)) + return ret; + get_random_bytes(buf, nbytes); + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) && !defined(ISRHEL7) +#define system_power_efficient_wq system_unbound_wq +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) +#include <linux/ktime.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#include <linux/hrtimer.h> +#ifndef ktime_get_real_ts64 +#define timespec64 timespec +#define ktime_get_real_ts64 ktime_get_real_ts +#endif +#else +#include <linux/timekeeping.h> +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +static inline u64 __compat_jiffies64_to_nsecs(u64 j) +{ +#if !(NSEC_PER_SEC % HZ) + return (NSEC_PER_SEC / HZ) * j; +#else + return div_u64(j * HZ_TO_USEC_NUM, HZ_TO_USEC_DEN) * 1000; +#endif +} +#define jiffies64_to_nsecs __compat_jiffies64_to_nsecs +#endif +static inline u64 ktime_get_coarse_boottime_ns(void) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + return ktime_to_ns(ktime_get_boottime()); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 12) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 53) + return ktime_to_ns(ktime_mono_to_any(ns_to_ktime(jiffies64_to_nsecs(get_jiffies_64())), TK_OFFS_BOOT)); +#else + return ktime_to_ns(ktime_get_coarse_boottime()); +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include <linux/inetdevice.h> +static inline __be32 __compat_confirm_addr_indev(struct in_device *in_dev, __be32 dst, __be32 local, int scope) +{ + int same = 0; + __be32 addr = 0; + for_ifa(in_dev) { + if (!addr && (local == ifa->ifa_local || !local) && ifa->ifa_scope <= scope) { + addr = ifa->ifa_local; + if (same) + break; + } + if (!same) { + same = (!local || inet_ifa_match(local, ifa)) && (!dst || inet_ifa_match(dst, ifa)); + if (same && addr) { + if (local || !dst) + break; + if (inet_ifa_match(addr, ifa)) + break; + if (ifa->ifa_scope <= scope) { + addr = ifa->ifa_local; + break; + } + same = 0; + } + } + } endfor_ifa(in_dev); + return same ? 
addr : 0; +} +static inline __be32 __compat_inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope) +{ + __be32 addr = 0; + struct net_device *dev; + if (in_dev) + return __compat_confirm_addr_indev(in_dev, dst, local, scope); + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + in_dev = __in_dev_get_rcu(dev); + if (in_dev) { + addr = __compat_confirm_addr_indev(in_dev, dst, local, scope); + if (addr) + break; + } + } + rcu_read_unlock(); + return addr; +} +#define inet_confirm_addr __compat_inet_confirm_addr +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/slab.h> +static inline void *__compat_kvmalloc(size_t size, gfp_t flags) +{ + gfp_t kmalloc_flags = flags; + void *ret; + if (size > PAGE_SIZE) { + kmalloc_flags |= __GFP_NOWARN; + if (!(kmalloc_flags & __GFP_REPEAT) || (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + kmalloc_flags |= __GFP_NORETRY; + } + ret = kmalloc(size, kmalloc_flags); + if (ret || size <= PAGE_SIZE) + return ret; + return __vmalloc(size, flags, PAGE_KERNEL); +} +static inline void *__compat_kvzalloc(size_t size, gfp_t flags) +{ + return __compat_kvmalloc(size, flags | __GFP_ZERO); +} +#define kvmalloc __compat_kvmalloc +#define kvzalloc __compat_kvzalloc +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) +#include <linux/vmalloc.h> +#include <linux/mm.h> +static inline void __compat_kvfree(const void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} +#define kvfree __compat_kvfree +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <linux/vmalloc.h> +#include <linux/mm.h> +static inline void *__compat_kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + if (n != 0 && SIZE_MAX / n < size) + return NULL; + return kvmalloc(n * size, flags); +} +#define kvmalloc_array __compat_kvmalloc_array +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +#include <linux/vmalloc.h> +#include <linux/mm.h> +static inline void *__compat_kvcalloc(size_t n, size_t size, gfp_t flags) +{ + return kvmalloc_array(n, size, flags | __GFP_ZERO); +} +#define kvcalloc __compat_kvcalloc +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9) +#include <linux/netdevice.h> +#define priv_destructor destructor +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) +#define wg_newlink(a,b,c,d,e) wg_newlink(a,b,c,d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <net/netlink.h> +#include <net/genetlink.h> +#define nlmsg_parse(a, b, c, d, e, f) nlmsg_parse(a, b, c, d, e) +#define nla_parse_nested(a, b, c, d, e) nla_parse_nested(a, b, c, d) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(ISRHEL7) +static inline struct nlattr **genl_family_attrbuf(const struct genl_family *family) +{ + return family->attrbuf; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +#define PTR_ERR_OR_ZERO(p) PTR_RET(p) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) +#include <net/netlink.h> +#define nla_put_u64_64bit(a, b, c, d) nla_put_u64(a, b, c) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +#include <net/genetlink.h> +#ifndef GENL_UNS_ADMIN_PERM +#define GENL_UNS_ADMIN_PERM GENL_ADMIN_PERM +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(ISRHEL7) +#include <net/genetlink.h> +#if LINUX_VERSION_CODE 
< KERNEL_VERSION(3, 13, 0) +#define genl_register_family(a) genl_register_family_with_ops(a, genl_ops, ARRAY_SIZE(genl_ops)) +#define COMPAT_CANNOT_USE_CONST_GENL_OPS +#else +#define genl_register_family(a) genl_register_family_with_ops(a, genl_ops) +#endif +#define COMPAT_CANNOT_USE_GENL_NOPS +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 2) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 16) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 65) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 101) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 84) +#define __COMPAT_NETLINK_DUMP_BLOCK { \ + int ret; \ + skb->end -= nlmsg_total_size(sizeof(int)); \ + ret = wg_get_device_dump_real(skb, cb); \ + skb->end += nlmsg_total_size(sizeof(int)); \ + return ret; \ +} +#define __COMPAT_NETLINK_DUMP_OVERRIDE +#else +#define __COMPAT_NETLINK_DUMP_BLOCK return wg_get_device_dump_real(skb, cb); +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 8) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 25) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 87) +#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \ +static int wg_get_device_dump(a, b) { \ + struct wg_device *wg = (struct wg_device *)cb->args[0]; \ + if (!wg) { \ + int ret = wg_get_device_start(cb); \ + if (ret) \ + return ret; \ + } \ + __COMPAT_NETLINK_DUMP_BLOCK \ +} \ +static int wg_get_device_dump_real(a, b) +#define COMPAT_CANNOT_USE_NETLINK_START +#elif defined(__COMPAT_NETLINK_DUMP_OVERRIDE) +#define wg_get_device_dump(a, b) wg_get_device_dump_real(a, b); \ +static int wg_get_device_dump(a, b) { \ + __COMPAT_NETLINK_DUMP_BLOCK \ +} \ +static int wg_get_device_dump_real(a, b) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define COMPAT_CANNOT_USE_IN6_DEV_GET +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#define COMPAT_CANNOT_USE_IFF_NO_QUEUE +#endif + +#if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#include <asm/user.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) +#include <asm/xsave.h> +#include <asm/xcr.h> +static inline int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name) +{ + return boot_cpu_has(X86_FEATURE_XSAVE) && xgetbv(XCR_XFEATURE_ENABLED_MASK) & xfeatures_needed; +} +#endif +#ifndef XFEATURE_MASK_YMM +#define XFEATURE_MASK_YMM XSTATE_YMM +#endif +#ifndef XFEATURE_MASK_SSE +#define XFEATURE_MASK_SSE XSTATE_SSE +#endif +#ifndef XSTATE_AVX512 +#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) +#endif +#ifndef XFEATURE_MASK_AVX512 +#define XFEATURE_MASK_AVX512 XSTATE_AVX512 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && defined(CONFIG_X86_64) +/* This is incredibly dumb and reckless, but as it turns out, there's + * not really hardware Linux runs properly on that supports F but not BW + * and VL, so in practice this isn't so bad. Plus, this is compat layer, + * so the bar remains fairly low. 
+ */ +#include <asm/cpufeature.h> +#ifndef X86_FEATURE_AVX512BW +#define X86_FEATURE_AVX512BW X86_FEATURE_AVX512F +#endif +#ifndef X86_FEATURE_AVX512VL +#define X86_FEATURE_AVX512VL X86_FEATURE_AVX512F +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +struct __compat_dummy_container { char dev; }; +#define netdev_notifier_info net_device *)data); __attribute((unused)) char __compat_dummy_variable = ((struct __compat_dummy_container +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a)) +#define from_timer(var, callback_timer, timer_fieldname) container_of(callback_timer, typeof(*var), timer_fieldname) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 3) +#define COMPAT_CANNOT_USE_AVX512 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#include <net/genetlink.h> +#define genl_dump_check_consistent(a, b) genl_dump_check_consistent(a, b, &genl_family) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && !defined(ISRHEL7) +static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned int len) +{ + void *tmp = skb_put(skb, len); + memcpy(tmp, data, len); + return tmp; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7) +#define napi_complete_done(n, work_done) napi_complete(n) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#include <linux/netdevice.h> +/* NAPI_STATE_SCHED gets set by netif_napi_add anyway, so this is safe. + * Also, kernels without NAPI_STATE_NO_BUSY_POLL don't have a call to + * napi_hash_add inside of netif_napi_add. + */ +#define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#include <linux/atomic.h> +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) ({ int __compat_p1 = atomic_read(v); smp_rmb(); __compat_p1; }) +#endif +#ifndef atomic_set_release +#define atomic_set_release(v, i) ({ smp_wmb(); atomic_set(v, i); }) +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#include <linux/atomic.h> +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif +#ifndef atomic_set_release +#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 285)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 320)) +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include <crypto/algapi.h> +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + + while (size > 0) { + *d++ = *s1++ ^ *s2++; + size -= sizeof(unsigned long); + } + } else { + if (unlikely(dst != src1)) + memmove(dst, src1, size); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + crypto_xor(dst, src2, 
size); +#else + __crypto_xor(dst, src2, size); +#endif + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +#define read_cpuid_part() read_cpuid_part_number() +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7) +#define hlist_add_behind(a, b) hlist_add_after(b, a) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISRHEL8) +#define totalram_pages() totalram_pages +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) +struct __kernel_timespec { + int64_t tv_sec, tv_nsec; +}; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) +#include <linux/time64.h> +#ifdef __kernel_timespec +#undef __kernel_timespec +struct __kernel_timespec { + int64_t tv_sec, tv_nsec; +}; +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <linux/kernel.h> +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +#include <linux/skbuff.h> +#define skb_probe_transport_header(a) skb_probe_transport_header(a, 0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && !defined(ISRHEL7) +#define ignore_df local_df +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +/* Note that all intentional uses of the non-_bh variety need to explicitly + * undef these, conditionalized on COMPAT_CANNOT_DEPRECIATE_BH_RCU. + */ +#include <linux/rcupdate.h> +static __always_inline void old_synchronize_rcu(void) +{ + synchronize_rcu(); +} +static __always_inline void old_call_rcu(void *a, void *b) +{ + call_rcu(a, b); +} +static __always_inline void old_rcu_barrier(void) +{ + rcu_barrier(); +} +#ifdef synchronize_rcu +#undef synchronize_rcu +#endif +#ifdef call_rcu +#undef call_rcu +#endif +#ifdef rcu_barrier +#undef rcu_barrier +#endif +#define synchronize_rcu synchronize_rcu_bh +#define call_rcu call_rcu_bh +#define rcu_barrier rcu_barrier_bh +#define COMPAT_CANNOT_DEPRECIATE_BH_RCU +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 10) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) && !defined(ISRHEL8)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 217) +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8) +#include <net/netlink.h> +#ifndef NLA_POLICY_EXACT_LEN +#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_UNSPEC, .len = _len } +#endif +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8) +#include <net/netlink.h> +#ifndef NLA_POLICY_MIN_LEN +#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_UNSPEC, .len = _len } +#endif +#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && defined(__aarch64__) +#define cpu_have_named_feature(name) (elf_hwcap & (HWCAP_ ## name)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +#include <linux/stddef.h> +#ifndef offsetofend +#define offsetofend(TYPE, MEMBER) (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) && !defined(ISRHEL8) +#define genl_dumpit_info(cb) ({ \ + struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \ + BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \ + a->attrs = genl_family_attrbuf(&genl_family); \ + if (nlmsg_parse(cb->nlh, GENL_HDRLEN + 
genl_family.hdrsize, a->attrs, genl_family.maxattr, device_policy, NULL) < 0) \ + memset(a->attrs, 0, (genl_family.maxattr + 1) * sizeof(struct nlattr *)); \ + a; \ +}) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +#include <linux/skbuff.h> +#ifndef skb_list_walk_safe +#define skb_list_walk_safe(first, skb, next) \ + for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \ + (skb) = (next), (next) = (skb) ? (skb)->next : NULL) +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 200) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 249)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 285)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 320)) +#define blake2s_init zinc_blake2s_init +#define blake2s_init_key zinc_blake2s_init_key +#define blake2s_update zinc_blake2s_update +#define blake2s_final zinc_blake2s_final +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) +#define blake2s_hmac zinc_blake2s_hmac +#define chacha20 zinc_chacha20 +#define hchacha20 zinc_hchacha20 +#define chacha20poly1305_encrypt zinc_chacha20poly1305_encrypt +#define chacha20poly1305_encrypt_sg_inplace zinc_chacha20poly1305_encrypt_sg_inplace +#define chacha20poly1305_decrypt zinc_chacha20poly1305_decrypt +#define chacha20poly1305_decrypt_sg_inplace zinc_chacha20poly1305_decrypt_sg_inplace +#define xchacha20poly1305_encrypt zinc_xchacha20poly1305_encrypt +#define xchacha20poly1305_decrypt zinc_xchacha20poly1305_decrypt +#define curve25519 zinc_curve25519 +#define curve25519_generate_secret zinc_curve25519_generate_secret +#define curve25519_generate_public zinc_curve25519_generate_public +#define poly1305_init zinc_poly1305_init +#define poly1305_update zinc_poly1305_update +#define poly1305_final zinc_poly1305_final +#define blake2s_compress_ssse3 zinc_blake2s_compress_ssse3 +#define blake2s_compress_avx512 zinc_blake2s_compress_avx512 +#define poly1305_init_arm zinc_poly1305_init_arm +#define poly1305_blocks_arm zinc_poly1305_blocks_arm +#define poly1305_emit_arm zinc_poly1305_emit_arm +#define poly1305_blocks_neon zinc_poly1305_blocks_neon +#define poly1305_emit_neon zinc_poly1305_emit_neon +#define poly1305_init_mips zinc_poly1305_init_mips +#define poly1305_blocks_mips zinc_poly1305_blocks_mips +#define poly1305_emit_mips zinc_poly1305_emit_mips +#define poly1305_init_x86_64 zinc_poly1305_init_x86_64 +#define poly1305_blocks_x86_64 zinc_poly1305_blocks_x86_64 +#define poly1305_emit_x86_64 zinc_poly1305_emit_x86_64 +#define poly1305_emit_avx zinc_poly1305_emit_avx +#define poly1305_blocks_avx zinc_poly1305_blocks_avx +#define poly1305_blocks_avx2 zinc_poly1305_blocks_avx2 +#define poly1305_blocks_avx512 zinc_poly1305_blocks_avx512 +#define curve25519_neon zinc_curve25519_neon +#define hchacha20_ssse3 zinc_hchacha20_ssse3 +#define chacha20_ssse3 zinc_chacha20_ssse3 +#define chacha20_avx2 zinc_chacha20_avx2 +#define chacha20_avx512 zinc_chacha20_avx512 +#define chacha20_avx512vl zinc_chacha20_avx512vl +#define chacha20_mips zinc_chacha20_mips +#define chacha20_arm zinc_chacha20_arm +#define hchacha20_arm zinc_hchacha20_arm +#define chacha20_neon zinc_chacha20_neon +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) && !defined(ISRHEL7) +#include <linux/skbuff.h> +static inline int skb_ensure_writable(struct sk_buff *skb, int write_len) +{ + if (!pskb_may_pull(skb, write_len)) + return -ENOMEM; + + if (!skb_cloned(skb) 
|| skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 102) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 178) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 223) && LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 259) || defined(ISRHEL8) || defined(ISUBUNTU1804) +#include <linux/icmpv6.h> +#include <net/icmp.h> +#if IS_ENABLED(CONFIG_NF_NAT) +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/netfilter/nf_conntrack.h> +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) +#include <net/netfilter/nf_nat_core.h> +#endif +static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + __be32 orig_ip; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmp_send(skb_in, type, code, info); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct iphdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct iphdr)))) + goto out; + + orig_ip = ip_hdr(skb_in)->saddr; + ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmp_send(skb_in, type, code, info); + ip_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct in6_addr orig_ip; + struct nf_conn *ct; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmpv6_send(skb_in, type, code, info); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) + goto out; + + orig_ip = ipv6_hdr(skb_in)->saddr; + ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmpv6_send(skb_in, type, code, info); + ipv6_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +#else +static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmp_send(skb_in, type, code, info); +} +static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + memset(skb_in->cb, 0, sizeof(skb_in->cb)); + icmpv6_send(skb_in, type, code, info); +} +#endif +#define icmp_ndo_send __compat_icmp_ndo_send +#define icmpv6_ndo_send __compat_icmpv6_ndo_send +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#define COMPAT_CANNOT_USE_MAX_MTU +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14) && 
LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) && !defined(ISUBUNTU1910) && !defined(ISUBUNTU1904) && !defined(ISRHEL8)) +#include <linux/skbuff.h> +#include <net/sch_generic.h> +static inline void skb_reset_redirect(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_SCHED + skb_reset_tc(skb); +#endif +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#define skb_get_hash skb_get_rxhash +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(ISRHEL7) +#define hash rxhash +#define l4_hash l4_rxhash +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && !defined(ISRHEL7) +#define sw_hash ignore_df = 0; skb->nf_trace = skb->ooo_okay +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) +#define pre_exit exit +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) +#include <linux/skbuff.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +static inline __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) +{ + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && + ip_hdr(skb)->version == 4) + return htons(ETH_P_IP); + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && + ipv6_hdr(skb)->version == 6) + return htons(ETH_P_IPV6); + return 0; +} +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) || defined(ISRHEL8) +static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; +#else +#define header_ops hard_header_len +#define ip_tunnel_header_ops *(char *)0 - (char *)0 +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) +#define kfree_sensitive(a) kzfree(a) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(ISRHEL7) +#define xchg_release xchg +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) +#include <asm/barrier.h> +#ifndef smp_load_acquire +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + smp_mb(); \ + ___p1; \ +}) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) +#include <net/dst_cache.h> +struct dst_cache_pcpu { + unsigned long refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; +#define COMPAT_HAS_DEFINED_DST_CACHE_PCPU +static inline void dst_cache_reset_now(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + dst_cache->reset_ts = jiffies; + for_each_possible_cpu(i) { + struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i); + struct dst_entry *dst = idst->dst; + + idst->cookie = 0; + idst->dst = NULL; + dst_release(dst); + } +} +#endif + +#if defined(ISUBUNTU1604) || defined(ISRHEL7) +#include <linux/siphash.h> +#ifndef _WG_LINUX_SIPHASH_H +#define hsiphash_1u32 siphash_1u32 +#define hsiphash_2u32 siphash_2u32 +#define hsiphash_3u32 siphash_3u32 +#define hsiphash_key_t siphash_key_t +#endif +#endif + +#ifdef CONFIG_VE +#include <linux/netdev_features.h> +#ifdef NETIF_F_VIRTUAL +#undef NETIF_F_LLTX +#define NETIF_F_LLTX (__NETIF_F(LLTX) | __NETIF_F(VIRTUAL)) +#endif +#endif + +/* https://github.com/ClangBuiltLinux/linux/issues/7 */ +#if defined( __clang__) && (!defined(CONFIG_CLANG_VERSION) || CONFIG_CLANG_VERSION < 80000) +#include <linux/bug.h> +#undef BUILD_BUG_ON +#define BUILD_BUG_ON(x) +#endif + +/* PaX compatibility */ +#ifdef CONSTIFY_PLUGIN +#include 
<linux/cache.h> +#undef __read_mostly +#define __read_mostly +#endif +#if (defined(CONFIG_PAX) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#include <linux/timer.h> +#define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer) +#define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer) +#define wg_expired_new_handshake(a) wg_expired_new_handshake(unsigned long timer) +#define wg_expired_zero_key_material(a) wg_expired_zero_key_material(unsigned long timer) +#define wg_expired_send_persistent_keepalive(a) wg_expired_send_persistent_keepalive(unsigned long timer) +#undef timer_setup +#define timer_setup(a, b, c) setup_timer(a, ((void (*)(unsigned long))b), ((unsigned long)a)) +#undef from_timer +#define from_timer(var, callback_timer, timer_fieldname) container_of((struct timer_list *)callback_timer, typeof(*var), timer_fieldname) +#endif + +#endif /* _WG_COMPAT_H */ diff --git a/net/wireguard/compat/dst_cache/dst_cache.c b/net/wireguard/compat/dst_cache/dst_cache.c new file mode 100644 index 000000000000..f74c43c550eb --- /dev/null +++ b/net/wireguard/compat/dst_cache/dst_cache.c @@ -0,0 +1,177 @@ +/* + * net/core/dst_cache.c - dst entry cache + * + * Copyright (c) 2016 Paolo Abeni <pabeni@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/percpu.h> +#include <net/dst_cache.h> +#include <net/route.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_fib.h> +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 50) +static inline u32 rt6_get_cookie(const struct rt6_info *rt) +{ + if ((unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from)) + rt = (struct rt6_info *)(rt->dst.from); + + return rt->rt6i_node ? 
rt->rt6i_node->fn_sernum : 0; +} +#endif +#endif +#include <uapi/linux/in.h> + +#ifndef COMPAT_HAS_DEFINED_DST_CACHE_PCPU +struct dst_cache_pcpu { + unsigned long refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; +#endif + +static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache, + struct dst_entry *dst, u32 cookie) +{ + dst_release(dst_cache->dst); + if (dst) + dst_hold(dst); + + dst_cache->cookie = cookie; + dst_cache->dst = dst; +} + +static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache, + struct dst_cache_pcpu *idst) +{ + struct dst_entry *dst; + + dst = idst->dst; + if (!dst) + goto fail; + + /* the cache already hold a dst reference; it can't go away */ + dst_hold(dst); + + if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) || + (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) { + dst_cache_per_cpu_dst_set(idst, NULL, 0); + dst_release(dst); + goto fail; + } + return dst; + +fail: + idst->refresh_ts = jiffies; + return NULL; +} + +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache) +{ + if (!dst_cache->cache) + return NULL; + + return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache)); +} + +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr) +{ + struct dst_cache_pcpu *idst; + struct dst_entry *dst; + + if (!dst_cache->cache) + return NULL; + + idst = this_cpu_ptr(dst_cache->cache); + dst = dst_cache_per_cpu_get(dst_cache, idst); + if (!dst) + return NULL; + + *saddr = idst->in_saddr.s_addr; + return container_of(dst, struct rtable, dst); +} + +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr) +{ + struct dst_cache_pcpu *idst; + + if (!dst_cache->cache) + return; + + idst = this_cpu_ptr(dst_cache->cache); + dst_cache_per_cpu_dst_set(idst, dst, 0); + idst->in_saddr.s_addr = saddr; +} + +#if IS_ENABLED(CONFIG_IPV6) +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr) +{ + struct dst_cache_pcpu *idst; + + if (!dst_cache->cache) + return; + + idst = this_cpu_ptr(dst_cache->cache); + dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst, + rt6_get_cookie((struct rt6_info *)dst)); + idst->in6_saddr = *addr; +} + +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr) +{ + struct dst_cache_pcpu *idst; + struct dst_entry *dst; + + if (!dst_cache->cache) + return NULL; + + idst = this_cpu_ptr(dst_cache->cache); + dst = dst_cache_per_cpu_get(dst_cache, idst); + if (!dst) + return NULL; + + *saddr = idst->in6_saddr; + return dst; +} +#endif + +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) + BUG_ON(gfp & GFP_ATOMIC); + dst_cache->cache = alloc_percpu(struct dst_cache_pcpu); +#else + dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu, + gfp | __GFP_ZERO); +#endif + if (!dst_cache->cache) + return -ENOMEM; + + dst_cache_reset(dst_cache); + return 0; +} + +void dst_cache_destroy(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + for_each_possible_cpu(i) + dst_release(per_cpu_ptr(dst_cache->cache, i)->dst); + + free_percpu(dst_cache->cache); +} diff --git a/net/wireguard/compat/dst_cache/include/net/dst_cache.h b/net/wireguard/compat/dst_cache/include/net/dst_cache.h new file mode 100644 index 000000000000..48021c0d6be1 --- /dev/null +++ 
b/net/wireguard/compat/dst_cache/include/net/dst_cache.h @@ -0,0 +1,97 @@ +#ifndef _WG_NET_DST_CACHE_H +#define _WG_NET_DST_CACHE_H + +#include <linux/jiffies.h> +#include <net/dst.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_fib.h> +#endif + +struct dst_cache { + struct dst_cache_pcpu __percpu *cache; + unsigned long reset_ts; +}; + +/** + * dst_cache_get - perform cache lookup + * @dst_cache: the cache + * + * The caller should use dst_cache_get_ip4() if it needs to retrieve the + * source address to be used when xmitting to the cached dst. + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache); + +/** + * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr); + +/** + * dst_cache_set_ip4 - store the ipv4 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr); + +#if IS_ENABLED(CONFIG_IPV6) + +/** + * dst_cache_set_ip6 - store the ipv6 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr); + +/** + * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr); +#endif + +/** + * dst_cache_reset - invalidate the cache contents + * @dst_cache: the cache + * + * This does not free the cached dst to avoid races and contentions. + * The dst will be freed on later cache lookup. + */ +static inline void dst_cache_reset(struct dst_cache *dst_cache) +{ + dst_cache->reset_ts = jiffies; +} + +/** + * dst_cache_init - initialize the cache, allocating the required storage + * @dst_cache: the cache + * @gfp: allocation flags + */ +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp); + +/** + * dst_cache_destroy - empty the cache and free the allocated storage + * @dst_cache: the cache + * + * No synchronization is enforced: it must be called only when the cache + * is unused. 
+ */ +void dst_cache_destroy(struct dst_cache *dst_cache); + +#endif /* _WG_NET_DST_CACHE_H */ diff --git a/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h b/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h new file mode 100644 index 000000000000..995094d4f099 --- /dev/null +++ b/net/wireguard/compat/dstmetadata/include/net/dst_metadata.h @@ -0,0 +1,3 @@ +#ifndef skb_valid_dst +#define skb_valid_dst(skb) (!!skb_dst(skb)) +#endif diff --git a/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h b/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h new file mode 100644 index 000000000000..f3f9117bcb88 --- /dev/null +++ b/net/wireguard/compat/fpu-x86/include/asm/fpu/api.h @@ -0,0 +1 @@ +#include <asm/i387.h> diff --git a/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h b/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h new file mode 100644 index 000000000000..35a6bc4da8ad --- /dev/null +++ b/net/wireguard/compat/intel-family-x86/include/asm/intel-family.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INTEL_FAMILY_H +#define _ASM_X86_INTEL_FAMILY_H + +/* + * "Big Core" Processors (Branded as Core, Xeon, etc...) + * + * The "_X" parts are generally the EP and EX Xeons, or the + * "Extreme" ones, like Broadwell-E. + * + * Things ending in "2" are usually because we have no better + * name for them. There's no processor called "SILVERMONT2". + */ + +#define INTEL_FAM6_CORE_YONAH 0x0E + +#define INTEL_FAM6_CORE2_MEROM 0x0F +#define INTEL_FAM6_CORE2_MEROM_L 0x16 +#define INTEL_FAM6_CORE2_PENRYN 0x17 +#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D + +#define INTEL_FAM6_NEHALEM 0x1E +#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ +#define INTEL_FAM6_NEHALEM_EP 0x1A +#define INTEL_FAM6_NEHALEM_EX 0x2E + +#define INTEL_FAM6_WESTMERE 0x25 +#define INTEL_FAM6_WESTMERE_EP 0x2C +#define INTEL_FAM6_WESTMERE_EX 0x2F + +#define INTEL_FAM6_SANDYBRIDGE 0x2A +#define INTEL_FAM6_SANDYBRIDGE_X 0x2D +#define INTEL_FAM6_IVYBRIDGE 0x3A +#define INTEL_FAM6_IVYBRIDGE_X 0x3E + +#define INTEL_FAM6_HASWELL_CORE 0x3C +#define INTEL_FAM6_HASWELL_X 0x3F +#define INTEL_FAM6_HASWELL_ULT 0x45 +#define INTEL_FAM6_HASWELL_GT3E 0x46 + +#define INTEL_FAM6_BROADWELL_CORE 0x3D +#define INTEL_FAM6_BROADWELL_GT3E 0x47 +#define INTEL_FAM6_BROADWELL_X 0x4F +#define INTEL_FAM6_BROADWELL_XEON_D 0x56 + +#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E +#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E +#define INTEL_FAM6_SKYLAKE_X 0x55 +#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E +#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E + +/* "Small Core" Processors (Atom) */ + +#define INTEL_FAM6_ATOM_PINEVIEW 0x1C +#define INTEL_FAM6_ATOM_LINCROFT 0x26 +#define INTEL_FAM6_ATOM_PENWELL 0x27 +#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 +#define INTEL_FAM6_ATOM_CEDARVIEW 0x36 +#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ +#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ +#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ +#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ +#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ +#define INTEL_FAM6_ATOM_GOLDMONT 0x5C +#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ +#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A + +/* Xeon Phi */ + +#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ +#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ + +#endif /* _ASM_X86_INTEL_FAMILY_H */ diff --git a/net/wireguard/compat/memneq/include.h b/net/wireguard/compat/memneq/include.h 
new file mode 100644 index 000000000000..2d18acd9b6c8 --- /dev/null +++ b/net/wireguard/compat/memneq/include.h @@ -0,0 +1,5 @@ +extern noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} diff --git a/net/wireguard/compat/memneq/memneq.c b/net/wireguard/compat/memneq/memneq.c new file mode 100644 index 000000000000..1c427d405537 --- /dev/null +++ b/net/wireguard/compat/memneq/memneq.c @@ -0,0 +1,170 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan <james@openvpn.net> + * Daniel Borkmann <dborkman@redhat.com> + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <crypto/algapi.h> + +/* Make the optimizer believe the variable can be manipulated arbitrarily. 
*/ +#define COMPILER_OPTIMIZER_HIDE_VAR(var) asm("" : "=r" (var) : "0" (var)) + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= *(unsigned long *)a ^ *(unsigned long *)b; + COMPILER_OPTIMIZER_HIDE_VAR(neq); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + COMPILER_OPTIMIZER_HIDE_VAR(neq); + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ + unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) { + neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } else if (sizeof(unsigned int) == 4) { + neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); + COMPILER_OPTIMIZER_HIDE_VAR(neq); + } + + return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/net/wireguard/compat/neon-arm/include/asm/neon.h b/net/wireguard/compat/neon-arm/include/asm/neon.h new file mode 100644 index 000000000000..980d831e201a --- /dev/null +++ b/net/wireguard/compat/neon-arm/include/asm/neon.h @@ -0,0 +1,7 @@ +#ifndef _ARCH_ARM_ASM_NEON +#define _ARCH_ARM_ASM_NEON +#define kernel_neon_begin() \ + BUILD_BUG_ON_MSG(1, "This kernel does not support ARM NEON") +#define kernel_neon_end() \ + BUILD_BUG_ON_MSG(1, "This kernel does not support ARM NEON") +#endif diff --git a/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h b/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h new file mode 100644 index 000000000000..9d514bac1388 --- /dev/null +++ b/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Definitions for the 'struct ptr_ring' datastructure. + * + * Author: + * Michael S. Tsirkin <mst@redhat.com> + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This is a limited-size FIFO maintaining pointers in FIFO order, with + * one CPU producing entries and another consuming entries from a FIFO. + * + * This implementation tries to minimize cache-contention when there is a + * single producer and a single consumer CPU. + */ + +#ifndef _LINUX_PTR_RING_H +#define _LINUX_PTR_RING_H 1 + +#ifdef __KERNEL__ +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <asm/errno.h> +#endif + +struct ptr_ring { + int producer ____cacheline_aligned_in_smp; + spinlock_t producer_lock; + int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ + int consumer_tail; /* next entry to invalidate */ + spinlock_t consumer_lock; + /* Shared consumer/producer data */ + /* Read-only by both the producer and the consumer */ + int size ____cacheline_aligned_in_smp; /* max entries in queue */ + int batch; /* number of entries to consume in a batch */ + void **queue; +}; + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + * + * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: + * see e.g. ptr_ring_full. + */ +static inline bool __ptr_ring_full(struct ptr_ring *r) +{ + return r->queue[r->producer]; +} + +static inline bool ptr_ring_full(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_full(r); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_full_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). 
Callers must hold producer_lock. + * Callers are responsible for making sure the pointer that is being queued + * points to valid data. + */ +static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + /* Make sure the pointer we are storing points to valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + + WRITE_ONCE(r->queue[r->producer++], ptr); + if (unlikely(r->producer >= r->size)) + r->producer = 0; + return 0; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. + */ +static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +static inline void *__ptr_ring_peek(struct ptr_ring *r) +{ + if (likely(r->size)) + return READ_ONCE(r->queue[r->consumer_head]); + return NULL; +} + +/* + * Test ring empty status without taking any locks. + * + * NB: This is only safe to call if ring is never resized. + * + * However, if some other CPU consumes ring entries at the same time, the value + * returned is not guaranteed to be correct. + * + * In this case - to avoid incorrectly detecting the ring + * as empty - the CPU consuming the ring entries is responsible + * for either consuming all ring entries until the ring is empty, + * or synchronizing with some other CPU and causing it to + * re-test __ptr_ring_empty and/or consume the ring entries + * after the synchronization point. + * + * Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). 
+ */ +static inline bool __ptr_ring_empty(struct ptr_ring *r) +{ + if (likely(r->size)) + return !r->queue[READ_ONCE(r->consumer_head)]; + return true; +} + +static inline bool ptr_ring_empty(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_empty(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_empty_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r) +{ + /* Fundamentally, what we want to do is update consumer + * index and zero out the entry so producer can reuse it. + * Doing it naively at each consume would be as simple as: + * consumer = r->consumer; + * r->queue[consumer++] = NULL; + * if (unlikely(consumer >= r->size)) + * consumer = 0; + * r->consumer = consumer; + * but that is suboptimal when the ring is full as producer is writing + * out new entries in the same cache line. Defer these updates until a + * batch of entries has been consumed. + */ + /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty + * to work correctly. + */ + int consumer_head = r->consumer_head; + int head = consumer_head++; + + /* Once we have processed enough entries invalidate them in + * the ring all at once so producer can reuse their space in the ring. + * We also do this when we reach end of the ring - not mandatory + * but helps keep the implementation simple. + */ + if (unlikely(consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size)) { + /* Zero out entries in the reverse order: this way we touch the + * cache line that producer might currently be reading the last; + * producer won't make progress and touch other cache lines + * besides the first one until we write out all entries. + */ + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = consumer_head; + } + if (unlikely(consumer_head >= r->size)) { + consumer_head = 0; + r->consumer_tail = 0; + } + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, consumer_head); +} + +static inline void *__ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + ptr = __ptr_ring_peek(r); + if (ptr) + __ptr_ring_discard_one(r); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + /* The READ_ONCE in __ptr_ring_peek doesn't imply a barrier on old kernels. */ + smp_read_barrier_depends(); +#endif + + return ptr; +} + +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. 
+ */ +static inline void *ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + spin_lock(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_irq(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_irq(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_irq(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_any(struct ptr_ring *r) +{ + unsigned long flags; + void *ptr; + + spin_lock_irqsave(&r->consumer_lock, flags); + ptr = __ptr_ring_consume(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ptr; +} + +static inline void *ptr_ring_consume_bh(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_bh(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_bh(&r->consumer_lock); + + return ptr; +} + +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Cast to structure type and call a function without discarding from FIFO. + * Function must return a value. + * Callers must take consumer_lock. + */ +#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) + +#define PTR_RING_PEEK_CALL(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + unsigned long __PTR_RING_PEEK_CALL_f;\ + \ + spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See + * documentation for vmalloc for which of them are legal. 
+ */ +static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +{ + if (size > KMALLOC_MAX_SIZE / sizeof(void *)) + return NULL; + return kvmalloc(size * sizeof(void *), gfp | __GFP_ZERO); +} + +static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) +{ + r->size = size; + r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); + /* We need to set batch at least to 1 to make logic + * in __ptr_ring_discard_one work correctly. + * Batching too much (because ring is small) would cause a lot of + * burstiness. Needs tuning, for now disable batching. + */ + if (r->batch > r->size / 2 || !r->batch) + r->batch = 1; +} + +static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +{ + r->queue = __ptr_ring_init_queue_alloc(size, gfp); + if (!r->queue) + return -ENOMEM; + + __ptr_ring_set_size(r, size); + r->producer = r->consumer_head = r->consumer_tail = 0; + spin_lock_init(&r->producer_lock); + spin_lock_init(&r->consumer_lock); + + return 0; +} + +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. + */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. */ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, head); + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + +static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, + int size, gfp_t gfp, + void (*destroy)(void *)) +{ + int producer = 0; + void **old; + void *ptr; + + while ((ptr = __ptr_ring_consume(r))) + if (producer < size) + queue[producer++] = ptr; + else if (destroy) + destroy(ptr); + + if (producer >= size) + producer = 0; + __ptr_ring_set_size(r, size); + r->producer = producer; + r->consumer_head = 0; + r->consumer_tail = 0; + old = r->queue; + r->queue = queue; + + return old; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. 
+ */ +static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + void (*destroy)(void *)) +{ + unsigned long flags; + void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **old; + + if (!queue) + return -ENOMEM; + + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); + + old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); + + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); + + kvfree(old); + + return 0; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, + unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) +{ + unsigned long flags; + void ***queues; + int i; + + queues = kmalloc_array(nrings, sizeof(*queues), gfp); + if (!queues) + goto noqueues; + + for (i = 0; i < nrings; ++i) { + queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + if (!queues[i]) + goto nomem; + } + + for (i = 0; i < nrings; ++i) { + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); + queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], + size, gfp, destroy); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); + } + + for (i = 0; i < nrings; ++i) + kvfree(queues[i]); + + kfree(queues); + + return 0; + +nomem: + while (--i >= 0) + kvfree(queues[i]); + + kfree(queues); + +noqueues: + return -ENOMEM; +} + +static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) +{ + void *ptr; + + if (destroy) + while ((ptr = ptr_ring_consume(r))) + destroy(ptr); + kvfree(r->queue); +} + +#endif /* _LINUX_PTR_RING_H */ diff --git a/net/wireguard/compat/simd-asm/include/asm/simd.h b/net/wireguard/compat/simd-asm/include/asm/simd.h new file mode 100644 index 000000000000..a975b38b5578 --- /dev/null +++ b/net/wireguard/compat/simd-asm/include/asm/simd.h @@ -0,0 +1,21 @@ +#ifndef _COMPAT_ASM_SIMD_H +#define _COMPAT_ASM_SIMD_H + +#if defined(CONFIG_X86_64) +#include <asm/fpu/api.h> +#endif + +static __must_check inline bool may_use_simd(void) +{ +#if defined(CONFIG_X86_64) + return irq_fpu_usable(); +#elif defined(CONFIG_ARM64) && defined(CONFIG_KERNEL_MODE_NEON) + return true; +#elif defined(CONFIG_ARM) && defined(CONFIG_KERNEL_MODE_NEON) + return !in_nmi() && !in_irq() && !in_serving_softirq(); +#else + return false; +#endif +} + +#endif diff --git a/net/wireguard/compat/simd/include/linux/simd.h b/net/wireguard/compat/simd/include/linux/simd.h new file mode 100644 index 000000000000..e7f2550320c7 --- /dev/null +++ b/net/wireguard/compat/simd/include/linux/simd.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _WG_SIMD_H +#define _WG_SIMD_H + +#include <linux/sched.h> +#include <asm/simd.h> +#if defined(CONFIG_X86_64) +#include <asm/fpu/api.h> +#elif defined(CONFIG_KERNEL_MODE_NEON) +#include <asm/neon.h> +#endif + +typedef enum { + HAVE_NO_SIMD = 1 << 0, + HAVE_FULL_SIMD = 1 << 1, + HAVE_SIMD_IN_USE = 1 << 31 +} simd_context_t; + +#define DONT_USE_SIMD ((simd_context_t []){ HAVE_NO_SIMD }) + +static inline void simd_get(simd_context_t *ctx) +{ + *ctx = !IS_ENABLED(CONFIG_PREEMPT_RT) && !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && may_use_simd() ? HAVE_FULL_SIMD : HAVE_NO_SIMD; +} + +static inline void simd_put(simd_context_t *ctx) +{ +#if defined(CONFIG_X86_64) + if (*ctx & HAVE_SIMD_IN_USE) + kernel_fpu_end(); +#elif defined(CONFIG_KERNEL_MODE_NEON) + if (*ctx & HAVE_SIMD_IN_USE) + kernel_neon_end(); +#endif + *ctx = HAVE_NO_SIMD; +} + +static inline bool simd_relax(simd_context_t *ctx) +{ +#ifdef CONFIG_PREEMPT + if ((*ctx & HAVE_SIMD_IN_USE) && need_resched()) { + simd_put(ctx); + simd_get(ctx); + return true; + } +#endif + return false; +} + +static __must_check inline bool simd_use(simd_context_t *ctx) +{ + if (!(*ctx & HAVE_FULL_SIMD)) + return false; + if (*ctx & HAVE_SIMD_IN_USE) + return true; +#if defined(CONFIG_X86_64) + kernel_fpu_begin(); +#elif defined(CONFIG_KERNEL_MODE_NEON) + kernel_neon_begin(); +#endif + *ctx |= HAVE_SIMD_IN_USE; + return true; +} + +#endif /* _WG_SIMD_H */ diff --git a/net/wireguard/compat/siphash/include/linux/siphash.h b/net/wireguard/compat/siphash/include/linux/siphash.h new file mode 100644 index 000000000000..3b30b3c47778 --- /dev/null +++ b/net/wireguard/compat/siphash/include/linux/siphash.h @@ -0,0 +1,134 @@ +/* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. 
+ */ + +#ifndef _WG_LINUX_SIPHASH_H +#define _WG_LINUX_SIPHASH_H + +#include <linux/types.h> +#include <linux/kernel.h> + +#define SIPHASH_ALIGNMENT __alignof__(u64) +typedef struct { + u64 key[2]; +} siphash_key_t; + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(le32_to_cpup((const __le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64(le64_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), le64_to_cpu(data[3]), + key); + return __siphash_aligned(data, len, key); +} + +/** + * siphash - compute 64-bit siphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the siphash key + */ +static inline u64 siphash(const void *data, size_t len, + const siphash_key_t *key) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); + return ___siphash_aligned(data, len, key); +} + +#define HSIPHASH_ALIGNMENT __alignof__(unsigned long) +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32(le32_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32(le32_to_cpu(data[0]), 
le32_to_cpu(data[1]), + le32_to_cpu(data[2]), le32_to_cpu(data[3]), + key); + return __hsiphash_aligned(data, len, key); +} + +/** + * hsiphash - compute 32-bit hsiphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the hsiphash key + */ +static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return __hsiphash_unaligned(data, len, key); + return ___hsiphash_aligned(data, len, key); +} + +#endif /* _WG_LINUX_SIPHASH_H */ diff --git a/net/wireguard/compat/siphash/siphash.c b/net/wireguard/compat/siphash/siphash.c new file mode 100644 index 000000000000..7dc72cb4a710 --- /dev/null +++ b/net/wireguard/compat/siphash/siphash.c @@ -0,0 +1,539 @@ +/* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. + */ + +#include <linux/siphash.h> +#include <asm/unaligned.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) +#ifdef __LITTLE_ENDIAN +#define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) +#else +#define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) +#endif +#endif + +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 +#include <linux/dcache.h> +#include <asm/word-at-a-time.h> +#endif + +#define SIPROUND \ + do { \ + v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ + v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ + v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ + v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ + } while (0) + +#define PREAMBLE(len) \ + u64 v0 = 0x736f6d6570736575ULL; \ + u64 v1 = 0x646f72616e646f6dULL; \ + u64 v2 = 0x6c7967656e657261ULL; \ + u64 v3 = 0x7465646279746573ULL; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} +#endif + +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + PREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; 
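+		/* two SipRound compression rounds per 64-bit word: the "2" in SipHash-2-4 */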
+ SIPROUND; + SIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + POSTAMBLE +} + +/** + * siphash_1u64 - compute 64-bit siphash PRF value of a u64 + * @first: first u64 + * @key: the siphash key + */ +u64 siphash_1u64(const u64 first, const siphash_key_t *key) +{ + PREAMBLE(8) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + POSTAMBLE +} + +/** + * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 + * @first: first u64 + * @second: second u64 + * @key: the siphash key + */ +u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} + +/** + * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @key: the siphash key + */ +u64 siphash_3u64(const u64 first, const u64 second, const u64 third, + const siphash_key_t *key) +{ + PREAMBLE(24) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + POSTAMBLE +} + +/** + * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 + * @first: first u64 + * @second: second u64 + * @third: third u64 + * @forth: forth u64 + * @key: the siphash key + */ +u64 siphash_4u64(const u64 first, const u64 second, const u64 third, + const u64 forth, const siphash_key_t *key) +{ + PREAMBLE(32) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + v3 ^= third; + SIPROUND; + SIPROUND; + v0 ^= third; + v3 ^= forth; + SIPROUND; + SIPROUND; + v0 ^= forth; + POSTAMBLE +} + +u64 siphash_1u32(const u32 first, const siphash_key_t *key) +{ + PREAMBLE(4) + b |= first; + POSTAMBLE +} + +u64 siphash_3u32(const u32 first, const u32 second, const u32 third, + const siphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + PREAMBLE(12) + v3 ^= combined; + SIPROUND; + SIPROUND; + v0 ^= combined; + b |= third; + POSTAMBLE +} + +#if BITS_PER_LONG == 64 +/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for + * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. 
+ */ + +#define HSIPROUND SIPROUND +#define HPREAMBLE(len) PREAMBLE(len) +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = le64_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= le32_to_cpup(data); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} +#endif + +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u64)); + const u8 left = len & (sizeof(u64) - 1); + u64 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u64)) { + m = get_unaligned_le64(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } +#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 + if (left) + b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & + bytemask_from_count(left))); +#else + switch (left) { + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 4: b |= get_unaligned_le32(end); break; + case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } +#endif + HPOSTAMBLE +} + +/** + * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + b |= first; + HPOSTAMBLE +} + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(8) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(12) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + b |= third; + HPOSTAMBLE +} + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + u64 combined = (u64)second << 32 | first; + HPREAMBLE(16) + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + combined = (u64)forth << 32 | third; + v3 ^= combined; + HSIPROUND; + v0 ^= combined; + HPOSTAMBLE +} +#else +#define HSIPROUND \ + 
do { \ + v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ + v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ + v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ + v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ + } while (0) + +#define HPREAMBLE(len) \ + u32 v0 = 0; \ + u32 v1 = 0; \ + u32 v2 = 0x6c796765U; \ + u32 v3 = 0x74656462U; \ + u32 b = ((u32)(len)) << 24; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define HPOSTAMBLE \ + v3 ^= b; \ + HSIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + HSIPROUND; \ + HSIPROUND; \ + HSIPROUND; \ + return v1 ^ v3; + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = le32_to_cpup(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; fallthrough; + case 2: b |= le16_to_cpup(data); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} +#endif + +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) +{ + const u8 *end = data + len - (len % sizeof(u32)); + const u8 left = len & (sizeof(u32) - 1); + u32 m; + HPREAMBLE(len) + for (; data != end; data += sizeof(u32)) { + m = get_unaligned_le32(data); + v3 ^= m; + HSIPROUND; + v0 ^= m; + } + switch (left) { + case 3: b |= ((u32)end[2]) << 16; fallthrough; + case 2: b |= get_unaligned_le16(end); break; + case 1: b |= end[0]; + } + HPOSTAMBLE +} + +/** + * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 + * @first: first u32 + * @key: the hsiphash key + */ +u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) +{ + HPREAMBLE(4) + v3 ^= first; + HSIPROUND; + v0 ^= first; + HPOSTAMBLE +} + +/** + * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 + * @first: first u32 + * @second: second u32 + * @key: the hsiphash key + */ +u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) +{ + HPREAMBLE(8) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + HPOSTAMBLE +} + +/** + * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @key: the hsiphash key + */ +u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, + const hsiphash_key_t *key) +{ + HPREAMBLE(12) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + HPOSTAMBLE +} + +/** + * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 + * @first: first u32 + * @second: second u32 + * @third: third u32 + * @forth: forth u32 + * @key: the hsiphash key + */ +u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, + const u32 forth, const hsiphash_key_t *key) +{ + HPREAMBLE(16) + v3 ^= first; + HSIPROUND; + v0 ^= first; + v3 ^= second; + HSIPROUND; + v0 ^= second; + v3 ^= third; + HSIPROUND; + v0 ^= third; + v3 ^= forth; + HSIPROUND; + v0 ^= forth; + HPOSTAMBLE +} +#endif diff --git a/net/wireguard/compat/skb_array/include/linux/skb_array.h b/net/wireguard/compat/skb_array/include/linux/skb_array.h new file mode 100644 index 000000000000..c91fedcdbfc6 --- /dev/null +++ b/net/wireguard/compat/skb_array/include/linux/skb_array.h @@ -0,0 +1,11 @@ +#ifndef _WG_SKB_ARRAY_H +#define _WG_SKB_ARRAY_H + +#include <linux/skbuff.h> 
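+
+/*
+ * This compat header only supplies a destroy callback with the
+ * void (*)(void *) signature expected by the ptr_ring helpers, e.g.
+ * (illustrative only) ptr_ring_cleanup(&ring, __skb_array_destroy_skb)
+ * to kfree_skb() anything still queued.
+ */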
+ +static void __skb_array_destroy_skb(void *ptr) +{ + kfree_skb(ptr); +} + +#endif diff --git a/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h b/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h new file mode 100644 index 000000000000..8999527d6952 --- /dev/null +++ b/net/wireguard/compat/udp_tunnel/include/net/udp_tunnel.h @@ -0,0 +1,94 @@ +#ifndef _WG_NET_UDP_TUNNEL_H +#define _WG_NET_UDP_TUNNEL_H + +#include <net/ip_tunnels.h> +#include <net/udp.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ipv6.h> +#include <net/addrconf.h> +#endif + +struct udp_port_cfg { + u8 family; + + /* Used only for kernel-created sockets */ + union { + struct in_addr local_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr local_ip6; +#endif + }; + + union { + struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr peer_ip6; +#endif + }; + + __be16 local_udp_port; + __be16 peer_udp_port; + unsigned int use_udp_checksums:1, + use_udp6_tx_checksums:1, + use_udp6_rx_checksums:1, + ipv6_v6only:1; +}; + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); +#else +static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + return 0; +} +#endif + +static inline int udp_sock_create(struct net *net, + struct udp_port_cfg *cfg, + struct socket **sockp) +{ + if (cfg->family == AF_INET) + return udp_sock_create4(net, cfg, sockp); + + if (cfg->family == AF_INET6) + return udp_sock_create6(net, cfg, sockp); + + return -EPFNOSUPPORT; +} + +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); + +struct udp_tunnel_sock_cfg { + void *sk_user_data; + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; +}; + +/* Setup the given (UDP) sock to receive UDP encapsulated packets */ +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *sock_cfg); + +/* Transmit the skb using UDP encapsulation. */ +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, + struct net_device *dev, struct in6_addr *saddr, + struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck); +#endif + +void udp_tunnel_sock_release(struct socket *sock); + +#endif /* _WG_NET_UDP_TUNNEL_H */ diff --git a/net/wireguard/compat/udp_tunnel/udp_tunnel.c b/net/wireguard/compat/udp_tunnel/udp_tunnel.c new file mode 100644 index 000000000000..d287b917be84 --- /dev/null +++ b/net/wireguard/compat/udp_tunnel/udp_tunnel.c @@ -0,0 +1,396 @@ +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/socket.h> +#include <linux/udp.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <net/net_namespace.h> +#include <net/inet_common.h> +#include <net/udp.h> +#include <net/udp_tunnel.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) +#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) +#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) +#endif + +/* This is global so, uh, only one real call site... 
This is the kind of horrific hack you'd expect to see in compat code. */ +static udp_tunnel_encap_rcv_t encap_rcv = NULL; +static void __compat_sk_data_ready(struct sock *sk +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) + ,int unused_vulnerable_length_param +#endif + ) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + skb_orphan(skb); + sk_mem_reclaim(sk); + encap_rcv(sk, skb); + } +} + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + int err; + struct socket *sock = NULL; + struct sockaddr_in udp_addr; + + err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + sk_change_net(sock->sk, net); + + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = cfg->peer_udp_port; + err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr), 0); + if (err < 0) + goto error; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) + sock->sk->sk_no_check = !cfg->use_udp_checksums; +#else + sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; +#endif + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); + } + *sockp = NULL; + return err; +} + +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + inet_sk(sock->sk)->mc_loop = 0; + encap_rcv = cfg->encap_rcv; + rcu_assign_sk_user_data(sock->sk, cfg->sk_user_data); + /* We force the cast in this awful way, due to various Android kernels + * backporting things stupidly. 
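+	 * The sk_data_ready() callback takes an extra length argument on some
+	 * of those trees (see __compat_sk_data_ready() above), so a plain
+	 * function pointer assignment would not compile everywhere.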
*/ + *(void **)&sock->sk->sk_data_ready = (void *)__compat_sk_data_ready; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) +static inline __sum16 udp_v4_check(int len, __be32 saddr, + __be32 daddr, __wsum base) +{ + return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base); +} + +static void udp_set_csum(bool nocheck, struct sk_buff *skb, + __be32 saddr, __be32 daddr, int len) +{ + struct udphdr *uh = udp_hdr(skb); + + if (nocheck) + uh->check = 0; + else if (skb_is_gso(skb)) + uh->check = ~udp_v4_check(len, saddr, daddr, 0); + else if (skb_dst(skb) && skb_dst(skb)->dev && + (skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~udp_v4_check(len, saddr, daddr, 0); + } else { + __wsum csum; + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + uh->check = 0; + csum = skb_checksum(skb, 0, len, 0); + uh->check = udp_v4_check(len, saddr, daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +#endif + +static void __compat_fake_destructor(struct sk_buff *skb) +{ +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) +static void __compat_iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 proto, + __u8 tos, __u8 ttl, __be16 df, bool xnet) +{ + struct iphdr *iph; + struct pcpu_tstats *tstats = this_cpu_ptr(skb->dev->tstats); + + skb_scrub_packet(skb, xnet); + + skb->rxhash = 0; + skb_dst_set(skb, &rt->dst); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + + /* Push down and install the IP header. */ + skb_push(skb, sizeof(struct iphdr)); + skb_reset_network_header(skb); + + iph = ip_hdr(skb); + + iph->version = 4; + iph->ihl = sizeof(struct iphdr) >> 2; + iph->frag_off = df; + iph->protocol = proto; + iph->tos = tos; + iph->daddr = dst; + iph->saddr = src; + iph->ttl = ttl; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 53) + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); +#else + __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1); +#endif + + iptunnel_xmit(skb, skb->dev); + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes -= 8; + u64_stats_update_end(&tstats->syncp); +} +#define iptunnel_xmit __compat_iptunnel_xmit +#endif + +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck) +{ + struct udphdr *uh; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + struct net_device *dev = skb->dev; + int ret; +#endif + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + + udp_set_csum(nocheck, skb, src, dst, skb->len); + + if (!skb->sk) + skb->sk = sk; + if (!skb->destructor) + skb->destructor = __compat_fake_destructor; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + ret = +#endif + iptunnel_xmit( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + sk, +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + dev_net(dev), +#endif + rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) + 
, xnet +#endif + ); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) + if (ret) + iptunnel_xmit_stats(ret - 8, &dev->stats, dev->tstats); +#endif +} + +void udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); +} + +#if IS_ENABLED(CONFIG_IPV6) +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/socket.h> +#include <linux/udp.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/in6.h> +#include <net/udp.h> +#include <net/udp_tunnel.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> +#include <net/ip6_tunnel.h> +#include <net/ip6_checksum.h> + +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + struct sockaddr_in6 udp6_addr; + int err; + struct socket *sock = NULL; + + err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + sk_change_net(sock->sk, net); + + if (cfg->ipv6_v6only) { + int val = 1; + + err = kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, + (char *) &val, sizeof(val)); + if (err < 0) + goto error; + } + + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->peer_udp_port; + err = kernel_connect(sock, + (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr), 0); + } + if (err < 0) + goto error; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) + sock->sk->sk_no_check = !cfg->use_udp_checksums; +#else + udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums); + udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums); +#endif + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); + } + *sockp = NULL; + return err; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) +static inline __sum16 udp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) +{ + return csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, base); +} +static void udp6_set_csum(bool nocheck, struct sk_buff *skb, + const struct in6_addr *saddr, + const struct in6_addr *daddr, int len) +{ + struct udphdr *uh = udp_hdr(skb); + + if (nocheck) + uh->check = 0; + else if (skb_is_gso(skb)) + uh->check = ~udp_v6_check(len, saddr, daddr, 0); + else if (skb_dst(skb) && skb_dst(skb)->dev && + (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) { + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~udp_v6_check(len, saddr, daddr, 0); + } else { + __wsum csum; + + BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); + + uh->check = 0; + csum = skb_checksum(skb, 0, len, 0); + uh->check = udp_v6_check(len, saddr, daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} +#endif + +int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, + struct net_device *dev, struct in6_addr *saddr, + struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be32 label, + 
__be16 src_port, __be16 dst_port, bool nocheck) +{ + struct udphdr *uh; + struct ipv6hdr *ip6h; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + + uh->len = htons(skb->len); + + skb_dst_set(skb, dst); + + udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); + + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6_flow_hdr(ip6h, prio, label); + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + if (!skb->sk) + skb->sk = sk; + if (!skb->destructor) + skb->destructor = __compat_fake_destructor; + + ip6tunnel_xmit(skb, dev); + return 0; +} +#endif diff --git a/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h b/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h new file mode 100644 index 000000000000..0605896e902f --- /dev/null +++ b/net/wireguard/compat/udp_tunnel/udp_tunnel_partial_compat.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) +#define udp_sock_create4 udp_sock_create +#define udp_sock_create6 udp_sock_create +#include <linux/socket.h> +#include <linux/if.h> +#include <linux/in.h> +#include <net/ip_tunnels.h> +#include <net/udp.h> +#include <net/inet_common.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <linux/in6.h> +#include <net/ipv6.h> +#include <net/addrconf.h> +#include <net/ip6_checksum.h> +#include <net/ip6_tunnel.h> +#endif +static inline void __compat_fake_destructor(struct sk_buff *skb) +{ +} +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); +struct udp_tunnel_sock_cfg { + void *sk_user_data; + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; +}; +/* This is global so, uh, only one real call site... This is the kind of horrific hack you'd expect to see in compat code. 
*/ +static udp_tunnel_encap_rcv_t encap_rcv = NULL; +static void __compat_sk_data_ready(struct sock *sk) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + skb_orphan(skb); + sk_mem_reclaim(sk); + encap_rcv(sk, skb); + } +} +static inline void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + struct sock *sk = sock->sk; + inet_sk(sk)->mc_loop = 0; + encap_rcv = cfg->encap_rcv; + rcu_assign_sk_user_data(sk, cfg->sk_user_data); + sk->sk_data_ready = __compat_sk_data_ready; +} +static inline void udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); +} +static inline int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet) +{ + struct udphdr *uh; + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len); + return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP, + tos, ttl, df, xnet); +} +#if IS_ENABLED(CONFIG_IPV6) +static inline int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, + __be16 dst_port) +{ + struct udphdr *uh; + struct ipv6hdr *ip6h; + struct sock *sk = sock->sk; + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED + | IPSKB_REROUTED); + skb_dst_set(skb, dst); + udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, + &sk->sk_v6_daddr, skb->len); + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6_flow_hdr(ip6h, prio, htonl(0)); + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + ip6tunnel_xmit(skb, dev); + return 0; +} +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/udp.h> +#include <linux/skbuff.h> +#include <linux/if.h> +#include <net/udp_tunnel.h> +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__; ret__ = udp_tunnel_xmit_skb((b)->sk_socket, a, c, d, e, f, g, h, i, j, k); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0) +#if IS_ENABLED(CONFIG_IPV6) +#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) udp_tunnel6_xmit_skb((b)->sk_socket, a, c, d, e, f, g, h, j, k); +#endif +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include <linux/if.h> +#include <net/udp_tunnel.h> +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) +static inline void __compat_fake_destructor(struct sk_buff *skb) +{ +} +#endif +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__; if (!(c)->destructor) 
(c)->destructor = __compat_fake_destructor; if (!(c)->sk) (c)->sk = (b); ret__ = udp_tunnel_xmit_skb(a, c, d, e, f, g, h, i, j, k, l); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0) +#if IS_ENABLED(CONFIG_IPV6) +#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { if (!(c)->destructor) (c)->destructor = __compat_fake_destructor; if (!(c)->sk) (c)->sk = (b); udp_tunnel6_xmit_skb(a, c, d, e, f, g, h, j, k, l); } while(0) +#endif +#else + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include <linux/if.h> +#include <net/udp_tunnel.h> +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__ = udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l); if (ret__) iptunnel_xmit_stats(ret__ - 8, &dev__->stats, dev__->tstats); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0) +#include <linux/if.h> +#include <net/udp_tunnel.h> +#define udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) do { struct net_device *dev__ = (c)->dev; int ret__ = udp_tunnel_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l); iptunnel_xmit_stats(ret__, &dev__->stats, dev__->tstats); } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && IS_ENABLED(CONFIG_IPV6) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include <linux/if.h> +#include <net/udp_tunnel.h> +#define udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, i, j, k, l) udp_tunnel6_xmit_skb(a, b, c, d, e, f, g, h, j, k, l) +#endif + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include <linux/skbuff.h> +#include <linux/if.h> +#include <net/udp_tunnel.h> +struct __compat_udp_port_cfg { + u8 family; + union { + struct in_addr local_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr local_ip6; +#endif + }; + union { + struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr peer_ip6; +#endif + }; + __be16 local_udp_port; + __be16 peer_udp_port; + unsigned int use_udp_checksums:1, use_udp6_tx_checksums:1, use_udp6_rx_checksums:1, ipv6_v6only:1; +}; +static inline int __maybe_unused __compat_udp_sock_create(struct net *net, struct __compat_udp_port_cfg *cfg, struct socket **sockp) +{ + struct udp_port_cfg old_cfg = { + .family = cfg->family, + .local_ip = cfg->local_ip, +#if IS_ENABLED(CONFIG_IPV6) + .local_ip6 = cfg->local_ip6, +#endif + .peer_ip = cfg->peer_ip, +#if IS_ENABLED(CONFIG_IPV6) + .peer_ip6 = cfg->peer_ip6, +#endif + .local_udp_port = cfg->local_udp_port, + .peer_udp_port = cfg->peer_udp_port, + .use_udp_checksums = cfg->use_udp_checksums, + .use_udp6_tx_checksums = cfg->use_udp6_tx_checksums, + .use_udp6_rx_checksums = cfg->use_udp6_rx_checksums + }; + if (cfg->family == AF_INET) + return udp_sock_create4(net, &old_cfg, sockp); + +#if IS_ENABLED(CONFIG_IPV6) + if (cfg->family == AF_INET6) { + int ret; + int old_bindv6only; + struct net *nobns; + + if (cfg->ipv6_v6only) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) + nobns = &init_net; +#else + nobns = net; +#endif + /* Since udp_port_cfg only learned of ipv6_v6only in 4.3, we do this horrible + * hack here and set the sysctl variable temporarily to something that will + * set the right option for us in sock_create. It's super racey! 
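+			 * Any other IPv6 socket created in this network namespace
+			 * while the sysctl is flipped will unexpectedly come up
+			 * with IPV6_V6ONLY set.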
*/ + old_bindv6only = nobns->ipv6.sysctl.bindv6only; + nobns->ipv6.sysctl.bindv6only = 1; + } + ret = udp_sock_create6(net, &old_cfg, sockp); + if (cfg->ipv6_v6only) + nobns->ipv6.sysctl.bindv6only = old_bindv6only; + return ret; + } +#endif + return -EPFNOSUPPORT; +} +#define udp_port_cfg __compat_udp_port_cfg +#define udp_sock_create(a, b, c) __compat_udp_sock_create(a, b, c) +#endif diff --git a/net/wireguard/compat/version/linux/version.h b/net/wireguard/compat/version/linux/version.h new file mode 100644 index 000000000000..90988b37aed6 --- /dev/null +++ b/net/wireguard/compat/version/linux/version.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2021 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include_next <linux/version.h> +#undef KERNEL_VERSION +#define KERNEL_VERSION(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#undef LINUX_VERSION_CODE +#define LINUX_VERSION_CODE KERNEL_VERSION(COMPAT_VERSION, COMPAT_PATCHLEVEL, COMPAT_SUBLEVEL) diff --git a/net/wireguard/cookie.c b/net/wireguard/cookie.c new file mode 100644 index 000000000000..8b7d1fe0cdf4 --- /dev/null +++ b/net/wireguard/cookie.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "cookie.h" +#include "peer.h" +#include "device.h" +#include "messages.h" +#include "ratelimiter.h" +#include "timers.h" + +#include <zinc/blake2s.h> +#include <zinc/chacha20poly1305.h> + +#include <net/ipv6.h> +#include <crypto/algapi.h> + +void wg_cookie_checker_init(struct cookie_checker *checker, + struct wg_device *wg) +{ + init_rwsem(&checker->secret_lock); + checker->secret_birthdate = ktime_get_coarse_boottime_ns(); + get_random_bytes(checker->secret, NOISE_HASH_LEN); + checker->device = wg; +} + +enum { COOKIE_KEY_LABEL_LEN = 8 }; +static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----"; +static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--"; + +static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 pubkey[NOISE_PUBLIC_KEY_LEN], + const u8 label[COOKIE_KEY_LABEL_LEN]) +{ + struct blake2s_state blake; + + blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN); + blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN); + blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN); + blake2s_final(&blake, key); +} + +/* Must hold peer->handshake.static_identity->lock */ +void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker) +{ + if (likely(checker->device->static_identity.has_identity)) { + precompute_key(checker->cookie_encryption_key, + checker->device->static_identity.static_public, + cookie_key_label); + precompute_key(checker->message_mac1_key, + checker->device->static_identity.static_public, + mac1_key_label); + } else { + memset(checker->cookie_encryption_key, 0, + NOISE_SYMMETRIC_KEY_LEN); + memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN); + } +} + +void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer) +{ + precompute_key(peer->latest_cookie.cookie_decryption_key, + peer->handshake.remote_static, cookie_key_label); + precompute_key(peer->latest_cookie.message_mac1_key, + peer->handshake.remote_static, mac1_key_label); +} + +void wg_cookie_init(struct cookie *cookie) +{ + memset(cookie, 0, sizeof(*cookie)); + init_rwsem(&cookie->lock); +} + +static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len, + const u8 key[NOISE_SYMMETRIC_KEY_LEN]) +{ + len = len - sizeof(struct 
message_macs) + + offsetof(struct message_macs, mac1); + blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN); +} + +static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, + const u8 cookie[COOKIE_LEN]) +{ + len = len - sizeof(struct message_macs) + + offsetof(struct message_macs, mac2); + blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN); +} + +static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, + struct cookie_checker *checker) +{ + struct blake2s_state state; + + if (wg_birthdate_has_expired(checker->secret_birthdate, + COOKIE_SECRET_MAX_AGE)) { + down_write(&checker->secret_lock); + checker->secret_birthdate = ktime_get_coarse_boottime_ns(); + get_random_bytes(checker->secret, NOISE_HASH_LEN); + up_write(&checker->secret_lock); + } + + down_read(&checker->secret_lock); + + blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); + if (skb->protocol == htons(ETH_P_IP)) + blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr, + sizeof(struct in_addr)); + else if (skb->protocol == htons(ETH_P_IPV6)) + blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); + blake2s_final(&state, cookie); + + up_read(&checker->secret_lock); +} + +enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, + struct sk_buff *skb, + bool check_cookie) +{ + struct message_macs *macs = (struct message_macs *) + (skb->data + skb->len - sizeof(*macs)); + enum cookie_mac_state ret; + u8 computed_mac[COOKIE_LEN]; + u8 cookie[COOKIE_LEN]; + + ret = INVALID_MAC; + compute_mac1(computed_mac, skb->data, skb->len, + checker->message_mac1_key); + if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN)) + goto out; + + ret = VALID_MAC_BUT_NO_COOKIE; + + if (!check_cookie) + goto out; + + make_cookie(cookie, skb, checker); + + compute_mac2(computed_mac, skb->data, skb->len, cookie); + if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN)) + goto out; + + ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED; + if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev))) + goto out; + + ret = VALID_MAC_WITH_COOKIE; + +out: + return ret; +} + +void wg_cookie_add_mac_to_packet(void *message, size_t len, + struct wg_peer *peer) +{ + struct message_macs *macs = (struct message_macs *) + ((u8 *)message + len - sizeof(*macs)); + + down_write(&peer->latest_cookie.lock); + compute_mac1(macs->mac1, message, len, + peer->latest_cookie.message_mac1_key); + memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN); + peer->latest_cookie.have_sent_mac1 = true; + up_write(&peer->latest_cookie.lock); + + down_read(&peer->latest_cookie.lock); + if (peer->latest_cookie.is_valid && + !wg_birthdate_has_expired(peer->latest_cookie.birthdate, + COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY)) + compute_mac2(macs->mac2, message, len, + peer->latest_cookie.cookie); + else + memset(macs->mac2, 0, COOKIE_LEN); + up_read(&peer->latest_cookie.lock); +} + +void wg_cookie_message_create(struct message_handshake_cookie *dst, + struct sk_buff *skb, __le32 index, + struct cookie_checker *checker) +{ + struct message_macs *macs = (struct message_macs *) + ((u8 *)skb->data + skb->len - sizeof(*macs)); + u8 cookie[COOKIE_LEN]; + + dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE); + dst->receiver_index = index; + get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN); + + make_cookie(cookie, skb, checker); + xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, 
COOKIE_LEN, + macs->mac1, COOKIE_LEN, dst->nonce, + checker->cookie_encryption_key); +} + +void wg_cookie_message_consume(struct message_handshake_cookie *src, + struct wg_device *wg) +{ + struct wg_peer *peer = NULL; + u8 cookie[COOKIE_LEN]; + bool ret; + + if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable, + INDEX_HASHTABLE_HANDSHAKE | + INDEX_HASHTABLE_KEYPAIR, + src->receiver_index, &peer))) + return; + + down_read(&peer->latest_cookie.lock); + if (unlikely(!peer->latest_cookie.have_sent_mac1)) { + up_read(&peer->latest_cookie.lock); + goto out; + } + ret = xchacha20poly1305_decrypt( + cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie), + peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce, + peer->latest_cookie.cookie_decryption_key); + up_read(&peer->latest_cookie.lock); + + if (ret) { + down_write(&peer->latest_cookie.lock); + memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN); + peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns(); + peer->latest_cookie.is_valid = true; + peer->latest_cookie.have_sent_mac1 = false; + up_write(&peer->latest_cookie.lock); + } else { + net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n", + wg->dev->name); + } + +out: + wg_peer_put(peer); +} diff --git a/net/wireguard/cookie.h b/net/wireguard/cookie.h new file mode 100644 index 000000000000..c4bd61ca03f2 --- /dev/null +++ b/net/wireguard/cookie.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_COOKIE_H +#define _WG_COOKIE_H + +#include "messages.h" +#include <linux/rwsem.h> + +struct wg_peer; + +struct cookie_checker { + u8 secret[NOISE_HASH_LEN]; + u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN]; + u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; + u64 secret_birthdate; + struct rw_semaphore secret_lock; + struct wg_device *device; +}; + +struct cookie { + u64 birthdate; + bool is_valid; + u8 cookie[COOKIE_LEN]; + bool have_sent_mac1; + u8 last_mac1_sent[COOKIE_LEN]; + u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN]; + u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; + struct rw_semaphore lock; +}; + +enum cookie_mac_state { + INVALID_MAC, + VALID_MAC_BUT_NO_COOKIE, + VALID_MAC_WITH_COOKIE_BUT_RATELIMITED, + VALID_MAC_WITH_COOKIE +}; + +void wg_cookie_checker_init(struct cookie_checker *checker, + struct wg_device *wg); +void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker); +void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer); +void wg_cookie_init(struct cookie *cookie); + +enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, + struct sk_buff *skb, + bool check_cookie); +void wg_cookie_add_mac_to_packet(void *message, size_t len, + struct wg_peer *peer); + +void wg_cookie_message_create(struct message_handshake_cookie *src, + struct sk_buff *skb, __le32 index, + struct cookie_checker *checker); +void wg_cookie_message_consume(struct message_handshake_cookie *src, + struct wg_device *wg); + +#endif /* _WG_COOKIE_H */ diff --git a/net/wireguard/crypto/Makefile.include b/net/wireguard/crypto/Makefile.include new file mode 100644 index 000000000000..f2a312e96d88 --- /dev/null +++ b/net/wireguard/crypto/Makefile.include @@ -0,0 +1,57 @@ +ifeq ($(CONFIG_X86_64)$(if $(CONFIG_UML),y,n),yn) +CONFIG_ZINC_ARCH_X86_64 := y +endif +ifeq ($(CONFIG_ARM)$(if $(CONFIG_CPU_32v3),y,n),yn) +CONFIG_ZINC_ARCH_ARM := y +endif +ifeq ($(CONFIG_ARM64),y) +CONFIG_ZINC_ARCH_ARM64 := y 
+endif +ifeq ($(CONFIG_MIPS)$(CONFIG_CPU_MIPS32_R2),yy) +CONFIG_ZINC_ARCH_MIPS := y +endif +ifeq ($(CONFIG_MIPS)$(CONFIG_64BIT),yy) +CONFIG_ZINC_ARCH_MIPS64 := y +endif + +zinc-y += chacha20/chacha20.o +zinc-$(CONFIG_ZINC_ARCH_X86_64) += chacha20/chacha20-x86_64.o +zinc-$(CONFIG_ZINC_ARCH_ARM) += chacha20/chacha20-arm.o chacha20/chacha20-unrolled-arm.o +zinc-$(CONFIG_ZINC_ARCH_ARM64) += chacha20/chacha20-arm64.o +zinc-$(CONFIG_ZINC_ARCH_MIPS) += chacha20/chacha20-mips.o +AFLAGS_chacha20-mips.o += -O2 # This is required to fill the branch delay slots + +zinc-y += poly1305/poly1305.o +zinc-$(CONFIG_ZINC_ARCH_X86_64) += poly1305/poly1305-x86_64.o +zinc-$(CONFIG_ZINC_ARCH_ARM) += poly1305/poly1305-arm.o +zinc-$(CONFIG_ZINC_ARCH_ARM64) += poly1305/poly1305-arm64.o +zinc-$(CONFIG_ZINC_ARCH_MIPS) += poly1305/poly1305-mips.o +AFLAGS_poly1305-mips.o += -O2 # This is required to fill the branch delay slots +zinc-$(CONFIG_ZINC_ARCH_MIPS64) += poly1305/poly1305-mips64.o + +zinc-y += chacha20poly1305.o + +zinc-y += blake2s/blake2s.o +zinc-$(CONFIG_ZINC_ARCH_X86_64) += blake2s/blake2s-x86_64.o + +zinc-y += curve25519/curve25519.o +zinc-$(CONFIG_ZINC_ARCH_ARM) += curve25519/curve25519-arm.o + +quiet_cmd_perlasm = PERLASM $@ + cmd_perlasm = $(PERL) $< > $@ +$(obj)/%.S: $(src)/%.pl FORCE + $(call if_changed,perlasm) +kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) +targets := $(patsubst $(kbuild-dir)/%.pl,%.S,$(wildcard $(patsubst %.o,$(kbuild-dir)/crypto/zinc/%.pl,$(zinc-y) $(zinc-m) $(zinc-)))) + +# Old kernels don't set this, which causes trouble. +.SECONDARY: + +wireguard-y += $(addprefix crypto/zinc/,$(zinc-y)) +ccflags-y += -I$(kbuild-dir)/crypto/include +ccflags-$(CONFIG_ZINC_ARCH_X86_64) += -DCONFIG_ZINC_ARCH_X86_64 +ccflags-$(CONFIG_ZINC_ARCH_ARM) += -DCONFIG_ZINC_ARCH_ARM +ccflags-$(CONFIG_ZINC_ARCH_ARM64) += -DCONFIG_ZINC_ARCH_ARM64 +ccflags-$(CONFIG_ZINC_ARCH_MIPS) += -DCONFIG_ZINC_ARCH_MIPS +ccflags-$(CONFIG_ZINC_ARCH_MIPS64) += -DCONFIG_ZINC_ARCH_MIPS64 +ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DCONFIG_ZINC_SELFTEST diff --git a/net/wireguard/crypto/include/zinc/blake2s.h b/net/wireguard/crypto/include/zinc/blake2s.h new file mode 100644 index 000000000000..2ca0bc30750d --- /dev/null +++ b/net/wireguard/crypto/include/zinc/blake2s.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _ZINC_BLAKE2S_H +#define _ZINC_BLAKE2S_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <asm/bug.h> + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32 +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[BLAKE2S_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +void blake2s_init(struct blake2s_state *state, const size_t outlen); +void blake2s_init_key(struct blake2s_state *state, const size_t outlen, + const void *key, const size_t keylen); +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_state *state, u8 *out); + +static inline void blake2s(u8 *out, const u8 *in, const u8 *key, + const size_t outlen, const size_t inlen, + const size_t keylen) +{ + struct blake2s_state state; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || + (!key && keylen))); + + if (keylen) + blake2s_init_key(&state, outlen, key, keylen); + else + blake2s_init(&state, outlen); + + blake2s_update(&state, in, inlen); + blake2s_final(&state, out); +} + +void blake2s_hmac(u8 *out, const u8 *in, const u8 *key, const size_t outlen, + const size_t inlen, const size_t keylen); + +#endif /* _ZINC_BLAKE2S_H */ diff --git a/net/wireguard/crypto/include/zinc/chacha20.h b/net/wireguard/crypto/include/zinc/chacha20.h new file mode 100644 index 000000000000..1b0083d871fb --- /dev/null +++ b/net/wireguard/crypto/include/zinc/chacha20.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _ZINC_CHACHA20_H +#define _ZINC_CHACHA20_H + +#include <asm/unaligned.h> +#include <linux/simd.h> +#include <linux/kernel.h> +#include <linux/types.h> + +enum chacha20_lengths { + CHACHA20_NONCE_SIZE = 16, + CHACHA20_KEY_SIZE = 32, + CHACHA20_KEY_WORDS = CHACHA20_KEY_SIZE / sizeof(u32), + CHACHA20_BLOCK_SIZE = 64, + CHACHA20_BLOCK_WORDS = CHACHA20_BLOCK_SIZE / sizeof(u32), + HCHACHA20_NONCE_SIZE = CHACHA20_NONCE_SIZE, + HCHACHA20_KEY_SIZE = CHACHA20_KEY_SIZE +}; + +enum chacha20_constants { /* expand 32-byte k */ + CHACHA20_CONSTANT_EXPA = 0x61707865U, + CHACHA20_CONSTANT_ND_3 = 0x3320646eU, + CHACHA20_CONSTANT_2_BY = 0x79622d32U, + CHACHA20_CONSTANT_TE_K = 0x6b206574U +}; + +struct chacha20_ctx { + union { + u32 state[16]; + struct { + u32 constant[4]; + u32 key[8]; + u32 counter[4]; + }; + }; +}; + +static inline void chacha20_init(struct chacha20_ctx *ctx, + const u8 key[CHACHA20_KEY_SIZE], + const u64 nonce) +{ + ctx->constant[0] = CHACHA20_CONSTANT_EXPA; + ctx->constant[1] = CHACHA20_CONSTANT_ND_3; + ctx->constant[2] = CHACHA20_CONSTANT_2_BY; + ctx->constant[3] = CHACHA20_CONSTANT_TE_K; + ctx->key[0] = get_unaligned_le32(key + 0); + ctx->key[1] = get_unaligned_le32(key + 4); + ctx->key[2] = get_unaligned_le32(key + 8); + ctx->key[3] = get_unaligned_le32(key + 12); + ctx->key[4] = get_unaligned_le32(key + 16); + ctx->key[5] = get_unaligned_le32(key + 20); + ctx->key[6] = get_unaligned_le32(key + 24); + ctx->key[7] = get_unaligned_le32(key + 28); + ctx->counter[0] = 0; + ctx->counter[1] = 0; + ctx->counter[2] = nonce & U32_MAX; + ctx->counter[3] = nonce >> 32; +} +void chacha20(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 len, + simd_context_t *simd_context); + +void hchacha20(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], 
+ const u8 key[HCHACHA20_KEY_SIZE], simd_context_t *simd_context); + +#endif /* _ZINC_CHACHA20_H */ diff --git a/net/wireguard/crypto/include/zinc/chacha20poly1305.h b/net/wireguard/crypto/include/zinc/chacha20poly1305.h new file mode 100644 index 000000000000..e3339f02996e --- /dev/null +++ b/net/wireguard/crypto/include/zinc/chacha20poly1305.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _ZINC_CHACHA20POLY1305_H +#define _ZINC_CHACHA20POLY1305_H + +#include <linux/simd.h> +#include <linux/types.h> + +struct scatterlist; + +enum chacha20poly1305_lengths { + XCHACHA20POLY1305_NONCE_SIZE = 24, + CHACHA20POLY1305_KEY_SIZE = 32, + CHACHA20POLY1305_AUTHTAG_SIZE = 16 +}; + +void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check chacha20poly1305_encrypt_sg_inplace( + struct scatterlist *src, const size_t src_len, const u8 *ad, + const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], simd_context_t *simd_context); + +bool __must_check +chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check chacha20poly1305_decrypt_sg_inplace( + struct scatterlist *src, size_t src_len, const u8 *ad, + const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], simd_context_t *simd_context); + +void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check xchacha20poly1305_decrypt( + u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, + const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +#endif /* _ZINC_CHACHA20POLY1305_H */ diff --git a/net/wireguard/crypto/include/zinc/curve25519.h b/net/wireguard/crypto/include/zinc/curve25519.h new file mode 100644 index 000000000000..127d8a3a1c82 --- /dev/null +++ b/net/wireguard/crypto/include/zinc/curve25519.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _ZINC_CURVE25519_H +#define _ZINC_CURVE25519_H + +#include <linux/types.h> + +enum curve25519_lengths { + CURVE25519_KEY_SIZE = 32 +}; + +bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]); +void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]); +bool __must_check curve25519_generate_public( + u8 pub[CURVE25519_KEY_SIZE], const u8 secret[CURVE25519_KEY_SIZE]); + +static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + secret[0] &= 248; + secret[31] = (secret[31] & 127) | 64; +} + +#endif /* _ZINC_CURVE25519_H */ diff --git a/net/wireguard/crypto/include/zinc/poly1305.h b/net/wireguard/crypto/include/zinc/poly1305.h new file mode 100644 index 000000000000..8a16d19f8177 --- /dev/null +++ b/net/wireguard/crypto/include/zinc/poly1305.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _ZINC_POLY1305_H +#define _ZINC_POLY1305_H + +#include <linux/simd.h> +#include <linux/types.h> + +enum poly1305_lengths { + POLY1305_BLOCK_SIZE = 16, + POLY1305_KEY_SIZE = 32, + POLY1305_MAC_SIZE = 16 +}; + +struct poly1305_ctx { + u8 opaque[24 * sizeof(u64)]; + u32 nonce[4]; + u8 data[POLY1305_BLOCK_SIZE]; + size_t num; +} __aligned(8); + +void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE]); +void poly1305_update(struct poly1305_ctx *ctx, const u8 *input, size_t len, + simd_context_t *simd_context); +void poly1305_final(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE], + simd_context_t *simd_context); + +#endif /* _ZINC_POLY1305_H */ diff --git a/net/wireguard/crypto/zinc.h b/net/wireguard/crypto/zinc.h new file mode 100644 index 000000000000..9aa1e8d59bf5 --- /dev/null +++ b/net/wireguard/crypto/zinc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_ZINC_H +#define _WG_ZINC_H + +int chacha20_mod_init(void); +int poly1305_mod_init(void); +int chacha20poly1305_mod_init(void); +int blake2s_mod_init(void); +int curve25519_mod_init(void); + +#endif diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c new file mode 100644 index 000000000000..f8cda59bf297 --- /dev/null +++ b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64-glue.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <linux/simd.h> +#include <asm/cpufeature.h> +#include <asm/processor.h> +#include <asm/fpu/api.h> + +asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state, + const u8 *block, const size_t nblocks, + const u32 inc); +asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, + const u8 *block, const size_t nblocks, + const u32 inc); + +static bool blake2s_use_ssse3 __ro_after_init; +static bool blake2s_use_avx512 __ro_after_init; +static bool *const blake2s_nobs[] __initconst = { &blake2s_use_ssse3, + &blake2s_use_avx512 }; + +static void __init blake2s_fpu_init(void) +{ + blake2s_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3); +#ifndef COMPAT_CANNOT_USE_AVX512 + blake2s_use_avx512 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + boot_cpu_has(X86_FEATURE_AVX512VL) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL); +#endif +} + +static inline bool blake2s_compress_arch(struct blake2s_state *state, + const u8 *block, size_t nblocks, + const u32 inc) +{ + simd_context_t simd_context; + bool used_arch = false; + + /* SIMD disables preemption, so relax after processing each page. 
*/ + BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8); + + simd_get(&simd_context); + + if (!IS_ENABLED(CONFIG_AS_SSSE3) || !blake2s_use_ssse3 || + !simd_use(&simd_context)) + goto out; + used_arch = true; + + for (;;) { + const size_t blocks = min_t(size_t, nblocks, + PAGE_SIZE / BLAKE2S_BLOCK_SIZE); + + if (IS_ENABLED(CONFIG_AS_AVX512) && blake2s_use_avx512) + blake2s_compress_avx512(state, block, blocks, inc); + else + blake2s_compress_ssse3(state, block, blocks, inc); + + nblocks -= blocks; + if (!nblocks) + break; + block += blocks * BLAKE2S_BLOCK_SIZE; + simd_relax(&simd_context); + } +out: + simd_put(&simd_context); + return used_arch; +} diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S new file mode 100644 index 000000000000..24910b766bdd --- /dev/null +++ b/net/wireguard/crypto/zinc/blake2s/blake2s-x86_64.S @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * Copyright (C) 2017-2019 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved. + */ + +#include <linux/linkage.h> + +.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32 +.align 32 +IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667 + .octa 0x5BE0CD191F83D9AB9B05688C510E527F +.section .rodata.cst16.ROT16, "aM", @progbits, 16 +.align 16 +ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302 +.section .rodata.cst16.ROR328, "aM", @progbits, 16 +.align 16 +ROR328: .octa 0x0C0F0E0D080B0A090407060500030201 +.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160 +.align 64 +SIGMA: +.byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 +.byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7 +.byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1 +.byte 7, 3, 13, 11, 9, 1, 12, 14, 15, 2, 5, 4, 8, 6, 10, 0 +.byte 9, 5, 2, 10, 0, 7, 4, 15, 3, 14, 11, 6, 13, 1, 12, 8 +.byte 2, 6, 0, 8, 12, 10, 11, 3, 1, 4, 7, 15, 9, 13, 5, 14 +.byte 12, 1, 14, 4, 5, 15, 13, 10, 8, 0, 6, 9, 11, 7, 3, 2 +.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6 +.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4 +.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12 +#ifdef CONFIG_AS_AVX512 +.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640 +.align 64 +SIGMA2: +.long 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 +.long 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7 +.long 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9 +.long 11, 10, 7, 0, 8, 15, 1, 13, 3, 6, 2, 12, 4, 14, 9, 5 +.long 4, 10, 9, 14, 15, 0, 11, 8, 1, 7, 3, 13, 2, 5, 6, 12 +.long 2, 11, 4, 15, 14, 3, 10, 8, 13, 6, 5, 7, 0, 12, 1, 9 +.long 4, 8, 15, 9, 14, 11, 13, 5, 3, 2, 1, 12, 6, 10, 7, 0 +.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10 +.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14 +.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9 +#endif /* CONFIG_AS_AVX512 */ + +.text +#ifdef CONFIG_AS_SSSE3 +SYM_FUNC_START(blake2s_compress_ssse3) + testq %rdx,%rdx + je .Lendofloop + movdqu (%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqa ROT16(%rip),%xmm12 + movdqa ROR328(%rip),%xmm13 + movdqu 0x20(%rdi),%xmm14 + movq %rcx,%xmm15 + leaq SIGMA+0xa0(%rip),%r8 + jmp .Lbeginofloop + .align 32 +.Lbeginofloop: + movdqa %xmm0,%xmm10 + movdqa %xmm1,%xmm11 + paddq %xmm15,%xmm14 + movdqa IV(%rip),%xmm2 + movdqa %xmm14,%xmm3 + pxor IV+0x10(%rip),%xmm3 + leaq SIGMA(%rip),%rcx +.Lroundloop: + movzbl (%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + movzbl 0x1(%rcx),%eax + 
movd (%rsi,%rax,4),%xmm5 + movzbl 0x2(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + movzbl 0x3(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + punpckldq %xmm5,%xmm4 + punpckldq %xmm7,%xmm6 + punpcklqdq %xmm6,%xmm4 + paddd %xmm4,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm12,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0xc,%xmm1 + pslld $0x14,%xmm8 + por %xmm8,%xmm1 + movzbl 0x4(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + movzbl 0x5(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + movzbl 0x6(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + movzbl 0x7(%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + punpckldq %xmm6,%xmm5 + punpckldq %xmm4,%xmm7 + punpcklqdq %xmm7,%xmm5 + paddd %xmm5,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm13,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0x7,%xmm1 + pslld $0x19,%xmm8 + por %xmm8,%xmm1 + pshufd $0x93,%xmm0,%xmm0 + pshufd $0x4e,%xmm3,%xmm3 + pshufd $0x39,%xmm2,%xmm2 + movzbl 0x8(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + movzbl 0x9(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + movzbl 0xa(%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + movzbl 0xb(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + punpckldq %xmm7,%xmm6 + punpckldq %xmm5,%xmm4 + punpcklqdq %xmm4,%xmm6 + paddd %xmm6,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm12,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0xc,%xmm1 + pslld $0x14,%xmm8 + por %xmm8,%xmm1 + movzbl 0xc(%rcx),%eax + movd (%rsi,%rax,4),%xmm7 + movzbl 0xd(%rcx),%eax + movd (%rsi,%rax,4),%xmm4 + movzbl 0xe(%rcx),%eax + movd (%rsi,%rax,4),%xmm5 + movzbl 0xf(%rcx),%eax + movd (%rsi,%rax,4),%xmm6 + punpckldq %xmm4,%xmm7 + punpckldq %xmm6,%xmm5 + punpcklqdq %xmm5,%xmm7 + paddd %xmm7,%xmm0 + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm13,%xmm3 + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm8 + psrld $0x7,%xmm1 + pslld $0x19,%xmm8 + por %xmm8,%xmm1 + pshufd $0x39,%xmm0,%xmm0 + pshufd $0x4e,%xmm3,%xmm3 + pshufd $0x93,%xmm2,%xmm2 + addq $0x10,%rcx + cmpq %r8,%rcx + jnz .Lroundloop + pxor %xmm2,%xmm0 + pxor %xmm3,%xmm1 + pxor %xmm10,%xmm0 + pxor %xmm11,%xmm1 + addq $0x40,%rsi + decq %rdx + jnz .Lbeginofloop + movdqu %xmm0,(%rdi) + movdqu %xmm1,0x10(%rdi) + movdqu %xmm14,0x20(%rdi) +.Lendofloop: + ret +SYM_FUNC_END(blake2s_compress_ssse3) +#endif /* CONFIG_AS_SSSE3 */ + +#ifdef CONFIG_AS_AVX512 +SYM_FUNC_START(blake2s_compress_avx512) + vmovdqu (%rdi),%xmm0 + vmovdqu 0x10(%rdi),%xmm1 + vmovdqu 0x20(%rdi),%xmm4 + vmovq %rcx,%xmm5 + vmovdqa IV(%rip),%xmm14 + vmovdqa IV+16(%rip),%xmm15 + jmp .Lblake2s_compress_avx512_mainloop +.align 32 +.Lblake2s_compress_avx512_mainloop: + vmovdqa %xmm0,%xmm10 + vmovdqa %xmm1,%xmm11 + vpaddq %xmm5,%xmm4,%xmm4 + vmovdqa %xmm14,%xmm2 + vpxor %xmm15,%xmm4,%xmm3 + vmovdqu (%rsi),%ymm6 + vmovdqu 0x20(%rsi),%ymm7 + addq $0x40,%rsi + leaq SIGMA2(%rip),%rax + movb $0xa,%cl +.Lblake2s_compress_avx512_roundloop: + addq $0x40,%rax + vmovdqa -0x40(%rax),%ymm8 + vmovdqa -0x20(%rax),%ymm9 + vpermi2d %ymm7,%ymm6,%ymm8 + vpermi2d %ymm7,%ymm6,%ymm9 + vmovdqa %ymm8,%ymm6 + vmovdqa %ymm9,%ymm7 + vpaddd %xmm8,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x10,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0xc,%xmm1,%xmm1 + vextracti128 $0x1,%ymm8,%xmm8 + vpaddd %xmm8,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x8,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0x7,%xmm1,%xmm1 + vpshufd $0x93,%xmm0,%xmm0 + vpshufd $0x4e,%xmm3,%xmm3 + vpshufd 
$0x39,%xmm2,%xmm2 + vpaddd %xmm9,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x10,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0xc,%xmm1,%xmm1 + vextracti128 $0x1,%ymm9,%xmm9 + vpaddd %xmm9,%xmm0,%xmm0 + vpaddd %xmm1,%xmm0,%xmm0 + vpxor %xmm0,%xmm3,%xmm3 + vprord $0x8,%xmm3,%xmm3 + vpaddd %xmm3,%xmm2,%xmm2 + vpxor %xmm2,%xmm1,%xmm1 + vprord $0x7,%xmm1,%xmm1 + vpshufd $0x39,%xmm0,%xmm0 + vpshufd $0x4e,%xmm3,%xmm3 + vpshufd $0x93,%xmm2,%xmm2 + decb %cl + jne .Lblake2s_compress_avx512_roundloop + vpxor %xmm10,%xmm0,%xmm0 + vpxor %xmm11,%xmm1,%xmm1 + vpxor %xmm2,%xmm0,%xmm0 + vpxor %xmm3,%xmm1,%xmm1 + decq %rdx + jne .Lblake2s_compress_avx512_mainloop + vmovdqu %xmm0,(%rdi) + vmovdqu %xmm1,0x10(%rdi) + vmovdqu %xmm4,0x20(%rdi) + vzeroupper + retq +SYM_FUNC_END(blake2s_compress_avx512) +#endif /* CONFIG_AS_AVX512 */ diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s.c b/net/wireguard/crypto/zinc/blake2s/blake2s.c new file mode 100644 index 000000000000..a9b8a8d95998 --- /dev/null +++ b/net/wireguard/crypto/zinc/blake2s/blake2s.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is an implementation of the BLAKE2s hash and PRF functions. + * + * Information: https://blake2.net/ + * + */ + +#include <zinc/blake2s.h> +#include "../selftest/run.h" + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bug.h> +#include <asm/unaligned.h> + +static const u32 blake2s_iv[8] = { + 0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL, + 0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL +}; + +static const u8 blake2s_sigma[10][16] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, +}; + +static inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + +static inline void blake2s_increment_counter(struct blake2s_state *state, + const u32 inc) +{ + state->t[0] += inc; + state->t[1] += (state->t[0] < inc); +} + +static inline void blake2s_init_param(struct blake2s_state *state, + const u32 param) +{ + int i; + + memset(state, 0, sizeof(*state)); + for (i = 0; i < 8; ++i) + state->h[i] = blake2s_iv[i]; + state->h[0] ^= param; +} + +void blake2s_init(struct blake2s_state *state, const size_t outlen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE)); + blake2s_init_param(state, 0x01010000 | outlen); + state->outlen = outlen; +} + +void blake2s_init_key(struct blake2s_state *state, const size_t outlen, + const void *key, const size_t keylen) +{ + u8 block[BLAKE2S_BLOCK_SIZE] = { 0 }; + + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || + !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); + blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen); + state->outlen = outlen; + memcpy(block, key, keylen); + 
blake2s_update(state, block, BLAKE2S_BLOCK_SIZE); + memzero_explicit(block, BLAKE2S_BLOCK_SIZE); +} + +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "blake2s-x86_64-glue.c" +#else +static bool *const blake2s_nobs[] __initconst = { }; +static void __init blake2s_fpu_init(void) +{ +} +static inline bool blake2s_compress_arch(struct blake2s_state *state, + const u8 *block, size_t nblocks, + const u32 inc) +{ + return false; +} +#endif + +static inline void blake2s_compress(struct blake2s_state *state, + const u8 *block, size_t nblocks, + const u32 inc) +{ + u32 m[16]; + u32 v[16]; + int i; + + WARN_ON(IS_ENABLED(DEBUG) && + (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE)); + + if (blake2s_compress_arch(state, block, nblocks, inc)) + return; + + while (nblocks > 0) { + blake2s_increment_counter(state, inc); + memcpy(m, block, BLAKE2S_BLOCK_SIZE); + le32_to_cpu_array(m, ARRAY_SIZE(m)); + memcpy(v, state->h, 32); + v[ 8] = blake2s_iv[0]; + v[ 9] = blake2s_iv[1]; + v[10] = blake2s_iv[2]; + v[11] = blake2s_iv[3]; + v[12] = blake2s_iv[4] ^ state->t[0]; + v[13] = blake2s_iv[5] ^ state->t[1]; + v[14] = blake2s_iv[6] ^ state->f[0]; + v[15] = blake2s_iv[7] ^ state->f[1]; + +#define G(r, i, a, b, c, d) do { \ + a += b + m[blake2s_sigma[r][2 * i + 0]]; \ + d = ror32(d ^ a, 16); \ + c += d; \ + b = ror32(b ^ c, 12); \ + a += b + m[blake2s_sigma[r][2 * i + 1]]; \ + d = ror32(d ^ a, 8); \ + c += d; \ + b = ror32(b ^ c, 7); \ +} while (0) + +#define ROUND(r) do { \ + G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ + G(r, 1, v[1], v[ 5], v[ 9], v[13]); \ + G(r, 2, v[2], v[ 6], v[10], v[14]); \ + G(r, 3, v[3], v[ 7], v[11], v[15]); \ + G(r, 4, v[0], v[ 5], v[10], v[15]); \ + G(r, 5, v[1], v[ 6], v[11], v[12]); \ + G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ + G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ +} while (0) + ROUND(0); + ROUND(1); + ROUND(2); + ROUND(3); + ROUND(4); + ROUND(5); + ROUND(6); + ROUND(7); + ROUND(8); + ROUND(9); + +#undef G +#undef ROUND + + for (i = 0; i < 8; ++i) + state->h[i] ^= v[i] ^ v[i + 8]; + + block += BLAKE2S_BLOCK_SIZE; + --nblocks; + } +} + +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) +{ + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +void blake2s_final(struct blake2s_state *state, u8 *out) +{ + WARN_ON(IS_ENABLED(DEBUG) && !out); + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + blake2s_compress(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); + memzero_explicit(state, sizeof(*state)); +} + +void blake2s_hmac(u8 *out, const u8 *in, const u8 *key, const size_t outlen, + const size_t inlen, const size_t keylen) +{ + struct blake2s_state state; + u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; + u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); + int i; + + if (keylen > 
BLAKE2S_BLOCK_SIZE) { + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, key, keylen); + blake2s_final(&state, x_key); + } else + memcpy(x_key, key, keylen); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, in, inlen); + blake2s_final(&state, i_hash); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x5c ^ 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); + blake2s_final(&state, i_hash); + + memcpy(out, i_hash, outlen); + memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); + memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); +} + +#include "../selftest/blake2s.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init blake2s_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + blake2s_fpu_init(); + if (!selftest_run("blake2s", blake2s_selftest, blake2s_nobs, + ARRAY_SIZE(blake2s_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("BLAKE2s hash function"); +MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); +#endif diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c b/net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c new file mode 100644 index 000000000000..41e2e79abb2b --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-arm-glue.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/hwcap.h> +#include <asm/neon.h> +#if defined(CONFIG_ZINC_ARCH_ARM) +#include <asm/system_info.h> +#include <asm/cputype.h> +#endif + +asmlinkage void chacha20_arm(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void hchacha20_arm(const u32 state[16], u32 out[8]); +asmlinkage void chacha20_neon(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); + +static bool chacha20_use_neon __ro_after_init; +static bool *const chacha20_nobs[] __initconst = { &chacha20_use_neon }; +static void __init chacha20_fpu_init(void) +{ +#if defined(CONFIG_ZINC_ARCH_ARM64) + chacha20_use_neon = cpu_have_named_feature(ASIMD); +#elif defined(CONFIG_ZINC_ARCH_ARM) + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A7: + case ARM_CPU_PART_CORTEX_A5: + /* The Cortex-A7 and Cortex-A5 do not perform well with the NEON + * implementation but do incredibly with the scalar one and use + * less power. + */ + break; + default: + chacha20_use_neon = elf_hwcap & HWCAP_NEON; + } +#endif +} + +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + /* SIMD disables preemption, so relax after processing each page. 
*/ + BUILD_BUG_ON(PAGE_SIZE < CHACHA20_BLOCK_SIZE || + PAGE_SIZE % CHACHA20_BLOCK_SIZE); + + for (;;) { + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && chacha20_use_neon && + len >= CHACHA20_BLOCK_SIZE * 3 && simd_use(simd_context)) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + chacha20_neon(dst, src, bytes, ctx->key, ctx->counter); + ctx->counter[0] += (bytes + 63) / 64; + len -= bytes; + if (!len) + break; + dst += bytes; + src += bytes; + simd_relax(simd_context); + } else { + chacha20_arm(dst, src, len, ctx->key, ctx->counter); + ctx->counter[0] += (len + 63) / 64; + break; + } + } + + return true; +} + +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + if (IS_ENABLED(CONFIG_ZINC_ARCH_ARM)) { + u32 x[] = { CHACHA20_CONSTANT_EXPA, + CHACHA20_CONSTANT_ND_3, + CHACHA20_CONSTANT_2_BY, + CHACHA20_CONSTANT_TE_K, + get_unaligned_le32(key + 0), + get_unaligned_le32(key + 4), + get_unaligned_le32(key + 8), + get_unaligned_le32(key + 12), + get_unaligned_le32(key + 16), + get_unaligned_le32(key + 20), + get_unaligned_le32(key + 24), + get_unaligned_le32(key + 28), + get_unaligned_le32(nonce + 0), + get_unaligned_le32(nonce + 4), + get_unaligned_le32(nonce + 8), + get_unaligned_le32(nonce + 12) + }; + hchacha20_arm(x, derived_key); + return true; + } + return false; +} diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl b/net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl new file mode 100644 index 000000000000..6785383ab7bb --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-arm.pl @@ -0,0 +1,1227 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# December 2014 +# +# ChaCha20 for ARMv4. +# +# September 2018 +# +# Improve scalar performance per Eric Biggers' suggestion to eliminate +# separate rotates. This requires b[0..3] and d[0..3] to be maintained +# pre-rotated, hence odd twists prior inner loop and when accumulating +# key material. Since amount of instructions is reduced as result, even +# NEON performance is improved somewhat, most notably by ~9% on low-end +# Cortex-A5/A7. Full unroll was shown to provide even better scalar +# performance on Cortex-A5/A7, naturally at the cost of manyfold size +# increase. We let it be. Oversized code works in benchmarks, but is not +# necessarily optimal in real life, when it's likely to be out-of-cache +# upon entry and evict significant part of cache upon completion. +# +# Performance in cycles per byte out of large buffer. 
+# +# IALU/gcc-4.4 1xNEON 3xNEON+1xIALU +# +# Cortex-A5 14.2(*)/+160% 21.8 12.9(**) +# Cortex-A8 10.2(*)/+190% 13.9 6.10 +# Cortex-A9 10.8(*)/+150% 14.3 6.50 +# Cortex-A15 11.0/+40% 16.0 4.90 +# Snapdragon S4 13.9(***)/+90% 13.6 4.90 +# +# (*) most "favourable" result for aligned data on little-endian +# processor, result for misaligned data is 10-15% lower; +# (**) pure 4xNEON [with "vertical" layout] was shown to provide ~8% +# better performance on Cortex-A5/A7, but not on others; +# (***) it's 17% slower than original, trade-off is considered +# acceptable, because of improvement on others, specifically +# +36% on Cortex-A5/A7 and +20% on Cortex-A9; + +$flavour = shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +sub AUTOLOAD() # thunk [simplified] x86-style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; + my $arg = pop; + $arg = "#$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; +} + +my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x")); +my @t=map("r$_",(8..11)); + +sub ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my $odd = $d0&1; +my ($xc,$xc_) = (@t[0..1]); +my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]); +my @ret; + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' are permanently allocated in registers, @x[0..7], + # while 'c's and pair of 'd's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. If you observe 'd' column, you'll + # notice that 15 and 13 are reused in next pair of rounds. + # This is why these two are chosen for offloading to memory, + # to make loads count more. 
+ push @ret,( + "&add (@x[$a0],@x[$a0],@x[$b0],'ror#13')", + "&add (@x[$a1],@x[$a1],@x[$b1],'ror#13')", + "&eor ($xd,@x[$a0],$xd,'ror#24')", + "&eor ($xd_,@x[$a1],$xd_,'ror#24')", + + "&add ($xc,$xc,$xd,'ror#16')", + "&add ($xc_,$xc_,$xd_,'ror#16')", + "&eor (@x[$b0],$xc, @x[$b0],'ror#13')", + "&eor (@x[$b1],$xc_,@x[$b1],'ror#13')", + + "&add (@x[$a0],@x[$a0],@x[$b0],'ror#20')", + "&add (@x[$a1],@x[$a1],@x[$b1],'ror#20')", + "&eor ($xd,@x[$a0],$xd,'ror#16')", + "&eor ($xd_,@x[$a1],$xd_,'ror#16')" ); + push @ret,( + "&str ($xd,'[sp,#4*(16+$d0)]')" ) if ($odd); + push @ret,( + "&add ($xc,$xc,$xd,'ror#24')" ); + push @ret,( + "&ldr ($xd,'[sp,#4*(16+$d2)]')" ) if ($odd); + push @ret,( + "&str ($xd_,'[sp,#4*(16+$d1)]')" ) if (!$odd); + push @ret,( + "&add ($xc_,$xc_,$xd_,'ror#24')" ); + push @ret,( + "&ldr ($xd_,'[sp,#4*(16+$d3)]')" ) if (!$odd); + push @ret,( + "&str ($xc,'[sp,#4*(16+$c0)]')", + "&eor (@x[$b0],@x[$b0],$xc,'ror#12')", + "&str ($xc_,'[sp,#4*(16+$c1)]')", + "&eor (@x[$b1],@x[$b1],$xc_,'ror#12')" ); + + $xd=@x[$d2] if (!$odd); + $xd_=@x[$d3] if ($odd); + push @ret,( + "&ldr ($xc,'[sp,#4*(16+$c2)]')", + "&add (@x[$a2],@x[$a2],@x[$b2],'ror#13')", + "&ldr ($xc_,'[sp,#4*(16+$c3)]')", + "&add (@x[$a3],@x[$a3],@x[$b3],'ror#13')", + "&eor ($xd,@x[$a2],$xd,'ror#24')", + "&eor ($xd_,@x[$a3],$xd_,'ror#24')", + + "&add ($xc,$xc,$xd,'ror#16')", + "&add ($xc_,$xc_,$xd_,'ror#16')", + "&eor (@x[$b2],$xc, @x[$b2],'ror#13')", + "&eor (@x[$b3],$xc_,@x[$b3],'ror#13')", + + "&add (@x[$a2],@x[$a2],@x[$b2],'ror#20')", + "&add (@x[$a3],@x[$a3],@x[$b3],'ror#20')", + "&eor ($xd,@x[$a2],$xd,'ror#16')", + "&eor ($xd_,@x[$a3],$xd_,'ror#16')", + + "&add ($xc,$xc,$xd,'ror#24')", + "&add ($xc_,$xc_,$xd_,'ror#24')", + "&eor (@x[$b2],@x[$b2],$xc,'ror#12')", + "&eor (@x[$b3],@x[$b3],$xc_,'ror#12')" ); + + @ret; +} + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +#else +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ +# define ChaCha20_ctr32 chacha20_arm_cryptogams +# define ChaCha20_neon chacha20_neon +#endif + +.text +#if defined(__thumb2__) || defined(__clang__) +.syntax unified +# define ldrhsb ldrbhs +#endif +#if defined(__thumb2__) +.thumb +#else +.code 32 +#endif + +.align 5 +.Lsigma: +.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral +.Lone: +.long 1,0,0,0 +.Lrot8: +.long 0x02010003,0x06050407 +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-.LChaCha20_ctr32 +#else +.word -1 +#endif + +.globl ChaCha20_ctr32 +.type ChaCha20_ctr32,%function +.align 5 +ChaCha20_ctr32: +.LChaCha20_ctr32: + ldr r12,[sp,#0] @ pull pointer to counter and nonce + stmdb sp!,{r0-r2,r4-r11,lr} +#if __ARM_ARCH__<7 && !defined(__thumb2__) + sub r14,pc,#16 @ ChaCha20_ctr32 +#else + adr r14,.LChaCha20_ctr32 +#endif + cmp r2,#0 @ len==0? 
+#ifdef __thumb2__ + itt eq +#endif + addeq sp,sp,#4*3 + beq .Lno_data +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + cmp r2,#192 @ test len + bls .Lshort + ldr r4,[r14,#-24] + ldr r4,[r14,r4] +# ifdef __APPLE__ + ldr r4,[r4] +# endif + tst r4,#ARMV7_NEON + bne .LChaCha20_neon +.Lshort: +#endif + ldmia r12,{r4-r7} @ load counter and nonce + sub sp,sp,#4*(16) @ off-load area + sub r14,r14,#64 @ .Lsigma + stmdb sp!,{r4-r7} @ copy counter and nonce + ldmia r3,{r4-r11} @ load key + ldmia r14,{r0-r3} @ load sigma + stmdb sp!,{r4-r11} @ copy key + stmdb sp!,{r0-r3} @ copy sigma + str r10,[sp,#4*(16+10)] @ off-load "@x[10]" + str r11,[sp,#4*(16+11)] @ off-load "@x[11]" + b .Loop_outer_enter + +.align 4 +.Loop_outer: + ldmia sp,{r0-r9} @ load key material + str @t[3],[sp,#4*(32+2)] @ save len + str r12, [sp,#4*(32+1)] @ save inp + str r14, [sp,#4*(32+0)] @ save out +.Loop_outer_enter: + ldr @t[3], [sp,#4*(15)] + mov @x[4],@x[4],ror#19 @ twist b[0..3] + ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load + mov @x[5],@x[5],ror#19 + ldr @t[2], [sp,#4*(13)] + mov @x[6],@x[6],ror#19 + ldr @x[14],[sp,#4*(14)] + mov @x[7],@x[7],ror#19 + mov @t[3],@t[3],ror#8 @ twist d[0..3] + mov @x[12],@x[12],ror#8 + mov @t[2],@t[2],ror#8 + mov @x[14],@x[14],ror#8 + str @t[3], [sp,#4*(16+15)] + mov @t[3],#10 + b .Loop + +.align 4 +.Loop: + subs @t[3],@t[3],#1 +___ + foreach (&ROUND(0, 4, 8,12)) { eval; } + foreach (&ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + bne .Loop + + ldr @t[3],[sp,#4*(32+2)] @ load len + + str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store + str @t[1], [sp,#4*(16+9)] + str @x[12],[sp,#4*(16+12)] + str @t[2], [sp,#4*(16+13)] + str @x[14],[sp,#4*(16+14)] + + @ at this point we have first half of 512-bit result in + @ @x[0-7] and second half at sp+4*(16+8) + + cmp @t[3],#64 @ done yet? +#ifdef __thumb2__ + itete lo +#endif + addlo r12,sp,#4*(0) @ shortcut or ... + ldrhs r12,[sp,#4*(32+1)] @ ... load inp + addlo r14,sp,#4*(0) @ shortcut or ... + ldrhs r14,[sp,#4*(32+0)] @ ... load out + + ldr @t[0],[sp,#4*(0)] @ load key material + ldr @t[1],[sp,#4*(1)] + +#if __ARM_ARCH__>=6 || !defined(__ARMEB__) +# if __ARM_ARCH__<7 + orr @t[2],r12,r14 + tst @t[2],#3 @ are input and output aligned? 
+ ldr @t[2],[sp,#4*(2)] + bne .Lunaligned + cmp @t[3],#64 @ restore flags +# else + ldr @t[2],[sp,#4*(2)] +# endif + ldr @t[3],[sp,#4*(3)] + + add @x[0],@x[0],@t[0] @ accumulate key material + add @x[1],@x[1],@t[1] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] + + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[0],@x[0],@t[0] @ xor with input + eorhs @x[1],@x[1],@t[1] + add @t[0],sp,#4*(4) + str @x[0],[r14],#16 @ store output +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[2],@x[2],@t[2] + eorhs @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[1],[r14,#-12] + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#13 @ accumulate key material + add @x[5],@t[1],@x[5],ror#13 +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] + add @x[6],@t[2],@x[6],ror#13 + add @x[7],@t[3],@x[7],ror#13 +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[4],@x[4],@t[0] + eorhs @x[5],@x[5],@t[1] + add @t[0],sp,#4*(8) + str @x[4],[r14],#16 @ store output +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[6],@x[6],@t[2] + eorhs @x[7],@x[7],@t[3] + str @x[5],[r14,#-12] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[6],[r14,#-8] + add @x[0],sp,#4*(16+8) + str @x[7],[r14,#-4] + + ldmia @x[0],{@x[0]-@x[7]} @ load second half + + add @x[0],@x[0],@t[0] @ accumulate key material + add @x[1],@x[1],@t[1] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] +# ifdef __thumb2__ + itt hi +# endif + strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it + strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[0],@x[0],@t[0] + eorhs @x[1],@x[1],@t[1] + add @t[0],sp,#4*(12) + str @x[0],[r14],#16 @ store output +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[2],@x[2],@t[2] + eorhs @x[3],@x[3],@t[3] + str @x[1],[r14,#-12] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#24 @ accumulate key material + add @x[5],@t[1],@x[5],ror#24 +# ifdef __thumb2__ + itt hi +# endif + addhi @t[0],@t[0],#1 @ next counter value + strhi @t[0],[sp,#4*(12)] @ save next counter value +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[0],[r12],#16 @ load input + ldrhs @t[1],[r12,#-12] + add @x[6],@t[2],@x[6],ror#24 + add @x[7],@t[3],@x[7],ror#24 +# ifdef __thumb2__ + itt hs +# endif + ldrhs @t[2],[r12,#-8] + ldrhs @t[3],[r12,#-4] +# if __ARM_ARCH__>=6 && defined(__ARMEB__) + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif +# ifdef __thumb2__ + itt hs +# endif + eorhs @x[4],@x[4],@t[0] + eorhs @x[5],@x[5],@t[1] +# ifdef __thumb2__ + it ne +# endif + ldrne @t[0],[sp,#4*(32+2)] @ re-load len +# 
ifdef __thumb2__ + itt hs +# endif + eorhs @x[6],@x[6],@t[2] + eorhs @x[7],@x[7],@t[3] + str @x[4],[r14],#16 @ store output + str @x[5],[r14,#-12] +# ifdef __thumb2__ + it hs +# endif + subhs @t[3],@t[0],#64 @ len-=64 + str @x[6],[r14,#-8] + str @x[7],[r14,#-4] + bhi .Loop_outer + + beq .Ldone +# if __ARM_ARCH__<7 + b .Ltail + +.align 4 +.Lunaligned: @ unaligned endian-neutral path + cmp @t[3],#64 @ restore flags +# endif +#endif +#if __ARM_ARCH__<7 + ldr @t[3],[sp,#4*(3)] +___ +for ($i=0;$i<16;$i+=4) { +my $j=$i&0x7; +my $twist=""; +if ($i==4) { $twist = ",ror#13"; } +elsif ($i==12) { $twist = ",ror#24"; } + +$code.=<<___ if ($i==4); + add @x[0],sp,#4*(16+8) +___ +$code.=<<___ if ($i==8); + ldmia @x[0],{@x[0]-@x[7]} @ load second half +# ifdef __thumb2__ + itt hi +# endif + strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" + strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" +___ +$code.=<<___; + add @x[$j+0],@t[0],@x[$j+0]$twist @ accumulate key material +___ +$code.=<<___ if ($i==12); +# ifdef __thumb2__ + itt hi +# endif + addhi @t[0],@t[0],#1 @ next counter value + strhi @t[0],[sp,#4*(12)] @ save next counter value +___ +$code.=<<___; + add @x[$j+1],@t[1],@x[$j+1]$twist + add @x[$j+2],@t[2],@x[$j+2]$twist +# ifdef __thumb2__ + itete lo +# endif + eorlo @t[0],@t[0],@t[0] @ zero or ... + ldrhsb @t[0],[r12],#16 @ ... load input + eorlo @t[1],@t[1],@t[1] + ldrhsb @t[1],[r12,#-12] + + add @x[$j+3],@t[3],@x[$j+3]$twist +# ifdef __thumb2__ + itete lo +# endif + eorlo @t[2],@t[2],@t[2] + ldrhsb @t[2],[r12,#-8] + eorlo @t[3],@t[3],@t[3] + ldrhsb @t[3],[r12,#-4] + + eor @x[$j+0],@t[0],@x[$j+0] @ xor with input (or zero) + eor @x[$j+1],@t[1],@x[$j+1] +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[0],[r12,#-15] @ load more input + ldrhsb @t[1],[r12,#-11] + eor @x[$j+2],@t[2],@x[$j+2] + strb @x[$j+0],[r14],#16 @ store output + eor @x[$j+3],@t[3],@x[$j+3] +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[2],[r12,#-7] + ldrhsb @t[3],[r12,#-3] + strb @x[$j+1],[r14,#-12] + eor @x[$j+0],@t[0],@x[$j+0],lsr#8 + strb @x[$j+2],[r14,#-8] + eor @x[$j+1],@t[1],@x[$j+1],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[0],[r12,#-14] @ load more input + ldrhsb @t[1],[r12,#-10] + strb @x[$j+3],[r14,#-4] + eor @x[$j+2],@t[2],@x[$j+2],lsr#8 + strb @x[$j+0],[r14,#-15] + eor @x[$j+3],@t[3],@x[$j+3],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[2],[r12,#-6] + ldrhsb @t[3],[r12,#-2] + strb @x[$j+1],[r14,#-11] + eor @x[$j+0],@t[0],@x[$j+0],lsr#8 + strb @x[$j+2],[r14,#-7] + eor @x[$j+1],@t[1],@x[$j+1],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[0],[r12,#-13] @ load more input + ldrhsb @t[1],[r12,#-9] + strb @x[$j+3],[r14,#-3] + eor @x[$j+2],@t[2],@x[$j+2],lsr#8 + strb @x[$j+0],[r14,#-14] + eor @x[$j+3],@t[3],@x[$j+3],lsr#8 +# ifdef __thumb2__ + itt hs +# endif + ldrhsb @t[2],[r12,#-5] + ldrhsb @t[3],[r12,#-1] + strb @x[$j+1],[r14,#-10] + strb @x[$j+2],[r14,#-6] + eor @x[$j+0],@t[0],@x[$j+0],lsr#8 + strb @x[$j+3],[r14,#-2] + eor @x[$j+1],@t[1],@x[$j+1],lsr#8 + strb @x[$j+0],[r14,#-13] + eor @x[$j+2],@t[2],@x[$j+2],lsr#8 + strb @x[$j+1],[r14,#-9] + eor @x[$j+3],@t[3],@x[$j+3],lsr#8 + strb @x[$j+2],[r14,#-5] + strb @x[$j+3],[r14,#-1] +___ +$code.=<<___ if ($i<12); + add @t[0],sp,#4*(4+$i) + ldmia @t[0],{@t[0]-@t[3]} @ load key material +___ +} +$code.=<<___; +# ifdef __thumb2__ + it ne +# endif + ldrne @t[0],[sp,#4*(32+2)] @ re-load len +# ifdef __thumb2__ + it hs +# endif + subhs @t[3],@t[0],#64 @ len-=64 + bhi .Loop_outer + + beq .Ldone +#endif + +.Ltail: + ldr r12,[sp,#4*(32+1)] @ load inp + add 
@t[1],sp,#4*(0) + ldr r14,[sp,#4*(32+0)] @ load out + +.Loop_tail: + ldrb @t[2],[@t[1]],#1 @ read buffer on stack + ldrb @t[3],[r12],#1 @ read input + subs @t[0],@t[0],#1 + eor @t[3],@t[3],@t[2] + strb @t[3],[r14],#1 @ store output + bne .Loop_tail + +.Ldone: + add sp,sp,#4*(32+3) +.Lno_data: +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r12,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .long 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size ChaCha20_ctr32,.-ChaCha20_ctr32 +___ + +{{{ +my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) = + map("q$_",(0..15)); + +# This can replace vshr-by-24+vsli-by-8. It gives ~3% improvement on +# Cortex-A5/A7, but hurts Cortex-A9 by 5% and Snapdragon S4 by 14%! +sub vperm() +{ my ($dst,$src,$tbl) = @_; + $code .= " vtbl.8 $dst#lo,{$src#lo},$tbl#lo\n"; + $code .= " vtbl.8 $dst#hi,{$src#hi},$tbl#lo\n"; +} + +sub NEONROUND { +my $odd = pop; +my ($a,$b,$c,$d,$t)=@_; + + ( + "&vadd_i32 ($a,$a,$b)", + "&veor ($d,$d,$a)", + "&vrev32_16 ($d,$d)", # vrot ($d,16) + + "&vadd_i32 ($c,$c,$d)", + "&veor ($t,$b,$c)", + "&vshr_u32 ($b,$t,20)", + "&vsli_32 ($b,$t,12)", + + "&vadd_i32 ($a,$a,$b)", + "&veor ($t,$d,$a)", + "&vshr_u32 ($d,$t,24)", + "&vsli_32 ($d,$t,8)", + #"&vperm ($d,$t,$t3)", + + "&vadd_i32 ($c,$c,$d)", + "&veor ($t,$b,$c)", + "&vshr_u32 ($b,$t,25)", + "&vsli_32 ($b,$t,7)", + + "&vext_8 ($a,$a,$a,$odd?4:12)", + "&vext_8 ($d,$d,$d,8)", + "&vext_8 ($c,$c,$c,$odd?12:4)" + ); +} + +$code.=<<___; +#if (defined(__KERNEL__) && defined(CONFIG_KERNEL_MODE_NEON)) || (!defined(__KERNEL__) && __ARM_MAX_ARCH__>=7) +.arch armv7-a +.fpu neon + +# ifdef __KERNEL__ +.globl ChaCha20_neon +@ For optimal performance it's appropriate for caller to enforce +@ minimum input length, 193 bytes is suggested. +# endif +.type ChaCha20_neon,%function +.align 5 +ChaCha20_neon: + ldr r12,[sp,#0] @ pull pointer to counter and nonce + stmdb sp!,{r0-r2,r4-r11,lr} +.LChaCha20_neon: + adr r14,.Lsigma + vstmdb sp!,{d8-d15} @ ABI spec says so + stmdb sp!,{r0-r3} + + vld1.32 {$b0-$c0},[r3] @ load key + ldmia r3,{r4-r11} @ load key + + sub sp,sp,#4*(16+16) + vld1.32 {$d0},[r12] @ load counter and nonce + add r12,sp,#4*8 + ldmia r14,{r0-r3} @ load sigma + vld1.32 {$a0},[r14]! @ load sigma + vld1.32 {$t0},[r14]! 
@ one + @ vld1.32 {$t3#lo},[r14] @ rot8 + vst1.32 {$c0-$d0},[r12] @ copy 1/2key|counter|nonce + vst1.32 {$a0-$b0},[sp] @ copy sigma|1/2key + + str r10,[sp,#4*(16+10)] @ off-load "@x[10]" + str r11,[sp,#4*(16+11)] @ off-load "@x[11]" + vshl.i32 $t1#lo,$t0#lo,#1 @ two + vstr $t0#lo,[sp,#4*(16+0)] + vshl.i32 $t2#lo,$t0#lo,#2 @ four + vstr $t1#lo,[sp,#4*(16+2)] + vmov $a1,$a0 + vstr $t2#lo,[sp,#4*(16+4)] + vmov $a2,$a0 + @ vstr $t3#lo,[sp,#4*(16+6)] + vmov $b1,$b0 + vmov $b2,$b0 + b .Loop_neon_enter + +.align 4 +.Loop_neon_outer: + ldmia sp,{r0-r9} @ load key material + cmp @t[3],#64*2 @ if len<=64*2 + bls .Lbreak_neon @ switch to integer-only + @ vldr $t3#lo,[sp,#4*(16+6)] @ rot8 + vmov $a1,$a0 + str @t[3],[sp,#4*(32+2)] @ save len + vmov $a2,$a0 + str r12, [sp,#4*(32+1)] @ save inp + vmov $b1,$b0 + str r14, [sp,#4*(32+0)] @ save out + vmov $b2,$b0 +.Loop_neon_enter: + ldr @t[3], [sp,#4*(15)] + mov @x[4],@x[4],ror#19 @ twist b[0..3] + vadd.i32 $d1,$d0,$t0 @ counter+1 + ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load + mov @x[5],@x[5],ror#19 + vmov $c1,$c0 + ldr @t[2], [sp,#4*(13)] + mov @x[6],@x[6],ror#19 + vmov $c2,$c0 + ldr @x[14],[sp,#4*(14)] + mov @x[7],@x[7],ror#19 + vadd.i32 $d2,$d1,$t0 @ counter+2 + add @x[12],@x[12],#3 @ counter+3 + mov @t[3],@t[3],ror#8 @ twist d[0..3] + mov @x[12],@x[12],ror#8 + mov @t[2],@t[2],ror#8 + mov @x[14],@x[14],ror#8 + str @t[3], [sp,#4*(16+15)] + mov @t[3],#10 + b .Loop_neon + +.align 4 +.Loop_neon: + subs @t[3],@t[3],#1 +___ + my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0); + my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0); + my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0); + my @thread3=&ROUND(0,4,8,12); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } + + @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1); + @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1); + @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1); + @thread3=&ROUND(0,5,10,15); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } +$code.=<<___; + bne .Loop_neon + + add @t[3],sp,#32 + vld1.32 {$t0-$t1},[sp] @ load key material + vld1.32 {$t2-$t3},[@t[3]] + + ldr @t[3],[sp,#4*(32+2)] @ load len + + str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store + str @t[1], [sp,#4*(16+9)] + str @x[12],[sp,#4*(16+12)] + str @t[2], [sp,#4*(16+13)] + str @x[14],[sp,#4*(16+14)] + + @ at this point we have first half of 512-bit result in + @ @x[0-7] and second half at sp+4*(16+8) + + ldr r12,[sp,#4*(32+1)] @ load inp + ldr r14,[sp,#4*(32+0)] @ load out + + vadd.i32 $a0,$a0,$t0 @ accumulate key material + vadd.i32 $a1,$a1,$t0 + vadd.i32 $a2,$a2,$t0 + vldr $t0#lo,[sp,#4*(16+0)] @ one + + vadd.i32 $b0,$b0,$t1 + vadd.i32 $b1,$b1,$t1 + vadd.i32 $b2,$b2,$t1 + vldr $t1#lo,[sp,#4*(16+2)] @ two + + vadd.i32 $c0,$c0,$t2 + vadd.i32 $c1,$c1,$t2 + vadd.i32 $c2,$c2,$t2 + vadd.i32 $d1#lo,$d1#lo,$t0#lo @ counter+1 + vadd.i32 $d2#lo,$d2#lo,$t1#lo @ counter+2 + + vadd.i32 $d0,$d0,$t3 + vadd.i32 $d1,$d1,$t3 + vadd.i32 $d2,$d2,$t3 + + cmp @t[3],#64*4 + blo .Ltail_neon + + vld1.8 {$t0-$t1},[r12]! @ load input + mov @t[3],sp + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 @ xor with input + veor $b0,$b0,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a1,$a1,$t0 + vst1.8 {$a0-$b0},[r14]! @ store output + veor $b1,$b1,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c1,$c1,$t2 + vst1.8 {$c0-$d0},[r14]! 
+ veor $d1,$d1,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a2,$a2,$t0 + vld1.32 {$a0-$b0},[@t[3]]! @ load for next iteration + veor $t0#hi,$t0#hi,$t0#hi + vldr $t0#lo,[sp,#4*(16+4)] @ four + veor $b2,$b2,$t1 + vld1.32 {$c0-$d0},[@t[3]] + veor $c2,$c2,$t2 + vst1.8 {$a1-$b1},[r14]! + veor $d2,$d2,$t3 + vst1.8 {$c1-$d1},[r14]! + + vadd.i32 $d0#lo,$d0#lo,$t0#lo @ next counter value + vldr $t0#lo,[sp,#4*(16+0)] @ one + + ldmia sp,{@t[0]-@t[3]} @ load key material + add @x[0],@x[0],@t[0] @ accumulate key material + ldr @t[0],[r12],#16 @ load input + vst1.8 {$a2-$b2},[r14]! + add @x[1],@x[1],@t[1] + ldr @t[1],[r12,#-12] + vst1.8 {$c2-$d2},[r14]! + add @x[2],@x[2],@t[2] + ldr @t[2],[r12,#-8] + add @x[3],@x[3],@t[3] + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif + eor @x[0],@x[0],@t[0] @ xor with input + add @t[0],sp,#4*(4) + eor @x[1],@x[1],@t[1] + str @x[0],[r14],#16 @ store output + eor @x[2],@x[2],@t[2] + str @x[1],[r14,#-12] + eor @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#13 @ accumulate key material + ldr @t[0],[r12],#16 @ load input + add @x[5],@t[1],@x[5],ror#13 + ldr @t[1],[r12,#-12] + add @x[6],@t[2],@x[6],ror#13 + ldr @t[2],[r12,#-8] + add @x[7],@t[3],@x[7],ror#13 + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + eor @x[4],@x[4],@t[0] + add @t[0],sp,#4*(8) + eor @x[5],@x[5],@t[1] + str @x[4],[r14],#16 @ store output + eor @x[6],@x[6],@t[2] + str @x[5],[r14,#-12] + eor @x[7],@x[7],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[6],[r14,#-8] + add @x[0],sp,#4*(16+8) + str @x[7],[r14,#-4] + + ldmia @x[0],{@x[0]-@x[7]} @ load second half + + add @x[0],@x[0],@t[0] @ accumulate key material + ldr @t[0],[r12],#16 @ load input + add @x[1],@x[1],@t[1] + ldr @t[1],[r12,#-12] +# ifdef __thumb2__ + it hi +# endif + strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it + add @x[2],@x[2],@t[2] + ldr @t[2],[r12,#-8] +# ifdef __thumb2__ + it hi +# endif + strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it + add @x[3],@x[3],@t[3] + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] +# endif + eor @x[0],@x[0],@t[0] + add @t[0],sp,#4*(12) + eor @x[1],@x[1],@t[1] + str @x[0],[r14],#16 @ store output + eor @x[2],@x[2],@t[2] + str @x[1],[r14,#-12] + eor @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + str @x[2],[r14,#-8] + str @x[3],[r14,#-4] + + add @x[4],@t[0],@x[4],ror#24 @ accumulate key material + add @t[0],@t[0],#4 @ next counter value + add @x[5],@t[1],@x[5],ror#24 + str @t[0],[sp,#4*(12)] @ save next counter value + ldr @t[0],[r12],#16 @ load input + add @x[6],@t[2],@x[6],ror#24 + add @x[4],@x[4],#3 @ counter+3 + ldr @t[1],[r12,#-12] + add @x[7],@t[3],@x[7],ror#24 + ldr @t[2],[r12,#-8] + ldr @t[3],[r12,#-4] +# ifdef __ARMEB__ + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + eor @x[4],@x[4],@t[0] +# ifdef __thumb2__ + it hi +# endif + ldrhi @t[0],[sp,#4*(32+2)] @ re-load len + eor @x[5],@x[5],@t[1] + eor @x[6],@x[6],@t[2] + str @x[4],[r14],#16 @ store output + eor @x[7],@x[7],@t[3] + str @x[5],[r14,#-12] + sub @t[3],@t[0],#64*4 @ len-=64*4 + str @x[6],[r14,#-8] + str @x[7],[r14,#-4] + bhi .Loop_neon_outer + + b .Ldone_neon + +.align 4 +.Lbreak_neon: + @ harmonize NEON and integer-only stack frames: load data + @ from NEON frame, 
but save to integer-only one; distance + @ between the two is 4*(32+4+16-32)=4*(20). + + str @t[3], [sp,#4*(20+32+2)] @ save len + add @t[3],sp,#4*(32+4) + str r12, [sp,#4*(20+32+1)] @ save inp + str r14, [sp,#4*(20+32+0)] @ save out + + ldr @x[12],[sp,#4*(16+10)] + ldr @x[14],[sp,#4*(16+11)] + vldmia @t[3],{d8-d15} @ fulfill ABI requirement + str @x[12],[sp,#4*(20+16+10)] @ copy "@x[10]" + str @x[14],[sp,#4*(20+16+11)] @ copy "@x[11]" + + ldr @t[3], [sp,#4*(15)] + mov @x[4],@x[4],ror#19 @ twist b[0..3] + ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load + mov @x[5],@x[5],ror#19 + ldr @t[2], [sp,#4*(13)] + mov @x[6],@x[6],ror#19 + ldr @x[14],[sp,#4*(14)] + mov @x[7],@x[7],ror#19 + mov @t[3],@t[3],ror#8 @ twist d[0..3] + mov @x[12],@x[12],ror#8 + mov @t[2],@t[2],ror#8 + mov @x[14],@x[14],ror#8 + str @t[3], [sp,#4*(20+16+15)] + add @t[3],sp,#4*(20) + vst1.32 {$a0-$b0},[@t[3]]! @ copy key + add sp,sp,#4*(20) @ switch frame + vst1.32 {$c0-$d0},[@t[3]] + mov @t[3],#10 + b .Loop @ go integer-only + +.align 4 +.Ltail_neon: + cmp @t[3],#64*3 + bhs .L192_or_more_neon + cmp @t[3],#64*2 + bhs .L128_or_more_neon + cmp @t[3],#64*1 + bhs .L64_or_more_neon + + add @t[0],sp,#4*(8) + vst1.8 {$a0-$b0},[sp] + add @t[2],sp,#4*(0) + vst1.8 {$c0-$d0},[@t[0]] + b .Loop_tail_neon + +.align 4 +.L64_or_more_neon: + vld1.8 {$t0-$t1},[r12]! + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 + veor $b0,$b0,$t1 + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vst1.8 {$a0-$b0},[r14]! + vst1.8 {$c0-$d0},[r14]! + + beq .Ldone_neon + + add @t[0],sp,#4*(8) + vst1.8 {$a1-$b1},[sp] + add @t[2],sp,#4*(0) + vst1.8 {$c1-$d1},[@t[0]] + sub @t[3],@t[3],#64*1 @ len-=64*1 + b .Loop_tail_neon + +.align 4 +.L128_or_more_neon: + vld1.8 {$t0-$t1},[r12]! + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 + veor $b0,$b0,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a1,$a1,$t0 + veor $b1,$b1,$t1 + vst1.8 {$a0-$b0},[r14]! + veor $c1,$c1,$t2 + vst1.8 {$c0-$d0},[r14]! + veor $d1,$d1,$t3 + vst1.8 {$a1-$b1},[r14]! + vst1.8 {$c1-$d1},[r14]! + + beq .Ldone_neon + + add @t[0],sp,#4*(8) + vst1.8 {$a2-$b2},[sp] + add @t[2],sp,#4*(0) + vst1.8 {$c2-$d2},[@t[0]] + sub @t[3],@t[3],#64*2 @ len-=64*2 + b .Loop_tail_neon + +.align 4 +.L192_or_more_neon: + vld1.8 {$t0-$t1},[r12]! + vld1.8 {$t2-$t3},[r12]! + veor $a0,$a0,$t0 + veor $b0,$b0,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c0,$c0,$t2 + veor $d0,$d0,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a1,$a1,$t0 + veor $b1,$b1,$t1 + vld1.8 {$t0-$t1},[r12]! + veor $c1,$c1,$t2 + vst1.8 {$a0-$b0},[r14]! + veor $d1,$d1,$t3 + vld1.8 {$t2-$t3},[r12]! + + veor $a2,$a2,$t0 + vst1.8 {$c0-$d0},[r14]! + veor $b2,$b2,$t1 + vst1.8 {$a1-$b1},[r14]! + veor $c2,$c2,$t2 + vst1.8 {$c1-$d1},[r14]! + veor $d2,$d2,$t3 + vst1.8 {$a2-$b2},[r14]! + vst1.8 {$c2-$d2},[r14]! 
+ + beq .Ldone_neon + + ldmia sp,{@t[0]-@t[3]} @ load key material + add @x[0],@x[0],@t[0] @ accumulate key material + add @t[0],sp,#4*(4) + add @x[1],@x[1],@t[1] + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + + add @x[4],@t[0],@x[4],ror#13 @ accumulate key material + add @t[0],sp,#4*(8) + add @x[5],@t[1],@x[5],ror#13 + add @x[6],@t[2],@x[6],ror#13 + add @x[7],@t[3],@x[7],ror#13 + ldmia @t[0],{@t[0]-@t[3]} @ load key material +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + stmia sp,{@x[0]-@x[7]} + add @x[0],sp,#4*(16+8) + + ldmia @x[0],{@x[0]-@x[7]} @ load second half + + add @x[0],@x[0],@t[0] @ accumulate key material + add @t[0],sp,#4*(12) + add @x[1],@x[1],@t[1] + add @x[2],@x[2],@t[2] + add @x[3],@x[3],@t[3] + ldmia @t[0],{@t[0]-@t[3]} @ load key material + + add @x[4],@t[0],@x[4],ror#24 @ accumulate key material + add @t[0],sp,#4*(8) + add @x[5],@t[1],@x[5],ror#24 + add @x[4],@x[4],#3 @ counter+3 + add @x[6],@t[2],@x[6],ror#24 + add @x[7],@t[3],@x[7],ror#24 + ldr @t[3],[sp,#4*(32+2)] @ re-load len +# ifdef __ARMEB__ + rev @x[0],@x[0] + rev @x[1],@x[1] + rev @x[2],@x[2] + rev @x[3],@x[3] + rev @x[4],@x[4] + rev @x[5],@x[5] + rev @x[6],@x[6] + rev @x[7],@x[7] +# endif + stmia @t[0],{@x[0]-@x[7]} + add @t[2],sp,#4*(0) + sub @t[3],@t[3],#64*3 @ len-=64*3 + +.Loop_tail_neon: + ldrb @t[0],[@t[2]],#1 @ read buffer on stack + ldrb @t[1],[r12],#1 @ read input + subs @t[3],@t[3],#1 + eor @t[0],@t[0],@t[1] + strb @t[0],[r14],#1 @ store output + bne .Loop_tail_neon + +.Ldone_neon: + add sp,sp,#4*(32+4) + vldmia sp,{d8-d15} + add sp,sp,#4*(16+3) + ldmia sp!,{r4-r11,pc} +.size ChaCha20_neon,.-ChaCha20_neon +# ifndef __KERNEL__ +.comm OPENSSL_armcap_P,4,4 +# endif +#endif +___ +}}} + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/@/ and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo; + + print $_,"\n"; +} +close STDOUT; diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl b/net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl new file mode 100644 index 000000000000..ac14a9924165 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-arm64.pl @@ -0,0 +1,1163 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# June 2015 +# +# ChaCha20 for ARMv8. +# +# Performance in cycles per byte out of large buffer. 
+# +# IALU/gcc-4.9 3xNEON+1xIALU 6xNEON+2xIALU(*) +# +# Apple A7 5.50/+49% 3.33 1.70 +# Cortex-A53 8.40/+80% 4.72 4.72(**) +# Cortex-A57 8.06/+43% 4.90 4.43(***) +# Denver 4.50/+82% 2.63 2.67(**) +# X-Gene 9.50/+46% 8.82 8.89(**) +# Mongoose 8.00/+44% 3.64 3.25(***) +# Kryo 8.17/+50% 4.83 4.65(***) +# +# (*) since no non-Apple processor exhibits significantly better +# performance, the code path is #ifdef __APPLE__-ed; +# (**) it's expected that doubling interleave factor doesn't help +# all processors, only those with higher NEON latency and +# higher instruction issue rate; +# (***) expected improvement was actually higher; + +$flavour=shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +sub AUTOLOAD() # thunk [simplified] x86-style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; + my $arg = pop; + $arg = "#$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; +} + +my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4)); + +my @x=map("x$_",(5..17,19..21)); +my @d=map("x$_",(22..28,30)); + +sub ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); + + ( + "&add_32 (@x[$a0],@x[$a0],@x[$b0])", + "&add_32 (@x[$a1],@x[$a1],@x[$b1])", + "&add_32 (@x[$a2],@x[$a2],@x[$b2])", + "&add_32 (@x[$a3],@x[$a3],@x[$b3])", + "&eor_32 (@x[$d0],@x[$d0],@x[$a0])", + "&eor_32 (@x[$d1],@x[$d1],@x[$a1])", + "&eor_32 (@x[$d2],@x[$d2],@x[$a2])", + "&eor_32 (@x[$d3],@x[$d3],@x[$a3])", + "&ror_32 (@x[$d0],@x[$d0],16)", + "&ror_32 (@x[$d1],@x[$d1],16)", + "&ror_32 (@x[$d2],@x[$d2],16)", + "&ror_32 (@x[$d3],@x[$d3],16)", + + "&add_32 (@x[$c0],@x[$c0],@x[$d0])", + "&add_32 (@x[$c1],@x[$c1],@x[$d1])", + "&add_32 (@x[$c2],@x[$c2],@x[$d2])", + "&add_32 (@x[$c3],@x[$c3],@x[$d3])", + "&eor_32 (@x[$b0],@x[$b0],@x[$c0])", + "&eor_32 (@x[$b1],@x[$b1],@x[$c1])", + "&eor_32 (@x[$b2],@x[$b2],@x[$c2])", + "&eor_32 (@x[$b3],@x[$b3],@x[$c3])", + "&ror_32 (@x[$b0],@x[$b0],20)", + "&ror_32 (@x[$b1],@x[$b1],20)", + "&ror_32 (@x[$b2],@x[$b2],20)", + "&ror_32 (@x[$b3],@x[$b3],20)", + + "&add_32 (@x[$a0],@x[$a0],@x[$b0])", + "&add_32 (@x[$a1],@x[$a1],@x[$b1])", + "&add_32 (@x[$a2],@x[$a2],@x[$b2])", + "&add_32 (@x[$a3],@x[$a3],@x[$b3])", + "&eor_32 (@x[$d0],@x[$d0],@x[$a0])", + "&eor_32 (@x[$d1],@x[$d1],@x[$a1])", + "&eor_32 (@x[$d2],@x[$d2],@x[$a2])", + "&eor_32 (@x[$d3],@x[$d3],@x[$a3])", + "&ror_32 (@x[$d0],@x[$d0],24)", + "&ror_32 (@x[$d1],@x[$d1],24)", + "&ror_32 (@x[$d2],@x[$d2],24)", + "&ror_32 (@x[$d3],@x[$d3],24)", + + "&add_32 (@x[$c0],@x[$c0],@x[$d0])", + "&add_32 (@x[$c1],@x[$c1],@x[$d1])", + "&add_32 (@x[$c2],@x[$c2],@x[$d2])", + "&add_32 (@x[$c3],@x[$c3],@x[$d3])", + "&eor_32 (@x[$b0],@x[$b0],@x[$c0])", + "&eor_32 (@x[$b1],@x[$b1],@x[$c1])", + "&eor_32 (@x[$b2],@x[$b2],@x[$c2])", + "&eor_32 (@x[$b3],@x[$b3],@x[$c3])", + "&ror_32 (@x[$b0],@x[$b0],25)", + "&ror_32 (@x[$b1],@x[$b1],25)", + "&ror_32 (@x[$b2],@x[$b2],25)", + "&ror_32 (@x[$b3],@x[$b3],25)" + ); +} + +$code.=<<___; +#ifndef __KERNEL__ 
+# include "arm_arch.h" +.extern OPENSSL_armcap_P +#else +# define ChaCha20_ctr32 chacha20_arm +# define ChaCha20_neon chacha20_neon +#endif + +.text + +.align 5 +.Lsigma: +.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral +.Lone: +.long 1,0,0,0 +#ifndef __KERNEL__ +.LOPENSSL_armcap_P: +# ifdef __ILP32__ +.long OPENSSL_armcap_P-. +# else +.quad OPENSSL_armcap_P-. +# endif +#endif + +.globl ChaCha20_ctr32 +.type ChaCha20_ctr32,%function +.align 5 +ChaCha20_ctr32: + cbz $len,.Labort +#ifndef __KERNEL__ + adr @x[0],.LOPENSSL_armcap_P + cmp $len,#192 + b.lo .Lshort +# ifdef __ILP32__ + ldrsw @x[1],[@x[0]] +# else + ldr @x[1],[@x[0]] +# endif + ldr w17,[@x[1],@x[0]] + tst w17,#ARMV7_NEON + b.ne ChaCha20_neon + +.Lshort: +#endif + stp x29,x30,[sp,#-96]! + add x29,sp,#0 + + adr @x[0],.Lsigma + stp x19,x20,[sp,#16] + stp x21,x22,[sp,#32] + stp x23,x24,[sp,#48] + stp x25,x26,[sp,#64] + stp x27,x28,[sp,#80] + sub sp,sp,#64 + + ldp @d[0],@d[1],[@x[0]] // load sigma + ldp @d[2],@d[3],[$key] // load key + ldp @d[4],@d[5],[$key,#16] + ldp @d[6],@d[7],[$ctr] // load counter +#ifdef __AARCH64EB__ + ror @d[2],@d[2],#32 + ror @d[3],@d[3],#32 + ror @d[4],@d[4],#32 + ror @d[5],@d[5],#32 + ror @d[6],@d[6],#32 + ror @d[7],@d[7],#32 +#endif + +.Loop_outer: + mov.32 @x[0],@d[0] // unpack key block + lsr @x[1],@d[0],#32 + mov.32 @x[2],@d[1] + lsr @x[3],@d[1],#32 + mov.32 @x[4],@d[2] + lsr @x[5],@d[2],#32 + mov.32 @x[6],@d[3] + lsr @x[7],@d[3],#32 + mov.32 @x[8],@d[4] + lsr @x[9],@d[4],#32 + mov.32 @x[10],@d[5] + lsr @x[11],@d[5],#32 + mov.32 @x[12],@d[6] + lsr @x[13],@d[6],#32 + mov.32 @x[14],@d[7] + lsr @x[15],@d[7],#32 + + mov $ctr,#10 + subs $len,$len,#64 +.Loop: + sub $ctr,$ctr,#1 +___ + foreach (&ROUND(0, 4, 8,12)) { eval; } + foreach (&ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + cbnz $ctr,.Loop + + add.32 @x[0],@x[0],@d[0] // accumulate key block + add @x[1],@x[1],@d[0],lsr#32 + add.32 @x[2],@x[2],@d[1] + add @x[3],@x[3],@d[1],lsr#32 + add.32 @x[4],@x[4],@d[2] + add @x[5],@x[5],@d[2],lsr#32 + add.32 @x[6],@x[6],@d[3] + add @x[7],@x[7],@d[3],lsr#32 + add.32 @x[8],@x[8],@d[4] + add @x[9],@x[9],@d[4],lsr#32 + add.32 @x[10],@x[10],@d[5] + add @x[11],@x[11],@d[5],lsr#32 + add.32 @x[12],@x[12],@d[6] + add @x[13],@x[13],@d[6],lsr#32 + add.32 @x[14],@x[14],@d[7] + add @x[15],@x[15],@d[7],lsr#32 + + b.lo .Ltail + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor @x[10],@x[10],@x[11] + eor @x[12],@x[12],@x[13] + eor @x[14],@x[14],@x[15] + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#1 // increment counter + stp @x[4],@x[6],[$out,#16] + stp @x[8],@x[10],[$out,#32] + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + + b.hi .Loop_outer + + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 +.Labort: + ret + +.align 4 
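Editorial aside, for reference while reading the generated code: the ROUND helper above expands to four interleaved copies of the standard ChaCha quarter-round, one per column (or diagonal), with the rotations written as right-rotates by 16/20/24/25 because a right-rotate by 32-n is a left-rotate by n. A plain C version of a single quarter-round, given purely as an illustrative sketch (the helper name is not part of this patch), looks like:

	#include <stdint.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

	/* One ChaCha quarter-round over the state words at indices a, b, c, d. */
	static void chacha_quarter_round(uint32_t x[16], int a, int b, int c, int d)
	{
		x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 16);
		x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 12);
		x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 8);
		x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 7);
	}

The two foreach (&ROUND(...)) calls in .Loop correspond to the column pass starting at (0,4,8,12) and the diagonal pass starting at (0,5,10,15); ten such double rounds make up ChaCha20.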
+.Ltail: + add $len,$len,#64 +.Less_than_64: + sub $out,$out,#1 + add $inp,$inp,$len + add $out,$out,$len + add $ctr,sp,$len + neg $len,$len + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + stp @x[0],@x[2],[sp,#0] + stp @x[4],@x[6],[sp,#16] + stp @x[8],@x[10],[sp,#32] + stp @x[12],@x[14],[sp,#48] + +.Loop_tail: + ldrb w10,[$inp,$len] + ldrb w11,[$ctr,$len] + add $len,$len,#1 + eor w10,w10,w11 + strb w10,[$out,$len] + cbnz $len,.Loop_tail + + stp xzr,xzr,[sp,#0] + stp xzr,xzr,[sp,#16] + stp xzr,xzr,[sp,#32] + stp xzr,xzr,[sp,#48] + + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret +.size ChaCha20_ctr32,.-ChaCha20_ctr32 +___ + +{{{ +my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) = + map("v$_.4s",(0..7,16..23)); +my (@K)=map("v$_.4s",(24..30)); +my $ONE="v31.4s"; + +sub NEONROUND { +my $odd = pop; +my ($a,$b,$c,$d,$t)=@_; + + ( + "&add ('$a','$a','$b')", + "&eor ('$d','$d','$a')", + "&rev32_16 ('$d','$d')", # vrot ($d,16) + + "&add ('$c','$c','$d')", + "&eor ('$t','$b','$c')", + "&ushr ('$b','$t',20)", + "&sli ('$b','$t',12)", + + "&add ('$a','$a','$b')", + "&eor ('$t','$d','$a')", + "&ushr ('$d','$t',24)", + "&sli ('$d','$t',8)", + + "&add ('$c','$c','$d')", + "&eor ('$t','$b','$c')", + "&ushr ('$b','$t',25)", + "&sli ('$b','$t',7)", + + "&ext ('$a','$a','$a',$odd?4:12)", + "&ext ('$d','$d','$d',8)", + "&ext ('$c','$c','$c',$odd?12:4)" + ); +} + +$code.=<<___; +#if !defined(__KERNEL__) || defined(CONFIG_KERNEL_MODE_NEON) +#ifdef __KERNEL__ +.globl ChaCha20_neon +.type ChaCha20_neon,%function +#endif +.type ChaCha20_neon,%function +.align 5 +ChaCha20_neon: + stp x29,x30,[sp,#-96]! 
+ add x29,sp,#0 + + adr @x[0],.Lsigma + stp x19,x20,[sp,#16] + stp x21,x22,[sp,#32] + stp x23,x24,[sp,#48] + stp x25,x26,[sp,#64] + stp x27,x28,[sp,#80] +#ifdef __APPLE__ + cmp $len,#512 + b.hs .L512_or_more_neon +#endif + + sub sp,sp,#64 + + ldp @d[0],@d[1],[@x[0]] // load sigma + ld1 {@K[0]},[@x[0]],#16 + ldp @d[2],@d[3],[$key] // load key + ldp @d[4],@d[5],[$key,#16] + ld1 {@K[1],@K[2]},[$key] + ldp @d[6],@d[7],[$ctr] // load counter + ld1 {@K[3]},[$ctr] + ld1 {$ONE},[@x[0]] +#ifdef __AARCH64EB__ + rev64 @K[0],@K[0] + ror @d[2],@d[2],#32 + ror @d[3],@d[3],#32 + ror @d[4],@d[4],#32 + ror @d[5],@d[5],#32 + ror @d[6],@d[6],#32 + ror @d[7],@d[7],#32 +#endif + add @K[3],@K[3],$ONE // += 1 + add @K[4],@K[3],$ONE + add @K[5],@K[4],$ONE + shl $ONE,$ONE,#2 // 1 -> 4 + +.Loop_outer_neon: + mov.32 @x[0],@d[0] // unpack key block + lsr @x[1],@d[0],#32 + mov $A0,@K[0] + mov.32 @x[2],@d[1] + lsr @x[3],@d[1],#32 + mov $A1,@K[0] + mov.32 @x[4],@d[2] + lsr @x[5],@d[2],#32 + mov $A2,@K[0] + mov.32 @x[6],@d[3] + mov $B0,@K[1] + lsr @x[7],@d[3],#32 + mov $B1,@K[1] + mov.32 @x[8],@d[4] + mov $B2,@K[1] + lsr @x[9],@d[4],#32 + mov $D0,@K[3] + mov.32 @x[10],@d[5] + mov $D1,@K[4] + lsr @x[11],@d[5],#32 + mov $D2,@K[5] + mov.32 @x[12],@d[6] + mov $C0,@K[2] + lsr @x[13],@d[6],#32 + mov $C1,@K[2] + mov.32 @x[14],@d[7] + mov $C2,@K[2] + lsr @x[15],@d[7],#32 + + mov $ctr,#10 + subs $len,$len,#256 +.Loop_neon: + sub $ctr,$ctr,#1 +___ + my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0); + my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0); + my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0); + my @thread3=&ROUND(0,4,8,12); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } + + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1); + @thread3=&ROUND(0,5,10,15); + + foreach (@thread0) { + eval; eval(shift(@thread3)); + eval(shift(@thread1)); eval(shift(@thread3)); + eval(shift(@thread2)); eval(shift(@thread3)); + } +$code.=<<___; + cbnz $ctr,.Loop_neon + + add.32 @x[0],@x[0],@d[0] // accumulate key block + add $A0,$A0,@K[0] + add @x[1],@x[1],@d[0],lsr#32 + add $A1,$A1,@K[0] + add.32 @x[2],@x[2],@d[1] + add $A2,$A2,@K[0] + add @x[3],@x[3],@d[1],lsr#32 + add $C0,$C0,@K[2] + add.32 @x[4],@x[4],@d[2] + add $C1,$C1,@K[2] + add @x[5],@x[5],@d[2],lsr#32 + add $C2,$C2,@K[2] + add.32 @x[6],@x[6],@d[3] + add $D0,$D0,@K[3] + add @x[7],@x[7],@d[3],lsr#32 + add.32 @x[8],@x[8],@d[4] + add $D1,$D1,@K[4] + add @x[9],@x[9],@d[4],lsr#32 + add.32 @x[10],@x[10],@d[5] + add $D2,$D2,@K[5] + add @x[11],@x[11],@d[5],lsr#32 + add.32 @x[12],@x[12],@d[6] + add $B0,$B0,@K[1] + add @x[13],@x[13],@d[6],lsr#32 + add.32 @x[14],@x[14],@d[7] + add $B1,$B1,@K[1] + add @x[15],@x[15],@d[7],lsr#32 + add $B2,$B2,@K[1] + + b.lo .Ltail_neon + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + ld1.8 {$T0-$T3},[$inp],#64 + eor @x[0],@x[0],@x[1] + eor 
@x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor $A0,$A0,$T0 + eor @x[10],@x[10],@x[11] + eor $B0,$B0,$T1 + eor @x[12],@x[12],@x[13] + eor $C0,$C0,$T2 + eor @x[14],@x[14],@x[15] + eor $D0,$D0,$T3 + ld1.8 {$T0-$T3},[$inp],#64 + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#4 // increment counter + stp @x[4],@x[6],[$out,#16] + add @K[3],@K[3],$ONE // += 4 + stp @x[8],@x[10],[$out,#32] + add @K[4],@K[4],$ONE + stp @x[12],@x[14],[$out,#48] + add @K[5],@K[5],$ONE + add $out,$out,#64 + + st1.8 {$A0-$D0},[$out],#64 + ld1.8 {$A0-$D0},[$inp],#64 + + eor $A1,$A1,$T0 + eor $B1,$B1,$T1 + eor $C1,$C1,$T2 + eor $D1,$D1,$T3 + st1.8 {$A1-$D1},[$out],#64 + + eor $A2,$A2,$A0 + eor $B2,$B2,$B0 + eor $C2,$C2,$C0 + eor $D2,$D2,$D0 + st1.8 {$A2-$D2},[$out],#64 + + b.hi .Loop_outer_neon + + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret + +.Ltail_neon: + add $len,$len,#256 + cmp $len,#64 + b.lo .Less_than_64 + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +#ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +#endif + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor @x[10],@x[10],@x[11] + eor @x[12],@x[12],@x[13] + eor @x[14],@x[14],@x[15] + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#4 // increment counter + stp @x[4],@x[6],[$out,#16] + stp @x[8],@x[10],[$out,#32] + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + b.eq .Ldone_neon + sub $len,$len,#64 + cmp $len,#64 + b.lo .Less_than_128 + + ld1.8 {$T0-$T3},[$inp],#64 + eor $A0,$A0,$T0 + eor $B0,$B0,$T1 + eor $C0,$C0,$T2 + eor $D0,$D0,$T3 + st1.8 {$A0-$D0},[$out],#64 + b.eq .Ldone_neon + sub $len,$len,#64 + cmp $len,#64 + b.lo .Less_than_192 + + ld1.8 {$T0-$T3},[$inp],#64 + eor $A1,$A1,$T0 + eor $B1,$B1,$T1 + eor $C1,$C1,$T2 + eor $D1,$D1,$T3 + st1.8 {$A1-$D1},[$out],#64 + b.eq .Ldone_neon + sub $len,$len,#64 + + st1.8 {$A2-$D2},[sp] + b .Last_neon + +.Less_than_128: + st1.8 {$A0-$D0},[sp] + b .Last_neon +.Less_than_192: + st1.8 {$A1-$D1},[sp] + b .Last_neon + +.align 4 +.Last_neon: + sub $out,$out,#1 + add $inp,$inp,$len + add $out,$out,$len + add $ctr,sp,$len + neg $len,$len + +.Loop_tail_neon: + ldrb w10,[$inp,$len] + ldrb w11,[$ctr,$len] + add $len,$len,#1 + eor w10,w10,w11 + strb w10,[$out,$len] + cbnz $len,.Loop_tail_neon + + stp xzr,xzr,[sp,#0] + stp xzr,xzr,[sp,#16] + stp xzr,xzr,[sp,#32] + stp xzr,xzr,[sp,#48] + +.Ldone_neon: + ldp x19,x20,[x29,#16] + add sp,sp,#64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret +.size ChaCha20_neon,.-ChaCha20_neon +___ +{ +my ($T0,$T1,$T2,$T3,$T4,$T5)=@K; +my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2, + $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23)); + +$code.=<<___; +#ifdef __APPLE__ +.type ChaCha20_512_neon,%function +.align 5 +ChaCha20_512_neon: + stp 
x29,x30,[sp,#-96]! + add x29,sp,#0 + + adr @x[0],.Lsigma + stp x19,x20,[sp,#16] + stp x21,x22,[sp,#32] + stp x23,x24,[sp,#48] + stp x25,x26,[sp,#64] + stp x27,x28,[sp,#80] + +.L512_or_more_neon: + sub sp,sp,#128+64 + + ldp @d[0],@d[1],[@x[0]] // load sigma + ld1 {@K[0]},[@x[0]],#16 + ldp @d[2],@d[3],[$key] // load key + ldp @d[4],@d[5],[$key,#16] + ld1 {@K[1],@K[2]},[$key] + ldp @d[6],@d[7],[$ctr] // load counter + ld1 {@K[3]},[$ctr] + ld1 {$ONE},[@x[0]] +# ifdef __AARCH64EB__ + rev64 @K[0],@K[0] + ror @d[2],@d[2],#32 + ror @d[3],@d[3],#32 + ror @d[4],@d[4],#32 + ror @d[5],@d[5],#32 + ror @d[6],@d[6],#32 + ror @d[7],@d[7],#32 +# endif + add @K[3],@K[3],$ONE // += 1 + stp @K[0],@K[1],[sp,#0] // off-load key block, invariant part + add @K[3],@K[3],$ONE // not typo + str @K[2],[sp,#32] + add @K[4],@K[3],$ONE + add @K[5],@K[4],$ONE + add @K[6],@K[5],$ONE + shl $ONE,$ONE,#2 // 1 -> 4 + + stp d8,d9,[sp,#128+0] // meet ABI requirements + stp d10,d11,[sp,#128+16] + stp d12,d13,[sp,#128+32] + stp d14,d15,[sp,#128+48] + + sub $len,$len,#512 // not typo + +.Loop_outer_512_neon: + mov $A0,@K[0] + mov $A1,@K[0] + mov $A2,@K[0] + mov $A3,@K[0] + mov $A4,@K[0] + mov $A5,@K[0] + mov $B0,@K[1] + mov.32 @x[0],@d[0] // unpack key block + mov $B1,@K[1] + lsr @x[1],@d[0],#32 + mov $B2,@K[1] + mov.32 @x[2],@d[1] + mov $B3,@K[1] + lsr @x[3],@d[1],#32 + mov $B4,@K[1] + mov.32 @x[4],@d[2] + mov $B5,@K[1] + lsr @x[5],@d[2],#32 + mov $D0,@K[3] + mov.32 @x[6],@d[3] + mov $D1,@K[4] + lsr @x[7],@d[3],#32 + mov $D2,@K[5] + mov.32 @x[8],@d[4] + mov $D3,@K[6] + lsr @x[9],@d[4],#32 + mov $C0,@K[2] + mov.32 @x[10],@d[5] + mov $C1,@K[2] + lsr @x[11],@d[5],#32 + add $D4,$D0,$ONE // +4 + mov.32 @x[12],@d[6] + add $D5,$D1,$ONE // +4 + lsr @x[13],@d[6],#32 + mov $C2,@K[2] + mov.32 @x[14],@d[7] + mov $C3,@K[2] + lsr @x[15],@d[7],#32 + mov $C4,@K[2] + stp @K[3],@K[4],[sp,#48] // off-load key block, variable part + mov $C5,@K[2] + str @K[5],[sp,#80] + + mov $ctr,#5 + subs $len,$len,#512 +.Loop_upper_neon: + sub $ctr,$ctr,#1 +___ + my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0); + my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0); + my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0); + my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0); + my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0); + my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0); + my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + my $diff = ($#thread0+1)*6 - $#thread67 - 1; + my $i = 0; + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } + + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1); + @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1); + @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1); + @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1); + @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } +$code.=<<___; + cbnz $ctr,.Loop_upper_neon + + add.32 @x[0],@x[0],@d[0] // accumulate key block + add @x[1],@x[1],@d[0],lsr#32 + add.32 @x[2],@x[2],@d[1] + add @x[3],@x[3],@d[1],lsr#32 
+ add.32 @x[4],@x[4],@d[2] + add @x[5],@x[5],@d[2],lsr#32 + add.32 @x[6],@x[6],@d[3] + add @x[7],@x[7],@d[3],lsr#32 + add.32 @x[8],@x[8],@d[4] + add @x[9],@x[9],@d[4],lsr#32 + add.32 @x[10],@x[10],@d[5] + add @x[11],@x[11],@d[5],lsr#32 + add.32 @x[12],@x[12],@d[6] + add @x[13],@x[13],@d[6],lsr#32 + add.32 @x[14],@x[14],@d[7] + add @x[15],@x[15],@d[7],lsr#32 + + add @x[0],@x[0],@x[1],lsl#32 // pack + add @x[2],@x[2],@x[3],lsl#32 + ldp @x[1],@x[3],[$inp,#0] // load input + add @x[4],@x[4],@x[5],lsl#32 + add @x[6],@x[6],@x[7],lsl#32 + ldp @x[5],@x[7],[$inp,#16] + add @x[8],@x[8],@x[9],lsl#32 + add @x[10],@x[10],@x[11],lsl#32 + ldp @x[9],@x[11],[$inp,#32] + add @x[12],@x[12],@x[13],lsl#32 + add @x[14],@x[14],@x[15],lsl#32 + ldp @x[13],@x[15],[$inp,#48] + add $inp,$inp,#64 +# ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +# endif + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor @x[10],@x[10],@x[11] + eor @x[12],@x[12],@x[13] + eor @x[14],@x[14],@x[15] + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#1 // increment counter + mov.32 @x[0],@d[0] // unpack key block + lsr @x[1],@d[0],#32 + stp @x[4],@x[6],[$out,#16] + mov.32 @x[2],@d[1] + lsr @x[3],@d[1],#32 + stp @x[8],@x[10],[$out,#32] + mov.32 @x[4],@d[2] + lsr @x[5],@d[2],#32 + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + mov.32 @x[6],@d[3] + lsr @x[7],@d[3],#32 + mov.32 @x[8],@d[4] + lsr @x[9],@d[4],#32 + mov.32 @x[10],@d[5] + lsr @x[11],@d[5],#32 + mov.32 @x[12],@d[6] + lsr @x[13],@d[6],#32 + mov.32 @x[14],@d[7] + lsr @x[15],@d[7],#32 + + mov $ctr,#5 +.Loop_lower_neon: + sub $ctr,$ctr,#1 +___ + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0); + @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0); + @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0); + @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0); + @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } + + @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1); + @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1); + @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1); + @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1); + @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1); + @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1); + @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15)); + + foreach (@thread0) { + eval; eval(shift(@thread67)); + eval(shift(@thread1)); eval(shift(@thread67)); + eval(shift(@thread2)); eval(shift(@thread67)); + eval(shift(@thread3)); eval(shift(@thread67)); + eval(shift(@thread4)); eval(shift(@thread67)); + eval(shift(@thread5)); eval(shift(@thread67)); + } +$code.=<<___; + cbnz $ctr,.Loop_lower_neon + + add.32 @x[0],@x[0],@d[0] // accumulate key block + ldp @K[0],@K[1],[sp,#0] + add @x[1],@x[1],@d[0],lsr#32 + ldp @K[2],@K[3],[sp,#32] + add.32 @x[2],@x[2],@d[1] + ldp @K[4],@K[5],[sp,#64] + add @x[3],@x[3],@d[1],lsr#32 + add $A0,$A0,@K[0] + add.32 @x[4],@x[4],@d[2] + add $A1,$A1,@K[0] + add @x[5],@x[5],@d[2],lsr#32 + add $A2,$A2,@K[0] + add.32 @x[6],@x[6],@d[3] + add $A3,$A3,@K[0] + add @x[7],@x[7],@d[3],lsr#32 + add $A4,$A4,@K[0] + add.32 
@x[8],@x[8],@d[4] + add $A5,$A5,@K[0] + add @x[9],@x[9],@d[4],lsr#32 + add $C0,$C0,@K[2] + add.32 @x[10],@x[10],@d[5] + add $C1,$C1,@K[2] + add @x[11],@x[11],@d[5],lsr#32 + add $C2,$C2,@K[2] + add.32 @x[12],@x[12],@d[6] + add $C3,$C3,@K[2] + add @x[13],@x[13],@d[6],lsr#32 + add $C4,$C4,@K[2] + add.32 @x[14],@x[14],@d[7] + add $C5,$C5,@K[2] + add @x[15],@x[15],@d[7],lsr#32 + add $D4,$D4,$ONE // +4 + add @x[0],@x[0],@x[1],lsl#32 // pack + add $D5,$D5,$ONE // +4 + add @x[2],@x[2],@x[3],lsl#32 + add $D0,$D0,@K[3] + ldp @x[1],@x[3],[$inp,#0] // load input + add $D1,$D1,@K[4] + add @x[4],@x[4],@x[5],lsl#32 + add $D2,$D2,@K[5] + add @x[6],@x[6],@x[7],lsl#32 + add $D3,$D3,@K[6] + ldp @x[5],@x[7],[$inp,#16] + add $D4,$D4,@K[3] + add @x[8],@x[8],@x[9],lsl#32 + add $D5,$D5,@K[4] + add @x[10],@x[10],@x[11],lsl#32 + add $B0,$B0,@K[1] + ldp @x[9],@x[11],[$inp,#32] + add $B1,$B1,@K[1] + add @x[12],@x[12],@x[13],lsl#32 + add $B2,$B2,@K[1] + add @x[14],@x[14],@x[15],lsl#32 + add $B3,$B3,@K[1] + ldp @x[13],@x[15],[$inp,#48] + add $B4,$B4,@K[1] + add $inp,$inp,#64 + add $B5,$B5,@K[1] + +# ifdef __AARCH64EB__ + rev @x[0],@x[0] + rev @x[2],@x[2] + rev @x[4],@x[4] + rev @x[6],@x[6] + rev @x[8],@x[8] + rev @x[10],@x[10] + rev @x[12],@x[12] + rev @x[14],@x[14] +# endif + ld1.8 {$T0-$T3},[$inp],#64 + eor @x[0],@x[0],@x[1] + eor @x[2],@x[2],@x[3] + eor @x[4],@x[4],@x[5] + eor @x[6],@x[6],@x[7] + eor @x[8],@x[8],@x[9] + eor $A0,$A0,$T0 + eor @x[10],@x[10],@x[11] + eor $B0,$B0,$T1 + eor @x[12],@x[12],@x[13] + eor $C0,$C0,$T2 + eor @x[14],@x[14],@x[15] + eor $D0,$D0,$T3 + ld1.8 {$T0-$T3},[$inp],#64 + + stp @x[0],@x[2],[$out,#0] // store output + add @d[6],@d[6],#7 // increment counter + stp @x[4],@x[6],[$out,#16] + stp @x[8],@x[10],[$out,#32] + stp @x[12],@x[14],[$out,#48] + add $out,$out,#64 + st1.8 {$A0-$D0},[$out],#64 + + ld1.8 {$A0-$D0},[$inp],#64 + eor $A1,$A1,$T0 + eor $B1,$B1,$T1 + eor $C1,$C1,$T2 + eor $D1,$D1,$T3 + st1.8 {$A1-$D1},[$out],#64 + + ld1.8 {$A1-$D1},[$inp],#64 + eor $A2,$A2,$A0 + ldp @K[0],@K[1],[sp,#0] + eor $B2,$B2,$B0 + ldp @K[2],@K[3],[sp,#32] + eor $C2,$C2,$C0 + eor $D2,$D2,$D0 + st1.8 {$A2-$D2},[$out],#64 + + ld1.8 {$A2-$D2},[$inp],#64 + eor $A3,$A3,$A1 + eor $B3,$B3,$B1 + eor $C3,$C3,$C1 + eor $D3,$D3,$D1 + st1.8 {$A3-$D3},[$out],#64 + + ld1.8 {$A3-$D3},[$inp],#64 + eor $A4,$A4,$A2 + eor $B4,$B4,$B2 + eor $C4,$C4,$C2 + eor $D4,$D4,$D2 + st1.8 {$A4-$D4},[$out],#64 + + shl $A0,$ONE,#1 // 4 -> 8 + eor $A5,$A5,$A3 + eor $B5,$B5,$B3 + eor $C5,$C5,$C3 + eor $D5,$D5,$D3 + st1.8 {$A5-$D5},[$out],#64 + + add @K[3],@K[3],$A0 // += 8 + add @K[4],@K[4],$A0 + add @K[5],@K[5],$A0 + add @K[6],@K[6],$A0 + + b.hs .Loop_outer_512_neon + + adds $len,$len,#512 + ushr $A0,$ONE,#2 // 4 -> 1 + + ldp d8,d9,[sp,#128+0] // meet ABI requirements + ldp d10,d11,[sp,#128+16] + ldp d12,d13,[sp,#128+32] + ldp d14,d15,[sp,#128+48] + + stp @K[0],$ONE,[sp,#0] // wipe off-load area + stp @K[0],$ONE,[sp,#32] + stp @K[0],$ONE,[sp,#64] + + b.eq .Ldone_512_neon + + cmp $len,#192 + sub @K[3],@K[3],$A0 // -= 1 + sub @K[4],@K[4],$A0 + sub @K[5],@K[5],$A0 + add sp,sp,#128 + b.hs .Loop_outer_neon + + eor @K[1],@K[1],@K[1] + eor @K[2],@K[2],@K[2] + eor @K[3],@K[3],@K[3] + eor @K[4],@K[4],@K[4] + eor @K[5],@K[5],@K[5] + eor @K[6],@K[6],@K[6] + b .Loop_outer + +.Ldone_512_neon: + ldp x19,x20,[x29,#16] + add sp,sp,#128+64 + ldp x21,x22,[x29,#32] + ldp x23,x24,[x29,#48] + ldp x25,x26,[x29,#64] + ldp x27,x28,[x29,#80] + ldp x29,x30,[sp],#96 + ret +.size ChaCha20_512_neon,.-ChaCha20_512_neon +#endif +#endif +___ +} +}}} + +open SELF,$0; 
+while(<SELF>) { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + (s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1)) or + (m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1)) or + (s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1)) or + (m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1)) or + (s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1)); + + print $_,"\n"; +} +close STDOUT; # flush diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c b/net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c new file mode 100644 index 000000000000..96ce01e2c133 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-mips-glue.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +asmlinkage void chacha20_mips(u32 state[16], u8 *out, const u8 *in, + const size_t len); +static bool *const chacha20_nobs[] __initconst = { }; +static void __init chacha20_fpu_init(void) +{ +} + +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + chacha20_mips(ctx->state, dst, src, len); + return true; +} + +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + return false; +} diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-mips.S b/net/wireguard/crypto/zinc/chacha20/chacha20-mips.S new file mode 100644 index 000000000000..a81e02db95e7 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-mips.S @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved. + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#define MASK_U32 0x3c +#define CHACHA20_BLOCK_SIZE 64 +#define STACK_SIZE 32 + +#define X0 $t0 +#define X1 $t1 +#define X2 $t2 +#define X3 $t3 +#define X4 $t4 +#define X5 $t5 +#define X6 $t6 +#define X7 $t7 +#define X8 $t8 +#define X9 $t9 +#define X10 $v1 +#define X11 $s6 +#define X12 $s5 +#define X13 $s4 +#define X14 $s3 +#define X15 $s2 +/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */ +#define T0 $s1 +#define T1 $s0 +#define T(n) T ## n +#define X(n) X ## n + +/* Input arguments */ +#define STATE $a0 +#define OUT $a1 +#define IN $a2 +#define BYTES $a3 + +/* Output argument */ +/* NONCE[0] is kept in a register and not in memory. + * We don't want to touch original value in memory. + * Must be incremented every loop iteration. + */ +#define NONCE_0 $v0 + +/* SAVED_X and SAVED_CA are set in the jump table. + * Use regs which are overwritten on exit else we don't leak clear data. + * They are used to handling the last bytes which are not multiple of 4. 
+ */ +#define SAVED_X X15 +#define SAVED_CA $s7 + +#define IS_UNALIGNED $s7 + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define MSB 0 +#define LSB 3 +#define ROTx rotl +#define ROTR(n) rotr n, 24 +#define CPU_TO_LE32(n) \ + wsbh n; \ + rotr n, 16; +#else +#define MSB 3 +#define LSB 0 +#define ROTx rotr +#define CPU_TO_LE32(n) +#define ROTR(n) +#endif + +#define FOR_EACH_WORD(x) \ + x( 0); \ + x( 1); \ + x( 2); \ + x( 3); \ + x( 4); \ + x( 5); \ + x( 6); \ + x( 7); \ + x( 8); \ + x( 9); \ + x(10); \ + x(11); \ + x(12); \ + x(13); \ + x(14); \ + x(15); + +#define FOR_EACH_WORD_REV(x) \ + x(15); \ + x(14); \ + x(13); \ + x(12); \ + x(11); \ + x(10); \ + x( 9); \ + x( 8); \ + x( 7); \ + x( 6); \ + x( 5); \ + x( 4); \ + x( 3); \ + x( 2); \ + x( 1); \ + x( 0); + +#define PLUS_ONE_0 1 +#define PLUS_ONE_1 2 +#define PLUS_ONE_2 3 +#define PLUS_ONE_3 4 +#define PLUS_ONE_4 5 +#define PLUS_ONE_5 6 +#define PLUS_ONE_6 7 +#define PLUS_ONE_7 8 +#define PLUS_ONE_8 9 +#define PLUS_ONE_9 10 +#define PLUS_ONE_10 11 +#define PLUS_ONE_11 12 +#define PLUS_ONE_12 13 +#define PLUS_ONE_13 14 +#define PLUS_ONE_14 15 +#define PLUS_ONE_15 16 +#define PLUS_ONE(x) PLUS_ONE_ ## x +#define _CONCAT3(a,b,c) a ## b ## c +#define CONCAT3(a,b,c) _CONCAT3(a,b,c) + +#define STORE_UNALIGNED(x) \ +CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \ + .if (x != 12); \ + lw T0, (x*4)(STATE); \ + .endif; \ + lwl T1, (x*4)+MSB ## (IN); \ + lwr T1, (x*4)+LSB ## (IN); \ + .if (x == 12); \ + addu X ## x, NONCE_0; \ + .else; \ + addu X ## x, T0; \ + .endif; \ + CPU_TO_LE32(X ## x); \ + xor X ## x, T1; \ + swl X ## x, (x*4)+MSB ## (OUT); \ + swr X ## x, (x*4)+LSB ## (OUT); + +#define STORE_ALIGNED(x) \ +CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ + .if (x != 12); \ + lw T0, (x*4)(STATE); \ + .endif; \ + lw T1, (x*4) ## (IN); \ + .if (x == 12); \ + addu X ## x, NONCE_0; \ + .else; \ + addu X ## x, T0; \ + .endif; \ + CPU_TO_LE32(X ## x); \ + xor X ## x, T1; \ + sw X ## x, (x*4) ## (OUT); + +/* Jump table macro. + * Used for setup and handling the last bytes, which are not multiple of 4. + * X15 is free to store Xn + * Every jumptable entry must be equal in size. + */ +#define JMPTBL_ALIGNED(x) \ +.Lchacha20_mips_jmptbl_aligned_ ## x: ; \ + .set noreorder; \ + b .Lchacha20_mips_xor_aligned_ ## x ## _b; \ + .if (x == 12); \ + addu SAVED_X, X ## x, NONCE_0; \ + .else; \ + addu SAVED_X, X ## x, SAVED_CA; \ + .endif; \ + .set reorder + +#define JMPTBL_UNALIGNED(x) \ +.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \ + .set noreorder; \ + b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \ + .if (x == 12); \ + addu SAVED_X, X ## x, NONCE_0; \ + .else; \ + addu SAVED_X, X ## x, SAVED_CA; \ + .endif; \ + .set reorder + +#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \ + addu X(A), X(K); \ + addu X(B), X(L); \ + addu X(C), X(M); \ + addu X(D), X(N); \ + xor X(V), X(A); \ + xor X(W), X(B); \ + xor X(Y), X(C); \ + xor X(Z), X(D); \ + rotl X(V), S; \ + rotl X(W), S; \ + rotl X(Y), S; \ + rotl X(Z), S; + +.text +.set reorder +.set noat +.globl chacha20_mips +.ent chacha20_mips +chacha20_mips: + .frame $sp, STACK_SIZE, $ra + + addiu $sp, -STACK_SIZE + + /* Return bytes = 0. */ + beqz BYTES, .Lchacha20_mips_end + + lw NONCE_0, 48(STATE) + + /* Save s0-s7 */ + sw $s0, 0($sp) + sw $s1, 4($sp) + sw $s2, 8($sp) + sw $s3, 12($sp) + sw $s4, 16($sp) + sw $s5, 20($sp) + sw $s6, 24($sp) + sw $s7, 28($sp) + + /* Test IN or OUT is unaligned. 
+ * IS_UNALIGNED = ( IN | OUT ) & 0x00000003 + */ + or IS_UNALIGNED, IN, OUT + andi IS_UNALIGNED, 0x3 + + /* Set number of rounds */ + li $at, 20 + + b .Lchacha20_rounds_start + +.align 4 +.Loop_chacha20_rounds: + addiu IN, CHACHA20_BLOCK_SIZE + addiu OUT, CHACHA20_BLOCK_SIZE + addiu NONCE_0, 1 + +.Lchacha20_rounds_start: + lw X0, 0(STATE) + lw X1, 4(STATE) + lw X2, 8(STATE) + lw X3, 12(STATE) + + lw X4, 16(STATE) + lw X5, 20(STATE) + lw X6, 24(STATE) + lw X7, 28(STATE) + lw X8, 32(STATE) + lw X9, 36(STATE) + lw X10, 40(STATE) + lw X11, 44(STATE) + + move X12, NONCE_0 + lw X13, 52(STATE) + lw X14, 56(STATE) + lw X15, 60(STATE) + +.Loop_chacha20_xor_rounds: + addiu $at, -2 + AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); + AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); + AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8); + AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7); + AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16); + AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12); + AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8); + AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7); + bnez $at, .Loop_chacha20_xor_rounds + + addiu BYTES, -(CHACHA20_BLOCK_SIZE) + + /* Is data src/dst unaligned? Jump */ + bnez IS_UNALIGNED, .Loop_chacha20_unaligned + + /* Set number rounds here to fill delayslot. */ + li $at, 20 + + /* BYTES < 0, it has no full block. */ + bltz BYTES, .Lchacha20_mips_no_full_block_aligned + + FOR_EACH_WORD_REV(STORE_ALIGNED) + + /* BYTES > 0? Loop again. */ + bgtz BYTES, .Loop_chacha20_rounds + + /* Place this here to fill delay slot */ + addiu NONCE_0, 1 + + /* BYTES < 0? Handle last bytes */ + bltz BYTES, .Lchacha20_mips_xor_bytes + +.Lchacha20_mips_xor_done: + /* Restore used registers */ + lw $s0, 0($sp) + lw $s1, 4($sp) + lw $s2, 8($sp) + lw $s3, 12($sp) + lw $s4, 16($sp) + lw $s5, 20($sp) + lw $s6, 24($sp) + lw $s7, 28($sp) + + /* Write NONCE_0 back to right location in state */ + sw NONCE_0, 48(STATE) + +.Lchacha20_mips_end: + addiu $sp, STACK_SIZE + jr $ra + +.Lchacha20_mips_no_full_block_aligned: + /* Restore the offset on BYTES */ + addiu BYTES, CHACHA20_BLOCK_SIZE + + /* Get number of full WORDS */ + andi $at, BYTES, MASK_U32 + + /* Load upper half of jump table addr */ + lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0) + + /* Calculate lower half jump table offset */ + ins T0, $at, 1, 6 + + /* Add offset to STATE */ + addu T1, STATE, $at + + /* Add lower half jump table addr */ + addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0) + + /* Read value from STATE */ + lw SAVED_CA, 0(T1) + + /* Store remaining bytecounter as negative value */ + subu BYTES, $at, BYTES + + jr T0 + + /* Jump table */ + FOR_EACH_WORD(JMPTBL_ALIGNED) + + +.Loop_chacha20_unaligned: + /* Set number rounds here to fill delayslot. */ + li $at, 20 + + /* BYTES > 0, it has no full block. */ + bltz BYTES, .Lchacha20_mips_no_full_block_unaligned + + FOR_EACH_WORD_REV(STORE_UNALIGNED) + + /* BYTES > 0? Loop again. 
*/ + bgtz BYTES, .Loop_chacha20_rounds + + /* Write NONCE_0 back to right location in state */ + sw NONCE_0, 48(STATE) + + .set noreorder + /* Fall through to byte handling */ + bgez BYTES, .Lchacha20_mips_xor_done +.Lchacha20_mips_xor_unaligned_0_b: +.Lchacha20_mips_xor_aligned_0_b: + /* Place this here to fill delay slot */ + addiu NONCE_0, 1 + .set reorder + +.Lchacha20_mips_xor_bytes: + addu IN, $at + addu OUT, $at + /* First byte */ + lbu T1, 0(IN) + addiu $at, BYTES, 1 + CPU_TO_LE32(SAVED_X) + ROTR(SAVED_X) + xor T1, SAVED_X + sb T1, 0(OUT) + beqz $at, .Lchacha20_mips_xor_done + /* Second byte */ + lbu T1, 1(IN) + addiu $at, BYTES, 2 + ROTx SAVED_X, 8 + xor T1, SAVED_X + sb T1, 1(OUT) + beqz $at, .Lchacha20_mips_xor_done + /* Third byte */ + lbu T1, 2(IN) + ROTx SAVED_X, 8 + xor T1, SAVED_X + sb T1, 2(OUT) + b .Lchacha20_mips_xor_done + +.Lchacha20_mips_no_full_block_unaligned: + /* Restore the offset on BYTES */ + addiu BYTES, CHACHA20_BLOCK_SIZE + + /* Get number of full WORDS */ + andi $at, BYTES, MASK_U32 + + /* Load upper half of jump table addr */ + lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0) + + /* Calculate lower half jump table offset */ + ins T0, $at, 1, 6 + + /* Add offset to STATE */ + addu T1, STATE, $at + + /* Add lower half jump table addr */ + addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0) + + /* Read value from STATE */ + lw SAVED_CA, 0(T1) + + /* Store remaining bytecounter as negative value */ + subu BYTES, $at, BYTES + + jr T0 + + /* Jump table */ + FOR_EACH_WORD(JMPTBL_UNALIGNED) +.end chacha20_mips +.set at diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S b/net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S new file mode 100644 index 000000000000..8fb4bc2e7b5b --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-unrolled-arm.S @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Google, Inc. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Design notes: + * + * 16 registers would be needed to hold the state matrix, but only 14 are + * available because 'sp' and 'pc' cannot be used. So we spill the elements + * (x8, x9) to the stack and swap them out with (x10, x11). This adds one + * 'ldrd' and one 'strd' instruction per round. + * + * All rotates are performed using the implicit rotate operand accepted by the + * 'add' and 'eor' instructions. This is faster than using explicit rotate + * instructions. To make this work, we allow the values in the second and last + * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the + * wrong rotation amount. The rotation amount is then fixed up just in time + * when the values are used. 'brot' is the number of bits the values in row 'b' + * need to be rotated right to arrive at the correct values, and 'drot' + * similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such + * that they end up as (25, 24) after every round. 
+ */ + + // ChaCha state registers + X0 .req r0 + X1 .req r1 + X2 .req r2 + X3 .req r3 + X4 .req r4 + X5 .req r5 + X6 .req r6 + X7 .req r7 + X8_X10 .req r8 // shared by x8 and x10 + X9_X11 .req r9 // shared by x9 and x11 + X12 .req r10 + X13 .req r11 + X14 .req r12 + X15 .req r14 + +.Lexpand_32byte_k: + // "expand 32-byte k" + .word 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 + +#ifdef __thumb2__ +# define adrl adr +#endif + +.macro __rev out, in, t0, t1, t2 +.if __LINUX_ARM_ARCH__ >= 6 + rev \out, \in +.else + lsl \t0, \in, #24 + and \t1, \in, #0xff00 + and \t2, \in, #0xff0000 + orr \out, \t0, \in, lsr #24 + orr \out, \out, \t1, lsl #8 + orr \out, \out, \t2, lsr #8 +.endif +.endm + +.macro _le32_bswap x, t0, t1, t2 +#ifdef __ARMEB__ + __rev \x, \x, \t0, \t1, \t2 +#endif +.endm + +.macro _le32_bswap_4x a, b, c, d, t0, t1, t2 + _le32_bswap \a, \t0, \t1, \t2 + _le32_bswap \b, \t0, \t1, \t2 + _le32_bswap \c, \t0, \t1, \t2 + _le32_bswap \d, \t0, \t1, \t2 +.endm + +.macro __ldrd a, b, src, offset +#if __LINUX_ARM_ARCH__ >= 6 + ldrd \a, \b, [\src, #\offset] +#else + ldr \a, [\src, #\offset] + ldr \b, [\src, #\offset + 4] +#endif +.endm + +.macro __strd a, b, dst, offset +#if __LINUX_ARM_ARCH__ >= 6 + strd \a, \b, [\dst, #\offset] +#else + str \a, [\dst, #\offset] + str \b, [\dst, #\offset + 4] +#endif +.endm + +.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2 + + // a += b; d ^= a; d = rol(d, 16); + add \a1, \a1, \b1, ror #brot + add \a2, \a2, \b2, ror #brot + eor \d1, \a1, \d1, ror #drot + eor \d2, \a2, \d2, ror #drot + // drot == 32 - 16 == 16 + + // c += d; b ^= c; b = rol(b, 12); + add \c1, \c1, \d1, ror #16 + add \c2, \c2, \d2, ror #16 + eor \b1, \c1, \b1, ror #brot + eor \b2, \c2, \b2, ror #brot + // brot == 32 - 12 == 20 + + // a += b; d ^= a; d = rol(d, 8); + add \a1, \a1, \b1, ror #20 + add \a2, \a2, \b2, ror #20 + eor \d1, \a1, \d1, ror #16 + eor \d2, \a2, \d2, ror #16 + // drot == 32 - 8 == 24 + + // c += d; b ^= c; b = rol(b, 7); + add \c1, \c1, \d1, ror #24 + add \c2, \c2, \d2, ror #24 + eor \b1, \c1, \b1, ror #20 + eor \b2, \c2, \b2, ror #20 + // brot == 32 - 7 == 25 +.endm + +.macro _doubleround + + // column round + + // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13) + _halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13 + + // save (x8, x9); restore (x10, x11) + __strd X8_X10, X9_X11, sp, 0 + __ldrd X8_X10, X9_X11, sp, 8 + + // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15) + _halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15 + + .set brot, 25 + .set drot, 24 + + // diagonal round + + // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12) + _halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12 + + // save (x10, x11); restore (x8, x9) + __strd X8_X10, X9_X11, sp, 8 + __ldrd X8_X10, X9_X11, sp, 0 + + // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14) + _halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14 +.endm + +.macro _chacha_permute nrounds + .set brot, 0 + .set drot, 0 + .rept \nrounds / 2 + _doubleround + .endr +.endm + +.macro _chacha nrounds + +.Lnext_block\@: + // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN + // Registers contain x0-x9,x12-x15. + + // Do the core ChaCha permutation to update x0-x15. + _chacha_permute \nrounds + + add sp, #8 + // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN + // Registers contain x0-x9,x12-x15. + // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. + + // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15). + push {X8_X10, X9_X11, X12, X13, X14, X15} + + // Load (OUT, IN, LEN). 
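Editorial aside on the design notes and the _halfround macro above: instead of rotating rows 'b' and 'd' after every step, the code tracks how far each stored value is from its true rotation (brot, drot) and folds the correction into the next add/eor through the implicit "ror #" operand. A scalar C model of one such fused step, with hypothetical helper names and not part of this patch, is sketched below:

	#include <stdint.h>

	/* Rotate right, written so that n == 0 is well defined. */
	static inline uint32_t ror32(uint32_t v, unsigned int n)
	{
		return (v >> (n & 31)) | (v << ((32 - n) & 31));
	}

	/*
	 * One "a += b; d ^= a; d = rol(d, 16);" step where b and d are kept
	 * pre-rotated by brot/drot bits, as in _halfround.  Rather than
	 * rotating d left by 16 at the end, the caller just remembers that
	 * the stored d is now off by drot = 32 - 16 = 16 bits and corrects
	 * it on the next use.
	 */
	static void qr_step(uint32_t *a, uint32_t b_stored, unsigned int brot,
			    uint32_t *d_stored, unsigned int drot)
	{
		*a += ror32(b_stored, brot);             /* corrected b */
		*d_stored = *a ^ ror32(*d_stored, drot); /* fix-up deferred */
	}

After the step the stored d is off by 16 bits, which matches the "// drot == 32 - 16 == 16" bookkeeping in the macro.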
+ ldr r14, [sp, #96] + ldr r12, [sp, #100] + ldr r11, [sp, #104] + + orr r10, r14, r12 + + // Use slow path if fewer than 64 bytes remain. + cmp r11, #64 + blt .Lxor_slowpath\@ + + // Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on + // ARMv6+, since ldmia and stmia (used below) still require alignment. + tst r10, #3 + bne .Lxor_slowpath\@ + + // Fast path: XOR 64 bytes of aligned data. + + // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN + // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT. + // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. + + // x0-x3 + __ldrd r8, r9, sp, 32 + __ldrd r10, r11, sp, 40 + add X0, X0, r8 + add X1, X1, r9 + add X2, X2, r10 + add X3, X3, r11 + _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + ldmia r12!, {r8-r11} + eor X0, X0, r8 + eor X1, X1, r9 + eor X2, X2, r10 + eor X3, X3, r11 + stmia r14!, {X0-X3} + + // x4-x7 + __ldrd r8, r9, sp, 48 + __ldrd r10, r11, sp, 56 + add X4, r8, X4, ror #brot + add X5, r9, X5, ror #brot + ldmia r12!, {X0-X3} + add X6, r10, X6, ror #brot + add X7, r11, X7, ror #brot + _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + eor X4, X4, X0 + eor X5, X5, X1 + eor X6, X6, X2 + eor X7, X7, X3 + stmia r14!, {X4-X7} + + // x8-x15 + pop {r0-r7} // (x8-x9,x12-x15,x10-x11) + __ldrd r8, r9, sp, 32 + __ldrd r10, r11, sp, 40 + add r0, r0, r8 // x8 + add r1, r1, r9 // x9 + add r6, r6, r10 // x10 + add r7, r7, r11 // x11 + _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + ldmia r12!, {r8-r11} + eor r0, r0, r8 // x8 + eor r1, r1, r9 // x9 + eor r6, r6, r10 // x10 + eor r7, r7, r11 // x11 + stmia r14!, {r0,r1,r6,r7} + ldmia r12!, {r0,r1,r6,r7} + __ldrd r8, r9, sp, 48 + __ldrd r10, r11, sp, 56 + add r2, r8, r2, ror #drot // x12 + add r3, r9, r3, ror #drot // x13 + add r4, r10, r4, ror #drot // x14 + add r5, r11, r5, ror #drot // x15 + _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + ldr r9, [sp, #72] // load LEN + eor r2, r2, r0 // x12 + eor r3, r3, r1 // x13 + eor r4, r4, r6 // x14 + eor r5, r5, r7 // x15 + subs r9, #64 // decrement and check LEN + stmia r14!, {r2-r5} + + beq .Ldone\@ + +.Lprepare_for_next_block\@: + + // Stack: x0-x15 OUT IN LEN + + // Increment block counter (x12) + add r8, #1 + + // Store updated (OUT, IN, LEN) + str r14, [sp, #64] + str r12, [sp, #68] + str r9, [sp, #72] + + mov r14, sp + + // Store updated block counter (x12) + str r8, [sp, #48] + + sub sp, #16 + + // Reload state and do next block + ldmia r14!, {r0-r11} // load x0-x11 + __strd r10, r11, sp, 8 // store x10-x11 before state + ldmia r14, {r10-r12,r14} // load x12-x15 + b .Lnext_block\@ + +.Lxor_slowpath\@: + // Slow path: < 64 bytes remaining, or unaligned input or output buffer. + // We handle it by storing the 64 bytes of keystream to the stack, then + // XOR-ing the needed portion with the data. + + // Allocate keystream buffer + sub sp, #64 + mov r14, sp + + // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN + // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0. + // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. 
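Editorial aside on the slow path introduced above: when fewer than 64 bytes remain or the buffers are misaligned, the macro writes one whole keystream block to the stack and XORs only the bytes it needs. Reduced to C, and with gen_keystream_block() as a hypothetical stand-in for the permutation (not a function in this patch), the shape is roughly:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * Illustrative model of the slow path: materialise a full 64-byte
	 * keystream block in a stack buffer, XOR only the bytes that remain,
	 * then clear the buffer so no keystream is left on the stack.
	 */
	static void xor_slowpath(uint8_t *out, const uint8_t *in, size_t len,
				 void (*gen_keystream_block)(uint8_t ks[64]))
	{
		uint8_t ks[64];
		size_t i, n = len < 64 ? len : 64;

		gen_keystream_block(ks);
		for (i = 0; i < n; i++)
			out[i] = in[i] ^ ks[i];
		memset(ks, 0, sizeof(ks));	/* a real kernel user would
						   prefer memzero_explicit() */
	}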
+ + // Save keystream for x0-x3 + __ldrd r8, r9, sp, 96 + __ldrd r10, r11, sp, 104 + add X0, X0, r8 + add X1, X1, r9 + add X2, X2, r10 + add X3, X3, r11 + _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + stmia r14!, {X0-X3} + + // Save keystream for x4-x7 + __ldrd r8, r9, sp, 112 + __ldrd r10, r11, sp, 120 + add X4, r8, X4, ror #brot + add X5, r9, X5, ror #brot + add X6, r10, X6, ror #brot + add X7, r11, X7, ror #brot + _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + add r8, sp, #64 + stmia r14!, {X4-X7} + + // Save keystream for x8-x15 + ldm r8, {r0-r7} // (x8-x9,x12-x15,x10-x11) + __ldrd r8, r9, sp, 128 + __ldrd r10, r11, sp, 136 + add r0, r0, r8 // x8 + add r1, r1, r9 // x9 + add r6, r6, r10 // x10 + add r7, r7, r11 // x11 + _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + stmia r14!, {r0,r1,r6,r7} + __ldrd r8, r9, sp, 144 + __ldrd r10, r11, sp, 152 + add r2, r8, r2, ror #drot // x12 + add r3, r9, r3, ror #drot // x13 + add r4, r10, r4, ror #drot // x14 + add r5, r11, r5, ror #drot // x15 + _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + stmia r14, {r2-r5} + + // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN + // Registers: r8 is block counter, r12 is IN. + + ldr r9, [sp, #168] // LEN + ldr r14, [sp, #160] // OUT + cmp r9, #64 + mov r0, sp + movle r1, r9 + movgt r1, #64 + // r1 is number of bytes to XOR, in range [1, 64] + +.if __LINUX_ARM_ARCH__ < 6 + orr r2, r12, r14 + tst r2, #3 // IN or OUT misaligned? + bne .Lxor_next_byte\@ +.endif + + // XOR a word at a time +.rept 16 + subs r1, #4 + blt .Lxor_words_done\@ + ldr r2, [r12], #4 + ldr r3, [r0], #4 + eor r2, r2, r3 + str r2, [r14], #4 +.endr + b .Lxor_slowpath_done\@ +.Lxor_words_done\@: + ands r1, r1, #3 + beq .Lxor_slowpath_done\@ + + // XOR a byte at a time +.Lxor_next_byte\@: + ldrb r2, [r12], #1 + ldrb r3, [r0], #1 + eor r2, r2, r3 + strb r2, [r14], #1 + subs r1, #1 + bne .Lxor_next_byte\@ + +.Lxor_slowpath_done\@: + subs r9, #64 + add sp, #96 + bgt .Lprepare_for_next_block\@ + +.Ldone\@: +.endm // _chacha + +/* + * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], + * const u32 iv[4]); + */ +SYM_FUNC_START(chacha20_arm) + cmp r2, #0 // len == 0? + reteq lr + + push {r0-r2,r4-r11,lr} + + // Push state x0-x15 onto stack. + // Also store an extra copy of x10-x11 just before the state. 
+ + ldr r4, [sp, #48] // iv + mov r0, sp + sub sp, #80 + + // iv: x12-x15 + ldm r4, {X12,X13,X14,X15} + stmdb r0!, {X12,X13,X14,X15} + + // key: x4-x11 + __ldrd X8_X10, X9_X11, r3, 24 + __strd X8_X10, X9_X11, sp, 8 + stmdb r0!, {X8_X10, X9_X11} + ldm r3, {X4-X9_X11} + stmdb r0!, {X4-X9_X11} + + // constants: x0-x3 + adrl X3, .Lexpand_32byte_k + ldm X3, {X0-X3} + __strd X0, X1, sp, 16 + __strd X2, X3, sp, 24 + + _chacha 20 + + add sp, #76 + pop {r4-r11, pc} +SYM_FUNC_END(chacha20_arm) + +/* + * void hchacha20_arm(const u32 state[16], u32 out[8]); + */ +SYM_FUNC_START(hchacha20_arm) + push {r1,r4-r11,lr} + + mov r14, r0 + ldmia r14!, {r0-r11} // load x0-x11 + push {r10-r11} // store x10-x11 to stack + ldm r14, {r10-r12,r14} // load x12-x15 + sub sp, #8 + + _chacha_permute 20 + + // Skip over (unused0-unused1, x10-x11) + add sp, #16 + + // Fix up rotations of x12-x15 + ror X12, X12, #drot + ror X13, X13, #drot + pop {r4} // load 'out' + ror X14, X14, #drot + ror X15, X15, #drot + + // Store (x0-x3,x12-x15) to 'out' + stm r4, {X0,X1,X2,X3,X12,X13,X14,X15} + + pop {r4-r11,pc} +SYM_FUNC_END(hchacha20_arm) diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c new file mode 100644 index 000000000000..5ac5f686a641 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64-glue.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/fpu/api.h> +#include <asm/cpufeature.h> +#include <asm/processor.h> +#include <asm/intel-family.h> + +asmlinkage void hchacha20_ssse3(u32 *derived_key, const u8 *nonce, + const u8 *key); +asmlinkage void chacha20_ssse3(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void chacha20_avx2(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void chacha20_avx512(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); +asmlinkage void chacha20_avx512vl(u8 *out, const u8 *in, const size_t len, + const u32 key[8], const u32 counter[4]); + +static bool chacha20_use_ssse3 __ro_after_init; +static bool chacha20_use_avx2 __ro_after_init; +static bool chacha20_use_avx512 __ro_after_init; +static bool chacha20_use_avx512vl __ro_after_init; +static bool *const chacha20_nobs[] __initconst = { + &chacha20_use_ssse3, &chacha20_use_avx2, &chacha20_use_avx512, + &chacha20_use_avx512vl }; + +static void __init chacha20_fpu_init(void) +{ + chacha20_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3); + chacha20_use_avx2 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); +#ifndef COMPAT_CANNOT_USE_AVX512 + chacha20_use_avx512 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL) && + /* Skylake downclocks unacceptably much when using zmm. 
*/ + boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X; + chacha20_use_avx512vl = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + boot_cpu_has(X86_FEATURE_AVX512VL) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL); +#endif +} + +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + /* SIMD disables preemption, so relax after processing each page. */ + BUILD_BUG_ON(PAGE_SIZE < CHACHA20_BLOCK_SIZE || + PAGE_SIZE % CHACHA20_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_AS_SSSE3) || !chacha20_use_ssse3 || + len <= CHACHA20_BLOCK_SIZE || !simd_use(simd_context)) + return false; + + for (;;) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + if (IS_ENABLED(CONFIG_AS_AVX512) && chacha20_use_avx512 && + len >= CHACHA20_BLOCK_SIZE * 8) + chacha20_avx512(dst, src, bytes, ctx->key, ctx->counter); + else if (IS_ENABLED(CONFIG_AS_AVX512) && chacha20_use_avx512vl && + len >= CHACHA20_BLOCK_SIZE * 4) + chacha20_avx512vl(dst, src, bytes, ctx->key, ctx->counter); + else if (IS_ENABLED(CONFIG_AS_AVX2) && chacha20_use_avx2 && + len >= CHACHA20_BLOCK_SIZE * 4) + chacha20_avx2(dst, src, bytes, ctx->key, ctx->counter); + else + chacha20_ssse3(dst, src, bytes, ctx->key, ctx->counter); + ctx->counter[0] += (bytes + 63) / 64; + len -= bytes; + if (!len) + break; + dst += bytes; + src += bytes; + simd_relax(simd_context); + } + + return true; +} + +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + if (IS_ENABLED(CONFIG_AS_SSSE3) && chacha20_use_ssse3 && + simd_use(simd_context)) { + hchacha20_ssse3(derived_key, nonce, key); + return true; + } + return false; +} diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl new file mode 100644 index 000000000000..29906a66b8b7 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20-x86_64.pl @@ -0,0 +1,4106 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# Copyright (C) 2017-2019 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved. +# Copyright (C) 2017-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. +# Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved. +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# November 2014 +# +# ChaCha20 for x86_64. +# +# December 2016 +# +# Add AVX512F code path. +# +# December 2017 +# +# Add AVX512VL code path. +# +# Performance in cycles per byte out of large buffer. 
+# +# IALU/gcc 4.8(i) 1x/2xSSSE3(ii) 4xSSSE3 NxAVX(v) +# +# P4 9.48/+99% - - +# Core2 7.83/+55% 7.90/5.76 4.35 +# Westmere 7.19/+50% 5.60/4.50 3.00 +# Sandy Bridge 8.31/+42% 5.45/4.00 2.72 +# Ivy Bridge 6.71/+46% 5.40/? 2.41 +# Haswell 5.92/+43% 5.20/3.45 2.42 1.23 +# Skylake[-X] 5.87/+39% 4.70/3.22 2.31 1.19[0.80(vi)] +# Silvermont 12.0/+33% 7.75/6.90 7.03(iii) +# Knights L 11.7/- ? 9.60(iii) 0.80 +# Goldmont 10.6/+17% 5.10/3.52 3.28 +# Sledgehammer 7.28/+52% - - +# Bulldozer 9.66/+28% 9.85/5.35(iv) 3.06(iv) +# Ryzen 5.96/+50% 5.19/3.00 2.40 2.09 +# VIA Nano 10.5/+46% 6.72/6.88 6.05 +# +# (i) compared to older gcc 3.x one can observe >2x improvement on +# most platforms; +# (ii) 2xSSSE3 is code path optimized specifically for 128 bytes used +# by chacha20_poly1305_tls_cipher, results are EVP-free; +# (iii) this is not optimal result for Atom because of MSROM +# limitations, SSE2 can do better, but gain is considered too +# low to justify the [maintenance] effort; +# (iv) Bulldozer actually executes 4xXOP code path that delivers 2.20 +# and 4.85 for 128-byte inputs; +# (v) 8xAVX2, 8xAVX512VL or 16xAVX512F, whichever best applicable; +# (vi) even though Skylake-X can execute AVX512F code and deliver 0.57 +# cpb in single thread, the corresponding capability is suppressed; + +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); +$kernel=0; $kernel=1 if (!$flavour && !$output); + +if (!$kernel) { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or + die "can't locate x86_64-xlate.pl"; + + open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; + *STDOUT=*OUT; + + if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/) { + $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25); + } + + if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { + $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12); + $avx += 1 if ($1==2.11 && $2>=8); + } + + if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && + `ml64 2>&1` =~ /Version ([0-9]+)\./) { + $avx = ($1>=10) + ($1>=11); + } + + if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { + $avx = ($2>=3.0) + ($2>3.0); + } +} else { + $avx = 4; # The kernel uses ifdefs for this. 
+} + +# input parameter block +($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8"); + +$code.=<<___ if $kernel; +#include <linux/linkage.h> +___ + +sub declare_variable() { + my ($name, $size, $type, $payload) = @_; + if($kernel) { + $code.=".section .rodata.cst$size.L$name, \"aM\", \@progbits, $size\n"; + $code.=".align $size\n"; + $code.=".L$name:\n"; + $code.=".$type $payload\n"; + } else { + $code.=".L$name:\n"; + $code.=".$type $payload\n"; + } +} + +sub declare_function() { + my ($name, $align, $nargs) = @_; + if($kernel) { + $code .= ".align $align\n"; + $code .= "SYM_FUNC_START($name)\n"; + $code .= ".L$name:\n"; + } else { + $code .= ".globl $name\n"; + $code .= ".type $name,\@function,$nargs\n"; + $code .= ".align $align\n"; + $code .= "$name:\n"; + } +} + +sub end_function() { + my ($name) = @_; + if($kernel) { + $code .= "SYM_FUNC_END($name)\n"; + } else { + $code .= ".size $name,.-$name\n"; + } +} + +if(!$kernel) { + $code .= ".text\n"; +} +&declare_variable('zero', 16, 'long', '0,0,0,0'); +&declare_variable('one', 16, 'long', '1,0,0,0'); +&declare_variable('inc', 16, 'long', '0,1,2,3'); +&declare_variable('four', 16, 'long', '4,4,4,4'); +&declare_variable('incy', 32, 'long', '0,2,4,6,1,3,5,7'); +&declare_variable('eight', 32, 'long', '8,8,8,8,8,8,8,8'); +&declare_variable('rot16', 16, 'byte', '0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd'); +&declare_variable('rot24', 16, 'byte', '0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe'); +&declare_variable('twoy', 32, 'long', '2,0,0,0, 2,0,0,0'); +&declare_variable('zeroz', 64, 'long', '0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0'); +&declare_variable('fourz', 64, 'long', '4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0'); +&declare_variable('incz', 64, 'long', '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15'); +&declare_variable('sixteen', 64, 'long', '16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16'); +&declare_variable('sigma', 16, 'ascii', '"expand 32-byte k"'); + +$code.=<<___ if !$kernel; +.asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>" +___ +$code.=".text\n"; + +sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; + my $arg = pop; + $arg = "\$$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; +} + +@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)), + "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15))); +@t=("%esi","%edi"); + +sub ROUND { # critical path is 24 cycles per round +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_)=map("\"$_\"",@t); +my @x=map("\"$_\"",@x); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. + + # Normally instructions would be interleaved to favour in-order + # execution. 
Generally out-of-order cores manage it gracefully, + # but not this time for some reason. As in-order execution + # cores are dying breed, old Atom is the only one around, + # instructions are left uninterleaved. Besides, Atom is better + # off executing 1xSSSE3 code anyway... + + ( + "&add (@x[$a0],@x[$b0])", # Q1 + "&xor (@x[$d0],@x[$a0])", + "&rol (@x[$d0],16)", + "&add (@x[$a1],@x[$b1])", # Q2 + "&xor (@x[$d1],@x[$a1])", + "&rol (@x[$d1],16)", + + "&add ($xc,@x[$d0])", + "&xor (@x[$b0],$xc)", + "&rol (@x[$b0],12)", + "&add ($xc_,@x[$d1])", + "&xor (@x[$b1],$xc_)", + "&rol (@x[$b1],12)", + + "&add (@x[$a0],@x[$b0])", + "&xor (@x[$d0],@x[$a0])", + "&rol (@x[$d0],8)", + "&add (@x[$a1],@x[$b1])", + "&xor (@x[$d1],@x[$a1])", + "&rol (@x[$d1],8)", + + "&add ($xc,@x[$d0])", + "&xor (@x[$b0],$xc)", + "&rol (@x[$b0],7)", + "&add ($xc_,@x[$d1])", + "&xor (@x[$b1],$xc_)", + "&rol (@x[$b1],7)", + + "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's + "&mov (\"4*$c1(%rsp)\",$xc_)", + "&mov ($xc,\"4*$c2(%rsp)\")", + "&mov ($xc_,\"4*$c3(%rsp)\")", + + "&add (@x[$a2],@x[$b2])", # Q3 + "&xor (@x[$d2],@x[$a2])", + "&rol (@x[$d2],16)", + "&add (@x[$a3],@x[$b3])", # Q4 + "&xor (@x[$d3],@x[$a3])", + "&rol (@x[$d3],16)", + + "&add ($xc,@x[$d2])", + "&xor (@x[$b2],$xc)", + "&rol (@x[$b2],12)", + "&add ($xc_,@x[$d3])", + "&xor (@x[$b3],$xc_)", + "&rol (@x[$b3],12)", + + "&add (@x[$a2],@x[$b2])", + "&xor (@x[$d2],@x[$a2])", + "&rol (@x[$d2],8)", + "&add (@x[$a3],@x[$b3])", + "&xor (@x[$d3],@x[$a3])", + "&rol (@x[$d3],8)", + + "&add ($xc,@x[$d2])", + "&xor (@x[$b2],$xc)", + "&rol (@x[$b2],7)", + "&add ($xc_,@x[$d3])", + "&xor (@x[$b3],$xc_)", + "&rol (@x[$b3],7)" + ); +} + +######################################################################## +# Generic code path that handles all lengths on pre-SSSE3 processors. 
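+# It keeps the 16-word state in general-purpose registers plus stack
+# slots, runs the two ROUND expansions above ten times (20 rounds) per
+# block, and produces one 64-byte block per .Loop_outer iteration.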
+if(!$kernel) { +&declare_function("chacha20_ctr32", 64, 5); +$code.=<<___; +.cfi_startproc + cmp \$0,$len + je .Lno_data + mov OPENSSL_ia32cap_P+4(%rip),%r9 +___ +$code.=<<___ if ($avx>2); + bt \$48,%r9 # check for AVX512F + jc .Lchacha20_avx512 + test %r9,%r9 # check for AVX512VL + js .Lchacha20_avx512vl +___ +$code.=<<___; + test \$`1<<(41-32)`,%r9d + jnz .Lchacha20_ssse3 +___ +$code.=<<___; + push %rbx +.cfi_push %rbx + push %rbp +.cfi_push %rbp + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 + sub \$64+24,%rsp +.cfi_adjust_cfa_offset 64+24 +.Lctr32_body: + + #movdqa .Lsigma(%rip),%xmm0 + movdqu ($key),%xmm1 + movdqu 16($key),%xmm2 + movdqu ($counter),%xmm3 + movdqa .Lone(%rip),%xmm4 + + #movdqa %xmm0,4*0(%rsp) # key[0] + movdqa %xmm1,4*4(%rsp) # key[1] + movdqa %xmm2,4*8(%rsp) # key[2] + movdqa %xmm3,4*12(%rsp) # key[3] + mov $len,%rbp # reassign $len + jmp .Loop_outer + +.align 32 +.Loop_outer: + mov \$0x61707865,@x[0] # 'expa' + mov \$0x3320646e,@x[1] # 'nd 3' + mov \$0x79622d32,@x[2] # '2-by' + mov \$0x6b206574,@x[3] # 'te k' + mov 4*4(%rsp),@x[4] + mov 4*5(%rsp),@x[5] + mov 4*6(%rsp),@x[6] + mov 4*7(%rsp),@x[7] + movd %xmm3,@x[12] + mov 4*13(%rsp),@x[13] + mov 4*14(%rsp),@x[14] + mov 4*15(%rsp),@x[15] + + mov %rbp,64+0(%rsp) # save len + mov \$10,%ebp + mov $inp,64+8(%rsp) # save inp + movq %xmm2,%rsi # "@x[8]" + mov $out,64+16(%rsp) # save out + mov %rsi,%rdi + shr \$32,%rdi # "@x[9]" + jmp .Loop + +.align 32 +.Loop: +___ + foreach (&ROUND (0, 4, 8,12)) { eval; } + foreach (&ROUND (0, 5,10,15)) { eval; } + &dec ("%ebp"); + &jnz (".Loop"); + +$code.=<<___; + mov @t[1],4*9(%rsp) # modulo-scheduled + mov @t[0],4*8(%rsp) + mov 64(%rsp),%rbp # load len + movdqa %xmm2,%xmm1 + mov 64+8(%rsp),$inp # load inp + paddd %xmm4,%xmm3 # increment counter + mov 64+16(%rsp),$out # load out + + add \$0x61707865,@x[0] # 'expa' + add \$0x3320646e,@x[1] # 'nd 3' + add \$0x79622d32,@x[2] # '2-by' + add \$0x6b206574,@x[3] # 'te k' + add 4*4(%rsp),@x[4] + add 4*5(%rsp),@x[5] + add 4*6(%rsp),@x[6] + add 4*7(%rsp),@x[7] + add 4*12(%rsp),@x[12] + add 4*13(%rsp),@x[13] + add 4*14(%rsp),@x[14] + add 4*15(%rsp),@x[15] + paddd 4*8(%rsp),%xmm1 + + cmp \$64,%rbp + jb .Ltail + + xor 4*0($inp),@x[0] # xor with input + xor 4*1($inp),@x[1] + xor 4*2($inp),@x[2] + xor 4*3($inp),@x[3] + xor 4*4($inp),@x[4] + xor 4*5($inp),@x[5] + xor 4*6($inp),@x[6] + xor 4*7($inp),@x[7] + movdqu 4*8($inp),%xmm0 + xor 4*12($inp),@x[12] + xor 4*13($inp),@x[13] + xor 4*14($inp),@x[14] + xor 4*15($inp),@x[15] + lea 4*16($inp),$inp # inp+=64 + pxor %xmm1,%xmm0 + + movdqa %xmm2,4*8(%rsp) + movd %xmm3,4*12(%rsp) + + mov @x[0],4*0($out) # write output + mov @x[1],4*1($out) + mov @x[2],4*2($out) + mov @x[3],4*3($out) + mov @x[4],4*4($out) + mov @x[5],4*5($out) + mov @x[6],4*6($out) + mov @x[7],4*7($out) + movdqu %xmm0,4*8($out) + mov @x[12],4*12($out) + mov @x[13],4*13($out) + mov @x[14],4*14($out) + mov @x[15],4*15($out) + lea 4*16($out),$out # out+=64 + + sub \$64,%rbp + jnz .Loop_outer + + jmp .Ldone + +.align 16 +.Ltail: + mov @x[0],4*0(%rsp) + mov @x[1],4*1(%rsp) + xor %rbx,%rbx + mov @x[2],4*2(%rsp) + mov @x[3],4*3(%rsp) + mov @x[4],4*4(%rsp) + mov @x[5],4*5(%rsp) + mov @x[6],4*6(%rsp) + mov @x[7],4*7(%rsp) + movdqa %xmm1,4*8(%rsp) + mov @x[12],4*12(%rsp) + mov @x[13],4*13(%rsp) + mov @x[14],4*14(%rsp) + mov @x[15],4*15(%rsp) + +.Loop_tail: + movzb ($inp,%rbx),%eax + movzb (%rsp,%rbx),%edx + lea 1(%rbx),%rbx + xor %edx,%eax + mov %al,-1($out,%rbx) + dec %rbp + jnz 
.Loop_tail + +.Ldone: + add \$64+24,%rsp +.cfi_adjust_cfa_offset -64-24 + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbp +.cfi_restore %rbp + pop %rbx +.cfi_restore %rbx +.Lno_data: + ret +.cfi_endproc +___ +&end_function("chacha20_ctr32"); +} + +######################################################################## +# SSSE3 code path that handles shorter lengths +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7)); + +sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot16); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,20); + &pslld ($t,12); + &por ($b,$t); + + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot24); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,25); + &pslld ($t,7); + &por ($b,$t); +} + +my $xframe = $win64 ? 32+8 : 8; + +if($kernel) { + $code .= "#ifdef CONFIG_AS_SSSE3\n"; +} + +if($kernel) { +&declare_function("hchacha20_ssse3", 32, 5); +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($len),$b + movdqu 16($len),$c + movdqu ($inp),$d + # This code is only used when targeting kernel. + # If targeting win64, xmm{6,7} preserving needs to be added. + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + mov \$10,$counter # reuse $counter + jmp 1f +.align 32 +1: +___ + &SSSE3ROUND(); + &pshufd ($a,$a,0b10010011); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b00111001); + &nop (); + + &SSSE3ROUND(); + &pshufd ($a,$a,0b00111001); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b10010011); + + &dec ($counter); + &jnz ("1b"); + +$code.=<<___; + movdqu $a, ($out) + movdqu $d, 16($out) + ret +___ +&end_function("hchacha20_ssse3"); +} + +&declare_function("chacha20_ssse3", 32, 5); +$code.=<<___; +.cfi_startproc + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 +___ +$code.=<<___ if ($avx && !$kernel); + test \$`1<<(43-32)`,%r10d + jnz .Lchacha20_4xop # XOP is fastest even if we use 1/4 +___ +$code.=<<___; + cmp \$128,$len # we might throw away some data, + je .Lchacha20_128 + ja .Lchacha20_4x # but overall it won't be slower + +.Ldo_ssse3_after_all: + sub \$64+$xframe,%rsp + and \$-16,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r10) + movaps %xmm7,-0x20(%r10) +.Lssse3_body: +___ +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($key),$b + movdqu 16($key),$c + movdqu ($counter),$d + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + + movdqa $a,0x00(%rsp) + movdqa $b,0x10(%rsp) + movdqa $c,0x20(%rsp) + movdqa $d,0x30(%rsp) + mov \$10,$counter # reuse $counter + jmp .Loop_ssse3 + +.align 32 +.Loop_outer_ssse3: + movdqa .Lone(%rip),$d + movdqa 0x00(%rsp),$a + movdqa 0x10(%rsp),$b + movdqa 0x20(%rsp),$c + paddd 0x30(%rsp),$d + mov \$10,$counter + movdqa $d,0x30(%rsp) + jmp .Loop_ssse3 + +.align 32 +.Loop_ssse3: +___ + &SSSE3ROUND(); + &pshufd ($a,$a,0b10010011); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b00111001); + &nop (); + + &SSSE3ROUND(); + &pshufd ($a,$a,0b00111001); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b10010011); + + &dec ($counter); + &jnz (".Loop_ssse3"); + +$code.=<<___; + paddd 0x00(%rsp),$a + paddd 0x10(%rsp),$b + paddd 0x20(%rsp),$c + paddd 0x30(%rsp),$d + + cmp \$64,$len + jb .Ltail_ssse3 + + movdqu 0x00($inp),$t + movdqu 0x10($inp),$t1 + pxor $t,$a # xor with input + movdqu 0x20($inp),$t + pxor $t1,$b + movdqu 0x30($inp),$t1 + lea 0x40($inp),$inp # inp+=64 + pxor $t,$c + pxor $t1,$d + + movdqu 
$a,0x00($out) # write output + movdqu $b,0x10($out) + movdqu $c,0x20($out) + movdqu $d,0x30($out) + lea 0x40($out),$out # out+=64 + + sub \$64,$len + jnz .Loop_outer_ssse3 + + jmp .Ldone_ssse3 + +.align 16 +.Ltail_ssse3: + movdqa $a,0x00(%rsp) + movdqa $b,0x10(%rsp) + movdqa $c,0x20(%rsp) + movdqa $d,0x30(%rsp) + xor $counter,$counter + +.Loop_tail_ssse3: + movzb ($inp,$counter),%eax + movzb (%rsp,$counter),%ecx + lea 1($counter),$counter + xor %ecx,%eax + mov %al,-1($out,$counter) + dec $len + jnz .Loop_tail_ssse3 + +.Ldone_ssse3: +___ +$code.=<<___ if ($win64); + movaps -0x30(%r10),%xmm6 + movaps -0x20(%r10),%xmm7 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.Lssse3_epilogue: + ret +.cfi_endproc +___ +} +&end_function("chacha20_ssse3"); + +######################################################################## +# SSSE3 code path that handles 128-byte inputs +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(8,9,2..7)); +my ($a1,$b1,$c1,$d1)=map("%xmm$_",(10,11,0,1)); + +sub SSSE3ROUND_2x { + &paddd ($a,$b); + &pxor ($d,$a); + &paddd ($a1,$b1); + &pxor ($d1,$a1); + &pshufb ($d,$rot16); + &pshufb($d1,$rot16); + + &paddd ($c,$d); + &paddd ($c1,$d1); + &pxor ($b,$c); + &pxor ($b1,$c1); + &movdqa ($t,$b); + &psrld ($b,20); + &movdqa($t1,$b1); + &pslld ($t,12); + &psrld ($b1,20); + &por ($b,$t); + &pslld ($t1,12); + &por ($b1,$t1); + + &paddd ($a,$b); + &pxor ($d,$a); + &paddd ($a1,$b1); + &pxor ($d1,$a1); + &pshufb ($d,$rot24); + &pshufb($d1,$rot24); + + &paddd ($c,$d); + &paddd ($c1,$d1); + &pxor ($b,$c); + &pxor ($b1,$c1); + &movdqa ($t,$b); + &psrld ($b,25); + &movdqa($t1,$b1); + &pslld ($t,7); + &psrld ($b1,25); + &por ($b,$t); + &pslld ($t1,7); + &por ($b1,$t1); +} + +my $xframe = $win64 ? 0x68 : 8; + +$code.=<<___; +.type chacha20_128,\@function,5 +.align 32 +chacha20_128: +.cfi_startproc +.Lchacha20_128: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + sub \$64+$xframe,%rsp + and \$-16,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x70(%r10) + movaps %xmm7,-0x60(%r10) + movaps %xmm8,-0x50(%r10) + movaps %xmm9,-0x40(%r10) + movaps %xmm10,-0x30(%r10) + movaps %xmm11,-0x20(%r10) +.L128_body: +___ +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($key),$b + movdqu 16($key),$c + movdqu ($counter),$d + movdqa .Lone(%rip),$d1 + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + + movdqa $a,$a1 + movdqa $a,0x00(%rsp) + movdqa $b,$b1 + movdqa $b,0x10(%rsp) + movdqa $c,$c1 + movdqa $c,0x20(%rsp) + paddd $d,$d1 + movdqa $d,0x30(%rsp) + mov \$10,$counter # reuse $counter + jmp .Loop_128 + +.align 32 +.Loop_128: +___ + &SSSE3ROUND_2x(); + &pshufd ($a,$a,0b10010011); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b00111001); + &pshufd ($a1,$a1,0b10010011); + &pshufd ($d1,$d1,0b01001110); + &pshufd ($c1,$c1,0b00111001); + + &SSSE3ROUND_2x(); + &pshufd ($a,$a,0b00111001); + &pshufd ($d,$d,0b01001110); + &pshufd ($c,$c,0b10010011); + &pshufd ($a1,$a1,0b00111001); + &pshufd ($d1,$d1,0b01001110); + &pshufd ($c1,$c1,0b10010011); + + &dec ($counter); + &jnz (".Loop_128"); + +$code.=<<___; + paddd 0x00(%rsp),$a + paddd 0x10(%rsp),$b + paddd 0x20(%rsp),$c + paddd 0x30(%rsp),$d + paddd .Lone(%rip),$d1 + paddd 0x00(%rsp),$a1 + paddd 0x10(%rsp),$b1 + paddd 0x20(%rsp),$c1 + paddd 0x30(%rsp),$d1 + + movdqu 0x00($inp),$t + movdqu 0x10($inp),$t1 + pxor $t,$a # xor with input + movdqu 0x20($inp),$t + pxor $t1,$b + movdqu 0x30($inp),$t1 + pxor $t,$c + movdqu 0x40($inp),$t + pxor $t1,$d + movdqu 0x50($inp),$t1 + pxor $t,$a1 + movdqu 0x60($inp),$t + pxor 
$t1,$b1 + movdqu 0x70($inp),$t1 + pxor $t,$c1 + pxor $t1,$d1 + + movdqu $a,0x00($out) # write output + movdqu $b,0x10($out) + movdqu $c,0x20($out) + movdqu $d,0x30($out) + movdqu $a1,0x40($out) + movdqu $b1,0x50($out) + movdqu $c1,0x60($out) + movdqu $d1,0x70($out) +___ +$code.=<<___ if ($win64); + movaps -0x70(%r10),%xmm6 + movaps -0x60(%r10),%xmm7 + movaps -0x50(%r10),%xmm8 + movaps -0x40(%r10),%xmm9 + movaps -0x30(%r10),%xmm10 + movaps -0x20(%r10),%xmm11 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L128_epilogue: + ret +.cfi_endproc +.size chacha20_128,.-chacha20_128 +___ +} + +######################################################################## +# SSSE3 code path that handles longer messages. +{ +# assign variables to favor Atom front-end +my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3, + $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3); + +sub SSSE3_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3); +my @x=map("\"$_\"",@xx); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. 
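+	#
+	# For reference, each (a,b,c,d) quadruple below goes through the
+	# standard ChaCha quarter-round (illustrative C, not emitted here):
+	#
+	#	a += b; d ^= a; d = rol32(d, 16);
+	#	c += d; b ^= c; b = rol32(b, 12);
+	#	a += b; d ^= a; d = rol32(d,  8);
+	#	c += d; b ^= c; b = rol32(b,  7);
+	#
+	# Even rounds work on the columns (0,4,8,12)..(3,7,11,15); odd
+	# rounds on the diagonals (0,5,10,15)..(3,4,9,14), per the table
+	# above.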
+ + ( + "&paddd (@x[$a0],@x[$b0])", # Q1 + "&paddd (@x[$a1],@x[$b1])", # Q2 + "&pxor (@x[$d0],@x[$a0])", + "&pxor (@x[$d1],@x[$a1])", + "&pshufb (@x[$d0],$t1)", + "&pshufb (@x[$d1],$t1)", + + "&paddd ($xc,@x[$d0])", + "&paddd ($xc_,@x[$d1])", + "&pxor (@x[$b0],$xc)", + "&pxor (@x[$b1],$xc_)", + "&movdqa ($t0,@x[$b0])", + "&pslld (@x[$b0],12)", + "&psrld ($t0,20)", + "&movdqa ($t1,@x[$b1])", + "&pslld (@x[$b1],12)", + "&por (@x[$b0],$t0)", + "&psrld ($t1,20)", + "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip) + "&por (@x[$b1],$t1)", + + "&paddd (@x[$a0],@x[$b0])", + "&paddd (@x[$a1],@x[$b1])", + "&pxor (@x[$d0],@x[$a0])", + "&pxor (@x[$d1],@x[$a1])", + "&pshufb (@x[$d0],$t0)", + "&pshufb (@x[$d1],$t0)", + + "&paddd ($xc,@x[$d0])", + "&paddd ($xc_,@x[$d1])", + "&pxor (@x[$b0],$xc)", + "&pxor (@x[$b1],$xc_)", + "&movdqa ($t1,@x[$b0])", + "&pslld (@x[$b0],7)", + "&psrld ($t1,25)", + "&movdqa ($t0,@x[$b1])", + "&pslld (@x[$b1],7)", + "&por (@x[$b0],$t1)", + "&psrld ($t0,25)", + "&movdqa ($t1,'(%r9)')", # .Lrot16(%rip) + "&por (@x[$b1],$t0)", + + "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's + "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)", + "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")", + "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")", + + "&paddd (@x[$a2],@x[$b2])", # Q3 + "&paddd (@x[$a3],@x[$b3])", # Q4 + "&pxor (@x[$d2],@x[$a2])", + "&pxor (@x[$d3],@x[$a3])", + "&pshufb (@x[$d2],$t1)", + "&pshufb (@x[$d3],$t1)", + + "&paddd ($xc,@x[$d2])", + "&paddd ($xc_,@x[$d3])", + "&pxor (@x[$b2],$xc)", + "&pxor (@x[$b3],$xc_)", + "&movdqa ($t0,@x[$b2])", + "&pslld (@x[$b2],12)", + "&psrld ($t0,20)", + "&movdqa ($t1,@x[$b3])", + "&pslld (@x[$b3],12)", + "&por (@x[$b2],$t0)", + "&psrld ($t1,20)", + "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip) + "&por (@x[$b3],$t1)", + + "&paddd (@x[$a2],@x[$b2])", + "&paddd (@x[$a3],@x[$b3])", + "&pxor (@x[$d2],@x[$a2])", + "&pxor (@x[$d3],@x[$a3])", + "&pshufb (@x[$d2],$t0)", + "&pshufb (@x[$d3],$t0)", + + "&paddd ($xc,@x[$d2])", + "&paddd ($xc_,@x[$d3])", + "&pxor (@x[$b2],$xc)", + "&pxor (@x[$b3],$xc_)", + "&movdqa ($t1,@x[$b2])", + "&pslld (@x[$b2],7)", + "&psrld ($t1,25)", + "&movdqa ($t0,@x[$b3])", + "&pslld (@x[$b3],7)", + "&por (@x[$b2],$t1)", + "&psrld ($t0,25)", + "&movdqa ($t1,'(%r9)')", # .Lrot16(%rip) + "&por (@x[$b3],$t0)" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +$code.=<<___; +.type chacha20_4x,\@function,5 +.align 32 +chacha20_4x: +.cfi_startproc +.Lchacha20_4x: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 +___ +$code.=<<___ if (!$kernel); + mov %r9,%r11 +___ +$code.=<<___ if ($avx>1 && !$kernel); + shr \$32,%r9 # OPENSSL_ia32cap_P+8 + test \$`1<<5`,%r9 # test AVX2 + jnz .Lchacha20_8x +___ +$code.=<<___; + cmp \$192,$len + ja .Lproceed4x +___ +$code.=<<___ if (!$kernel); + and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE + cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE + je .Ldo_ssse3_after_all # to detect Atom +___ +$code.=<<___; +.Lproceed4x: + sub \$0x140+$xframe,%rsp + and \$-16,%rsp +___ + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x40 constant copy of key[0-2] smashed by lanes + # ... + # +0x100 SIMD counters (with nonce smashed by lanes) + # ... 
+ # +0x140 +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L4x_body: +___ +$code.=<<___; + movdqa .Lsigma(%rip),$xa3 # key[0] + movdqu ($key),$xb3 # key[1] + movdqu 16($key),$xt3 # key[2] + movdqu ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + lea .Lrot16(%rip),%r9 + lea .Lrot24(%rip),%r11 + + pshufd \$0x00,$xa3,$xa0 # smash key by lanes... + pshufd \$0x55,$xa3,$xa1 + movdqa $xa0,0x40(%rsp) # ... and offload + pshufd \$0xaa,$xa3,$xa2 + movdqa $xa1,0x50(%rsp) + pshufd \$0xff,$xa3,$xa3 + movdqa $xa2,0x60(%rsp) + movdqa $xa3,0x70(%rsp) + + pshufd \$0x00,$xb3,$xb0 + pshufd \$0x55,$xb3,$xb1 + movdqa $xb0,0x80-0x100(%rcx) + pshufd \$0xaa,$xb3,$xb2 + movdqa $xb1,0x90-0x100(%rcx) + pshufd \$0xff,$xb3,$xb3 + movdqa $xb2,0xa0-0x100(%rcx) + movdqa $xb3,0xb0-0x100(%rcx) + + pshufd \$0x00,$xt3,$xt0 # "$xc0" + pshufd \$0x55,$xt3,$xt1 # "$xc1" + movdqa $xt0,0xc0-0x100(%rcx) + pshufd \$0xaa,$xt3,$xt2 # "$xc2" + movdqa $xt1,0xd0-0x100(%rcx) + pshufd \$0xff,$xt3,$xt3 # "$xc3" + movdqa $xt2,0xe0-0x100(%rcx) + movdqa $xt3,0xf0-0x100(%rcx) + + pshufd \$0x00,$xd3,$xd0 + pshufd \$0x55,$xd3,$xd1 + paddd .Linc(%rip),$xd0 # don't save counters yet + pshufd \$0xaa,$xd3,$xd2 + movdqa $xd1,0x110-0x100(%rcx) + pshufd \$0xff,$xd3,$xd3 + movdqa $xd2,0x120-0x100(%rcx) + movdqa $xd3,0x130-0x100(%rcx) + + jmp .Loop_enter4x + +.align 32 +.Loop_outer4x: + movdqa 0x40(%rsp),$xa0 # re-load smashed key + movdqa 0x50(%rsp),$xa1 + movdqa 0x60(%rsp),$xa2 + movdqa 0x70(%rsp),$xa3 + movdqa 0x80-0x100(%rcx),$xb0 + movdqa 0x90-0x100(%rcx),$xb1 + movdqa 0xa0-0x100(%rcx),$xb2 + movdqa 0xb0-0x100(%rcx),$xb3 + movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0" + movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1" + movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2" + movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3" + movdqa 0x100-0x100(%rcx),$xd0 + movdqa 0x110-0x100(%rcx),$xd1 + movdqa 0x120-0x100(%rcx),$xd2 + movdqa 0x130-0x100(%rcx),$xd3 + paddd .Lfour(%rip),$xd0 # next SIMD counters + +.Loop_enter4x: + movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]" + movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]" + movdqa (%r9),$xt3 # .Lrot16(%rip) + mov \$10,%eax + movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters + jmp .Loop4x + +.align 32 +.Loop4x: +___ + foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop4x + + paddd 0x40(%rsp),$xa0 # accumulate key material + paddd 0x50(%rsp),$xa1 + paddd 0x60(%rsp),$xa2 + paddd 0x70(%rsp),$xa3 + + movdqa $xa0,$xt2 # "de-interlace" data + punpckldq $xa1,$xa0 + movdqa $xa2,$xt3 + punpckldq $xa3,$xa2 + punpckhdq $xa1,$xt2 + punpckhdq $xa3,$xt3 + movdqa $xa0,$xa1 + punpcklqdq $xa2,$xa0 # "a0" + movdqa $xt2,$xa3 + punpcklqdq $xt3,$xt2 # "a2" + punpckhqdq $xa2,$xa1 # "a1" + punpckhqdq $xt3,$xa3 # "a3" +___ + ($xa2,$xt2)=($xt2,$xa2); +$code.=<<___; + paddd 0x80-0x100(%rcx),$xb0 + paddd 0x90-0x100(%rcx),$xb1 + paddd 0xa0-0x100(%rcx),$xb2 + paddd 0xb0-0x100(%rcx),$xb3 + + movdqa $xa0,0x00(%rsp) # offload $xaN + movdqa $xa1,0x10(%rsp) + movdqa 0x20(%rsp),$xa0 # "xc2" + movdqa 0x30(%rsp),$xa1 # "xc3" + + movdqa $xb0,$xt2 + punpckldq $xb1,$xb0 + movdqa $xb2,$xt3 + punpckldq $xb3,$xb2 + punpckhdq $xb1,$xt2 + punpckhdq $xb3,$xt3 + movdqa $xb0,$xb1 + punpcklqdq $xb2,$xb0 # "b0" + movdqa $xt2,$xb3 + 
punpcklqdq $xt3,$xt2 # "b2" + punpckhqdq $xb2,$xb1 # "b1" + punpckhqdq $xt3,$xb3 # "b3" +___ + ($xb2,$xt2)=($xt2,$xb2); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + paddd 0xc0-0x100(%rcx),$xc0 + paddd 0xd0-0x100(%rcx),$xc1 + paddd 0xe0-0x100(%rcx),$xc2 + paddd 0xf0-0x100(%rcx),$xc3 + + movdqa $xa2,0x20(%rsp) # keep offloading $xaN + movdqa $xa3,0x30(%rsp) + + movdqa $xc0,$xt2 + punpckldq $xc1,$xc0 + movdqa $xc2,$xt3 + punpckldq $xc3,$xc2 + punpckhdq $xc1,$xt2 + punpckhdq $xc3,$xt3 + movdqa $xc0,$xc1 + punpcklqdq $xc2,$xc0 # "c0" + movdqa $xt2,$xc3 + punpcklqdq $xt3,$xt2 # "c2" + punpckhqdq $xc2,$xc1 # "c1" + punpckhqdq $xt3,$xc3 # "c3" +___ + ($xc2,$xt2)=($xt2,$xc2); + ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary +$code.=<<___; + paddd 0x100-0x100(%rcx),$xd0 + paddd 0x110-0x100(%rcx),$xd1 + paddd 0x120-0x100(%rcx),$xd2 + paddd 0x130-0x100(%rcx),$xd3 + + movdqa $xd0,$xt2 + punpckldq $xd1,$xd0 + movdqa $xd2,$xt3 + punpckldq $xd3,$xd2 + punpckhdq $xd1,$xt2 + punpckhdq $xd3,$xt3 + movdqa $xd0,$xd1 + punpcklqdq $xd2,$xd0 # "d0" + movdqa $xt2,$xd3 + punpcklqdq $xt3,$xt2 # "d2" + punpckhqdq $xd2,$xd1 # "d1" + punpckhqdq $xt3,$xd3 # "d3" +___ + ($xd2,$xt2)=($xt2,$xd2); +$code.=<<___; + cmp \$64*4,$len + jb .Ltail4x + + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # size optimization + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + + movdqu $xt0,0x40($out) + movdqu 0x00($inp),$xt0 + movdqu $xt1,0x50($out) + movdqu 0x10($inp),$xt1 + movdqu $xt2,0x60($out) + movdqu 0x20($inp),$xt2 + movdqu $xt3,0x70($out) + lea 0x80($out),$out # size optimization + movdqu 0x30($inp),$xt3 + pxor 0x20(%rsp),$xt0 + pxor $xb2,$xt1 + pxor $xc2,$xt2 + pxor $xd2,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # inp+=64*4 + pxor 0x30(%rsp),$xt0 + pxor $xb3,$xt1 + pxor $xc3,$xt2 + pxor $xd3,$xt3 + movdqu $xt0,0x40($out) + movdqu $xt1,0x50($out) + movdqu $xt2,0x60($out) + movdqu $xt3,0x70($out) + lea 0x80($out),$out # out+=64*4 + + sub \$64*4,$len + jnz .Loop_outer4x + + jmp .Ldone4x + +.Ltail4x: + cmp \$192,$len + jae .L192_or_more4x + cmp \$128,$len + jae .L128_or_more4x + cmp \$64,$len + jae .L64_or_more4x + + #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + xor %r9,%r9 + #movdqa $xt0,0x00(%rsp) + movdqa $xb0,0x10(%rsp) + movdqa $xc0,0x20(%rsp) + movdqa $xd0,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L64_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaxN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + movdqu $xt0,0x00($out) + movdqu $xt1,0x10($out) + movdqu $xt2,0x20($out) + movdqu $xt3,0x30($out) + je .Ldone4x + + movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember? 
+ lea 0x40($inp),$inp # inp+=64*1 + xor %r9,%r9 + movdqa $xt0,0x00(%rsp) + movdqa $xb1,0x10(%rsp) + lea 0x40($out),$out # out+=64*1 + movdqa $xc1,0x20(%rsp) + sub \$64,$len # len-=64*1 + movdqa $xd1,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L128_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + movdqu $xt0,0x40($out) + movdqu $xt1,0x50($out) + movdqu $xt2,0x60($out) + movdqu $xt3,0x70($out) + je .Ldone4x + + movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x80($inp),$inp # inp+=64*2 + xor %r9,%r9 + movdqa $xt0,0x00(%rsp) + movdqa $xb2,0x10(%rsp) + lea 0x80($out),$out # out+=64*2 + movdqa $xc2,0x20(%rsp) + sub \$128,$len # len-=64*2 + movdqa $xd2,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L192_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # size optimization + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + + movdqu $xt0,0x40($out) + movdqu 0x00($inp),$xt0 + movdqu $xt1,0x50($out) + movdqu 0x10($inp),$xt1 + movdqu $xt2,0x60($out) + movdqu 0x20($inp),$xt2 + movdqu $xt3,0x70($out) + lea 0x80($out),$out # size optimization + movdqu 0x30($inp),$xt3 + pxor 0x20(%rsp),$xt0 + pxor $xb2,$xt1 + pxor $xc2,$xt2 + pxor $xd2,$xt3 + movdqu $xt0,0x00($out) + movdqu $xt1,0x10($out) + movdqu $xt2,0x20($out) + movdqu $xt3,0x30($out) + je .Ldone4x + + movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x40($inp),$inp # inp+=64*3 + xor %r9,%r9 + movdqa $xt0,0x00(%rsp) + movdqa $xb3,0x10(%rsp) + lea 0x40($out),$out # out+=64*3 + movdqa $xc3,0x20(%rsp) + sub \$192,$len # len-=64*3 + movdqa $xd3,0x30(%rsp) + +.Loop_tail4x: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail4x + +.Ldone4x: +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L4x_epilogue: + ret +.cfi_endproc +.size chacha20_4x,.-chacha20_4x +___ +} +if($kernel) { + $code .= "#endif\n"; +} + +######################################################################## +# XOP code path that handles all lengths. +if ($avx && !$kernel) { +# There is some "anomaly" observed depending on instructions' size or +# alignment. If you look closely at below code you'll notice that +# sometimes argument order varies. The order affects instruction +# encoding by making it larger, and such fiddling gives 5% performance +# improvement. This is on FX-4100... 
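+#
+# Note also that XOP's vprotd does a full 32-bit rotate in one
+# instruction, so this path needs none of the pslld/psrld/por (or
+# pshufb) sequences the other code paths use for the 16/12/8/7-bit
+# rotations.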
+ +my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3, + $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3); + +sub XOP_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my @x=map("\"$_\"",@xx); + + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vprotd (@x[$d0],@x[$d0],16)", + "&vprotd (@x[$d1],@x[$d1],16)", + "&vprotd (@x[$d2],@x[$d2],16)", + "&vprotd (@x[$d3],@x[$d3],16)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxor (@x[$b0],@x[$c0],@x[$b0])", + "&vpxor (@x[$b1],@x[$c1],@x[$b1])", + "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip + "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip + "&vprotd (@x[$b0],@x[$b0],12)", + "&vprotd (@x[$b1],@x[$b1],12)", + "&vprotd (@x[$b2],@x[$b2],12)", + "&vprotd (@x[$b3],@x[$b3],12)", + + "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip + "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vprotd (@x[$d0],@x[$d0],8)", + "&vprotd (@x[$d1],@x[$d1],8)", + "&vprotd (@x[$d2],@x[$d2],8)", + "&vprotd (@x[$d3],@x[$d3],8)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxor (@x[$b0],@x[$c0],@x[$b0])", + "&vpxor (@x[$b1],@x[$c1],@x[$b1])", + "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip + "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip + "&vprotd (@x[$b0],@x[$b0],7)", + "&vprotd (@x[$b1],@x[$b1],7)", + "&vprotd (@x[$b2],@x[$b2],7)", + "&vprotd (@x[$b3],@x[$b3],7)" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +&declare_function("chacha20_xop", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_4xop: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + sub \$0x140+$xframe,%rsp + and \$-16,%rsp +___ + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x40 constant copy of key[0-2] smashed by lanes + # ... + # +0x100 SIMD counters (with nonce smashed by lanes) + # ... + # +0x140 +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L4xop_body: +___ +$code.=<<___; + vzeroupper + + vmovdqa .Lsigma(%rip),$xa3 # key[0] + vmovdqu ($key),$xb3 # key[1] + vmovdqu 16($key),$xt3 # key[2] + vmovdqu ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vmovdqa $xa0,0x40(%rsp) # ... 
and offload + vpshufd \$0xaa,$xa3,$xa2 + vmovdqa $xa1,0x50(%rsp) + vpshufd \$0xff,$xa3,$xa3 + vmovdqa $xa2,0x60(%rsp) + vmovdqa $xa3,0x70(%rsp) + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vmovdqa $xb0,0x80-0x100(%rcx) + vpshufd \$0xaa,$xb3,$xb2 + vmovdqa $xb1,0x90-0x100(%rcx) + vpshufd \$0xff,$xb3,$xb3 + vmovdqa $xb2,0xa0-0x100(%rcx) + vmovdqa $xb3,0xb0-0x100(%rcx) + + vpshufd \$0x00,$xt3,$xt0 # "$xc0" + vpshufd \$0x55,$xt3,$xt1 # "$xc1" + vmovdqa $xt0,0xc0-0x100(%rcx) + vpshufd \$0xaa,$xt3,$xt2 # "$xc2" + vmovdqa $xt1,0xd0-0x100(%rcx) + vpshufd \$0xff,$xt3,$xt3 # "$xc3" + vmovdqa $xt2,0xe0-0x100(%rcx) + vmovdqa $xt3,0xf0-0x100(%rcx) + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet + vpshufd \$0xaa,$xd3,$xd2 + vmovdqa $xd1,0x110-0x100(%rcx) + vpshufd \$0xff,$xd3,$xd3 + vmovdqa $xd2,0x120-0x100(%rcx) + vmovdqa $xd3,0x130-0x100(%rcx) + + jmp .Loop_enter4xop + +.align 32 +.Loop_outer4xop: + vmovdqa 0x40(%rsp),$xa0 # re-load smashed key + vmovdqa 0x50(%rsp),$xa1 + vmovdqa 0x60(%rsp),$xa2 + vmovdqa 0x70(%rsp),$xa3 + vmovdqa 0x80-0x100(%rcx),$xb0 + vmovdqa 0x90-0x100(%rcx),$xb1 + vmovdqa 0xa0-0x100(%rcx),$xb2 + vmovdqa 0xb0-0x100(%rcx),$xb3 + vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0" + vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1" + vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2" + vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3" + vmovdqa 0x100-0x100(%rcx),$xd0 + vmovdqa 0x110-0x100(%rcx),$xd1 + vmovdqa 0x120-0x100(%rcx),$xd2 + vmovdqa 0x130-0x100(%rcx),$xd3 + vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters + +.Loop_enter4xop: + mov \$10,%eax + vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters + jmp .Loop4xop + +.align 32 +.Loop4xop: +___ + foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop4xop + + vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material + vpaddd 0x50(%rsp),$xa1,$xa1 + vpaddd 0x60(%rsp),$xa2,$xa2 + vpaddd 0x70(%rsp),$xa3,$xa3 + + vmovdqa $xt2,0x20(%rsp) # offload $xc2,3 + vmovdqa $xt3,0x30(%rsp) + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd 0x80-0x100(%rcx),$xb0,$xb0 + vpaddd 0x90-0x100(%rcx),$xb1,$xb1 + vpaddd 0xa0-0x100(%rcx),$xb2,$xb2 + vpaddd 0xb0-0x100(%rcx),$xb3,$xb3 + + vmovdqa $xa0,0x00(%rsp) # offload $xa0,1 + vmovdqa $xa1,0x10(%rsp) + vmovdqa 0x20(%rsp),$xa0 # "xc2" + vmovdqa 0x30(%rsp),$xa1 # "xc3" + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + vpaddd 0xc0-0x100(%rcx),$xc0,$xc0 + vpaddd 0xd0-0x100(%rcx),$xc1,$xc1 + vpaddd 0xe0-0x100(%rcx),$xc2,$xc2 + vpaddd 0xf0-0x100(%rcx),$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + 
($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd 0x100-0x100(%rcx),$xd0,$xd0 + vpaddd 0x110-0x100(%rcx),$xd1,$xd1 + vpaddd 0x120-0x100(%rcx),$xd2,$xd2 + vpaddd 0x130-0x100(%rcx),$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); + ($xa0,$xa1)=($xt2,$xt3); +$code.=<<___; + vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1 + vmovdqa 0x10(%rsp),$xa1 + + cmp \$64*4,$len + jb .Ltail4xop + + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x10($inp),$xb2,$xb2 + vpxor 0x20($inp),$xc2,$xc2 + vpxor 0x30($inp),$xd2,$xd2 + vpxor 0x40($inp),$xa3,$xa3 + vpxor 0x50($inp),$xb3,$xb3 + vpxor 0x60($inp),$xc3,$xc3 + vpxor 0x70($inp),$xd3,$xd3 + lea 0x80($inp),$inp # inp+=64*4 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + lea 0x80($out),$out # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x10($out) + vmovdqu $xc2,0x20($out) + vmovdqu $xd2,0x30($out) + vmovdqu $xa3,0x40($out) + vmovdqu $xb3,0x50($out) + vmovdqu $xc3,0x60($out) + vmovdqu $xd3,0x70($out) + lea 0x80($out),$out # out+=64*4 + + sub \$64*4,$len + jnz .Loop_outer4xop + + jmp .Ldone4xop + +.align 32 +.Ltail4xop: + cmp \$192,$len + jae .L192_or_more4xop + cmp \$128,$len + jae .L128_or_more4xop + cmp \$64,$len + jae .L64_or_more4xop + + xor %r9,%r9 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x10(%rsp) + vmovdqa $xc0,0x20(%rsp) + vmovdqa $xd0,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L64_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + je .Ldone4xop + + lea 0x40($inp),$inp # inp+=64*1 + vmovdqa $xa1,0x00(%rsp) + xor %r9,%r9 + vmovdqa $xb1,0x10(%rsp) + lea 0x40($out),$out # out+=64*1 + vmovdqa $xc1,0x20(%rsp) + sub \$64,$len # len-=64*1 + vmovdqa $xd1,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L128_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + je .Ldone4xop + + lea 0x80($inp),$inp # inp+=64*2 + vmovdqa $xa2,0x00(%rsp) + xor %r9,%r9 + vmovdqa $xb2,0x10(%rsp) + lea 0x80($out),$out # out+=64*2 + vmovdqa $xc2,0x20(%rsp) + sub \$128,$len # len-=64*2 + vmovdqa $xd2,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L192_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + 
vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x10($inp),$xb2,$xb2 + vpxor 0x20($inp),$xc2,$xc2 + vpxor 0x30($inp),$xd2,$xd2 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + lea 0x80($out),$out # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x10($out) + vmovdqu $xc2,0x20($out) + vmovdqu $xd2,0x30($out) + je .Ldone4xop + + lea 0x40($inp),$inp # inp+=64*3 + vmovdqa $xa3,0x00(%rsp) + xor %r9,%r9 + vmovdqa $xb3,0x10(%rsp) + lea 0x40($out),$out # out+=64*3 + vmovdqa $xc3,0x20(%rsp) + sub \$192,$len # len-=64*3 + vmovdqa $xd3,0x30(%rsp) + +.Loop_tail4xop: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail4xop + +.Ldone4xop: + vzeroupper +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L4xop_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_xop"); +} + +######################################################################## +# AVX2 code path +if ($avx>1) { + +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX2\n"; +} + +my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3, + $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3); + +sub AVX2_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3); +my @x=map("\"$_\"",@xx); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. 
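+	#
+	# Compared with the SSSE3 4x path, only the register width changes:
+	# each %ymm register holds the same state word for eight independent
+	# blocks, so every trip through the outer loop yields eight 64-byte
+	# blocks of keystream.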
+ + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpshufb (@x[$d0],@x[$d0],$t1)", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpshufb (@x[$d1],@x[$d1],$t1)", + + "&vpaddd ($xc,$xc,@x[$d0])", + "&vpxor (@x[$b0],$xc,@x[$b0])", + "&vpslld ($t0,@x[$b0],12)", + "&vpsrld (@x[$b0],@x[$b0],20)", + "&vpor (@x[$b0],$t0,@x[$b0])", + "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip) + "&vpaddd ($xc_,$xc_,@x[$d1])", + "&vpxor (@x[$b1],$xc_,@x[$b1])", + "&vpslld ($t1,@x[$b1],12)", + "&vpsrld (@x[$b1],@x[$b1],20)", + "&vpor (@x[$b1],$t1,@x[$b1])", + + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpshufb (@x[$d0],@x[$d0],$t0)", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpshufb (@x[$d1],@x[$d1],$t0)", + + "&vpaddd ($xc,$xc,@x[$d0])", + "&vpxor (@x[$b0],$xc,@x[$b0])", + "&vpslld ($t1,@x[$b0],7)", + "&vpsrld (@x[$b0],@x[$b0],25)", + "&vpor (@x[$b0],$t1,@x[$b0])", + "&vbroadcasti128($t1,'(%r9)')", # .Lrot16(%rip) + "&vpaddd ($xc_,$xc_,@x[$d1])", + "&vpxor (@x[$b1],$xc_,@x[$b1])", + "&vpslld ($t0,@x[$b1],7)", + "&vpsrld (@x[$b1],@x[$b1],25)", + "&vpor (@x[$b1],$t0,@x[$b1])", + + "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's + "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)", + "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")", + "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")", + + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpshufb (@x[$d2],@x[$d2],$t1)", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vpshufb (@x[$d3],@x[$d3],$t1)", + + "&vpaddd ($xc,$xc,@x[$d2])", + "&vpxor (@x[$b2],$xc,@x[$b2])", + "&vpslld ($t0,@x[$b2],12)", + "&vpsrld (@x[$b2],@x[$b2],20)", + "&vpor (@x[$b2],$t0,@x[$b2])", + "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip) + "&vpaddd ($xc_,$xc_,@x[$d3])", + "&vpxor (@x[$b3],$xc_,@x[$b3])", + "&vpslld ($t1,@x[$b3],12)", + "&vpsrld (@x[$b3],@x[$b3],20)", + "&vpor (@x[$b3],$t1,@x[$b3])", + + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpshufb (@x[$d2],@x[$d2],$t0)", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vpshufb (@x[$d3],@x[$d3],$t0)", + + "&vpaddd ($xc,$xc,@x[$d2])", + "&vpxor (@x[$b2],$xc,@x[$b2])", + "&vpslld ($t1,@x[$b2],7)", + "&vpsrld (@x[$b2],@x[$b2],25)", + "&vpor (@x[$b2],$t1,@x[$b2])", + "&vbroadcasti128($t1,'(%r9)')", # .Lrot16(%rip) + "&vpaddd ($xc_,$xc_,@x[$d3])", + "&vpxor (@x[$b3],$xc_,@x[$b3])", + "&vpslld ($t0,@x[$b3],7)", + "&vpsrld (@x[$b3],@x[$b3],25)", + "&vpor (@x[$b3],$t0,@x[$b3])" + ); +} + +my $xframe = $win64 ? 0xa8 : 8; + +&declare_function("chacha20_avx2", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_8x: + lea 8(%rsp),%r10 # frame register +.cfi_def_cfa_register %r10 + sub \$0x280+$xframe,%rsp + and \$-32,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L8x_body: +___ +$code.=<<___; + vzeroupper + + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x80 constant copy of key[0-2] smashed by lanes + # ... + # +0x200 SIMD counters (with nonce smashed by lanes) + # ... 
+ # +0x280 + + vbroadcasti128 .Lsigma(%rip),$xa3 # key[0] + vbroadcasti128 ($key),$xb3 # key[1] + vbroadcasti128 16($key),$xt3 # key[2] + vbroadcasti128 ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + lea 0x200(%rsp),%rax # size optimization + lea .Lrot16(%rip),%r9 + lea .Lrot24(%rip),%r11 + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload + vpshufd \$0xaa,$xa3,$xa2 + vmovdqa $xa1,0xa0-0x100(%rcx) + vpshufd \$0xff,$xa3,$xa3 + vmovdqa $xa2,0xc0-0x100(%rcx) + vmovdqa $xa3,0xe0-0x100(%rcx) + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vmovdqa $xb0,0x100-0x100(%rcx) + vpshufd \$0xaa,$xb3,$xb2 + vmovdqa $xb1,0x120-0x100(%rcx) + vpshufd \$0xff,$xb3,$xb3 + vmovdqa $xb2,0x140-0x100(%rcx) + vmovdqa $xb3,0x160-0x100(%rcx) + + vpshufd \$0x00,$xt3,$xt0 # "xc0" + vpshufd \$0x55,$xt3,$xt1 # "xc1" + vmovdqa $xt0,0x180-0x200(%rax) + vpshufd \$0xaa,$xt3,$xt2 # "xc2" + vmovdqa $xt1,0x1a0-0x200(%rax) + vpshufd \$0xff,$xt3,$xt3 # "xc3" + vmovdqa $xt2,0x1c0-0x200(%rax) + vmovdqa $xt3,0x1e0-0x200(%rax) + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet + vpshufd \$0xaa,$xd3,$xd2 + vmovdqa $xd1,0x220-0x200(%rax) + vpshufd \$0xff,$xd3,$xd3 + vmovdqa $xd2,0x240-0x200(%rax) + vmovdqa $xd3,0x260-0x200(%rax) + + jmp .Loop_enter8x + +.align 32 +.Loop_outer8x: + vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key + vmovdqa 0xa0-0x100(%rcx),$xa1 + vmovdqa 0xc0-0x100(%rcx),$xa2 + vmovdqa 0xe0-0x100(%rcx),$xa3 + vmovdqa 0x100-0x100(%rcx),$xb0 + vmovdqa 0x120-0x100(%rcx),$xb1 + vmovdqa 0x140-0x100(%rcx),$xb2 + vmovdqa 0x160-0x100(%rcx),$xb3 + vmovdqa 0x180-0x200(%rax),$xt0 # "xc0" + vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1" + vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2" + vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3" + vmovdqa 0x200-0x200(%rax),$xd0 + vmovdqa 0x220-0x200(%rax),$xd1 + vmovdqa 0x240-0x200(%rax),$xd2 + vmovdqa 0x260-0x200(%rax),$xd3 + vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters + +.Loop_enter8x: + vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]" + vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]" + vbroadcasti128 (%r9),$xt3 + vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters + mov \$10,%eax + jmp .Loop8x + +.align 32 +.Loop8x: +___ + foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop8x + + lea 0x200(%rsp),%rax # size optimization + vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key + vpaddd 0xa0-0x100(%rcx),$xa1,$xa1 + vpaddd 0xc0-0x100(%rcx),$xa2,$xa2 + vpaddd 0xe0-0x100(%rcx),$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd 0x100-0x100(%rcx),$xb0,$xb0 + vpaddd 0x120-0x100(%rcx),$xb1,$xb1 + vpaddd 0x140-0x100(%rcx),$xb2,$xb2 + vpaddd 0x160-0x100(%rcx),$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vperm2i128 \$0x20,$xb0,$xa0,$xt3 # 
"de-interlace" further + vperm2i128 \$0x31,$xb0,$xa0,$xb0 + vperm2i128 \$0x20,$xb1,$xa1,$xa0 + vperm2i128 \$0x31,$xb1,$xa1,$xb1 + vperm2i128 \$0x20,$xb2,$xa2,$xa1 + vperm2i128 \$0x31,$xb2,$xa2,$xb2 + vperm2i128 \$0x20,$xb3,$xa3,$xa2 + vperm2i128 \$0x31,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + vmovdqa $xa0,0x00(%rsp) # offload $xaN + vmovdqa $xa1,0x20(%rsp) + vmovdqa 0x40(%rsp),$xc2 # $xa0 + vmovdqa 0x60(%rsp),$xc3 # $xa1 + + vpaddd 0x180-0x200(%rax),$xc0,$xc0 + vpaddd 0x1a0-0x200(%rax),$xc1,$xc1 + vpaddd 0x1c0-0x200(%rax),$xc2,$xc2 + vpaddd 0x1e0-0x200(%rax),$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd 0x200-0x200(%rax),$xd0,$xd0 + vpaddd 0x220-0x200(%rax),$xd1,$xd1 + vpaddd 0x240-0x200(%rax),$xd2,$xd2 + vpaddd 0x260-0x200(%rax),$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xd0,$xc0,$xd0 + vperm2i128 \$0x20,$xd1,$xc1,$xc0 + vperm2i128 \$0x31,$xd1,$xc1,$xd1 + vperm2i128 \$0x20,$xd2,$xc2,$xc1 + vperm2i128 \$0x31,$xd2,$xc2,$xd2 + vperm2i128 \$0x20,$xd3,$xc3,$xc2 + vperm2i128 \$0x31,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); + ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)= + ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3); + ($xa0,$xa1)=($xt2,$xt3); +$code.=<<___; + vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember? 
+ vmovdqa 0x20(%rsp),$xa1 + + cmp \$64*8,$len + jb .Ltail8x + + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vpxor 0x40($inp),$xc1,$xc1 + vpxor 0x60($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa1,0x00($out) + vmovdqu $xb1,0x20($out) + vmovdqu $xc1,0x40($out) + vmovdqu $xd1,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vpxor 0x40($inp),$xc2,$xc2 + vpxor 0x60($inp),$xd2,$xd2 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x20($out) + vmovdqu $xc2,0x40($out) + vmovdqu $xd2,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vpxor 0x40($inp),$xc3,$xc3 + vpxor 0x60($inp),$xd3,$xd3 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa3,0x00($out) + vmovdqu $xb3,0x20($out) + vmovdqu $xc3,0x40($out) + vmovdqu $xd3,0x60($out) + lea 0x80($out),$out # size optimization + + sub \$64*8,$len + jnz .Loop_outer8x + + jmp .Ldone8x + +.Ltail8x: + cmp \$448,$len + jae .L448_or_more8x + cmp \$384,$len + jae .L384_or_more8x + cmp \$320,$len + jae .L320_or_more8x + cmp \$256,$len + jae .L256_or_more8x + cmp \$192,$len + jae .L192_or_more8x + cmp \$128,$len + jae .L128_or_more8x + cmp \$64,$len + jae .L64_or_more8x + + xor %r9,%r9 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L64_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + je .Ldone8x + + lea 0x40($inp),$inp # inp+=64*1 + xor %r9,%r9 + vmovdqa $xc0,0x00(%rsp) + lea 0x40($out),$out # out+=64*1 + sub \$64,$len # len-=64*1 + vmovdqa $xd0,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L128_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + je .Ldone8x + + lea 0x80($inp),$inp # inp+=64*2 + xor %r9,%r9 + vmovdqa $xa1,0x00(%rsp) + lea 0x80($out),$out # out+=64*2 + sub \$128,$len # len-=64*2 + vmovdqa $xb1,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L192_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + je .Ldone8x + + lea 0xc0($inp),$inp # inp+=64*3 + xor %r9,%r9 + vmovdqa $xc1,0x00(%rsp) + lea 0xc0($out),$out # out+=64*3 + sub \$192,$len # len-=64*3 + vmovdqa $xd1,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L256_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu 
$xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + je .Ldone8x + + lea 0x100($inp),$inp # inp+=64*4 + xor %r9,%r9 + vmovdqa $xa2,0x00(%rsp) + lea 0x100($out),$out # out+=64*4 + sub \$256,$len # len-=64*4 + vmovdqa $xb2,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L320_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + je .Ldone8x + + lea 0x140($inp),$inp # inp+=64*5 + xor %r9,%r9 + vmovdqa $xc2,0x00(%rsp) + lea 0x140($out),$out # out+=64*5 + sub \$320,$len # len-=64*5 + vmovdqa $xd2,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L384_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vpxor 0x140($inp),$xc2,$xc2 + vpxor 0x160($inp),$xd2,$xd2 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + vmovdqu $xc2,0x140($out) + vmovdqu $xd2,0x160($out) + je .Ldone8x + + lea 0x180($inp),$inp # inp+=64*6 + xor %r9,%r9 + vmovdqa $xa3,0x00(%rsp) + lea 0x180($out),$out # out+=64*6 + sub \$384,$len # len-=64*6 + vmovdqa $xb3,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L448_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vpxor 0x140($inp),$xc2,$xc2 + vpxor 0x160($inp),$xd2,$xd2 + vpxor 0x180($inp),$xa3,$xa3 + vpxor 0x1a0($inp),$xb3,$xb3 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + vmovdqu $xc2,0x140($out) + vmovdqu $xd2,0x160($out) + vmovdqu $xa3,0x180($out) + vmovdqu $xb3,0x1a0($out) + je .Ldone8x + + lea 0x1c0($inp),$inp # inp+=64*7 + xor %r9,%r9 + vmovdqa $xc3,0x00(%rsp) + lea 0x1c0($out),$out # out+=64*7 + sub \$448,$len # len-=64*7 + vmovdqa $xd3,0x20(%rsp) + +.Loop_tail8x: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail8x + +.Ldone8x: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp 
+.L8x_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_avx2"); +if($kernel) { + $code .= "#endif\n"; +} +} + +######################################################################## +# AVX512 code paths +if ($avx>2) { +# This one handles shorter inputs... +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX512\n"; +} + +my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20)); +my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7)); + +sub vpxord() # size optimization +{ my $opcode = "vpxor"; # adhere to vpxor when possible + + foreach (@_) { + if (/%([zy])mm([0-9]+)/ && ($1 eq "z" || $2>=16)) { + $opcode = "vpxord"; + last; + } + } + + $code .= "\t$opcode\t".join(',',reverse @_)."\n"; +} + +sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round + &vpaddd ($a,$a,$b); + &vpxord ($d,$d,$a); + &vprold ($d,$d,16); + + &vpaddd ($c,$c,$d); + &vpxord ($b,$b,$c); + &vprold ($b,$b,12); + + &vpaddd ($a,$a,$b); + &vpxord ($d,$d,$a); + &vprold ($d,$d,8); + + &vpaddd ($c,$c,$d); + &vpxord ($b,$b,$c); + &vprold ($b,$b,7); +} + +my $xframe = $win64 ? 32+8 : 8; + +&declare_function("chacha20_avx512", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_avx512: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + cmp \$512,$len + ja .Lchacha20_16x + + sub \$64+$xframe,%rsp + and \$-64,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r10) + movaps %xmm7,-0x20(%r10) +.Lavx512_body: +___ +$code.=<<___; + vbroadcasti32x4 .Lsigma(%rip),$a + vbroadcasti32x4 ($key),$b + vbroadcasti32x4 16($key),$c + vbroadcasti32x4 ($counter),$d + + vmovdqa32 $a,$a_ + vmovdqa32 $b,$b_ + vmovdqa32 $c,$c_ + vpaddd .Lzeroz(%rip),$d,$d + vmovdqa32 .Lfourz(%rip),$fourz + mov \$10,$counter # reuse $counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512 + +.align 16 +.Loop_outer_avx512: + vmovdqa32 $a_,$a + vmovdqa32 $b_,$b + vmovdqa32 $c_,$c + vpaddd $fourz,$d_,$d + mov \$10,$counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512 + +.align 32 +.Loop_avx512: +___ + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b00111001); + &vpshufd ($d,$d,0b10010011); + + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b10010011); + &vpshufd ($d,$d,0b00111001); + + &dec ($counter); + &jnz (".Loop_avx512"); + +$code.=<<___; + vpaddd $a_,$a,$a + vpaddd $b_,$b,$b + vpaddd $c_,$c,$c + vpaddd $d_,$d,$d + + sub \$64,$len + jb .Ltail64_avx512 + + vpxor 0x00($inp),%x#$a,$t0 # xor with input + vpxor 0x10($inp),%x#$b,$t1 + vpxor 0x20($inp),%x#$c,$t2 + vpxor 0x30($inp),%x#$d,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512 + + vextracti32x4 \$1,$a,$t0 + vextracti32x4 \$1,$b,$t1 + vextracti32x4 \$1,$c,$t2 + vextracti32x4 \$1,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512 + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512 + + vextracti32x4 \$2,$a,$t0 + vextracti32x4 \$2,$b,$t1 + vextracti32x4 \$2,$c,$t2 + vextracti32x4 \$2,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512 + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu 
$t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512 + + vextracti32x4 \$3,$a,$t0 + vextracti32x4 \$3,$b,$t1 + vextracti32x4 \$3,$c,$t2 + vextracti32x4 \$3,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512 + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jnz .Loop_outer_avx512 + + jmp .Ldone_avx512 + +.align 16 +.Ltail64_avx512: + vmovdqa %x#$a,0x00(%rsp) + vmovdqa %x#$b,0x10(%rsp) + vmovdqa %x#$c,0x20(%rsp) + vmovdqa %x#$d,0x30(%rsp) + add \$64,$len + jmp .Loop_tail_avx512 + +.align 16 +.Ltail_avx512: + vmovdqa $t0,0x00(%rsp) + vmovdqa $t1,0x10(%rsp) + vmovdqa $t2,0x20(%rsp) + vmovdqa $t3,0x30(%rsp) + add \$64,$len + +.Loop_tail_avx512: + movzb ($inp,$counter),%eax + movzb (%rsp,$counter),%ecx + lea 1($counter),$counter + xor %ecx,%eax + mov %al,-1($out,$counter) + dec $len + jnz .Loop_tail_avx512 + + vmovdqu32 $a_,0x00(%rsp) + +.Ldone_avx512: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0x30(%r10),%xmm6 + movaps -0x20(%r10),%xmm7 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.Lavx512_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_avx512"); + +map(s/%z/%y/, $a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz); + +&declare_function("chacha20_avx512vl", 32, 5); +$code.=<<___; +.cfi_startproc +.Lchacha20_avx512vl: + lea 8(%rsp),%r10 # frame pointer +.cfi_def_cfa_register %r10 + cmp \$128,$len + ja .Lchacha20_8xvl + + sub \$64+$xframe,%rsp + and \$-32,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r10) + movaps %xmm7,-0x20(%r10) +.Lavx512vl_body: +___ +$code.=<<___; + vbroadcasti128 .Lsigma(%rip),$a + vbroadcasti128 ($key),$b + vbroadcasti128 16($key),$c + vbroadcasti128 ($counter),$d + + vmovdqa32 $a,$a_ + vmovdqa32 $b,$b_ + vmovdqa32 $c,$c_ + vpaddd .Lzeroz(%rip),$d,$d + vmovdqa32 .Ltwoy(%rip),$fourz + mov \$10,$counter # reuse $counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512vl + +.align 16 +.Loop_outer_avx512vl: + vmovdqa32 $c_,$c + vpaddd $fourz,$d_,$d + mov \$10,$counter + vmovdqa32 $d,$d_ + jmp .Loop_avx512vl + +.align 32 +.Loop_avx512vl: +___ + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b00111001); + &vpshufd ($d,$d,0b10010011); + + &AVX512ROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b10010011); + &vpshufd ($d,$d,0b00111001); + + &dec ($counter); + &jnz (".Loop_avx512vl"); + +$code.=<<___; + vpaddd $a_,$a,$a + vpaddd $b_,$b,$b + vpaddd $c_,$c,$c + vpaddd $d_,$d,$d + + sub \$64,$len + jb .Ltail64_avx512vl + + vpxor 0x00($inp),%x#$a,$t0 # xor with input + vpxor 0x10($inp),%x#$b,$t1 + vpxor 0x20($inp),%x#$c,$t2 + vpxor 0x30($inp),%x#$d,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + jz .Ldone_avx512vl + + vextracti128 \$1,$a,$t0 + vextracti128 \$1,$b,$t1 + vextracti128 \$1,$c,$t2 + vextracti128 \$1,$d,$t3 + + sub \$64,$len + jb .Ltail_avx512vl + + vpxor 0x00($inp),$t0,$t0 # xor with input + vpxor 0x10($inp),$t1,$t1 + vpxor 0x20($inp),$t2,$t2 + vpxor 0x30($inp),$t3,$t3 + lea 0x40($inp),$inp # inp+=64 + + vmovdqu $t0,0x00($out) # write output + vmovdqu $t1,0x10($out) + vmovdqu $t2,0x20($out) + vmovdqu $t3,0x30($out) + lea 0x40($out),$out # out+=64 + + vmovdqa32 
$a_,$a + vmovdqa32 $b_,$b + jnz .Loop_outer_avx512vl + + jmp .Ldone_avx512vl + +.align 16 +.Ltail64_avx512vl: + vmovdqa %x#$a,0x00(%rsp) + vmovdqa %x#$b,0x10(%rsp) + vmovdqa %x#$c,0x20(%rsp) + vmovdqa %x#$d,0x30(%rsp) + add \$64,$len + jmp .Loop_tail_avx512vl + +.align 16 +.Ltail_avx512vl: + vmovdqa $t0,0x00(%rsp) + vmovdqa $t1,0x10(%rsp) + vmovdqa $t2,0x20(%rsp) + vmovdqa $t3,0x30(%rsp) + add \$64,$len + +.Loop_tail_avx512vl: + movzb ($inp,$counter),%eax + movzb (%rsp,$counter),%ecx + lea 1($counter),$counter + xor %ecx,%eax + mov %al,-1($out,$counter) + dec $len + jnz .Loop_tail_avx512vl + + vmovdqu32 $a_,0x00(%rsp) + vmovdqu32 $a_,0x20(%rsp) + +.Ldone_avx512vl: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0x30(%r10),%xmm6 + movaps -0x20(%r10),%xmm7 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.Lavx512vl_epilogue: + ret +.cfi_endproc +___ +&end_function("chacha20_avx512vl"); + +# This one handles longer inputs... + +my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3); +my @key=map("%zmm$_",(16..31)); +my ($xt0,$xt1,$xt2,$xt3)=@key[0..3]; + +sub AVX512_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my @x=map("\"$_\"",@xx); + + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxord (@x[$d0],@x[$d0],@x[$a0])", + "&vpxord (@x[$d1],@x[$d1],@x[$a1])", + "&vpxord (@x[$d2],@x[$d2],@x[$a2])", + "&vpxord (@x[$d3],@x[$d3],@x[$a3])", + "&vprold (@x[$d0],@x[$d0],16)", + "&vprold (@x[$d1],@x[$d1],16)", + "&vprold (@x[$d2],@x[$d2],16)", + "&vprold (@x[$d3],@x[$d3],16)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxord (@x[$b0],@x[$b0],@x[$c0])", + "&vpxord (@x[$b1],@x[$b1],@x[$c1])", + "&vpxord (@x[$b2],@x[$b2],@x[$c2])", + "&vpxord (@x[$b3],@x[$b3],@x[$c3])", + "&vprold (@x[$b0],@x[$b0],12)", + "&vprold (@x[$b1],@x[$b1],12)", + "&vprold (@x[$b2],@x[$b2],12)", + "&vprold (@x[$b3],@x[$b3],12)", + + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxord (@x[$d0],@x[$d0],@x[$a0])", + "&vpxord (@x[$d1],@x[$d1],@x[$a1])", + "&vpxord (@x[$d2],@x[$d2],@x[$a2])", + "&vpxord (@x[$d3],@x[$d3],@x[$a3])", + "&vprold (@x[$d0],@x[$d0],8)", + "&vprold (@x[$d1],@x[$d1],8)", + "&vprold (@x[$d2],@x[$d2],8)", + "&vprold (@x[$d3],@x[$d3],8)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxord (@x[$b0],@x[$b0],@x[$c0])", + "&vpxord (@x[$b1],@x[$b1],@x[$c1])", + "&vpxord (@x[$b2],@x[$b2],@x[$c2])", + "&vpxord (@x[$b3],@x[$b3],@x[$c3])", + "&vprold (@x[$b0],@x[$b0],7)", + "&vprold (@x[$b1],@x[$b1],7)", + "&vprold (@x[$b2],@x[$b2],7)", + "&vprold (@x[$b3],@x[$b3],7)" + ); +} + +my $xframe = $win64 ? 
0xa8 : 8; + +$code.=<<___; +.type chacha20_16x,\@function,5 +.align 32 +chacha20_16x: +.cfi_startproc +.Lchacha20_16x: + lea 8(%rsp),%r10 # frame register +.cfi_def_cfa_register %r10 + sub \$64+$xframe,%rsp + and \$-64,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L16x_body: +___ +$code.=<<___; + vzeroupper + + lea .Lsigma(%rip),%r9 + vbroadcasti32x4 (%r9),$xa3 # key[0] + vbroadcasti32x4 ($key),$xb3 # key[1] + vbroadcasti32x4 16($key),$xc3 # key[2] + vbroadcasti32x4 ($counter),$xd3 # key[3] + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vpshufd \$0xaa,$xa3,$xa2 + vpshufd \$0xff,$xa3,$xa3 + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vpshufd \$0xaa,$xb3,$xb2 + vpshufd \$0xff,$xb3,$xb3 + vmovdqa64 $xb0,@key[4] + vmovdqa64 $xb1,@key[5] + vmovdqa64 $xb2,@key[6] + vmovdqa64 $xb3,@key[7] + + vpshufd \$0x00,$xc3,$xc0 + vpshufd \$0x55,$xc3,$xc1 + vpshufd \$0xaa,$xc3,$xc2 + vpshufd \$0xff,$xc3,$xc3 + vmovdqa64 $xc0,@key[8] + vmovdqa64 $xc1,@key[9] + vmovdqa64 $xc2,@key[10] + vmovdqa64 $xc3,@key[11] + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpshufd \$0xaa,$xd3,$xd2 + vpshufd \$0xff,$xd3,$xd3 + vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet + vmovdqa64 $xd0,@key[12] + vmovdqa64 $xd1,@key[13] + vmovdqa64 $xd2,@key[14] + vmovdqa64 $xd3,@key[15] + + mov \$10,%eax + jmp .Loop16x + +.align 32 +.Loop_outer16x: + vpbroadcastd 0(%r9),$xa0 # reload key + vpbroadcastd 4(%r9),$xa1 + vpbroadcastd 8(%r9),$xa2 + vpbroadcastd 12(%r9),$xa3 + vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters + vmovdqa64 @key[4],$xb0 + vmovdqa64 @key[5],$xb1 + vmovdqa64 @key[6],$xb2 + vmovdqa64 @key[7],$xb3 + vmovdqa64 @key[8],$xc0 + vmovdqa64 @key[9],$xc1 + vmovdqa64 @key[10],$xc2 + vmovdqa64 @key[11],$xc3 + vmovdqa64 @key[12],$xd0 + vmovdqa64 @key[13],$xd1 + vmovdqa64 @key[14],$xd2 + vmovdqa64 @key[15],$xd3 + + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + mov \$10,%eax + jmp .Loop16x + +.align 32 +.Loop16x: +___ + foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop16x + + vpaddd @key[0],$xa0,$xa0 # accumulate key + vpaddd @key[1],$xa1,$xa1 + vpaddd @key[2],$xa2,$xa2 + vpaddd @key[3],$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd @key[4],$xb0,$xb0 + vpaddd @key[5],$xb1,$xb1 + vpaddd @key[6],$xb2,$xb2 + vpaddd @key[7],$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further + 
vshufi32x4 \$0xee,$xb0,$xa0,$xb0 + vshufi32x4 \$0x44,$xb1,$xa1,$xa0 + vshufi32x4 \$0xee,$xb1,$xa1,$xb1 + vshufi32x4 \$0x44,$xb2,$xa2,$xa1 + vshufi32x4 \$0xee,$xb2,$xa2,$xb2 + vshufi32x4 \$0x44,$xb3,$xa3,$xa2 + vshufi32x4 \$0xee,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); +$code.=<<___; + vpaddd @key[8],$xc0,$xc0 + vpaddd @key[9],$xc1,$xc1 + vpaddd @key[10],$xc2,$xc2 + vpaddd @key[11],$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd @key[12],$xd0,$xd0 + vpaddd @key[13],$xd1,$xd1 + vpaddd @key[14],$xd2,$xd2 + vpaddd @key[15],$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # "de-interlace" further + vshufi32x4 \$0xee,$xd0,$xc0,$xd0 + vshufi32x4 \$0x44,$xd1,$xc1,$xc0 + vshufi32x4 \$0xee,$xd1,$xc1,$xd1 + vshufi32x4 \$0x44,$xd2,$xc2,$xc1 + vshufi32x4 \$0xee,$xd2,$xc2,$xd2 + vshufi32x4 \$0x44,$xd3,$xc3,$xc2 + vshufi32x4 \$0xee,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); +$code.=<<___; + vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further + vshufi32x4 \$0xdd,$xc0,$xa0,$xa0 + vshufi32x4 \$0x88,$xd0,$xb0,$xc0 + vshufi32x4 \$0xdd,$xd0,$xb0,$xd0 + vshufi32x4 \$0x88,$xc1,$xa1,$xt1 + vshufi32x4 \$0xdd,$xc1,$xa1,$xa1 + vshufi32x4 \$0x88,$xd1,$xb1,$xc1 + vshufi32x4 \$0xdd,$xd1,$xb1,$xd1 + vshufi32x4 \$0x88,$xc2,$xa2,$xt2 + vshufi32x4 \$0xdd,$xc2,$xa2,$xa2 + vshufi32x4 \$0x88,$xd2,$xb2,$xc2 + vshufi32x4 \$0xdd,$xd2,$xb2,$xd2 + vshufi32x4 \$0x88,$xc3,$xa3,$xt3 + vshufi32x4 \$0xdd,$xc3,$xa3,$xa3 + vshufi32x4 \$0x88,$xd3,$xb3,$xc3 + vshufi32x4 \$0xdd,$xd3,$xb3,$xd3 +___ + ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)= + ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3); + + ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1, + $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) = + ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3); +$code.=<<___; + cmp \$64*16,$len + jb .Ltail16x + + vpxord 0x00($inp),$xa0,$xa0 # xor with input + vpxord 0x40($inp),$xb0,$xb0 + vpxord 0x80($inp),$xc0,$xc0 + vpxord 0xc0($inp),$xd0,$xd0 + vmovdqu32 $xa0,0x00($out) + vmovdqu32 $xb0,0x40($out) + vmovdqu32 $xc0,0x80($out) + vmovdqu32 $xd0,0xc0($out) + + vpxord 0x100($inp),$xa1,$xa1 + vpxord 0x140($inp),$xb1,$xb1 + vpxord 0x180($inp),$xc1,$xc1 + vpxord 0x1c0($inp),$xd1,$xd1 + vmovdqu32 $xa1,0x100($out) + vmovdqu32 $xb1,0x140($out) + vmovdqu32 $xc1,0x180($out) + vmovdqu32 $xd1,0x1c0($out) + + vpxord 0x200($inp),$xa2,$xa2 + vpxord 0x240($inp),$xb2,$xb2 + vpxord 0x280($inp),$xc2,$xc2 + vpxord 0x2c0($inp),$xd2,$xd2 + vmovdqu32 $xa2,0x200($out) + vmovdqu32 $xb2,0x240($out) + vmovdqu32 $xc2,0x280($out) + vmovdqu32 $xd2,0x2c0($out) + + vpxord 0x300($inp),$xa3,$xa3 + vpxord 0x340($inp),$xb3,$xb3 + vpxord 0x380($inp),$xc3,$xc3 + vpxord 0x3c0($inp),$xd3,$xd3 + lea 0x400($inp),$inp + vmovdqu32 $xa3,0x300($out) + vmovdqu32 $xb3,0x340($out) + vmovdqu32 $xc3,0x380($out) + vmovdqu32 $xd3,0x3c0($out) + lea 0x400($out),$out + + sub \$64*16,$len + 
jnz .Loop_outer16x + + jmp .Ldone16x + +.align 32 +.Ltail16x: + xor %r9,%r9 + sub $inp,$out + cmp \$64*1,$len + jb .Less_than_64_16x + vpxord ($inp),$xa0,$xa0 # xor with input + vmovdqu32 $xa0,($out,$inp) + je .Ldone16x + vmovdqa32 $xb0,$xa0 + lea 64($inp),$inp + + cmp \$64*2,$len + jb .Less_than_64_16x + vpxord ($inp),$xb0,$xb0 + vmovdqu32 $xb0,($out,$inp) + je .Ldone16x + vmovdqa32 $xc0,$xa0 + lea 64($inp),$inp + + cmp \$64*3,$len + jb .Less_than_64_16x + vpxord ($inp),$xc0,$xc0 + vmovdqu32 $xc0,($out,$inp) + je .Ldone16x + vmovdqa32 $xd0,$xa0 + lea 64($inp),$inp + + cmp \$64*4,$len + jb .Less_than_64_16x + vpxord ($inp),$xd0,$xd0 + vmovdqu32 $xd0,($out,$inp) + je .Ldone16x + vmovdqa32 $xa1,$xa0 + lea 64($inp),$inp + + cmp \$64*5,$len + jb .Less_than_64_16x + vpxord ($inp),$xa1,$xa1 + vmovdqu32 $xa1,($out,$inp) + je .Ldone16x + vmovdqa32 $xb1,$xa0 + lea 64($inp),$inp + + cmp \$64*6,$len + jb .Less_than_64_16x + vpxord ($inp),$xb1,$xb1 + vmovdqu32 $xb1,($out,$inp) + je .Ldone16x + vmovdqa32 $xc1,$xa0 + lea 64($inp),$inp + + cmp \$64*7,$len + jb .Less_than_64_16x + vpxord ($inp),$xc1,$xc1 + vmovdqu32 $xc1,($out,$inp) + je .Ldone16x + vmovdqa32 $xd1,$xa0 + lea 64($inp),$inp + + cmp \$64*8,$len + jb .Less_than_64_16x + vpxord ($inp),$xd1,$xd1 + vmovdqu32 $xd1,($out,$inp) + je .Ldone16x + vmovdqa32 $xa2,$xa0 + lea 64($inp),$inp + + cmp \$64*9,$len + jb .Less_than_64_16x + vpxord ($inp),$xa2,$xa2 + vmovdqu32 $xa2,($out,$inp) + je .Ldone16x + vmovdqa32 $xb2,$xa0 + lea 64($inp),$inp + + cmp \$64*10,$len + jb .Less_than_64_16x + vpxord ($inp),$xb2,$xb2 + vmovdqu32 $xb2,($out,$inp) + je .Ldone16x + vmovdqa32 $xc2,$xa0 + lea 64($inp),$inp + + cmp \$64*11,$len + jb .Less_than_64_16x + vpxord ($inp),$xc2,$xc2 + vmovdqu32 $xc2,($out,$inp) + je .Ldone16x + vmovdqa32 $xd2,$xa0 + lea 64($inp),$inp + + cmp \$64*12,$len + jb .Less_than_64_16x + vpxord ($inp),$xd2,$xd2 + vmovdqu32 $xd2,($out,$inp) + je .Ldone16x + vmovdqa32 $xa3,$xa0 + lea 64($inp),$inp + + cmp \$64*13,$len + jb .Less_than_64_16x + vpxord ($inp),$xa3,$xa3 + vmovdqu32 $xa3,($out,$inp) + je .Ldone16x + vmovdqa32 $xb3,$xa0 + lea 64($inp),$inp + + cmp \$64*14,$len + jb .Less_than_64_16x + vpxord ($inp),$xb3,$xb3 + vmovdqu32 $xb3,($out,$inp) + je .Ldone16x + vmovdqa32 $xc3,$xa0 + lea 64($inp),$inp + + cmp \$64*15,$len + jb .Less_than_64_16x + vpxord ($inp),$xc3,$xc3 + vmovdqu32 $xc3,($out,$inp) + je .Ldone16x + vmovdqa32 $xd3,$xa0 + lea 64($inp),$inp + +.Less_than_64_16x: + vmovdqa32 $xa0,0x00(%rsp) + lea ($out,$inp),$out + and \$63,$len + +.Loop_tail16x: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail16x + + vpxord $xa0,$xa0,$xa0 + vmovdqa32 $xa0,0(%rsp) + +.Ldone16x: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L16x_epilogue: + ret +.cfi_endproc +.size chacha20_16x,.-chacha20_16x +___ + +# switch to %ymm domain +($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%ymm$_",(0..15)); +@xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3); +@key=map("%ymm$_",(16..31)); +($xt0,$xt1,$xt2,$xt3)=@key[0..3]; + +$code.=<<___; +.type 
chacha20_8xvl,\@function,5 +.align 32 +chacha20_8xvl: +.cfi_startproc +.Lchacha20_8xvl: + lea 8(%rsp),%r10 # frame register +.cfi_def_cfa_register %r10 + sub \$64+$xframe,%rsp + and \$-64,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,-0xb0(%r10) + movaps %xmm7,-0xa0(%r10) + movaps %xmm8,-0x90(%r10) + movaps %xmm9,-0x80(%r10) + movaps %xmm10,-0x70(%r10) + movaps %xmm11,-0x60(%r10) + movaps %xmm12,-0x50(%r10) + movaps %xmm13,-0x40(%r10) + movaps %xmm14,-0x30(%r10) + movaps %xmm15,-0x20(%r10) +.L8xvl_body: +___ +$code.=<<___; + vzeroupper + + lea .Lsigma(%rip),%r9 + vbroadcasti128 (%r9),$xa3 # key[0] + vbroadcasti128 ($key),$xb3 # key[1] + vbroadcasti128 16($key),$xc3 # key[2] + vbroadcasti128 ($counter),$xd3 # key[3] + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vpshufd \$0xaa,$xa3,$xa2 + vpshufd \$0xff,$xa3,$xa3 + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vpshufd \$0xaa,$xb3,$xb2 + vpshufd \$0xff,$xb3,$xb3 + vmovdqa64 $xb0,@key[4] + vmovdqa64 $xb1,@key[5] + vmovdqa64 $xb2,@key[6] + vmovdqa64 $xb3,@key[7] + + vpshufd \$0x00,$xc3,$xc0 + vpshufd \$0x55,$xc3,$xc1 + vpshufd \$0xaa,$xc3,$xc2 + vpshufd \$0xff,$xc3,$xc3 + vmovdqa64 $xc0,@key[8] + vmovdqa64 $xc1,@key[9] + vmovdqa64 $xc2,@key[10] + vmovdqa64 $xc3,@key[11] + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpshufd \$0xaa,$xd3,$xd2 + vpshufd \$0xff,$xd3,$xd3 + vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet + vmovdqa64 $xd0,@key[12] + vmovdqa64 $xd1,@key[13] + vmovdqa64 $xd2,@key[14] + vmovdqa64 $xd3,@key[15] + + mov \$10,%eax + jmp .Loop8xvl + +.align 32 +.Loop_outer8xvl: + #vpbroadcastd 0(%r9),$xa0 # reload key + #vpbroadcastd 4(%r9),$xa1 + vpbroadcastd 8(%r9),$xa2 + vpbroadcastd 12(%r9),$xa3 + vpaddd .Leight(%rip),@key[12],@key[12] # next SIMD counters + vmovdqa64 @key[4],$xb0 + vmovdqa64 @key[5],$xb1 + vmovdqa64 @key[6],$xb2 + vmovdqa64 @key[7],$xb3 + vmovdqa64 @key[8],$xc0 + vmovdqa64 @key[9],$xc1 + vmovdqa64 @key[10],$xc2 + vmovdqa64 @key[11],$xc3 + vmovdqa64 @key[12],$xd0 + vmovdqa64 @key[13],$xd1 + vmovdqa64 @key[14],$xd2 + vmovdqa64 @key[15],$xd3 + + vmovdqa64 $xa0,@key[0] + vmovdqa64 $xa1,@key[1] + vmovdqa64 $xa2,@key[2] + vmovdqa64 $xa3,@key[3] + + mov \$10,%eax + jmp .Loop8xvl + +.align 32 +.Loop8xvl: +___ + foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop8xvl + + vpaddd @key[0],$xa0,$xa0 # accumulate key + vpaddd @key[1],$xa1,$xa1 + vpaddd @key[2],$xa2,$xa2 + vpaddd @key[3],$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd @key[4],$xb0,$xb0 + vpaddd @key[5],$xb1,$xb1 + vpaddd @key[6],$xb2,$xb2 + vpaddd @key[7],$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vshufi32x4 \$0,$xb0,$xa0,$xt3 # "de-interlace" further + vshufi32x4 \$3,$xb0,$xa0,$xb0 + 
vshufi32x4 \$0,$xb1,$xa1,$xa0 + vshufi32x4 \$3,$xb1,$xa1,$xb1 + vshufi32x4 \$0,$xb2,$xa2,$xa1 + vshufi32x4 \$3,$xb2,$xa2,$xb2 + vshufi32x4 \$0,$xb3,$xa3,$xa2 + vshufi32x4 \$3,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); +$code.=<<___; + vpaddd @key[8],$xc0,$xc0 + vpaddd @key[9],$xc1,$xc1 + vpaddd @key[10],$xc2,$xc2 + vpaddd @key[11],$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd @key[12],$xd0,$xd0 + vpaddd @key[13],$xd1,$xd1 + vpaddd @key[14],$xd2,$xd2 + vpaddd @key[15],$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xd0,$xc0,$xd0 + vperm2i128 \$0x20,$xd1,$xc1,$xc0 + vperm2i128 \$0x31,$xd1,$xc1,$xd1 + vperm2i128 \$0x20,$xd2,$xc2,$xc1 + vperm2i128 \$0x31,$xd2,$xc2,$xd2 + vperm2i128 \$0x20,$xd3,$xc3,$xc2 + vperm2i128 \$0x31,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); + ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)= + ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3); +$code.=<<___; + cmp \$64*8,$len + jb .Ltail8xvl + + mov \$0x80,%eax # size optimization + vpxord 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + lea ($inp,%rax),$inp # size optimization + vmovdqu32 $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + lea ($out,%rax),$out # size optimization + + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vpxor 0x40($inp),$xc1,$xc1 + vpxor 0x60($inp),$xd1,$xd1 + lea ($inp,%rax),$inp # size optimization + vmovdqu $xa1,0x00($out) + vmovdqu $xb1,0x20($out) + vmovdqu $xc1,0x40($out) + vmovdqu $xd1,0x60($out) + lea ($out,%rax),$out # size optimization + + vpxord 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vpxor 0x40($inp),$xc2,$xc2 + vpxor 0x60($inp),$xd2,$xd2 + lea ($inp,%rax),$inp # size optimization + vmovdqu32 $xa2,0x00($out) + vmovdqu $xb2,0x20($out) + vmovdqu $xc2,0x40($out) + vmovdqu $xd2,0x60($out) + lea ($out,%rax),$out # size optimization + + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vpxor 0x40($inp),$xc3,$xc3 + vpxor 0x60($inp),$xd3,$xd3 + lea ($inp,%rax),$inp # size optimization + vmovdqu $xa3,0x00($out) + vmovdqu $xb3,0x20($out) + vmovdqu $xc3,0x40($out) + vmovdqu $xd3,0x60($out) + lea ($out,%rax),$out # size optimization + + vpbroadcastd 0(%r9),%ymm0 # reload key + vpbroadcastd 4(%r9),%ymm1 + + sub \$64*8,$len + jnz .Loop_outer8xvl + + jmp .Ldone8xvl + +.align 32 +.Ltail8xvl: + vmovdqa64 $xa0,%ymm8 # size optimization +___ +$xa0 = "%ymm8"; +$code.=<<___; + xor %r9,%r9 + sub $inp,$out + cmp \$64*1,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vmovdqu $xa0,0x00($out,$inp) + vmovdqu $xb0,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc0,$xa0 + vmovdqa $xd0,$xb0 + lea 64($inp),$inp + + cmp \$64*2,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xc0,$xc0 
+ vpxor 0x20($inp),$xd0,$xd0 + vmovdqu $xc0,0x00($out,$inp) + vmovdqu $xd0,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xa1,$xa0 + vmovdqa $xb1,$xb0 + lea 64($inp),$inp + + cmp \$64*3,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vmovdqu $xa1,0x00($out,$inp) + vmovdqu $xb1,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc1,$xa0 + vmovdqa $xd1,$xb0 + lea 64($inp),$inp + + cmp \$64*4,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xc1,$xc1 + vpxor 0x20($inp),$xd1,$xd1 + vmovdqu $xc1,0x00($out,$inp) + vmovdqu $xd1,0x20($out,$inp) + je .Ldone8xvl + vmovdqa32 $xa2,$xa0 + vmovdqa $xb2,$xb0 + lea 64($inp),$inp + + cmp \$64*5,$len + jb .Less_than_64_8xvl + vpxord 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vmovdqu32 $xa2,0x00($out,$inp) + vmovdqu $xb2,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc2,$xa0 + vmovdqa $xd2,$xb0 + lea 64($inp),$inp + + cmp \$64*6,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xc2,$xc2 + vpxor 0x20($inp),$xd2,$xd2 + vmovdqu $xc2,0x00($out,$inp) + vmovdqu $xd2,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xa3,$xa0 + vmovdqa $xb3,$xb0 + lea 64($inp),$inp + + cmp \$64*7,$len + jb .Less_than_64_8xvl + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vmovdqu $xa3,0x00($out,$inp) + vmovdqu $xb3,0x20($out,$inp) + je .Ldone8xvl + vmovdqa $xc3,$xa0 + vmovdqa $xd3,$xb0 + lea 64($inp),$inp + +.Less_than_64_8xvl: + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x20(%rsp) + lea ($out,$inp),$out + and \$63,$len + +.Loop_tail8xvl: + movzb ($inp,%r9),%eax + movzb (%rsp,%r9),%ecx + lea 1(%r9),%r9 + xor %ecx,%eax + mov %al,-1($out,%r9) + dec $len + jnz .Loop_tail8xvl + + vpxor $xa0,$xa0,$xa0 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xa0,0x20(%rsp) + +.Ldone8xvl: + vzeroall +___ +$code.=<<___ if ($win64); + movaps -0xb0(%r10),%xmm6 + movaps -0xa0(%r10),%xmm7 + movaps -0x90(%r10),%xmm8 + movaps -0x80(%r10),%xmm9 + movaps -0x70(%r10),%xmm10 + movaps -0x60(%r10),%xmm11 + movaps -0x50(%r10),%xmm12 + movaps -0x40(%r10),%xmm13 + movaps -0x30(%r10),%xmm14 + movaps -0x20(%r10),%xmm15 +___ +$code.=<<___; + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +.L8xvl_epilogue: + ret +.cfi_endproc +.size chacha20_8xvl,.-chacha20_8xvl +___ +if($kernel) { + $code .= "#endif\n"; +} +} + +# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, +# CONTEXT *context,DISPATCHER_CONTEXT *disp) +if ($win64) { +$rec="%rcx"; +$frame="%rdx"; +$context="%r8"; +$disp="%r9"; + +$code.=<<___; +.extern __imp_RtlVirtualUnwind +.type se_handler,\@abi-omnipotent +.align 16 +se_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + lea .Lctr32_body(%rip),%r10 + cmp %r10,%rbx # context->Rip<.Lprologue + jb .Lcommon_seh_tail + + mov 152($context),%rax # pull context->Rsp + + lea .Lno_data(%rip),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=.Lepilogue + jae .Lcommon_seh_tail + + lea 64+24+48(%rax),%rax + + mov -8(%rax),%rbx + mov -16(%rax),%rbp + mov -24(%rax),%r12 + mov -32(%rax),%r13 + mov -40(%rax),%r14 + mov -48(%rax),%r15 + mov %rbx,144($context) # restore context->Rbx + mov %rbp,160($context) # restore context->Rbp + mov %r12,216($context) # restore context->R12 + mov %r13,224($context) # restore context->R13 + mov %r14,232($context) # restore context->R14 + mov %r15,240($context) # restore context->R14 + 
+.Lcommon_seh_tail: + mov 8(%rax),%rdi + mov 16(%rax),%rsi + mov %rax,152($context) # restore context->Rsp + mov %rsi,168($context) # restore context->Rsi + mov %rdi,176($context) # restore context->Rdi + + mov 40($disp),%rdi # disp->ContextRecord + mov $context,%rsi # context + mov \$154,%ecx # sizeof(CONTEXT) + .long 0xa548f3fc # cld; rep movsq + + mov $disp,%rsi + xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER + mov 8(%rsi),%rdx # arg2, disp->ImageBase + mov 0(%rsi),%r8 # arg3, disp->ControlPc + mov 16(%rsi),%r9 # arg4, disp->FunctionEntry + mov 40(%rsi),%r10 # disp->ContextRecord + lea 56(%rsi),%r11 # &disp->HandlerData + lea 24(%rsi),%r12 # &disp->EstablisherFrame + mov %r10,32(%rsp) # arg5 + mov %r11,40(%rsp) # arg6 + mov %r12,48(%rsp) # arg7 + mov %rcx,56(%rsp) # arg8, (NULL) + call *__imp_RtlVirtualUnwind(%rip) + + mov \$1,%eax # ExceptionContinueSearch + add \$64,%rsp + popfq + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + pop %rdi + pop %rsi + ret +.size se_handler,.-se_handler + +.type simd_handler,\@abi-omnipotent +.align 16 +simd_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->Rip<prologue label + jb .Lcommon_seh_tail + + mov 192($context),%rax # pull context->R9 + + mov 4(%r11),%r10d # HandlerData[1] + mov 8(%r11),%ecx # HandlerData[2] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=epilogue label + jae .Lcommon_seh_tail + + neg %rcx + lea -8(%rax,%rcx),%rsi + lea 512($context),%rdi # &context.Xmm6 + neg %ecx + shr \$3,%ecx + .long 0xa548f3fc # cld; rep movsq + + jmp .Lcommon_seh_tail +.size simd_handler,.-simd_handler + +.section .pdata +.align 4 + .rva .LSEH_begin_chacha20_ctr32 + .rva .LSEH_end_chacha20_ctr32 + .rva .LSEH_info_chacha20_ctr32 + + .rva .LSEH_begin_chacha20_ssse3 + .rva .LSEH_end_chacha20_ssse3 + .rva .LSEH_info_chacha20_ssse3 + + .rva .LSEH_begin_chacha20_128 + .rva .LSEH_end_chacha20_128 + .rva .LSEH_info_chacha20_128 + + .rva .LSEH_begin_chacha20_4x + .rva .LSEH_end_chacha20_4x + .rva .LSEH_info_chacha20_4x +___ +$code.=<<___ if ($avx); + .rva .LSEH_begin_chacha20_xop + .rva .LSEH_end_chacha20_xop + .rva .LSEH_info_chacha20_xop +___ +$code.=<<___ if ($avx>1); + .rva .LSEH_begin_chacha20_avx2 + .rva .LSEH_end_chacha20_avx2 + .rva .LSEH_info_chacha20_avx2 +___ +$code.=<<___ if ($avx>2); + .rva .LSEH_begin_chacha20_avx512 + .rva .LSEH_end_chacha20_avx512 + .rva .LSEH_info_chacha20_avx512 + + .rva .LSEH_begin_chacha20_avx512vl + .rva .LSEH_end_chacha20_avx512vl + .rva .LSEH_info_chacha20_avx512vl + + .rva .LSEH_begin_chacha20_16x + .rva .LSEH_end_chacha20_16x + .rva .LSEH_info_chacha20_16x + + .rva .LSEH_begin_chacha20_8xvl + .rva .LSEH_end_chacha20_8xvl + .rva .LSEH_info_chacha20_8xvl +___ +$code.=<<___; +.section .xdata +.align 8 +.LSEH_info_chacha20_ctr32: + .byte 9,0,0,0 + .rva se_handler + +.LSEH_info_chacha20_ssse3: + .byte 9,0,0,0 + .rva simd_handler + .rva .Lssse3_body,.Lssse3_epilogue + .long 0x20,0 + +.LSEH_info_chacha20_128: + .byte 9,0,0,0 + .rva simd_handler + .rva .L128_body,.L128_epilogue + .long 0x60,0 + +.LSEH_info_chacha20_4x: + .byte 9,0,0,0 + .rva simd_handler + .rva .L4x_body,.L4x_epilogue + .long 0xa0,0 +___ +$code.=<<___ if ($avx); 
+.LSEH_info_chacha20_xop: + .byte 9,0,0,0 + .rva simd_handler + .rva .L4xop_body,.L4xop_epilogue # HandlerData[] + .long 0xa0,0 +___ +$code.=<<___ if ($avx>1); +.LSEH_info_chacha20_avx2: + .byte 9,0,0,0 + .rva simd_handler + .rva .L8x_body,.L8x_epilogue # HandlerData[] + .long 0xa0,0 +___ +$code.=<<___ if ($avx>2); +.LSEH_info_chacha20_avx512: + .byte 9,0,0,0 + .rva simd_handler + .rva .Lavx512_body,.Lavx512_epilogue # HandlerData[] + .long 0x20,0 + +.LSEH_info_chacha20_avx512vl: + .byte 9,0,0,0 + .rva simd_handler + .rva .Lavx512vl_body,.Lavx512vl_epilogue # HandlerData[] + .long 0x20,0 + +.LSEH_info_chacha20_16x: + .byte 9,0,0,0 + .rva simd_handler + .rva .L16x_body,.L16x_epilogue # HandlerData[] + .long 0xa0,0 + +.LSEH_info_chacha20_8xvl: + .byte 9,0,0,0 + .rva simd_handler + .rva .L8xvl_body,.L8xvl_epilogue # HandlerData[] + .long 0xa0,0 +___ +} + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/ge; + + s/%x#%[yz]/%x/g; # "down-shift" + + if ($kernel) { + s/(^\.type.*),[0-9]+$/\1/; + next if /^\.cfi.*/; + } + + print $_,"\n"; +} + +close STDOUT; diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20.c b/net/wireguard/crypto/zinc/chacha20/chacha20.c new file mode 100644 index 000000000000..f4ca8b09d01e --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/chacha20.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * Implementation of the ChaCha20 stream cipher. + * + * Information: https://cr.yp.to/chacha.html + */ + +#include <zinc/chacha20.h> +#include "../selftest/run.h" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <crypto/algapi.h> // For crypto_xor_cpy. 
+ +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "chacha20-x86_64-glue.c" +#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64) +#include "chacha20-arm-glue.c" +#elif defined(CONFIG_ZINC_ARCH_MIPS) +#include "chacha20-mips-glue.c" +#else +static bool *const chacha20_nobs[] __initconst = { }; +static void __init chacha20_fpu_init(void) +{ +} +static inline bool chacha20_arch(struct chacha20_ctx *ctx, u8 *dst, + const u8 *src, size_t len, + simd_context_t *simd_context) +{ + return false; +} +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], + simd_context_t *simd_context) +{ + return false; +} +#endif + +#define QUARTER_ROUND(x, a, b, c, d) ( \ + x[a] += x[b], \ + x[d] = rol32((x[d] ^ x[a]), 16), \ + x[c] += x[d], \ + x[b] = rol32((x[b] ^ x[c]), 12), \ + x[a] += x[b], \ + x[d] = rol32((x[d] ^ x[a]), 8), \ + x[c] += x[d], \ + x[b] = rol32((x[b] ^ x[c]), 7) \ +) + +#define C(i, j) (i * 4 + j) + +#define DOUBLE_ROUND(x) ( \ + /* Column Round */ \ + QUARTER_ROUND(x, C(0, 0), C(1, 0), C(2, 0), C(3, 0)), \ + QUARTER_ROUND(x, C(0, 1), C(1, 1), C(2, 1), C(3, 1)), \ + QUARTER_ROUND(x, C(0, 2), C(1, 2), C(2, 2), C(3, 2)), \ + QUARTER_ROUND(x, C(0, 3), C(1, 3), C(2, 3), C(3, 3)), \ + /* Diagonal Round */ \ + QUARTER_ROUND(x, C(0, 0), C(1, 1), C(2, 2), C(3, 3)), \ + QUARTER_ROUND(x, C(0, 1), C(1, 2), C(2, 3), C(3, 0)), \ + QUARTER_ROUND(x, C(0, 2), C(1, 3), C(2, 0), C(3, 1)), \ + QUARTER_ROUND(x, C(0, 3), C(1, 0), C(2, 1), C(3, 2)) \ +) + +#define TWENTY_ROUNDS(x) ( \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x), \ + DOUBLE_ROUND(x) \ +) + +static void chacha20_block_generic(struct chacha20_ctx *ctx, __le32 *stream) +{ + u32 x[CHACHA20_BLOCK_WORDS]; + int i; + + for (i = 0; i < ARRAY_SIZE(x); ++i) + x[i] = ctx->state[i]; + + TWENTY_ROUNDS(x); + + for (i = 0; i < ARRAY_SIZE(x); ++i) + stream[i] = cpu_to_le32(x[i] + ctx->state[i]); + + ctx->counter[0] += 1; +} + +static void chacha20_generic(struct chacha20_ctx *ctx, u8 *out, const u8 *in, + u32 len) +{ + __le32 buf[CHACHA20_BLOCK_WORDS]; + + while (len >= CHACHA20_BLOCK_SIZE) { + chacha20_block_generic(ctx, buf); + crypto_xor_cpy(out, in, (u8 *)buf, CHACHA20_BLOCK_SIZE); + len -= CHACHA20_BLOCK_SIZE; + out += CHACHA20_BLOCK_SIZE; + in += CHACHA20_BLOCK_SIZE; + } + if (len) { + chacha20_block_generic(ctx, buf); + crypto_xor_cpy(out, in, (u8 *)buf, len); + } +} + +void chacha20(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 len, + simd_context_t *simd_context) +{ + if (!chacha20_arch(ctx, dst, src, len, simd_context)) + chacha20_generic(ctx, dst, src, len); +} + +static void hchacha20_generic(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE]) +{ + u32 x[] = { CHACHA20_CONSTANT_EXPA, + CHACHA20_CONSTANT_ND_3, + CHACHA20_CONSTANT_2_BY, + CHACHA20_CONSTANT_TE_K, + get_unaligned_le32(key + 0), + get_unaligned_le32(key + 4), + get_unaligned_le32(key + 8), + get_unaligned_le32(key + 12), + get_unaligned_le32(key + 16), + get_unaligned_le32(key + 20), + get_unaligned_le32(key + 24), + get_unaligned_le32(key + 28), + get_unaligned_le32(nonce + 0), + get_unaligned_le32(nonce + 4), + get_unaligned_le32(nonce + 8), + get_unaligned_le32(nonce + 12) + }; + + TWENTY_ROUNDS(x); + + memcpy(derived_key + 0, x + 0, sizeof(u32) * 4); + memcpy(derived_key 
+ 4, x + 12, sizeof(u32) * 4); +} + +/* Derived key should be 32-bit aligned */ +void hchacha20(u32 derived_key[CHACHA20_KEY_WORDS], + const u8 nonce[HCHACHA20_NONCE_SIZE], + const u8 key[HCHACHA20_KEY_SIZE], simd_context_t *simd_context) +{ + if (!hchacha20_arch(derived_key, nonce, key, simd_context)) + hchacha20_generic(derived_key, nonce, key); +} + +#include "../selftest/chacha20.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init chacha20_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + chacha20_fpu_init(); + if (!selftest_run("chacha20", chacha20_selftest, chacha20_nobs, + ARRAY_SIZE(chacha20_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ChaCha20 stream cipher"); +MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); +#endif diff --git a/net/wireguard/crypto/zinc/chacha20poly1305.c b/net/wireguard/crypto/zinc/chacha20poly1305.c new file mode 100644 index 000000000000..cee29db01bc0 --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20poly1305.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is an implementation of the ChaCha20Poly1305 AEAD construction. + * + * Information: https://tools.ietf.org/html/rfc8439 + */ + +#include <zinc/chacha20poly1305.h> +#include <zinc/chacha20.h> +#include <zinc/poly1305.h> +#include "selftest/run.h" + +#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <crypto/scatterwalk.h> // For blkcipher_walk. 
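+
+/*
+ * Summary of the construction implemented below (RFC 8439): the Poly1305
+ * one-time key is the first POLY1305_KEY_SIZE bytes of keystream from the
+ * freshly initialized ChaCha20 state, and the tag is then computed over
+ *
+ *     ad || pad || ciphertext || pad || le64(ad_len) || le64(ct_len)
+ *
+ * where each pad is (0x10 - len) & 0xf zero bytes taken from pad0, i.e.
+ * just enough to round the preceding field up to a 16-byte boundary
+ * (for example ad_len = 13 gives 3 pad bytes, ad_len = 32 gives none).
+ */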
+ +static const u8 pad0[CHACHA20_BLOCK_SIZE] = { 0 }; + +static inline void +__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + union { + u8 block0[POLY1305_KEY_SIZE]; + __le64 lens[2]; + } b = { { 0 } }; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + chacha20(&chacha20_state, dst, src, src_len, simd_context); + + poly1305_update(&poly1305_state, dst, src_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + poly1305_final(&poly1305_state, dst + src_len, simd_context); + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); +} + +void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context; + + simd_get(&simd_context); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, nonce, key, + &simd_context); + simd_put(&simd_context); +} + +bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, + const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + struct sg_mapping_iter miter; + size_t partial = 0; + ssize_t sl; + union { + u8 chacha20_stream[CHACHA20_BLOCK_SIZE]; + u8 block0[POLY1305_KEY_SIZE]; + u8 mac[POLY1305_MAC_SIZE]; + __le64 lens[2]; + } b __aligned(16) = { { 0 } }; + + if (WARN_ON(src_len > INT_MAX)) + return false; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + sg_miter_start(&miter, src, sg_nents(src), SG_MITER_TO_SG | SG_MITER_ATOMIC); + for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) { + u8 *addr = miter.addr; + size_t length = min_t(size_t, sl, miter.length); + + if (unlikely(partial)) { + size_t l = min(length, CHACHA20_BLOCK_SIZE - partial); + + crypto_xor(addr, b.chacha20_stream + partial, l); + partial = (partial + l) & (CHACHA20_BLOCK_SIZE - 1); + + addr += l; + length -= l; + } + + if (likely(length >= CHACHA20_BLOCK_SIZE || length == sl)) { + size_t l = length; + + if (unlikely(length < sl)) + l &= ~(CHACHA20_BLOCK_SIZE - 1); + chacha20(&chacha20_state, addr, addr, l, simd_context); + addr += l; + length -= l; + } + + if (unlikely(length > 0)) { + chacha20(&chacha20_state, b.chacha20_stream, pad0, + CHACHA20_BLOCK_SIZE, simd_context); + crypto_xor(addr, b.chacha20_stream, length); + partial = length; + } + + poly1305_update(&poly1305_state, miter.addr, + min_t(size_t, sl, miter.length), 
simd_context); + + simd_relax(simd_context); + } + + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + if (likely(sl <= -POLY1305_MAC_SIZE)) + poly1305_final(&poly1305_state, miter.addr + miter.length + sl, + simd_context); + + sg_miter_stop(&miter); + + if (unlikely(sl > -POLY1305_MAC_SIZE)) { + poly1305_final(&poly1305_state, b.mac, simd_context); + scatterwalk_map_and_copy(b.mac, src, src_len, sizeof(b.mac), 1); + } + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); + return true; +} + +static inline bool +__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + int ret; + size_t dst_len; + union { + u8 block0[POLY1305_KEY_SIZE]; + u8 mac[POLY1305_MAC_SIZE]; + __le64 lens[2]; + } b = { { 0 } }; + + if (unlikely(src_len < POLY1305_MAC_SIZE)) + return false; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + dst_len = src_len - POLY1305_MAC_SIZE; + poly1305_update(&poly1305_state, src, dst_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - dst_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(dst_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + poly1305_final(&poly1305_state, b.mac, simd_context); + + ret = crypto_memneq(b.mac, src + dst_len, POLY1305_MAC_SIZE); + if (likely(!ret)) + chacha20(&chacha20_state, dst, src, dst_len, simd_context); + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); + + return !ret; +} + +bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context, ret; + + simd_get(&simd_context); + ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, nonce, + key, &simd_context); + simd_put(&simd_context); + return ret; +} + +bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, + size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE], + simd_context_t *simd_context) +{ + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + struct sg_mapping_iter miter; + size_t partial = 0; + ssize_t sl; + union { + u8 chacha20_stream[CHACHA20_BLOCK_SIZE]; + u8 block0[POLY1305_KEY_SIZE]; + struct { + u8 read_mac[POLY1305_MAC_SIZE]; + u8 computed_mac[POLY1305_MAC_SIZE]; + }; + __le64 lens[2]; + } b __aligned(16) = { { 0 } }; + bool ret = false; + + if (unlikely(src_len < POLY1305_MAC_SIZE || WARN_ON(src_len > INT_MAX))) + return ret; + src_len -= POLY1305_MAC_SIZE; + + chacha20_init(&chacha20_state, key, nonce); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + simd_context); + poly1305_init(&poly1305_state, b.block0); + + poly1305_update(&poly1305_state, ad, 
ad_len, simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + simd_context); + + sg_miter_start(&miter, src, sg_nents(src), SG_MITER_TO_SG | SG_MITER_ATOMIC); + for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) { + u8 *addr = miter.addr; + size_t length = min_t(size_t, sl, miter.length); + + poly1305_update(&poly1305_state, addr, length, simd_context); + + if (unlikely(partial)) { + size_t l = min(length, CHACHA20_BLOCK_SIZE - partial); + + crypto_xor(addr, b.chacha20_stream + partial, l); + partial = (partial + l) & (CHACHA20_BLOCK_SIZE - 1); + + addr += l; + length -= l; + } + + if (likely(length >= CHACHA20_BLOCK_SIZE || length == sl)) { + size_t l = length; + + if (unlikely(length < sl)) + l &= ~(CHACHA20_BLOCK_SIZE - 1); + chacha20(&chacha20_state, addr, addr, l, simd_context); + addr += l; + length -= l; + } + + if (unlikely(length > 0)) { + chacha20(&chacha20_state, b.chacha20_stream, pad0, + CHACHA20_BLOCK_SIZE, simd_context); + crypto_xor(addr, b.chacha20_stream, length); + partial = length; + } + + simd_relax(simd_context); + } + + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + simd_context); + + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + simd_context); + + if (likely(sl <= -POLY1305_MAC_SIZE)) { + poly1305_final(&poly1305_state, b.computed_mac, simd_context); + ret = !crypto_memneq(b.computed_mac, + miter.addr + miter.length + sl, + POLY1305_MAC_SIZE); + } + + sg_miter_stop(&miter); + + if (unlikely(sl > -POLY1305_MAC_SIZE)) { + poly1305_final(&poly1305_state, b.computed_mac, simd_context); + scatterwalk_map_and_copy(b.read_mac, src, src_len, + sizeof(b.read_mac), 0); + ret = !crypto_memneq(b.read_mac, b.computed_mac, + POLY1305_MAC_SIZE); + + } + + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); + return ret; +} + +void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context; + u32 derived_key[CHACHA20_KEY_WORDS] __aligned(16); + + simd_get(&simd_context); + hchacha20(derived_key, nonce, key, &simd_context); + cpu_to_le32_array(derived_key, ARRAY_SIZE(derived_key)); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + get_unaligned_le64(nonce + 16), + (u8 *)derived_key, &simd_context); + memzero_explicit(derived_key, CHACHA20POLY1305_KEY_SIZE); + simd_put(&simd_context); +} + +bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + bool ret; + simd_context_t simd_context; + u32 derived_key[CHACHA20_KEY_WORDS] __aligned(16); + + simd_get(&simd_context); + hchacha20(derived_key, nonce, key, &simd_context); + cpu_to_le32_array(derived_key, ARRAY_SIZE(derived_key)); + ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, + get_unaligned_le64(nonce + 16), + (u8 *)derived_key, &simd_context); + memzero_explicit(derived_key, CHACHA20POLY1305_KEY_SIZE); + simd_put(&simd_context); + return ret; +} + +#include "selftest/chacha20poly1305.c" + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init chacha20poly1305_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!selftest_run("chacha20poly1305", chacha20poly1305_selftest, + NULL, 
0)) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction"); +MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); +#endif diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c b/net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c new file mode 100644 index 000000000000..e0c5a5d297c0 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-arm-glue.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <linux/simd.h> +#include <asm/hwcap.h> +#include <asm/neon.h> + +asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]); + +static bool curve25519_use_neon __ro_after_init; +static bool *const curve25519_nobs[] __initconst = { &curve25519_use_neon }; +static void __init curve25519_fpu_init(void) +{ + curve25519_use_neon = elf_hwcap & HWCAP_NEON; +} + +static inline bool curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + simd_context_t simd_context; + bool used_arch = false; + + simd_get(&simd_context); + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + !IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && curve25519_use_neon && + simd_use(&simd_context)) { + curve25519_neon(mypublic, secret, basepoint); + used_arch = true; + } + simd_put(&simd_context); + return used_arch; +} + +static inline bool curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + return false; +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-arm.S b/net/wireguard/crypto/zinc/curve25519/curve25519-arm.S new file mode 100644 index 000000000000..8eca8a11ef28 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-arm.S @@ -0,0 +1,2064 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This + * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been + * manually reworked for use in kernel space. + */ + +#if defined(CONFIG_KERNEL_MODE_NEON) && !defined(__ARMEB__) +#include <linux/linkage.h> + +.text +.fpu neon +.arch armv7-a +.align 4 + +SYM_FUNC_START(curve25519_neon) + push {r4-r11, lr} + mov ip, sp + sub r3, sp, #704 + and r3, r3, #0xfffffff0 + mov sp, r3 + movw r4, #0 + movw r5, #254 + vmov.i32 q0, #1 + vshr.u64 q1, q0, #7 + vshr.u64 q0, q0, #8 + vmov.i32 d4, #19 + vmov.i32 d5, #38 + add r6, sp, #480 + vst1.8 {d2-d3}, [r6, : 128]! + vst1.8 {d0-d1}, [r6, : 128]! + vst1.8 {d4-d5}, [r6, : 128] + add r6, r3, #0 + vmov.i32 q2, #0 + vst1.8 {d4-d5}, [r6, : 128]! + vst1.8 {d4-d5}, [r6, : 128]! + vst1.8 d4, [r6, : 64] + add r6, r3, #0 + movw r7, #960 + sub r7, r7, #2 + neg r7, r7 + sub r7, r7, r7, LSL #7 + str r7, [r6] + add r6, sp, #672 + vld1.8 {d4-d5}, [r1]! + vld1.8 {d6-d7}, [r1] + vst1.8 {d4-d5}, [r6, : 128]! 
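+	/* The 32-byte secret scalar has just been copied onto the stack; the
+	 * copy completes below and is then clamped in place per RFC 7748
+	 * (b[0] &= 248; b[31] = (b[31] & 127) | 64) before the ladder runs.
+	 */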
+ vst1.8 {d6-d7}, [r6, : 128] + sub r1, r6, #16 + ldrb r6, [r1] + and r6, r6, #248 + strb r6, [r1] + ldrb r6, [r1, #31] + and r6, r6, #127 + orr r6, r6, #64 + strb r6, [r1, #31] + vmov.i64 q2, #0xffffffff + vshr.u64 q3, q2, #7 + vshr.u64 q2, q2, #6 + vld1.8 {d8}, [r2] + vld1.8 {d10}, [r2] + add r2, r2, #6 + vld1.8 {d12}, [r2] + vld1.8 {d14}, [r2] + add r2, r2, #6 + vld1.8 {d16}, [r2] + add r2, r2, #4 + vld1.8 {d18}, [r2] + vld1.8 {d20}, [r2] + add r2, r2, #6 + vld1.8 {d22}, [r2] + add r2, r2, #2 + vld1.8 {d24}, [r2] + vld1.8 {d26}, [r2] + vshr.u64 q5, q5, #26 + vshr.u64 q6, q6, #3 + vshr.u64 q7, q7, #29 + vshr.u64 q8, q8, #6 + vshr.u64 q10, q10, #25 + vshr.u64 q11, q11, #3 + vshr.u64 q12, q12, #12 + vshr.u64 q13, q13, #38 + vand q4, q4, q2 + vand q6, q6, q2 + vand q8, q8, q2 + vand q10, q10, q2 + vand q2, q12, q2 + vand q5, q5, q3 + vand q7, q7, q3 + vand q9, q9, q3 + vand q11, q11, q3 + vand q3, q13, q3 + add r2, r3, #48 + vadd.i64 q12, q4, q1 + vadd.i64 q13, q10, q1 + vshr.s64 q12, q12, #26 + vshr.s64 q13, q13, #26 + vadd.i64 q5, q5, q12 + vshl.i64 q12, q12, #26 + vadd.i64 q14, q5, q0 + vadd.i64 q11, q11, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q11, q0 + vsub.i64 q4, q4, q12 + vshr.s64 q12, q14, #25 + vsub.i64 q10, q10, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q12 + vshl.i64 q12, q12, #25 + vadd.i64 q14, q6, q1 + vadd.i64 q2, q2, q13 + vsub.i64 q5, q5, q12 + vshr.s64 q12, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q1 + vadd.i64 q7, q7, q12 + vshl.i64 q12, q12, #26 + vadd.i64 q15, q7, q0 + vsub.i64 q11, q11, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q12 + vshr.s64 q12, q15, #25 + vadd.i64 q3, q3, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q3, q0 + vadd.i64 q8, q8, q12 + vshl.i64 q12, q12, #25 + vadd.i64 q15, q8, q1 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q7, q12 + vshr.s64 q12, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q9, q9, q12 + vtrn.32 d12, d14 + vshl.i64 q12, q12, #26 + vtrn.32 d13, d15 + vadd.i64 q0, q9, q0 + vadd.i64 q4, q4, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q6, q13, #4 + vsub.i64 q7, q8, q12 + vshr.s64 q0, q0, #25 + vadd.i64 q4, q4, q6 + vadd.i64 q6, q10, q0 + vshl.i64 q0, q0, #25 + vadd.i64 q8, q6, q1 + vadd.i64 q4, q4, q13 + vshl.i64 q10, q13, #25 + vadd.i64 q1, q4, q1 + vsub.i64 q0, q9, q0 + vshr.s64 q8, q8, #26 + vsub.i64 q3, q3, q10 + vtrn.32 d14, d0 + vshr.s64 q1, q1, #26 + vtrn.32 d15, d1 + vadd.i64 q0, q11, q8 + vst1.8 d14, [r2, : 64] + vshl.i64 q7, q8, #26 + vadd.i64 q5, q5, q1 + vtrn.32 d4, d6 + vshl.i64 q1, q1, #26 + vtrn.32 d5, d7 + vsub.i64 q3, q6, q7 + add r2, r2, #16 + vsub.i64 q1, q4, q1 + vst1.8 d4, [r2, : 64] + vtrn.32 d6, d0 + vtrn.32 d7, d1 + sub r2, r2, #8 + vtrn.32 d2, d10 + vtrn.32 d3, d11 + vst1.8 d6, [r2, : 64] + sub r2, r2, #24 + vst1.8 d2, [r2, : 64] + add r2, r3, #96 + vmov.i32 q0, #0 + vmov.i64 d2, #0xff + vmov.i64 d3, #0 + vshr.u32 q1, q1, #7 + vst1.8 {d2-d3}, [r2, : 128]! + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 d0, [r2, : 64] + add r2, r3, #144 + vmov.i32 q0, #0 + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 d0, [r2, : 64] + add r2, r3, #240 + vmov.i32 q0, #0 + vmov.i64 d2, #0xff + vmov.i64 d3, #0 + vshr.u32 q1, q1, #7 + vst1.8 {d2-d3}, [r2, : 128]! + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 d0, [r2, : 64] + add r2, r3, #48 + add r6, r3, #192 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4}, [r2, : 64] + vst1.8 {d0-d1}, [r6, : 128]! + vst1.8 {d2-d3}, [r6, : 128]! 
+ vst1.8 d4, [r6, : 64] +.Lmainloop: + mov r2, r5, LSR #3 + and r6, r5, #7 + ldrb r2, [r1, r2] + mov r2, r2, LSR r6 + and r2, r2, #1 + str r5, [sp, #456] + eor r4, r4, r2 + str r2, [sp, #460] + neg r2, r4 + add r4, r3, #96 + add r5, r3, #192 + add r6, r3, #144 + vld1.8 {d8-d9}, [r4, : 128]! + add r7, r3, #240 + vld1.8 {d10-d11}, [r5, : 128]! + veor q6, q4, q5 + vld1.8 {d14-d15}, [r6, : 128]! + vdup.i32 q8, r2 + vld1.8 {d18-d19}, [r7, : 128]! + veor q10, q7, q9 + vld1.8 {d22-d23}, [r4, : 128]! + vand q6, q6, q8 + vld1.8 {d24-d25}, [r5, : 128]! + vand q10, q10, q8 + vld1.8 {d26-d27}, [r6, : 128]! + veor q4, q4, q6 + vld1.8 {d28-d29}, [r7, : 128]! + veor q5, q5, q6 + vld1.8 {d0}, [r4, : 64] + veor q6, q7, q10 + vld1.8 {d2}, [r5, : 64] + veor q7, q9, q10 + vld1.8 {d4}, [r6, : 64] + veor q9, q11, q12 + vld1.8 {d6}, [r7, : 64] + veor q10, q0, q1 + sub r2, r4, #32 + vand q9, q9, q8 + sub r4, r5, #32 + vand q10, q10, q8 + sub r5, r6, #32 + veor q11, q11, q9 + sub r6, r7, #32 + veor q0, q0, q10 + veor q9, q12, q9 + veor q1, q1, q10 + veor q10, q13, q14 + veor q12, q2, q3 + vand q10, q10, q8 + vand q8, q12, q8 + veor q12, q13, q10 + veor q2, q2, q8 + veor q10, q14, q10 + veor q3, q3, q8 + vadd.i32 q8, q4, q6 + vsub.i32 q4, q4, q6 + vst1.8 {d16-d17}, [r2, : 128]! + vadd.i32 q6, q11, q12 + vst1.8 {d8-d9}, [r5, : 128]! + vsub.i32 q4, q11, q12 + vst1.8 {d12-d13}, [r2, : 128]! + vadd.i32 q6, q0, q2 + vst1.8 {d8-d9}, [r5, : 128]! + vsub.i32 q0, q0, q2 + vst1.8 d12, [r2, : 64] + vadd.i32 q2, q5, q7 + vst1.8 d0, [r5, : 64] + vsub.i32 q0, q5, q7 + vst1.8 {d4-d5}, [r4, : 128]! + vadd.i32 q2, q9, q10 + vst1.8 {d0-d1}, [r6, : 128]! + vsub.i32 q0, q9, q10 + vst1.8 {d4-d5}, [r4, : 128]! + vadd.i32 q2, q1, q3 + vst1.8 {d0-d1}, [r6, : 128]! + vsub.i32 q0, q1, q3 + vst1.8 d4, [r4, : 64] + vst1.8 d0, [r6, : 64] + add r2, sp, #512 + add r4, r3, #96 + add r5, r3, #144 + vld1.8 {d0-d1}, [r2, : 128] + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4-d5}, [r5, : 128]! + vzip.i32 q1, q2 + vld1.8 {d6-d7}, [r4, : 128]! + vld1.8 {d8-d9}, [r5, : 128]! 
+ vshl.i32 q5, q1, #1 + vzip.i32 q3, q4 + vshl.i32 q6, q2, #1 + vld1.8 {d14}, [r4, : 64] + vshl.i32 q8, q3, #1 + vld1.8 {d15}, [r5, : 64] + vshl.i32 q9, q4, #1 + vmul.i32 d21, d7, d1 + vtrn.32 d14, d15 + vmul.i32 q11, q4, q0 + vmul.i32 q0, q7, q0 + vmull.s32 q12, d2, d2 + vmlal.s32 q12, d11, d1 + vmlal.s32 q12, d12, d0 + vmlal.s32 q12, d13, d23 + vmlal.s32 q12, d16, d22 + vmlal.s32 q12, d7, d21 + vmull.s32 q10, d2, d11 + vmlal.s32 q10, d4, d1 + vmlal.s32 q10, d13, d0 + vmlal.s32 q10, d6, d23 + vmlal.s32 q10, d17, d22 + vmull.s32 q13, d10, d4 + vmlal.s32 q13, d11, d3 + vmlal.s32 q13, d13, d1 + vmlal.s32 q13, d16, d0 + vmlal.s32 q13, d17, d23 + vmlal.s32 q13, d8, d22 + vmull.s32 q1, d10, d5 + vmlal.s32 q1, d11, d4 + vmlal.s32 q1, d6, d1 + vmlal.s32 q1, d17, d0 + vmlal.s32 q1, d8, d23 + vmull.s32 q14, d10, d6 + vmlal.s32 q14, d11, d13 + vmlal.s32 q14, d4, d4 + vmlal.s32 q14, d17, d1 + vmlal.s32 q14, d18, d0 + vmlal.s32 q14, d9, d23 + vmull.s32 q11, d10, d7 + vmlal.s32 q11, d11, d6 + vmlal.s32 q11, d12, d5 + vmlal.s32 q11, d8, d1 + vmlal.s32 q11, d19, d0 + vmull.s32 q15, d10, d8 + vmlal.s32 q15, d11, d17 + vmlal.s32 q15, d12, d6 + vmlal.s32 q15, d13, d5 + vmlal.s32 q15, d19, d1 + vmlal.s32 q15, d14, d0 + vmull.s32 q2, d10, d9 + vmlal.s32 q2, d11, d8 + vmlal.s32 q2, d12, d7 + vmlal.s32 q2, d13, d6 + vmlal.s32 q2, d14, d1 + vmull.s32 q0, d15, d1 + vmlal.s32 q0, d10, d14 + vmlal.s32 q0, d11, d19 + vmlal.s32 q0, d12, d8 + vmlal.s32 q0, d13, d17 + vmlal.s32 q0, d6, d6 + add r2, sp, #480 + vld1.8 {d18-d19}, [r2, : 128]! + vmull.s32 q3, d16, d7 + vmlal.s32 q3, d10, d15 + vmlal.s32 q3, d11, d14 + vmlal.s32 q3, d12, d9 + vmlal.s32 q3, d13, d8 + vld1.8 {d8-d9}, [r2, : 128] + vadd.i64 q5, q12, q9 + vadd.i64 q6, q15, q9 + vshr.s64 q5, q5, #26 + vshr.s64 q6, q6, #26 + vadd.i64 q7, q10, q5 + vshl.i64 q5, q5, #26 + vadd.i64 q8, q7, q4 + vadd.i64 q2, q2, q6 + vshl.i64 q6, q6, #26 + vadd.i64 q10, q2, q4 + vsub.i64 q5, q12, q5 + vshr.s64 q8, q8, #25 + vsub.i64 q6, q15, q6 + vshr.s64 q10, q10, #25 + vadd.i64 q12, q13, q8 + vshl.i64 q8, q8, #25 + vadd.i64 q13, q12, q9 + vadd.i64 q0, q0, q10 + vsub.i64 q7, q7, q8 + vshr.s64 q8, q13, #26 + vshl.i64 q10, q10, #25 + vadd.i64 q13, q0, q9 + vadd.i64 q1, q1, q8 + vshl.i64 q8, q8, #26 + vadd.i64 q15, q1, q4 + vsub.i64 q2, q2, q10 + vshr.s64 q10, q13, #26 + vsub.i64 q8, q12, q8 + vshr.s64 q12, q15, #25 + vadd.i64 q3, q3, q10 + vshl.i64 q10, q10, #26 + vadd.i64 q13, q3, q4 + vadd.i64 q14, q14, q12 + add r2, r3, #288 + vshl.i64 q12, q12, #25 + add r4, r3, #336 + vadd.i64 q15, q14, q9 + add r2, r2, #8 + vsub.i64 q0, q0, q10 + add r4, r4, #8 + vshr.s64 q10, q13, #25 + vsub.i64 q1, q1, q12 + vshr.s64 q12, q15, #26 + vadd.i64 q13, q10, q10 + vadd.i64 q11, q11, q12 + vtrn.32 d16, d2 + vshl.i64 q12, q12, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q11, q4 + vadd.i64 q4, q5, q13 + vst1.8 d16, [r2, : 64]! + vshl.i64 q5, q10, #4 + vst1.8 d17, [r4, : 64]! 
+ vsub.i64 q8, q14, q12 + vshr.s64 q1, q1, #25 + vadd.i64 q4, q4, q5 + vadd.i64 q5, q6, q1 + vshl.i64 q1, q1, #25 + vadd.i64 q6, q5, q9 + vadd.i64 q4, q4, q10 + vshl.i64 q10, q10, #25 + vadd.i64 q9, q4, q9 + vsub.i64 q1, q11, q1 + vshr.s64 q6, q6, #26 + vsub.i64 q3, q3, q10 + vtrn.32 d16, d2 + vshr.s64 q9, q9, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q2, q6 + vst1.8 d16, [r2, : 64] + vshl.i64 q2, q6, #26 + vst1.8 d17, [r4, : 64] + vadd.i64 q6, q7, q9 + vtrn.32 d0, d6 + vshl.i64 q7, q9, #26 + vtrn.32 d1, d7 + vsub.i64 q2, q5, q2 + add r2, r2, #16 + vsub.i64 q3, q4, q7 + vst1.8 d0, [r2, : 64] + add r4, r4, #16 + vst1.8 d1, [r4, : 64] + vtrn.32 d4, d2 + vtrn.32 d5, d3 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d6, d12 + vtrn.32 d7, d13 + vst1.8 d4, [r2, : 64] + vst1.8 d5, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d6, [r2, : 64] + vst1.8 d7, [r4, : 64] + add r2, r3, #240 + add r4, r3, #96 + vld1.8 {d0-d1}, [r4, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4}, [r4, : 64] + add r4, r3, #144 + vld1.8 {d6-d7}, [r4, : 128]! + vtrn.32 q0, q3 + vld1.8 {d8-d9}, [r4, : 128]! + vshl.i32 q5, q0, #4 + vtrn.32 q1, q4 + vshl.i32 q6, q3, #4 + vadd.i32 q5, q5, q0 + vadd.i32 q6, q6, q3 + vshl.i32 q7, q1, #4 + vld1.8 {d5}, [r4, : 64] + vshl.i32 q8, q4, #4 + vtrn.32 d4, d5 + vadd.i32 q7, q7, q1 + vadd.i32 q8, q8, q4 + vld1.8 {d18-d19}, [r2, : 128]! + vshl.i32 q10, q2, #4 + vld1.8 {d22-d23}, [r2, : 128]! + vadd.i32 q10, q10, q2 + vld1.8 {d24}, [r2, : 64] + vadd.i32 q5, q5, q0 + add r2, r3, #192 + vld1.8 {d26-d27}, [r2, : 128]! + vadd.i32 q6, q6, q3 + vld1.8 {d28-d29}, [r2, : 128]! + vadd.i32 q8, q8, q4 + vld1.8 {d25}, [r2, : 64] + vadd.i32 q10, q10, q2 + vtrn.32 q9, q13 + vadd.i32 q7, q7, q1 + vadd.i32 q5, q5, q0 + vtrn.32 q11, q14 + vadd.i32 q6, q6, q3 + add r2, sp, #528 + vadd.i32 q10, q10, q2 + vtrn.32 d24, d25 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q6, q13, #1 + vst1.8 {d20-d21}, [r2, : 128]! + vshl.i32 q10, q14, #1 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q15, q12, #1 + vadd.i32 q8, q8, q4 + vext.32 d10, d31, d30, #0 + vadd.i32 q7, q7, q1 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q8, d18, d5 + vmlal.s32 q8, d26, d4 + vmlal.s32 q8, d19, d9 + vmlal.s32 q8, d27, d3 + vmlal.s32 q8, d22, d8 + vmlal.s32 q8, d28, d2 + vmlal.s32 q8, d23, d7 + vmlal.s32 q8, d29, d1 + vmlal.s32 q8, d24, d6 + vmlal.s32 q8, d25, d0 + vst1.8 {d14-d15}, [r2, : 128]! + vmull.s32 q2, d18, d4 + vmlal.s32 q2, d12, d9 + vmlal.s32 q2, d13, d8 + vmlal.s32 q2, d19, d3 + vmlal.s32 q2, d22, d2 + vmlal.s32 q2, d23, d1 + vmlal.s32 q2, d24, d0 + vst1.8 {d20-d21}, [r2, : 128]! + vmull.s32 q7, d18, d9 + vmlal.s32 q7, d26, d3 + vmlal.s32 q7, d19, d8 + vmlal.s32 q7, d27, d2 + vmlal.s32 q7, d22, d7 + vmlal.s32 q7, d28, d1 + vmlal.s32 q7, d23, d6 + vmlal.s32 q7, d29, d0 + vst1.8 {d10-d11}, [r2, : 128]! 
+ vmull.s32 q5, d18, d3 + vmlal.s32 q5, d19, d2 + vmlal.s32 q5, d22, d1 + vmlal.s32 q5, d23, d0 + vmlal.s32 q5, d12, d8 + vst1.8 {d16-d17}, [r2, : 128] + vmull.s32 q4, d18, d8 + vmlal.s32 q4, d26, d2 + vmlal.s32 q4, d19, d7 + vmlal.s32 q4, d27, d1 + vmlal.s32 q4, d22, d6 + vmlal.s32 q4, d28, d0 + vmull.s32 q8, d18, d7 + vmlal.s32 q8, d26, d1 + vmlal.s32 q8, d19, d6 + vmlal.s32 q8, d27, d0 + add r2, sp, #544 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q7, d24, d21 + vmlal.s32 q7, d25, d20 + vmlal.s32 q4, d23, d21 + vmlal.s32 q4, d29, d20 + vmlal.s32 q8, d22, d21 + vmlal.s32 q8, d28, d20 + vmlal.s32 q5, d24, d20 + vst1.8 {d14-d15}, [r2, : 128] + vmull.s32 q7, d18, d6 + vmlal.s32 q7, d26, d0 + add r2, sp, #624 + vld1.8 {d30-d31}, [r2, : 128] + vmlal.s32 q2, d30, d21 + vmlal.s32 q7, d19, d21 + vmlal.s32 q7, d27, d20 + add r2, sp, #592 + vld1.8 {d26-d27}, [r2, : 128] + vmlal.s32 q4, d25, d27 + vmlal.s32 q8, d29, d27 + vmlal.s32 q8, d25, d26 + vmlal.s32 q7, d28, d27 + vmlal.s32 q7, d29, d26 + add r2, sp, #576 + vld1.8 {d28-d29}, [r2, : 128] + vmlal.s32 q4, d24, d29 + vmlal.s32 q8, d23, d29 + vmlal.s32 q8, d24, d28 + vmlal.s32 q7, d22, d29 + vmlal.s32 q7, d23, d28 + vst1.8 {d8-d9}, [r2, : 128] + add r2, sp, #528 + vld1.8 {d8-d9}, [r2, : 128] + vmlal.s32 q7, d24, d9 + vmlal.s32 q7, d25, d31 + vmull.s32 q1, d18, d2 + vmlal.s32 q1, d19, d1 + vmlal.s32 q1, d22, d0 + vmlal.s32 q1, d24, d27 + vmlal.s32 q1, d23, d20 + vmlal.s32 q1, d12, d7 + vmlal.s32 q1, d13, d6 + vmull.s32 q6, d18, d1 + vmlal.s32 q6, d19, d0 + vmlal.s32 q6, d23, d27 + vmlal.s32 q6, d22, d20 + vmlal.s32 q6, d24, d26 + vmull.s32 q0, d18, d0 + vmlal.s32 q0, d22, d27 + vmlal.s32 q0, d23, d26 + vmlal.s32 q0, d24, d31 + vmlal.s32 q0, d19, d20 + add r2, sp, #608 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q2, d18, d7 + vmlal.s32 q5, d18, d6 + vmlal.s32 q1, d18, d21 + vmlal.s32 q0, d18, d28 + vmlal.s32 q6, d18, d29 + vmlal.s32 q2, d19, d6 + vmlal.s32 q5, d19, d21 + vmlal.s32 q1, d19, d29 + vmlal.s32 q0, d19, d9 + vmlal.s32 q6, d19, d28 + add r2, sp, #560 + vld1.8 {d18-d19}, [r2, : 128] + add r2, sp, #480 + vld1.8 {d22-d23}, [r2, : 128] + vmlal.s32 q5, d19, d7 + vmlal.s32 q0, d18, d21 + vmlal.s32 q0, d19, d29 + vmlal.s32 q6, d18, d6 + add r2, sp, #496 + vld1.8 {d6-d7}, [r2, : 128] + vmlal.s32 q6, d19, d21 + add r2, sp, #544 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q0, d30, d8 + add r2, sp, #640 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q5, d30, d29 + add r2, sp, #576 + vld1.8 {d24-d25}, [r2, : 128] + vmlal.s32 q1, d30, d28 + vadd.i64 q13, q0, q11 + vadd.i64 q14, q5, q11 + vmlal.s32 q6, d30, d9 + vshr.s64 q4, q13, #26 + vshr.s64 q13, q14, #26 + vadd.i64 q7, q7, q4 + vshl.i64 q4, q4, #26 + vadd.i64 q14, q7, q3 + vadd.i64 q9, q9, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q9, q3 + vsub.i64 q0, q0, q4 + vshr.s64 q4, q14, #25 + vsub.i64 q5, q5, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q4 + vshl.i64 q4, q4, #25 + vadd.i64 q14, q6, q11 + vadd.i64 q2, q2, q13 + vsub.i64 q4, q7, q4 + vshr.s64 q7, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q11 + vadd.i64 q8, q8, q7 + vshl.i64 q7, q7, #26 + vadd.i64 q15, q8, q3 + vsub.i64 q9, q9, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q7 + vshr.s64 q7, q15, #25 + vadd.i64 q10, q10, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q10, q3 + vadd.i64 q1, q1, q7 + add r2, r3, #144 + vshl.i64 q7, q7, #25 + add r4, r3, #96 + vadd.i64 q15, q1, q11 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + add r4, r4, #8 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q8, q7 + vshr.s64 q8, q15, #26 + vadd.i64 q14, q13, q13 
+ vadd.i64 q12, q12, q8 + vtrn.32 d12, d14 + vshl.i64 q8, q8, #26 + vtrn.32 d13, d15 + vadd.i64 q3, q12, q3 + vadd.i64 q0, q0, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q7, q13, #4 + vst1.8 d13, [r4, : 64]! + vsub.i64 q1, q1, q8 + vshr.s64 q3, q3, #25 + vadd.i64 q0, q0, q7 + vadd.i64 q5, q5, q3 + vshl.i64 q3, q3, #25 + vadd.i64 q6, q5, q11 + vadd.i64 q0, q0, q13 + vshl.i64 q7, q13, #25 + vadd.i64 q8, q0, q11 + vsub.i64 q3, q12, q3 + vshr.s64 q6, q6, #26 + vsub.i64 q7, q10, q7 + vtrn.32 d2, d6 + vshr.s64 q8, q8, #26 + vtrn.32 d3, d7 + vadd.i64 q3, q9, q6 + vst1.8 d2, [r2, : 64] + vshl.i64 q6, q6, #26 + vst1.8 d3, [r4, : 64] + vadd.i64 q1, q4, q8 + vtrn.32 d4, d14 + vshl.i64 q4, q8, #26 + vtrn.32 d5, d15 + vsub.i64 q5, q5, q6 + add r2, r2, #16 + vsub.i64 q0, q0, q4 + vst1.8 d4, [r2, : 64] + add r4, r4, #16 + vst1.8 d5, [r4, : 64] + vtrn.32 d10, d6 + vtrn.32 d11, d7 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d0, d2 + vtrn.32 d1, d3 + vst1.8 d10, [r2, : 64] + vst1.8 d11, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d0, [r2, : 64] + vst1.8 d1, [r4, : 64] + add r2, r3, #288 + add r4, r3, #336 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vsub.i32 q0, q0, q1 + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4-d5}, [r4, : 128]! + vsub.i32 q1, q1, q2 + add r5, r3, #240 + vld1.8 {d4}, [r2, : 64] + vld1.8 {d6}, [r4, : 64] + vsub.i32 q2, q2, q3 + vst1.8 {d0-d1}, [r5, : 128]! + vst1.8 {d2-d3}, [r5, : 128]! + vst1.8 d4, [r5, : 64] + add r2, r3, #144 + add r4, r3, #96 + add r5, r3, #144 + add r6, r3, #192 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vsub.i32 q2, q0, q1 + vadd.i32 q0, q0, q1 + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d6-d7}, [r4, : 128]! + vsub.i32 q4, q1, q3 + vadd.i32 q1, q1, q3 + vld1.8 {d6}, [r2, : 64] + vld1.8 {d10}, [r4, : 64] + vsub.i32 q6, q3, q5 + vadd.i32 q3, q3, q5 + vst1.8 {d4-d5}, [r5, : 128]! + vst1.8 {d0-d1}, [r6, : 128]! + vst1.8 {d8-d9}, [r5, : 128]! + vst1.8 {d2-d3}, [r6, : 128]! + vst1.8 d12, [r5, : 64] + vst1.8 d6, [r6, : 64] + add r2, r3, #0 + add r4, r3, #240 + vld1.8 {d0-d1}, [r4, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4}, [r4, : 64] + add r4, r3, #336 + vld1.8 {d6-d7}, [r4, : 128]! + vtrn.32 q0, q3 + vld1.8 {d8-d9}, [r4, : 128]! + vshl.i32 q5, q0, #4 + vtrn.32 q1, q4 + vshl.i32 q6, q3, #4 + vadd.i32 q5, q5, q0 + vadd.i32 q6, q6, q3 + vshl.i32 q7, q1, #4 + vld1.8 {d5}, [r4, : 64] + vshl.i32 q8, q4, #4 + vtrn.32 d4, d5 + vadd.i32 q7, q7, q1 + vadd.i32 q8, q8, q4 + vld1.8 {d18-d19}, [r2, : 128]! + vshl.i32 q10, q2, #4 + vld1.8 {d22-d23}, [r2, : 128]! + vadd.i32 q10, q10, q2 + vld1.8 {d24}, [r2, : 64] + vadd.i32 q5, q5, q0 + add r2, r3, #288 + vld1.8 {d26-d27}, [r2, : 128]! + vadd.i32 q6, q6, q3 + vld1.8 {d28-d29}, [r2, : 128]! + vadd.i32 q8, q8, q4 + vld1.8 {d25}, [r2, : 64] + vadd.i32 q10, q10, q2 + vtrn.32 q9, q13 + vadd.i32 q7, q7, q1 + vadd.i32 q5, q5, q0 + vtrn.32 q11, q14 + vadd.i32 q6, q6, q3 + add r2, sp, #528 + vadd.i32 q10, q10, q2 + vtrn.32 d24, d25 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q6, q13, #1 + vst1.8 {d20-d21}, [r2, : 128]! + vshl.i32 q10, q14, #1 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q15, q12, #1 + vadd.i32 q8, q8, q4 + vext.32 d10, d31, d30, #0 + vadd.i32 q7, q7, q1 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q8, d18, d5 + vmlal.s32 q8, d26, d4 + vmlal.s32 q8, d19, d9 + vmlal.s32 q8, d27, d3 + vmlal.s32 q8, d22, d8 + vmlal.s32 q8, d28, d2 + vmlal.s32 q8, d23, d7 + vmlal.s32 q8, d29, d1 + vmlal.s32 q8, d24, d6 + vmlal.s32 q8, d25, d0 + vst1.8 {d14-d15}, [r2, : 128]! 
+ vmull.s32 q2, d18, d4 + vmlal.s32 q2, d12, d9 + vmlal.s32 q2, d13, d8 + vmlal.s32 q2, d19, d3 + vmlal.s32 q2, d22, d2 + vmlal.s32 q2, d23, d1 + vmlal.s32 q2, d24, d0 + vst1.8 {d20-d21}, [r2, : 128]! + vmull.s32 q7, d18, d9 + vmlal.s32 q7, d26, d3 + vmlal.s32 q7, d19, d8 + vmlal.s32 q7, d27, d2 + vmlal.s32 q7, d22, d7 + vmlal.s32 q7, d28, d1 + vmlal.s32 q7, d23, d6 + vmlal.s32 q7, d29, d0 + vst1.8 {d10-d11}, [r2, : 128]! + vmull.s32 q5, d18, d3 + vmlal.s32 q5, d19, d2 + vmlal.s32 q5, d22, d1 + vmlal.s32 q5, d23, d0 + vmlal.s32 q5, d12, d8 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q4, d18, d8 + vmlal.s32 q4, d26, d2 + vmlal.s32 q4, d19, d7 + vmlal.s32 q4, d27, d1 + vmlal.s32 q4, d22, d6 + vmlal.s32 q4, d28, d0 + vmull.s32 q8, d18, d7 + vmlal.s32 q8, d26, d1 + vmlal.s32 q8, d19, d6 + vmlal.s32 q8, d27, d0 + add r2, sp, #544 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q7, d24, d21 + vmlal.s32 q7, d25, d20 + vmlal.s32 q4, d23, d21 + vmlal.s32 q4, d29, d20 + vmlal.s32 q8, d22, d21 + vmlal.s32 q8, d28, d20 + vmlal.s32 q5, d24, d20 + vst1.8 {d14-d15}, [r2, : 128] + vmull.s32 q7, d18, d6 + vmlal.s32 q7, d26, d0 + add r2, sp, #624 + vld1.8 {d30-d31}, [r2, : 128] + vmlal.s32 q2, d30, d21 + vmlal.s32 q7, d19, d21 + vmlal.s32 q7, d27, d20 + add r2, sp, #592 + vld1.8 {d26-d27}, [r2, : 128] + vmlal.s32 q4, d25, d27 + vmlal.s32 q8, d29, d27 + vmlal.s32 q8, d25, d26 + vmlal.s32 q7, d28, d27 + vmlal.s32 q7, d29, d26 + add r2, sp, #576 + vld1.8 {d28-d29}, [r2, : 128] + vmlal.s32 q4, d24, d29 + vmlal.s32 q8, d23, d29 + vmlal.s32 q8, d24, d28 + vmlal.s32 q7, d22, d29 + vmlal.s32 q7, d23, d28 + vst1.8 {d8-d9}, [r2, : 128] + add r2, sp, #528 + vld1.8 {d8-d9}, [r2, : 128] + vmlal.s32 q7, d24, d9 + vmlal.s32 q7, d25, d31 + vmull.s32 q1, d18, d2 + vmlal.s32 q1, d19, d1 + vmlal.s32 q1, d22, d0 + vmlal.s32 q1, d24, d27 + vmlal.s32 q1, d23, d20 + vmlal.s32 q1, d12, d7 + vmlal.s32 q1, d13, d6 + vmull.s32 q6, d18, d1 + vmlal.s32 q6, d19, d0 + vmlal.s32 q6, d23, d27 + vmlal.s32 q6, d22, d20 + vmlal.s32 q6, d24, d26 + vmull.s32 q0, d18, d0 + vmlal.s32 q0, d22, d27 + vmlal.s32 q0, d23, d26 + vmlal.s32 q0, d24, d31 + vmlal.s32 q0, d19, d20 + add r2, sp, #608 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q2, d18, d7 + vmlal.s32 q5, d18, d6 + vmlal.s32 q1, d18, d21 + vmlal.s32 q0, d18, d28 + vmlal.s32 q6, d18, d29 + vmlal.s32 q2, d19, d6 + vmlal.s32 q5, d19, d21 + vmlal.s32 q1, d19, d29 + vmlal.s32 q0, d19, d9 + vmlal.s32 q6, d19, d28 + add r2, sp, #560 + vld1.8 {d18-d19}, [r2, : 128] + add r2, sp, #480 + vld1.8 {d22-d23}, [r2, : 128] + vmlal.s32 q5, d19, d7 + vmlal.s32 q0, d18, d21 + vmlal.s32 q0, d19, d29 + vmlal.s32 q6, d18, d6 + add r2, sp, #496 + vld1.8 {d6-d7}, [r2, : 128] + vmlal.s32 q6, d19, d21 + add r2, sp, #544 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q0, d30, d8 + add r2, sp, #640 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q5, d30, d29 + add r2, sp, #576 + vld1.8 {d24-d25}, [r2, : 128] + vmlal.s32 q1, d30, d28 + vadd.i64 q13, q0, q11 + vadd.i64 q14, q5, q11 + vmlal.s32 q6, d30, d9 + vshr.s64 q4, q13, #26 + vshr.s64 q13, q14, #26 + vadd.i64 q7, q7, q4 + vshl.i64 q4, q4, #26 + vadd.i64 q14, q7, q3 + vadd.i64 q9, q9, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q9, q3 + vsub.i64 q0, q0, q4 + vshr.s64 q4, q14, #25 + vsub.i64 q5, q5, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q4 + vshl.i64 q4, q4, #25 + vadd.i64 q14, q6, q11 + vadd.i64 q2, q2, q13 + vsub.i64 q4, q7, q4 + vshr.s64 q7, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q11 + vadd.i64 q8, q8, q7 + vshl.i64 q7, q7, #26 + vadd.i64 q15, q8, q3 
+ vsub.i64 q9, q9, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q7 + vshr.s64 q7, q15, #25 + vadd.i64 q10, q10, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q10, q3 + vadd.i64 q1, q1, q7 + add r2, r3, #288 + vshl.i64 q7, q7, #25 + add r4, r3, #96 + vadd.i64 q15, q1, q11 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + add r4, r4, #8 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q8, q7 + vshr.s64 q8, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q12, q12, q8 + vtrn.32 d12, d14 + vshl.i64 q8, q8, #26 + vtrn.32 d13, d15 + vadd.i64 q3, q12, q3 + vadd.i64 q0, q0, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q7, q13, #4 + vst1.8 d13, [r4, : 64]! + vsub.i64 q1, q1, q8 + vshr.s64 q3, q3, #25 + vadd.i64 q0, q0, q7 + vadd.i64 q5, q5, q3 + vshl.i64 q3, q3, #25 + vadd.i64 q6, q5, q11 + vadd.i64 q0, q0, q13 + vshl.i64 q7, q13, #25 + vadd.i64 q8, q0, q11 + vsub.i64 q3, q12, q3 + vshr.s64 q6, q6, #26 + vsub.i64 q7, q10, q7 + vtrn.32 d2, d6 + vshr.s64 q8, q8, #26 + vtrn.32 d3, d7 + vadd.i64 q3, q9, q6 + vst1.8 d2, [r2, : 64] + vshl.i64 q6, q6, #26 + vst1.8 d3, [r4, : 64] + vadd.i64 q1, q4, q8 + vtrn.32 d4, d14 + vshl.i64 q4, q8, #26 + vtrn.32 d5, d15 + vsub.i64 q5, q5, q6 + add r2, r2, #16 + vsub.i64 q0, q0, q4 + vst1.8 d4, [r2, : 64] + add r4, r4, #16 + vst1.8 d5, [r4, : 64] + vtrn.32 d10, d6 + vtrn.32 d11, d7 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d0, d2 + vtrn.32 d1, d3 + vst1.8 d10, [r2, : 64] + vst1.8 d11, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d0, [r2, : 64] + vst1.8 d1, [r4, : 64] + add r2, sp, #512 + add r4, r3, #144 + add r5, r3, #192 + vld1.8 {d0-d1}, [r2, : 128] + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4-d5}, [r5, : 128]! + vzip.i32 q1, q2 + vld1.8 {d6-d7}, [r4, : 128]! + vld1.8 {d8-d9}, [r5, : 128]! + vshl.i32 q5, q1, #1 + vzip.i32 q3, q4 + vshl.i32 q6, q2, #1 + vld1.8 {d14}, [r4, : 64] + vshl.i32 q8, q3, #1 + vld1.8 {d15}, [r5, : 64] + vshl.i32 q9, q4, #1 + vmul.i32 d21, d7, d1 + vtrn.32 d14, d15 + vmul.i32 q11, q4, q0 + vmul.i32 q0, q7, q0 + vmull.s32 q12, d2, d2 + vmlal.s32 q12, d11, d1 + vmlal.s32 q12, d12, d0 + vmlal.s32 q12, d13, d23 + vmlal.s32 q12, d16, d22 + vmlal.s32 q12, d7, d21 + vmull.s32 q10, d2, d11 + vmlal.s32 q10, d4, d1 + vmlal.s32 q10, d13, d0 + vmlal.s32 q10, d6, d23 + vmlal.s32 q10, d17, d22 + vmull.s32 q13, d10, d4 + vmlal.s32 q13, d11, d3 + vmlal.s32 q13, d13, d1 + vmlal.s32 q13, d16, d0 + vmlal.s32 q13, d17, d23 + vmlal.s32 q13, d8, d22 + vmull.s32 q1, d10, d5 + vmlal.s32 q1, d11, d4 + vmlal.s32 q1, d6, d1 + vmlal.s32 q1, d17, d0 + vmlal.s32 q1, d8, d23 + vmull.s32 q14, d10, d6 + vmlal.s32 q14, d11, d13 + vmlal.s32 q14, d4, d4 + vmlal.s32 q14, d17, d1 + vmlal.s32 q14, d18, d0 + vmlal.s32 q14, d9, d23 + vmull.s32 q11, d10, d7 + vmlal.s32 q11, d11, d6 + vmlal.s32 q11, d12, d5 + vmlal.s32 q11, d8, d1 + vmlal.s32 q11, d19, d0 + vmull.s32 q15, d10, d8 + vmlal.s32 q15, d11, d17 + vmlal.s32 q15, d12, d6 + vmlal.s32 q15, d13, d5 + vmlal.s32 q15, d19, d1 + vmlal.s32 q15, d14, d0 + vmull.s32 q2, d10, d9 + vmlal.s32 q2, d11, d8 + vmlal.s32 q2, d12, d7 + vmlal.s32 q2, d13, d6 + vmlal.s32 q2, d14, d1 + vmull.s32 q0, d15, d1 + vmlal.s32 q0, d10, d14 + vmlal.s32 q0, d11, d19 + vmlal.s32 q0, d12, d8 + vmlal.s32 q0, d13, d17 + vmlal.s32 q0, d6, d6 + add r2, sp, #480 + vld1.8 {d18-d19}, [r2, : 128]! 
+ vmull.s32 q3, d16, d7 + vmlal.s32 q3, d10, d15 + vmlal.s32 q3, d11, d14 + vmlal.s32 q3, d12, d9 + vmlal.s32 q3, d13, d8 + vld1.8 {d8-d9}, [r2, : 128] + vadd.i64 q5, q12, q9 + vadd.i64 q6, q15, q9 + vshr.s64 q5, q5, #26 + vshr.s64 q6, q6, #26 + vadd.i64 q7, q10, q5 + vshl.i64 q5, q5, #26 + vadd.i64 q8, q7, q4 + vadd.i64 q2, q2, q6 + vshl.i64 q6, q6, #26 + vadd.i64 q10, q2, q4 + vsub.i64 q5, q12, q5 + vshr.s64 q8, q8, #25 + vsub.i64 q6, q15, q6 + vshr.s64 q10, q10, #25 + vadd.i64 q12, q13, q8 + vshl.i64 q8, q8, #25 + vadd.i64 q13, q12, q9 + vadd.i64 q0, q0, q10 + vsub.i64 q7, q7, q8 + vshr.s64 q8, q13, #26 + vshl.i64 q10, q10, #25 + vadd.i64 q13, q0, q9 + vadd.i64 q1, q1, q8 + vshl.i64 q8, q8, #26 + vadd.i64 q15, q1, q4 + vsub.i64 q2, q2, q10 + vshr.s64 q10, q13, #26 + vsub.i64 q8, q12, q8 + vshr.s64 q12, q15, #25 + vadd.i64 q3, q3, q10 + vshl.i64 q10, q10, #26 + vadd.i64 q13, q3, q4 + vadd.i64 q14, q14, q12 + add r2, r3, #144 + vshl.i64 q12, q12, #25 + add r4, r3, #192 + vadd.i64 q15, q14, q9 + add r2, r2, #8 + vsub.i64 q0, q0, q10 + add r4, r4, #8 + vshr.s64 q10, q13, #25 + vsub.i64 q1, q1, q12 + vshr.s64 q12, q15, #26 + vadd.i64 q13, q10, q10 + vadd.i64 q11, q11, q12 + vtrn.32 d16, d2 + vshl.i64 q12, q12, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q11, q4 + vadd.i64 q4, q5, q13 + vst1.8 d16, [r2, : 64]! + vshl.i64 q5, q10, #4 + vst1.8 d17, [r4, : 64]! + vsub.i64 q8, q14, q12 + vshr.s64 q1, q1, #25 + vadd.i64 q4, q4, q5 + vadd.i64 q5, q6, q1 + vshl.i64 q1, q1, #25 + vadd.i64 q6, q5, q9 + vadd.i64 q4, q4, q10 + vshl.i64 q10, q10, #25 + vadd.i64 q9, q4, q9 + vsub.i64 q1, q11, q1 + vshr.s64 q6, q6, #26 + vsub.i64 q3, q3, q10 + vtrn.32 d16, d2 + vshr.s64 q9, q9, #26 + vtrn.32 d17, d3 + vadd.i64 q1, q2, q6 + vst1.8 d16, [r2, : 64] + vshl.i64 q2, q6, #26 + vst1.8 d17, [r4, : 64] + vadd.i64 q6, q7, q9 + vtrn.32 d0, d6 + vshl.i64 q7, q9, #26 + vtrn.32 d1, d7 + vsub.i64 q2, q5, q2 + add r2, r2, #16 + vsub.i64 q3, q4, q7 + vst1.8 d0, [r2, : 64] + add r4, r4, #16 + vst1.8 d1, [r4, : 64] + vtrn.32 d4, d2 + vtrn.32 d5, d3 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d6, d12 + vtrn.32 d7, d13 + vst1.8 d4, [r2, : 64] + vst1.8 d5, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d6, [r2, : 64] + vst1.8 d7, [r4, : 64] + add r2, r3, #336 + add r4, r3, #288 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vadd.i32 q0, q0, q1 + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4-d5}, [r4, : 128]! + vadd.i32 q1, q1, q2 + add r5, r3, #288 + vld1.8 {d4}, [r2, : 64] + vld1.8 {d6}, [r4, : 64] + vadd.i32 q2, q2, q3 + vst1.8 {d0-d1}, [r5, : 128]! + vst1.8 {d2-d3}, [r5, : 128]! + vst1.8 d4, [r5, : 64] + add r2, r3, #48 + add r4, r3, #144 + vld1.8 {d0-d1}, [r4, : 128]! + vld1.8 {d2-d3}, [r4, : 128]! + vld1.8 {d4}, [r4, : 64] + add r4, r3, #288 + vld1.8 {d6-d7}, [r4, : 128]! + vtrn.32 q0, q3 + vld1.8 {d8-d9}, [r4, : 128]! + vshl.i32 q5, q0, #4 + vtrn.32 q1, q4 + vshl.i32 q6, q3, #4 + vadd.i32 q5, q5, q0 + vadd.i32 q6, q6, q3 + vshl.i32 q7, q1, #4 + vld1.8 {d5}, [r4, : 64] + vshl.i32 q8, q4, #4 + vtrn.32 d4, d5 + vadd.i32 q7, q7, q1 + vadd.i32 q8, q8, q4 + vld1.8 {d18-d19}, [r2, : 128]! + vshl.i32 q10, q2, #4 + vld1.8 {d22-d23}, [r2, : 128]! + vadd.i32 q10, q10, q2 + vld1.8 {d24}, [r2, : 64] + vadd.i32 q5, q5, q0 + add r2, r3, #240 + vld1.8 {d26-d27}, [r2, : 128]! + vadd.i32 q6, q6, q3 + vld1.8 {d28-d29}, [r2, : 128]! 
+ vadd.i32 q8, q8, q4 + vld1.8 {d25}, [r2, : 64] + vadd.i32 q10, q10, q2 + vtrn.32 q9, q13 + vadd.i32 q7, q7, q1 + vadd.i32 q5, q5, q0 + vtrn.32 q11, q14 + vadd.i32 q6, q6, q3 + add r2, sp, #528 + vadd.i32 q10, q10, q2 + vtrn.32 d24, d25 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q6, q13, #1 + vst1.8 {d20-d21}, [r2, : 128]! + vshl.i32 q10, q14, #1 + vst1.8 {d12-d13}, [r2, : 128]! + vshl.i32 q15, q12, #1 + vadd.i32 q8, q8, q4 + vext.32 d10, d31, d30, #0 + vadd.i32 q7, q7, q1 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q8, d18, d5 + vmlal.s32 q8, d26, d4 + vmlal.s32 q8, d19, d9 + vmlal.s32 q8, d27, d3 + vmlal.s32 q8, d22, d8 + vmlal.s32 q8, d28, d2 + vmlal.s32 q8, d23, d7 + vmlal.s32 q8, d29, d1 + vmlal.s32 q8, d24, d6 + vmlal.s32 q8, d25, d0 + vst1.8 {d14-d15}, [r2, : 128]! + vmull.s32 q2, d18, d4 + vmlal.s32 q2, d12, d9 + vmlal.s32 q2, d13, d8 + vmlal.s32 q2, d19, d3 + vmlal.s32 q2, d22, d2 + vmlal.s32 q2, d23, d1 + vmlal.s32 q2, d24, d0 + vst1.8 {d20-d21}, [r2, : 128]! + vmull.s32 q7, d18, d9 + vmlal.s32 q7, d26, d3 + vmlal.s32 q7, d19, d8 + vmlal.s32 q7, d27, d2 + vmlal.s32 q7, d22, d7 + vmlal.s32 q7, d28, d1 + vmlal.s32 q7, d23, d6 + vmlal.s32 q7, d29, d0 + vst1.8 {d10-d11}, [r2, : 128]! + vmull.s32 q5, d18, d3 + vmlal.s32 q5, d19, d2 + vmlal.s32 q5, d22, d1 + vmlal.s32 q5, d23, d0 + vmlal.s32 q5, d12, d8 + vst1.8 {d16-d17}, [r2, : 128]! + vmull.s32 q4, d18, d8 + vmlal.s32 q4, d26, d2 + vmlal.s32 q4, d19, d7 + vmlal.s32 q4, d27, d1 + vmlal.s32 q4, d22, d6 + vmlal.s32 q4, d28, d0 + vmull.s32 q8, d18, d7 + vmlal.s32 q8, d26, d1 + vmlal.s32 q8, d19, d6 + vmlal.s32 q8, d27, d0 + add r2, sp, #544 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q7, d24, d21 + vmlal.s32 q7, d25, d20 + vmlal.s32 q4, d23, d21 + vmlal.s32 q4, d29, d20 + vmlal.s32 q8, d22, d21 + vmlal.s32 q8, d28, d20 + vmlal.s32 q5, d24, d20 + vst1.8 {d14-d15}, [r2, : 128] + vmull.s32 q7, d18, d6 + vmlal.s32 q7, d26, d0 + add r2, sp, #624 + vld1.8 {d30-d31}, [r2, : 128] + vmlal.s32 q2, d30, d21 + vmlal.s32 q7, d19, d21 + vmlal.s32 q7, d27, d20 + add r2, sp, #592 + vld1.8 {d26-d27}, [r2, : 128] + vmlal.s32 q4, d25, d27 + vmlal.s32 q8, d29, d27 + vmlal.s32 q8, d25, d26 + vmlal.s32 q7, d28, d27 + vmlal.s32 q7, d29, d26 + add r2, sp, #576 + vld1.8 {d28-d29}, [r2, : 128] + vmlal.s32 q4, d24, d29 + vmlal.s32 q8, d23, d29 + vmlal.s32 q8, d24, d28 + vmlal.s32 q7, d22, d29 + vmlal.s32 q7, d23, d28 + vst1.8 {d8-d9}, [r2, : 128] + add r2, sp, #528 + vld1.8 {d8-d9}, [r2, : 128] + vmlal.s32 q7, d24, d9 + vmlal.s32 q7, d25, d31 + vmull.s32 q1, d18, d2 + vmlal.s32 q1, d19, d1 + vmlal.s32 q1, d22, d0 + vmlal.s32 q1, d24, d27 + vmlal.s32 q1, d23, d20 + vmlal.s32 q1, d12, d7 + vmlal.s32 q1, d13, d6 + vmull.s32 q6, d18, d1 + vmlal.s32 q6, d19, d0 + vmlal.s32 q6, d23, d27 + vmlal.s32 q6, d22, d20 + vmlal.s32 q6, d24, d26 + vmull.s32 q0, d18, d0 + vmlal.s32 q0, d22, d27 + vmlal.s32 q0, d23, d26 + vmlal.s32 q0, d24, d31 + vmlal.s32 q0, d19, d20 + add r2, sp, #608 + vld1.8 {d18-d19}, [r2, : 128] + vmlal.s32 q2, d18, d7 + vmlal.s32 q5, d18, d6 + vmlal.s32 q1, d18, d21 + vmlal.s32 q0, d18, d28 + vmlal.s32 q6, d18, d29 + vmlal.s32 q2, d19, d6 + vmlal.s32 q5, d19, d21 + vmlal.s32 q1, d19, d29 + vmlal.s32 q0, d19, d9 + vmlal.s32 q6, d19, d28 + add r2, sp, #560 + vld1.8 {d18-d19}, [r2, : 128] + add r2, sp, #480 + vld1.8 {d22-d23}, [r2, : 128] + vmlal.s32 q5, d19, d7 + vmlal.s32 q0, d18, d21 + vmlal.s32 q0, d19, d29 + vmlal.s32 q6, d18, d6 + add r2, sp, #496 + vld1.8 {d6-d7}, [r2, : 128] + vmlal.s32 q6, d19, d21 + add r2, sp, #544 + vld1.8 {d18-d19}, [r2, : 
128] + vmlal.s32 q0, d30, d8 + add r2, sp, #640 + vld1.8 {d20-d21}, [r2, : 128] + vmlal.s32 q5, d30, d29 + add r2, sp, #576 + vld1.8 {d24-d25}, [r2, : 128] + vmlal.s32 q1, d30, d28 + vadd.i64 q13, q0, q11 + vadd.i64 q14, q5, q11 + vmlal.s32 q6, d30, d9 + vshr.s64 q4, q13, #26 + vshr.s64 q13, q14, #26 + vadd.i64 q7, q7, q4 + vshl.i64 q4, q4, #26 + vadd.i64 q14, q7, q3 + vadd.i64 q9, q9, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q15, q9, q3 + vsub.i64 q0, q0, q4 + vshr.s64 q4, q14, #25 + vsub.i64 q5, q5, q13 + vshr.s64 q13, q15, #25 + vadd.i64 q6, q6, q4 + vshl.i64 q4, q4, #25 + vadd.i64 q14, q6, q11 + vadd.i64 q2, q2, q13 + vsub.i64 q4, q7, q4 + vshr.s64 q7, q14, #26 + vshl.i64 q13, q13, #25 + vadd.i64 q14, q2, q11 + vadd.i64 q8, q8, q7 + vshl.i64 q7, q7, #26 + vadd.i64 q15, q8, q3 + vsub.i64 q9, q9, q13 + vshr.s64 q13, q14, #26 + vsub.i64 q6, q6, q7 + vshr.s64 q7, q15, #25 + vadd.i64 q10, q10, q13 + vshl.i64 q13, q13, #26 + vadd.i64 q14, q10, q3 + vadd.i64 q1, q1, q7 + add r2, r3, #240 + vshl.i64 q7, q7, #25 + add r4, r3, #144 + vadd.i64 q15, q1, q11 + add r2, r2, #8 + vsub.i64 q2, q2, q13 + add r4, r4, #8 + vshr.s64 q13, q14, #25 + vsub.i64 q7, q8, q7 + vshr.s64 q8, q15, #26 + vadd.i64 q14, q13, q13 + vadd.i64 q12, q12, q8 + vtrn.32 d12, d14 + vshl.i64 q8, q8, #26 + vtrn.32 d13, d15 + vadd.i64 q3, q12, q3 + vadd.i64 q0, q0, q14 + vst1.8 d12, [r2, : 64]! + vshl.i64 q7, q13, #4 + vst1.8 d13, [r4, : 64]! + vsub.i64 q1, q1, q8 + vshr.s64 q3, q3, #25 + vadd.i64 q0, q0, q7 + vadd.i64 q5, q5, q3 + vshl.i64 q3, q3, #25 + vadd.i64 q6, q5, q11 + vadd.i64 q0, q0, q13 + vshl.i64 q7, q13, #25 + vadd.i64 q8, q0, q11 + vsub.i64 q3, q12, q3 + vshr.s64 q6, q6, #26 + vsub.i64 q7, q10, q7 + vtrn.32 d2, d6 + vshr.s64 q8, q8, #26 + vtrn.32 d3, d7 + vadd.i64 q3, q9, q6 + vst1.8 d2, [r2, : 64] + vshl.i64 q6, q6, #26 + vst1.8 d3, [r4, : 64] + vadd.i64 q1, q4, q8 + vtrn.32 d4, d14 + vshl.i64 q4, q8, #26 + vtrn.32 d5, d15 + vsub.i64 q5, q5, q6 + add r2, r2, #16 + vsub.i64 q0, q0, q4 + vst1.8 d4, [r2, : 64] + add r4, r4, #16 + vst1.8 d5, [r4, : 64] + vtrn.32 d10, d6 + vtrn.32 d11, d7 + sub r2, r2, #8 + sub r4, r4, #8 + vtrn.32 d0, d2 + vtrn.32 d1, d3 + vst1.8 d10, [r2, : 64] + vst1.8 d11, [r4, : 64] + sub r2, r2, #24 + sub r4, r4, #24 + vst1.8 d0, [r2, : 64] + vst1.8 d1, [r4, : 64] + ldr r2, [sp, #456] + ldr r4, [sp, #460] + subs r5, r2, #1 + bge .Lmainloop + add r1, r3, #144 + add r2, r3, #336 + vld1.8 {d0-d1}, [r1, : 128]! + vld1.8 {d2-d3}, [r1, : 128]! + vld1.8 {d4}, [r1, : 64] + vst1.8 {d0-d1}, [r2, : 128]! + vst1.8 {d2-d3}, [r2, : 128]! + vst1.8 d4, [r2, : 64] + movw r1, #0 +.Linvertloop: + add r2, r3, #144 + movw r4, #0 + movw r5, #2 + cmp r1, #1 + moveq r5, #1 + addeq r2, r3, #336 + addeq r4, r3, #48 + cmp r1, #2 + moveq r5, #1 + addeq r2, r3, #48 + cmp r1, #3 + moveq r5, #5 + addeq r4, r3, #336 + cmp r1, #4 + moveq r5, #10 + cmp r1, #5 + moveq r5, #20 + cmp r1, #6 + moveq r5, #10 + addeq r2, r3, #336 + addeq r4, r3, #336 + cmp r1, #7 + moveq r5, #50 + cmp r1, #8 + moveq r5, #100 + cmp r1, #9 + moveq r5, #50 + addeq r2, r3, #336 + cmp r1, #10 + moveq r5, #5 + addeq r2, r3, #48 + cmp r1, #11 + moveq r5, #0 + addeq r2, r3, #96 + add r6, r3, #144 + add r7, r3, #288 + vld1.8 {d0-d1}, [r6, : 128]! + vld1.8 {d2-d3}, [r6, : 128]! + vld1.8 {d4}, [r6, : 64] + vst1.8 {d0-d1}, [r7, : 128]! + vst1.8 {d2-d3}, [r7, : 128]! 
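+	/* r5 selects how many times .Lsquaringloop below squares the working
+	 * value for this stage of .Linvertloop; across the twelve stages this
+	 * walks the usual addition chain for z^(2^255 - 21), i.e. the final
+	 * field inversion by Fermat's little theorem.
+	 */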
+ vst1.8 d4, [r7, : 64] + cmp r5, #0 + beq .Lskipsquaringloop +.Lsquaringloop: + add r6, r3, #288 + add r7, r3, #288 + add r8, r3, #288 + vmov.i32 q0, #19 + vmov.i32 q1, #0 + vmov.i32 q2, #1 + vzip.i32 q1, q2 + vld1.8 {d4-d5}, [r7, : 128]! + vld1.8 {d6-d7}, [r7, : 128]! + vld1.8 {d9}, [r7, : 64] + vld1.8 {d10-d11}, [r6, : 128]! + add r7, sp, #384 + vld1.8 {d12-d13}, [r6, : 128]! + vmul.i32 q7, q2, q0 + vld1.8 {d8}, [r6, : 64] + vext.32 d17, d11, d10, #1 + vmul.i32 q9, q3, q0 + vext.32 d16, d10, d8, #1 + vshl.u32 q10, q5, q1 + vext.32 d22, d14, d4, #1 + vext.32 d24, d18, d6, #1 + vshl.u32 q13, q6, q1 + vshl.u32 d28, d8, d2 + vrev64.i32 d22, d22 + vmul.i32 d1, d9, d1 + vrev64.i32 d24, d24 + vext.32 d29, d8, d13, #1 + vext.32 d0, d1, d9, #1 + vrev64.i32 d0, d0 + vext.32 d2, d9, d1, #1 + vext.32 d23, d15, d5, #1 + vmull.s32 q4, d20, d4 + vrev64.i32 d23, d23 + vmlal.s32 q4, d21, d1 + vrev64.i32 d2, d2 + vmlal.s32 q4, d26, d19 + vext.32 d3, d5, d15, #1 + vmlal.s32 q4, d27, d18 + vrev64.i32 d3, d3 + vmlal.s32 q4, d28, d15 + vext.32 d14, d12, d11, #1 + vmull.s32 q5, d16, d23 + vext.32 d15, d13, d12, #1 + vmlal.s32 q5, d17, d4 + vst1.8 d8, [r7, : 64]! + vmlal.s32 q5, d14, d1 + vext.32 d12, d9, d8, #0 + vmlal.s32 q5, d15, d19 + vmov.i64 d13, #0 + vmlal.s32 q5, d29, d18 + vext.32 d25, d19, d7, #1 + vmlal.s32 q6, d20, d5 + vrev64.i32 d25, d25 + vmlal.s32 q6, d21, d4 + vst1.8 d11, [r7, : 64]! + vmlal.s32 q6, d26, d1 + vext.32 d9, d10, d10, #0 + vmlal.s32 q6, d27, d19 + vmov.i64 d8, #0 + vmlal.s32 q6, d28, d18 + vmlal.s32 q4, d16, d24 + vmlal.s32 q4, d17, d5 + vmlal.s32 q4, d14, d4 + vst1.8 d12, [r7, : 64]! + vmlal.s32 q4, d15, d1 + vext.32 d10, d13, d12, #0 + vmlal.s32 q4, d29, d19 + vmov.i64 d11, #0 + vmlal.s32 q5, d20, d6 + vmlal.s32 q5, d21, d5 + vmlal.s32 q5, d26, d4 + vext.32 d13, d8, d8, #0 + vmlal.s32 q5, d27, d1 + vmov.i64 d12, #0 + vmlal.s32 q5, d28, d19 + vst1.8 d9, [r7, : 64]! + vmlal.s32 q6, d16, d25 + vmlal.s32 q6, d17, d6 + vst1.8 d10, [r7, : 64] + vmlal.s32 q6, d14, d5 + vext.32 d8, d11, d10, #0 + vmlal.s32 q6, d15, d4 + vmov.i64 d9, #0 + vmlal.s32 q6, d29, d1 + vmlal.s32 q4, d20, d7 + vmlal.s32 q4, d21, d6 + vmlal.s32 q4, d26, d5 + vext.32 d11, d12, d12, #0 + vmlal.s32 q4, d27, d4 + vmov.i64 d10, #0 + vmlal.s32 q4, d28, d1 + vmlal.s32 q5, d16, d0 + sub r6, r7, #32 + vmlal.s32 q5, d17, d7 + vmlal.s32 q5, d14, d6 + vext.32 d30, d9, d8, #0 + vmlal.s32 q5, d15, d5 + vld1.8 {d31}, [r6, : 64]! + vmlal.s32 q5, d29, d4 + vmlal.s32 q15, d20, d0 + vext.32 d0, d6, d18, #1 + vmlal.s32 q15, d21, d25 + vrev64.i32 d0, d0 + vmlal.s32 q15, d26, d24 + vext.32 d1, d7, d19, #1 + vext.32 d7, d10, d10, #0 + vmlal.s32 q15, d27, d23 + vrev64.i32 d1, d1 + vld1.8 {d6}, [r6, : 64] + vmlal.s32 q15, d28, d22 + vmlal.s32 q3, d16, d4 + add r6, r6, #24 + vmlal.s32 q3, d17, d2 + vext.32 d4, d31, d30, #0 + vmov d17, d11 + vmlal.s32 q3, d14, d1 + vext.32 d11, d13, d13, #0 + vext.32 d13, d30, d30, #0 + vmlal.s32 q3, d15, d0 + vext.32 d1, d8, d8, #0 + vmlal.s32 q3, d29, d3 + vld1.8 {d5}, [r6, : 64] + sub r6, r6, #16 + vext.32 d10, d6, d6, #0 + vmov.i32 q1, #0xffffffff + vshl.i64 q4, q1, #25 + add r7, sp, #480 + vld1.8 {d14-d15}, [r7, : 128] + vadd.i64 q9, q2, q7 + vshl.i64 q1, q1, #26 + vshr.s64 q10, q9, #26 + vld1.8 {d0}, [r6, : 64]! + vadd.i64 q5, q5, q10 + vand q9, q9, q1 + vld1.8 {d16}, [r6, : 64]! 
+ add r6, sp, #496 + vld1.8 {d20-d21}, [r6, : 128] + vadd.i64 q11, q5, q10 + vsub.i64 q2, q2, q9 + vshr.s64 q9, q11, #25 + vext.32 d12, d5, d4, #0 + vand q11, q11, q4 + vadd.i64 q0, q0, q9 + vmov d19, d7 + vadd.i64 q3, q0, q7 + vsub.i64 q5, q5, q11 + vshr.s64 q11, q3, #26 + vext.32 d18, d11, d10, #0 + vand q3, q3, q1 + vadd.i64 q8, q8, q11 + vadd.i64 q11, q8, q10 + vsub.i64 q0, q0, q3 + vshr.s64 q3, q11, #25 + vand q11, q11, q4 + vadd.i64 q3, q6, q3 + vadd.i64 q6, q3, q7 + vsub.i64 q8, q8, q11 + vshr.s64 q11, q6, #26 + vand q6, q6, q1 + vadd.i64 q9, q9, q11 + vadd.i64 d25, d19, d21 + vsub.i64 q3, q3, q6 + vshr.s64 d23, d25, #25 + vand q4, q12, q4 + vadd.i64 d21, d23, d23 + vshl.i64 d25, d23, #4 + vadd.i64 d21, d21, d23 + vadd.i64 d25, d25, d21 + vadd.i64 d4, d4, d25 + vzip.i32 q0, q8 + vadd.i64 d12, d4, d14 + add r6, r8, #8 + vst1.8 d0, [r6, : 64] + vsub.i64 d19, d19, d9 + add r6, r6, #16 + vst1.8 d16, [r6, : 64] + vshr.s64 d22, d12, #26 + vand q0, q6, q1 + vadd.i64 d10, d10, d22 + vzip.i32 q3, q9 + vsub.i64 d4, d4, d0 + sub r6, r6, #8 + vst1.8 d6, [r6, : 64] + add r6, r6, #16 + vst1.8 d18, [r6, : 64] + vzip.i32 q2, q5 + sub r6, r6, #32 + vst1.8 d4, [r6, : 64] + subs r5, r5, #1 + bhi .Lsquaringloop +.Lskipsquaringloop: + mov r2, r2 + add r5, r3, #288 + add r6, r3, #144 + vmov.i32 q0, #19 + vmov.i32 q1, #0 + vmov.i32 q2, #1 + vzip.i32 q1, q2 + vld1.8 {d4-d5}, [r5, : 128]! + vld1.8 {d6-d7}, [r5, : 128]! + vld1.8 {d9}, [r5, : 64] + vld1.8 {d10-d11}, [r2, : 128]! + add r5, sp, #384 + vld1.8 {d12-d13}, [r2, : 128]! + vmul.i32 q7, q2, q0 + vld1.8 {d8}, [r2, : 64] + vext.32 d17, d11, d10, #1 + vmul.i32 q9, q3, q0 + vext.32 d16, d10, d8, #1 + vshl.u32 q10, q5, q1 + vext.32 d22, d14, d4, #1 + vext.32 d24, d18, d6, #1 + vshl.u32 q13, q6, q1 + vshl.u32 d28, d8, d2 + vrev64.i32 d22, d22 + vmul.i32 d1, d9, d1 + vrev64.i32 d24, d24 + vext.32 d29, d8, d13, #1 + vext.32 d0, d1, d9, #1 + vrev64.i32 d0, d0 + vext.32 d2, d9, d1, #1 + vext.32 d23, d15, d5, #1 + vmull.s32 q4, d20, d4 + vrev64.i32 d23, d23 + vmlal.s32 q4, d21, d1 + vrev64.i32 d2, d2 + vmlal.s32 q4, d26, d19 + vext.32 d3, d5, d15, #1 + vmlal.s32 q4, d27, d18 + vrev64.i32 d3, d3 + vmlal.s32 q4, d28, d15 + vext.32 d14, d12, d11, #1 + vmull.s32 q5, d16, d23 + vext.32 d15, d13, d12, #1 + vmlal.s32 q5, d17, d4 + vst1.8 d8, [r5, : 64]! + vmlal.s32 q5, d14, d1 + vext.32 d12, d9, d8, #0 + vmlal.s32 q5, d15, d19 + vmov.i64 d13, #0 + vmlal.s32 q5, d29, d18 + vext.32 d25, d19, d7, #1 + vmlal.s32 q6, d20, d5 + vrev64.i32 d25, d25 + vmlal.s32 q6, d21, d4 + vst1.8 d11, [r5, : 64]! + vmlal.s32 q6, d26, d1 + vext.32 d9, d10, d10, #0 + vmlal.s32 q6, d27, d19 + vmov.i64 d8, #0 + vmlal.s32 q6, d28, d18 + vmlal.s32 q4, d16, d24 + vmlal.s32 q4, d17, d5 + vmlal.s32 q4, d14, d4 + vst1.8 d12, [r5, : 64]! + vmlal.s32 q4, d15, d1 + vext.32 d10, d13, d12, #0 + vmlal.s32 q4, d29, d19 + vmov.i64 d11, #0 + vmlal.s32 q5, d20, d6 + vmlal.s32 q5, d21, d5 + vmlal.s32 q5, d26, d4 + vext.32 d13, d8, d8, #0 + vmlal.s32 q5, d27, d1 + vmov.i64 d12, #0 + vmlal.s32 q5, d28, d19 + vst1.8 d9, [r5, : 64]! 
+ vmlal.s32 q6, d16, d25 + vmlal.s32 q6, d17, d6 + vst1.8 d10, [r5, : 64] + vmlal.s32 q6, d14, d5 + vext.32 d8, d11, d10, #0 + vmlal.s32 q6, d15, d4 + vmov.i64 d9, #0 + vmlal.s32 q6, d29, d1 + vmlal.s32 q4, d20, d7 + vmlal.s32 q4, d21, d6 + vmlal.s32 q4, d26, d5 + vext.32 d11, d12, d12, #0 + vmlal.s32 q4, d27, d4 + vmov.i64 d10, #0 + vmlal.s32 q4, d28, d1 + vmlal.s32 q5, d16, d0 + sub r2, r5, #32 + vmlal.s32 q5, d17, d7 + vmlal.s32 q5, d14, d6 + vext.32 d30, d9, d8, #0 + vmlal.s32 q5, d15, d5 + vld1.8 {d31}, [r2, : 64]! + vmlal.s32 q5, d29, d4 + vmlal.s32 q15, d20, d0 + vext.32 d0, d6, d18, #1 + vmlal.s32 q15, d21, d25 + vrev64.i32 d0, d0 + vmlal.s32 q15, d26, d24 + vext.32 d1, d7, d19, #1 + vext.32 d7, d10, d10, #0 + vmlal.s32 q15, d27, d23 + vrev64.i32 d1, d1 + vld1.8 {d6}, [r2, : 64] + vmlal.s32 q15, d28, d22 + vmlal.s32 q3, d16, d4 + add r2, r2, #24 + vmlal.s32 q3, d17, d2 + vext.32 d4, d31, d30, #0 + vmov d17, d11 + vmlal.s32 q3, d14, d1 + vext.32 d11, d13, d13, #0 + vext.32 d13, d30, d30, #0 + vmlal.s32 q3, d15, d0 + vext.32 d1, d8, d8, #0 + vmlal.s32 q3, d29, d3 + vld1.8 {d5}, [r2, : 64] + sub r2, r2, #16 + vext.32 d10, d6, d6, #0 + vmov.i32 q1, #0xffffffff + vshl.i64 q4, q1, #25 + add r5, sp, #480 + vld1.8 {d14-d15}, [r5, : 128] + vadd.i64 q9, q2, q7 + vshl.i64 q1, q1, #26 + vshr.s64 q10, q9, #26 + vld1.8 {d0}, [r2, : 64]! + vadd.i64 q5, q5, q10 + vand q9, q9, q1 + vld1.8 {d16}, [r2, : 64]! + add r2, sp, #496 + vld1.8 {d20-d21}, [r2, : 128] + vadd.i64 q11, q5, q10 + vsub.i64 q2, q2, q9 + vshr.s64 q9, q11, #25 + vext.32 d12, d5, d4, #0 + vand q11, q11, q4 + vadd.i64 q0, q0, q9 + vmov d19, d7 + vadd.i64 q3, q0, q7 + vsub.i64 q5, q5, q11 + vshr.s64 q11, q3, #26 + vext.32 d18, d11, d10, #0 + vand q3, q3, q1 + vadd.i64 q8, q8, q11 + vadd.i64 q11, q8, q10 + vsub.i64 q0, q0, q3 + vshr.s64 q3, q11, #25 + vand q11, q11, q4 + vadd.i64 q3, q6, q3 + vadd.i64 q6, q3, q7 + vsub.i64 q8, q8, q11 + vshr.s64 q11, q6, #26 + vand q6, q6, q1 + vadd.i64 q9, q9, q11 + vadd.i64 d25, d19, d21 + vsub.i64 q3, q3, q6 + vshr.s64 d23, d25, #25 + vand q4, q12, q4 + vadd.i64 d21, d23, d23 + vshl.i64 d25, d23, #4 + vadd.i64 d21, d21, d23 + vadd.i64 d25, d25, d21 + vadd.i64 d4, d4, d25 + vzip.i32 q0, q8 + vadd.i64 d12, d4, d14 + add r2, r6, #8 + vst1.8 d0, [r2, : 64] + vsub.i64 d19, d19, d9 + add r2, r2, #16 + vst1.8 d16, [r2, : 64] + vshr.s64 d22, d12, #26 + vand q0, q6, q1 + vadd.i64 d10, d10, d22 + vzip.i32 q3, q9 + vsub.i64 d4, d4, d0 + sub r2, r2, #8 + vst1.8 d6, [r2, : 64] + add r2, r2, #16 + vst1.8 d18, [r2, : 64] + vzip.i32 q2, q5 + sub r2, r2, #32 + vst1.8 d4, [r2, : 64] + cmp r4, #0 + beq .Lskippostcopy + add r2, r3, #144 + mov r4, r4 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4}, [r2, : 64] + vst1.8 {d0-d1}, [r4, : 128]! + vst1.8 {d2-d3}, [r4, : 128]! + vst1.8 d4, [r4, : 64] +.Lskippostcopy: + cmp r1, #1 + bne .Lskipfinalcopy + add r2, r3, #288 + add r4, r3, #144 + vld1.8 {d0-d1}, [r2, : 128]! + vld1.8 {d2-d3}, [r2, : 128]! + vld1.8 {d4}, [r2, : 64] + vst1.8 {d0-d1}, [r4, : 128]! + vst1.8 {d2-d3}, [r4, : 128]! 
+ vst1.8 d4, [r4, : 64] +.Lskipfinalcopy: + add r1, r1, #1 + cmp r1, #12 + blo .Linvertloop + add r1, r3, #144 + ldr r2, [r1], #4 + ldr r3, [r1], #4 + ldr r4, [r1], #4 + ldr r5, [r1], #4 + ldr r6, [r1], #4 + ldr r7, [r1], #4 + ldr r8, [r1], #4 + ldr r9, [r1], #4 + ldr r10, [r1], #4 + ldr r1, [r1] + add r11, r1, r1, LSL #4 + add r11, r11, r1, LSL #1 + add r11, r11, #16777216 + mov r11, r11, ASR #25 + add r11, r11, r2 + mov r11, r11, ASR #26 + add r11, r11, r3 + mov r11, r11, ASR #25 + add r11, r11, r4 + mov r11, r11, ASR #26 + add r11, r11, r5 + mov r11, r11, ASR #25 + add r11, r11, r6 + mov r11, r11, ASR #26 + add r11, r11, r7 + mov r11, r11, ASR #25 + add r11, r11, r8 + mov r11, r11, ASR #26 + add r11, r11, r9 + mov r11, r11, ASR #25 + add r11, r11, r10 + mov r11, r11, ASR #26 + add r11, r11, r1 + mov r11, r11, ASR #25 + add r2, r2, r11 + add r2, r2, r11, LSL #1 + add r2, r2, r11, LSL #4 + mov r11, r2, ASR #26 + add r3, r3, r11 + sub r2, r2, r11, LSL #26 + mov r11, r3, ASR #25 + add r4, r4, r11 + sub r3, r3, r11, LSL #25 + mov r11, r4, ASR #26 + add r5, r5, r11 + sub r4, r4, r11, LSL #26 + mov r11, r5, ASR #25 + add r6, r6, r11 + sub r5, r5, r11, LSL #25 + mov r11, r6, ASR #26 + add r7, r7, r11 + sub r6, r6, r11, LSL #26 + mov r11, r7, ASR #25 + add r8, r8, r11 + sub r7, r7, r11, LSL #25 + mov r11, r8, ASR #26 + add r9, r9, r11 + sub r8, r8, r11, LSL #26 + mov r11, r9, ASR #25 + add r10, r10, r11 + sub r9, r9, r11, LSL #25 + mov r11, r10, ASR #26 + add r1, r1, r11 + sub r10, r10, r11, LSL #26 + mov r11, r1, ASR #25 + sub r1, r1, r11, LSL #25 + add r2, r2, r3, LSL #26 + mov r3, r3, LSR #6 + add r3, r3, r4, LSL #19 + mov r4, r4, LSR #13 + add r4, r4, r5, LSL #13 + mov r5, r5, LSR #19 + add r5, r5, r6, LSL #6 + add r6, r7, r8, LSL #25 + mov r7, r8, LSR #7 + add r7, r7, r9, LSL #19 + mov r8, r9, LSR #13 + add r8, r8, r10, LSL #12 + mov r9, r10, LSR #20 + add r1, r9, r1, LSL #6 + str r2, [r0] + str r3, [r0, #4] + str r4, [r0, #8] + str r5, [r0, #12] + str r6, [r0, #16] + str r7, [r0, #20] + str r8, [r0, #24] + str r1, [r0, #28] + movw r0, #0 + mov sp, ip + pop {r4-r11, pc} +SYM_FUNC_END(curve25519_neon) +#endif diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c b/net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c new file mode 100644 index 000000000000..42cfb6c00f98 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-fiat32.c @@ -0,0 +1,860 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2016 The fiat-crypto Authors. + * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is a machine-generated formally verified implementation of Curve25519 + * ECDH from: <https://github.com/mit-plv/fiat-crypto>. Though originally + * machine generated, it has been tweaked to be suitable for use in the kernel. + * It is optimized for 32-bit machines and machines that cannot work efficiently + * with 128-bit integer types. + */ + +/* fe means field element. Here the field is \Z/(2^255-19). An element t, + * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 + * t[3]+2^102 t[4]+...+2^230 t[9]. + * fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc. + * Multiplication and carrying produce fe from fe_loose. + */ +typedef struct fe { u32 v[10]; } fe; + +/* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc + * Addition and subtraction produce fe_loose from (fe, fe). 
+ */ +typedef struct fe_loose { u32 v[10]; } fe_loose; + +static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s) +{ + /* Ignores top bit of s. */ + u32 a0 = get_unaligned_le32(s); + u32 a1 = get_unaligned_le32(s+4); + u32 a2 = get_unaligned_le32(s+8); + u32 a3 = get_unaligned_le32(s+12); + u32 a4 = get_unaligned_le32(s+16); + u32 a5 = get_unaligned_le32(s+20); + u32 a6 = get_unaligned_le32(s+24); + u32 a7 = get_unaligned_le32(s+28); + h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 26 */ + h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */ + h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */ + h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */ + h[4] = (a3>> 6); /* (32- 6) = 26 */ + h[5] = a4&((1<<25)-1); /* 25 */ + h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */ + h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */ + h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */ + h[9] = (a7>> 6)&((1<<25)-1); /* 25 */ +} + +static __always_inline void fe_frombytes(fe *h, const u8 *s) +{ + fe_frombytes_impl(h->v, s); +} + +static __always_inline u8 /*bool*/ +addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 25 bits of result and 1 bit of carry + * (26 total), so a 32-bit intermediate is sufficient. + */ + u32 x = a + b + c; + *low = x & ((1 << 25) - 1); + return (x >> 25) & 1; +} + +static __always_inline u8 /*bool*/ +addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 26 bits of result and 1 bit of carry + * (27 total), so a 32-bit intermediate is sufficient. + */ + u32 x = a + b + c; + *low = x & ((1 << 26) - 1); + return (x >> 26) & 1; +} + +static __always_inline u8 /*bool*/ +subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 25 bits of result and 1 bit of borrow + * (26 total), so a 32-bit intermediate is sufficient. + */ + u32 x = a - b - c; + *low = x & ((1 << 25) - 1); + return x >> 31; +} + +static __always_inline u8 /*bool*/ +subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) +{ + /* This function extracts 26 bits of result and 1 bit of borrow + *(27 total), so a 32-bit intermediate is sufficient. 
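+ * Editorial note (not in the upstream sources): the borrow is read from
+ * bit 31 because both operands are far below 2^31, so a - b - c either
+ * stays small (top bit clear) or wraps modulo 2^32 to a value near 2^32
+ * (top bit set).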
+ */ + u32 x = a - b - c; + *low = x & ((1 << 26) - 1); + return x >> 31; +} + +static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz) +{ + t = -!!t; /* all set if nonzero, 0 if 0 */ + return (t&nz) | ((~t)&z); +} + +static __always_inline void fe_freeze(u32 out[10], const u32 in1[10]) +{ + { const u32 x17 = in1[9]; + { const u32 x18 = in1[8]; + { const u32 x16 = in1[7]; + { const u32 x14 = in1[6]; + { const u32 x12 = in1[5]; + { const u32 x10 = in1[4]; + { const u32 x8 = in1[3]; + { const u32 x6 = in1[2]; + { const u32 x4 = in1[1]; + { const u32 x2 = in1[0]; + { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20); + { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23); + { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26); + { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29); + { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32); + { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35); + { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38); + { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41); + { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44); + { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47); + { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff); + { u32 x50 = (x49 & 0x3ffffed); + { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52); + { u32 x54 = (x49 & 0x1ffffff); + { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56); + { u32 x58 = (x49 & 0x3ffffff); + { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60); + { u32 x62 = (x49 & 0x1ffffff); + { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64); + { u32 x66 = (x49 & 0x3ffffff); + { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68); + { u32 x70 = (x49 & 0x1ffffff); + { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72); + { u32 x74 = (x49 & 0x3ffffff); + { u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76); + { u32 x78 = (x49 & 0x1ffffff); + { u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80); + { u32 x82 = (x49 & 0x3ffffff); + { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84); + { u32 x86 = (x49 & 0x1ffffff); + { u32 x88; addcarryx_u25(x85, x47, x86, &x88); + out[0] = x52; + out[1] = x56; + out[2] = x60; + out[3] = x64; + out[4] = x68; + out[5] = x72; + out[6] = x76; + out[7] = x80; + out[8] = x84; + out[9] = x88; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_tobytes(u8 s[32], const fe *f) +{ + u32 h[10]; + fe_freeze(h, f->v); + s[0] = h[0] >> 0; + s[1] = h[0] >> 8; + s[2] = h[0] >> 16; + s[3] = (h[0] >> 24) | (h[1] << 2); + s[4] = h[1] >> 6; + s[5] = h[1] >> 14; + s[6] = (h[1] >> 22) | (h[2] << 3); + s[7] = h[2] >> 5; + s[8] = h[2] >> 13; + s[9] = (h[2] >> 21) | (h[3] << 5); + s[10] = h[3] >> 3; + s[11] = h[3] >> 11; + s[12] = (h[3] >> 19) | (h[4] << 6); + s[13] = h[4] >> 2; + s[14] = h[4] >> 10; + s[15] = h[4] >> 18; + s[16] = h[5] >> 0; + s[17] = h[5] >> 8; + s[18] = h[5] >> 16; + s[19] = (h[5] >> 24) | (h[6] << 1); + s[20] = h[6] >> 7; + s[21] = h[6] >> 15; + s[22] = (h[6] >> 23) | (h[7] << 3); + s[23] = h[7] >> 5; + s[24] = h[7] >> 13; + s[25] = (h[7] >> 21) | (h[8] << 4); + s[26] = h[8] >> 4; + s[27] = h[8] >> 12; + s[28] = (h[8] >> 20) | (h[9] << 6); + s[29] = h[9] >> 2; + s[30] = h[9] >> 10; + s[31] = h[9] >> 18; +} + +/* h = f */ +static __always_inline void fe_copy(fe *h, const fe *f) +{ + memmove(h, f, sizeof(u32) * 10); +} + +static 
__always_inline void fe_copy_lt(fe_loose *h, const fe *f) +{ + memmove(h, f, sizeof(u32) * 10); +} + +/* h = 0 */ +static __always_inline void fe_0(fe *h) +{ + memset(h, 0, sizeof(u32) * 10); +} + +/* h = 1 */ +static __always_inline void fe_1(fe *h) +{ + memset(h, 0, sizeof(u32) * 10); + h->v[0] = 1; +} + +static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = in2[9]; + { const u32 x39 = in2[8]; + { const u32 x37 = in2[7]; + { const u32 x35 = in2[6]; + { const u32 x33 = in2[5]; + { const u32 x31 = in2[4]; + { const u32 x29 = in2[3]; + { const u32 x27 = in2[2]; + { const u32 x25 = in2[1]; + { const u32 x23 = in2[0]; + out[0] = (x5 + x23); + out[1] = (x7 + x25); + out[2] = (x9 + x27); + out[3] = (x11 + x29); + out[4] = (x13 + x31); + out[5] = (x15 + x33); + out[6] = (x17 + x35); + out[7] = (x19 + x37); + out[8] = (x21 + x39); + out[9] = (x20 + x38); + }}}}}}}}}}}}}}}}}}}} +} + +/* h = f + g + * Can overlap h with f or g. + */ +static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g) +{ + fe_add_impl(h->v, f->v, g->v); +} + +static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = in2[9]; + { const u32 x39 = in2[8]; + { const u32 x37 = in2[7]; + { const u32 x35 = in2[6]; + { const u32 x33 = in2[5]; + { const u32 x31 = in2[4]; + { const u32 x29 = in2[3]; + { const u32 x27 = in2[2]; + { const u32 x25 = in2[1]; + { const u32 x23 = in2[0]; + out[0] = ((0x7ffffda + x5) - x23); + out[1] = ((0x3fffffe + x7) - x25); + out[2] = ((0x7fffffe + x9) - x27); + out[3] = ((0x3fffffe + x11) - x29); + out[4] = ((0x7fffffe + x13) - x31); + out[5] = ((0x3fffffe + x15) - x33); + out[6] = ((0x7fffffe + x17) - x35); + out[7] = ((0x3fffffe + x19) - x37); + out[8] = ((0x7fffffe + x21) - x39); + out[9] = ((0x3fffffe + x20) - x38); + }}}}}}}}}}}}}}}}}}}} +} + +/* h = f - g + * Can overlap h with f or g. 
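+ * Editorial note (not in the upstream sources): the constants added in
+ * fe_sub_impl (0x7ffffda, 0x3fffffe, 0x7fffffe, ...) are the limbs of
+ * 2*p = 2*(2^255 - 19) in this radix, so each per-limb difference stays
+ * nonnegative while the result remains congruent to f - g modulo p.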
+ */ +static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g) +{ + fe_sub_impl(h->v, f->v, g->v); +} + +static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = in2[9]; + { const u32 x39 = in2[8]; + { const u32 x37 = in2[7]; + { const u32 x35 = in2[6]; + { const u32 x33 = in2[5]; + { const u32 x31 = in2[4]; + { const u32 x29 = in2[3]; + { const u32 x27 = in2[2]; + { const u32 x25 = in2[1]; + { const u32 x23 = in2[0]; + { u64 x40 = ((u64)x23 * x5); + { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); + { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); + { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); + { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); + { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); + { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); + { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); + { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); + { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); + { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); + { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); + { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); + { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); + { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); + { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); + { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); + { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); + { u64 x58 = ((u64)(0x2 * x38) * x20); + { u64 x59 = (x48 + (x58 << 0x4)); + { u64 x60 = (x59 + (x58 << 0x1)); + { u64 x61 = (x60 + x58); + { u64 x62 = (x47 + (x57 << 0x4)); + { u64 x63 = (x62 + (x57 << 0x1)); + { u64 x64 = (x63 + x57); + { u64 x65 = (x46 + (x56 << 0x4)); + { u64 x66 = (x65 + (x56 << 0x1)); + { u64 x67 = (x66 + x56); + { u64 x68 = (x45 + (x55 << 0x4)); + { u64 x69 = (x68 + (x55 << 0x1)); + { u64 x70 = (x69 + x55); + { u64 x71 = (x44 + (x54 << 0x4)); + { u64 x72 = 
(x71 + (x54 << 0x1)); + { u64 x73 = (x72 + x54); + { u64 x74 = (x43 + (x53 << 0x4)); + { u64 x75 = (x74 + (x53 << 0x1)); + { u64 x76 = (x75 + x53); + { u64 x77 = (x42 + (x52 << 0x4)); + { u64 x78 = (x77 + (x52 << 0x1)); + { u64 x79 = (x78 + x52); + { u64 x80 = (x41 + (x51 << 0x4)); + { u64 x81 = (x80 + (x51 << 0x1)); + { u64 x82 = (x81 + x51); + { u64 x83 = (x40 + (x50 << 0x4)); + { u64 x84 = (x83 + (x50 << 0x1)); + { u64 x85 = (x84 + x50); + { u64 x86 = (x85 >> 0x1a); + { u32 x87 = ((u32)x85 & 0x3ffffff); + { u64 x88 = (x86 + x82); + { u64 x89 = (x88 >> 0x19); + { u32 x90 = ((u32)x88 & 0x1ffffff); + { u64 x91 = (x89 + x79); + { u64 x92 = (x91 >> 0x1a); + { u32 x93 = ((u32)x91 & 0x3ffffff); + { u64 x94 = (x92 + x76); + { u64 x95 = (x94 >> 0x19); + { u32 x96 = ((u32)x94 & 0x1ffffff); + { u64 x97 = (x95 + x73); + { u64 x98 = (x97 >> 0x1a); + { u32 x99 = ((u32)x97 & 0x3ffffff); + { u64 x100 = (x98 + x70); + { u64 x101 = (x100 >> 0x19); + { u32 x102 = ((u32)x100 & 0x1ffffff); + { u64 x103 = (x101 + x67); + { u64 x104 = (x103 >> 0x1a); + { u32 x105 = ((u32)x103 & 0x3ffffff); + { u64 x106 = (x104 + x64); + { u64 x107 = (x106 >> 0x19); + { u32 x108 = ((u32)x106 & 0x1ffffff); + { u64 x109 = (x107 + x61); + { u64 x110 = (x109 >> 0x1a); + { u32 x111 = ((u32)x109 & 0x3ffffff); + { u64 x112 = (x110 + x49); + { u64 x113 = (x112 >> 0x19); + { u32 x114 = ((u32)x112 & 0x1ffffff); + { u64 x115 = (x87 + (0x13 * x113)); + { u32 x116 = (u32) (x115 >> 0x1a); + { u32 x117 = ((u32)x115 & 0x3ffffff); + { u32 x118 = (x116 + x90); + { u32 x119 = (x118 >> 0x19); + { u32 x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g) +{ + fe_mul_impl(h->v, f->v, g->v); +} + +static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) +{ + fe_mul_impl(h->v, f->v, g->v); +} + +static __always_inline void +fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) +{ + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_sqr_impl(u32 out[10], const u32 in1[10]) +{ + { const u32 x17 = in1[9]; + { const u32 x18 = in1[8]; + { const u32 x16 = in1[7]; + { const u32 x14 = in1[6]; + { const u32 x12 = in1[5]; + { const u32 x10 = in1[4]; + { const u32 x8 = in1[3]; + { const u32 x6 = in1[2]; + { const u32 x4 = in1[1]; + { const u32 x2 = in1[0]; + { u64 x19 = ((u64)x2 * x2); + { u64 x20 = ((u64)(0x2 * x2) * x4); + { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6))); + { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8))); + { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10)); + { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12))); + { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12))); + { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16))); + { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12)))))); + { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17))); + { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17))))); + { u64 x30 = (0x2 * (((((u64)x12 * x14) + 
((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17))); + { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17)))))); + { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17))); + { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17))); + { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17))); + { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17)); + { u64 x36 = ((u64)(0x2 * x18) * x17); + { u64 x37 = ((u64)(0x2 * x17) * x17); + { u64 x38 = (x27 + (x37 << 0x4)); + { u64 x39 = (x38 + (x37 << 0x1)); + { u64 x40 = (x39 + x37); + { u64 x41 = (x26 + (x36 << 0x4)); + { u64 x42 = (x41 + (x36 << 0x1)); + { u64 x43 = (x42 + x36); + { u64 x44 = (x25 + (x35 << 0x4)); + { u64 x45 = (x44 + (x35 << 0x1)); + { u64 x46 = (x45 + x35); + { u64 x47 = (x24 + (x34 << 0x4)); + { u64 x48 = (x47 + (x34 << 0x1)); + { u64 x49 = (x48 + x34); + { u64 x50 = (x23 + (x33 << 0x4)); + { u64 x51 = (x50 + (x33 << 0x1)); + { u64 x52 = (x51 + x33); + { u64 x53 = (x22 + (x32 << 0x4)); + { u64 x54 = (x53 + (x32 << 0x1)); + { u64 x55 = (x54 + x32); + { u64 x56 = (x21 + (x31 << 0x4)); + { u64 x57 = (x56 + (x31 << 0x1)); + { u64 x58 = (x57 + x31); + { u64 x59 = (x20 + (x30 << 0x4)); + { u64 x60 = (x59 + (x30 << 0x1)); + { u64 x61 = (x60 + x30); + { u64 x62 = (x19 + (x29 << 0x4)); + { u64 x63 = (x62 + (x29 << 0x1)); + { u64 x64 = (x63 + x29); + { u64 x65 = (x64 >> 0x1a); + { u32 x66 = ((u32)x64 & 0x3ffffff); + { u64 x67 = (x65 + x61); + { u64 x68 = (x67 >> 0x19); + { u32 x69 = ((u32)x67 & 0x1ffffff); + { u64 x70 = (x68 + x58); + { u64 x71 = (x70 >> 0x1a); + { u32 x72 = ((u32)x70 & 0x3ffffff); + { u64 x73 = (x71 + x55); + { u64 x74 = (x73 >> 0x19); + { u32 x75 = ((u32)x73 & 0x1ffffff); + { u64 x76 = (x74 + x52); + { u64 x77 = (x76 >> 0x1a); + { u32 x78 = ((u32)x76 & 0x3ffffff); + { u64 x79 = (x77 + x49); + { u64 x80 = (x79 >> 0x19); + { u32 x81 = ((u32)x79 & 0x1ffffff); + { u64 x82 = (x80 + x46); + { u64 x83 = (x82 >> 0x1a); + { u32 x84 = ((u32)x82 & 0x3ffffff); + { u64 x85 = (x83 + x43); + { u64 x86 = (x85 >> 0x19); + { u32 x87 = ((u32)x85 & 0x1ffffff); + { u64 x88 = (x86 + x40); + { u64 x89 = (x88 >> 0x1a); + { u32 x90 = ((u32)x88 & 0x3ffffff); + { u64 x91 = (x89 + x28); + { u64 x92 = (x91 >> 0x19); + { u32 x93 = ((u32)x91 & 0x1ffffff); + { u64 x94 = (x66 + (0x13 * x92)); + { u32 x95 = (u32) (x94 >> 0x1a); + { u32 x96 = ((u32)x94 & 0x3ffffff); + { u32 x97 = (x95 + x69); + { u32 x98 = (x97 >> 0x19); + { u32 x99 = (x97 & 0x1ffffff); + out[0] = x96; + out[1] = x99; + out[2] = (x98 + x72); + out[3] = x75; + out[4] = x78; + out[5] = x81; + out[6] = x84; + out[7] = x87; + out[8] = x90; + out[9] = x93; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_sq_tl(fe *h, const fe_loose *f) +{ + fe_sqr_impl(h->v, f->v); +} + +static __always_inline void fe_sq_tt(fe *h, const fe *f) +{ + fe_sqr_impl(h->v, f->v); +} + +static __always_inline void fe_loose_invert(fe *out, const fe_loose *z) +{ + fe t0; + fe t1; + fe t2; + fe t3; + int i; + + fe_sq_tl(&t0, z); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 2; ++i) + fe_sq_tt(&t1, &t1); + fe_mul_tlt(&t1, z, &t1); + fe_mul_ttt(&t0, &t0, &t1); + fe_sq_tt(&t2, &t0); + fe_mul_ttt(&t1, &t1, &t2); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 5; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 10; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t2, &t2, &t1); + fe_sq_tt(&t3, &t2); + for (i = 
1; i < 20; ++i) + fe_sq_tt(&t3, &t3); + fe_mul_ttt(&t2, &t3, &t2); + fe_sq_tt(&t2, &t2); + for (i = 1; i < 10; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 50; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t2, &t2, &t1); + fe_sq_tt(&t3, &t2); + for (i = 1; i < 100; ++i) + fe_sq_tt(&t3, &t3); + fe_mul_ttt(&t2, &t3, &t2); + fe_sq_tt(&t2, &t2); + for (i = 1; i < 50; ++i) + fe_sq_tt(&t2, &t2); + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t1, &t1); + for (i = 1; i < 5; ++i) + fe_sq_tt(&t1, &t1); + fe_mul_ttt(out, &t1, &t0); +} + +static __always_inline void fe_invert(fe *out, const fe *z) +{ + fe_loose l; + fe_copy_lt(&l, z); + fe_loose_invert(out, &l); +} + +/* Replace (f,g) with (g,f) if b == 1; + * replace (f,g) with (f,g) if b == 0. + * + * Preconditions: b in {0,1} + */ +static __always_inline void fe_cswap(fe *f, fe *g, unsigned int b) +{ + unsigned i; + b = 0 - b; + for (i = 0; i < 10; i++) { + u32 x = f->v[i] ^ g->v[i]; + x &= b; + f->v[i] ^= x; + g->v[i] ^= x; + } +} + +/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0.*/ +static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10]) +{ + { const u32 x20 = in1[9]; + { const u32 x21 = in1[8]; + { const u32 x19 = in1[7]; + { const u32 x17 = in1[6]; + { const u32 x15 = in1[5]; + { const u32 x13 = in1[4]; + { const u32 x11 = in1[3]; + { const u32 x9 = in1[2]; + { const u32 x7 = in1[1]; + { const u32 x5 = in1[0]; + { const u32 x38 = 0; + { const u32 x39 = 0; + { const u32 x37 = 0; + { const u32 x35 = 0; + { const u32 x33 = 0; + { const u32 x31 = 0; + { const u32 x29 = 0; + { const u32 x27 = 0; + { const u32 x25 = 0; + { const u32 x23 = 121666; + { u64 x40 = ((u64)x23 * x5); + { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); + { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); + { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); + { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); + { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); + { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); + { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); + { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); + { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); + { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); + { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); + { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); + { u64 x53 = (((((((u64)x35 * x19) + 
((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); + { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); + { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); + { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); + { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); + { u64 x58 = ((u64)(0x2 * x38) * x20); + { u64 x59 = (x48 + (x58 << 0x4)); + { u64 x60 = (x59 + (x58 << 0x1)); + { u64 x61 = (x60 + x58); + { u64 x62 = (x47 + (x57 << 0x4)); + { u64 x63 = (x62 + (x57 << 0x1)); + { u64 x64 = (x63 + x57); + { u64 x65 = (x46 + (x56 << 0x4)); + { u64 x66 = (x65 + (x56 << 0x1)); + { u64 x67 = (x66 + x56); + { u64 x68 = (x45 + (x55 << 0x4)); + { u64 x69 = (x68 + (x55 << 0x1)); + { u64 x70 = (x69 + x55); + { u64 x71 = (x44 + (x54 << 0x4)); + { u64 x72 = (x71 + (x54 << 0x1)); + { u64 x73 = (x72 + x54); + { u64 x74 = (x43 + (x53 << 0x4)); + { u64 x75 = (x74 + (x53 << 0x1)); + { u64 x76 = (x75 + x53); + { u64 x77 = (x42 + (x52 << 0x4)); + { u64 x78 = (x77 + (x52 << 0x1)); + { u64 x79 = (x78 + x52); + { u64 x80 = (x41 + (x51 << 0x4)); + { u64 x81 = (x80 + (x51 << 0x1)); + { u64 x82 = (x81 + x51); + { u64 x83 = (x40 + (x50 << 0x4)); + { u64 x84 = (x83 + (x50 << 0x1)); + { u64 x85 = (x84 + x50); + { u64 x86 = (x85 >> 0x1a); + { u32 x87 = ((u32)x85 & 0x3ffffff); + { u64 x88 = (x86 + x82); + { u64 x89 = (x88 >> 0x19); + { u32 x90 = ((u32)x88 & 0x1ffffff); + { u64 x91 = (x89 + x79); + { u64 x92 = (x91 >> 0x1a); + { u32 x93 = ((u32)x91 & 0x3ffffff); + { u64 x94 = (x92 + x76); + { u64 x95 = (x94 >> 0x19); + { u32 x96 = ((u32)x94 & 0x1ffffff); + { u64 x97 = (x95 + x73); + { u64 x98 = (x97 >> 0x1a); + { u32 x99 = ((u32)x97 & 0x3ffffff); + { u64 x100 = (x98 + x70); + { u64 x101 = (x100 >> 0x19); + { u32 x102 = ((u32)x100 & 0x1ffffff); + { u64 x103 = (x101 + x67); + { u64 x104 = (x103 >> 0x1a); + { u32 x105 = ((u32)x103 & 0x3ffffff); + { u64 x106 = (x104 + x64); + { u64 x107 = (x106 >> 0x19); + { u32 x108 = ((u32)x106 & 0x1ffffff); + { u64 x109 = (x107 + x61); + { u64 x110 = (x109 >> 0x1a); + { u32 x111 = ((u32)x109 & 0x3ffffff); + { u64 x112 = (x110 + x49); + { u64 x113 = (x112 >> 0x19); + { u32 x114 = ((u32)x112 & 0x1ffffff); + { u64 x115 = (x87 + (0x13 * x113)); + { u32 x116 = (u32) (x115 >> 0x1a); + { u32 x117 = ((u32)x115 & 0x3ffffff); + { u32 x118 = (x116 + x90); + { u32 x119 = (x118 >> 0x19); + { u32 x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static __always_inline void fe_mul121666(fe *h, const fe_loose *f) +{ + fe_mul_121666_impl(h->v, f->v); +} + +static void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], + const u8 scalar[CURVE25519_KEY_SIZE], + const u8 point[CURVE25519_KEY_SIZE]) +{ + fe x1, x2, z2, x3, z3; + fe_loose x2l, z2l, x3l; + unsigned swap = 0; + int pos; + u8 e[32]; + + memcpy(e, scalar, 32); + curve25519_clamp_secret(e); + + /* The following implementation was transcribed to Coq and proven to + * correspond to unary scalar multiplication in affine coordinates given + * that x1 != 0 is the x coordinate of some point on the curve. 
It was + * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives + * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was + * quantified over the underlying field, so it applies to Curve25519 + * itself and the quadratic twist of Curve25519. It was not proven in + * Coq that prime-field arithmetic correctly simulates extension-field + * arithmetic on prime-field values. The decoding of the byte array + * representation of e was not considered. + * + * Specification of Montgomery curves in affine coordinates: + * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27> + * + * Proof that these form a group that is isomorphic to a Weierstrass + * curve: + * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35> + * + * Coq transcription and correctness proof of the loop + * (where scalarbits=255): + * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118> + * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278> + * preconditions: 0 <= e < 2^255 (not necessarily e < order), + * fe_invert(0) = 0 + */ + fe_frombytes(&x1, point); + fe_1(&x2); + fe_0(&z2); + fe_copy(&x3, &x1); + fe_1(&z3); + + for (pos = 254; pos >= 0; --pos) { + fe tmp0, tmp1; + fe_loose tmp0l, tmp1l; + /* loop invariant as of right before the test, for the case + * where x1 != 0: + * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 + * is nonzero + * let r := e >> (pos+1) in the following equalities of + * projective points: + * to_xz (r*P) === if swap then (x3, z3) else (x2, z2) + * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3) + * x1 is the nonzero x coordinate of the nonzero + * point (r*P-(r+1)*P) + */ + unsigned b = 1 & (e[pos / 8] >> (pos & 7)); + swap ^= b; + fe_cswap(&x2, &x3, swap); + fe_cswap(&z2, &z3, swap); + swap = b; + /* Coq transcription of ladderstep formula (called from + * transcribed loop): + * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89> + * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131> + * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217> + * x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147> + */ + fe_sub(&tmp0l, &x3, &z3); + fe_sub(&tmp1l, &x2, &z2); + fe_add(&x2l, &x2, &z2); + fe_add(&z2l, &x3, &z3); + fe_mul_tll(&z3, &tmp0l, &x2l); + fe_mul_tll(&z2, &z2l, &tmp1l); + fe_sq_tl(&tmp0, &tmp1l); + fe_sq_tl(&tmp1, &x2l); + fe_add(&x3l, &z3, &z2); + fe_sub(&z2l, &z3, &z2); + fe_mul_ttt(&x2, &tmp1, &tmp0); + fe_sub(&tmp1l, &tmp1, &tmp0); + fe_sq_tl(&z2, &z2l); + fe_mul121666(&z3, &tmp1l); + fe_sq_tl(&x3, &x3l); + fe_add(&tmp0l, &tmp0, &z3); + fe_mul_ttt(&z3, &x1, &z2); + fe_mul_tll(&z2, &tmp1l, &tmp0l); + } + /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) + * else (x2, z2) + */ + fe_cswap(&x2, &x3, swap); + fe_cswap(&z2, &z3, swap); + + fe_invert(&z2, &z2); + fe_mul_ttt(&x2, &x2, &z2); + fe_tobytes(out, &x2); + + memzero_explicit(&x1, sizeof(x1)); + memzero_explicit(&x2, sizeof(x2)); + memzero_explicit(&z2, sizeof(z2)); + memzero_explicit(&x3, sizeof(x3)); + 
memzero_explicit(&z3, sizeof(z3)); + memzero_explicit(&x2l, sizeof(x2l)); + memzero_explicit(&z2l, sizeof(z2l)); + memzero_explicit(&x3l, sizeof(x3l)); + memzero_explicit(&e, sizeof(e)); +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c b/net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c new file mode 100644 index 000000000000..d6dcd0ce1892 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-hacl64.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2016-2017 INRIA and Microsoft Corporation. + * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is a machine-generated formally verified implementation of Curve25519 + * ECDH from: <https://github.com/mitls/hacl-star>. Though originally machine + * generated, it has been tweaked to be suitable for use in the kernel. It is + * optimized for 64-bit machines that can efficiently work with 128-bit + * integer types. + */ + +typedef __uint128_t u128; + +static __always_inline u64 u64_eq_mask(u64 a, u64 b) +{ + u64 x = a ^ b; + u64 minus_x = ~x + (u64)1U; + u64 x_or_minus_x = x | minus_x; + u64 xnx = x_or_minus_x >> (u32)63U; + u64 c = xnx - (u64)1U; + return c; +} + +static __always_inline u64 u64_gte_mask(u64 a, u64 b) +{ + u64 x = a; + u64 y = b; + u64 x_xor_y = x ^ y; + u64 x_sub_y = x - y; + u64 x_sub_y_xor_y = x_sub_y ^ y; + u64 q = x_xor_y | x_sub_y_xor_y; + u64 x_xor_q = x ^ q; + u64 x_xor_q_ = x_xor_q >> (u32)63U; + u64 c = x_xor_q_ - (u64)1U; + return c; +} + +static __always_inline void modulo_carry_top(u64 *b) +{ + u64 b4 = b[4]; + u64 b0 = b[0]; + u64 b4_ = b4 & 0x7ffffffffffffLLU; + u64 b0_ = b0 + 19 * (b4 >> 51); + b[4] = b4_; + b[0] = b0_; +} + +static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input) +{ + { + u128 xi = input[0]; + output[0] = ((u64)(xi)); + } + { + u128 xi = input[1]; + output[1] = ((u64)(xi)); + } + { + u128 xi = input[2]; + output[2] = ((u64)(xi)); + } + { + u128 xi = input[3]; + output[3] = ((u64)(xi)); + } + { + u128 xi = input[4]; + output[4] = ((u64)(xi)); + } +} + +static __always_inline void +fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s) +{ + output[0] += (u128)input[0] * s; + output[1] += (u128)input[1] * s; + output[2] += (u128)input[2] * s; + output[3] += (u128)input[3] * s; + output[4] += (u128)input[4] * s; +} + +static __always_inline void fproduct_carry_wide_(u128 *tmp) +{ + { + u32 ctr = 0; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } + { + u32 ctr = 1; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } + + { + u32 ctr = 2; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } + { + u32 ctr = 3; + u128 tctr = tmp[ctr]; + u128 tctrp1 = tmp[ctr + 1]; + u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; + u128 c = ((tctr) >> (51)); + tmp[ctr] = ((u128)(r0)); + tmp[ctr + 1] = ((tctrp1) + (c)); + } +} + +static __always_inline void fmul_shift_reduce(u64 *output) +{ + u64 tmp = output[4]; + u64 b0; + { + u32 ctr = 5 - 0 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + { + u32 ctr = 5 - 1 - 1; + u64 z = 
output[ctr - 1]; + output[ctr] = z; + } + { + u32 ctr = 5 - 2 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + { + u32 ctr = 5 - 3 - 1; + u64 z = output[ctr - 1]; + output[ctr] = z; + } + output[0] = tmp; + b0 = output[0]; + output[0] = 19 * b0; +} + +static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input, + u64 *input21) +{ + u32 i; + u64 input2i; + { + u64 input2i = input21[0]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + { + u64 input2i = input21[1]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + { + u64 input2i = input21[2]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + { + u64 input2i = input21[3]; + fproduct_sum_scalar_multiplication_(output, input, input2i); + fmul_shift_reduce(input); + } + i = 4; + input2i = input21[i]; + fproduct_sum_scalar_multiplication_(output, input, input2i); +} + +static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21) +{ + u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] }; + { + u128 b4; + u128 b0; + u128 b4_; + u128 b0_; + u64 i0; + u64 i1; + u64 i0_; + u64 i1_; + u128 t[5] = { 0 }; + fmul_mul_shift_reduce_(t, tmp, input21); + fproduct_carry_wide_(t); + b4 = t[4]; + b0 = t[0]; + b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); + b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); + t[4] = b4_; + t[0] = b0_; + fproduct_copy_from_wide_(output, t); + i0 = output[0]; + i1 = output[1]; + i0_ = i0 & 0x7ffffffffffffLLU; + i1_ = i1 + (i0 >> 51); + output[0] = i0_; + output[1] = i1_; + } +} + +static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output) +{ + u64 r0 = output[0]; + u64 r1 = output[1]; + u64 r2 = output[2]; + u64 r3 = output[3]; + u64 r4 = output[4]; + u64 d0 = r0 * 2; + u64 d1 = r1 * 2; + u64 d2 = r2 * 2 * 19; + u64 d419 = r4 * 19; + u64 d4 = d419 * 2; + u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) + + (((u128)(d2) * (r3)))); + u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) + + (((u128)(r3 * 19) * (r3)))); + u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) + + (((u128)(d4) * (r3)))); + u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) + + (((u128)(r4) * (d419)))); + u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) + + (((u128)(r2) * (r2)))); + tmp[0] = s0; + tmp[1] = s1; + tmp[2] = s2; + tmp[3] = s3; + tmp[4] = s4; +} + +static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output) +{ + u128 b4; + u128 b0; + u128 b4_; + u128 b0_; + u64 i0; + u64 i1; + u64 i0_; + u64 i1_; + fsquare_fsquare__(tmp, output); + fproduct_carry_wide_(tmp); + b4 = tmp[4]; + b0 = tmp[0]; + b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); + b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); + tmp[4] = b4_; + tmp[0] = b0_; + fproduct_copy_from_wide_(output, tmp); + i0 = output[0]; + i1 = output[1]; + i0_ = i0 & 0x7ffffffffffffLLU; + i1_ = i1 + (i0 >> 51); + output[0] = i0_; + output[1] = i1_; +} + +static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp, + u32 count1) +{ + u32 i; + fsquare_fsquare_(tmp, output); + for (i = 1; i < count1; ++i) + fsquare_fsquare_(tmp, output); +} + +static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input, + u32 count1) +{ + u128 t[5]; + memcpy(output, input, 5 * sizeof(*input)); + fsquare_fsquare_times_(output, t, count1); +} + +static __always_inline void fsquare_fsquare_times_inplace(u64 *output, + u32 
count1) +{ + u128 t[5]; + fsquare_fsquare_times_(output, t, count1); +} + +static __always_inline void crecip_crecip(u64 *out, u64 *z) +{ + u64 buf[20] = { 0 }; + u64 *a0 = buf; + u64 *t00 = buf + 5; + u64 *b0 = buf + 10; + u64 *t01; + u64 *b1; + u64 *c0; + u64 *a; + u64 *t0; + u64 *b; + u64 *c; + fsquare_fsquare_times(a0, z, 1); + fsquare_fsquare_times(t00, a0, 2); + fmul_fmul(b0, t00, z); + fmul_fmul(a0, b0, a0); + fsquare_fsquare_times(t00, a0, 1); + fmul_fmul(b0, t00, b0); + fsquare_fsquare_times(t00, b0, 5); + t01 = buf + 5; + b1 = buf + 10; + c0 = buf + 15; + fmul_fmul(b1, t01, b1); + fsquare_fsquare_times(t01, b1, 10); + fmul_fmul(c0, t01, b1); + fsquare_fsquare_times(t01, c0, 20); + fmul_fmul(t01, t01, c0); + fsquare_fsquare_times_inplace(t01, 10); + fmul_fmul(b1, t01, b1); + fsquare_fsquare_times(t01, b1, 50); + a = buf; + t0 = buf + 5; + b = buf + 10; + c = buf + 15; + fmul_fmul(c, t0, b); + fsquare_fsquare_times(t0, c, 100); + fmul_fmul(t0, t0, c); + fsquare_fsquare_times_inplace(t0, 50); + fmul_fmul(t0, t0, b); + fsquare_fsquare_times_inplace(t0, 5); + fmul_fmul(out, t0, a); +} + +static __always_inline void fsum(u64 *a, u64 *b) +{ + a[0] += b[0]; + a[1] += b[1]; + a[2] += b[2]; + a[3] += b[3]; + a[4] += b[4]; +} + +static __always_inline void fdifference(u64 *a, u64 *b) +{ + u64 tmp[5] = { 0 }; + u64 b0; + u64 b1; + u64 b2; + u64 b3; + u64 b4; + memcpy(tmp, b, 5 * sizeof(*b)); + b0 = tmp[0]; + b1 = tmp[1]; + b2 = tmp[2]; + b3 = tmp[3]; + b4 = tmp[4]; + tmp[0] = b0 + 0x3fffffffffff68LLU; + tmp[1] = b1 + 0x3ffffffffffff8LLU; + tmp[2] = b2 + 0x3ffffffffffff8LLU; + tmp[3] = b3 + 0x3ffffffffffff8LLU; + tmp[4] = b4 + 0x3ffffffffffff8LLU; + { + u64 xi = a[0]; + u64 yi = tmp[0]; + a[0] = yi - xi; + } + { + u64 xi = a[1]; + u64 yi = tmp[1]; + a[1] = yi - xi; + } + { + u64 xi = a[2]; + u64 yi = tmp[2]; + a[2] = yi - xi; + } + { + u64 xi = a[3]; + u64 yi = tmp[3]; + a[3] = yi - xi; + } + { + u64 xi = a[4]; + u64 yi = tmp[4]; + a[4] = yi - xi; + } +} + +static __always_inline void fscalar(u64 *output, u64 *b, u64 s) +{ + u128 tmp[5]; + u128 b4; + u128 b0; + u128 b4_; + u128 b0_; + { + u64 xi = b[0]; + tmp[0] = ((u128)(xi) * (s)); + } + { + u64 xi = b[1]; + tmp[1] = ((u128)(xi) * (s)); + } + { + u64 xi = b[2]; + tmp[2] = ((u128)(xi) * (s)); + } + { + u64 xi = b[3]; + tmp[3] = ((u128)(xi) * (s)); + } + { + u64 xi = b[4]; + tmp[4] = ((u128)(xi) * (s)); + } + fproduct_carry_wide_(tmp); + b4 = tmp[4]; + b0 = tmp[0]; + b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); + b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); + tmp[4] = b4_; + tmp[0] = b0_; + fproduct_copy_from_wide_(output, tmp); +} + +static __always_inline void crecip(u64 *output, u64 *input) +{ + crecip_crecip(output, input); +} + +static __always_inline void point_swap_conditional_step(u64 *a, u64 *b, + u64 swap1, u32 ctr) +{ + u32 i = ctr - 1; + u64 ai = a[i]; + u64 bi = b[i]; + u64 x = swap1 & (ai ^ bi); + u64 ai1 = ai ^ x; + u64 bi1 = bi ^ x; + a[i] = ai1; + b[i] = bi1; +} + +static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1) +{ + point_swap_conditional_step(a, b, swap1, 5); + point_swap_conditional_step(a, b, swap1, 4); + point_swap_conditional_step(a, b, swap1, 3); + point_swap_conditional_step(a, b, swap1, 2); + point_swap_conditional_step(a, b, swap1, 1); +} + +static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap) +{ + u64 swap1 = 0 - iswap; + point_swap_conditional5(a, b, swap1); + point_swap_conditional5(a + 5, b + 5, swap1); +} + +static __always_inline void 
point_copy(u64 *output, u64 *input) +{ + memcpy(output, input, 5 * sizeof(*input)); + memcpy(output + 5, input + 5, 5 * sizeof(*input)); +} + +static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p, + u64 *pq, u64 *qmqp) +{ + u64 *qx = qmqp; + u64 *x2 = pp; + u64 *z2 = pp + 5; + u64 *x3 = ppq; + u64 *z3 = ppq + 5; + u64 *x = p; + u64 *z = p + 5; + u64 *xprime = pq; + u64 *zprime = pq + 5; + u64 buf[40] = { 0 }; + u64 *origx = buf; + u64 *origxprime0 = buf + 5; + u64 *xxprime0; + u64 *zzprime0; + u64 *origxprime; + xxprime0 = buf + 25; + zzprime0 = buf + 30; + memcpy(origx, x, 5 * sizeof(*x)); + fsum(x, z); + fdifference(z, origx); + memcpy(origxprime0, xprime, 5 * sizeof(*xprime)); + fsum(xprime, zprime); + fdifference(zprime, origxprime0); + fmul_fmul(xxprime0, xprime, z); + fmul_fmul(zzprime0, x, zprime); + origxprime = buf + 5; + { + u64 *xx0; + u64 *zz0; + u64 *xxprime; + u64 *zzprime; + u64 *zzzprime; + xx0 = buf + 15; + zz0 = buf + 20; + xxprime = buf + 25; + zzprime = buf + 30; + zzzprime = buf + 35; + memcpy(origxprime, xxprime, 5 * sizeof(*xxprime)); + fsum(xxprime, zzprime); + fdifference(zzprime, origxprime); + fsquare_fsquare_times(x3, xxprime, 1); + fsquare_fsquare_times(zzzprime, zzprime, 1); + fmul_fmul(z3, zzzprime, qx); + fsquare_fsquare_times(xx0, x, 1); + fsquare_fsquare_times(zz0, z, 1); + { + u64 *zzz; + u64 *xx; + u64 *zz; + u64 scalar; + zzz = buf + 10; + xx = buf + 15; + zz = buf + 20; + fmul_fmul(x2, xx, zz); + fdifference(zz, xx); + scalar = 121665; + fscalar(zzz, zz, scalar); + fsum(zzz, xx); + fmul_fmul(z2, zzz, zz); + } + } +} + +static __always_inline void +ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, + u64 *q, u8 byt) +{ + u64 bit0 = (u64)(byt >> 7); + u64 bit; + point_swap_conditional(nq, nqpq, bit0); + addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q); + bit = (u64)(byt >> 7); + point_swap_conditional(nq2, nqpq2, bit); +} + +static __always_inline void +ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, + u64 *nqpq2, u64 *q, u8 byt) +{ + u8 byt1; + ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt); + byt1 = byt << 1; + ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1); +} + +static __always_inline void +ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, + u64 *q, u8 byt, u32 i) +{ + while (i--) { + ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2, + nqpq2, q, byt); + byt <<= 2; + } +} + +static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, + u64 *nqpq, u64 *nq2, + u64 *nqpq2, u64 *q, + u32 i) +{ + while (i--) { + u8 byte = n1[i]; + ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, + byte, 4); + } +} + +static void ladder_cmult(u64 *result, u8 *n1, u64 *q) +{ + u64 point_buf[40] = { 0 }; + u64 *nq = point_buf; + u64 *nqpq = point_buf + 10; + u64 *nq2 = point_buf + 20; + u64 *nqpq2 = point_buf + 30; + point_copy(nqpq, q); + nq[0] = 1; + ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32); + point_copy(result, nq); +} + +static __always_inline void format_fexpand(u64 *output, const u8 *input) +{ + const u8 *x00 = input + 6; + const u8 *x01 = input + 12; + const u8 *x02 = input + 19; + const u8 *x0 = input + 24; + u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4; + i0 = get_unaligned_le64(input); + i1 = get_unaligned_le64(x00); + i2 = get_unaligned_le64(x01); + i3 = get_unaligned_le64(x02); + i4 = get_unaligned_le64(x0); + output0 = i0 & 
0x7ffffffffffffLLU; + output1 = i1 >> 3 & 0x7ffffffffffffLLU; + output2 = i2 >> 6 & 0x7ffffffffffffLLU; + output3 = i3 >> 1 & 0x7ffffffffffffLLU; + output4 = i4 >> 12 & 0x7ffffffffffffLLU; + output[0] = output0; + output[1] = output1; + output[2] = output2; + output[3] = output3; + output[4] = output4; +} + +static __always_inline void format_fcontract_first_carry_pass(u64 *input) +{ + u64 t0 = input[0]; + u64 t1 = input[1]; + u64 t2 = input[2]; + u64 t3 = input[3]; + u64 t4 = input[4]; + u64 t1_ = t1 + (t0 >> 51); + u64 t0_ = t0 & 0x7ffffffffffffLLU; + u64 t2_ = t2 + (t1_ >> 51); + u64 t1__ = t1_ & 0x7ffffffffffffLLU; + u64 t3_ = t3 + (t2_ >> 51); + u64 t2__ = t2_ & 0x7ffffffffffffLLU; + u64 t4_ = t4 + (t3_ >> 51); + u64 t3__ = t3_ & 0x7ffffffffffffLLU; + input[0] = t0_; + input[1] = t1__; + input[2] = t2__; + input[3] = t3__; + input[4] = t4_; +} + +static __always_inline void format_fcontract_first_carry_full(u64 *input) +{ + format_fcontract_first_carry_pass(input); + modulo_carry_top(input); +} + +static __always_inline void format_fcontract_second_carry_pass(u64 *input) +{ + u64 t0 = input[0]; + u64 t1 = input[1]; + u64 t2 = input[2]; + u64 t3 = input[3]; + u64 t4 = input[4]; + u64 t1_ = t1 + (t0 >> 51); + u64 t0_ = t0 & 0x7ffffffffffffLLU; + u64 t2_ = t2 + (t1_ >> 51); + u64 t1__ = t1_ & 0x7ffffffffffffLLU; + u64 t3_ = t3 + (t2_ >> 51); + u64 t2__ = t2_ & 0x7ffffffffffffLLU; + u64 t4_ = t4 + (t3_ >> 51); + u64 t3__ = t3_ & 0x7ffffffffffffLLU; + input[0] = t0_; + input[1] = t1__; + input[2] = t2__; + input[3] = t3__; + input[4] = t4_; +} + +static __always_inline void format_fcontract_second_carry_full(u64 *input) +{ + u64 i0; + u64 i1; + u64 i0_; + u64 i1_; + format_fcontract_second_carry_pass(input); + modulo_carry_top(input); + i0 = input[0]; + i1 = input[1]; + i0_ = i0 & 0x7ffffffffffffLLU; + i1_ = i1 + (i0 >> 51); + input[0] = i0_; + input[1] = i1_; +} + +static __always_inline void format_fcontract_trim(u64 *input) +{ + u64 a0 = input[0]; + u64 a1 = input[1]; + u64 a2 = input[2]; + u64 a3 = input[3]; + u64 a4 = input[4]; + u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU); + u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU); + u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU); + u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU); + u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU); + u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4; + u64 a0_ = a0 - (0x7ffffffffffedLLU & mask); + u64 a1_ = a1 - (0x7ffffffffffffLLU & mask); + u64 a2_ = a2 - (0x7ffffffffffffLLU & mask); + u64 a3_ = a3 - (0x7ffffffffffffLLU & mask); + u64 a4_ = a4 - (0x7ffffffffffffLLU & mask); + input[0] = a0_; + input[1] = a1_; + input[2] = a2_; + input[3] = a3_; + input[4] = a4_; +} + +static __always_inline void format_fcontract_store(u8 *output, u64 *input) +{ + u64 t0 = input[0]; + u64 t1 = input[1]; + u64 t2 = input[2]; + u64 t3 = input[3]; + u64 t4 = input[4]; + u64 o0 = t1 << 51 | t0; + u64 o1 = t2 << 38 | t1 >> 13; + u64 o2 = t3 << 25 | t2 >> 26; + u64 o3 = t4 << 12 | t3 >> 39; + u8 *b0 = output; + u8 *b1 = output + 8; + u8 *b2 = output + 16; + u8 *b3 = output + 24; + put_unaligned_le64(o0, b0); + put_unaligned_le64(o1, b1); + put_unaligned_le64(o2, b2); + put_unaligned_le64(o3, b3); +} + +static __always_inline void format_fcontract(u8 *output, u64 *input) +{ + format_fcontract_first_carry_full(input); + format_fcontract_second_carry_full(input); + format_fcontract_trim(input); + format_fcontract_store(output, input); +} + +static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point) +{ + u64 
*x = point; + u64 *z = point + 5; + u64 buf[10] __aligned(32) = { 0 }; + u64 *zmone = buf; + u64 *sc = buf + 5; + crecip(zmone, z); + fmul_fmul(sc, x, zmone); + format_fcontract(scalar, sc); +} + +static void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + u64 buf0[10] __aligned(32) = { 0 }; + u64 *x0 = buf0; + u64 *z = buf0 + 5; + u64 *q; + format_fexpand(x0, basepoint); + z[0] = 1; + q = buf0; + { + u8 e[32] __aligned(32) = { 0 }; + u8 *scalar; + memcpy(e, secret, 32); + curve25519_clamp_secret(e); + scalar = e; + { + u64 buf[15] = { 0 }; + u64 *nq = buf; + u64 *x = nq; + x[0] = 1; + ladder_cmult(nq, scalar, q); + format_scalar_of_point(mypublic, nq); + memzero_explicit(buf, sizeof(buf)); + } + memzero_explicit(e, sizeof(e)); + } + memzero_explicit(buf0, sizeof(buf0)); +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c new file mode 100644 index 000000000000..e08cc2ba74f3 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64-glue.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/cpufeature.h> +#include <asm/processor.h> + +#include "curve25519-x86_64.c" + +static bool curve25519_use_bmi2_adx __ro_after_init; +static bool *const curve25519_nobs[] __initconst = { + &curve25519_use_bmi2_adx }; + +static void __init curve25519_fpu_init(void) +{ + curve25519_use_bmi2_adx = IS_ENABLED(CONFIG_AS_BMI2) && + IS_ENABLED(CONFIG_AS_ADX) && + boot_cpu_has(X86_FEATURE_BMI2) && + boot_cpu_has(X86_FEATURE_ADX); +} + +static inline bool curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + if (IS_ENABLED(CONFIG_AS_ADX) && IS_ENABLED(CONFIG_AS_BMI2) && + curve25519_use_bmi2_adx) { + curve25519_ever64(mypublic, secret, basepoint); + return true; + } + return false; +} + +static inline bool curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + if (IS_ENABLED(CONFIG_AS_ADX) && IS_ENABLED(CONFIG_AS_BMI2) && + curve25519_use_bmi2_adx) { + curve25519_ever64_base(pub, secret); + return true; + } + return false; +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c new file mode 100644 index 000000000000..8b6872a2f0d0 --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c @@ -0,0 +1,1580 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation + */ + +static __always_inline u64 eq_mask(u64 a, u64 b) +{ + u64 x = a ^ b; + u64 minus_x = ~x + (u64)1U; + u64 x_or_minus_x = x | minus_x; + u64 xnx = x_or_minus_x >> (u32)63U; + return xnx - (u64)1U; +} + +static __always_inline u64 gte_mask(u64 a, u64 b) +{ + u64 x = a; + u64 y = b; + u64 x_xor_y = x ^ y; + u64 x_sub_y = x - y; + u64 x_sub_y_xor_y = x_sub_y ^ y; + u64 q = x_xor_y | x_sub_y_xor_y; + u64 x_xor_q = x ^ q; + u64 x_xor_q_ = x_xor_q >> (u32)63U; + return x_xor_q_ - (u64)1U; +} + +/* Computes the addition of four-element f1 with value in f2 + * and returns the carry (if any) */ +static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2) +{ + u64 carry_r; + + asm volatile( + /* Clear registers to propagate the carry bit */ + " xor %%r8d, %%r8d;" + " xor %%r9d, %%r9d;" + " xor %%r10d, %%r10d;" + " xor %%r11d, %%r11d;" + " xor %k1, %k1;" + + /* Begin addition chain */ + " addq 0(%3), %0;" + " movq %0, 0(%2);" + " adcxq 8(%3), %%r8;" + " movq %%r8, 8(%2);" + " adcxq 16(%3), %%r9;" + " movq %%r9, 16(%2);" + " adcxq 24(%3), %%r10;" + " movq %%r10, 24(%2);" + + /* Return the carry bit in a register */ + " adcx %%r11, %1;" + : "+&r"(f2), "=&r"(carry_r) + : "r"(out), "r"(f1) + : "%r8", "%r9", "%r10", "%r11", "memory", "cc"); + + return carry_r; +} + +/* Computes the field addition of two field elements */ +static inline void fadd(u64 *out, const u64 *f1, const u64 *f2) +{ + asm volatile( + /* Compute the raw addition of f1 + f2 */ + " movq 0(%0), %%r8;" + " addq 0(%2), %%r8;" + " movq 8(%0), %%r9;" + " adcxq 8(%2), %%r9;" + " movq 16(%0), %%r10;" + " adcxq 16(%2), %%r10;" + " movq 24(%0), %%r11;" + " adcxq 24(%2), %%r11;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute carry*38 */ + " mov $0, %%rax;" + " mov $38, %0;" + " cmovc %0, %%rax;" + + /* Step 2: Add carry*38 to the original sum */ + " xor %%ecx, %%ecx;" + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %0, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + : "+&r"(f2) + : "r"(out), "r"(f1) + : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"); +} + +/* Computes the field subtraction of two field elements */ +static inline void fsub(u64 *out, const u64 *f1, const u64 *f2) +{ + asm volatile( + /* Compute the raw subtraction of f1-f2 */ + " movq 0(%1), %%r8;" + " subq 0(%2), %%r8;" + " movq 8(%1), %%r9;" + " sbbq 8(%2), %%r9;" + " movq 16(%1), %%r10;" + " sbbq 16(%2), %%r10;" + " movq 24(%1), %%r11;" + " sbbq 24(%2), %%r11;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute carry*38 */ + " mov $0, %%rax;" + " mov $38, %%rcx;" + " cmovc %%rcx, %%rax;" + + /* Step 2: Subtract carry*38 from the original difference */ + " sub %%rax, %%r8;" + " sbb $0, %%r9;" + " sbb $0, %%r10;" + " sbb $0, %%r11;" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rcx, %%rax;" + " sub %%rax, %%r8;" + + /* Store the result */ + " movq %%r8, 0(%0);" + " movq %%r9, 8(%0);" + " movq %%r10, 16(%0);" + " movq %%r11, 24(%0);" + : + : "r"(out), "r"(f1), "r"(f2) + : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"); +} + +/* Computes a field multiplication: out <- f1 * f2 + * Uses the 8-element buffer tmp for intermediate results */ 
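+/* Editorial note (not in the upstream sources): in fadd, fsub and the
+ * multipliers below, the "wrap the result back into the field" steps fold
+ * everything at or above bit 256 back into the low 256 bits with a multiply
+ * by 38, using the identity 2^256 == 38 (mod 2^255 - 19); a final
+ * conditional add of 38 absorbs the last carry so the result fits in four
+ * 64-bit limbs (it is kept "almost reduced", not fully reduced below p).
+ */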
+static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) +{ + asm volatile( + + /* Compute the raw multiplication: tmp <- src1 * src2 */ + + /* Compute src1[0] * src2 */ + " movq 0(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " movq %%r8, 0(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " movq %%r10, 8(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + + /* Compute src1[1] * src2 */ + " movq 8(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 8(%2), %%r8;" + " movq %%r8, 8(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 16(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[2] * src2 */ + " movq 16(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 16(%2), %%r8;" + " movq %%r8, 16(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 24(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[3] * src2 */ + " movq 24(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 24(%2), %%r8;" + " movq %%r8, 24(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 32(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " movq %%rbx, 40(%2);" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " movq %%r14, 48(%2);" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + " movq %%rax, 56(%2);" + + /* Line up pointers */ + " mov %2, %0;" + " mov %3, %2;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %k1, %k1;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %1, %%rax;" + " adox %1, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %1, %%r9;" + " movq %%r9, 8(%2);" + " adcx %1, %%r10;" + " movq %%r10, 16(%2);" + " adcx %1, %%r11;" + " movq %%r11, 24(%2);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%2);" + : "+&r"(f1), "+&r"(f2), "+&r"(tmp) + : "r"(out) + : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", + "%r14", "memory", "cc"); +} + +/* Computes two field multiplications: + * out[0] <- f1[0] * f2[0] + * out[1] <- f1[1] * f2[1] + * Uses the 16-element buffer tmp for intermediate results: */ +static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) +{ + asm volatile( + + /* Compute the raw multiplication tmp[0] <- f1[0] * 
f2[0] */ + + /* Compute src1[0] * src2 */ + " movq 0(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " movq %%r8, 0(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " movq %%r10, 8(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + + /* Compute src1[1] * src2 */ + " movq 8(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 8(%2), %%r8;" + " movq %%r8, 8(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 16(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[2] * src2 */ + " movq 16(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 16(%2), %%r8;" + " movq %%r8, 16(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 24(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[3] * src2 */ + " movq 24(%0), %%rdx;" + " mulxq 0(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 24(%2), %%r8;" + " movq %%r8, 24(%2);" + " mulxq 8(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 32(%2);" + " mulxq 16(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " movq %%rbx, 40(%2);" + " mov $0, %%r8;" + " mulxq 24(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " movq %%r14, 48(%2);" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + " movq %%rax, 56(%2);" + + /* Compute the raw multiplication tmp[1] <- f1[1] * f2[1] */ + + /* Compute src1[0] * src2 */ + " movq 32(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " movq %%r8, 64(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " movq %%r10, 72(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + + /* Compute src1[1] * src2 */ + " movq 40(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 72(%2), %%r8;" + " movq %%r8, 72(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 80(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[2] * src2 */ + " movq 48(%0), %%rdx;" + " mulxq 32(%1), %%r8, %%r9;" + " xor %%r10d, %%r10d;" + " adcxq 80(%2), %%r8;" + " movq %%r8, 80(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 88(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " mov $0, %%r8;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + + /* Compute src1[3] * src2 */ + " movq 56(%0), %%rdx;" + " mulxq 32(%1), %%r8, 
%%r9;" + " xor %%r10d, %%r10d;" + " adcxq 88(%2), %%r8;" + " movq %%r8, 88(%2);" + " mulxq 40(%1), %%r10, %%r11;" + " adox %%r9, %%r10;" + " adcx %%rbx, %%r10;" + " movq %%r10, 96(%2);" + " mulxq 48(%1), %%rbx, %%r13;" + " adox %%r11, %%rbx;" + " adcx %%r14, %%rbx;" + " movq %%rbx, 104(%2);" + " mov $0, %%r8;" + " mulxq 56(%1), %%r14, %%rdx;" + " adox %%r13, %%r14;" + " adcx %%rax, %%r14;" + " movq %%r14, 112(%2);" + " mov $0, %%rax;" + " adox %%rdx, %%rax;" + " adcx %%r8, %%rax;" + " movq %%rax, 120(%2);" + + /* Line up pointers */ + " mov %2, %0;" + " mov %3, %2;" + + /* Wrap the results back into the field */ + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %k1, %k1;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %1, %%rax;" + " adox %1, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %1, %%r9;" + " movq %%r9, 8(%2);" + " adcx %1, %%r10;" + " movq %%r10, 16(%2);" + " adcx %1, %%r11;" + " movq %%r11, 24(%2);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%2);" + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 96(%0), %%r8, %%r13;" + " xor %k1, %k1;" + " adoxq 64(%0), %%r8;" + " mulxq 104(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 72(%0), %%r9;" + " mulxq 112(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 80(%0), %%r10;" + " mulxq 120(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 88(%0), %%r11;" + " adcx %1, %%rax;" + " adox %1, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %1, %%r9;" + " movq %%r9, 40(%2);" + " adcx %1, %%r10;" + " movq %%r10, 48(%2);" + " adcx %1, %%r11;" + " movq %%r11, 56(%2);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 32(%2);" + : "+&r"(f1), "+&r"(f2), "+&r"(tmp) + : "r"(out) + : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", + "%r14", "memory", "cc"); +} + +/* Computes the field multiplication of four-element f1 with value in f2 + * Requires f2 to be smaller than 2^17 */ +static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2) +{ + register u64 f2_r asm("rdx") = f2; + + asm volatile( + /* Compute the raw multiplication of f1*f2 */ + " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */ + " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */ + " add %%rcx, %%r9;" + " mov $0, %%rcx;" + " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */ + " adcx %%rbx, %%r10;" + " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */ + " adcx %%r13, %%r11;" + " adcx %%rcx, %%rax;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute carry*38 */ + " mov $38, %%rdx;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " 
movq %%r8, 0(%1);" + : "+&r"(f2_r) + : "r"(out), "r"(f1) + : "%rax", "%rbx", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r13", + "memory", "cc"); +} + +/* Computes p1 <- bit ? p2 : p1 in constant time */ +static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2) +{ + asm volatile( + /* Transfer bit into CF flag */ + " add $18446744073709551615, %0;" + + /* cswap p1[0], p2[0] */ + " movq 0(%1), %%r8;" + " movq 0(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 0(%1);" + " movq %%r9, 0(%2);" + + /* cswap p1[1], p2[1] */ + " movq 8(%1), %%r8;" + " movq 8(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 8(%1);" + " movq %%r9, 8(%2);" + + /* cswap p1[2], p2[2] */ + " movq 16(%1), %%r8;" + " movq 16(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 16(%1);" + " movq %%r9, 16(%2);" + + /* cswap p1[3], p2[3] */ + " movq 24(%1), %%r8;" + " movq 24(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 24(%1);" + " movq %%r9, 24(%2);" + + /* cswap p1[4], p2[4] */ + " movq 32(%1), %%r8;" + " movq 32(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 32(%1);" + " movq %%r9, 32(%2);" + + /* cswap p1[5], p2[5] */ + " movq 40(%1), %%r8;" + " movq 40(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 40(%1);" + " movq %%r9, 40(%2);" + + /* cswap p1[6], p2[6] */ + " movq 48(%1), %%r8;" + " movq 48(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 48(%1);" + " movq %%r9, 48(%2);" + + /* cswap p1[7], p2[7] */ + " movq 56(%1), %%r8;" + " movq 56(%2), %%r9;" + " mov %%r8, %%r10;" + " cmovc %%r9, %%r8;" + " cmovc %%r10, %%r9;" + " movq %%r8, 56(%1);" + " movq %%r9, 56(%2);" + : "+&r"(bit) + : "r"(p1), "r"(p2) + : "%r8", "%r9", "%r10", "memory", "cc"); +} + +/* Computes the square of a field element: out <- f * f + * Uses the 8-element buffer tmp for intermediate results */ +static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) +{ + asm volatile( + /* Compute the raw multiplication: tmp <- f * f */ + + /* Step 1: Compute all partial products */ + " movq 0(%0), %%rdx;" /* f[0] */ + " mulxq 8(%0), %%r8, %%r14;" + " xor %%r15d, %%r15d;" /* f[1]*f[0] */ + " mulxq 16(%0), %%r9, %%r10;" + " adcx %%r14, %%r9;" /* f[2]*f[0] */ + " mulxq 24(%0), %%rax, %%rcx;" + " adcx %%rax, %%r10;" /* f[3]*f[0] */ + " movq 24(%0), %%rdx;" /* f[3] */ + " mulxq 8(%0), %%r11, %%rbx;" + " adcx %%rcx, %%r11;" /* f[1]*f[3] */ + " mulxq 16(%0), %%rax, %%r13;" + " adcx %%rax, %%rbx;" /* f[2]*f[3] */ + " movq 8(%0), %%rdx;" + " adcx %%r15, %%r13;" /* f1 */ + " mulxq 16(%0), %%rax, %%rcx;" + " mov $0, %%r14;" /* f[2]*f[1] */ + + /* Step 2: Compute two parallel carry chains */ + " xor %%r15d, %%r15d;" + " adox %%rax, %%r10;" + " adcx %%r8, %%r8;" + " adox %%rcx, %%r11;" + " adcx %%r9, %%r9;" + " adox %%r15, %%rbx;" + " adcx %%r10, %%r10;" + " adox %%r15, %%r13;" + " adcx %%r11, %%r11;" + " adox %%r15, %%r14;" + " adcx %%rbx, %%rbx;" + " adcx %%r13, %%r13;" + " adcx %%r14, %%r14;" + + /* Step 3: Compute intermediate squares */ + " movq 0(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ + " movq %%rax, 0(%1);" + " add %%rcx, %%r8;" + " movq %%r8, 8(%1);" + " movq 8(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ + " adcx %%rax, %%r9;" + " movq %%r9, 16(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 24(%1);" + " 
movq 16(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ + " adcx %%rax, %%r11;" + " movq %%r11, 32(%1);" + " adcx %%rcx, %%rbx;" + " movq %%rbx, 40(%1);" + " movq 24(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ + " adcx %%rax, %%r13;" + " movq %%r13, 48(%1);" + " adcx %%rcx, %%r14;" + " movq %%r14, 56(%1);" + + /* Line up pointers */ + " mov %1, %0;" + " mov %2, %1;" + + /* Wrap the result back into the field */ + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %%ecx, %%ecx;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %%rcx, %%rax;" + " adox %%rcx, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + : "+&r,&r"(f), "+&r,&r"(tmp) + : "r,m"(out) + : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", + "%r13", "%r14", "%r15", "memory", "cc"); +} + +/* Computes two field squarings: + * out[0] <- f[0] * f[0] + * out[1] <- f[1] * f[1] + * Uses the 16-element buffer tmp for intermediate results */ +static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) +{ + asm volatile( + /* Step 1: Compute all partial products */ + " movq 0(%0), %%rdx;" /* f[0] */ + " mulxq 8(%0), %%r8, %%r14;" + " xor %%r15d, %%r15d;" /* f[1]*f[0] */ + " mulxq 16(%0), %%r9, %%r10;" + " adcx %%r14, %%r9;" /* f[2]*f[0] */ + " mulxq 24(%0), %%rax, %%rcx;" + " adcx %%rax, %%r10;" /* f[3]*f[0] */ + " movq 24(%0), %%rdx;" /* f[3] */ + " mulxq 8(%0), %%r11, %%rbx;" + " adcx %%rcx, %%r11;" /* f[1]*f[3] */ + " mulxq 16(%0), %%rax, %%r13;" + " adcx %%rax, %%rbx;" /* f[2]*f[3] */ + " movq 8(%0), %%rdx;" + " adcx %%r15, %%r13;" /* f1 */ + " mulxq 16(%0), %%rax, %%rcx;" + " mov $0, %%r14;" /* f[2]*f[1] */ + + /* Step 2: Compute two parallel carry chains */ + " xor %%r15d, %%r15d;" + " adox %%rax, %%r10;" + " adcx %%r8, %%r8;" + " adox %%rcx, %%r11;" + " adcx %%r9, %%r9;" + " adox %%r15, %%rbx;" + " adcx %%r10, %%r10;" + " adox %%r15, %%r13;" + " adcx %%r11, %%r11;" + " adox %%r15, %%r14;" + " adcx %%rbx, %%rbx;" + " adcx %%r13, %%r13;" + " adcx %%r14, %%r14;" + + /* Step 3: Compute intermediate squares */ + " movq 0(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ + " movq %%rax, 0(%1);" + " add %%rcx, %%r8;" + " movq %%r8, 8(%1);" + " movq 8(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ + " adcx %%rax, %%r9;" + " movq %%r9, 16(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 24(%1);" + " movq 16(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ + " adcx %%rax, %%r11;" + " movq %%r11, 32(%1);" + " adcx %%rcx, %%rbx;" + " movq %%rbx, 40(%1);" + " movq 24(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ + " adcx %%rax, %%r13;" + " movq %%r13, 48(%1);" + " adcx %%rcx, %%r14;" + " movq %%r14, 56(%1);" + + /* Step 1: Compute all partial products */ + " movq 32(%0), %%rdx;" /* f[0] */ + " mulxq 40(%0), %%r8, %%r14;" + " xor %%r15d, %%r15d;" /* f[1]*f[0] */ + " mulxq 48(%0), %%r9, %%r10;" + " adcx %%r14, %%r9;" /* 
f[2]*f[0] */ + " mulxq 56(%0), %%rax, %%rcx;" + " adcx %%rax, %%r10;" /* f[3]*f[0] */ + " movq 56(%0), %%rdx;" /* f[3] */ + " mulxq 40(%0), %%r11, %%rbx;" + " adcx %%rcx, %%r11;" /* f[1]*f[3] */ + " mulxq 48(%0), %%rax, %%r13;" + " adcx %%rax, %%rbx;" /* f[2]*f[3] */ + " movq 40(%0), %%rdx;" + " adcx %%r15, %%r13;" /* f1 */ + " mulxq 48(%0), %%rax, %%rcx;" + " mov $0, %%r14;" /* f[2]*f[1] */ + + /* Step 2: Compute two parallel carry chains */ + " xor %%r15d, %%r15d;" + " adox %%rax, %%r10;" + " adcx %%r8, %%r8;" + " adox %%rcx, %%r11;" + " adcx %%r9, %%r9;" + " adox %%r15, %%rbx;" + " adcx %%r10, %%r10;" + " adox %%r15, %%r13;" + " adcx %%r11, %%r11;" + " adox %%r15, %%r14;" + " adcx %%rbx, %%rbx;" + " adcx %%r13, %%r13;" + " adcx %%r14, %%r14;" + + /* Step 3: Compute intermediate squares */ + " movq 32(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ + " movq %%rax, 64(%1);" + " add %%rcx, %%r8;" + " movq %%r8, 72(%1);" + " movq 40(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ + " adcx %%rax, %%r9;" + " movq %%r9, 80(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 88(%1);" + " movq 48(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ + " adcx %%rax, %%r11;" + " movq %%r11, 96(%1);" + " adcx %%rcx, %%rbx;" + " movq %%rbx, 104(%1);" + " movq 56(%0), %%rdx;" + " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ + " adcx %%rax, %%r13;" + " movq %%r13, 112(%1);" + " adcx %%rcx, %%r14;" + " movq %%r14, 120(%1);" + + /* Line up pointers */ + " mov %1, %0;" + " mov %2, %1;" + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 32(%0), %%r8, %%r13;" + " xor %%ecx, %%ecx;" + " adoxq 0(%0), %%r8;" + " mulxq 40(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 8(%0), %%r9;" + " mulxq 48(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 16(%0), %%r10;" + " mulxq 56(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 24(%0), %%r11;" + " adcx %%rcx, %%rax;" + " adox %%rcx, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 8(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 16(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 24(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 0(%1);" + + /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ + " mov $38, %%rdx;" + " mulxq 96(%0), %%r8, %%r13;" + " xor %%ecx, %%ecx;" + " adoxq 64(%0), %%r8;" + " mulxq 104(%0), %%r9, %%rbx;" + " adcx %%r13, %%r9;" + " adoxq 72(%0), %%r9;" + " mulxq 112(%0), %%r10, %%r13;" + " adcx %%rbx, %%r10;" + " adoxq 80(%0), %%r10;" + " mulxq 120(%0), %%r11, %%rax;" + " adcx %%r13, %%r11;" + " adoxq 88(%0), %%r11;" + " adcx %%rcx, %%rax;" + " adox %%rcx, %%rax;" + " imul %%rdx, %%rax;" + + /* Step 2: Fold the carry back into dst */ + " add %%rax, %%r8;" + " adcx %%rcx, %%r9;" + " movq %%r9, 40(%1);" + " adcx %%rcx, %%r10;" + " movq %%r10, 48(%1);" + " adcx %%rcx, %%r11;" + " movq %%r11, 56(%1);" + + /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ + " mov $0, %%rax;" + " cmovc %%rdx, %%rax;" + " add %%rax, %%r8;" + " movq %%r8, 32(%1);" + : "+&r,&r"(f), "+&r,&r"(tmp) + : "r,m"(out) + : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", + "%r13", "%r14", "%r15", "memory", "cc"); +} + +static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2) +{ + u64 *nq = p01_tmp1; + u64 *nq_p1 = p01_tmp1 + (u32)8U; + u64 *tmp1 = 
p01_tmp1 + (u32)16U; + u64 *x1 = q; + u64 *x2 = nq; + u64 *z2 = nq + (u32)4U; + u64 *z3 = nq_p1 + (u32)4U; + u64 *a = tmp1; + u64 *b = tmp1 + (u32)4U; + u64 *ab = tmp1; + u64 *dc = tmp1 + (u32)8U; + u64 *x3; + u64 *z31; + u64 *d0; + u64 *c0; + u64 *a1; + u64 *b1; + u64 *d; + u64 *c; + u64 *ab1; + u64 *dc1; + fadd(a, x2, z2); + fsub(b, x2, z2); + x3 = nq_p1; + z31 = nq_p1 + (u32)4U; + d0 = dc; + c0 = dc + (u32)4U; + fadd(c0, x3, z31); + fsub(d0, x3, z31); + fmul2(dc, dc, ab, tmp2); + fadd(x3, d0, c0); + fsub(z31, d0, c0); + a1 = tmp1; + b1 = tmp1 + (u32)4U; + d = tmp1 + (u32)8U; + c = tmp1 + (u32)12U; + ab1 = tmp1; + dc1 = tmp1 + (u32)8U; + fsqr2(dc1, ab1, tmp2); + fsqr2(nq_p1, nq_p1, tmp2); + a1[0U] = c[0U]; + a1[1U] = c[1U]; + a1[2U] = c[2U]; + a1[3U] = c[3U]; + fsub(c, d, c); + fmul_scalar(b1, c, (u64)121665U); + fadd(b1, b1, d); + fmul2(nq, dc1, ab1, tmp2); + fmul(z3, z3, x1, tmp2); +} + +static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2) +{ + u64 *x2 = nq; + u64 *z2 = nq + (u32)4U; + u64 *a = tmp1; + u64 *b = tmp1 + (u32)4U; + u64 *d = tmp1 + (u32)8U; + u64 *c = tmp1 + (u32)12U; + u64 *ab = tmp1; + u64 *dc = tmp1 + (u32)8U; + fadd(a, x2, z2); + fsub(b, x2, z2); + fsqr2(dc, ab, tmp2); + a[0U] = c[0U]; + a[1U] = c[1U]; + a[2U] = c[2U]; + a[3U] = c[3U]; + fsub(c, d, c); + fmul_scalar(b, c, (u64)121665U); + fadd(b, b, d); + fmul2(nq, dc, ab, tmp2); +} + +static void montgomery_ladder(u64 *out, const u8 *key, u64 *init1) +{ + u64 tmp2[16U] = { 0U }; + u64 p01_tmp1_swap[33U] = { 0U }; + u64 *p0 = p01_tmp1_swap; + u64 *p01 = p01_tmp1_swap; + u64 *p03 = p01; + u64 *p11 = p01 + (u32)8U; + u64 *x0; + u64 *z0; + u64 *p01_tmp1; + u64 *p01_tmp11; + u64 *nq10; + u64 *nq_p11; + u64 *swap1; + u64 sw0; + u64 *nq1; + u64 *tmp1; + memcpy(p11, init1, (u32)8U * sizeof(init1[0U])); + x0 = p03; + z0 = p03 + (u32)4U; + x0[0U] = (u64)1U; + x0[1U] = (u64)0U; + x0[2U] = (u64)0U; + x0[3U] = (u64)0U; + z0[0U] = (u64)0U; + z0[1U] = (u64)0U; + z0[2U] = (u64)0U; + z0[3U] = (u64)0U; + p01_tmp1 = p01_tmp1_swap; + p01_tmp11 = p01_tmp1_swap; + nq10 = p01_tmp1_swap; + nq_p11 = p01_tmp1_swap + (u32)8U; + swap1 = p01_tmp1_swap + (u32)32U; + cswap2((u64)1U, nq10, nq_p11); + point_add_and_double(init1, p01_tmp11, tmp2); + swap1[0U] = (u64)1U; + { + u32 i; + for (i = (u32)0U; i < (u32)251U; i = i + (u32)1U) { + u64 *p01_tmp12 = p01_tmp1_swap; + u64 *swap2 = p01_tmp1_swap + (u32)32U; + u64 *nq2 = p01_tmp12; + u64 *nq_p12 = p01_tmp12 + (u32)8U; + u64 bit = (u64)(key[((u32)253U - i) / (u32)8U] >> ((u32)253U - i) % (u32)8U & (u8)1U); + u64 sw = swap2[0U] ^ bit; + cswap2(sw, nq2, nq_p12); + point_add_and_double(init1, p01_tmp12, tmp2); + swap2[0U] = bit; + } + } + sw0 = swap1[0U]; + cswap2(sw0, nq10, nq_p11); + nq1 = p01_tmp1; + tmp1 = p01_tmp1 + (u32)16U; + point_double(nq1, tmp1, tmp2); + point_double(nq1, tmp1, tmp2); + point_double(nq1, tmp1, tmp2); + memcpy(out, p0, (u32)8U * sizeof(p0[0U])); + + memzero_explicit(tmp2, sizeof(tmp2)); + memzero_explicit(p01_tmp1_swap, sizeof(p01_tmp1_swap)); +} + +static void fsquare_times(u64 *o, const u64 *inp, u64 *tmp, u32 n1) +{ + u32 i; + fsqr(o, inp, tmp); + for (i = (u32)0U; i < n1 - (u32)1U; i = i + (u32)1U) + fsqr(o, o, tmp); +} + +static void finv(u64 *o, const u64 *i, u64 *tmp) +{ + u64 t1[16U] = { 0U }; + u64 *a0 = t1; + u64 *b = t1 + (u32)4U; + u64 *c = t1 + (u32)8U; + u64 *t00 = t1 + (u32)12U; + u64 *tmp1 = tmp; + u64 *a; + u64 *t0; + fsquare_times(a0, i, tmp1, (u32)1U); + fsquare_times(t00, a0, tmp1, (u32)2U); + fmul(b, t00, i, tmp); + fmul(a0, b, a0, tmp); + 
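+	/* Descriptive note (not in the original patch): the full chain of
+	 * fsquare_times()/fmul() calls in finv() raises i to the power
+	 * 2^255 - 21 = p - 2 (with p = 2^255 - 19), so o ends up as the
+	 * multiplicative inverse of i by Fermat's little theorem. */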
fsquare_times(t00, a0, tmp1, (u32)1U); + fmul(b, t00, b, tmp); + fsquare_times(t00, b, tmp1, (u32)5U); + fmul(b, t00, b, tmp); + fsquare_times(t00, b, tmp1, (u32)10U); + fmul(c, t00, b, tmp); + fsquare_times(t00, c, tmp1, (u32)20U); + fmul(t00, t00, c, tmp); + fsquare_times(t00, t00, tmp1, (u32)10U); + fmul(b, t00, b, tmp); + fsquare_times(t00, b, tmp1, (u32)50U); + fmul(c, t00, b, tmp); + fsquare_times(t00, c, tmp1, (u32)100U); + fmul(t00, t00, c, tmp); + fsquare_times(t00, t00, tmp1, (u32)50U); + fmul(t00, t00, b, tmp); + fsquare_times(t00, t00, tmp1, (u32)5U); + a = t1; + t0 = t1 + (u32)12U; + fmul(o, t0, a, tmp); +} + +static void store_felem(u64 *b, u64 *f) +{ + u64 f30 = f[3U]; + u64 top_bit0 = f30 >> (u32)63U; + u64 carry0; + u64 f31; + u64 top_bit; + u64 carry; + u64 f0; + u64 f1; + u64 f2; + u64 f3; + u64 m0; + u64 m1; + u64 m2; + u64 m3; + u64 mask; + u64 f0_; + u64 f1_; + u64 f2_; + u64 f3_; + u64 o0; + u64 o1; + u64 o2; + u64 o3; + f[3U] = f30 & (u64)0x7fffffffffffffffU; + carry0 = add_scalar(f, f, (u64)19U * top_bit0); + f31 = f[3U]; + top_bit = f31 >> (u32)63U; + f[3U] = f31 & (u64)0x7fffffffffffffffU; + carry = add_scalar(f, f, (u64)19U * top_bit); + f0 = f[0U]; + f1 = f[1U]; + f2 = f[2U]; + f3 = f[3U]; + m0 = gte_mask(f0, (u64)0xffffffffffffffedU); + m1 = eq_mask(f1, (u64)0xffffffffffffffffU); + m2 = eq_mask(f2, (u64)0xffffffffffffffffU); + m3 = eq_mask(f3, (u64)0x7fffffffffffffffU); + mask = ((m0 & m1) & m2) & m3; + f0_ = f0 - (mask & (u64)0xffffffffffffffedU); + f1_ = f1 - (mask & (u64)0xffffffffffffffffU); + f2_ = f2 - (mask & (u64)0xffffffffffffffffU); + f3_ = f3 - (mask & (u64)0x7fffffffffffffffU); + o0 = f0_; + o1 = f1_; + o2 = f2_; + o3 = f3_; + b[0U] = o0; + b[1U] = o1; + b[2U] = o2; + b[3U] = o3; +} + +static void encode_point(u8 *o, const u64 *i) +{ + const u64 *x = i; + const u64 *z = i + (u32)4U; + u64 tmp[4U] = { 0U }; + u64 tmp_w[16U] = { 0U }; + finv(tmp, z, tmp_w); + fmul(tmp, tmp, x, tmp_w); + store_felem((u64 *)o, tmp); +} + +static void curve25519_ever64(u8 *out, const u8 *priv, const u8 *pub) +{ + u64 init1[8U] = { 0U }; + u64 tmp[4U] = { 0U }; + u64 tmp3; + u64 *x; + u64 *z; + { + u32 i; + for (i = (u32)0U; i < (u32)4U; i = i + (u32)1U) { + u64 *os = tmp; + const u8 *bj = pub + i * (u32)8U; + u64 u = *(u64 *)bj; + u64 r = u; + u64 x0 = r; + os[i] = x0; + } + } + tmp3 = tmp[3U]; + tmp[3U] = tmp3 & (u64)0x7fffffffffffffffU; + x = init1; + z = init1 + (u32)4U; + z[0U] = (u64)1U; + z[1U] = (u64)0U; + z[2U] = (u64)0U; + z[3U] = (u64)0U; + x[0U] = tmp[0U]; + x[1U] = tmp[1U]; + x[2U] = tmp[2U]; + x[3U] = tmp[3U]; + montgomery_ladder(init1, priv, init1); + encode_point(out, init1); +} + +/* The below constants were generated using this sage script: + * + * #!/usr/bin/env sage + * import sys + * from sage.all import * + * def limbs(n): + * n = int(n) + * l = ((n >> 0) % 2^64, (n >> 64) % 2^64, (n >> 128) % 2^64, (n >> 192) % 2^64) + * return "0x%016xULL, 0x%016xULL, 0x%016xULL, 0x%016xULL" % l + * ec = EllipticCurve(GF(2^255 - 19), [0, 486662, 0, 1, 0]) + * p_minus_s = (ec.lift_x(9) - ec.lift_x(1))[0] + * print("static const u64 p_minus_s[] = { %s };\n" % limbs(p_minus_s)) + * print("static const u64 table_ladder[] = {") + * p = ec.lift_x(9) + * for i in range(252): + * l = (p[0] + p[2]) / (p[0] - p[2]) + * print(("\t%s" + ("," if i != 251 else "")) % limbs(l)) + * p = p * 2 + * print("};") + * + */ + +static const u64 p_minus_s[] = { 0x816b1e0137d48290ULL, 0x440f6a51eb4d1207ULL, 0x52385f46dca2b71dULL, 0x215132111d8354cbULL }; + +static const u64 table_ladder[] 
= { + 0xfffffffffffffff3ULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL, 0x5fffffffffffffffULL, + 0x6b8220f416aafe96ULL, 0x82ebeb2b4f566a34ULL, 0xd5a9a5b075a5950fULL, 0x5142b2cf4b2488f4ULL, + 0x6aaebc750069680cULL, 0x89cf7820a0f99c41ULL, 0x2a58d9183b56d0f4ULL, 0x4b5aca80e36011a4ULL, + 0x329132348c29745dULL, 0xf4a2e616e1642fd7ULL, 0x1e45bb03ff67bc34ULL, 0x306912d0f42a9b4aULL, + 0xff886507e6af7154ULL, 0x04f50e13dfeec82fULL, 0xaa512fe82abab5ceULL, 0x174e251a68d5f222ULL, + 0xcf96700d82028898ULL, 0x1743e3370a2c02c5ULL, 0x379eec98b4e86eaaULL, 0x0c59888a51e0482eULL, + 0xfbcbf1d699b5d189ULL, 0xacaef0d58e9fdc84ULL, 0xc1c20d06231f7614ULL, 0x2938218da274f972ULL, + 0xf6af49beff1d7f18ULL, 0xcc541c22387ac9c2ULL, 0x96fcc9ef4015c56bULL, 0x69c1627c690913a9ULL, + 0x7a86fd2f4733db0eULL, 0xfdb8c4f29e087de9ULL, 0x095e4b1a8ea2a229ULL, 0x1ad7a7c829b37a79ULL, + 0x342d89cad17ea0c0ULL, 0x67bedda6cced2051ULL, 0x19ca31bf2bb42f74ULL, 0x3df7b4c84980acbbULL, + 0xa8c6444dc80ad883ULL, 0xb91e440366e3ab85ULL, 0xc215cda00164f6d8ULL, 0x3d867c6ef247e668ULL, + 0xc7dd582bcc3e658cULL, 0xfd2c4748ee0e5528ULL, 0xa0fd9b95cc9f4f71ULL, 0x7529d871b0675ddfULL, + 0xb8f568b42d3cbd78ULL, 0x1233011b91f3da82ULL, 0x2dce6ccd4a7c3b62ULL, 0x75e7fc8e9e498603ULL, + 0x2f4f13f1fcd0b6ecULL, 0xf1a8ca1f29ff7a45ULL, 0xc249c1a72981e29bULL, 0x6ebe0dbb8c83b56aULL, + 0x7114fa8d170bb222ULL, 0x65a2dcd5bf93935fULL, 0xbdc41f68b59c979aULL, 0x2f0eef79a2ce9289ULL, + 0x42ecbf0c083c37ceULL, 0x2930bc09ec496322ULL, 0xf294b0c19cfeac0dULL, 0x3780aa4bedfabb80ULL, + 0x56c17d3e7cead929ULL, 0xe7cb4beb2e5722c5ULL, 0x0ce931732dbfe15aULL, 0x41b883c7621052f8ULL, + 0xdbf75ca0c3d25350ULL, 0x2936be086eb1e351ULL, 0xc936e03cb4a9b212ULL, 0x1d45bf82322225aaULL, + 0xe81ab1036a024cc5ULL, 0xe212201c304c9a72ULL, 0xc5d73fba6832b1fcULL, 0x20ffdb5a4d839581ULL, + 0xa283d367be5d0fadULL, 0x6c2b25ca8b164475ULL, 0x9d4935467caaf22eULL, 0x5166408eee85ff49ULL, + 0x3c67baa2fab4e361ULL, 0xb3e433c67ef35cefULL, 0x5259729241159b1cULL, 0x6a621892d5b0ab33ULL, + 0x20b74a387555cdcbULL, 0x532aa10e1208923fULL, 0xeaa17b7762281dd1ULL, 0x61ab3443f05c44bfULL, + 0x257a6c422324def8ULL, 0x131c6c1017e3cf7fULL, 0x23758739f630a257ULL, 0x295a407a01a78580ULL, + 0xf8c443246d5da8d9ULL, 0x19d775450c52fa5dULL, 0x2afcfc92731bf83dULL, 0x7d10c8e81b2b4700ULL, + 0xc8e0271f70baa20bULL, 0x993748867ca63957ULL, 0x5412efb3cb7ed4bbULL, 0x3196d36173e62975ULL, + 0xde5bcad141c7dffcULL, 0x47cc8cd2b395c848ULL, 0xa34cd942e11af3cbULL, 0x0256dbf2d04ecec2ULL, + 0x875ab7e94b0e667fULL, 0xcad4dd83c0850d10ULL, 0x47f12e8f4e72c79fULL, 0x5f1a87bb8c85b19bULL, + 0x7ae9d0b6437f51b8ULL, 0x12c7ce5518879065ULL, 0x2ade09fe5cf77aeeULL, 0x23a05a2f7d2c5627ULL, + 0x5908e128f17c169aULL, 0xf77498dd8ad0852dULL, 0x74b4c4ceab102f64ULL, 0x183abadd10139845ULL, + 0xb165ba8daa92aaacULL, 0xd5c5ef9599386705ULL, 0xbe2f8f0cf8fc40d1ULL, 0x2701e635ee204514ULL, + 0x629fa80020156514ULL, 0xf223868764a8c1ceULL, 0x5b894fff0b3f060eULL, 0x60d9944cf708a3faULL, + 0xaeea001a1c7a201fULL, 0xebf16a633ee2ce63ULL, 0x6f7709594c7a07e1ULL, 0x79b958150d0208cbULL, + 0x24b55e5301d410e7ULL, 0xe3a34edff3fdc84dULL, 0xd88768e4904032d8ULL, 0x131384427b3aaeecULL, + 0x8405e51286234f14ULL, 0x14dc4739adb4c529ULL, 0xb8a2b5b250634ffdULL, 0x2fe2a94ad8a7ff93ULL, + 0xec5c57efe843faddULL, 0x2843ce40f0bb9918ULL, 0xa4b561d6cf3d6305ULL, 0x743629bde8fb777eULL, + 0x343edd46bbaf738fULL, 0xed981828b101a651ULL, 0xa401760b882c797aULL, 0x1fc223e28dc88730ULL, + 0x48604e91fc0fba0eULL, 0xb637f78f052c6fa4ULL, 0x91ccac3d09e9239cULL, 0x23f7eed4437a687cULL, + 0x5173b1118d9bd800ULL, 0x29d641b63189d4a7ULL, 0xfdbf177988bbc586ULL, 
0x2959894fcad81df5ULL, + 0xaebc8ef3b4bbc899ULL, 0x4148995ab26992b9ULL, 0x24e20b0134f92cfbULL, 0x40d158894a05dee8ULL, + 0x46b00b1185af76f6ULL, 0x26bac77873187a79ULL, 0x3dc0bf95ab8fff5fULL, 0x2a608bd8945524d7ULL, + 0x26449588bd446302ULL, 0x7c4bc21c0388439cULL, 0x8e98a4f383bd11b2ULL, 0x26218d7bc9d876b9ULL, + 0xe3081542997c178aULL, 0x3c2d29a86fb6606fULL, 0x5c217736fa279374ULL, 0x7dde05734afeb1faULL, + 0x3bf10e3906d42babULL, 0xe4f7803e1980649cULL, 0xe6053bf89595bf7aULL, 0x394faf38da245530ULL, + 0x7a8efb58896928f4ULL, 0xfbc778e9cc6a113cULL, 0x72670ce330af596fULL, 0x48f222a81d3d6cf7ULL, + 0xf01fce410d72caa7ULL, 0x5a20ecc7213b5595ULL, 0x7bc21165c1fa1483ULL, 0x07f89ae31da8a741ULL, + 0x05d2c2b4c6830ff9ULL, 0xd43e330fc6316293ULL, 0xa5a5590a96d3a904ULL, 0x705edb91a65333b6ULL, + 0x048ee15e0bb9a5f7ULL, 0x3240cfca9e0aaf5dULL, 0x8f4b71ceedc4a40bULL, 0x621c0da3de544a6dULL, + 0x92872836a08c4091ULL, 0xce8375b010c91445ULL, 0x8a72eb524f276394ULL, 0x2667fcfa7ec83635ULL, + 0x7f4c173345e8752aULL, 0x061b47feee7079a5ULL, 0x25dd9afa9f86ff34ULL, 0x3780cef5425dc89cULL, + 0x1a46035a513bb4e9ULL, 0x3e1ef379ac575adaULL, 0xc78c5f1c5fa24b50ULL, 0x321a967634fd9f22ULL, + 0x946707b8826e27faULL, 0x3dca84d64c506fd0ULL, 0xc189218075e91436ULL, 0x6d9284169b3b8484ULL, + 0x3a67e840383f2ddfULL, 0x33eec9a30c4f9b75ULL, 0x3ec7c86fa783ef47ULL, 0x26ec449fbac9fbc4ULL, + 0x5c0f38cba09b9e7dULL, 0x81168cc762a3478cULL, 0x3e23b0d306fc121cULL, 0x5a238aa0a5efdcddULL, + 0x1ba26121c4ea43ffULL, 0x36f8c77f7c8832b5ULL, 0x88fbea0b0adcf99aULL, 0x5ca9938ec25bebf9ULL, + 0xd5436a5e51fccda0ULL, 0x1dbc4797c2cd893bULL, 0x19346a65d3224a08ULL, 0x0f5034e49b9af466ULL, + 0xf23c3967a1e0b96eULL, 0xe58b08fa867a4d88ULL, 0xfb2fabc6a7341679ULL, 0x2a75381eb6026946ULL, + 0xc80a3be4c19420acULL, 0x66b1f6c681f2b6dcULL, 0x7cf7036761e93388ULL, 0x25abbbd8a660a4c4ULL, + 0x91ea12ba14fd5198ULL, 0x684950fc4a3cffa9ULL, 0xf826842130f5ad28ULL, 0x3ea988f75301a441ULL, + 0xc978109a695f8c6fULL, 0x1746eb4a0530c3f3ULL, 0x444d6d77b4459995ULL, 0x75952b8c054e5cc7ULL, + 0xa3703f7915f4d6aaULL, 0x66c346202f2647d8ULL, 0xd01469df811d644bULL, 0x77fea47d81a5d71fULL, + 0xc5e9529ef57ca381ULL, 0x6eeeb4b9ce2f881aULL, 0xb6e91a28e8009bd6ULL, 0x4b80be3e9afc3fecULL, + 0x7e3773c526aed2c5ULL, 0x1b4afcb453c9a49dULL, 0xa920bdd7baffb24dULL, 0x7c54699f122d400eULL, + 0xef46c8e14fa94bc8ULL, 0xe0b074ce2952ed5eULL, 0xbea450e1dbd885d5ULL, 0x61b68649320f712cULL, + 0x8a485f7309ccbdd1ULL, 0xbd06320d7d4d1a2dULL, 0x25232973322dbef4ULL, 0x445dc4758c17f770ULL, + 0xdb0434177cc8933cULL, 0xed6fe82175ea059fULL, 0x1efebefdc053db34ULL, 0x4adbe867c65daf99ULL, + 0x3acd71a2a90609dfULL, 0xe5e991856dd04050ULL, 0x1ec69b688157c23cULL, 0x697427f6885cfe4dULL, + 0xd7be7b9b65e1a851ULL, 0xa03d28d522c536ddULL, 0x28399d658fd2b645ULL, 0x49e5b7e17c2641e1ULL, + 0x6f8c3a98700457a4ULL, 0x5078f0a25ebb6778ULL, 0xd13c3ccbc382960fULL, 0x2e003258a7df84b1ULL, + 0x8ad1f39be6296a1cULL, 0xc1eeaa652a5fbfb2ULL, 0x33ee0673fd26f3cbULL, 0x59256173a69d2cccULL, + 0x41ea07aa4e18fc41ULL, 0xd9fc19527c87a51eULL, 0xbdaacb805831ca6fULL, 0x445b652dc916694fULL, + 0xce92a3a7f2172315ULL, 0x1edc282de11b9964ULL, 0xa1823aafe04c314aULL, 0x790a2d94437cf586ULL, + 0x71c447fb93f6e009ULL, 0x8922a56722845276ULL, 0xbf70903b204f5169ULL, 0x2f7a89891ba319feULL, + 0x02a08eb577e2140cULL, 0xed9a4ed4427bdcf4ULL, 0x5253ec44e4323cd1ULL, 0x3e88363c14e9355bULL, + 0xaa66c14277110b8cULL, 0x1ae0391610a23390ULL, 0x2030bd12c93fc2a2ULL, 0x3ee141579555c7abULL, + 0x9214de3a6d6e7d41ULL, 0x3ccdd88607f17efeULL, 0x674f1288f8e11217ULL, 0x5682250f329f93d0ULL, + 0x6cf00b136d2e396eULL, 0x6e4cf86f1014debfULL, 
0x5930b1b5bfcc4e83ULL, 0x047069b48aba16b6ULL, + 0x0d4ce4ab69b20793ULL, 0xb24db91a97d0fb9eULL, 0xcdfa50f54e00d01dULL, 0x221b1085368bddb5ULL, + 0xe7e59468b1e3d8d2ULL, 0x53c56563bd122f93ULL, 0xeee8a903e0663f09ULL, 0x61efa662cbbe3d42ULL, + 0x2cf8ddddde6eab2aULL, 0x9bf80ad51435f231ULL, 0x5deadacec9f04973ULL, 0x29275b5d41d29b27ULL, + 0xcfde0f0895ebf14fULL, 0xb9aab96b054905a7ULL, 0xcae80dd9a1c420fdULL, 0x0a63bf2f1673bbc7ULL, + 0x092f6e11958fbc8cULL, 0x672a81e804822fadULL, 0xcac8351560d52517ULL, 0x6f3f7722c8f192f8ULL, + 0xf8ba90ccc2e894b7ULL, 0x2c7557a438ff9f0dULL, 0x894d1d855ae52359ULL, 0x68e122157b743d69ULL, + 0xd87e5570cfb919f3ULL, 0x3f2cdecd95798db9ULL, 0x2121154710c0a2ceULL, 0x3c66a115246dc5b2ULL, + 0xcbedc562294ecb72ULL, 0xba7143c36a280b16ULL, 0x9610c2efd4078b67ULL, 0x6144735d946a4b1eULL, + 0x536f111ed75b3350ULL, 0x0211db8c2041d81bULL, 0xf93cb1000e10413cULL, 0x149dfd3c039e8876ULL, + 0xd479dde46b63155bULL, 0xb66e15e93c837976ULL, 0xdafde43b1f13e038ULL, 0x5fafda1a2e4b0b35ULL, + 0x3600bbdf17197581ULL, 0x3972050bbe3cd2c2ULL, 0x5938906dbdd5be86ULL, 0x34fce5e43f9b860fULL, + 0x75a8a4cd42d14d02ULL, 0x828dabc53441df65ULL, 0x33dcabedd2e131d3ULL, 0x3ebad76fb814d25fULL, + 0xd4906f566f70e10fULL, 0x5d12f7aa51690f5aULL, 0x45adb16e76cefcf2ULL, 0x01f768aead232999ULL, + 0x2b6cc77b6248febdULL, 0x3cd30628ec3aaffdULL, 0xce1c0b80d4ef486aULL, 0x4c3bff2ea6f66c23ULL, + 0x3f2ec4094aeaeb5fULL, 0x61b19b286e372ca7ULL, 0x5eefa966de2a701dULL, 0x23b20565de55e3efULL, + 0xe301ca5279d58557ULL, 0x07b2d4ce27c2874fULL, 0xa532cd8a9dcf1d67ULL, 0x2a52fee23f2bff56ULL, + 0x8624efb37cd8663dULL, 0xbbc7ac20ffbd7594ULL, 0x57b85e9c82d37445ULL, 0x7b3052cb86a6ec66ULL, + 0x3482f0ad2525e91eULL, 0x2cb68043d28edca0ULL, 0xaf4f6d052e1b003aULL, 0x185f8c2529781b0aULL, + 0xaa41de5bd80ce0d6ULL, 0x9407b2416853e9d6ULL, 0x563ec36e357f4c3aULL, 0x4cc4b8dd0e297bceULL, + 0xa2fc1a52ffb8730eULL, 0x1811f16e67058e37ULL, 0x10f9a366cddf4ee1ULL, 0x72f4a0c4a0b9f099ULL, + 0x8c16c06f663f4ea7ULL, 0x693b3af74e970fbaULL, 0x2102e7f1d69ec345ULL, 0x0ba53cbc968a8089ULL, + 0xca3d9dc7fea15537ULL, 0x4c6824bb51536493ULL, 0xb9886314844006b1ULL, 0x40d2a72ab454cc60ULL, + 0x5936a1b712570975ULL, 0x91b9d648debda657ULL, 0x3344094bb64330eaULL, 0x006ba10d12ee51d0ULL, + 0x19228468f5de5d58ULL, 0x0eb12f4c38cc05b0ULL, 0xa1039f9dd5601990ULL, 0x4502d4ce4fff0e0bULL, + 0xeb2054106837c189ULL, 0xd0f6544c6dd3b93cULL, 0x40727064c416d74fULL, 0x6e15c6114b502ef0ULL, + 0x4df2a398cfb1a76bULL, 0x11256c7419f2f6b1ULL, 0x4a497962066e6043ULL, 0x705b3aab41355b44ULL, + 0x365ef536d797b1d8ULL, 0x00076bd622ddf0dbULL, 0x3bbf33b0e0575a88ULL, 0x3777aa05c8e4ca4dULL, + 0x392745c85578db5fULL, 0x6fda4149dbae5ae2ULL, 0xb1f0b00b8adc9867ULL, 0x09963437d36f1da3ULL, + 0x7e824e90a5dc3853ULL, 0xccb5f6641f135cbdULL, 0x6736d86c87ce8fccULL, 0x625f3ce26604249fULL, + 0xaf8ac8059502f63fULL, 0x0c05e70a2e351469ULL, 0x35292e9c764b6305ULL, 0x1a394360c7e23ac3ULL, + 0xd5c6d53251183264ULL, 0x62065abd43c2b74fULL, 0xb5fbf5d03b973f9bULL, 0x13a3da3661206e5eULL, + 0xc6bd5837725d94e5ULL, 0x18e30912205016c5ULL, 0x2088ce1570033c68ULL, 0x7fba1f495c837987ULL, + 0x5a8c7423f2f9079dULL, 0x1735157b34023fc5ULL, 0xe4f9b49ad2fab351ULL, 0x6691ff72c878e33cULL, + 0x122c2adedc5eff3eULL, 0xf8dd4bf1d8956cf4ULL, 0xeb86205d9e9e5bdaULL, 0x049b92b9d975c743ULL, + 0xa5379730b0f6c05aULL, 0x72a0ffacc6f3a553ULL, 0xb0032c34b20dcd6dULL, 0x470e9dbc88d5164aULL, + 0xb19cf10ca237c047ULL, 0xb65466711f6c81a2ULL, 0xb3321bd16dd80b43ULL, 0x48c14f600c5fbe8eULL, + 0x66451c264aa6c803ULL, 0xb66e3904a4fa7da6ULL, 0xd45f19b0b3128395ULL, 0x31602627c3c9bc10ULL, + 0x3120dc4832e4e10dULL, 
0xeb20c46756c717f7ULL, 0x00f52e3f67280294ULL, 0x566d4fc14730c509ULL, + 0x7e3a5d40fd837206ULL, 0xc1e926dc7159547aULL, 0x216730fba68d6095ULL, 0x22e8c3843f69cea7ULL, + 0x33d074e8930e4b2bULL, 0xb6e4350e84d15816ULL, 0x5534c26ad6ba2365ULL, 0x7773c12f89f1f3f3ULL, + 0x8cba404da57962aaULL, 0x5b9897a81999ce56ULL, 0x508e862f121692fcULL, 0x3a81907fa093c291ULL, + 0x0dded0ff4725a510ULL, 0x10d8cc10673fc503ULL, 0x5b9d151c9f1f4e89ULL, 0x32a5c1d5cb09a44cULL, + 0x1e0aa442b90541fbULL, 0x5f85eb7cc1b485dbULL, 0xbee595ce8a9df2e5ULL, 0x25e496c722422236ULL, + 0x5edf3c46cd0fe5b9ULL, 0x34e75a7ed2a43388ULL, 0xe488de11d761e352ULL, 0x0e878a01a085545cULL, + 0xba493c77e021bb04ULL, 0x2b4d1843c7df899aULL, 0x9ea37a487ae80d67ULL, 0x67a9958011e41794ULL, + 0x4b58051a6697b065ULL, 0x47e33f7d8d6ba6d4ULL, 0xbb4da8d483ca46c1ULL, 0x68becaa181c2db0dULL, + 0x8d8980e90b989aa5ULL, 0xf95eb14a2c93c99bULL, 0x51c6c7c4796e73a2ULL, 0x6e228363b5efb569ULL, + 0xc6bbc0b02dd624c8ULL, 0x777eb47dec8170eeULL, 0x3cde15a004cfafa9ULL, 0x1dc6bc087160bf9bULL, + 0x2e07e043eec34002ULL, 0x18e9fc677a68dc7fULL, 0xd8da03188bd15b9aULL, 0x48fbc3bb00568253ULL, + 0x57547d4cfb654ce1ULL, 0xd3565b82a058e2adULL, 0xf63eaf0bbf154478ULL, 0x47531ef114dfbb18ULL, + 0xe1ec630a4278c587ULL, 0x5507d546ca8e83f3ULL, 0x85e135c63adc0c2bULL, 0x0aa7efa85682844eULL, + 0x72691ba8b3e1f615ULL, 0x32b4e9701fbe3ffaULL, 0x97b6d92e39bb7868ULL, 0x2cfe53dea02e39e8ULL, + 0x687392cd85cd52b0ULL, 0x27ff66c910e29831ULL, 0x97134556a9832d06ULL, 0x269bb0360a84f8a0ULL, + 0x706e55457643f85cULL, 0x3734a48c9b597d1bULL, 0x7aee91e8c6efa472ULL, 0x5cd6abc198a9d9e0ULL, + 0x0e04de06cb3ce41aULL, 0xd8c6eb893402e138ULL, 0x904659bb686e3772ULL, 0x7215c371746ba8c8ULL, + 0xfd12a97eeae4a2d9ULL, 0x9514b7516394f2c5ULL, 0x266fd5809208f294ULL, 0x5c847085619a26b9ULL, + 0x52985410fed694eaULL, 0x3c905b934a2ed254ULL, 0x10bb47692d3be467ULL, 0x063b3d2d69e5e9e1ULL, + 0x472726eedda57debULL, 0xefb6c4ae10f41891ULL, 0x2b1641917b307614ULL, 0x117c554fc4f45b7cULL, + 0xc07cf3118f9d8812ULL, 0x01dbd82050017939ULL, 0xd7e803f4171b2827ULL, 0x1015e87487d225eaULL, + 0xc58de3fed23acc4dULL, 0x50db91c294a7be2dULL, 0x0b94d43d1c9cf457ULL, 0x6b1640fa6e37524aULL, + 0x692f346c5fda0d09ULL, 0x200b1c59fa4d3151ULL, 0xb8c46f760777a296ULL, 0x4b38395f3ffdfbcfULL, + 0x18d25e00be54d671ULL, 0x60d50582bec8aba6ULL, 0x87ad8f263b78b982ULL, 0x50fdf64e9cda0432ULL, + 0x90f567aac578dcf0ULL, 0xef1e9b0ef2a3133bULL, 0x0eebba9242d9de71ULL, 0x15473c9bf03101c7ULL, + 0x7c77e8ae56b78095ULL, 0xb678e7666e6f078eULL, 0x2da0b9615348ba1fULL, 0x7cf931c1ff733f0bULL, + 0x26b357f50a0a366cULL, 0xe9708cf42b87d732ULL, 0xc13aeea5f91cb2c0ULL, 0x35d90c991143bb4cULL, + 0x47c1c404a9a0d9dcULL, 0x659e58451972d251ULL, 0x3875a8c473b38c31ULL, 0x1fbd9ed379561f24ULL, + 0x11fabc6fd41ec28dULL, 0x7ef8dfe3cd2a2dcaULL, 0x72e73b5d8c404595ULL, 0x6135fa4954b72f27ULL, + 0xccfc32a2de24b69cULL, 0x3f55698c1f095d88ULL, 0xbe3350ed5ac3f929ULL, 0x5e9bf806ca477eebULL, + 0xe9ce8fb63c309f68ULL, 0x5376f63565e1f9f4ULL, 0xd1afcfb35a6393f1ULL, 0x6632a1ede5623506ULL, + 0x0b7d6c390c2ded4cULL, 0x56cb3281df04cb1fULL, 0x66305a1249ecc3c7ULL, 0x5d588b60a38ca72aULL, + 0xa6ecbf78e8e5f42dULL, 0x86eeb44b3c8a3eecULL, 0xec219c48fbd21604ULL, 0x1aaf1af517c36731ULL, + 0xc306a2836769bde7ULL, 0x208280622b1e2adbULL, 0x8027f51ffbff94a6ULL, 0x76cfa1ce1124f26bULL, + 0x18eb00562422abb6ULL, 0xf377c4d58f8c29c3ULL, 0x4dbbc207f531561aULL, 0x0253b7f082128a27ULL, + 0x3d1f091cb62c17e0ULL, 0x4860e1abd64628a9ULL, 0x52d17436309d4253ULL, 0x356f97e13efae576ULL, + 0xd351e11aa150535bULL, 0x3e6b45bb1dd878ccULL, 0x0c776128bed92c98ULL, 0x1d34ae93032885b8ULL, + 
0x4ba0488ca85ba4c3ULL, 0x985348c33c9ce6ceULL, 0x66124c6f97bda770ULL, 0x0f81a0290654124aULL, + 0x9ed09ca6569b86fdULL, 0x811009fd18af9a2dULL, 0xff08d03f93d8c20aULL, 0x52a148199faef26bULL, + 0x3e03f9dc2d8d1b73ULL, 0x4205801873961a70ULL, 0xc0d987f041a35970ULL, 0x07aa1f15a1c0d549ULL, + 0xdfd46ce08cd27224ULL, 0x6d0a024f934e4239ULL, 0x808a7a6399897b59ULL, 0x0a4556e9e13d95a2ULL, + 0xd21a991fe9c13045ULL, 0x9b0e8548fe7751b8ULL, 0x5da643cb4bf30035ULL, 0x77db28d63940f721ULL, + 0xfc5eeb614adc9011ULL, 0x5229419ae8c411ebULL, 0x9ec3e7787d1dcf74ULL, 0x340d053e216e4cb5ULL, + 0xcac7af39b48df2b4ULL, 0xc0faec2871a10a94ULL, 0x140a69245ca575edULL, 0x0cf1c37134273a4cULL, + 0xc8ee306ac224b8a5ULL, 0x57eaee7ccb4930b0ULL, 0xa1e806bdaacbe74fULL, 0x7d9a62742eeb657dULL, + 0x9eb6b6ef546c4830ULL, 0x885cca1fddb36e2eULL, 0xe6b9f383ef0d7105ULL, 0x58654fef9d2e0412ULL, + 0xa905c4ffbe0e8e26ULL, 0x942de5df9b31816eULL, 0x497d723f802e88e1ULL, 0x30684dea602f408dULL, + 0x21e5a278a3e6cb34ULL, 0xaefb6e6f5b151dc4ULL, 0xb30b8e049d77ca15ULL, 0x28c3c9cf53b98981ULL, + 0x287fb721556cdd2aULL, 0x0d317ca897022274ULL, 0x7468c7423a543258ULL, 0x4a7f11464eb5642fULL, + 0xa237a4774d193aa6ULL, 0xd865986ea92129a1ULL, 0x24c515ecf87c1a88ULL, 0x604003575f39f5ebULL, + 0x47b9f189570a9b27ULL, 0x2b98cede465e4b78ULL, 0x026df551dbb85c20ULL, 0x74fcd91047e21901ULL, + 0x13e2a90a23c1bfa3ULL, 0x0cb0074e478519f6ULL, 0x5ff1cbbe3af6cf44ULL, 0x67fe5438be812dbeULL, + 0xd13cf64fa40f05b0ULL, 0x054dfb2f32283787ULL, 0x4173915b7f0d2aeaULL, 0x482f144f1f610d4eULL, + 0xf6210201b47f8234ULL, 0x5d0ae1929e70b990ULL, 0xdcd7f455b049567cULL, 0x7e93d0f1f0916f01ULL, + 0xdd79cbf18a7db4faULL, 0xbe8391bf6f74c62fULL, 0x027145d14b8291bdULL, 0x585a73ea2cbf1705ULL, + 0x485ca03e928a0db2ULL, 0x10fc01a5742857e7ULL, 0x2f482edbd6d551a7ULL, 0x0f0433b5048fdb8aULL, + 0x60da2e8dd7dc6247ULL, 0x88b4c9d38cd4819aULL, 0x13033ac001f66697ULL, 0x273b24fe3b367d75ULL, + 0xc6e8f66a31b3b9d4ULL, 0x281514a494df49d5ULL, 0xd1726fdfc8b23da7ULL, 0x4b3ae7d103dee548ULL, + 0xc6256e19ce4b9d7eULL, 0xff5c5cf186e3c61cULL, 0xacc63ca34b8ec145ULL, 0x74621888fee66574ULL, + 0x956f409645290a1eULL, 0xef0bf8e3263a962eULL, 0xed6a50eb5ec2647bULL, 0x0694283a9dca7502ULL, + 0x769b963643a2dcd1ULL, 0x42b7c8ea09fc5353ULL, 0x4f002aee13397eabULL, 0x63005e2c19b7d63aULL, + 0xca6736da63023beaULL, 0x966c7f6db12a99b7ULL, 0xace09390c537c5e1ULL, 0x0b696063a1aa89eeULL, + 0xebb03e97288c56e5ULL, 0x432a9f9f938c8be8ULL, 0xa6a5a93d5b717f71ULL, 0x1a5fb4c3e18f9d97ULL, + 0x1c94e7ad1c60cdceULL, 0xee202a43fc02c4a0ULL, 0x8dafe4d867c46a20ULL, 0x0a10263c8ac27b58ULL, + 0xd0dea9dfe4432a4aULL, 0x856af87bbe9277c5ULL, 0xce8472acc212c71aULL, 0x6f151b6d9bbb1e91ULL, + 0x26776c527ceed56aULL, 0x7d211cb7fbf8faecULL, 0x37ae66a6fd4609ccULL, 0x1f81b702d2770c42ULL, + 0x2fb0b057eac58392ULL, 0xe1dd89fe29744e9dULL, 0xc964f8eb17beb4f8ULL, 0x29571073c9a2d41eULL, + 0xa948a18981c0e254ULL, 0x2df6369b65b22830ULL, 0xa33eb2d75fcfd3c6ULL, 0x078cd6ec4199a01fULL, + 0x4a584a41ad900d2fULL, 0x32142b78e2c74c52ULL, 0x68c4e8338431c978ULL, 0x7f69ea9008689fc2ULL, + 0x52f2c81e46a38265ULL, 0xfd78072d04a832fdULL, 0x8cd7d5fa25359e94ULL, 0x4de71b7454cc29d2ULL, + 0x42eb60ad1eda6ac9ULL, 0x0aad37dfdbc09c3aULL, 0x81004b71e33cc191ULL, 0x44e6be345122803cULL, + 0x03fe8388ba1920dbULL, 0xf5d57c32150db008ULL, 0x49c8c4281af60c29ULL, 0x21edb518de701aeeULL, + 0x7fb63e418f06dc99ULL, 0xa4460d99c166d7b8ULL, 0x24dd5248ce520a83ULL, 0x5ec3ad712b928358ULL, + 0x15022a5fbd17930fULL, 0xa4f64a77d82570e3ULL, 0x12bc8d6915783712ULL, 0x498194c0fc620abbULL, + 0x38a2d9d255686c82ULL, 0x785c6bd9193e21f0ULL, 0xe4d5c81ab24a5484ULL, 
0x56307860b2e20989ULL, + 0x429d55f78b4d74c4ULL, 0x22f1834643350131ULL, 0x1e60c24598c71fffULL, 0x59f2f014979983efULL, + 0x46a47d56eb494a44ULL, 0x3e22a854d636a18eULL, 0xb346e15274491c3bULL, 0x2ceafd4e5390cde7ULL, + 0xba8a8538be0d6675ULL, 0x4b9074bb50818e23ULL, 0xcbdab89085d304c3ULL, 0x61a24fe0e56192c4ULL, + 0xcb7615e6db525bcbULL, 0xdd7d8c35a567e4caULL, 0xe6b4153acafcdd69ULL, 0x2d668e097f3c9766ULL, + 0xa57e7e265ce55ef0ULL, 0x5d9f4e527cd4b967ULL, 0xfbc83606492fd1e5ULL, 0x090d52beb7c3f7aeULL, + 0x09b9515a1e7b4d7cULL, 0x1f266a2599da44c0ULL, 0xa1c49548e2c55504ULL, 0x7ef04287126f15ccULL, + 0xfed1659dbd30ef15ULL, 0x8b4ab9eec4e0277bULL, 0x884d6236a5df3291ULL, 0x1fd96ea6bf5cf788ULL, + 0x42a161981f190d9aULL, 0x61d849507e6052c1ULL, 0x9fe113bf285a2cd5ULL, 0x7c22d676dbad85d8ULL, + 0x82e770ed2bfbd27dULL, 0x4c05b2ece996f5a5ULL, 0xcd40a9c2b0900150ULL, 0x5895319213d9bf64ULL, + 0xe7cc5d703fea2e08ULL, 0xb50c491258e2188cULL, 0xcce30baa48205bf0ULL, 0x537c659ccfa32d62ULL, + 0x37b6623a98cfc088ULL, 0xfe9bed1fa4d6aca4ULL, 0x04d29b8e56a8d1b0ULL, 0x725f71c40b519575ULL, + 0x28c7f89cd0339ce6ULL, 0x8367b14469ddc18bULL, 0x883ada83a6a1652cULL, 0x585f1974034d6c17ULL, + 0x89cfb266f1b19188ULL, 0xe63b4863e7c35217ULL, 0xd88c9da6b4c0526aULL, 0x3e035c9df0954635ULL, + 0xdd9d5412fb45de9dULL, 0xdd684532e4cff40dULL, 0x4b5c999b151d671cULL, 0x2d8c2cc811e7f690ULL, + 0x7f54be1d90055d40ULL, 0xa464c5df464aaf40ULL, 0x33979624f0e917beULL, 0x2c018dc527356b30ULL, + 0xa5415024e330b3d4ULL, 0x73ff3d96691652d3ULL, 0x94ec42c4ef9b59f1ULL, 0x0747201618d08e5aULL, + 0x4d6ca48aca411c53ULL, 0x66415f2fcfa66119ULL, 0x9c4dd40051e227ffULL, 0x59810bc09a02f7ebULL, + 0x2a7eb171b3dc101dULL, 0x441c5ab99ffef68eULL, 0x32025c9b93b359eaULL, 0x5e8ce0a71e9d112fULL, + 0xbfcccb92429503fdULL, 0xd271ba752f095d55ULL, 0x345ead5e972d091eULL, 0x18c8df11a83103baULL, + 0x90cd949a9aed0f4cULL, 0xc5d1f4cb6660e37eULL, 0xb8cac52d56c52e0bULL, 0x6e42e400c5808e0dULL, + 0xa3b46966eeaefd23ULL, 0x0c4f1f0be39ecdcaULL, 0x189dc8c9d683a51dULL, 0x51f27f054c09351bULL, + 0x4c487ccd2a320682ULL, 0x587ea95bb3df1c96ULL, 0xc8ccf79e555cb8e8ULL, 0x547dc829a206d73dULL, + 0xb822a6cd80c39b06ULL, 0xe96d54732000d4c6ULL, 0x28535b6f91463b4dULL, 0x228f4660e2486e1dULL, + 0x98799538de8d3abfULL, 0x8cd8330045ebca6eULL, 0x79952a008221e738ULL, 0x4322e1a7535cd2bbULL, + 0xb114c11819d1801cULL, 0x2016e4d84f3f5ec7ULL, 0xdd0e2df409260f4cULL, 0x5ec362c0ae5f7266ULL, + 0xc0462b18b8b2b4eeULL, 0x7cc8d950274d1afbULL, 0xf25f7105436b02d2ULL, 0x43bbf8dcbff9ccd3ULL, + 0xb6ad1767a039e9dfULL, 0xb0714da8f69d3583ULL, 0x5e55fa18b42931f5ULL, 0x4ed5558f33c60961ULL, + 0x1fe37901c647a5ddULL, 0x593ddf1f8081d357ULL, 0x0249a4fd813fd7a6ULL, 0x69acca274e9caf61ULL, + 0x047ba3ea330721c9ULL, 0x83423fc20e7e1ea0ULL, 0x1df4c0af01314a60ULL, 0x09a62dab89289527ULL, + 0xa5b325a49cc6cb00ULL, 0xe94b5dc654b56cb6ULL, 0x3be28779adc994a0ULL, 0x4296e8f8ba3a4aadULL, + 0x328689761e451eabULL, 0x2e4d598bff59594aULL, 0x49b96853d7a7084aULL, 0x4980a319601420a8ULL, + 0x9565b9e12f552c42ULL, 0x8a5318db7100fe96ULL, 0x05c90b4d43add0d7ULL, 0x538b4cd66a5d4edaULL, + 0xf4e94fc3e89f039fULL, 0x592c9af26f618045ULL, 0x08a36eb5fd4b9550ULL, 0x25fffaf6c2ed1419ULL, + 0x34434459cc79d354ULL, 0xeeecbfb4b1d5476bULL, 0xddeb34a061615d99ULL, 0x5129cecceb64b773ULL, + 0xee43215894993520ULL, 0x772f9c7cf14c0b3bULL, 0xd2e2fce306bedad5ULL, 0x715f42b546f06a97ULL, + 0x434ecdceda5b5f1aULL, 0x0da17115a49741a9ULL, 0x680bd77c73edad2eULL, 0x487c02354edd9041ULL, + 0xb8efeff3a70ed9c4ULL, 0x56a32aa3e857e302ULL, 0xdf3a68bd48a2a5a0ULL, 0x07f650b73176c444ULL, + 0xe38b9b1626e0ccb1ULL, 0x79e053c18b09fb36ULL, 
0x56d90319c9f94964ULL, 0x1ca941e7ac9ff5c4ULL, + 0x49c4df29162fa0bbULL, 0x8488cf3282b33305ULL, 0x95dfda14cabb437dULL, 0x3391f78264d5ad86ULL, + 0x729ae06ae2b5095dULL, 0xd58a58d73259a946ULL, 0xe9834262d13921edULL, 0x27fedafaa54bb592ULL, + 0xa99dc5b829ad48bbULL, 0x5f025742499ee260ULL, 0x802c8ecd5d7513fdULL, 0x78ceb3ef3f6dd938ULL, + 0xc342f44f8a135d94ULL, 0x7b9edb44828cdda3ULL, 0x9436d11a0537cfe7ULL, 0x5064b164ec1ab4c8ULL, + 0x7020eccfd37eb2fcULL, 0x1f31ea3ed90d25fcULL, 0x1b930d7bdfa1bb34ULL, 0x5344467a48113044ULL, + 0x70073170f25e6dfbULL, 0xe385dc1a50114cc8ULL, 0x2348698ac8fc4f00ULL, 0x2a77a55284dd40d8ULL, + 0xfe06afe0c98c6ce4ULL, 0xc235df96dddfd6e4ULL, 0x1428d01e33bf1ed3ULL, 0x785768ec9300bdafULL, + 0x9702e57a91deb63bULL, 0x61bdb8bfe5ce8b80ULL, 0x645b426f3d1d58acULL, 0x4804a82227a557bcULL, + 0x8e57048ab44d2601ULL, 0x68d6501a4b3a6935ULL, 0xc39c9ec3f9e1c293ULL, 0x4172f257d4de63e2ULL, + 0xd368b450330c6401ULL, 0x040d3017418f2391ULL, 0x2c34bb6090b7d90dULL, 0x16f649228fdfd51fULL, + 0xbea6818e2b928ef5ULL, 0xe28ccf91cdc11e72ULL, 0x594aaa68e77a36cdULL, 0x313034806c7ffd0fULL, + 0x8a9d27ac2249bd65ULL, 0x19a3b464018e9512ULL, 0xc26ccff352b37ec7ULL, 0x056f68341d797b21ULL, + 0x5e79d6757efd2327ULL, 0xfabdbcb6553afe15ULL, 0xd3e7222c6eaf5a60ULL, 0x7046c76d4dae743bULL, + 0x660be872b18d4a55ULL, 0x19992518574e1496ULL, 0xc103053a302bdcbbULL, 0x3ed8e9800b218e8eULL, + 0x7b0b9239fa75e03eULL, 0xefe9fb684633c083ULL, 0x98a35fbe391a7793ULL, 0x6065510fe2d0fe34ULL, + 0x55cb668548abad0cULL, 0xb4584548da87e527ULL, 0x2c43ecea0107c1ddULL, 0x526028809372de35ULL, + 0x3415c56af9213b1fULL, 0x5bee1a4d017e98dbULL, 0x13f6b105b5cf709bULL, 0x5ff20e3482b29ab6ULL, + 0x0aa29c75cc2e6c90ULL, 0xfc7d73ca3a70e206ULL, 0x899fc38fc4b5c515ULL, 0x250386b124ffc207ULL, + 0x54ea28d5ae3d2b56ULL, 0x9913149dd6de60ceULL, 0x16694fc58f06d6c1ULL, 0x46b23975eb018fc7ULL, + 0x470a6a0fb4b7b4e2ULL, 0x5d92475a8f7253deULL, 0xabeee5b52fbd3adbULL, 0x7fa20801a0806968ULL, + 0x76f3faf19f7714d2ULL, 0xb3e840c12f4660c3ULL, 0x0fb4cd8df212744eULL, 0x4b065a251d3a2dd2ULL, + 0x5cebde383d77cd4aULL, 0x6adf39df882c9cb1ULL, 0xa2dd242eb09af759ULL, 0x3147c0e50e5f6422ULL, + 0x164ca5101d1350dbULL, 0xf8d13479c33fc962ULL, 0xe640ce4d13e5da08ULL, 0x4bdee0c45061f8baULL, + 0xd7c46dc1a4edb1c9ULL, 0x5514d7b6437fd98aULL, 0x58942f6bb2a1c00bULL, 0x2dffb2ab1d70710eULL, + 0xccdfcf2fc18b6d68ULL, 0xa8ebcba8b7806167ULL, 0x980697f95e2937e3ULL, 0x02fbba1cd0126e8cULL +}; + +static void curve25519_ever64_base(u8 *out, const u8 *priv) +{ + u64 swap = 1; + int i, j, k; + u64 tmp[16 + 32 + 4]; + u64 *x1 = &tmp[0]; + u64 *z1 = &tmp[4]; + u64 *x2 = &tmp[8]; + u64 *z2 = &tmp[12]; + u64 *xz1 = &tmp[0]; + u64 *xz2 = &tmp[8]; + u64 *a = &tmp[0 + 16]; + u64 *b = &tmp[4 + 16]; + u64 *c = &tmp[8 + 16]; + u64 *ab = &tmp[0 + 16]; + u64 *abcd = &tmp[0 + 16]; + u64 *ef = &tmp[16 + 16]; + u64 *efgh = &tmp[16 + 16]; + u64 *key = &tmp[0 + 16 + 32]; + + memcpy(key, priv, 32); + ((u8 *)key)[0] &= 248; + ((u8 *)key)[31] = (((u8 *)key)[31] & 127) | 64; + + x1[0] = 1, x1[1] = x1[2] = x1[3] = 0; + z1[0] = 1, z1[1] = z1[2] = z1[3] = 0; + z2[0] = 1, z2[1] = z2[2] = z2[3] = 0; + memcpy(x2, p_minus_s, sizeof(p_minus_s)); + + j = 3; + for (i = 0; i < 4; ++i) { + while (j < (const int[]){ 64, 64, 64, 63 }[i]) { + u64 bit = (key[i] >> j) & 1; + k = (64 * i + j - 3); + swap = swap ^ bit; + cswap2(swap, xz1, xz2); + swap = bit; + fsub(b, x1, z1); + fadd(a, x1, z1); + fmul(c, &table_ladder[4 * k], b, ef); + fsub(b, a, c); + fadd(a, a, c); + fsqr2(ab, ab, efgh); + fmul2(xz1, xz2, ab, efgh); + ++j; + } + j = 0; + } + + point_double(xz1, 
abcd, efgh); + point_double(xz1, abcd, efgh); + point_double(xz1, abcd, efgh); + encode_point(out, xz1); + + memzero_explicit(tmp, sizeof(tmp)); +} diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519.c b/net/wireguard/crypto/zinc/curve25519/curve25519.c new file mode 100644 index 000000000000..dffaa09c18db --- /dev/null +++ b/net/wireguard/crypto/zinc/curve25519/curve25519.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is an implementation of the Curve25519 ECDH algorithm, using either + * a 32-bit implementation or a 64-bit implementation with 128-bit integers, + * depending on what is supported by the target compiler. + * + * Information: https://cr.yp.to/ecdh.html + */ + +#include <zinc/curve25519.h> +#include "../selftest/run.h" + +#include <asm/unaligned.h> +#include <linux/string.h> +#include <linux/random.h> +#include <linux/module.h> +#include <linux/init.h> +#include <crypto/algapi.h> // For crypto_memneq. + +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "curve25519-x86_64-glue.c" +#elif defined(CONFIG_ZINC_ARCH_ARM) +#include "curve25519-arm-glue.c" +#else +static bool *const curve25519_nobs[] __initconst = { }; +static void __init curve25519_fpu_init(void) +{ +} +static inline bool curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + return false; +} +static inline bool curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + return false; +} +#endif + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) +#include "curve25519-hacl64.c" +#else +#include "curve25519-fiat32.c" +#endif + +static const u8 null_point[CURVE25519_KEY_SIZE] = { 0 }; + +bool curve25519(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + if (!curve25519_arch(mypublic, secret, basepoint)) + curve25519_generic(mypublic, secret, basepoint); + return crypto_memneq(mypublic, null_point, CURVE25519_KEY_SIZE); +} + +bool curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + static const u8 basepoint[CURVE25519_KEY_SIZE] __aligned(32) = { 9 }; + + if (unlikely(!crypto_memneq(secret, null_point, CURVE25519_KEY_SIZE))) + return false; + + if (curve25519_base_arch(pub, secret)) + return crypto_memneq(pub, null_point, CURVE25519_KEY_SIZE); + return curve25519(pub, secret, basepoint); +} + +void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + get_random_bytes_wait(secret, CURVE25519_KEY_SIZE); + curve25519_clamp_secret(secret); +} + +#include "../selftest/curve25519.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init curve25519_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + curve25519_fpu_init(); + if (!selftest_run("curve25519", curve25519_selftest, curve25519_nobs, + ARRAY_SIZE(curve25519_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Curve25519 scalar multiplication"); +MODULE_AUTHOR("Jason A. 
Donenfeld <Jason@zx2c4.com>"); +#endif diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c b/net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c new file mode 100644 index 000000000000..291fe4ba98b0 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-arm-glue.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/hwcap.h> +#include <asm/neon.h> + +asmlinkage void poly1305_init_arm(void *ctx, const u8 key[16]); +asmlinkage void poly1305_blocks_arm(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_emit_arm(void *ctx, u8 mac[16], const u32 nonce[4]); +asmlinkage void poly1305_blocks_neon(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_emit_neon(void *ctx, u8 mac[16], const u32 nonce[4]); + +static bool poly1305_use_neon __ro_after_init; +static bool *const poly1305_nobs[] __initconst = { &poly1305_use_neon }; + +static void __init poly1305_fpu_init(void) +{ +#if defined(CONFIG_ZINC_ARCH_ARM64) + poly1305_use_neon = cpu_have_named_feature(ASIMD); +#elif defined(CONFIG_ZINC_ARCH_ARM) + poly1305_use_neon = elf_hwcap & HWCAP_NEON; +#endif +} + +#if defined(CONFIG_ZINC_ARCH_ARM64) +struct poly1305_arch_internal { + union { + u32 h[5]; + struct { + u64 h0, h1, h2; + }; + }; + u64 is_base2_26; + u64 r[2]; +}; +#elif defined(CONFIG_ZINC_ARCH_ARM) +struct poly1305_arch_internal { + union { + u32 h[5]; + struct { + u64 h0, h1; + u32 h2; + } __packed; + }; + u32 r[4]; + u32 is_base2_26; +}; +#endif + +/* The NEON code uses base 2^26, while the scalar code uses base 2^64 on 64-bit + * and base 2^32 on 32-bit. If we hit the unfortunate situation of using NEON + * and then having to go back to scalar -- because the user is silly and has + * called the update function from two separate contexts -- then we need to + * convert back to the original base before proceeding. The below function is + * written for 64-bit integers, and so we have to swap words at the end on + * big-endian 32-bit. It is possible to reason that the initial reduction below + * is sufficient given the implementation invariants. However, for an avoidance + * of doubt and because this is not performance critical, we do the full + * reduction anyway. 
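+ *
+ * Concretely, the base-2^26 value
+ *   h = h[0] + h[1]*2^26 + h[2]*2^52 + h[3]*2^78 + h[4]*2^104
+ * is regrouped into three 64-bit words as
+ *   h0 = h[0] | h[1] << 26 | h[2] << 52
+ *   h1 = h[2] >> 12 | h[3] << 14 | h[4] << 40
+ *   h2 = h[4] >> 24
+ * which is exactly the shift pattern used in convert_to_base2_64() below.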
+ */ +static void convert_to_base2_64(void *ctx) +{ + struct poly1305_arch_internal *state = ctx; + u32 cy; + + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !state->is_base2_26) + return; + + cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy; + cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy; + cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy; + cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy; + state->h0 = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0]; + state->h1 = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12); + state->h2 = state->h[4] >> 24; + if (IS_ENABLED(CONFIG_ZINC_ARCH_ARM) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { + state->h0 = rol64(state->h0, 32); + state->h1 = rol64(state->h1, 32); + } +#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1)) + cy = (state->h2 >> 2) + (state->h2 & ~3ULL); + state->h2 &= 3; + state->h0 += cy; + state->h1 += (cy = ULT(state->h0, cy)); + state->h2 += ULT(state->h1, cy); +#undef ULT + state->is_base2_26 = 0; +} + +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + poly1305_init_arm(ctx, key); + return true; +} + +static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + /* SIMD disables preemption, so relax after processing each page. */ + BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || + PAGE_SIZE % POLY1305_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !poly1305_use_neon || + !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_blocks_arm(ctx, inp, len, padbit); + return true; + } + + for (;;) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + poly1305_blocks_neon(ctx, inp, bytes, padbit); + len -= bytes; + if (!len) + break; + inp += bytes; + simd_relax(simd_context); + } + return true; +} + +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !poly1305_use_neon || + !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_emit_arm(ctx, mac, nonce); + } else + poly1305_emit_neon(ctx, mac, nonce); + return true; +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl new file mode 100644 index 000000000000..468f41b76fbd --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-arm.pl @@ -0,0 +1,1276 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. 
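+#
+# The table below compares the hand-written integer-only (IALU) path with
+# the NEON path on various cores; the figures are presumably cycles per
+# processed byte, with +% giving the improvement over gcc-4.4-generated code.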
+# ==================================================================== +# +# IALU(*)/gcc-4.4 NEON +# +# ARM11xx(ARMv6) 7.78/+100% - +# Cortex-A5 6.35/+130% 3.00 +# Cortex-A8 6.25/+115% 2.36 +# Cortex-A9 5.10/+95% 2.55 +# Cortex-A15 3.85/+85% 1.25(**) +# Snapdragon S4 5.70/+100% 1.48(**) +# +# (*) this is for -march=armv6, i.e. with bunch of ldrb loading data; +# (**) these are trade-off results, they can be improved by ~8% but at +# the cost of 15/12% regression on Cortex-A5/A7, it's even possible +# to improve Cortex-A9 result, but then A5/A7 loose more than 20%; + +$flavour = shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +($ctx,$inp,$len,$padbit)=map("r$_",(0..3)); + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +#else +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ +# define poly1305_init poly1305_init_arm +# define poly1305_blocks poly1305_blocks_arm +# define poly1305_emit poly1305_emit_arm +#endif + +.text +#if defined(__thumb2__) +.syntax unified +.thumb +#else +.code 32 +#endif + +.globl poly1305_emit +.globl poly1305_blocks +.globl poly1305_init +.type poly1305_init,%function +.align 5 +poly1305_init: +.Lpoly1305_init: + stmdb sp!,{r4-r11} + + eor r3,r3,r3 + cmp $inp,#0 + str r3,[$ctx,#0] @ zero hash value + str r3,[$ctx,#4] + str r3,[$ctx,#8] + str r3,[$ctx,#12] + str r3,[$ctx,#16] + str r3,[$ctx,#36] @ is_base2_26 + add $ctx,$ctx,#20 + +#ifdef __thumb2__ + it eq +#endif + moveq r0,#0 + beq .Lno_key + +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + adr r11,.Lpoly1305_init + ldr r12,.LOPENSSL_armcap +#endif + ldrb r4,[$inp,#0] + mov r10,#0x0fffffff + ldrb r5,[$inp,#1] + and r3,r10,#-4 @ 0x0ffffffc + ldrb r6,[$inp,#2] + ldrb r7,[$inp,#3] + orr r4,r4,r5,lsl#8 + ldrb r5,[$inp,#4] + orr r4,r4,r6,lsl#16 + ldrb r6,[$inp,#5] + orr r4,r4,r7,lsl#24 + ldrb r7,[$inp,#6] + and r4,r4,r10 + +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + ldr r12,[r11,r12] @ OPENSSL_armcap_P +# ifdef __APPLE__ + ldr r12,[r12] +# endif +#endif + ldrb r8,[$inp,#7] + orr r5,r5,r6,lsl#8 + ldrb r6,[$inp,#8] + orr r5,r5,r7,lsl#16 + ldrb r7,[$inp,#9] + orr r5,r5,r8,lsl#24 + ldrb r8,[$inp,#10] + and r5,r5,r3 + +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + tst r12,#ARMV7_NEON @ check for NEON +# ifdef __APPLE__ + adr r9,poly1305_blocks_neon + adr r11,poly1305_blocks +# ifdef __thumb2__ + it ne +# endif + movne r11,r9 + adr r12,poly1305_emit + adr r10,poly1305_emit_neon +# ifdef __thumb2__ + it ne +# endif + movne r12,r10 +# else +# ifdef __thumb2__ + itete eq +# endif + addeq r12,r11,#(poly1305_emit-.Lpoly1305_init) + addne r12,r11,#(poly1305_emit_neon-.Lpoly1305_init) + addeq r11,r11,#(poly1305_blocks-.Lpoly1305_init) + addne r11,r11,#(poly1305_blocks_neon-.Lpoly1305_init) +# endif +# ifdef __thumb2__ + orr r12,r12,#1 @ thumb-ify address + orr r11,r11,#1 +# endif +#endif + ldrb r9,[$inp,#11] + orr r6,r6,r7,lsl#8 + ldrb r7,[$inp,#12] + orr r6,r6,r8,lsl#16 + ldrb r8,[$inp,#13] + orr r6,r6,r9,lsl#24 + ldrb r9,[$inp,#14] + and r6,r6,r3 + + ldrb r10,[$inp,#15] + orr r7,r7,r8,lsl#8 + str r4,[$ctx,#0] + orr r7,r7,r9,lsl#16 + str r5,[$ctx,#4] + 
orr r7,r7,r10,lsl#24 + str r6,[$ctx,#8] + and r7,r7,r3 + str r7,[$ctx,#12] +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) + stmia r2,{r11,r12} @ fill functions table + mov r0,#1 +#else + mov r0,#0 +#endif +.Lno_key: + ldmia sp!,{r4-r11} +#if __ARM_ARCH__>=5 + ret @ bx lr +#else + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size poly1305_init,.-poly1305_init +___ +{ +my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12)); +my ($s1,$s2,$s3)=($r1,$r2,$r3); + +$code.=<<___; +.type poly1305_blocks,%function +.align 5 +poly1305_blocks: +.Lpoly1305_blocks: + stmdb sp!,{r3-r11,lr} + + ands $len,$len,#-16 + beq .Lno_data + + cmp $padbit,#0 + add $len,$len,$inp @ end pointer + sub sp,sp,#32 + + ldmia $ctx,{$h0-$r3} @ load context + + str $ctx,[sp,#12] @ offload stuff + mov lr,$inp + str $len,[sp,#16] + str $r1,[sp,#20] + str $r2,[sp,#24] + str $r3,[sp,#28] + b .Loop + +.Loop: +#if __ARM_ARCH__<7 + ldrb r0,[lr],#16 @ load input +# ifdef __thumb2__ + it hi +# endif + addhi $h4,$h4,#1 @ 1<<128 + ldrb r1,[lr,#-15] + ldrb r2,[lr,#-14] + ldrb r3,[lr,#-13] + orr r1,r0,r1,lsl#8 + ldrb r0,[lr,#-12] + orr r2,r1,r2,lsl#16 + ldrb r1,[lr,#-11] + orr r3,r2,r3,lsl#24 + ldrb r2,[lr,#-10] + adds $h0,$h0,r3 @ accumulate input + + ldrb r3,[lr,#-9] + orr r1,r0,r1,lsl#8 + ldrb r0,[lr,#-8] + orr r2,r1,r2,lsl#16 + ldrb r1,[lr,#-7] + orr r3,r2,r3,lsl#24 + ldrb r2,[lr,#-6] + adcs $h1,$h1,r3 + + ldrb r3,[lr,#-5] + orr r1,r0,r1,lsl#8 + ldrb r0,[lr,#-4] + orr r2,r1,r2,lsl#16 + ldrb r1,[lr,#-3] + orr r3,r2,r3,lsl#24 + ldrb r2,[lr,#-2] + adcs $h2,$h2,r3 + + ldrb r3,[lr,#-1] + orr r1,r0,r1,lsl#8 + str lr,[sp,#8] @ offload input pointer + orr r2,r1,r2,lsl#16 + add $s1,$r1,$r1,lsr#2 + orr r3,r2,r3,lsl#24 +#else + ldr r0,[lr],#16 @ load input +# ifdef __thumb2__ + it hi +# endif + addhi $h4,$h4,#1 @ padbit + ldr r1,[lr,#-12] + ldr r2,[lr,#-8] + ldr r3,[lr,#-4] +# ifdef __ARMEB__ + rev r0,r0 + rev r1,r1 + rev r2,r2 + rev r3,r3 +# endif + adds $h0,$h0,r0 @ accumulate input + str lr,[sp,#8] @ offload input pointer + adcs $h1,$h1,r1 + add $s1,$r1,$r1,lsr#2 + adcs $h2,$h2,r2 +#endif + add $s2,$r2,$r2,lsr#2 + adcs $h3,$h3,r3 + add $s3,$r3,$r3,lsr#2 + + umull r2,r3,$h1,$r0 + adc $h4,$h4,#0 + umull r0,r1,$h0,$r0 + umlal r2,r3,$h4,$s1 + umlal r0,r1,$h3,$s1 + ldr $r1,[sp,#20] @ reload $r1 + umlal r2,r3,$h2,$s3 + umlal r0,r1,$h1,$s3 + umlal r2,r3,$h3,$s2 + umlal r0,r1,$h2,$s2 + umlal r2,r3,$h0,$r1 + str r0,[sp,#0] @ future $h0 + mul r0,$s2,$h4 + ldr $r2,[sp,#24] @ reload $r2 + adds r2,r2,r1 @ d1+=d0>>32 + eor r1,r1,r1 + adc lr,r3,#0 @ future $h2 + str r2,[sp,#4] @ future $h1 + + mul r2,$s3,$h4 + eor r3,r3,r3 + umlal r0,r1,$h3,$s3 + ldr $r3,[sp,#28] @ reload $r3 + umlal r2,r3,$h3,$r0 + umlal r0,r1,$h2,$r0 + umlal r2,r3,$h2,$r1 + umlal r0,r1,$h1,$r1 + umlal r2,r3,$h1,$r2 + umlal r0,r1,$h0,$r2 + umlal r2,r3,$h0,$r3 + ldr $h0,[sp,#0] + mul $h4,$r0,$h4 + ldr $h1,[sp,#4] + + adds $h2,lr,r0 @ d2+=d1>>32 + ldr lr,[sp,#8] @ reload input pointer + adc r1,r1,#0 + adds $h3,r2,r1 @ d3+=d2>>32 + ldr r0,[sp,#16] @ reload end pointer + adc r3,r3,#0 + add $h4,$h4,r3 @ h4+=d3>>32 + + and r1,$h4,#-4 + and $h4,$h4,#3 + add r1,r1,r1,lsr#2 @ *=5 + adds $h0,$h0,r1 + adcs $h1,$h1,#0 + adcs $h2,$h2,#0 + adcs $h3,$h3,#0 + adc $h4,$h4,#0 + + cmp r0,lr @ done yet? 
+ bhi .Loop + + ldr $ctx,[sp,#12] + add sp,sp,#32 + stmia $ctx,{$h0-$h4} @ store the result + +.Lno_data: +#if __ARM_ARCH__>=5 + ldmia sp!,{r3-r11,pc} +#else + ldmia sp!,{r3-r11,lr} + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size poly1305_blocks,.-poly1305_blocks +___ +} +{ +my ($ctx,$mac,$nonce)=map("r$_",(0..2)); +my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11)); +my $g4=$h4; + +$code.=<<___; +.type poly1305_emit,%function +.align 5 +poly1305_emit: + stmdb sp!,{r4-r11} +.Lpoly1305_emit_enter: + + ldmia $ctx,{$h0-$h4} + adds $g0,$h0,#5 @ compare to modulus + adcs $g1,$h1,#0 + adcs $g2,$h2,#0 + adcs $g3,$h3,#0 + adc $g4,$h4,#0 + tst $g4,#4 @ did it carry/borrow? + +#ifdef __thumb2__ + it ne +#endif + movne $h0,$g0 + ldr $g0,[$nonce,#0] +#ifdef __thumb2__ + it ne +#endif + movne $h1,$g1 + ldr $g1,[$nonce,#4] +#ifdef __thumb2__ + it ne +#endif + movne $h2,$g2 + ldr $g2,[$nonce,#8] +#ifdef __thumb2__ + it ne +#endif + movne $h3,$g3 + ldr $g3,[$nonce,#12] + + adds $h0,$h0,$g0 + adcs $h1,$h1,$g1 + adcs $h2,$h2,$g2 + adc $h3,$h3,$g3 + +#if __ARM_ARCH__>=7 +# ifdef __ARMEB__ + rev $h0,$h0 + rev $h1,$h1 + rev $h2,$h2 + rev $h3,$h3 +# endif + str $h0,[$mac,#0] + str $h1,[$mac,#4] + str $h2,[$mac,#8] + str $h3,[$mac,#12] +#else + strb $h0,[$mac,#0] + mov $h0,$h0,lsr#8 + strb $h1,[$mac,#4] + mov $h1,$h1,lsr#8 + strb $h2,[$mac,#8] + mov $h2,$h2,lsr#8 + strb $h3,[$mac,#12] + mov $h3,$h3,lsr#8 + + strb $h0,[$mac,#1] + mov $h0,$h0,lsr#8 + strb $h1,[$mac,#5] + mov $h1,$h1,lsr#8 + strb $h2,[$mac,#9] + mov $h2,$h2,lsr#8 + strb $h3,[$mac,#13] + mov $h3,$h3,lsr#8 + + strb $h0,[$mac,#2] + mov $h0,$h0,lsr#8 + strb $h1,[$mac,#6] + mov $h1,$h1,lsr#8 + strb $h2,[$mac,#10] + mov $h2,$h2,lsr#8 + strb $h3,[$mac,#14] + mov $h3,$h3,lsr#8 + + strb $h0,[$mac,#3] + strb $h1,[$mac,#7] + strb $h2,[$mac,#11] + strb $h3,[$mac,#15] +#endif + ldmia sp!,{r4-r11} +#if __ARM_ARCH__>=5 + ret @ bx lr +#else + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ interoperable with Thumb ISA:-) +#endif +.size poly1305_emit,.-poly1305_emit +___ +{ +my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9)); +my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14)); +my ($T0,$T1,$MASK) = map("q$_",(15,4,0)); + +my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7)); + +$code.=<<___; +#if (defined(__KERNEL__) && defined(CONFIG_KERNEL_MODE_NEON)) || (!defined(__KERNEL__) && __ARM_MAX_ARCH__>=7) +.fpu neon + +.type poly1305_init_neon,%function +.align 5 +poly1305_init_neon: +.Lpoly1305_init_neon: + ldr r4,[$ctx,#20] @ load key base 2^32 + ldr r5,[$ctx,#24] + ldr r6,[$ctx,#28] + ldr r7,[$ctx,#32] + + and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 + mov r3,r4,lsr#26 + mov r4,r5,lsr#20 + orr r3,r3,r5,lsl#6 + mov r5,r6,lsr#14 + orr r4,r4,r6,lsl#12 + mov r6,r7,lsr#8 + orr r5,r5,r7,lsl#18 + and r3,r3,#0x03ffffff + and r4,r4,#0x03ffffff + and r5,r5,#0x03ffffff + + vdup.32 $R0,r2 @ r^1 in both lanes + add r2,r3,r3,lsl#2 @ *5 + vdup.32 $R1,r3 + add r3,r4,r4,lsl#2 + vdup.32 $S1,r2 + vdup.32 $R2,r4 + add r4,r5,r5,lsl#2 + vdup.32 $S2,r3 + vdup.32 $R3,r5 + add r5,r6,r6,lsl#2 + vdup.32 $S3,r4 + vdup.32 $R4,r6 + vdup.32 $S4,r5 + + mov $zeros,#2 @ counter + +.Lsquare_neon: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + @ d4 = 
h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + + vmull.u32 $D0,$R0,${R0}[1] + vmull.u32 $D1,$R1,${R0}[1] + vmull.u32 $D2,$R2,${R0}[1] + vmull.u32 $D3,$R3,${R0}[1] + vmull.u32 $D4,$R4,${R0}[1] + + vmlal.u32 $D0,$R4,${S1}[1] + vmlal.u32 $D1,$R0,${R1}[1] + vmlal.u32 $D2,$R1,${R1}[1] + vmlal.u32 $D3,$R2,${R1}[1] + vmlal.u32 $D4,$R3,${R1}[1] + + vmlal.u32 $D0,$R3,${S2}[1] + vmlal.u32 $D1,$R4,${S2}[1] + vmlal.u32 $D3,$R1,${R2}[1] + vmlal.u32 $D2,$R0,${R2}[1] + vmlal.u32 $D4,$R2,${R2}[1] + + vmlal.u32 $D0,$R2,${S3}[1] + vmlal.u32 $D3,$R0,${R3}[1] + vmlal.u32 $D1,$R3,${S3}[1] + vmlal.u32 $D2,$R4,${S3}[1] + vmlal.u32 $D4,$R1,${R3}[1] + + vmlal.u32 $D3,$R4,${S4}[1] + vmlal.u32 $D0,$R1,${S4}[1] + vmlal.u32 $D1,$R2,${S4}[1] + vmlal.u32 $D2,$R3,${S4}[1] + vmlal.u32 $D4,$R0,${R4}[1] + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein + @ and P. Schwabe + @ + @ H0>>+H1>>+H2>>+H3>>+H4 + @ H3>>+H4>>*5+H0>>+H1 + @ + @ Trivia. + @ + @ Result of multiplication of n-bit number by m-bit number is + @ n+m bits wide. However! Even though 2^n is a n+1-bit number, + @ m-bit number multiplied by 2^n is still n+m bits wide. + @ + @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2, + @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit + @ one is n+1 bits wide. + @ + @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that + @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4 + @ can be 27. However! In cases when their width exceeds 26 bits + @ they are limited by 2^26+2^6. This in turn means that *sum* + @ of the products with these values can still be viewed as sum + @ of 52-bit numbers as long as the amount of addends is not a + @ power of 2. For example, + @ + @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4, + @ + @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or + @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than + @ 8 * (2^52) or 2^55. However, the value is then multiplied by + @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12), + @ which is less than 32 * (2^52) or 2^57. And when processing + @ data we are looking at triple as many addends... + @ + @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and + @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the + @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while + @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32 + @ instruction accepts 2x32-bit input and writes 2x64-bit result. + @ This means that result of reduction have to be compressed upon + @ loop wrap-around. This can be done in the process of reduction + @ to minimize amount of instructions [as well as amount of + @ 128-bit instructions, which benefits low-end processors], but + @ one has to watch for H2 (which is narrower than H0) and 5*H4 + @ not being wider than 58 bits, so that result of right shift + @ by 26 bits fits in 32 bits. This is also useful on x86, + @ because it allows to use paddd in place for paddq, which + @ benefits Atom, where paddq is ridiculously slow. 
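+	@
+	@ In scalar terms, the interleaved carry chain below is (a recap of
+	@ the vector code that follows, given here only for illustration):
+	@
+	@	c = h3 >> 26; h3 &= 0x3ffffff; h4 += c;	@ h3 -> h4
+	@	c = h0 >> 26; h0 &= 0x3ffffff; h1 += c;	@ h0 -> h1
+	@	c = h4 >> 26; h4 &= 0x3ffffff; h0 += c*5;	@ h4 -> h0, *5 as c+(c<<2)
+	@	c = h1 >> 26; h1 &= 0x3ffffff; h2 += c;	@ h1 -> h2
+	@	c = h2 >> 26; h2 &= 0x3ffffff; h3 += c;	@ h2 -> h3
+	@	c = h0 >> 26; h0 &= 0x3ffffff; h1 += c;	@ h0 -> h1
+	@	c = h3 >> 26; h3 &= 0x3ffffff; h4 += c;	@ h3 -> h4
+	@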
+ + vshr.u64 $T0,$D3,#26 + vmovn.i64 $D3#lo,$D3 + vshr.u64 $T1,$D0,#26 + vmovn.i64 $D0#lo,$D0 + vadd.i64 $D4,$D4,$T0 @ h3 -> h4 + vbic.i32 $D3#lo,#0xfc000000 @ &=0x03ffffff + vadd.i64 $D1,$D1,$T1 @ h0 -> h1 + vbic.i32 $D0#lo,#0xfc000000 + + vshrn.u64 $T0#lo,$D4,#26 + vmovn.i64 $D4#lo,$D4 + vshr.u64 $T1,$D1,#26 + vmovn.i64 $D1#lo,$D1 + vadd.i64 $D2,$D2,$T1 @ h1 -> h2 + vbic.i32 $D4#lo,#0xfc000000 + vbic.i32 $D1#lo,#0xfc000000 + + vadd.i32 $D0#lo,$D0#lo,$T0#lo + vshl.u32 $T0#lo,$T0#lo,#2 + vshrn.u64 $T1#lo,$D2,#26 + vmovn.i64 $D2#lo,$D2 + vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0 + vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3 + vbic.i32 $D2#lo,#0xfc000000 + + vshr.u32 $T0#lo,$D0#lo,#26 + vbic.i32 $D0#lo,#0xfc000000 + vshr.u32 $T1#lo,$D3#lo,#26 + vbic.i32 $D3#lo,#0xfc000000 + vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1 + vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4 + + subs $zeros,$zeros,#1 + beq .Lsquare_break_neon + + add $tbl0,$ctx,#(48+0*9*4) + add $tbl1,$ctx,#(48+1*9*4) + + vtrn.32 $R0,$D0#lo @ r^2:r^1 + vtrn.32 $R2,$D2#lo + vtrn.32 $R3,$D3#lo + vtrn.32 $R1,$D1#lo + vtrn.32 $R4,$D4#lo + + vshl.u32 $S2,$R2,#2 @ *5 + vshl.u32 $S3,$R3,#2 + vshl.u32 $S1,$R1,#2 + vshl.u32 $S4,$R4,#2 + vadd.i32 $S2,$S2,$R2 + vadd.i32 $S1,$S1,$R1 + vadd.i32 $S3,$S3,$R3 + vadd.i32 $S4,$S4,$R4 + + vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! + vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! + vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vst1.32 {${S4}[0]},[$tbl0,:32] + vst1.32 {${S4}[1]},[$tbl1,:32] + + b .Lsquare_neon + +.align 4 +.Lsquare_break_neon: + add $tbl0,$ctx,#(48+2*4*9) + add $tbl1,$ctx,#(48+3*4*9) + + vmov $R0,$D0#lo @ r^4:r^3 + vshl.u32 $S1,$D1#lo,#2 @ *5 + vmov $R1,$D1#lo + vshl.u32 $S2,$D2#lo,#2 + vmov $R2,$D2#lo + vshl.u32 $S3,$D3#lo,#2 + vmov $R3,$D3#lo + vshl.u32 $S4,$D4#lo,#2 + vmov $R4,$D4#lo + vadd.i32 $S1,$S1,$D1#lo + vadd.i32 $S2,$S2,$D2#lo + vadd.i32 $S3,$S3,$D3#lo + vadd.i32 $S4,$S4,$D4#lo + + vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! + vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! + vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vst1.32 {${S4}[0]},[$tbl0] + vst1.32 {${S4}[1]},[$tbl1] + + ret @ bx lr +.size poly1305_init_neon,.-poly1305_init_neon + +#ifdef __KERNEL__ +.globl poly1305_blocks_neon +#endif +.type poly1305_blocks_neon,%function +.align 5 +poly1305_blocks_neon: + ldr ip,[$ctx,#36] @ is_base2_26 + ands $len,$len,#-16 + beq .Lno_data_neon + + cmp $len,#64 + bhs .Lenter_neon + tst ip,ip @ is_base2_26? + beq .Lpoly1305_blocks + +.Lenter_neon: + stmdb sp!,{r4-r7} + vstmdb sp!,{d8-d15} @ ABI specification says so + + tst ip,ip @ is_base2_26? 
+ bne .Lbase2_26_neon + + stmdb sp!,{r1-r3,lr} + bl .Lpoly1305_init_neon + + ldr r4,[$ctx,#0] @ load hash value base 2^32 + ldr r5,[$ctx,#4] + ldr r6,[$ctx,#8] + ldr r7,[$ctx,#12] + ldr ip,[$ctx,#16] + + and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 + mov r3,r4,lsr#26 + veor $D0#lo,$D0#lo,$D0#lo + mov r4,r5,lsr#20 + orr r3,r3,r5,lsl#6 + veor $D1#lo,$D1#lo,$D1#lo + mov r5,r6,lsr#14 + orr r4,r4,r6,lsl#12 + veor $D2#lo,$D2#lo,$D2#lo + mov r6,r7,lsr#8 + orr r5,r5,r7,lsl#18 + veor $D3#lo,$D3#lo,$D3#lo + and r3,r3,#0x03ffffff + orr r6,r6,ip,lsl#24 + veor $D4#lo,$D4#lo,$D4#lo + and r4,r4,#0x03ffffff + mov r1,#1 + and r5,r5,#0x03ffffff + str r1,[$ctx,#36] @ is_base2_26 + + vmov.32 $D0#lo[0],r2 + vmov.32 $D1#lo[0],r3 + vmov.32 $D2#lo[0],r4 + vmov.32 $D3#lo[0],r5 + vmov.32 $D4#lo[0],r6 + adr $zeros,.Lzeros + + ldmia sp!,{r1-r3,lr} + b .Lbase2_32_neon + +.align 4 +.Lbase2_26_neon: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ load hash value + + veor $D0#lo,$D0#lo,$D0#lo + veor $D1#lo,$D1#lo,$D1#lo + veor $D2#lo,$D2#lo,$D2#lo + veor $D3#lo,$D3#lo,$D3#lo + veor $D4#lo,$D4#lo,$D4#lo + vld4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]! + adr $zeros,.Lzeros + vld1.32 {$D4#lo[0]},[$ctx] + sub $ctx,$ctx,#16 @ rewind + +.Lbase2_32_neon: + add $in2,$inp,#32 + mov $padbit,$padbit,lsl#24 + tst $len,#31 + beq .Leven + + vld4.32 {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]! + vmov.32 $H4#lo[0],$padbit + sub $len,$len,#16 + add $in2,$inp,#32 + +# ifdef __ARMEB__ + vrev32.8 $H0,$H0 + vrev32.8 $H3,$H3 + vrev32.8 $H1,$H1 + vrev32.8 $H2,$H2 +# endif + vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26 + vshl.u32 $H3#lo,$H3#lo,#18 + + vsri.u32 $H3#lo,$H2#lo,#14 + vshl.u32 $H2#lo,$H2#lo,#12 + vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi + + vbic.i32 $H3#lo,#0xfc000000 + vsri.u32 $H2#lo,$H1#lo,#20 + vshl.u32 $H1#lo,$H1#lo,#6 + + vbic.i32 $H2#lo,#0xfc000000 + vsri.u32 $H1#lo,$H0#lo,#26 + vadd.i32 $H3#hi,$H3#lo,$D3#lo + + vbic.i32 $H0#lo,#0xfc000000 + vbic.i32 $H1#lo,#0xfc000000 + vadd.i32 $H2#hi,$H2#lo,$D2#lo + + vadd.i32 $H0#hi,$H0#lo,$D0#lo + vadd.i32 $H1#hi,$H1#lo,$D1#lo + + mov $tbl1,$zeros + add $tbl0,$ctx,#48 + + cmp $len,$len + b .Long_tail + +.align 4 +.Leven: + subs $len,$len,#64 + it lo + movlo $in2,$zeros + + vmov.i32 $H4,#1<<24 @ padbit, yes, always + vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1] + add $inp,$inp,#64 + vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0) + add $in2,$in2,#64 + itt hi + addhi $tbl1,$ctx,#(48+1*9*4) + addhi $tbl0,$ctx,#(48+3*9*4) + +# ifdef __ARMEB__ + vrev32.8 $H0,$H0 + vrev32.8 $H3,$H3 + vrev32.8 $H1,$H1 + vrev32.8 $H2,$H2 +# endif + vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26 + vshl.u32 $H3,$H3,#18 + + vsri.u32 $H3,$H2,#14 + vshl.u32 $H2,$H2,#12 + + vbic.i32 $H3,#0xfc000000 + vsri.u32 $H2,$H1,#20 + vshl.u32 $H1,$H1,#6 + + vbic.i32 $H2,#0xfc000000 + vsri.u32 $H1,$H0,#26 + + vbic.i32 $H0,#0xfc000000 + vbic.i32 $H1,#0xfc000000 + + bls .Lskip_loop + + vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^2 + vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4 + vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! 
+ b .Loop_neon + +.align 5 +.Loop_neon: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 + @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r + @ \___________________/ + @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 + @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r + @ \___________________/ \____________________/ + @ + @ Note that we start with inp[2:3]*r^2. This is because it + @ doesn't depend on reduction in previous iteration. + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ inp[2:3]*r^2 + + vadd.i32 $H2#lo,$H2#lo,$D2#lo @ accumulate inp[0:1] + vmull.u32 $D2,$H2#hi,${R0}[1] + vadd.i32 $H0#lo,$H0#lo,$D0#lo + vmull.u32 $D0,$H0#hi,${R0}[1] + vadd.i32 $H3#lo,$H3#lo,$D3#lo + vmull.u32 $D3,$H3#hi,${R0}[1] + vmlal.u32 $D2,$H1#hi,${R1}[1] + vadd.i32 $H1#lo,$H1#lo,$D1#lo + vmull.u32 $D1,$H1#hi,${R0}[1] + + vadd.i32 $H4#lo,$H4#lo,$D4#lo + vmull.u32 $D4,$H4#hi,${R0}[1] + subs $len,$len,#64 + vmlal.u32 $D0,$H4#hi,${S1}[1] + it lo + movlo $in2,$zeros + vmlal.u32 $D3,$H2#hi,${R1}[1] + vld1.32 ${S4}[1],[$tbl1,:32] + vmlal.u32 $D1,$H0#hi,${R1}[1] + vmlal.u32 $D4,$H3#hi,${R1}[1] + + vmlal.u32 $D0,$H3#hi,${S2}[1] + vmlal.u32 $D3,$H1#hi,${R2}[1] + vmlal.u32 $D4,$H2#hi,${R2}[1] + vmlal.u32 $D1,$H4#hi,${S2}[1] + vmlal.u32 $D2,$H0#hi,${R2}[1] + + vmlal.u32 $D3,$H0#hi,${R3}[1] + vmlal.u32 $D0,$H2#hi,${S3}[1] + vmlal.u32 $D4,$H1#hi,${R3}[1] + vmlal.u32 $D1,$H3#hi,${S3}[1] + vmlal.u32 $D2,$H4#hi,${S3}[1] + + vmlal.u32 $D3,$H4#hi,${S4}[1] + vmlal.u32 $D0,$H1#hi,${S4}[1] + vmlal.u32 $D4,$H0#hi,${R4}[1] + vmlal.u32 $D1,$H2#hi,${S4}[1] + vmlal.u32 $D2,$H3#hi,${S4}[1] + + vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0) + add $in2,$in2,#64 + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ (hash+inp[0:1])*r^4 and accumulate + + vmlal.u32 $D3,$H3#lo,${R0}[0] + vmlal.u32 $D0,$H0#lo,${R0}[0] + vmlal.u32 $D4,$H4#lo,${R0}[0] + vmlal.u32 $D1,$H1#lo,${R0}[0] + vmlal.u32 $D2,$H2#lo,${R0}[0] + vld1.32 ${S4}[0],[$tbl0,:32] + + vmlal.u32 $D3,$H2#lo,${R1}[0] + vmlal.u32 $D0,$H4#lo,${S1}[0] + vmlal.u32 $D4,$H3#lo,${R1}[0] + vmlal.u32 $D1,$H0#lo,${R1}[0] + vmlal.u32 $D2,$H1#lo,${R1}[0] + + vmlal.u32 $D3,$H1#lo,${R2}[0] + vmlal.u32 $D0,$H3#lo,${S2}[0] + vmlal.u32 $D4,$H2#lo,${R2}[0] + vmlal.u32 $D1,$H4#lo,${S2}[0] + vmlal.u32 $D2,$H0#lo,${R2}[0] + + vmlal.u32 $D3,$H0#lo,${R3}[0] + vmlal.u32 $D0,$H2#lo,${S3}[0] + vmlal.u32 $D4,$H1#lo,${R3}[0] + vmlal.u32 $D1,$H3#lo,${S3}[0] + vmlal.u32 $D3,$H4#lo,${S4}[0] + + vmlal.u32 $D2,$H4#lo,${S3}[0] + vmlal.u32 $D0,$H1#lo,${S4}[0] + vmlal.u32 $D4,$H0#lo,${R4}[0] + vmov.i32 $H4,#1<<24 @ padbit, yes, always + vmlal.u32 $D1,$H2#lo,${S4}[0] + vmlal.u32 $D2,$H3#lo,${S4}[0] + + vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1] + add $inp,$inp,#64 +# ifdef __ARMEB__ + vrev32.8 $H0,$H0 + vrev32.8 $H1,$H1 + vrev32.8 $H2,$H2 + vrev32.8 $H3,$H3 +# endif + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ lazy reduction interleaved with base 2^32 -> base 2^26 of + @ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4. 
+ + vshr.u64 $T0,$D3,#26 + vmovn.i64 $D3#lo,$D3 + vshr.u64 $T1,$D0,#26 + vmovn.i64 $D0#lo,$D0 + vadd.i64 $D4,$D4,$T0 @ h3 -> h4 + vbic.i32 $D3#lo,#0xfc000000 + vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26 + vadd.i64 $D1,$D1,$T1 @ h0 -> h1 + vshl.u32 $H3,$H3,#18 + vbic.i32 $D0#lo,#0xfc000000 + + vshrn.u64 $T0#lo,$D4,#26 + vmovn.i64 $D4#lo,$D4 + vshr.u64 $T1,$D1,#26 + vmovn.i64 $D1#lo,$D1 + vadd.i64 $D2,$D2,$T1 @ h1 -> h2 + vsri.u32 $H3,$H2,#14 + vbic.i32 $D4#lo,#0xfc000000 + vshl.u32 $H2,$H2,#12 + vbic.i32 $D1#lo,#0xfc000000 + + vadd.i32 $D0#lo,$D0#lo,$T0#lo + vshl.u32 $T0#lo,$T0#lo,#2 + vbic.i32 $H3,#0xfc000000 + vshrn.u64 $T1#lo,$D2,#26 + vmovn.i64 $D2#lo,$D2 + vaddl.u32 $D0,$D0#lo,$T0#lo @ h4 -> h0 [widen for a sec] + vsri.u32 $H2,$H1,#20 + vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3 + vshl.u32 $H1,$H1,#6 + vbic.i32 $D2#lo,#0xfc000000 + vbic.i32 $H2,#0xfc000000 + + vshrn.u64 $T0#lo,$D0,#26 @ re-narrow + vmovn.i64 $D0#lo,$D0 + vsri.u32 $H1,$H0,#26 + vbic.i32 $H0,#0xfc000000 + vshr.u32 $T1#lo,$D3#lo,#26 + vbic.i32 $D3#lo,#0xfc000000 + vbic.i32 $D0#lo,#0xfc000000 + vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1 + vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4 + vbic.i32 $H1,#0xfc000000 + + bhi .Loop_neon + +.Lskip_loop: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 + + add $tbl1,$ctx,#(48+0*9*4) + add $tbl0,$ctx,#(48+1*9*4) + adds $len,$len,#32 + it ne + movne $len,#0 + bne .Long_tail + + vadd.i32 $H2#hi,$H2#lo,$D2#lo @ add hash value and move to #hi + vadd.i32 $H0#hi,$H0#lo,$D0#lo + vadd.i32 $H3#hi,$H3#lo,$D3#lo + vadd.i32 $H1#hi,$H1#lo,$D1#lo + vadd.i32 $H4#hi,$H4#lo,$D4#lo + +.Long_tail: + vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^1 + vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^2 + + vadd.i32 $H2#lo,$H2#lo,$D2#lo @ can be redundant + vmull.u32 $D2,$H2#hi,$R0 + vadd.i32 $H0#lo,$H0#lo,$D0#lo + vmull.u32 $D0,$H0#hi,$R0 + vadd.i32 $H3#lo,$H3#lo,$D3#lo + vmull.u32 $D3,$H3#hi,$R0 + vadd.i32 $H1#lo,$H1#lo,$D1#lo + vmull.u32 $D1,$H1#hi,$R0 + vadd.i32 $H4#lo,$H4#lo,$D4#lo + vmull.u32 $D4,$H4#hi,$R0 + + vmlal.u32 $D0,$H4#hi,$S1 + vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! + vmlal.u32 $D3,$H2#hi,$R1 + vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vmlal.u32 $D1,$H0#hi,$R1 + vmlal.u32 $D4,$H3#hi,$R1 + vmlal.u32 $D2,$H1#hi,$R1 + + vmlal.u32 $D3,$H1#hi,$R2 + vld1.32 ${S4}[1],[$tbl1,:32] + vmlal.u32 $D0,$H3#hi,$S2 + vld1.32 ${S4}[0],[$tbl0,:32] + vmlal.u32 $D4,$H2#hi,$R2 + vmlal.u32 $D1,$H4#hi,$S2 + vmlal.u32 $D2,$H0#hi,$R2 + + vmlal.u32 $D3,$H0#hi,$R3 + it ne + addne $tbl1,$ctx,#(48+2*9*4) + vmlal.u32 $D0,$H2#hi,$S3 + it ne + addne $tbl0,$ctx,#(48+3*9*4) + vmlal.u32 $D4,$H1#hi,$R3 + vmlal.u32 $D1,$H3#hi,$S3 + vmlal.u32 $D2,$H4#hi,$S3 + + vmlal.u32 $D3,$H4#hi,$S4 + vorn $MASK,$MASK,$MASK @ all-ones, can be redundant + vmlal.u32 $D0,$H1#hi,$S4 + vshr.u64 $MASK,$MASK,#38 + vmlal.u32 $D4,$H0#hi,$R4 + vmlal.u32 $D1,$H2#hi,$S4 + vmlal.u32 $D2,$H3#hi,$S4 + + beq .Lshort_tail + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ (hash+inp[0:1])*r^4:r^3 and accumulate + + vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^3 + vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4 + + vmlal.u32 $D2,$H2#lo,$R0 + vmlal.u32 $D0,$H0#lo,$R0 + vmlal.u32 $D3,$H3#lo,$R0 + vmlal.u32 $D1,$H1#lo,$R0 + vmlal.u32 $D4,$H4#lo,$R0 + + vmlal.u32 $D0,$H4#lo,$S1 + vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! 
+ vmlal.u32 $D3,$H2#lo,$R1 + vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! + vmlal.u32 $D1,$H0#lo,$R1 + vmlal.u32 $D4,$H3#lo,$R1 + vmlal.u32 $D2,$H1#lo,$R1 + + vmlal.u32 $D3,$H1#lo,$R2 + vld1.32 ${S4}[1],[$tbl1,:32] + vmlal.u32 $D0,$H3#lo,$S2 + vld1.32 ${S4}[0],[$tbl0,:32] + vmlal.u32 $D4,$H2#lo,$R2 + vmlal.u32 $D1,$H4#lo,$S2 + vmlal.u32 $D2,$H0#lo,$R2 + + vmlal.u32 $D3,$H0#lo,$R3 + vmlal.u32 $D0,$H2#lo,$S3 + vmlal.u32 $D4,$H1#lo,$R3 + vmlal.u32 $D1,$H3#lo,$S3 + vmlal.u32 $D2,$H4#lo,$S3 + + vmlal.u32 $D3,$H4#lo,$S4 + vorn $MASK,$MASK,$MASK @ all-ones + vmlal.u32 $D0,$H1#lo,$S4 + vshr.u64 $MASK,$MASK,#38 + vmlal.u32 $D4,$H0#lo,$R4 + vmlal.u32 $D1,$H2#lo,$S4 + vmlal.u32 $D2,$H3#lo,$S4 + +.Lshort_tail: + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ horizontal addition + + vadd.i64 $D3#lo,$D3#lo,$D3#hi + vadd.i64 $D0#lo,$D0#lo,$D0#hi + vadd.i64 $D4#lo,$D4#lo,$D4#hi + vadd.i64 $D1#lo,$D1#lo,$D1#hi + vadd.i64 $D2#lo,$D2#lo,$D2#hi + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ lazy reduction, but without narrowing + + vshr.u64 $T0,$D3,#26 + vand.i64 $D3,$D3,$MASK + vshr.u64 $T1,$D0,#26 + vand.i64 $D0,$D0,$MASK + vadd.i64 $D4,$D4,$T0 @ h3 -> h4 + vadd.i64 $D1,$D1,$T1 @ h0 -> h1 + + vshr.u64 $T0,$D4,#26 + vand.i64 $D4,$D4,$MASK + vshr.u64 $T1,$D1,#26 + vand.i64 $D1,$D1,$MASK + vadd.i64 $D2,$D2,$T1 @ h1 -> h2 + + vadd.i64 $D0,$D0,$T0 + vshl.u64 $T0,$T0,#2 + vshr.u64 $T1,$D2,#26 + vand.i64 $D2,$D2,$MASK + vadd.i64 $D0,$D0,$T0 @ h4 -> h0 + vadd.i64 $D3,$D3,$T1 @ h2 -> h3 + + vshr.u64 $T0,$D0,#26 + vand.i64 $D0,$D0,$MASK + vshr.u64 $T1,$D3,#26 + vand.i64 $D3,$D3,$MASK + vadd.i64 $D1,$D1,$T0 @ h0 -> h1 + vadd.i64 $D4,$D4,$T1 @ h3 -> h4 + + cmp $len,#0 + bne .Leven + + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ store hash value + + vst4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]! + vst1.32 {$D4#lo[0]},[$ctx] + + vldmia sp!,{d8-d15} @ epilogue + ldmia sp!,{r4-r7} +.Lno_data_neon: + ret @ bx lr +.size poly1305_blocks_neon,.-poly1305_blocks_neon + +#ifdef __KERNEL__ +.globl poly1305_emit_neon +#endif +.type poly1305_emit_neon,%function +.align 5 +poly1305_emit_neon: + ldr ip,[$ctx,#36] @ is_base2_26 + + stmdb sp!,{r4-r11} + + tst ip,ip + beq .Lpoly1305_emit_enter + + ldmia $ctx,{$h0-$h4} + eor $g0,$g0,$g0 + + adds $h0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32 + mov $h1,$h1,lsr#6 + adcs $h1,$h1,$h2,lsl#20 + mov $h2,$h2,lsr#12 + adcs $h2,$h2,$h3,lsl#14 + mov $h3,$h3,lsr#18 + adcs $h3,$h3,$h4,lsl#8 + adc $h4,$g0,$h4,lsr#24 @ can be partially reduced ... + + and $g0,$h4,#-4 @ ... so reduce + and $h4,$h3,#3 + add $g0,$g0,$g0,lsr#2 @ *= 5 + adds $h0,$h0,$g0 + adcs $h1,$h1,#0 + adcs $h2,$h2,#0 + adcs $h3,$h3,#0 + adc $h4,$h4,#0 + + adds $g0,$h0,#5 @ compare to modulus + adcs $g1,$h1,#0 + adcs $g2,$h2,#0 + adcs $g3,$h3,#0 + adc $g4,$h4,#0 + tst $g4,#4 @ did it carry/borrow? 
+ + it ne + movne $h0,$g0 + ldr $g0,[$nonce,#0] + it ne + movne $h1,$g1 + ldr $g1,[$nonce,#4] + it ne + movne $h2,$g2 + ldr $g2,[$nonce,#8] + it ne + movne $h3,$g3 + ldr $g3,[$nonce,#12] + + adds $h0,$h0,$g0 @ accumulate nonce + adcs $h1,$h1,$g1 + adcs $h2,$h2,$g2 + adc $h3,$h3,$g3 + +# ifdef __ARMEB__ + rev $h0,$h0 + rev $h1,$h1 + rev $h2,$h2 + rev $h3,$h3 +# endif + str $h0,[$mac,#0] @ store the result + str $h1,[$mac,#4] + str $h2,[$mac,#8] + str $h3,[$mac,#12] + + ldmia sp!,{r4-r11} + ret @ bx lr +.size poly1305_emit_neon,.-poly1305_emit_neon + +.align 5 +.Lzeros: +.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +# ifndef __KERNEL__ +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-.Lpoly1305_init +# endif +#endif +___ +} } +$code.=<<___; +.align 2 +#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) +.comm OPENSSL_armcap_P,4,4 +#endif +___ + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/@/ and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or + s/\bret\b/bx lr/go or + s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4 + + print $_,"\n"; +} +close STDOUT; # enforce flush diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl new file mode 100644 index 000000000000..d513b45a149b --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-arm64.pl @@ -0,0 +1,974 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# This module implements Poly1305 hash for ARMv8. +# +# June 2015 +# +# Numbers are cycles per processed byte with poly1305_blocks alone. +# +# IALU/gcc-4.9 NEON +# +# Apple A7 1.86/+5% 0.72 +# Cortex-A53 2.69/+58% 1.47 +# Cortex-A57 2.70/+7% 1.14 +# Denver 1.64/+50% 1.18(*) +# X-Gene 2.13/+68% 2.27 +# Mongoose 1.77/+75% 1.12 +# Kryo 2.70/+55% 1.13 +# +# (*) estimate based on resources availability is less than 1.0, +# i.e. 
measured result is worse than expected, presumably binary +# translator is not almighty; + +$flavour=shift; +if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } +else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } + +if ($flavour && $flavour ne "void") { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or + die "can't locate arm-xlate.pl"; + + open STDOUT,"| \"$^X\" $xlate $flavour $output"; +} else { + open STDOUT,">$output"; +} + +my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3)); +my ($mac,$nonce)=($inp,$len); + +my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14)); + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" +.extern OPENSSL_armcap_P +#else +# define poly1305_init poly1305_init_arm +# define poly1305_blocks poly1305_blocks_arm +# define poly1305_emit poly1305_emit_arm +#endif + +.text + +// forward "declarations" are required for Apple +.globl poly1305_blocks +.globl poly1305_emit +.globl poly1305_init +.type poly1305_init,%function +.align 5 +poly1305_init: + cmp $inp,xzr + stp xzr,xzr,[$ctx] // zero hash value + stp xzr,xzr,[$ctx,#16] // [along with is_base2_26] + + csel x0,xzr,x0,eq + b.eq .Lno_key + +#ifndef __KERNEL__ +# ifdef __ILP32__ + ldrsw $t1,.LOPENSSL_armcap_P +# else + ldr $t1,.LOPENSSL_armcap_P +# endif + adr $t0,.LOPENSSL_armcap_P + ldr w17,[$t0,$t1] +#endif + + ldp $r0,$r1,[$inp] // load key + mov $s1,#0xfffffffc0fffffff + movk $s1,#0x0fff,lsl#48 +#ifdef __AARCH64EB__ + rev $r0,$r0 // flip bytes + rev $r1,$r1 +#endif + and $r0,$r0,$s1 // &=0ffffffc0fffffff + and $s1,$s1,#-4 + and $r1,$r1,$s1 // &=0ffffffc0ffffffc + stp $r0,$r1,[$ctx,#32] // save key value + +#ifndef __KERNEL__ + tst w17,#ARMV7_NEON + + adr $d0,poly1305_blocks + adr $r0,poly1305_blocks_neon + adr $d1,poly1305_emit + adr $r1,poly1305_emit_neon + + csel $d0,$d0,$r0,eq + csel $d1,$d1,$r1,eq + +# ifdef __ILP32__ + stp w12,w13,[$len] +# else + stp $d0,$d1,[$len] +# endif + + mov x0,#1 +#else + mov x0,#0 +#endif +.Lno_key: + ret +.size poly1305_init,.-poly1305_init + +.type poly1305_blocks,%function +.align 5 +poly1305_blocks: + ands $len,$len,#-16 + b.eq .Lno_data + + ldp $h0,$h1,[$ctx] // load hash value + ldp $r0,$r1,[$ctx,#32] // load key value + ldr $h2,[$ctx,#16] + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) + b .Loop + +.align 5 +.Loop: + ldp $t0,$t1,[$inp],#16 // load input + sub $len,$len,#16 +#ifdef __AARCH64EB__ + rev $t0,$t0 + rev $t1,$t1 +#endif + adds $h0,$h0,$t0 // accumulate input + adcs $h1,$h1,$t1 + + mul $d0,$h0,$r0 // h0*r0 + adc $h2,$h2,$padbit + umulh $d1,$h0,$r0 + + mul $t0,$h1,$s1 // h1*5*r1 + umulh $t1,$h1,$s1 + + adds $d0,$d0,$t0 + mul $t0,$h0,$r1 // h0*r1 + adc $d1,$d1,$t1 + umulh $d2,$h0,$r1 + + adds $d1,$d1,$t0 + mul $t0,$h1,$r0 // h1*r0 + adc $d2,$d2,xzr + umulh $t1,$h1,$r0 + + adds $d1,$d1,$t0 + mul $t0,$h2,$s1 // h2*5*r1 + adc $d2,$d2,$t1 + mul $t1,$h2,$r0 // h2*r0 + + adds $d1,$d1,$t0 + adc $d2,$d2,$t1 + + and $t0,$d2,#-4 // final reduction + and $h2,$d2,#3 + add $t0,$t0,$d2,lsr#2 + adds $h0,$d0,$t0 + adcs $h1,$d1,xzr + adc $h2,$h2,xzr + + cbnz $len,.Loop + + stp $h0,$h1,[$ctx] // store hash value + str $h2,[$ctx,#16] + +.Lno_data: + ret +.size poly1305_blocks,.-poly1305_blocks + +.type poly1305_emit,%function +.align 5 +poly1305_emit: + ldp $h0,$h1,[$ctx] // load hash base 2^64 + ldr $h2,[$ctx,#16] + ldp $t0,$t1,[$nonce] // load nonce + + adds $d0,$h0,#5 // compare to modulus + adcs $d1,$h1,xzr + adc 
$d2,$h2,xzr + + tst $d2,#-4 // see if it's carried/borrowed + + csel $h0,$h0,$d0,eq + csel $h1,$h1,$d1,eq + +#ifdef __AARCH64EB__ + ror $t0,$t0,#32 // flip nonce words + ror $t1,$t1,#32 +#endif + adds $h0,$h0,$t0 // accumulate nonce + adc $h1,$h1,$t1 +#ifdef __AARCH64EB__ + rev $h0,$h0 // flip output bytes + rev $h1,$h1 +#endif + stp $h0,$h1,[$mac] // write result + + ret +.size poly1305_emit,.-poly1305_emit +___ +my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8)); +my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13)); +my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18)); +my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23)); +my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28)); +my ($T0,$T1,$MASK) = map("v$_",(29..31)); + +my ($in2,$zeros)=("x16","x17"); +my $is_base2_26 = $zeros; # borrow + +$code.=<<___; +.type __poly1305_mult,%function +.align 5 +__poly1305_mult: + mul $d0,$h0,$r0 // h0*r0 + umulh $d1,$h0,$r0 + + mul $t0,$h1,$s1 // h1*5*r1 + umulh $t1,$h1,$s1 + + adds $d0,$d0,$t0 + mul $t0,$h0,$r1 // h0*r1 + adc $d1,$d1,$t1 + umulh $d2,$h0,$r1 + + adds $d1,$d1,$t0 + mul $t0,$h1,$r0 // h1*r0 + adc $d2,$d2,xzr + umulh $t1,$h1,$r0 + + adds $d1,$d1,$t0 + mul $t0,$h2,$s1 // h2*5*r1 + adc $d2,$d2,$t1 + mul $t1,$h2,$r0 // h2*r0 + + adds $d1,$d1,$t0 + adc $d2,$d2,$t1 + + and $t0,$d2,#-4 // final reduction + and $h2,$d2,#3 + add $t0,$t0,$d2,lsr#2 + adds $h0,$d0,$t0 + adcs $h1,$d1,xzr + adc $h2,$h2,xzr + + ret +.size __poly1305_mult,.-__poly1305_mult + +.type __poly1305_splat,%function +.align 5 +__poly1305_splat: + and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26 + ubfx x13,$h0,#26,#26 + extr x14,$h1,$h0,#52 + and x14,x14,#0x03ffffff + ubfx x15,$h1,#14,#26 + extr x16,$h2,$h1,#40 + + str w12,[$ctx,#16*0] // r0 + add w12,w13,w13,lsl#2 // r1*5 + str w13,[$ctx,#16*1] // r1 + add w13,w14,w14,lsl#2 // r2*5 + str w12,[$ctx,#16*2] // s1 + str w14,[$ctx,#16*3] // r2 + add w14,w15,w15,lsl#2 // r3*5 + str w13,[$ctx,#16*4] // s2 + str w15,[$ctx,#16*5] // r3 + add w15,w16,w16,lsl#2 // r4*5 + str w14,[$ctx,#16*6] // s3 + str w16,[$ctx,#16*7] // r4 + str w15,[$ctx,#16*8] // s4 + + ret +.size __poly1305_splat,.-__poly1305_splat + +#if !defined(__KERNEL__) || defined(CONFIG_KERNEL_MODE_NEON) +#ifdef __KERNEL__ +.globl poly1305_blocks_neon +.globl poly1305_emit_neon +#endif + +.type poly1305_blocks_neon,%function +.align 5 +poly1305_blocks_neon: + ldr $is_base2_26,[$ctx,#24] + cmp $len,#128 + b.hs .Lblocks_neon + cbz $is_base2_26,poly1305_blocks + +.Lblocks_neon: + stp x29,x30,[sp,#-80]! + add x29,sp,#0 + + ands $len,$len,#-16 + b.eq .Lno_data_neon + + cbz $is_base2_26,.Lbase2_64_neon + + ldp w10,w11,[$ctx] // load hash value base 2^26 + ldp w12,w13,[$ctx,#8] + ldr w14,[$ctx,#16] + + tst $len,#31 + b.eq .Leven_neon + + ldp $r0,$r1,[$ctx,#32] // load key value + + add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64 + lsr $h1,x12,#12 + adds $h0,$h0,x12,lsl#52 + add $h1,$h1,x13,lsl#14 + adc $h1,$h1,xzr + lsr $h2,x14,#24 + adds $h1,$h1,x14,lsl#40 + adc $d2,$h2,xzr // can be partially reduced... + + ldp $d0,$d1,[$inp],#16 // load input + sub $len,$len,#16 + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) + + and $t0,$d2,#-4 // ... 
so reduce + and $h2,$d2,#3 + add $t0,$t0,$d2,lsr#2 + adds $h0,$h0,$t0 + adcs $h1,$h1,xzr + adc $h2,$h2,xzr + +#ifdef __AARCH64EB__ + rev $d0,$d0 + rev $d1,$d1 +#endif + adds $h0,$h0,$d0 // accumulate input + adcs $h1,$h1,$d1 + adc $h2,$h2,$padbit + + bl __poly1305_mult + ldr x30,[sp,#8] + + cbz $padbit,.Lstore_base2_64_neon + + and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26 + ubfx x11,$h0,#26,#26 + extr x12,$h1,$h0,#52 + and x12,x12,#0x03ffffff + ubfx x13,$h1,#14,#26 + extr x14,$h2,$h1,#40 + + cbnz $len,.Leven_neon + + stp w10,w11,[$ctx] // store hash value base 2^26 + stp w12,w13,[$ctx,#8] + str w14,[$ctx,#16] + b .Lno_data_neon + +.align 4 +.Lstore_base2_64_neon: + stp $h0,$h1,[$ctx] // store hash value base 2^64 + stp $h2,xzr,[$ctx,#16] // note that is_base2_26 is zeroed + b .Lno_data_neon + +.align 4 +.Lbase2_64_neon: + ldp $r0,$r1,[$ctx,#32] // load key value + + ldp $h0,$h1,[$ctx] // load hash value base 2^64 + ldr $h2,[$ctx,#16] + + tst $len,#31 + b.eq .Linit_neon + + ldp $d0,$d1,[$inp],#16 // load input + sub $len,$len,#16 + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) +#ifdef __AARCH64EB__ + rev $d0,$d0 + rev $d1,$d1 +#endif + adds $h0,$h0,$d0 // accumulate input + adcs $h1,$h1,$d1 + adc $h2,$h2,$padbit + + bl __poly1305_mult + +.Linit_neon: + and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26 + ubfx x11,$h0,#26,#26 + extr x12,$h1,$h0,#52 + and x12,x12,#0x03ffffff + ubfx x13,$h1,#14,#26 + extr x14,$h2,$h1,#40 + + stp d8,d9,[sp,#16] // meet ABI requirements + stp d10,d11,[sp,#32] + stp d12,d13,[sp,#48] + stp d14,d15,[sp,#64] + + fmov ${H0},x10 + fmov ${H1},x11 + fmov ${H2},x12 + fmov ${H3},x13 + fmov ${H4},x14 + + ////////////////////////////////// initialize r^n table + mov $h0,$r0 // r^1 + add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) + mov $h1,$r1 + mov $h2,xzr + add $ctx,$ctx,#48+12 + bl __poly1305_splat + + bl __poly1305_mult // r^2 + sub $ctx,$ctx,#4 + bl __poly1305_splat + + bl __poly1305_mult // r^3 + sub $ctx,$ctx,#4 + bl __poly1305_splat + + bl __poly1305_mult // r^4 + sub $ctx,$ctx,#4 + bl __poly1305_splat + ldr x30,[sp,#8] + + add $in2,$inp,#32 + adr $zeros,.Lzeros + subs $len,$len,#64 + csel $in2,$zeros,$in2,lo + + mov x4,#1 + str x4,[$ctx,#-24] // set is_base2_26 + sub $ctx,$ctx,#48 // restore original $ctx + b .Ldo_neon + +.align 4 +.Leven_neon: + add $in2,$inp,#32 + adr $zeros,.Lzeros + subs $len,$len,#64 + csel $in2,$zeros,$in2,lo + + stp d8,d9,[sp,#16] // meet ABI requirements + stp d10,d11,[sp,#32] + stp d12,d13,[sp,#48] + stp d14,d15,[sp,#64] + + fmov ${H0},x10 + fmov ${H1},x11 + fmov ${H2},x12 + fmov ${H3},x13 + fmov ${H4},x14 + +.Ldo_neon: + ldp x8,x12,[$in2],#16 // inp[2:3] (or zero) + ldp x9,x13,[$in2],#48 + + lsl $padbit,$padbit,#24 + add x15,$ctx,#48 + +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + and x5,x9,#0x03ffffff + ubfx x6,x8,#26,#26 + ubfx x7,x9,#26,#26 + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + extr x8,x12,x8,#52 + extr x9,x13,x9,#52 + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + fmov $IN23_0,x4 + and x8,x8,#0x03ffffff + and x9,x9,#0x03ffffff + ubfx x10,x12,#14,#26 + ubfx x11,x13,#14,#26 + add x12,$padbit,x12,lsr#40 + add x13,$padbit,x13,lsr#40 + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + fmov $IN23_1,x6 + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + fmov $IN23_2,x8 + fmov $IN23_3,x10 + fmov $IN23_4,x12 + + ldp x8,x12,[$inp],#16 // inp[0:1] + ldp x9,x13,[$inp],#48 + + ld1 {$R0,$R1,$S1,$R2},[x15],#64 + ld1 
{$S2,$R3,$S3,$R4},[x15],#64 + ld1 {$S4},[x15] + +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + and x5,x9,#0x03ffffff + ubfx x6,x8,#26,#26 + ubfx x7,x9,#26,#26 + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + extr x8,x12,x8,#52 + extr x9,x13,x9,#52 + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + fmov $IN01_0,x4 + and x8,x8,#0x03ffffff + and x9,x9,#0x03ffffff + ubfx x10,x12,#14,#26 + ubfx x11,x13,#14,#26 + add x12,$padbit,x12,lsr#40 + add x13,$padbit,x13,lsr#40 + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + fmov $IN01_1,x6 + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + movi $MASK.2d,#-1 + fmov $IN01_2,x8 + fmov $IN01_3,x10 + fmov $IN01_4,x12 + ushr $MASK.2d,$MASK.2d,#38 + + b.ls .Lskip_loop + +.align 4 +.Loop_neon: + //////////////////////////////////////////////////////////////// + // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 + // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r + // \___________________/ + // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 + // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r + // \___________________/ \____________________/ + // + // Note that we start with inp[2:3]*r^2. This is because it + // doesn't depend on reduction in previous iteration. + //////////////////////////////////////////////////////////////// + // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0 + // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4 + // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3 + // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2 + // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1 + + subs $len,$len,#64 + umull $ACC4,$IN23_0,${R4}[2] + csel $in2,$zeros,$in2,lo + umull $ACC3,$IN23_0,${R3}[2] + umull $ACC2,$IN23_0,${R2}[2] + ldp x8,x12,[$in2],#16 // inp[2:3] (or zero) + umull $ACC1,$IN23_0,${R1}[2] + ldp x9,x13,[$in2],#48 + umull $ACC0,$IN23_0,${R0}[2] +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + + umlal $ACC4,$IN23_1,${R3}[2] + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + umlal $ACC3,$IN23_1,${R2}[2] + and x5,x9,#0x03ffffff + umlal $ACC2,$IN23_1,${R1}[2] + ubfx x6,x8,#26,#26 + umlal $ACC1,$IN23_1,${R0}[2] + ubfx x7,x9,#26,#26 + umlal $ACC0,$IN23_1,${S4}[2] + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + + umlal $ACC4,$IN23_2,${R2}[2] + extr x8,x12,x8,#52 + umlal $ACC3,$IN23_2,${R1}[2] + extr x9,x13,x9,#52 + umlal $ACC2,$IN23_2,${R0}[2] + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + umlal $ACC1,$IN23_2,${S4}[2] + fmov $IN23_0,x4 + umlal $ACC0,$IN23_2,${S3}[2] + and x8,x8,#0x03ffffff + + umlal $ACC4,$IN23_3,${R1}[2] + and x9,x9,#0x03ffffff + umlal $ACC3,$IN23_3,${R0}[2] + ubfx x10,x12,#14,#26 + umlal $ACC2,$IN23_3,${S4}[2] + ubfx x11,x13,#14,#26 + umlal $ACC1,$IN23_3,${S3}[2] + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + umlal $ACC0,$IN23_3,${S2}[2] + fmov $IN23_1,x6 + + add $IN01_2,$IN01_2,$H2 + add x12,$padbit,x12,lsr#40 + umlal $ACC4,$IN23_4,${R0}[2] + add x13,$padbit,x13,lsr#40 + umlal $ACC3,$IN23_4,${S4}[2] + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + umlal $ACC2,$IN23_4,${S3}[2] + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + umlal $ACC1,$IN23_4,${S2}[2] + fmov $IN23_2,x8 + umlal $ACC0,$IN23_4,${S1}[2] + fmov $IN23_3,x10 + + //////////////////////////////////////////////////////////////// + // (hash+inp[0:1])*r^4 and accumulate + + add $IN01_0,$IN01_0,$H0 + fmov $IN23_4,x12 + umlal $ACC3,$IN01_2,${R1}[0] + ldp x8,x12,[$inp],#16 // inp[0:1] + umlal $ACC0,$IN01_2,${S3}[0] + ldp 
x9,x13,[$inp],#48 + umlal $ACC4,$IN01_2,${R2}[0] + umlal $ACC1,$IN01_2,${S4}[0] + umlal $ACC2,$IN01_2,${R0}[0] +#ifdef __AARCH64EB__ + rev x8,x8 + rev x12,x12 + rev x9,x9 + rev x13,x13 +#endif + + add $IN01_1,$IN01_1,$H1 + umlal $ACC3,$IN01_0,${R3}[0] + umlal $ACC4,$IN01_0,${R4}[0] + and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 + umlal $ACC2,$IN01_0,${R2}[0] + and x5,x9,#0x03ffffff + umlal $ACC0,$IN01_0,${R0}[0] + ubfx x6,x8,#26,#26 + umlal $ACC1,$IN01_0,${R1}[0] + ubfx x7,x9,#26,#26 + + add $IN01_3,$IN01_3,$H3 + add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 + umlal $ACC3,$IN01_1,${R2}[0] + extr x8,x12,x8,#52 + umlal $ACC4,$IN01_1,${R3}[0] + extr x9,x13,x9,#52 + umlal $ACC0,$IN01_1,${S4}[0] + add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 + umlal $ACC2,$IN01_1,${R1}[0] + fmov $IN01_0,x4 + umlal $ACC1,$IN01_1,${R0}[0] + and x8,x8,#0x03ffffff + + add $IN01_4,$IN01_4,$H4 + and x9,x9,#0x03ffffff + umlal $ACC3,$IN01_3,${R0}[0] + ubfx x10,x12,#14,#26 + umlal $ACC0,$IN01_3,${S2}[0] + ubfx x11,x13,#14,#26 + umlal $ACC4,$IN01_3,${R1}[0] + add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 + umlal $ACC1,$IN01_3,${S3}[0] + fmov $IN01_1,x6 + umlal $ACC2,$IN01_3,${S4}[0] + add x12,$padbit,x12,lsr#40 + + umlal $ACC3,$IN01_4,${S4}[0] + add x13,$padbit,x13,lsr#40 + umlal $ACC0,$IN01_4,${S1}[0] + add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 + umlal $ACC4,$IN01_4,${R0}[0] + add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 + umlal $ACC1,$IN01_4,${S2}[0] + fmov $IN01_2,x8 + umlal $ACC2,$IN01_4,${S3}[0] + fmov $IN01_3,x10 + fmov $IN01_4,x12 + + ///////////////////////////////////////////////////////////////// + // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein + // and P. Schwabe + // + // [see discussion in poly1305-armv4 module] + + ushr $T0.2d,$ACC3,#26 + xtn $H3,$ACC3 + ushr $T1.2d,$ACC0,#26 + and $ACC0,$ACC0,$MASK.2d + add $ACC4,$ACC4,$T0.2d // h3 -> h4 + bic $H3,#0xfc,lsl#24 // &=0x03ffffff + add $ACC1,$ACC1,$T1.2d // h0 -> h1 + + ushr $T0.2d,$ACC4,#26 + xtn $H4,$ACC4 + ushr $T1.2d,$ACC1,#26 + xtn $H1,$ACC1 + bic $H4,#0xfc,lsl#24 + add $ACC2,$ACC2,$T1.2d // h1 -> h2 + + add $ACC0,$ACC0,$T0.2d + shl $T0.2d,$T0.2d,#2 + shrn $T1.2s,$ACC2,#26 + xtn $H2,$ACC2 + add $ACC0,$ACC0,$T0.2d // h4 -> h0 + bic $H1,#0xfc,lsl#24 + add $H3,$H3,$T1.2s // h2 -> h3 + bic $H2,#0xfc,lsl#24 + + shrn $T0.2s,$ACC0,#26 + xtn $H0,$ACC0 + ushr $T1.2s,$H3,#26 + bic $H3,#0xfc,lsl#24 + bic $H0,#0xfc,lsl#24 + add $H1,$H1,$T0.2s // h0 -> h1 + add $H4,$H4,$T1.2s // h3 -> h4 + + b.hi .Loop_neon + +.Lskip_loop: + dup $IN23_2,${IN23_2}[0] + add $IN01_2,$IN01_2,$H2 + + //////////////////////////////////////////////////////////////// + // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 + + adds $len,$len,#32 + b.ne .Long_tail + + dup $IN23_2,${IN01_2}[0] + add $IN23_0,$IN01_0,$H0 + add $IN23_3,$IN01_3,$H3 + add $IN23_1,$IN01_1,$H1 + add $IN23_4,$IN01_4,$H4 + +.Long_tail: + dup $IN23_0,${IN23_0}[0] + umull2 $ACC0,$IN23_2,${S3} + umull2 $ACC3,$IN23_2,${R1} + umull2 $ACC4,$IN23_2,${R2} + umull2 $ACC2,$IN23_2,${R0} + umull2 $ACC1,$IN23_2,${S4} + + dup $IN23_1,${IN23_1}[0] + umlal2 $ACC0,$IN23_0,${R0} + umlal2 $ACC2,$IN23_0,${R2} + umlal2 $ACC3,$IN23_0,${R3} + umlal2 $ACC4,$IN23_0,${R4} + umlal2 $ACC1,$IN23_0,${R1} + + dup $IN23_3,${IN23_3}[0] + umlal2 $ACC0,$IN23_1,${S4} + umlal2 $ACC3,$IN23_1,${R2} + umlal2 $ACC2,$IN23_1,${R1} + umlal2 $ACC4,$IN23_1,${R3} + umlal2 $ACC1,$IN23_1,${R0} + + dup $IN23_4,${IN23_4}[0] + umlal2 $ACC3,$IN23_3,${R0} + umlal2 $ACC4,$IN23_3,${R1} + umlal2 $ACC0,$IN23_3,${S2} + umlal2 $ACC1,$IN23_3,${S3} + umlal2 
$ACC2,$IN23_3,${S4} + + umlal2 $ACC3,$IN23_4,${S4} + umlal2 $ACC0,$IN23_4,${S1} + umlal2 $ACC4,$IN23_4,${R0} + umlal2 $ACC1,$IN23_4,${S2} + umlal2 $ACC2,$IN23_4,${S3} + + b.eq .Lshort_tail + + //////////////////////////////////////////////////////////////// + // (hash+inp[0:1])*r^4:r^3 and accumulate + + add $IN01_0,$IN01_0,$H0 + umlal $ACC3,$IN01_2,${R1} + umlal $ACC0,$IN01_2,${S3} + umlal $ACC4,$IN01_2,${R2} + umlal $ACC1,$IN01_2,${S4} + umlal $ACC2,$IN01_2,${R0} + + add $IN01_1,$IN01_1,$H1 + umlal $ACC3,$IN01_0,${R3} + umlal $ACC0,$IN01_0,${R0} + umlal $ACC4,$IN01_0,${R4} + umlal $ACC1,$IN01_0,${R1} + umlal $ACC2,$IN01_0,${R2} + + add $IN01_3,$IN01_3,$H3 + umlal $ACC3,$IN01_1,${R2} + umlal $ACC0,$IN01_1,${S4} + umlal $ACC4,$IN01_1,${R3} + umlal $ACC1,$IN01_1,${R0} + umlal $ACC2,$IN01_1,${R1} + + add $IN01_4,$IN01_4,$H4 + umlal $ACC3,$IN01_3,${R0} + umlal $ACC0,$IN01_3,${S2} + umlal $ACC4,$IN01_3,${R1} + umlal $ACC1,$IN01_3,${S3} + umlal $ACC2,$IN01_3,${S4} + + umlal $ACC3,$IN01_4,${S4} + umlal $ACC0,$IN01_4,${S1} + umlal $ACC4,$IN01_4,${R0} + umlal $ACC1,$IN01_4,${S2} + umlal $ACC2,$IN01_4,${S3} + +.Lshort_tail: + //////////////////////////////////////////////////////////////// + // horizontal add + + addp $ACC3,$ACC3,$ACC3 + ldp d8,d9,[sp,#16] // meet ABI requirements + addp $ACC0,$ACC0,$ACC0 + ldp d10,d11,[sp,#32] + addp $ACC4,$ACC4,$ACC4 + ldp d12,d13,[sp,#48] + addp $ACC1,$ACC1,$ACC1 + ldp d14,d15,[sp,#64] + addp $ACC2,$ACC2,$ACC2 + + //////////////////////////////////////////////////////////////// + // lazy reduction, but without narrowing + + ushr $T0.2d,$ACC3,#26 + and $ACC3,$ACC3,$MASK.2d + ushr $T1.2d,$ACC0,#26 + and $ACC0,$ACC0,$MASK.2d + + add $ACC4,$ACC4,$T0.2d // h3 -> h4 + add $ACC1,$ACC1,$T1.2d // h0 -> h1 + + ushr $T0.2d,$ACC4,#26 + and $ACC4,$ACC4,$MASK.2d + ushr $T1.2d,$ACC1,#26 + and $ACC1,$ACC1,$MASK.2d + add $ACC2,$ACC2,$T1.2d // h1 -> h2 + + add $ACC0,$ACC0,$T0.2d + shl $T0.2d,$T0.2d,#2 + ushr $T1.2d,$ACC2,#26 + and $ACC2,$ACC2,$MASK.2d + add $ACC0,$ACC0,$T0.2d // h4 -> h0 + add $ACC3,$ACC3,$T1.2d // h2 -> h3 + + ushr $T0.2d,$ACC0,#26 + and $ACC0,$ACC0,$MASK.2d + ushr $T1.2d,$ACC3,#26 + and $ACC3,$ACC3,$MASK.2d + add $ACC1,$ACC1,$T0.2d // h0 -> h1 + add $ACC4,$ACC4,$T1.2d // h3 -> h4 + + //////////////////////////////////////////////////////////////// + // write the result, can be partially reduced + + st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16 + st1 {$ACC4}[0],[$ctx] + +.Lno_data_neon: + ldr x29,[sp],#80 + ret +.size poly1305_blocks_neon,.-poly1305_blocks_neon + +.type poly1305_emit_neon,%function +.align 5 +poly1305_emit_neon: + ldr $is_base2_26,[$ctx,#24] + cbz $is_base2_26,poly1305_emit + + ldp w10,w11,[$ctx] // load hash value base 2^26 + ldp w12,w13,[$ctx,#8] + ldr w14,[$ctx,#16] + + add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64 + lsr $h1,x12,#12 + adds $h0,$h0,x12,lsl#52 + add $h1,$h1,x13,lsl#14 + adc $h1,$h1,xzr + lsr $h2,x14,#24 + adds $h1,$h1,x14,lsl#40 + adc $h2,$h2,xzr // can be partially reduced... + + ldp $t0,$t1,[$nonce] // load nonce + + and $d0,$h2,#-4 // ... 
so reduce + add $d0,$d0,$h2,lsr#2 + and $h2,$h2,#3 + adds $h0,$h0,$d0 + adcs $h1,$h1,xzr + adc $h2,$h2,xzr + + adds $d0,$h0,#5 // compare to modulus + adcs $d1,$h1,xzr + adc $d2,$h2,xzr + + tst $d2,#-4 // see if it's carried/borrowed + + csel $h0,$h0,$d0,eq + csel $h1,$h1,$d1,eq + +#ifdef __AARCH64EB__ + ror $t0,$t0,#32 // flip nonce words + ror $t1,$t1,#32 +#endif + adds $h0,$h0,$t0 // accumulate nonce + adc $h1,$h1,$t1 +#ifdef __AARCH64EB__ + rev $h0,$h0 // flip output bytes + rev $h1,$h1 +#endif + stp $h0,$h1,[$mac] // write result + + ret +.size poly1305_emit_neon,.-poly1305_emit_neon +#endif + +.align 5 +.Lzeros: +.long 0,0,0,0,0,0,0,0 +#ifndef __KERNEL__ +.LOPENSSL_armcap_P: +#ifdef __ILP32__ +.long OPENSSL_armcap_P-. +#else +.quad OPENSSL_armcap_P-. +#endif +#endif +.align 2 +___ + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split("\n",$code)) { + s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or + s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or + (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or + (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or + (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or + (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or + (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1)); + + s/\.[124]([sd])\[/.$1\[/; + + print $_,"\n"; +} +close STDOUT; diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c b/net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c new file mode 100644 index 000000000000..527ccc3b59cc --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-donna32.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is based in part on Andrew Moon's poly1305-donna, which is in the + * public domain. 
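+ *
+ * The code below keeps r and h in five 26-bit limbs (radix 2^26), so each
+ * partial product h[i]*r[j] (or h[i]*5*r[j]) fits comfortably in a u64 and
+ * a row of five such products can be accumulated without overflow.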
+ */ + +struct poly1305_internal { + u32 h[5]; + u32 r[5]; + u32 s[4]; +}; + +static void poly1305_init_generic(void *ctx, const u8 key[16]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + st->r[0] = (get_unaligned_le32(&key[0])) & 0x3ffffff; + st->r[1] = (get_unaligned_le32(&key[3]) >> 2) & 0x3ffff03; + st->r[2] = (get_unaligned_le32(&key[6]) >> 4) & 0x3ffc0ff; + st->r[3] = (get_unaligned_le32(&key[9]) >> 6) & 0x3f03fff; + st->r[4] = (get_unaligned_le32(&key[12]) >> 8) & 0x00fffff; + + /* s = 5*r */ + st->s[0] = st->r[1] * 5; + st->s[1] = st->r[2] * 5; + st->s[2] = st->r[3] * 5; + st->s[3] = st->r[4] * 5; + + /* h = 0 */ + st->h[0] = 0; + st->h[1] = 0; + st->h[2] = 0; + st->h[3] = 0; + st->h[4] = 0; +} + +static void poly1305_blocks_generic(void *ctx, const u8 *input, size_t len, + const u32 padbit) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + const u32 hibit = padbit << 24; + u32 r0, r1, r2, r3, r4; + u32 s1, s2, s3, s4; + u32 h0, h1, h2, h3, h4; + u64 d0, d1, d2, d3, d4; + u32 c; + + r0 = st->r[0]; + r1 = st->r[1]; + r2 = st->r[2]; + r3 = st->r[3]; + r4 = st->r[4]; + + s1 = st->s[0]; + s2 = st->s[1]; + s3 = st->s[2]; + s4 = st->s[3]; + + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + h3 = st->h[3]; + h4 = st->h[4]; + + while (len >= POLY1305_BLOCK_SIZE) { + /* h += m[i] */ + h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff; + h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff; + h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff; + h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff; + h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit; + + /* h *= r */ + d0 = ((u64)h0 * r0) + ((u64)h1 * s4) + + ((u64)h2 * s3) + ((u64)h3 * s2) + + ((u64)h4 * s1); + d1 = ((u64)h0 * r1) + ((u64)h1 * r0) + + ((u64)h2 * s4) + ((u64)h3 * s3) + + ((u64)h4 * s2); + d2 = ((u64)h0 * r2) + ((u64)h1 * r1) + + ((u64)h2 * r0) + ((u64)h3 * s4) + + ((u64)h4 * s3); + d3 = ((u64)h0 * r3) + ((u64)h1 * r2) + + ((u64)h2 * r1) + ((u64)h3 * r0) + + ((u64)h4 * s4); + d4 = ((u64)h0 * r4) + ((u64)h1 * r3) + + ((u64)h2 * r2) + ((u64)h3 * r1) + + ((u64)h4 * r0); + + /* (partial) h %= p */ + c = (u32)(d0 >> 26); + h0 = (u32)d0 & 0x3ffffff; + d1 += c; + c = (u32)(d1 >> 26); + h1 = (u32)d1 & 0x3ffffff; + d2 += c; + c = (u32)(d2 >> 26); + h2 = (u32)d2 & 0x3ffffff; + d3 += c; + c = (u32)(d3 >> 26); + h3 = (u32)d3 & 0x3ffffff; + d4 += c; + c = (u32)(d4 >> 26); + h4 = (u32)d4 & 0x3ffffff; + h0 += c * 5; + c = (h0 >> 26); + h0 = h0 & 0x3ffffff; + h1 += c; + + input += POLY1305_BLOCK_SIZE; + len -= POLY1305_BLOCK_SIZE; + } + + st->h[0] = h0; + st->h[1] = h1; + st->h[2] = h2; + st->h[3] = h3; + st->h[4] = h4; +} + +static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + u32 h0, h1, h2, h3, h4, c; + u32 g0, g1, g2, g3, g4; + u64 f; + u32 mask; + + /* fully carry h */ + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + h3 = st->h[3]; + h4 = st->h[4]; + + c = h1 >> 26; + h1 = h1 & 0x3ffffff; + h2 += c; + c = h2 >> 26; + h2 = h2 & 0x3ffffff; + h3 += c; + c = h3 >> 26; + h3 = h3 & 0x3ffffff; + h4 += c; + c = h4 >> 26; + h4 = h4 & 0x3ffffff; + h0 += c * 5; + c = h0 >> 26; + h0 = h0 & 0x3ffffff; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; + c = g0 >> 26; + g0 &= 0x3ffffff; + g1 = h1 + c; + c = g1 >> 26; + g1 &= 0x3ffffff; + g2 = h2 + c; + c = g2 >> 26; + g2 &= 0x3ffffff; + g3 = h3 + c; + c = g3 >> 26; + g3 &= 0x3ffffff; + g4 = h4 + c - 
(1UL << 26); + + /* select h if h < p, or h + -p if h >= p */ + mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; + g0 &= mask; + g1 &= mask; + g2 &= mask; + g3 &= mask; + g4 &= mask; + mask = ~mask; + + h0 = (h0 & mask) | g0; + h1 = (h1 & mask) | g1; + h2 = (h2 & mask) | g2; + h3 = (h3 & mask) | g3; + h4 = (h4 & mask) | g4; + + /* h = h % (2^128) */ + h0 = ((h0) | (h1 << 26)) & 0xffffffff; + h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff; + h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff; + h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff; + + /* mac = (h + nonce) % (2^128) */ + f = (u64)h0 + nonce[0]; + h0 = (u32)f; + f = (u64)h1 + nonce[1] + (f >> 32); + h1 = (u32)f; + f = (u64)h2 + nonce[2] + (f >> 32); + h2 = (u32)f; + f = (u64)h3 + nonce[3] + (f >> 32); + h3 = (u32)f; + + put_unaligned_le32(h0, &mac[0]); + put_unaligned_le32(h1, &mac[4]); + put_unaligned_le32(h2, &mac[8]); + put_unaligned_le32(h3, &mac[12]); +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c b/net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c new file mode 100644 index 000000000000..131f1dda1b1d --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-donna64.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This is based in part on Andrew Moon's poly1305-donna, which is in the + * public domain. + */ + +typedef __uint128_t u128; + +struct poly1305_internal { + u64 r[3]; + u64 h[3]; + u64 s[2]; +}; + +static void poly1305_init_generic(void *ctx, const u8 key[16]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + u64 t0, t1; + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + t0 = get_unaligned_le64(&key[0]); + t1 = get_unaligned_le64(&key[8]); + + st->r[0] = t0 & 0xffc0fffffffULL; + st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL; + st->r[2] = ((t1 >> 24)) & 0x00ffffffc0fULL; + + /* s = 20*r */ + st->s[0] = st->r[1] * 20; + st->s[1] = st->r[2] * 20; + + /* h = 0 */ + st->h[0] = 0; + st->h[1] = 0; + st->h[2] = 0; +} + +static void poly1305_blocks_generic(void *ctx, const u8 *input, size_t len, + const u32 padbit) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + const u64 hibit = ((u64)padbit) << 40; + u64 r0, r1, r2; + u64 s1, s2; + u64 h0, h1, h2; + u64 c; + u128 d0, d1, d2, d; + + r0 = st->r[0]; + r1 = st->r[1]; + r2 = st->r[2]; + + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + + s1 = st->s[0]; + s2 = st->s[1]; + + while (len >= POLY1305_BLOCK_SIZE) { + u64 t0, t1; + + /* h += m[i] */ + t0 = get_unaligned_le64(&input[0]); + t1 = get_unaligned_le64(&input[8]); + + h0 += t0 & 0xfffffffffffULL; + h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL; + h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit; + + /* h *= r */ + d0 = (u128)h0 * r0; + d = (u128)h1 * s2; + d0 += d; + d = (u128)h2 * s1; + d0 += d; + d1 = (u128)h0 * r1; + d = (u128)h1 * r0; + d1 += d; + d = (u128)h2 * s2; + d1 += d; + d2 = (u128)h0 * r2; + d = (u128)h1 * r1; + d2 += d; + d = (u128)h2 * r0; + d2 += d; + + /* (partial) h %= p */ + c = (u64)(d0 >> 44); + h0 = (u64)d0 & 0xfffffffffffULL; + d1 += c; + c = (u64)(d1 >> 44); + h1 = (u64)d1 & 0xfffffffffffULL; + d2 += c; + c = (u64)(d2 >> 42); + h2 = (u64)d2 & 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 = h0 & 0xfffffffffffULL; + h1 += c; + + input += POLY1305_BLOCK_SIZE; + len -= POLY1305_BLOCK_SIZE; + } + + st->h[0] = h0; + st->h[1] = h1; + st->h[2] = h2; +} + +static void poly1305_emit_generic(void *ctx, u8 mac[16], const 
u32 nonce[4]) +{ + struct poly1305_internal *st = (struct poly1305_internal *)ctx; + u64 h0, h1, h2, c; + u64 g0, g1, g2; + u64 t0, t1; + + /* fully carry h */ + h0 = st->h[0]; + h1 = st->h[1]; + h2 = st->h[2]; + + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += c; + c = h2 >> 42; + h2 &= 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += c; + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += c; + c = h2 >> 42; + h2 &= 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; + c = g0 >> 44; + g0 &= 0xfffffffffffULL; + g1 = h1 + c; + c = g1 >> 44; + g1 &= 0xfffffffffffULL; + g2 = h2 + c - (1ULL << 42); + + /* select h if h < p, or h + -p if h >= p */ + c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1; + g0 &= c; + g1 &= c; + g2 &= c; + c = ~c; + h0 = (h0 & c) | g0; + h1 = (h1 & c) | g1; + h2 = (h2 & c) | g2; + + /* h = (h + nonce) */ + t0 = ((u64)nonce[1] << 32) | nonce[0]; + t1 = ((u64)nonce[3] << 32) | nonce[2]; + + h0 += t0 & 0xfffffffffffULL; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c; + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c; + h2 &= 0x3ffffffffffULL; + + /* mac = h % (2^128) */ + h0 = h0 | (h1 << 44); + h1 = (h1 >> 20) | (h2 << 24); + + put_unaligned_le64(h0, &mac[0]); + put_unaligned_le64(h1, &mac[8]); +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c b/net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c new file mode 100644 index 000000000000..a540e9c4eee8 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-mips-glue.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +asmlinkage void poly1305_init_mips(void *ctx, const u8 key[16]); +asmlinkage void poly1305_blocks_mips(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_emit_mips(void *ctx, u8 mac[16], const u32 nonce[4]); + +static bool *const poly1305_nobs[] __initconst = { }; +static void __init poly1305_fpu_init(void) +{ +} + +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + poly1305_init_mips(ctx, key); + return true; +} + +static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + poly1305_blocks_mips(ctx, inp, len, padbit); + return true; +} + +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + poly1305_emit_mips(ctx, mac, nonce); + return true; +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-mips.S b/net/wireguard/crypto/zinc/poly1305/poly1305-mips.S new file mode 100644 index 000000000000..4291c156815b --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-mips.S @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com> All Rights Reserved. + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define MSB 0 +#define LSB 3 +#else +#define MSB 3 +#define LSB 0 +#endif + +#define POLY1305_BLOCK_SIZE 16 +.text +#define H0 $t0 +#define H1 $t1 +#define H2 $t2 +#define H3 $t3 +#define H4 $t4 + +#define R0 $t5 +#define R1 $t6 +#define R2 $t7 +#define R3 $t8 + +#define O0 $s0 +#define O1 $s4 +#define O2 $v1 +#define O3 $t9 +#define O4 $s5 + +#define S1 $s1 +#define S2 $s2 +#define S3 $s3 + +#define SC $at +#define CA $v0 + +/* Input arguments */ +#define poly $a0 +#define src $a1 +#define srclen $a2 +#define hibit $a3 + +/* Location in the opaque buffer + * R[0..3], CA, H[0..4] + */ +#define PTR_POLY1305_R(n) ( 0 + (n*4)) ## ($a0) +#define PTR_POLY1305_CA (16 ) ## ($a0) +#define PTR_POLY1305_H(n) (20 + (n*4)) ## ($a0) + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_STACK_SIZE 32 + +.set noat +.align 4 +.globl poly1305_blocks_mips +.ent poly1305_blocks_mips +poly1305_blocks_mips: + .frame $sp, POLY1305_STACK_SIZE, $ra + /* srclen &= 0xFFFFFFF0 */ + ins srclen, $zero, 0, 4 + + addiu $sp, -(POLY1305_STACK_SIZE) + + /* check srclen >= 16 bytes */ + beqz srclen, .Lpoly1305_blocks_mips_end + + /* Calculate last round based on src address pointer. + * last round src ptr (srclen) = src + (srclen & 0xFFFFFFF0) + */ + addu srclen, src + + lw R0, PTR_POLY1305_R(0) + lw R1, PTR_POLY1305_R(1) + lw R2, PTR_POLY1305_R(2) + lw R3, PTR_POLY1305_R(3) + + /* store the used save registers. */ + sw $s0, 0($sp) + sw $s1, 4($sp) + sw $s2, 8($sp) + sw $s3, 12($sp) + sw $s4, 16($sp) + sw $s5, 20($sp) + + /* load Hx and Carry */ + lw CA, PTR_POLY1305_CA + lw H0, PTR_POLY1305_H(0) + lw H1, PTR_POLY1305_H(1) + lw H2, PTR_POLY1305_H(2) + lw H3, PTR_POLY1305_H(3) + lw H4, PTR_POLY1305_H(4) + + /* Sx = Rx + (Rx >> 2) */ + srl S1, R1, 2 + srl S2, R2, 2 + srl S3, R3, 2 + addu S1, R1 + addu S2, R2 + addu S3, R3 + + addiu SC, $zero, 1 + +.Lpoly1305_loop: + lwl O0, 0+MSB(src) + lwl O1, 4+MSB(src) + lwl O2, 8+MSB(src) + lwl O3,12+MSB(src) + lwr O0, 0+LSB(src) + lwr O1, 4+LSB(src) + lwr O2, 8+LSB(src) + lwr O3,12+LSB(src) + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + wsbh O0 + wsbh O1 + wsbh O2 + wsbh O3 + rotr O0, 16 + rotr O1, 16 + rotr O2, 16 + rotr O3, 16 +#endif + + /* h0 = (u32)(d0 = (u64)h0 + inp[0] + c 'Carry_previous cycle'); */ + addu H0, CA + sltu CA, H0, CA + addu O0, H0 + sltu H0, O0, H0 + addu CA, H0 + + /* h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + inp[4]); */ + addu H1, CA + sltu CA, H1, CA + addu O1, H1 + sltu H1, O1, H1 + addu CA, H1 + + /* h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + inp[8]); */ + addu H2, CA + sltu CA, H2, CA + addu O2, H2 + sltu H2, O2, H2 + addu CA, H2 + + /* h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + inp[12]); */ + addu H3, CA + sltu CA, H3, CA + addu O3, H3 + sltu H3, O3, H3 + addu CA, H3 + + /* h4 += (u32)(d3 >> 32) + padbit; */ + addu H4, hibit + addu O4, H4, CA + + /* D0 */ + multu O0, R0 + maddu O1, S3 + maddu O2, S2 + maddu O3, S1 + mfhi CA + mflo H0 + + /* D1 */ + multu O0, R1 + maddu O1, R0 + maddu O2, S3 + maddu O3, S2 + maddu O4, S1 + maddu CA, SC + mfhi CA + mflo H1 + + /* D2 */ + multu O0, R2 + maddu O1, R1 + maddu O2, R0 + maddu O3, S3 + maddu O4, S2 + maddu CA, SC + mfhi CA + mflo H2 + + /* D4 */ + mul H4, O4, R0 + + /* D3 */ + multu O0, R3 + maddu O1, R2 + maddu O2, R1 + maddu O3, R0 + maddu O4, S3 + maddu CA, SC + mfhi CA + mflo H3 + + addiu src, POLY1305_BLOCK_SIZE + + /* h4 += (u32)(d3 >> 32); */ + addu O4, H4, CA + /* h4 &= 3 */ + andi H4, O4, 3 + /* c = (h4 >> 2) + (h4 & ~3U); */ + srl CA, O4, 2 + ins O4, $zero, 0, 2 + + 
addu CA, O4 + + /* able to do a 16 byte block. */ + bne src, srclen, .Lpoly1305_loop + + /* restore the used save registers. */ + lw $s0, 0($sp) + lw $s1, 4($sp) + lw $s2, 8($sp) + lw $s3, 12($sp) + lw $s4, 16($sp) + lw $s5, 20($sp) + + /* store Hx and Carry */ + sw CA, PTR_POLY1305_CA + sw H0, PTR_POLY1305_H(0) + sw H1, PTR_POLY1305_H(1) + sw H2, PTR_POLY1305_H(2) + sw H3, PTR_POLY1305_H(3) + sw H4, PTR_POLY1305_H(4) + +.Lpoly1305_blocks_mips_end: + addiu $sp, POLY1305_STACK_SIZE + + /* Jump Back */ + jr $ra +.end poly1305_blocks_mips +.set at + +/* Input arguments CTX=$a0, MAC=$a1, NONCE=$a2 */ +#define MAC $a1 +#define NONCE $a2 + +#define G0 $t5 +#define G1 $t6 +#define G2 $t7 +#define G3 $t8 +#define G4 $t9 + +.set noat +.align 4 +.globl poly1305_emit_mips +.ent poly1305_emit_mips +poly1305_emit_mips: + /* load Hx and Carry */ + lw CA, PTR_POLY1305_CA + lw H0, PTR_POLY1305_H(0) + lw H1, PTR_POLY1305_H(1) + lw H2, PTR_POLY1305_H(2) + lw H3, PTR_POLY1305_H(3) + lw H4, PTR_POLY1305_H(4) + + /* Add left over carry */ + addu H0, CA + sltu CA, H0, CA + addu H1, CA + sltu CA, H1, CA + addu H2, CA + sltu CA, H2, CA + addu H3, CA + sltu CA, H3, CA + addu H4, CA + + /* compare to modulus by computing h + -p */ + addiu G0, H0, 5 + sltu CA, G0, H0 + addu G1, H1, CA + sltu CA, G1, H1 + addu G2, H2, CA + sltu CA, G2, H2 + addu G3, H3, CA + sltu CA, G3, H3 + addu G4, H4, CA + + srl SC, G4, 2 + + /* if there was carry into 131st bit, h3:h0 = g3:g0 */ + movn H0, G0, SC + movn H1, G1, SC + movn H2, G2, SC + movn H3, G3, SC + + lwl G0, 0+MSB(NONCE) + lwl G1, 4+MSB(NONCE) + lwl G2, 8+MSB(NONCE) + lwl G3,12+MSB(NONCE) + lwr G0, 0+LSB(NONCE) + lwr G1, 4+LSB(NONCE) + lwr G2, 8+LSB(NONCE) + lwr G3,12+LSB(NONCE) + + /* mac = (h + nonce) % (2^128) */ + addu H0, G0 + sltu CA, H0, G0 + + /* H1 */ + addu H1, CA + sltu CA, H1, CA + addu H1, G1 + sltu G1, H1, G1 + addu CA, G1 + + /* H2 */ + addu H2, CA + sltu CA, H2, CA + addu H2, G2 + sltu G2, H2, G2 + addu CA, G2 + + /* H3 */ + addu H3, CA + addu H3, G3 + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + wsbh H0 + wsbh H1 + wsbh H2 + wsbh H3 + rotr H0, 16 + rotr H1, 16 + rotr H2, 16 + rotr H3, 16 +#endif + + /* store MAC */ + swl H0, 0+MSB(MAC) + swl H1, 4+MSB(MAC) + swl H2, 8+MSB(MAC) + swl H3,12+MSB(MAC) + swr H0, 0+LSB(MAC) + swr H1, 4+LSB(MAC) + swr H2, 8+LSB(MAC) + swr H3,12+LSB(MAC) + + jr $ra +.end poly1305_emit_mips + +#define PR0 $t0 +#define PR1 $t1 +#define PR2 $t2 +#define PR3 $t3 +#define PT0 $t4 + +/* Input arguments CTX=$a0, KEY=$a1 */ + +.align 4 +.globl poly1305_init_mips +.ent poly1305_init_mips +poly1305_init_mips: + lwl PR0, 0+MSB($a1) + lwl PR1, 4+MSB($a1) + lwl PR2, 8+MSB($a1) + lwl PR3,12+MSB($a1) + lwr PR0, 0+LSB($a1) + lwr PR1, 4+LSB($a1) + lwr PR2, 8+LSB($a1) + lwr PR3,12+LSB($a1) + + /* store Hx and Carry */ + sw $zero, PTR_POLY1305_CA + sw $zero, PTR_POLY1305_H(0) + sw $zero, PTR_POLY1305_H(1) + sw $zero, PTR_POLY1305_H(2) + sw $zero, PTR_POLY1305_H(3) + sw $zero, PTR_POLY1305_H(4) + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + wsbh PR0 + wsbh PR1 + wsbh PR2 + wsbh PR3 + rotr PR0, 16 + rotr PR1, 16 + rotr PR2, 16 + rotr PR3, 16 +#endif + + lui PT0, 0x0FFF + ori PT0, 0xFFFC + + /* AND 0x0fffffff; */ + ext PR0, PR0, 0, (32-4) + + /* AND 0x0ffffffc; */ + and PR1, PT0 + and PR2, PT0 + and PR3, PT0 + + /* store Rx */ + sw PR0, PTR_POLY1305_R(0) + sw PR1, PTR_POLY1305_R(1) + sw PR2, PTR_POLY1305_R(2) + sw PR3, PTR_POLY1305_R(3) + + /* Jump Back */ + jr $ra +.end poly1305_init_mips diff --git 
a/net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl new file mode 100644 index 000000000000..d30a03d79177 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-mips64.pl @@ -0,0 +1,467 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# Poly1305 hash for MIPS64. +# +# May 2016 +# +# Numbers are cycles per processed byte with poly1305_blocks alone. +# +# IALU/gcc +# R1x000 5.64/+120% (big-endian) +# Octeon II 3.80/+280% (little-endian) + +###################################################################### +# There is a number of MIPS ABI in use, O32 and N32/64 are most +# widely used. Then there is a new contender: NUBI. It appears that if +# one picks the latter, it's possible to arrange code in ABI neutral +# manner. Therefore let's stick to NUBI register layout: +# +($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25)); +($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); +($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31)); +# +# The return value is placed in $a0. Following coding rules facilitate +# interoperability: +# +# - never ever touch $tp, "thread pointer", former $gp [o32 can be +# excluded from the rule, because it's specified volatile]; +# - copy return value to $t0, former $v0 [or to $a0 if you're adapting +# old code]; +# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary; +# +# For reference here is register layout for N32/64 MIPS ABIs: +# +# ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); +# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); +# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); +# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); +# +# <appro@openssl.org> +# +###################################################################### + +$flavour = shift || "64"; # supported flavours are o32,n32,64,nubi32,nubi64 + +die "MIPS64 only" unless ($flavour =~ /64|n32/i); + +$v0 = ($flavour =~ /nubi/i) ? $a0 : $t0; +$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 
"0x0003f000" : "0x00030000"; + +($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3); +($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1); + +$code.=<<___; +#if (defined(_MIPS_ARCH_MIPS64R3) || defined(_MIPS_ARCH_MIPS64R5) || \\ + defined(_MIPS_ARCH_MIPS64R6)) \\ + && !defined(_MIPS_ARCH_MIPS64R2) +# define _MIPS_ARCH_MIPS64R2 +#endif + +#if defined(_MIPS_ARCH_MIPS64R6) +# define dmultu(rs,rt) +# define mflo(rd,rs,rt) dmulu rd,rs,rt +# define mfhi(rd,rs,rt) dmuhu rd,rs,rt +#else +# define dmultu(rs,rt) dmultu rs,rt +# define mflo(rd,rs,rt) mflo rd +# define mfhi(rd,rs,rt) mfhi rd +#endif + +#ifdef __KERNEL__ +# define poly1305_init poly1305_init_mips +# define poly1305_blocks poly1305_blocks_mips +# define poly1305_emit poly1305_emit_mips +#endif + +#if defined(__MIPSEB__) && !defined(MIPSEB) +# define MIPSEB +#endif + +#ifdef MIPSEB +# define MSB 0 +# define LSB 7 +#else +# define MSB 7 +# define LSB 0 +#endif + +.text +.set noat +.set noreorder + +.align 5 +.globl poly1305_init +.ent poly1305_init +poly1305_init: + .frame $sp,0,$ra + .set reorder + + sd $zero,0($ctx) + sd $zero,8($ctx) + sd $zero,16($ctx) + + beqz $inp,.Lno_key + +#if defined(_MIPS_ARCH_MIPS64R6) + ld $in0,0($inp) + ld $in1,8($inp) +#else + ldl $in0,0+MSB($inp) + ldl $in1,8+MSB($inp) + ldr $in0,0+LSB($inp) + ldr $in1,8+LSB($inp) +#endif +#ifdef MIPSEB +# if defined(_MIPS_ARCH_MIPS64R2) + dsbh $in0,$in0 # byte swap + dsbh $in1,$in1 + dshd $in0,$in0 + dshd $in1,$in1 +# else + ori $tmp0,$zero,0xFF + dsll $tmp2,$tmp0,32 + or $tmp0,$tmp2 # 0x000000FF000000FF + + and $tmp1,$in0,$tmp0 # byte swap + and $tmp3,$in1,$tmp0 + dsrl $tmp2,$in0,24 + dsrl $tmp4,$in1,24 + dsll $tmp1,24 + dsll $tmp3,24 + and $tmp2,$tmp0 + and $tmp4,$tmp0 + dsll $tmp0,8 # 0x0000FF000000FF00 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + and $tmp2,$in0,$tmp0 + and $tmp4,$in1,$tmp0 + dsrl $in0,8 + dsrl $in1,8 + dsll $tmp2,8 + dsll $tmp4,8 + and $in0,$tmp0 + and $in1,$tmp0 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + or $in0,$tmp1 + or $in1,$tmp3 + dsrl $tmp1,$in0,32 + dsrl $tmp3,$in1,32 + dsll $in0,32 + dsll $in1,32 + or $in0,$tmp1 + or $in1,$tmp3 +# endif +#endif + li $tmp0,1 + dsll $tmp0,32 + daddiu $tmp0,-63 + dsll $tmp0,28 + daddiu $tmp0,-1 # 0ffffffc0fffffff + + and $in0,$tmp0 + daddiu $tmp0,-3 # 0ffffffc0ffffffc + and $in1,$tmp0 + + sd $in0,24($ctx) + dsrl $tmp0,$in1,2 + sd $in1,32($ctx) + daddu $tmp0,$in1 # s1 = r1 + (r1 >> 2) + sd $tmp0,40($ctx) + +.Lno_key: + li $v0,0 # return 0 + jr $ra +.end poly1305_init +___ +{ +my ($h0,$h1,$h2,$r0,$r1,$s1,$d0,$d1,$d2) = + ($s0,$s1,$s2,$s3,$s4,$s5,$in0,$in1,$t2); + +$code.=<<___; +.align 5 +.globl poly1305_blocks +.ent poly1305_blocks +poly1305_blocks: + .set noreorder + dsrl $len,4 # number of complete blocks + bnez $len,poly1305_blocks_internal + nop + jr $ra + nop +.end poly1305_blocks + +.align 5 +.ent poly1305_blocks_internal +poly1305_blocks_internal: + .frame $sp,6*8,$ra + .mask $SAVED_REGS_MASK,-8 + .set noreorder + dsubu $sp,6*8 + sd $s5,40($sp) + sd $s4,32($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue + sd $s3,24($sp) + sd $s2,16($sp) + sd $s1,8($sp) + sd $s0,0($sp) +___ +$code.=<<___; + .set reorder + + ld $h0,0($ctx) # load hash value + ld $h1,8($ctx) + ld $h2,16($ctx) + + ld $r0,24($ctx) # load key + ld $r1,32($ctx) + ld $s1,40($ctx) + +.Loop: +#if defined(_MIPS_ARCH_MIPS64R6) + ld $in0,0($inp) # load input + ld $in1,8($inp) +#else + ldl $in0,0+MSB($inp) # load input + ldl $in1,8+MSB($inp) + ldr $in0,0+LSB($inp) + ldr $in1,8+LSB($inp) +#endif + daddiu $len,-1 + daddiu 
$inp,16 +#ifdef MIPSEB +# if defined(_MIPS_ARCH_MIPS64R2) + dsbh $in0,$in0 # byte swap + dsbh $in1,$in1 + dshd $in0,$in0 + dshd $in1,$in1 +# else + ori $tmp0,$zero,0xFF + dsll $tmp2,$tmp0,32 + or $tmp0,$tmp2 # 0x000000FF000000FF + + and $tmp1,$in0,$tmp0 # byte swap + and $tmp3,$in1,$tmp0 + dsrl $tmp2,$in0,24 + dsrl $tmp4,$in1,24 + dsll $tmp1,24 + dsll $tmp3,24 + and $tmp2,$tmp0 + and $tmp4,$tmp0 + dsll $tmp0,8 # 0x0000FF000000FF00 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + and $tmp2,$in0,$tmp0 + and $tmp4,$in1,$tmp0 + dsrl $in0,8 + dsrl $in1,8 + dsll $tmp2,8 + dsll $tmp4,8 + and $in0,$tmp0 + and $in1,$tmp0 + or $tmp1,$tmp2 + or $tmp3,$tmp4 + or $in0,$tmp1 + or $in1,$tmp3 + dsrl $tmp1,$in0,32 + dsrl $tmp3,$in1,32 + dsll $in0,32 + dsll $in1,32 + or $in0,$tmp1 + or $in1,$tmp3 +# endif +#endif + daddu $h0,$in0 # accumulate input + daddu $h1,$in1 + sltu $tmp0,$h0,$in0 + sltu $tmp1,$h1,$in1 + daddu $h1,$tmp0 + + dmultu ($r0,$h0) # h0*r0 + daddu $h2,$padbit + sltu $tmp0,$h1,$tmp0 + mflo ($d0,$r0,$h0) + mfhi ($d1,$r0,$h0) + + dmultu ($s1,$h1) # h1*5*r1 + daddu $tmp0,$tmp1 + daddu $h2,$tmp0 + mflo ($tmp0,$s1,$h1) + mfhi ($tmp1,$s1,$h1) + + dmultu ($r1,$h0) # h0*r1 + daddu $d0,$tmp0 + daddu $d1,$tmp1 + mflo ($tmp2,$r1,$h0) + mfhi ($d2,$r1,$h0) + sltu $tmp0,$d0,$tmp0 + daddu $d1,$tmp0 + + dmultu ($r0,$h1) # h1*r0 + daddu $d1,$tmp2 + sltu $tmp2,$d1,$tmp2 + mflo ($tmp0,$r0,$h1) + mfhi ($tmp1,$r0,$h1) + daddu $d2,$tmp2 + + dmultu ($s1,$h2) # h2*5*r1 + daddu $d1,$tmp0 + daddu $d2,$tmp1 + mflo ($tmp2,$s1,$h2) + + dmultu ($r0,$h2) # h2*r0 + sltu $tmp0,$d1,$tmp0 + daddu $d2,$tmp0 + mflo ($tmp3,$r0,$h2) + + daddu $d1,$tmp2 + daddu $d2,$tmp3 + sltu $tmp2,$d1,$tmp2 + daddu $d2,$tmp2 + + li $tmp0,-4 # final reduction + and $tmp0,$d2 + dsrl $tmp1,$d2,2 + andi $h2,$d2,3 + daddu $tmp0,$tmp1 + daddu $h0,$d0,$tmp0 + sltu $tmp0,$h0,$tmp0 + daddu $h1,$d1,$tmp0 + sltu $tmp0,$h1,$tmp0 + daddu $h2,$h2,$tmp0 + + bnez $len,.Loop + + sd $h0,0($ctx) # store hash value + sd $h1,8($ctx) + sd $h2,16($ctx) + + .set noreorder + ld $s5,40($sp) # epilogue + ld $s4,32($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi epilogue + ld $s3,24($sp) + ld $s2,16($sp) + ld $s1,8($sp) + ld $s0,0($sp) +___ +$code.=<<___; + jr $ra + daddu $sp,6*8 +.end poly1305_blocks_internal +___ +} +{ +my ($ctx,$mac,$nonce) = ($a0,$a1,$a2); + +$code.=<<___; +.align 5 +.globl poly1305_emit +.ent poly1305_emit +poly1305_emit: + .frame $sp,0,$ra + .set reorder + + ld $tmp0,0($ctx) + ld $tmp1,8($ctx) + ld $tmp2,16($ctx) + + daddiu $in0,$tmp0,5 # compare to modulus + sltiu $tmp3,$in0,5 + daddu $in1,$tmp1,$tmp3 + sltu $tmp3,$in1,$tmp3 + daddu $tmp2,$tmp2,$tmp3 + + dsrl $tmp2,2 # see if it carried/borrowed + dsubu $tmp2,$zero,$tmp2 + nor $tmp3,$zero,$tmp2 + + and $in0,$tmp2 + and $tmp0,$tmp3 + and $in1,$tmp2 + and $tmp1,$tmp3 + or $in0,$tmp0 + or $in1,$tmp1 + + lwu $tmp0,0($nonce) # load nonce + lwu $tmp1,4($nonce) + lwu $tmp2,8($nonce) + lwu $tmp3,12($nonce) + dsll $tmp1,32 + dsll $tmp3,32 + or $tmp0,$tmp1 + or $tmp2,$tmp3 + + daddu $in0,$tmp0 # accumulate nonce + daddu $in1,$tmp2 + sltu $tmp0,$in0,$tmp0 + daddu $in1,$tmp0 + + dsrl $tmp0,$in0,8 # write mac value + dsrl $tmp1,$in0,16 + dsrl $tmp2,$in0,24 + sb $in0,0($mac) + dsrl $tmp3,$in0,32 + sb $tmp0,1($mac) + dsrl $tmp0,$in0,40 + sb $tmp1,2($mac) + dsrl $tmp1,$in0,48 + sb $tmp2,3($mac) + dsrl $tmp2,$in0,56 + sb $tmp3,4($mac) + dsrl $tmp3,$in1,8 + sb $tmp0,5($mac) + dsrl $tmp0,$in1,16 + sb $tmp1,6($mac) + dsrl $tmp1,$in1,24 + sb $tmp2,7($mac) + + sb $in1,8($mac) + dsrl $tmp2,$in1,32 + sb 
$tmp3,9($mac) + dsrl $tmp3,$in1,40 + sb $tmp0,10($mac) + dsrl $tmp0,$in1,48 + sb $tmp1,11($mac) + dsrl $tmp1,$in1,56 + sb $tmp2,12($mac) + sb $tmp3,13($mac) + sb $tmp0,14($mac) + sb $tmp1,15($mac) + + jr $ra +.end poly1305_emit +.rdata +.align 2 +___ +} + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +$output=pop and open STDOUT,">$output"; +print $code; +close STDOUT; + diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c new file mode 100644 index 000000000000..ce48a42f7654 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64-glue.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/cpufeature.h> +#include <asm/processor.h> +#include <asm/intel-family.h> + +asmlinkage void poly1305_init_x86_64(void *ctx, + const u8 key[POLY1305_KEY_SIZE]); +asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, + const size_t len, const u32 padbit); +asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4]); +asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4]); +asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, + const size_t len, const u32 padbit); + +static bool poly1305_use_avx __ro_after_init; +static bool poly1305_use_avx2 __ro_after_init; +static bool poly1305_use_avx512 __ro_after_init; +static bool *const poly1305_nobs[] __initconst = { + &poly1305_use_avx, &poly1305_use_avx2, &poly1305_use_avx512 }; + +static void __init poly1305_fpu_init(void) +{ + poly1305_use_avx = + boot_cpu_has(X86_FEATURE_AVX) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); + poly1305_use_avx2 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); +#ifndef COMPAT_CANNOT_USE_AVX512 + poly1305_use_avx512 = + boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | + XFEATURE_MASK_AVX512, NULL) && + /* Skylake downclocks unacceptably much when using zmm. */ + boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X; +#endif +} + +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + poly1305_init_x86_64(ctx, key); + return true; +} + +struct poly1305_arch_internal { + union { + struct { + u32 h[5]; + u32 is_base2_26; + }; + u64 hs[3]; + }; + u64 r[2]; + u64 pad; + struct { u32 r2, r1, r4, r3; } rn[9]; +}; + +/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit + * the unfortunate situation of using AVX and then having to go back to scalar + * -- because the user is silly and has called the update function from two + * separate contexts -- then we need to convert back to the original base before + * proceeding. It is possible to reason that the initial reduction below is + * sufficient given the implementation invariants. However, for an avoidance of + * doubt and because this is not performance critical, we do the full reduction + * anyway. 
+ */ +static void convert_to_base2_64(void *ctx) +{ + struct poly1305_arch_internal *state = ctx; + u32 cy; + + if (!state->is_base2_26) + return; + + cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy; + cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy; + cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy; + cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy; + state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0]; + state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12); + state->hs[2] = state->h[4] >> 24; +#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1)) + cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL); + state->hs[2] &= 3; + state->hs[0] += cy; + state->hs[1] += (cy = ULT(state->hs[0], cy)); + state->hs[2] += ULT(state->hs[1], cy); +#undef ULT + state->is_base2_26 = 0; +} + +static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + struct poly1305_arch_internal *state = ctx; + + /* SIMD disables preemption, so relax after processing each page. */ + BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || + PAGE_SIZE % POLY1305_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_AS_AVX) || !poly1305_use_avx || + (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || + !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_blocks_x86_64(ctx, inp, len, padbit); + return true; + } + + for (;;) { + const size_t bytes = min_t(size_t, len, PAGE_SIZE); + + if (IS_ENABLED(CONFIG_AS_AVX512) && poly1305_use_avx512) + poly1305_blocks_avx512(ctx, inp, bytes, padbit); + else if (IS_ENABLED(CONFIG_AS_AVX2) && poly1305_use_avx2) + poly1305_blocks_avx2(ctx, inp, bytes, padbit); + else + poly1305_blocks_avx(ctx, inp, bytes, padbit); + len -= bytes; + if (!len) + break; + inp += bytes; + simd_relax(simd_context); + } + + return true; +} + +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + struct poly1305_arch_internal *state = ctx; + + if (!IS_ENABLED(CONFIG_AS_AVX) || !poly1305_use_avx || + !state->is_base2_26 || !simd_use(simd_context)) { + convert_to_base2_64(ctx); + poly1305_emit_x86_64(ctx, mac, nonce); + } else + poly1305_emit_avx(ctx, mac, nonce); + return true; +} diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl new file mode 100644 index 000000000000..f994855cdbe2 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305-x86_64.pl @@ -0,0 +1,4266 @@ +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# +# Copyright (C) 2017-2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved. +# Copyright (C) 2017-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. +# Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved. +# +# This code is taken from the OpenSSL project but the author, Andy Polyakov, +# has relicensed it under the licenses specified in the SPDX header above. +# The original headers, including the original license headers, are +# included below for completeness. +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. 
For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# This module implements Poly1305 hash for x86_64. +# +# March 2015 +# +# Initial release. +# +# December 2016 +# +# Add AVX512F+VL+BW code path. +# +# November 2017 +# +# Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be +# executed even on Knights Landing. Trigger for modification was +# observation that AVX512 code paths can negatively affect overall +# Skylake-X system performance. Since we are likely to suppress +# AVX512F capability flag [at least on Skylake-X], conversion serves +# as kind of "investment protection". Note that next *lake processor, +# Cannonlake, has AVX512IFMA code path to execute... +# +# Numbers are cycles per processed byte with poly1305_blocks alone, +# measured with rdtsc at fixed clock frequency. +# +# IALU/gcc-4.8(*) AVX(**) AVX2 AVX-512 +# P4 4.46/+120% - +# Core 2 2.41/+90% - +# Westmere 1.88/+120% - +# Sandy Bridge 1.39/+140% 1.10 +# Haswell 1.14/+175% 1.11 0.65 +# Skylake[-X] 1.13/+120% 0.96 0.51 [0.35] +# Silvermont 2.83/+95% - +# Knights L 3.60/? 1.65 1.10 0.41(***) +# Goldmont 1.70/+180% - +# VIA Nano 1.82/+150% - +# Sledgehammer 1.38/+160% - +# Bulldozer 2.30/+130% 0.97 +# Ryzen 1.15/+200% 1.08 1.18 +# +# (*) improvement coefficients relative to clang are more modest and +# are ~50% on most processors, in both cases we are comparing to +# __int128 code; +# (**) SSE2 implementation was attempted, but among non-AVX processors +# it was faster than integer-only code only on older Intel P4 and +# Core processors, 50-30%, less newer processor is, but slower on +# contemporary ones, for example almost 2x slower on Atom, and as +# former are naturally disappearing, SSE2 is deemed unnecessary; +# (***) strangely enough performance seems to vary from core to core, +# listed result is best case; + +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); +$kernel=0; $kernel=1 if (!$flavour && !$output); + +if (!$kernel) { + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; + ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or + ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or + die "can't locate x86_64-xlate.pl"; + + open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; + *STDOUT=*OUT; + + if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/) { + $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25); + } + + if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { + $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12); + $avx += 1 if ($1==2.11 && $2>=8); + } + + if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && + `ml64 2>&1` =~ /Version ([0-9]+)\./) { + $avx = ($1>=10) + ($1>=11); + } + + if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { + $avx = ($2>=3.0) + ($2>3.0); + } +} else { + $avx = 4; # The kernel uses ifdefs for this. 
+} + +sub declare_function() { + my ($name, $align, $nargs) = @_; + if($kernel) { + $code .= ".align $align\n"; + $code .= "SYM_FUNC_START($name)\n"; + $code .= ".L$name:\n"; + } else { + $code .= ".globl $name\n"; + $code .= ".type $name,\@function,$nargs\n"; + $code .= ".align $align\n"; + $code .= "$name:\n"; + } +} + +sub end_function() { + my ($name) = @_; + if($kernel) { + $code .= "SYM_FUNC_END($name)\n"; + } else { + $code .= ".size $name,.-$name\n"; + } +} + +$code.=<<___ if $kernel; +#include <linux/linkage.h> +___ + +if ($avx) { +$code.=<<___ if $kernel; +.section .rodata +___ +$code.=<<___; +.align 64 +.Lconst: +.Lmask24: +.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0 +.L129: +.long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0 +.Lmask26: +.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0 +.Lpermd_avx2: +.long 2,2,2,3,2,0,2,1 +.Lpermd_avx512: +.long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7 + +.L2_44_inp_permd: +.long 0,1,1,2,2,3,7,7 +.L2_44_inp_shift: +.quad 0,12,24,64 +.L2_44_mask: +.quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff +.L2_44_shift_rgt: +.quad 44,44,42,64 +.L2_44_shift_lft: +.quad 8,8,10,64 + +.align 64 +.Lx_mask44: +.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff +.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff +.Lx_mask42: +.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff +.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff +___ +} +$code.=<<___ if (!$kernel); +.asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>" +.align 16 +___ + +my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx"); +my ($mac,$nonce)=($inp,$len); # *_emit arguments +my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13"); +my ($h0,$h1,$h2)=("%r14","%rbx","%r10"); + +sub poly1305_iteration { +# input: copy of $r1 in %rax, $h0-$h2, $r0-$r1 +# output: $h0-$h2 *= $r0-$r1 +$code.=<<___; + mulq $h0 # h0*r1 + mov %rax,$d2 + mov $r0,%rax + mov %rdx,$d3 + + mulq $h0 # h0*r0 + mov %rax,$h0 # future $h0 + mov $r0,%rax + mov %rdx,$d1 + + mulq $h1 # h1*r0 + add %rax,$d2 + mov $s1,%rax + adc %rdx,$d3 + + mulq $h1 # h1*s1 + mov $h2,$h1 # borrow $h1 + add %rax,$h0 + adc %rdx,$d1 + + imulq $s1,$h1 # h2*s1 + add $h1,$d2 + mov $d1,$h1 + adc \$0,$d3 + + imulq $r0,$h2 # h2*r0 + add $d2,$h1 + mov \$-4,%rax # mask value + adc $h2,$d3 + + and $d3,%rax # last reduction step + mov $d3,$h2 + shr \$2,$d3 + and \$3,$h2 + add $d3,%rax + add %rax,$h0 + adc \$0,$h1 + adc \$0,$h2 +___ +} + +######################################################################## +# Layout of opaque area is following. +# +# unsigned __int64 h[3]; # current hash value base 2^64 +# unsigned __int64 r[2]; # key value base 2^64 + +$code.=<<___; +.text +___ +$code.=<<___ if (!$kernel); +.extern OPENSSL_ia32cap_P + +.globl poly1305_init_x86_64 +.hidden poly1305_init_x86_64 +.globl poly1305_blocks_x86_64 +.hidden poly1305_blocks_x86_64 +.globl poly1305_emit_x86_64 +.hidden poly1305_emit_x86_64 +___ +&declare_function("poly1305_init_x86_64", 32, 3); +$code.=<<___; + xor %rax,%rax + mov %rax,0($ctx) # initialize hash value + mov %rax,8($ctx) + mov %rax,16($ctx) + + cmp \$0,$inp + je .Lno_key +___ +$code.=<<___ if (!$kernel); + lea poly1305_blocks_x86_64(%rip),%r10 + lea poly1305_emit_x86_64(%rip),%r11 +___ +$code.=<<___ if (!$kernel && $avx); + mov OPENSSL_ia32cap_P+4(%rip),%r9 + lea poly1305_blocks_avx(%rip),%rax + lea poly1305_emit_avx(%rip),%rcx + bt \$`60-32`,%r9 # AVX? 
+ cmovc %rax,%r10 + cmovc %rcx,%r11 +___ +$code.=<<___ if (!$kernel && $avx>1); + lea poly1305_blocks_avx2(%rip),%rax + bt \$`5+32`,%r9 # AVX2? + cmovc %rax,%r10 +___ +$code.=<<___ if (!$kernel && $avx>3); + mov \$`(1<<31|1<<21|1<<16)`,%rax + shr \$32,%r9 + and %rax,%r9 + cmp %rax,%r9 + je .Linit_base2_44 +___ +$code.=<<___; + mov \$0x0ffffffc0fffffff,%rax + mov \$0x0ffffffc0ffffffc,%rcx + and 0($inp),%rax + and 8($inp),%rcx + mov %rax,24($ctx) + mov %rcx,32($ctx) +___ +$code.=<<___ if (!$kernel && $flavour !~ /elf32/); + mov %r10,0(%rdx) + mov %r11,8(%rdx) +___ +$code.=<<___ if (!$kernel && $flavour =~ /elf32/); + mov %r10d,0(%rdx) + mov %r11d,4(%rdx) +___ +$code.=<<___; + mov \$1,%eax +.Lno_key: + ret +___ +&end_function("poly1305_init_x86_64"); + +&declare_function("poly1305_blocks_x86_64", 32, 4); +$code.=<<___; +.cfi_startproc +.Lblocks: + shr \$4,$len + jz .Lno_data # too short + + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 + push $ctx +.cfi_push $ctx +.Lblocks_body: + + mov $len,%r15 # reassign $len + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + mov 0($ctx),$h0 # load hash value + mov 8($ctx),$h1 + mov 16($ctx),$h2 + + mov $s1,$r1 + shr \$2,$s1 + mov $r1,%rax + add $r1,$s1 # s1 = r1 + (r1 >> 2) + jmp .Loop + +.align 32 +.Loop: + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 +___ + + &poly1305_iteration(); + +$code.=<<___; + mov $r1,%rax + dec %r15 # len-=16 + jnz .Loop + + mov 0(%rsp),$ctx +.cfi_restore $ctx + + mov $h0,0($ctx) # store hash value + mov $h1,8($ctx) + mov $h2,16($ctx) + + mov 8(%rsp),%r15 +.cfi_restore %r15 + mov 16(%rsp),%r14 +.cfi_restore %r14 + mov 24(%rsp),%r13 +.cfi_restore %r13 + mov 32(%rsp),%r12 +.cfi_restore %r12 + mov 40(%rsp),%rbx +.cfi_restore %rbx + lea 48(%rsp),%rsp +.cfi_adjust_cfa_offset -48 +.Lno_data: +.Lblocks_epilogue: + ret +.cfi_endproc +___ +&end_function("poly1305_blocks_x86_64"); + +&declare_function("poly1305_emit_x86_64", 32, 3); +$code.=<<___; +.Lemit: + mov 0($ctx),%r8 # load hash value + mov 8($ctx),%r9 + mov 16($ctx),%r10 + + mov %r8,%rax + add \$5,%r8 # compare to modulus + mov %r9,%rcx + adc \$0,%r9 + adc \$0,%r10 + shr \$2,%r10 # did 130-bit value overflow? + cmovnz %r8,%rax + cmovnz %r9,%rcx + + add 0($nonce),%rax # accumulate nonce + adc 8($nonce),%rcx + mov %rax,0($mac) # write result + mov %rcx,8($mac) + + ret +___ +&end_function("poly1305_emit_x86_64"); +if ($avx) { + +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX\n"; +} + +######################################################################## +# Layout of opaque area is following. +# +# unsigned __int32 h[5]; # current hash value base 2^26 +# unsigned __int32 is_base2_26; +# unsigned __int64 r[2]; # key value base 2^64 +# unsigned __int64 pad; +# struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9]; +# +# where r^n are base 2^26 digits of degrees of multiplier key. There are +# 5 digits, but last four are interleaved with multiples of 5, totalling +# in 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4. 
+ +my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) = + map("%xmm$_",(0..15)); + +$code.=<<___; +.type __poly1305_block,\@abi-omnipotent +.align 32 +__poly1305_block: + push $ctx +___ + &poly1305_iteration(); +$code.=<<___; + pop $ctx + ret +.size __poly1305_block,.-__poly1305_block + +.type __poly1305_init_avx,\@abi-omnipotent +.align 32 +__poly1305_init_avx: + push %rbp + mov %rsp,%rbp + mov $r0,$h0 + mov $r1,$h1 + xor $h2,$h2 + + lea 48+64($ctx),$ctx # size optimization + + mov $r1,%rax + call __poly1305_block # r^2 + + mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26 + mov \$0x3ffffff,%edx + mov $h0,$d1 + and $h0#d,%eax + mov $r0,$d2 + and $r0#d,%edx + mov %eax,`16*0+0-64`($ctx) + shr \$26,$d1 + mov %edx,`16*0+4-64`($ctx) + shr \$26,$d2 + + mov \$0x3ffffff,%eax + mov \$0x3ffffff,%edx + and $d1#d,%eax + and $d2#d,%edx + mov %eax,`16*1+0-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov %edx,`16*1+4-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + mov %eax,`16*2+0-64`($ctx) + shr \$26,$d1 + mov %edx,`16*2+4-64`($ctx) + shr \$26,$d2 + + mov $h1,%rax + mov $r1,%rdx + shl \$12,%rax + shl \$12,%rdx + or $d1,%rax + or $d2,%rdx + and \$0x3ffffff,%eax + and \$0x3ffffff,%edx + mov %eax,`16*3+0-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov %edx,`16*3+4-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + mov %eax,`16*4+0-64`($ctx) + mov $h1,$d1 + mov %edx,`16*4+4-64`($ctx) + mov $r1,$d2 + + mov \$0x3ffffff,%eax + mov \$0x3ffffff,%edx + shr \$14,$d1 + shr \$14,$d2 + and $d1#d,%eax + and $d2#d,%edx + mov %eax,`16*5+0-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov %edx,`16*5+4-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + mov %eax,`16*6+0-64`($ctx) + shr \$26,$d1 + mov %edx,`16*6+4-64`($ctx) + shr \$26,$d2 + + mov $h2,%rax + shl \$24,%rax + or %rax,$d1 + mov $d1#d,`16*7+0-64`($ctx) + lea ($d1,$d1,4),$d1 # *5 + mov $d2#d,`16*7+4-64`($ctx) + lea ($d2,$d2,4),$d2 # *5 + mov $d1#d,`16*8+0-64`($ctx) + mov $d2#d,`16*8+4-64`($ctx) + + mov $r1,%rax + call __poly1305_block # r^3 + + mov \$0x3ffffff,%eax # save r^3 base 2^26 + mov $h0,$d1 + and $h0#d,%eax + shr \$26,$d1 + mov %eax,`16*0+12-64`($ctx) + + mov \$0x3ffffff,%edx + and $d1#d,%edx + mov %edx,`16*1+12-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*2+12-64`($ctx) + + mov $h1,%rax + shl \$12,%rax + or $d1,%rax + and \$0x3ffffff,%eax + mov %eax,`16*3+12-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov $h1,$d1 + mov %eax,`16*4+12-64`($ctx) + + mov \$0x3ffffff,%edx + shr \$14,$d1 + and $d1#d,%edx + mov %edx,`16*5+12-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*6+12-64`($ctx) + + mov $h2,%rax + shl \$24,%rax + or %rax,$d1 + mov $d1#d,`16*7+12-64`($ctx) + lea ($d1,$d1,4),$d1 # *5 + mov $d1#d,`16*8+12-64`($ctx) + + mov $r1,%rax + call __poly1305_block # r^4 + + mov \$0x3ffffff,%eax # save r^4 base 2^26 + mov $h0,$d1 + and $h0#d,%eax + shr \$26,$d1 + mov %eax,`16*0+8-64`($ctx) + + mov \$0x3ffffff,%edx + and $d1#d,%edx + mov %edx,`16*1+8-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*2+8-64`($ctx) + + mov $h1,%rax + shl \$12,%rax + or $d1,%rax + and \$0x3ffffff,%eax + mov %eax,`16*3+8-64`($ctx) + lea (%rax,%rax,4),%eax # *5 + mov $h1,$d1 + mov %eax,`16*4+8-64`($ctx) + + mov \$0x3ffffff,%edx + shr \$14,$d1 + and $d1#d,%edx + mov %edx,`16*5+8-64`($ctx) + lea (%rdx,%rdx,4),%edx # *5 + shr \$26,$d1 + mov %edx,`16*6+8-64`($ctx) + + mov $h2,%rax + shl \$24,%rax + or %rax,$d1 + mov $d1#d,`16*7+8-64`($ctx) + lea ($d1,$d1,4),$d1 # *5 + mov $d1#d,`16*8+8-64`($ctx) + + lea -48-64($ctx),$ctx # 
size [de-]optimization + pop %rbp + ret +.size __poly1305_init_avx,.-__poly1305_init_avx +___ + +&declare_function("poly1305_blocks_avx", 32, 4); +$code.=<<___; +.cfi_startproc + mov 20($ctx),%r8d # is_base2_26 + cmp \$128,$len + jae .Lblocks_avx + test %r8d,%r8d + jz .Lblocks + +.Lblocks_avx: + and \$-16,$len + jz .Lno_data_avx + + vzeroupper + + test %r8d,%r8d + jz .Lbase2_64_avx + + test \$31,$len + jz .Leven_avx + + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lblocks_avx_body: + + mov $len,%r15 # reassign $len + + mov 0($ctx),$d1 # load hash value + mov 8($ctx),$d2 + mov 16($ctx),$h2#d + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + ################################# base 2^26 -> base 2^64 + mov $d1#d,$h0#d + and \$`-1*(1<<31)`,$d1 + mov $d2,$r1 # borrow $r1 + mov $d2#d,$h1#d + and \$`-1*(1<<31)`,$d2 + + shr \$6,$d1 + shl \$52,$r1 + add $d1,$h0 + shr \$12,$h1 + shr \$18,$d2 + add $r1,$h0 + adc $d2,$h1 + + mov $h2,$d1 + shl \$40,$d1 + shr \$24,$h2 + add $d1,$h1 + adc \$0,$h2 # can be partially reduced... + + mov \$-4,$d2 # ... so reduce + mov $h2,$d1 + and $h2,$d2 + shr \$2,$d1 + and \$3,$h2 + add $d2,$d1 # =*5 + add $d1,$h0 + adc \$0,$h1 + adc \$0,$h2 + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + + call __poly1305_block + + test $padbit,$padbit # if $padbit is zero, + jz .Lstore_base2_64_avx # store hash in base 2^64 format + + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$r0 + mov $h1,$r1 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$r0 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $r0,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$r1 + and \$0x3ffffff,$h1 # h[3] + or $r1,$h2 # h[4] + + sub \$16,%r15 + jz .Lstore_base2_26_avx + + vmovd %rax#d,$H0 + vmovd %rdx#d,$H1 + vmovd $h0#d,$H2 + vmovd $h1#d,$H3 + vmovd $h2#d,$H4 + jmp .Lproceed_avx + +.align 32 +.Lstore_base2_64_avx: + mov $h0,0($ctx) + mov $h1,8($ctx) + mov $h2,16($ctx) # note that is_base2_26 is zeroed + jmp .Ldone_avx + +.align 16 +.Lstore_base2_26_avx: + mov %rax#d,0($ctx) # store hash value base 2^26 + mov %rdx#d,4($ctx) + mov $h0#d,8($ctx) + mov $h1#d,12($ctx) + mov $h2#d,16($ctx) +.align 16 +.Ldone_avx: + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lno_data_avx: +.Lblocks_avx_epilogue: + ret +.cfi_endproc + +.align 32 +.Lbase2_64_avx: +.cfi_startproc + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lbase2_64_avx_body: + + mov $len,%r15 # reassign $len + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + mov 0($ctx),$h0 # load hash value + mov 8($ctx),$h1 + mov 16($ctx),$h2#d + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + + test \$31,$len + jz .Linit_avx + + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + sub \$16,%r15 + + call __poly1305_block + +.Linit_avx: + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$d1 + mov $h1,$d2 + shr \$26,%rdx + and 
\$0x3ffffff,%rax # h[0] + shl \$12,$d1 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $d1,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$d2 + and \$0x3ffffff,$h1 # h[3] + or $d2,$h2 # h[4] + + vmovd %rax#d,$H0 + vmovd %rdx#d,$H1 + vmovd $h0#d,$H2 + vmovd $h1#d,$H3 + vmovd $h2#d,$H4 + movl \$1,20($ctx) # set is_base2_26 + + call __poly1305_init_avx + +.Lproceed_avx: + mov %r15,$len + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lbase2_64_avx_epilogue: + jmp .Ldo_avx +.cfi_endproc + +.align 32 +.Leven_avx: +.cfi_startproc + vmovd 4*0($ctx),$H0 # load hash value + vmovd 4*1($ctx),$H1 + vmovd 4*2($ctx),$H2 + vmovd 4*3($ctx),$H3 + vmovd 4*4($ctx),$H4 + +.Ldo_avx: +___ +$code.=<<___ if (!$win64); + lea 8(%rsp),%r10 +.cfi_def_cfa_register %r10 + and \$-32,%rsp + sub \$-8,%rsp + lea -0x58(%rsp),%r11 + sub \$0x178,%rsp + +___ +$code.=<<___ if ($win64); + lea -0xf8(%rsp),%r11 + sub \$0x218,%rsp + vmovdqa %xmm6,0x50(%r11) + vmovdqa %xmm7,0x60(%r11) + vmovdqa %xmm8,0x70(%r11) + vmovdqa %xmm9,0x80(%r11) + vmovdqa %xmm10,0x90(%r11) + vmovdqa %xmm11,0xa0(%r11) + vmovdqa %xmm12,0xb0(%r11) + vmovdqa %xmm13,0xc0(%r11) + vmovdqa %xmm14,0xd0(%r11) + vmovdqa %xmm15,0xe0(%r11) +.Ldo_avx_body: +___ +$code.=<<___; + sub \$64,$len + lea -32($inp),%rax + cmovc %rax,$inp + + vmovdqu `16*3`($ctx),$D4 # preload r0^2 + lea `16*3+64`($ctx),$ctx # size optimization + lea .Lconst(%rip),%rcx + + ################################################################ + # load input + vmovdqu 16*2($inp),$T0 + vmovdqu 16*3($inp),$T1 + vmovdqa 64(%rcx),$MASK # .Lmask26 + + vpsrldq \$6,$T0,$T2 # splat input + vpsrldq \$6,$T1,$T3 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpunpcklqdq $T3,$T2,$T3 # 2:3 + + vpsrlq \$40,$T4,$T4 # 4 + vpsrlq \$26,$T0,$T1 + vpand $MASK,$T0,$T0 # 0 + vpsrlq \$4,$T3,$T2 + vpand $MASK,$T1,$T1 # 1 + vpsrlq \$30,$T3,$T3 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + jbe .Lskip_loop_avx + + # expand and copy pre-calculated table to stack + vmovdqu `16*1-64`($ctx),$D1 + vmovdqu `16*2-64`($ctx),$D2 + vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434 + vpshufd \$0x44,$D4,$D0 # xx12 -> 1212 + vmovdqa $D3,-0x90(%r11) + vmovdqa $D0,0x00(%rsp) + vpshufd \$0xEE,$D1,$D4 + vmovdqu `16*3-64`($ctx),$D0 + vpshufd \$0x44,$D1,$D1 + vmovdqa $D4,-0x80(%r11) + vmovdqa $D1,0x10(%rsp) + vpshufd \$0xEE,$D2,$D3 + vmovdqu `16*4-64`($ctx),$D1 + vpshufd \$0x44,$D2,$D2 + vmovdqa $D3,-0x70(%r11) + vmovdqa $D2,0x20(%rsp) + vpshufd \$0xEE,$D0,$D4 + vmovdqu `16*5-64`($ctx),$D2 + vpshufd \$0x44,$D0,$D0 + vmovdqa $D4,-0x60(%r11) + vmovdqa $D0,0x30(%rsp) + vpshufd \$0xEE,$D1,$D3 + vmovdqu `16*6-64`($ctx),$D0 + vpshufd \$0x44,$D1,$D1 + vmovdqa $D3,-0x50(%r11) + vmovdqa $D1,0x40(%rsp) + vpshufd \$0xEE,$D2,$D4 + vmovdqu `16*7-64`($ctx),$D1 + vpshufd \$0x44,$D2,$D2 + vmovdqa $D4,-0x40(%r11) + vmovdqa $D2,0x50(%rsp) + vpshufd \$0xEE,$D0,$D3 + vmovdqu `16*8-64`($ctx),$D2 + vpshufd \$0x44,$D0,$D0 + vmovdqa $D3,-0x30(%r11) + vmovdqa $D0,0x60(%rsp) + vpshufd \$0xEE,$D1,$D4 + vpshufd \$0x44,$D1,$D1 + vmovdqa $D4,-0x20(%r11) + vmovdqa $D1,0x70(%rsp) + vpshufd \$0xEE,$D2,$D3 + vmovdqa 0x00(%rsp),$D4 # preload r0^2 + vpshufd \$0x44,$D2,$D2 + vmovdqa $D3,-0x10(%r11) + vmovdqa $D2,0x80(%rsp) + + jmp .Loop_avx + +.align 32 +.Loop_avx: + ################################################################ + # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r + # \___________________/ + # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 + # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r + # \___________________/ \____________________/ + # + # Note that we start with inp[2:3]*r^2. This is because it + # doesn't depend on reduction in previous iteration. + ################################################################ + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + # + # though note that $Tx and $Hx are "reversed" in this section, + # and $D4 is preloaded with r0^2... + + vpmuludq $T0,$D4,$D0 # d0 = h0*r0 + vpmuludq $T1,$D4,$D1 # d1 = h1*r0 + vmovdqa $H2,0x20(%r11) # offload hash + vpmuludq $T2,$D4,$D2 # d3 = h2*r0 + vmovdqa 0x10(%rsp),$H2 # r1^2 + vpmuludq $T3,$D4,$D3 # d3 = h3*r0 + vpmuludq $T4,$D4,$D4 # d4 = h4*r0 + + vmovdqa $H0,0x00(%r11) # + vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1 + vmovdqa $H1,0x10(%r11) # + vpmuludq $T3,$H2,$H1 # h3*r1 + vpaddq $H0,$D0,$D0 # d0 += h4*s1 + vpaddq $H1,$D4,$D4 # d4 += h3*r1 + vmovdqa $H3,0x30(%r11) # + vpmuludq $T2,$H2,$H0 # h2*r1 + vpmuludq $T1,$H2,$H1 # h1*r1 + vpaddq $H0,$D3,$D3 # d3 += h2*r1 + vmovdqa 0x30(%rsp),$H3 # r2^2 + vpaddq $H1,$D2,$D2 # d2 += h1*r1 + vmovdqa $H4,0x40(%r11) # + vpmuludq $T0,$H2,$H2 # h0*r1 + vpmuludq $T2,$H3,$H0 # h2*r2 + vpaddq $H2,$D1,$D1 # d1 += h0*r1 + + vmovdqa 0x40(%rsp),$H4 # s2^2 + vpaddq $H0,$D4,$D4 # d4 += h2*r2 + vpmuludq $T1,$H3,$H1 # h1*r2 + vpmuludq $T0,$H3,$H3 # h0*r2 + vpaddq $H1,$D3,$D3 # d3 += h1*r2 + vmovdqa 0x50(%rsp),$H2 # r3^2 + vpaddq $H3,$D2,$D2 # d2 += h0*r2 + vpmuludq $T4,$H4,$H0 # h4*s2 + vpmuludq $T3,$H4,$H4 # h3*s2 + vpaddq $H0,$D1,$D1 # d1 += h4*s2 + vmovdqa 0x60(%rsp),$H3 # s3^2 + vpaddq $H4,$D0,$D0 # d0 += h3*s2 + + vmovdqa 0x80(%rsp),$H4 # s4^2 + vpmuludq $T1,$H2,$H1 # h1*r3 + vpmuludq $T0,$H2,$H2 # h0*r3 + vpaddq $H1,$D4,$D4 # d4 += h1*r3 + vpaddq $H2,$D3,$D3 # d3 += h0*r3 + vpmuludq $T4,$H3,$H0 # h4*s3 + vpmuludq $T3,$H3,$H1 # h3*s3 + vpaddq $H0,$D2,$D2 # d2 += h4*s3 + vmovdqu 16*0($inp),$H0 # load input + vpaddq $H1,$D1,$D1 # d1 += h3*s3 + vpmuludq $T2,$H3,$H3 # h2*s3 + vpmuludq $T2,$H4,$T2 # h2*s4 + vpaddq $H3,$D0,$D0 # d0 += h2*s3 + + vmovdqu 16*1($inp),$H1 # + vpaddq $T2,$D1,$D1 # d1 += h2*s4 + vpmuludq $T3,$H4,$T3 # h3*s4 + vpmuludq $T4,$H4,$T4 # h4*s4 + vpsrldq \$6,$H0,$H2 # splat input + vpaddq $T3,$D2,$D2 # d2 += h3*s4 + vpaddq $T4,$D3,$D3 # d3 += h4*s4 + vpsrldq \$6,$H1,$H3 # + vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4 + vpmuludq $T1,$H4,$T0 # h1*s4 + vpunpckhqdq $H1,$H0,$H4 # 4 + vpaddq $T4,$D4,$D4 # d4 += h0*r4 + vmovdqa -0x90(%r11),$T4 # r0^4 + vpaddq $T0,$D0,$D0 # d0 += h1*s4 + + vpunpcklqdq $H1,$H0,$H0 # 0:1 + vpunpcklqdq $H3,$H2,$H3 # 2:3 + + #vpsrlq \$40,$H4,$H4 # 4 + vpsrldq \$`40/8`,$H4,$H4 # 4 + vpsrlq \$26,$H0,$H1 + vpand $MASK,$H0,$H0 # 0 + vpsrlq \$4,$H3,$H2 + vpand $MASK,$H1,$H1 # 1 + vpand 0(%rcx),$H4,$H4 # .Lmask24 + vpsrlq \$30,$H3,$H3 + vpand $MASK,$H2,$H2 # 2 + vpand $MASK,$H3,$H3 # 3 + vpor 32(%rcx),$H4,$H4 # padbit, yes, always + + vpaddq 0x00(%r11),$H0,$H0 # add hash value + vpaddq 0x10(%r11),$H1,$H1 + vpaddq 0x20(%r11),$H2,$H2 + vpaddq 0x30(%r11),$H3,$H3 + vpaddq 0x40(%r11),$H4,$H4 + + lea 16*2($inp),%rax + lea 16*4($inp),$inp + sub \$64,$len + cmovc %rax,$inp + + ################################################################ + # Now we accumulate 
(inp[0:1]+hash)*r^4 + ################################################################ + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + + vpmuludq $H0,$T4,$T0 # h0*r0 + vpmuludq $H1,$T4,$T1 # h1*r0 + vpaddq $T0,$D0,$D0 + vpaddq $T1,$D1,$D1 + vmovdqa -0x80(%r11),$T2 # r1^4 + vpmuludq $H2,$T4,$T0 # h2*r0 + vpmuludq $H3,$T4,$T1 # h3*r0 + vpaddq $T0,$D2,$D2 + vpaddq $T1,$D3,$D3 + vpmuludq $H4,$T4,$T4 # h4*r0 + vpmuludq -0x70(%r11),$H4,$T0 # h4*s1 + vpaddq $T4,$D4,$D4 + + vpaddq $T0,$D0,$D0 # d0 += h4*s1 + vpmuludq $H2,$T2,$T1 # h2*r1 + vpmuludq $H3,$T2,$T0 # h3*r1 + vpaddq $T1,$D3,$D3 # d3 += h2*r1 + vmovdqa -0x60(%r11),$T3 # r2^4 + vpaddq $T0,$D4,$D4 # d4 += h3*r1 + vpmuludq $H1,$T2,$T1 # h1*r1 + vpmuludq $H0,$T2,$T2 # h0*r1 + vpaddq $T1,$D2,$D2 # d2 += h1*r1 + vpaddq $T2,$D1,$D1 # d1 += h0*r1 + + vmovdqa -0x50(%r11),$T4 # s2^4 + vpmuludq $H2,$T3,$T0 # h2*r2 + vpmuludq $H1,$T3,$T1 # h1*r2 + vpaddq $T0,$D4,$D4 # d4 += h2*r2 + vpaddq $T1,$D3,$D3 # d3 += h1*r2 + vmovdqa -0x40(%r11),$T2 # r3^4 + vpmuludq $H0,$T3,$T3 # h0*r2 + vpmuludq $H4,$T4,$T0 # h4*s2 + vpaddq $T3,$D2,$D2 # d2 += h0*r2 + vpaddq $T0,$D1,$D1 # d1 += h4*s2 + vmovdqa -0x30(%r11),$T3 # s3^4 + vpmuludq $H3,$T4,$T4 # h3*s2 + vpmuludq $H1,$T2,$T1 # h1*r3 + vpaddq $T4,$D0,$D0 # d0 += h3*s2 + + vmovdqa -0x10(%r11),$T4 # s4^4 + vpaddq $T1,$D4,$D4 # d4 += h1*r3 + vpmuludq $H0,$T2,$T2 # h0*r3 + vpmuludq $H4,$T3,$T0 # h4*s3 + vpaddq $T2,$D3,$D3 # d3 += h0*r3 + vpaddq $T0,$D2,$D2 # d2 += h4*s3 + vmovdqu 16*2($inp),$T0 # load input + vpmuludq $H3,$T3,$T2 # h3*s3 + vpmuludq $H2,$T3,$T3 # h2*s3 + vpaddq $T2,$D1,$D1 # d1 += h3*s3 + vmovdqu 16*3($inp),$T1 # + vpaddq $T3,$D0,$D0 # d0 += h2*s3 + + vpmuludq $H2,$T4,$H2 # h2*s4 + vpmuludq $H3,$T4,$H3 # h3*s4 + vpsrldq \$6,$T0,$T2 # splat input + vpaddq $H2,$D1,$D1 # d1 += h2*s4 + vpmuludq $H4,$T4,$H4 # h4*s4 + vpsrldq \$6,$T1,$T3 # + vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4 + vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4 + vpmuludq -0x20(%r11),$H0,$H4 # h0*r4 + vpmuludq $H1,$T4,$H0 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 + vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 + + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpunpcklqdq $T3,$T2,$T3 # 2:3 + + #vpsrlq \$40,$T4,$T4 # 4 + vpsrldq \$`40/8`,$T4,$T4 # 4 + vpsrlq \$26,$T0,$T1 + vmovdqa 0x00(%rsp),$D4 # preload r0^2 + vpand $MASK,$T0,$T0 # 0 + vpsrlq \$4,$T3,$T2 + vpand $MASK,$T1,$T1 # 1 + vpand 0(%rcx),$T4,$T4 # .Lmask24 + vpsrlq \$30,$T3,$T3 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + ################################################################ + # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein + # and P. 
Schwabe + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$D1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D0 + vpand $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D0,$H0,$H0 + vpsllq \$2,$D0,$D0 + vpaddq $D0,$H0,$H0 # h4 -> h0 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + ja .Loop_avx + +.Lskip_loop_avx: + ################################################################ + # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 + + vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2 + add \$32,$len + jnz .Long_tail_avx + + vpaddq $H2,$T2,$T2 + vpaddq $H0,$T0,$T0 + vpaddq $H1,$T1,$T1 + vpaddq $H3,$T3,$T3 + vpaddq $H4,$T4,$T4 + +.Long_tail_avx: + vmovdqa $H2,0x20(%r11) + vmovdqa $H0,0x00(%r11) + vmovdqa $H1,0x10(%r11) + vmovdqa $H3,0x30(%r11) + vmovdqa $H4,0x40(%r11) + + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + + vpmuludq $T2,$D4,$D2 # d2 = h2*r0 + vpmuludq $T0,$D4,$D0 # d0 = h0*r0 + vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n + vpmuludq $T1,$D4,$D1 # d1 = h1*r0 + vpmuludq $T3,$D4,$D3 # d3 = h3*r0 + vpmuludq $T4,$D4,$D4 # d4 = h4*r0 + + vpmuludq $T3,$H2,$H0 # h3*r1 + vpaddq $H0,$D4,$D4 # d4 += h3*r1 + vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n + vpmuludq $T2,$H2,$H1 # h2*r1 + vpaddq $H1,$D3,$D3 # d3 += h2*r1 + vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n + vpmuludq $T1,$H2,$H0 # h1*r1 + vpaddq $H0,$D2,$D2 # d2 += h1*r1 + vpmuludq $T0,$H2,$H2 # h0*r1 + vpaddq $H2,$D1,$D1 # d1 += h0*r1 + vpmuludq $T4,$H3,$H3 # h4*s1 + vpaddq $H3,$D0,$D0 # d0 += h4*s1 + + vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n + vpmuludq $T2,$H4,$H1 # h2*r2 + vpaddq $H1,$D4,$D4 # d4 += h2*r2 + vpmuludq $T1,$H4,$H0 # h1*r2 + vpaddq $H0,$D3,$D3 # d3 += h1*r2 + vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n + vpmuludq $T0,$H4,$H4 # h0*r2 + vpaddq $H4,$D2,$D2 # d2 += h0*r2 + vpmuludq $T4,$H2,$H1 # h4*s2 + vpaddq $H1,$D1,$D1 # d1 += h4*s2 + vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n + vpmuludq $T3,$H2,$H2 # h3*s2 + vpaddq $H2,$D0,$D0 # d0 += h3*s2 + + vpmuludq $T1,$H3,$H0 # h1*r3 + vpaddq $H0,$D4,$D4 # d4 += h1*r3 + vpmuludq $T0,$H3,$H3 # h0*r3 + vpaddq $H3,$D3,$D3 # d3 += h0*r3 + vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n + vpmuludq $T4,$H4,$H1 # h4*s3 + vpaddq $H1,$D2,$D2 # d2 += h4*s3 + vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n + vpmuludq $T3,$H4,$H0 # h3*s3 + vpaddq $H0,$D1,$D1 # d1 += h3*s3 + vpmuludq $T2,$H4,$H4 # h2*s3 + vpaddq $H4,$D0,$D0 # d0 += h2*s3 + + vpmuludq $T0,$H2,$H2 # h0*r4 + vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4 + vpmuludq $T4,$H3,$H1 # h4*s4 + vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4 + vpmuludq $T3,$H3,$H0 # h3*s4 + vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4 + vpmuludq $T2,$H3,$H1 # h2*s4 + vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4 + vpmuludq $T1,$H3,$H3 # h1*s4 + vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4 + + jz .Lshort_tail_avx + + vmovdqu 16*0($inp),$H0 # load input + vmovdqu 16*1($inp),$H1 + + vpsrldq \$6,$H0,$H2 # splat input + vpsrldq \$6,$H1,$H3 + vpunpckhqdq $H1,$H0,$H4 # 4 + vpunpcklqdq $H1,$H0,$H0 # 0:1 + vpunpcklqdq $H3,$H2,$H3 # 2:3 + + vpsrlq \$40,$H4,$H4 # 4 + vpsrlq \$26,$H0,$H1 + vpand $MASK,$H0,$H0 # 0 + 
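
The d0..d4 products and the carry chain above are easier to check in scalar form. Below is a minimal C sketch of one radix-2^26 multiply-and-reduce step with the same s[k] = 5*r[k] folding and a lazy (partial) carry pass; the function name and the sequential carry order are illustrative, not taken from this patch.

#include <stdint.h>

/*
 * Illustrative only: one radix-2^26 multiplication of the accumulator by r,
 * mirroring the d0..d4 formulas above.  h[] and r[] hold 26-bit limbs of
 * values mod 2^130-5; s[k] = 5*r[k] folds the 2^130 == 5 wrap into the
 * products.  The carry pass is "lazy": limbs are only brought back below
 * 2^26 plus a small excess, full reduction happens at emit time.
 */
static void poly1305_mul_26(uint32_t h[5], const uint32_t r[5],
			    const uint32_t s[5])
{
	uint64_t d0, d1, d2, d3, d4, c;

	d0 = (uint64_t)h[0]*r[0] + (uint64_t)h[4]*s[1] + (uint64_t)h[3]*s[2]
	   + (uint64_t)h[2]*s[3] + (uint64_t)h[1]*s[4];
	d1 = (uint64_t)h[1]*r[0] + (uint64_t)h[0]*r[1] + (uint64_t)h[4]*s[2]
	   + (uint64_t)h[3]*s[3] + (uint64_t)h[2]*s[4];
	d2 = (uint64_t)h[2]*r[0] + (uint64_t)h[1]*r[1] + (uint64_t)h[0]*r[2]
	   + (uint64_t)h[4]*s[3] + (uint64_t)h[3]*s[4];
	d3 = (uint64_t)h[3]*r[0] + (uint64_t)h[2]*r[1] + (uint64_t)h[1]*r[2]
	   + (uint64_t)h[0]*r[3] + (uint64_t)h[4]*s[4];
	d4 = (uint64_t)h[4]*r[0] + (uint64_t)h[3]*r[1] + (uint64_t)h[2]*r[2]
	   + (uint64_t)h[1]*r[3] + (uint64_t)h[0]*r[4];

	c = d0 >> 26; d0 &= 0x3ffffff; d1 += c;
	c = d1 >> 26; d1 &= 0x3ffffff; d2 += c;
	c = d2 >> 26; d2 &= 0x3ffffff; d3 += c;
	c = d3 >> 26; d3 &= 0x3ffffff; d4 += c;
	c = d4 >> 26; d4 &= 0x3ffffff; d0 += c * 5;	/* 2^130 == 5 mod p */
	c = d0 >> 26; d0 &= 0x3ffffff; d1 += c;

	h[0] = (uint32_t)d0; h[1] = (uint32_t)d1; h[2] = (uint32_t)d2;
	h[3] = (uint32_t)d3; h[4] = (uint32_t)d4;
}
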
vpsrlq \$4,$H3,$H2 + vpand $MASK,$H1,$H1 # 1 + vpsrlq \$30,$H3,$H3 + vpand $MASK,$H2,$H2 # 2 + vpand $MASK,$H3,$H3 # 3 + vpor 32(%rcx),$H4,$H4 # padbit, yes, always + + vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4 + vpaddq 0x00(%r11),$H0,$H0 + vpaddq 0x10(%r11),$H1,$H1 + vpaddq 0x20(%r11),$H2,$H2 + vpaddq 0x30(%r11),$H3,$H3 + vpaddq 0x40(%r11),$H4,$H4 + + ################################################################ + # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate + + vpmuludq $H0,$T4,$T0 # h0*r0 + vpaddq $T0,$D0,$D0 # d0 += h0*r0 + vpmuludq $H1,$T4,$T1 # h1*r0 + vpaddq $T1,$D1,$D1 # d1 += h1*r0 + vpmuludq $H2,$T4,$T0 # h2*r0 + vpaddq $T0,$D2,$D2 # d2 += h2*r0 + vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n + vpmuludq $H3,$T4,$T1 # h3*r0 + vpaddq $T1,$D3,$D3 # d3 += h3*r0 + vpmuludq $H4,$T4,$T4 # h4*r0 + vpaddq $T4,$D4,$D4 # d4 += h4*r0 + + vpmuludq $H3,$T2,$T0 # h3*r1 + vpaddq $T0,$D4,$D4 # d4 += h3*r1 + vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1 + vpmuludq $H2,$T2,$T1 # h2*r1 + vpaddq $T1,$D3,$D3 # d3 += h2*r1 + vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2 + vpmuludq $H1,$T2,$T0 # h1*r1 + vpaddq $T0,$D2,$D2 # d2 += h1*r1 + vpmuludq $H0,$T2,$T2 # h0*r1 + vpaddq $T2,$D1,$D1 # d1 += h0*r1 + vpmuludq $H4,$T3,$T3 # h4*s1 + vpaddq $T3,$D0,$D0 # d0 += h4*s1 + + vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2 + vpmuludq $H2,$T4,$T1 # h2*r2 + vpaddq $T1,$D4,$D4 # d4 += h2*r2 + vpmuludq $H1,$T4,$T0 # h1*r2 + vpaddq $T0,$D3,$D3 # d3 += h1*r2 + vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3 + vpmuludq $H0,$T4,$T4 # h0*r2 + vpaddq $T4,$D2,$D2 # d2 += h0*r2 + vpmuludq $H4,$T2,$T1 # h4*s2 + vpaddq $T1,$D1,$D1 # d1 += h4*s2 + vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3 + vpmuludq $H3,$T2,$T2 # h3*s2 + vpaddq $T2,$D0,$D0 # d0 += h3*s2 + + vpmuludq $H1,$T3,$T0 # h1*r3 + vpaddq $T0,$D4,$D4 # d4 += h1*r3 + vpmuludq $H0,$T3,$T3 # h0*r3 + vpaddq $T3,$D3,$D3 # d3 += h0*r3 + vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4 + vpmuludq $H4,$T4,$T1 # h4*s3 + vpaddq $T1,$D2,$D2 # d2 += h4*s3 + vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4 + vpmuludq $H3,$T4,$T0 # h3*s3 + vpaddq $T0,$D1,$D1 # d1 += h3*s3 + vpmuludq $H2,$T4,$T4 # h2*s3 + vpaddq $T4,$D0,$D0 # d0 += h2*s3 + + vpmuludq $H0,$T2,$T2 # h0*r4 + vpaddq $T2,$D4,$D4 # d4 += h0*r4 + vpmuludq $H4,$T3,$T1 # h4*s4 + vpaddq $T1,$D3,$D3 # d3 += h4*s4 + vpmuludq $H3,$T3,$T0 # h3*s4 + vpaddq $T0,$D2,$D2 # d2 += h3*s4 + vpmuludq $H2,$T3,$T1 # h2*s4 + vpaddq $T1,$D1,$D1 # d1 += h2*s4 + vpmuludq $H1,$T3,$T3 # h1*s4 + vpaddq $T3,$D0,$D0 # d0 += h1*s4 + +.Lshort_tail_avx: + ################################################################ + # horizontal addition + + vpsrldq \$8,$D4,$T4 + vpsrldq \$8,$D3,$T3 + vpsrldq \$8,$D1,$T1 + vpsrldq \$8,$D0,$T0 + vpsrldq \$8,$D2,$T2 + vpaddq $T3,$D3,$D3 + vpaddq $T4,$D4,$D4 + vpaddq $T0,$D0,$D0 + vpaddq $T1,$D1,$D1 + vpaddq $T2,$D2,$D2 + + ################################################################ + # lazy reduction + + vpsrlq \$26,$D3,$H3 + vpand $MASK,$D3,$D3 + vpaddq $H3,$D4,$D4 # h3 -> h4 + + vpsrlq \$26,$D0,$H0 + vpand $MASK,$D0,$D0 + vpaddq $H0,$D1,$D1 # h0 -> h1 + + vpsrlq \$26,$D4,$H4 + vpand $MASK,$D4,$D4 + + vpsrlq \$26,$D1,$H1 + vpand $MASK,$D1,$D1 + vpaddq $H1,$D2,$D2 # h1 -> h2 + + vpaddq $H4,$D0,$D0 + vpsllq \$2,$H4,$H4 + vpaddq $H4,$D0,$D0 # h4 -> h0 + + vpsrlq \$26,$D2,$H2 + vpand $MASK,$D2,$D2 + vpaddq $H2,$D3,$D3 # h2 -> h3 + + vpsrlq \$26,$D0,$H0 + vpand $MASK,$D0,$D0 + vpaddq $H0,$D1,$D1 # h0 -> h1 + + vpsrlq \$26,$D3,$H3 + vpand $MASK,$D3,$D3 + vpaddq $H3,$D4,$D4 # h3 -> h4 + + vmovd $D0,`4*0-48-64`($ctx) # save partially reduced + 
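
The "h4 -> h0" vpaddq/vpsllq/vpaddq triple in the lazy reduction above is the wrap of the top carry: multiplying by 5 is done shift-and-add style as c + (c << 2). A one-line scalar equivalent (the helper name is illustrative):

#include <stdint.h>

/* h0 += 5*c, computed as c + (c << 2) like the vpaddq/vpsllq pair above. */
static inline uint64_t wrap_top_carry(uint64_t h0, uint64_t c)
{
	return h0 + c + (c << 2);
}
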
vmovd $D1,`4*1-48-64`($ctx) + vmovd $D2,`4*2-48-64`($ctx) + vmovd $D3,`4*3-48-64`($ctx) + vmovd $D4,`4*4-48-64`($ctx) +___ +$code.=<<___ if ($win64); + vmovdqa 0x50(%r11),%xmm6 + vmovdqa 0x60(%r11),%xmm7 + vmovdqa 0x70(%r11),%xmm8 + vmovdqa 0x80(%r11),%xmm9 + vmovdqa 0x90(%r11),%xmm10 + vmovdqa 0xa0(%r11),%xmm11 + vmovdqa 0xb0(%r11),%xmm12 + vmovdqa 0xc0(%r11),%xmm13 + vmovdqa 0xd0(%r11),%xmm14 + vmovdqa 0xe0(%r11),%xmm15 + lea 0xf8(%r11),%rsp +.Ldo_avx_epilogue: +___ +$code.=<<___ if (!$win64); + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +___ +$code.=<<___; + vzeroupper + ret +.cfi_endproc +___ +&end_function("poly1305_blocks_avx"); + +&declare_function("poly1305_emit_avx", 32, 3); +$code.=<<___; + cmpl \$0,20($ctx) # is_base2_26? + je .Lemit + + mov 0($ctx),%eax # load hash value base 2^26 + mov 4($ctx),%ecx + mov 8($ctx),%r8d + mov 12($ctx),%r11d + mov 16($ctx),%r10d + + shl \$26,%rcx # base 2^26 -> base 2^64 + mov %r8,%r9 + shl \$52,%r8 + add %rcx,%rax + shr \$12,%r9 + add %rax,%r8 # h0 + adc \$0,%r9 + + shl \$14,%r11 + mov %r10,%rax + shr \$24,%r10 + add %r11,%r9 + shl \$40,%rax + add %rax,%r9 # h1 + adc \$0,%r10 # h2 + + mov %r10,%rax # could be partially reduced, so reduce + mov %r10,%rcx + and \$3,%r10 + shr \$2,%rax + and \$-4,%rcx + add %rcx,%rax + add %rax,%r8 + adc \$0,%r9 + adc \$0,%r10 + + mov %r8,%rax + add \$5,%r8 # compare to modulus + mov %r9,%rcx + adc \$0,%r9 + adc \$0,%r10 + shr \$2,%r10 # did 130-bit value overflow? + cmovnz %r8,%rax + cmovnz %r9,%rcx + + add 0($nonce),%rax # accumulate nonce + adc 8($nonce),%rcx + mov %rax,0($mac) # write result + mov %rcx,8($mac) + + ret +___ +&end_function("poly1305_emit_avx"); + +if ($kernel) { + $code .= "#endif\n"; +} + +if ($avx>1) { + +if ($kernel) { + $code .= "#ifdef CONFIG_AS_AVX2\n"; +} + +my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) = + map("%ymm$_",(0..15)); +my $S4=$MASK; + +sub poly1305_blocks_avxN { + my ($avx512) = @_; + my $suffix = $avx512 ? "_avx512" : ""; +$code.=<<___; +.cfi_startproc + mov 20($ctx),%r8d # is_base2_26 + cmp \$128,$len + jae .Lblocks_avx2$suffix + test %r8d,%r8d + jz .Lblocks + +.Lblocks_avx2$suffix: + and \$-16,$len + jz .Lno_data_avx2$suffix + + vzeroupper + + test %r8d,%r8d + jz .Lbase2_64_avx2$suffix + + test \$63,$len + jz .Leven_avx2$suffix + + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lblocks_avx2_body$suffix: + + mov $len,%r15 # reassign $len + + mov 0($ctx),$d1 # load hash value + mov 8($ctx),$d2 + mov 16($ctx),$h2#d + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + ################################# base 2^26 -> base 2^64 + mov $d1#d,$h0#d + and \$`-1*(1<<31)`,$d1 + mov $d2,$r1 # borrow $r1 + mov $d2#d,$h1#d + and \$`-1*(1<<31)`,$d2 + + shr \$6,$d1 + shl \$52,$r1 + add $d1,$h0 + shr \$12,$h1 + shr \$18,$d2 + add $r1,$h0 + adc $d2,$h1 + + mov $h2,$d1 + shl \$40,$d1 + shr \$24,$h2 + add $d1,$h1 + adc \$0,$h2 # can be partially reduced... + + mov \$-4,$d2 # ... 
so reduce + mov $h2,$d1 + and $h2,$d2 + shr \$2,$d1 + and \$3,$h2 + add $d2,$d1 # =*5 + add $d1,$h0 + adc \$0,$h1 + adc \$0,$h2 + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + +.Lbase2_26_pre_avx2$suffix: + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + sub \$16,%r15 + + call __poly1305_block + mov $r1,%rax + + test \$63,%r15 + jnz .Lbase2_26_pre_avx2$suffix + + test $padbit,$padbit # if $padbit is zero, + jz .Lstore_base2_64_avx2$suffix # store hash in base 2^64 format + + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$r0 + mov $h1,$r1 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$r0 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $r0,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$r1 + and \$0x3ffffff,$h1 # h[3] + or $r1,$h2 # h[4] + + test %r15,%r15 + jz .Lstore_base2_26_avx2$suffix + + vmovd %rax#d,%x#$H0 + vmovd %rdx#d,%x#$H1 + vmovd $h0#d,%x#$H2 + vmovd $h1#d,%x#$H3 + vmovd $h2#d,%x#$H4 + jmp .Lproceed_avx2$suffix + +.align 32 +.Lstore_base2_64_avx2$suffix: + mov $h0,0($ctx) + mov $h1,8($ctx) + mov $h2,16($ctx) # note that is_base2_26 is zeroed + jmp .Ldone_avx2$suffix + +.align 16 +.Lstore_base2_26_avx2$suffix: + mov %rax#d,0($ctx) # store hash value base 2^26 + mov %rdx#d,4($ctx) + mov $h0#d,8($ctx) + mov $h1#d,12($ctx) + mov $h2#d,16($ctx) +.align 16 +.Ldone_avx2$suffix: + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lno_data_avx2$suffix: +.Lblocks_avx2_epilogue$suffix: + ret +.cfi_endproc + +.align 32 +.Lbase2_64_avx2$suffix: +.cfi_startproc + push %rbp +.cfi_push %rbp + mov %rsp,%rbp + push %rbx +.cfi_push %rbx + push %r12 +.cfi_push %r12 + push %r13 +.cfi_push %r13 + push %r14 +.cfi_push %r14 + push %r15 +.cfi_push %r15 +.Lbase2_64_avx2_body$suffix: + + mov $len,%r15 # reassign $len + + mov 24($ctx),$r0 # load r + mov 32($ctx),$s1 + + mov 0($ctx),$h0 # load hash value + mov 8($ctx),$h1 + mov 16($ctx),$h2#d + + mov $s1,$r1 + mov $s1,%rax + shr \$2,$s1 + add $r1,$s1 # s1 = r1 + (r1 >> 2) + + test \$63,$len + jz .Linit_avx2$suffix + +.Lbase2_64_pre_avx2$suffix: + add 0($inp),$h0 # accumulate input + adc 8($inp),$h1 + lea 16($inp),$inp + adc $padbit,$h2 + sub \$16,%r15 + + call __poly1305_block + mov $r1,%rax + + test \$63,%r15 + jnz .Lbase2_64_pre_avx2$suffix + +.Linit_avx2$suffix: + ################################# base 2^64 -> base 2^26 + mov $h0,%rax + mov $h0,%rdx + shr \$52,$h0 + mov $h1,$d1 + mov $h1,$d2 + shr \$26,%rdx + and \$0x3ffffff,%rax # h[0] + shl \$12,$d1 + and \$0x3ffffff,%rdx # h[1] + shr \$14,$h1 + or $d1,$h0 + shl \$24,$h2 + and \$0x3ffffff,$h0 # h[2] + shr \$40,$d2 + and \$0x3ffffff,$h1 # h[3] + or $d2,$h2 # h[4] + + vmovd %rax#d,%x#$H0 + vmovd %rdx#d,%x#$H1 + vmovd $h0#d,%x#$H2 + vmovd $h1#d,%x#$H3 + vmovd $h2#d,%x#$H4 + movl \$1,20($ctx) # set is_base2_26 + + call __poly1305_init_avx + +.Lproceed_avx2$suffix: + mov %r15,$len # restore $len +___ +$code.=<<___ if (!$kernel); + mov OPENSSL_ia32cap_P+8(%rip),%r9d + mov \$`(1<<31|1<<30|1<<16)`,%r11d +___ +$code.=<<___; + pop %r15 +.cfi_restore %r15 + pop %r14 +.cfi_restore %r14 + pop %r13 +.cfi_restore %r13 + pop %r12 +.cfi_restore %r12 + pop %rbx +.cfi_restore %rbx + pop %rbp +.cfi_restore %rbp +.Lbase2_64_avx2_epilogue$suffix: + jmp .Ldo_avx2$suffix +.cfi_endproc + +.align 32 +.Leven_avx2$suffix: 
+.cfi_startproc +___ +$code.=<<___ if (!$kernel); + mov OPENSSL_ia32cap_P+8(%rip),%r9d +___ +$code.=<<___; + vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26 + vmovd 4*1($ctx),%x#$H1 + vmovd 4*2($ctx),%x#$H2 + vmovd 4*3($ctx),%x#$H3 + vmovd 4*4($ctx),%x#$H4 + +.Ldo_avx2$suffix: +___ +$code.=<<___ if (!$kernel && $avx>2); + cmp \$512,$len + jb .Lskip_avx512 + and %r11d,%r9d + test \$`1<<16`,%r9d # check for AVX512F + jnz .Lblocks_avx512 +.Lskip_avx512$suffix: +___ +$code.=<<___ if ($avx > 2 && $avx512 && $kernel); + cmp \$512,$len + jae .Lblocks_avx512 +___ +$code.=<<___ if (!$win64); + lea 8(%rsp),%r10 +.cfi_def_cfa_register %r10 + sub \$0x128,%rsp +___ +$code.=<<___ if ($win64); + lea 8(%rsp),%r10 + sub \$0x1c8,%rsp + vmovdqa %xmm6,-0xb0(%r10) + vmovdqa %xmm7,-0xa0(%r10) + vmovdqa %xmm8,-0x90(%r10) + vmovdqa %xmm9,-0x80(%r10) + vmovdqa %xmm10,-0x70(%r10) + vmovdqa %xmm11,-0x60(%r10) + vmovdqa %xmm12,-0x50(%r10) + vmovdqa %xmm13,-0x40(%r10) + vmovdqa %xmm14,-0x30(%r10) + vmovdqa %xmm15,-0x20(%r10) +.Ldo_avx2_body$suffix: +___ +$code.=<<___; + lea .Lconst(%rip),%rcx + lea 48+64($ctx),$ctx # size optimization + vmovdqa 96(%rcx),$T0 # .Lpermd_avx2 + + # expand and copy pre-calculated table to stack + vmovdqu `16*0-64`($ctx),%x#$T2 + and \$-512,%rsp + vmovdqu `16*1-64`($ctx),%x#$T3 + vmovdqu `16*2-64`($ctx),%x#$T4 + vmovdqu `16*3-64`($ctx),%x#$D0 + vmovdqu `16*4-64`($ctx),%x#$D1 + vmovdqu `16*5-64`($ctx),%x#$D2 + lea 0x90(%rsp),%rax # size optimization + vmovdqu `16*6-64`($ctx),%x#$D3 + vpermd $T2,$T0,$T2 # 00003412 -> 14243444 + vmovdqu `16*7-64`($ctx),%x#$D4 + vpermd $T3,$T0,$T3 + vmovdqu `16*8-64`($ctx),%x#$MASK + vpermd $T4,$T0,$T4 + vmovdqa $T2,0x00(%rsp) + vpermd $D0,$T0,$D0 + vmovdqa $T3,0x20-0x90(%rax) + vpermd $D1,$T0,$D1 + vmovdqa $T4,0x40-0x90(%rax) + vpermd $D2,$T0,$D2 + vmovdqa $D0,0x60-0x90(%rax) + vpermd $D3,$T0,$D3 + vmovdqa $D1,0x80-0x90(%rax) + vpermd $D4,$T0,$D4 + vmovdqa $D2,0xa0-0x90(%rax) + vpermd $MASK,$T0,$MASK + vmovdqa $D3,0xc0-0x90(%rax) + vmovdqa $D4,0xe0-0x90(%rax) + vmovdqa $MASK,0x100-0x90(%rax) + vmovdqa 64(%rcx),$MASK # .Lmask26 + + ################################################################ + # load input + vmovdqu 16*0($inp),%x#$T0 + vmovdqu 16*1($inp),%x#$T1 + vinserti128 \$1,16*2($inp),$T0,$T0 + vinserti128 \$1,16*3($inp),$T1,$T1 + lea 16*4($inp),$inp + + vpsrldq \$6,$T0,$T2 # splat input + vpsrldq \$6,$T1,$T3 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpunpcklqdq $T3,$T2,$T2 # 2:3 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + + vpsrlq \$30,$T2,$T3 + vpsrlq \$4,$T2,$T2 + vpsrlq \$26,$T0,$T1 + vpsrlq \$40,$T4,$T4 # 4 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T0,$T0 # 0 + vpand $MASK,$T1,$T1 # 1 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + + vpaddq $H2,$T2,$H2 # accumulate input + sub \$64,$len + jz .Ltail_avx2$suffix + jmp .Loop_avx2$suffix + +.align 32 +.Loop_avx2$suffix: + ################################################################ + # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4 + # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3 + # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2 + # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1 + # \________/\__________/ + ################################################################ + #vpaddq $H2,$T2,$H2 # accumulate input + vpaddq $H0,$T0,$H0 + vmovdqa `32*0`(%rsp),$T0 # r0^4 + vpaddq $H1,$T1,$H1 + vmovdqa `32*1`(%rsp),$T1 # r1^4 + vpaddq $H3,$T3,$H3 + vmovdqa `32*3`(%rsp),$T2 # r2^4 + vpaddq $H4,$T4,$H4 + vmovdqa `32*6-0x90`(%rax),$T3 # s3^4 + vmovdqa `32*8-0x90`(%rax),$S4 # s4^4 + + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # 
d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + # + # however, as h2 is "chronologically" first one available pull + # corresponding operations up, so it's + # + # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4 + # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4 + + vpmuludq $H2,$T0,$D2 # d2 = h2*r0 + vpmuludq $H2,$T1,$D3 # d3 = h2*r1 + vpmuludq $H2,$T2,$D4 # d4 = h2*r2 + vpmuludq $H2,$T3,$D0 # d0 = h2*s3 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + + vpmuludq $H0,$T1,$T4 # h0*r1 + vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp + vpaddq $T4,$D1,$D1 # d1 += h0*r1 + vpaddq $H2,$D2,$D2 # d2 += h1*r1 + vpmuludq $H3,$T1,$T4 # h3*r1 + vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1 + vpaddq $T4,$D4,$D4 # d4 += h3*r1 + vpaddq $H2,$D0,$D0 # d0 += h4*s1 + vmovdqa `32*4-0x90`(%rax),$T1 # s2 + + vpmuludq $H0,$T0,$T4 # h0*r0 + vpmuludq $H1,$T0,$H2 # h1*r0 + vpaddq $T4,$D0,$D0 # d0 += h0*r0 + vpaddq $H2,$D1,$D1 # d1 += h1*r0 + vpmuludq $H3,$T0,$T4 # h3*r0 + vpmuludq $H4,$T0,$H2 # h4*r0 + vmovdqu 16*0($inp),%x#$T0 # load input + vpaddq $T4,$D3,$D3 # d3 += h3*r0 + vpaddq $H2,$D4,$D4 # d4 += h4*r0 + vinserti128 \$1,16*2($inp),$T0,$T0 + + vpmuludq $H3,$T1,$T4 # h3*s2 + vpmuludq $H4,$T1,$H2 # h4*s2 + vmovdqu 16*1($inp),%x#$T1 + vpaddq $T4,$D0,$D0 # d0 += h3*s2 + vpaddq $H2,$D1,$D1 # d1 += h4*s2 + vmovdqa `32*5-0x90`(%rax),$H2 # r3 + vpmuludq $H1,$T2,$T4 # h1*r2 + vpmuludq $H0,$T2,$T2 # h0*r2 + vpaddq $T4,$D3,$D3 # d3 += h1*r2 + vpaddq $T2,$D2,$D2 # d2 += h0*r2 + vinserti128 \$1,16*3($inp),$T1,$T1 + lea 16*4($inp),$inp + + vpmuludq $H1,$H2,$T4 # h1*r3 + vpmuludq $H0,$H2,$H2 # h0*r3 + vpsrldq \$6,$T0,$T2 # splat input + vpaddq $T4,$D4,$D4 # d4 += h1*r3 + vpaddq $H2,$D3,$D3 # d3 += h0*r3 + vpmuludq $H3,$T3,$T4 # h3*s3 + vpmuludq $H4,$T3,$H2 # h4*s3 + vpsrldq \$6,$T1,$T3 + vpaddq $T4,$D1,$D1 # d1 += h3*s3 + vpaddq $H2,$D2,$D2 # d2 += h4*s3 + vpunpckhqdq $T1,$T0,$T4 # 4 + + vpmuludq $H3,$S4,$H3 # h3*s4 + vpmuludq $H4,$S4,$H4 # h4*s4 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 + vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 + vpunpcklqdq $T3,$T2,$T3 # 2:3 + vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4 + vpmuludq $H1,$S4,$H0 # h1*s4 + vmovdqa 64(%rcx),$MASK # .Lmask26 + vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 + vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 + + ################################################################ + # lazy reduction (interleaved with tail of input splat) + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$D1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D4 + vpand $MASK,$H4,$H4 + + vpsrlq \$4,$T3,$T2 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpand $MASK,$T2,$T2 # 2 + vpsrlq \$26,$T0,$T1 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpaddq $T2,$H2,$H2 # modulo-scheduled + vpsrlq \$30,$T3,$T3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$40,$T4,$T4 # 4 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpand $MASK,$T0,$T0 # 0 + vpand $MASK,$T1,$T1 # 1 + vpand $MASK,$T3,$T3 # 3 + vpor 
32(%rcx),$T4,$T4 # padbit, yes, always + + sub \$64,$len + jnz .Loop_avx2$suffix + + .byte 0x66,0x90 +.Ltail_avx2$suffix: + ################################################################ + # while above multiplications were by r^4 in all lanes, in last + # iteration we multiply least significant lane by r^4 and most + # significant one by r, so copy of above except that references + # to the precomputed table are displaced by 4... + + #vpaddq $H2,$T2,$H2 # accumulate input + vpaddq $H0,$T0,$H0 + vmovdqu `32*0+4`(%rsp),$T0 # r0^4 + vpaddq $H1,$T1,$H1 + vmovdqu `32*1+4`(%rsp),$T1 # r1^4 + vpaddq $H3,$T3,$H3 + vmovdqu `32*3+4`(%rsp),$T2 # r2^4 + vpaddq $H4,$T4,$H4 + vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4 + vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4 + + vpmuludq $H2,$T0,$D2 # d2 = h2*r0 + vpmuludq $H2,$T1,$D3 # d3 = h2*r1 + vpmuludq $H2,$T2,$D4 # d4 = h2*r2 + vpmuludq $H2,$T3,$D0 # d0 = h2*s3 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + + vpmuludq $H0,$T1,$T4 # h0*r1 + vpmuludq $H1,$T1,$H2 # h1*r1 + vpaddq $T4,$D1,$D1 # d1 += h0*r1 + vpaddq $H2,$D2,$D2 # d2 += h1*r1 + vpmuludq $H3,$T1,$T4 # h3*r1 + vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1 + vpaddq $T4,$D4,$D4 # d4 += h3*r1 + vpaddq $H2,$D0,$D0 # d0 += h4*s1 + + vpmuludq $H0,$T0,$T4 # h0*r0 + vpmuludq $H1,$T0,$H2 # h1*r0 + vpaddq $T4,$D0,$D0 # d0 += h0*r0 + vmovdqu `32*4+4-0x90`(%rax),$T1 # s2 + vpaddq $H2,$D1,$D1 # d1 += h1*r0 + vpmuludq $H3,$T0,$T4 # h3*r0 + vpmuludq $H4,$T0,$H2 # h4*r0 + vpaddq $T4,$D3,$D3 # d3 += h3*r0 + vpaddq $H2,$D4,$D4 # d4 += h4*r0 + + vpmuludq $H3,$T1,$T4 # h3*s2 + vpmuludq $H4,$T1,$H2 # h4*s2 + vpaddq $T4,$D0,$D0 # d0 += h3*s2 + vpaddq $H2,$D1,$D1 # d1 += h4*s2 + vmovdqu `32*5+4-0x90`(%rax),$H2 # r3 + vpmuludq $H1,$T2,$T4 # h1*r2 + vpmuludq $H0,$T2,$T2 # h0*r2 + vpaddq $T4,$D3,$D3 # d3 += h1*r2 + vpaddq $T2,$D2,$D2 # d2 += h0*r2 + + vpmuludq $H1,$H2,$T4 # h1*r3 + vpmuludq $H0,$H2,$H2 # h0*r3 + vpaddq $T4,$D4,$D4 # d4 += h1*r3 + vpaddq $H2,$D3,$D3 # d3 += h0*r3 + vpmuludq $H3,$T3,$T4 # h3*s3 + vpmuludq $H4,$T3,$H2 # h4*s3 + vpaddq $T4,$D1,$D1 # d1 += h3*s3 + vpaddq $H2,$D2,$D2 # d2 += h4*s3 + + vpmuludq $H3,$S4,$H3 # h3*s4 + vpmuludq $H4,$S4,$H4 # h4*s4 + vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 + vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 + vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4 + vpmuludq $H1,$S4,$H0 # h1*s4 + vmovdqa 64(%rcx),$MASK # .Lmask26 + vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 + vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 + + ################################################################ + # horizontal addition + + vpsrldq \$8,$D1,$T1 + vpsrldq \$8,$H2,$T2 + vpsrldq \$8,$H3,$T3 + vpsrldq \$8,$H4,$T4 + vpsrldq \$8,$H0,$T0 + vpaddq $T1,$D1,$D1 + vpaddq $T2,$H2,$H2 + vpaddq $T3,$H3,$H3 + vpaddq $T4,$H4,$H4 + vpaddq $T0,$H0,$H0 + + vpermq \$0x2,$H3,$T3 + vpermq \$0x2,$H4,$T4 + vpermq \$0x2,$H0,$T0 + vpermq \$0x2,$D1,$T1 + vpermq \$0x2,$H2,$T2 + vpaddq $T3,$H3,$H3 + vpaddq $T4,$H4,$H4 + vpaddq $T0,$H0,$H0 + vpaddq $T1,$D1,$D1 + vpaddq $T2,$H2,$H2 + + ################################################################ + # lazy reduction + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$D1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D4 + vpand $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 
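
The tail and the horizontal addition above implement the per-lane combination sketched in the .Loop_avx2 comment: every lane advances by r^4, and the last step multiplies lane j by r^(4-j) so that the lane sum equals the sequential Horner result. A toy scalar model of that schedule, using a small modulus purely for illustration (none of these names exist in the patch):

#include <stdint.h>
#include <stdio.h>

#define P 1000003ULL	/* toy modulus, stands in for 2^130-5 */

static uint64_t horner(const uint64_t *m, int n, uint64_t r)
{
	uint64_t h = 0;

	for (int i = 0; i < n; i++)
		h = (h + m[i]) * r % P;
	return h;
}

static uint64_t four_lanes(const uint64_t *m, int n, uint64_t r) /* n%4 == 0 */
{
	uint64_t r2 = r * r % P, r4 = r2 * r2 % P;
	uint64_t last[4] = { r4, r2 * r % P, r2, r };	/* r^4, r^3, r^2, r^1 */
	uint64_t h[4] = { 0, 0, 0, 0 }, sum = 0;

	for (int i = 0; i < n; i += 4)
		for (int j = 0; j < 4; j++)
			h[j] = (h[j] + m[i + j]) * (i + 4 < n ? r4 : last[j]) % P;

	for (int j = 0; j < 4; j++)	/* "horizontal addition" */
		sum = (sum + h[j]) % P;
	return sum;
}

int main(void)
{
	uint64_t m[8] = { 11, 22, 33, 44, 55, 66, 77, 88 };

	/* both print the same value */
	printf("%llu %llu\n", (unsigned long long)horner(m, 8, 12345),
	       (unsigned long long)four_lanes(m, 8, 12345));
	return 0;
}
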
+ + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced + vmovd %x#$H1,`4*1-48-64`($ctx) + vmovd %x#$H2,`4*2-48-64`($ctx) + vmovd %x#$H3,`4*3-48-64`($ctx) + vmovd %x#$H4,`4*4-48-64`($ctx) +___ +$code.=<<___ if ($win64); + vmovdqa -0xb0(%r10),%xmm6 + vmovdqa -0xa0(%r10),%xmm7 + vmovdqa -0x90(%r10),%xmm8 + vmovdqa -0x80(%r10),%xmm9 + vmovdqa -0x70(%r10),%xmm10 + vmovdqa -0x60(%r10),%xmm11 + vmovdqa -0x50(%r10),%xmm12 + vmovdqa -0x40(%r10),%xmm13 + vmovdqa -0x30(%r10),%xmm14 + vmovdqa -0x20(%r10),%xmm15 + lea -8(%r10),%rsp +.Ldo_avx2_epilogue$suffix: +___ +$code.=<<___ if (!$win64); + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +___ +$code.=<<___; + vzeroupper + ret +.cfi_endproc +___ +if($avx > 2 && $avx512) { +my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24)); +my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29)); +my $PADBIT="%zmm30"; + +map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain +map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4)); +map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4)); +map(s/%y/%z/,($MASK)); + +$code.=<<___; +.cfi_startproc +.Lblocks_avx512: + mov \$15,%eax + kmovw %eax,%k2 +___ +$code.=<<___ if (!$win64); + lea 8(%rsp),%r10 +.cfi_def_cfa_register %r10 + sub \$0x128,%rsp +___ +$code.=<<___ if ($win64); + lea 8(%rsp),%r10 + sub \$0x1c8,%rsp + vmovdqa %xmm6,-0xb0(%r10) + vmovdqa %xmm7,-0xa0(%r10) + vmovdqa %xmm8,-0x90(%r10) + vmovdqa %xmm9,-0x80(%r10) + vmovdqa %xmm10,-0x70(%r10) + vmovdqa %xmm11,-0x60(%r10) + vmovdqa %xmm12,-0x50(%r10) + vmovdqa %xmm13,-0x40(%r10) + vmovdqa %xmm14,-0x30(%r10) + vmovdqa %xmm15,-0x20(%r10) +.Ldo_avx512_body: +___ +$code.=<<___; + lea .Lconst(%rip),%rcx + lea 48+64($ctx),$ctx # size optimization + vmovdqa 96(%rcx),%y#$T2 # .Lpermd_avx2 + + # expand pre-calculated table + vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0} + and \$-512,%rsp + vmovdqu `16*1-64`($ctx),%x#$D1 # will become ... ${R1} + mov \$0x20,%rax + vmovdqu `16*2-64`($ctx),%x#$T0 # ... ${S1} + vmovdqu `16*3-64`($ctx),%x#$D2 # ... ${R2} + vmovdqu `16*4-64`($ctx),%x#$T1 # ... ${S2} + vmovdqu `16*5-64`($ctx),%x#$D3 # ... ${R3} + vmovdqu `16*6-64`($ctx),%x#$T3 # ... ${S3} + vmovdqu `16*7-64`($ctx),%x#$D4 # ... ${R4} + vmovdqu `16*8-64`($ctx),%x#$T4 # ... 
${S4} + vpermd $D0,$T2,$R0 # 00003412 -> 14243444 + vpbroadcastq 64(%rcx),$MASK # .Lmask26 + vpermd $D1,$T2,$R1 + vpermd $T0,$T2,$S1 + vpermd $D2,$T2,$R2 + vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0 + vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304 + vpermd $T1,$T2,$S2 + vmovdqu64 $R1,0x00(%rsp,%rax){%k2} + vpsrlq \$32,$R1,$T1 + vpermd $D3,$T2,$R3 + vmovdqa64 $S1,0x40(%rsp){%k2} + vpermd $T3,$T2,$S3 + vpermd $D4,$T2,$R4 + vmovdqu64 $R2,0x40(%rsp,%rax){%k2} + vpermd $T4,$T2,$S4 + vmovdqa64 $S2,0x80(%rsp){%k2} + vmovdqu64 $R3,0x80(%rsp,%rax){%k2} + vmovdqa64 $S3,0xc0(%rsp){%k2} + vmovdqu64 $R4,0xc0(%rsp,%rax){%k2} + vmovdqa64 $S4,0x100(%rsp){%k2} + + ################################################################ + # calculate 5th through 8th powers of the key + # + # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1 + # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2 + # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3 + # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4 + # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0 + + vpmuludq $T0,$R0,$D0 # d0 = r0'*r0 + vpmuludq $T0,$R1,$D1 # d1 = r0'*r1 + vpmuludq $T0,$R2,$D2 # d2 = r0'*r2 + vpmuludq $T0,$R3,$D3 # d3 = r0'*r3 + vpmuludq $T0,$R4,$D4 # d4 = r0'*r4 + vpsrlq \$32,$R2,$T2 + + vpmuludq $T1,$S4,$M0 + vpmuludq $T1,$R0,$M1 + vpmuludq $T1,$R1,$M2 + vpmuludq $T1,$R2,$M3 + vpmuludq $T1,$R3,$M4 + vpsrlq \$32,$R3,$T3 + vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4 + vpaddq $M1,$D1,$D1 # d1 += r1'*r0 + vpaddq $M2,$D2,$D2 # d2 += r1'*r1 + vpaddq $M3,$D3,$D3 # d3 += r1'*r2 + vpaddq $M4,$D4,$D4 # d4 += r1'*r3 + + vpmuludq $T2,$S3,$M0 + vpmuludq $T2,$S4,$M1 + vpmuludq $T2,$R1,$M3 + vpmuludq $T2,$R2,$M4 + vpmuludq $T2,$R0,$M2 + vpsrlq \$32,$R4,$T4 + vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3 + vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4 + vpaddq $M3,$D3,$D3 # d3 += r2'*r1 + vpaddq $M4,$D4,$D4 # d4 += r2'*r2 + vpaddq $M2,$D2,$D2 # d2 += r2'*r0 + + vpmuludq $T3,$S2,$M0 + vpmuludq $T3,$R0,$M3 + vpmuludq $T3,$R1,$M4 + vpmuludq $T3,$S3,$M1 + vpmuludq $T3,$S4,$M2 + vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2 + vpaddq $M3,$D3,$D3 # d3 += r3'*r0 + vpaddq $M4,$D4,$D4 # d4 += r3'*r1 + vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3 + vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4 + + vpmuludq $T4,$S4,$M3 + vpmuludq $T4,$R0,$M4 + vpmuludq $T4,$S1,$M0 + vpmuludq $T4,$S2,$M1 + vpmuludq $T4,$S3,$M2 + vpaddq $M3,$D3,$D3 # d3 += r2'*5*r4 + vpaddq $M4,$D4,$D4 # d4 += r2'*r0 + vpaddq $M0,$D0,$D0 # d0 += r2'*5*r1 + vpaddq $M1,$D1,$D1 # d1 += r2'*5*r2 + vpaddq $M2,$D2,$D2 # d2 += r2'*5*r3 + + ################################################################ + # load input + vmovdqu64 16*0($inp),%z#$T3 + vmovdqu64 16*4($inp),%z#$T4 + lea 16*8($inp),$inp + + ################################################################ + # lazy reduction + + vpsrlq \$26,$D3,$M3 + vpandq $MASK,$D3,$D3 + vpaddq $M3,$D4,$D4 # d3 -> d4 + + vpsrlq \$26,$D0,$M0 + vpandq $MASK,$D0,$D0 + vpaddq $M0,$D1,$D1 # d0 -> d1 + + vpsrlq \$26,$D4,$M4 + vpandq $MASK,$D4,$D4 + + vpsrlq \$26,$D1,$M1 + vpandq $MASK,$D1,$D1 + vpaddq $M1,$D2,$D2 # d1 -> d2 + + vpaddq $M4,$D0,$D0 + vpsllq \$2,$M4,$M4 + vpaddq $M4,$D0,$D0 # d4 -> d0 + + vpsrlq \$26,$D2,$M2 + vpandq $MASK,$D2,$D2 + vpaddq $M2,$D3,$D3 # d2 -> d3 + + vpsrlq \$26,$D0,$M0 + vpandq $MASK,$D0,$D0 + vpaddq $M0,$D1,$D1 # d0 -> d1 + + vpsrlq \$26,$D3,$M3 + vpandq $MASK,$D3,$D3 + vpaddq $M3,$D4,$D4 # d3 -> d4 + + ################################################################ + # at this point we have 14243444 in $R0-$S4 and 05060708 in + # $D0-$D4, ... 
+ + vpunpcklqdq $T4,$T3,$T0 # transpose input + vpunpckhqdq $T4,$T3,$T4 + + # ... since input 64-bit lanes are ordered as 73625140, we could + # "vperm" it to 76543210 (here and in each loop iteration), *or* + # we could just flow along, hence the goal for $R0-$S4 is + # 1858286838784888 ... + + vmovdqa32 128(%rcx),$M0 # .Lpermd_avx512: + mov \$0x7777,%eax + kmovw %eax,%k1 + + vpermd $R0,$M0,$R0 # 14243444 -> 1---2---3---4--- + vpermd $R1,$M0,$R1 + vpermd $R2,$M0,$R2 + vpermd $R3,$M0,$R3 + vpermd $R4,$M0,$R4 + + vpermd $D0,$M0,${R0}{%k1} # 05060708 -> 1858286838784888 + vpermd $D1,$M0,${R1}{%k1} + vpermd $D2,$M0,${R2}{%k1} + vpermd $D3,$M0,${R3}{%k1} + vpermd $D4,$M0,${R4}{%k1} + + vpslld \$2,$R1,$S1 # *5 + vpslld \$2,$R2,$S2 + vpslld \$2,$R3,$S3 + vpslld \$2,$R4,$S4 + vpaddd $R1,$S1,$S1 + vpaddd $R2,$S2,$S2 + vpaddd $R3,$S3,$S3 + vpaddd $R4,$S4,$S4 + + vpbroadcastq 32(%rcx),$PADBIT # .L129 + + vpsrlq \$52,$T0,$T2 # splat input + vpsllq \$12,$T4,$T3 + vporq $T3,$T2,$T2 + vpsrlq \$26,$T0,$T1 + vpsrlq \$14,$T4,$T3 + vpsrlq \$40,$T4,$T4 # 4 + vpandq $MASK,$T2,$T2 # 2 + vpandq $MASK,$T0,$T0 # 0 + #vpandq $MASK,$T1,$T1 # 1 + #vpandq $MASK,$T3,$T3 # 3 + #vporq $PADBIT,$T4,$T4 # padbit, yes, always + + vpaddq $H2,$T2,$H2 # accumulate input + sub \$192,$len + jbe .Ltail_avx512 + jmp .Loop_avx512 + +.align 32 +.Loop_avx512: + ################################################################ + # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8 + # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7 + # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6 + # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5 + # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4 + # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3 + # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2 + # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1 + # \________/\___________/ + ################################################################ + #vpaddq $H2,$T2,$H2 # accumulate input + + # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 + # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 + # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 + # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 + # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 + # + # however, as h2 is "chronologically" first one available pull + # corresponding operations up, so it's + # + # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4 + # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0 + # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1 + # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2 + # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3 + + vpmuludq $H2,$R1,$D3 # d3 = h2*r1 + vpaddq $H0,$T0,$H0 + vpmuludq $H2,$R2,$D4 # d4 = h2*r2 + vpandq $MASK,$T1,$T1 # 1 + vpmuludq $H2,$S3,$D0 # d0 = h2*s3 + vpandq $MASK,$T3,$T3 # 3 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + vporq $PADBIT,$T4,$T4 # padbit, yes, always + vpmuludq $H2,$R0,$D2 # d2 = h2*r0 + vpaddq $H1,$T1,$H1 # accumulate input + vpaddq $H3,$T3,$H3 + vpaddq $H4,$T4,$H4 + + vmovdqu64 16*0($inp),$T3 # load input + vmovdqu64 16*4($inp),$T4 + lea 16*8($inp),$inp + vpmuludq $H0,$R3,$M3 + vpmuludq $H0,$R4,$M4 + vpmuludq $H0,$R0,$M0 + vpmuludq $H0,$R1,$M1 + vpaddq $M3,$D3,$D3 # d3 += h0*r3 + vpaddq $M4,$D4,$D4 # d4 += h0*r4 + vpaddq $M0,$D0,$D0 # d0 += h0*r0 + vpaddq $M1,$D1,$D1 # d1 += h0*r1 + + vpmuludq $H1,$R2,$M3 + vpmuludq $H1,$R3,$M4 + vpmuludq $H1,$S4,$M0 + vpmuludq $H0,$R2,$M2 + vpaddq $M3,$D3,$D3 # d3 += h1*r2 + vpaddq $M4,$D4,$D4 # d4 += h1*r3 + vpaddq $M0,$D0,$D0 # d0 += h1*s4 + vpaddq $M2,$D2,$D2 # d2 += h0*r2 + + vpunpcklqdq $T4,$T3,$T0 # transpose input + vpunpckhqdq $T4,$T3,$T4 + + vpmuludq $H3,$R0,$M3 + 
vpmuludq $H3,$R1,$M4 + vpmuludq $H1,$R0,$M1 + vpmuludq $H1,$R1,$M2 + vpaddq $M3,$D3,$D3 # d3 += h3*r0 + vpaddq $M4,$D4,$D4 # d4 += h3*r1 + vpaddq $M1,$D1,$D1 # d1 += h1*r0 + vpaddq $M2,$D2,$D2 # d2 += h1*r1 + + vpmuludq $H4,$S4,$M3 + vpmuludq $H4,$R0,$M4 + vpmuludq $H3,$S2,$M0 + vpmuludq $H3,$S3,$M1 + vpaddq $M3,$D3,$D3 # d3 += h4*s4 + vpmuludq $H3,$S4,$M2 + vpaddq $M4,$D4,$D4 # d4 += h4*r0 + vpaddq $M0,$D0,$D0 # d0 += h3*s2 + vpaddq $M1,$D1,$D1 # d1 += h3*s3 + vpaddq $M2,$D2,$D2 # d2 += h3*s4 + + vpmuludq $H4,$S1,$M0 + vpmuludq $H4,$S2,$M1 + vpmuludq $H4,$S3,$M2 + vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1 + vpaddq $M1,$D1,$H1 # h1 = d2 + h4*s2 + vpaddq $M2,$D2,$H2 # h2 = d3 + h4*s3 + + ################################################################ + # lazy reduction (interleaved with input splat) + + vpsrlq \$52,$T0,$T2 # splat input + vpsllq \$12,$T4,$T3 + + vpsrlq \$26,$D3,$H3 + vpandq $MASK,$D3,$D3 + vpaddq $H3,$D4,$H4 # h3 -> h4 + + vporq $T3,$T2,$T2 + + vpsrlq \$26,$H0,$D0 + vpandq $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpandq $MASK,$T2,$T2 # 2 + + vpsrlq \$26,$H4,$D4 + vpandq $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpandq $MASK,$H1,$H1 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpaddq $T2,$H2,$H2 # modulo-scheduled + vpsrlq \$26,$T0,$T1 + + vpsrlq \$26,$H2,$D2 + vpandq $MASK,$H2,$H2 + vpaddq $D2,$D3,$H3 # h2 -> h3 + + vpsrlq \$14,$T4,$T3 + + vpsrlq \$26,$H0,$D0 + vpandq $MASK,$H0,$H0 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$40,$T4,$T4 # 4 + + vpsrlq \$26,$H3,$D3 + vpandq $MASK,$H3,$H3 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpandq $MASK,$T0,$T0 # 0 + #vpandq $MASK,$T1,$T1 # 1 + #vpandq $MASK,$T3,$T3 # 3 + #vporq $PADBIT,$T4,$T4 # padbit, yes, always + + sub \$128,$len + ja .Loop_avx512 + +.Ltail_avx512: + ################################################################ + # while above multiplications were by r^8 in all lanes, in last + # iteration we multiply least significant lane by r^8 and most + # significant one by r, that's why table gets shifted... 
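
The "splat input" steps interleaved with the reduction above (the $52/$26/$14/$40 right shifts, the $12-bit shift-in, and the always-set pad bit) re-slice each 16-byte block into five 26-bit limbs with 2^128 added on top. A scalar sketch, assuming a little-endian host; the helper is illustrative:

#include <stdint.h>
#include <string.h>

/*
 * Illustrative helper: one 16-byte message block becomes five 26-bit
 * limbs t[0..4], with the 2^128 pad bit OR-ed into the top limb (the
 * "padbit, yes, always" vpor).  Assumes a little-endian host.
 */
static void block_to_limbs_26(const uint8_t in[16], uint32_t t[5])
{
	uint64_t lo, hi;

	memcpy(&lo, in, 8);
	memcpy(&hi, in + 8, 8);

	t[0] = (uint32_t)( lo        & 0x3ffffff);
	t[1] = (uint32_t)((lo >> 26) & 0x3ffffff);
	t[2] = (uint32_t)(((lo >> 52) | (hi << 12)) & 0x3ffffff);
	t[3] = (uint32_t)((hi >> 14) & 0x3ffffff);
	t[4] = (uint32_t)( hi >> 40) | (1u << 24);	/* pad bit = 2^128 */
}
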
+ + vpsrlq \$32,$R0,$R0 # 0105020603070408 + vpsrlq \$32,$R1,$R1 + vpsrlq \$32,$R2,$R2 + vpsrlq \$32,$S3,$S3 + vpsrlq \$32,$S4,$S4 + vpsrlq \$32,$R3,$R3 + vpsrlq \$32,$R4,$R4 + vpsrlq \$32,$S1,$S1 + vpsrlq \$32,$S2,$S2 + + ################################################################ + # load either next or last 64 byte of input + lea ($inp,$len),$inp + + #vpaddq $H2,$T2,$H2 # accumulate input + vpaddq $H0,$T0,$H0 + + vpmuludq $H2,$R1,$D3 # d3 = h2*r1 + vpmuludq $H2,$R2,$D4 # d4 = h2*r2 + vpmuludq $H2,$S3,$D0 # d0 = h2*s3 + vpandq $MASK,$T1,$T1 # 1 + vpmuludq $H2,$S4,$D1 # d1 = h2*s4 + vpandq $MASK,$T3,$T3 # 3 + vpmuludq $H2,$R0,$D2 # d2 = h2*r0 + vporq $PADBIT,$T4,$T4 # padbit, yes, always + vpaddq $H1,$T1,$H1 # accumulate input + vpaddq $H3,$T3,$H3 + vpaddq $H4,$T4,$H4 + + vmovdqu 16*0($inp),%x#$T0 + vpmuludq $H0,$R3,$M3 + vpmuludq $H0,$R4,$M4 + vpmuludq $H0,$R0,$M0 + vpmuludq $H0,$R1,$M1 + vpaddq $M3,$D3,$D3 # d3 += h0*r3 + vpaddq $M4,$D4,$D4 # d4 += h0*r4 + vpaddq $M0,$D0,$D0 # d0 += h0*r0 + vpaddq $M1,$D1,$D1 # d1 += h0*r1 + + vmovdqu 16*1($inp),%x#$T1 + vpmuludq $H1,$R2,$M3 + vpmuludq $H1,$R3,$M4 + vpmuludq $H1,$S4,$M0 + vpmuludq $H0,$R2,$M2 + vpaddq $M3,$D3,$D3 # d3 += h1*r2 + vpaddq $M4,$D4,$D4 # d4 += h1*r3 + vpaddq $M0,$D0,$D0 # d0 += h1*s4 + vpaddq $M2,$D2,$D2 # d2 += h0*r2 + + vinserti128 \$1,16*2($inp),%y#$T0,%y#$T0 + vpmuludq $H3,$R0,$M3 + vpmuludq $H3,$R1,$M4 + vpmuludq $H1,$R0,$M1 + vpmuludq $H1,$R1,$M2 + vpaddq $M3,$D3,$D3 # d3 += h3*r0 + vpaddq $M4,$D4,$D4 # d4 += h3*r1 + vpaddq $M1,$D1,$D1 # d1 += h1*r0 + vpaddq $M2,$D2,$D2 # d2 += h1*r1 + + vinserti128 \$1,16*3($inp),%y#$T1,%y#$T1 + vpmuludq $H4,$S4,$M3 + vpmuludq $H4,$R0,$M4 + vpmuludq $H3,$S2,$M0 + vpmuludq $H3,$S3,$M1 + vpmuludq $H3,$S4,$M2 + vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4 + vpaddq $M4,$D4,$D4 # d4 += h4*r0 + vpaddq $M0,$D0,$D0 # d0 += h3*s2 + vpaddq $M1,$D1,$D1 # d1 += h3*s3 + vpaddq $M2,$D2,$D2 # d2 += h3*s4 + + vpmuludq $H4,$S1,$M0 + vpmuludq $H4,$S2,$M1 + vpmuludq $H4,$S3,$M2 + vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1 + vpaddq $M1,$D1,$H1 # h1 = d2 + h4*s2 + vpaddq $M2,$D2,$H2 # h2 = d3 + h4*s3 + + ################################################################ + # horizontal addition + + mov \$1,%eax + vpermq \$0xb1,$H3,$D3 + vpermq \$0xb1,$D4,$H4 + vpermq \$0xb1,$H0,$D0 + vpermq \$0xb1,$H1,$D1 + vpermq \$0xb1,$H2,$D2 + vpaddq $D3,$H3,$H3 + vpaddq $D4,$H4,$H4 + vpaddq $D0,$H0,$H0 + vpaddq $D1,$H1,$H1 + vpaddq $D2,$H2,$H2 + + kmovw %eax,%k3 + vpermq \$0x2,$H3,$D3 + vpermq \$0x2,$H4,$D4 + vpermq \$0x2,$H0,$D0 + vpermq \$0x2,$H1,$D1 + vpermq \$0x2,$H2,$D2 + vpaddq $D3,$H3,$H3 + vpaddq $D4,$H4,$H4 + vpaddq $D0,$H0,$H0 + vpaddq $D1,$H1,$H1 + vpaddq $D2,$H2,$H2 + + vextracti64x4 \$0x1,$H3,%y#$D3 + vextracti64x4 \$0x1,$H4,%y#$D4 + vextracti64x4 \$0x1,$H0,%y#$D0 + vextracti64x4 \$0x1,$H1,%y#$D1 + vextracti64x4 \$0x1,$H2,%y#$D2 + vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case + vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2 + vpaddq $D0,$H0,${H0}{%k3}{z} + vpaddq $D1,$H1,${H1}{%k3}{z} + vpaddq $D2,$H2,${H2}{%k3}{z} +___ +map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT)); +map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK)); +$code.=<<___; + ################################################################ + # lazy reduction (interleaved with input splat) + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpsrldq \$6,$T0,$T2 # splat input + vpsrldq \$6,$T1,$T3 + vpunpckhqdq $T1,$T0,$T4 # 4 + vpaddq $D3,$H4,$H4 # h3 -> h4 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpunpcklqdq $T3,$T2,$T2 
# 2:3 + vpunpcklqdq $T1,$T0,$T0 # 0:1 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H4,$D4 + vpand $MASK,$H4,$H4 + + vpsrlq \$26,$H1,$D1 + vpand $MASK,$H1,$H1 + vpsrlq \$30,$T2,$T3 + vpsrlq \$4,$T2,$T2 + vpaddq $D1,$H2,$H2 # h1 -> h2 + + vpaddq $D4,$H0,$H0 + vpsllq \$2,$D4,$D4 + vpsrlq \$26,$T0,$T1 + vpsrlq \$40,$T4,$T4 # 4 + vpaddq $D4,$H0,$H0 # h4 -> h0 + + vpsrlq \$26,$H2,$D2 + vpand $MASK,$H2,$H2 + vpand $MASK,$T2,$T2 # 2 + vpand $MASK,$T0,$T0 # 0 + vpaddq $D2,$H3,$H3 # h2 -> h3 + + vpsrlq \$26,$H0,$D0 + vpand $MASK,$H0,$H0 + vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2 + vpand $MASK,$T1,$T1 # 1 + vpaddq $D0,$H1,$H1 # h0 -> h1 + + vpsrlq \$26,$H3,$D3 + vpand $MASK,$H3,$H3 + vpand $MASK,$T3,$T3 # 3 + vpor 32(%rcx),$T4,$T4 # padbit, yes, always + vpaddq $D3,$H4,$H4 # h3 -> h4 + + lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2 + add \$64,$len + jnz .Ltail_avx2$suffix + + vpsubq $T2,$H2,$H2 # undo input accumulation + vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced + vmovd %x#$H1,`4*1-48-64`($ctx) + vmovd %x#$H2,`4*2-48-64`($ctx) + vmovd %x#$H3,`4*3-48-64`($ctx) + vmovd %x#$H4,`4*4-48-64`($ctx) + vzeroall +___ +$code.=<<___ if ($win64); + movdqa -0xb0(%r10),%xmm6 + movdqa -0xa0(%r10),%xmm7 + movdqa -0x90(%r10),%xmm8 + movdqa -0x80(%r10),%xmm9 + movdqa -0x70(%r10),%xmm10 + movdqa -0x60(%r10),%xmm11 + movdqa -0x50(%r10),%xmm12 + movdqa -0x40(%r10),%xmm13 + movdqa -0x30(%r10),%xmm14 + movdqa -0x20(%r10),%xmm15 + lea -8(%r10),%rsp +.Ldo_avx512_epilogue: +___ +$code.=<<___ if (!$win64); + lea -8(%r10),%rsp +.cfi_def_cfa_register %rsp +___ +$code.=<<___; + ret +.cfi_endproc +___ + +} + +} + +&declare_function("poly1305_blocks_avx2", 32, 4); +poly1305_blocks_avxN(0); +&end_function("poly1305_blocks_avx2"); + +if($kernel) { + $code .= "#endif\n"; +} + +####################################################################### +if ($avx>2) { +# On entry we have input length divisible by 64. But since inner loop +# processes 128 bytes per iteration, cases when length is not divisible +# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this +# reason stack layout is kept identical to poly1305_blocks_avx2. If not +# for this tail, we wouldn't have to even allocate stack frame... + +if($kernel) { + $code .= "#ifdef CONFIG_AS_AVX512\n"; +} + +&declare_function("poly1305_blocks_avx512", 32, 4); +poly1305_blocks_avxN(1); +&end_function("poly1305_blocks_avx512"); + +if ($kernel) { + $code .= "#endif\n"; +} + +if (!$kernel && $avx>3) { +######################################################################## +# VPMADD52 version using 2^44 radix. +# +# One can argue that base 2^52 would be more natural. Well, even though +# some operations would be more natural, one has to recognize couple of +# things. Base 2^52 doesn't provide advantage over base 2^44 if you look +# at amount of multiply-n-accumulate operations. Secondly, it makes it +# impossible to pre-compute multiples of 5 [referred to as s[]/sN in +# reference implementations], which means that more such operations +# would have to be performed in inner loop, which in turn makes critical +# path longer. In other words, even though base 2^44 reduction might +# look less elegant, overall critical path is actually shorter... + +######################################################################## +# Layout of opaque area is following. 
+# +# unsigned __int64 h[3]; # current hash value base 2^44 +# unsigned __int64 s[2]; # key value*20 base 2^44 +# unsigned __int64 r[3]; # key value base 2^44 +# struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4]; +# # r^n positions reflect +# # placement in register, not +# # memory, R[3] is R[1]*20 + +$code.=<<___; +.type poly1305_init_base2_44,\@function,3 +.align 32 +poly1305_init_base2_44: + xor %rax,%rax + mov %rax,0($ctx) # initialize hash value + mov %rax,8($ctx) + mov %rax,16($ctx) + +.Linit_base2_44: + lea poly1305_blocks_vpmadd52(%rip),%r10 + lea poly1305_emit_base2_44(%rip),%r11 + + mov \$0x0ffffffc0fffffff,%rax + mov \$0x0ffffffc0ffffffc,%rcx + and 0($inp),%rax + mov \$0x00000fffffffffff,%r8 + and 8($inp),%rcx + mov \$0x00000fffffffffff,%r9 + and %rax,%r8 + shrd \$44,%rcx,%rax + mov %r8,40($ctx) # r0 + and %r9,%rax + shr \$24,%rcx + mov %rax,48($ctx) # r1 + lea (%rax,%rax,4),%rax # *5 + mov %rcx,56($ctx) # r2 + shl \$2,%rax # magic <<2 + lea (%rcx,%rcx,4),%rcx # *5 + shl \$2,%rcx # magic <<2 + mov %rax,24($ctx) # s1 + mov %rcx,32($ctx) # s2 + movq \$-1,64($ctx) # write impossible value +___ +$code.=<<___ if ($flavour !~ /elf32/); + mov %r10,0(%rdx) + mov %r11,8(%rdx) +___ +$code.=<<___ if ($flavour =~ /elf32/); + mov %r10d,0(%rdx) + mov %r11d,4(%rdx) +___ +$code.=<<___; + mov \$1,%eax + ret +.size poly1305_init_base2_44,.-poly1305_init_base2_44 +___ +{ +my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17)); +my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21)); +my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25)); + +$code.=<<___; +.type poly1305_blocks_vpmadd52,\@function,4 +.align 32 +poly1305_blocks_vpmadd52: + shr \$4,$len + jz .Lno_data_vpmadd52 # too short + + shl \$40,$padbit + mov 64($ctx),%r8 # peek on power of the key + + # if powers of the key are not calculated yet, process up to 3 + # blocks with this single-block subroutine, otherwise ensure that + # length is divisible by 2 blocks and pass the rest down to next + # subroutine... + + mov \$3,%rax + mov \$1,%r10 + cmp \$4,$len # is input long + cmovae %r10,%rax + test %r8,%r8 # is power value impossible? + cmovns %r10,%rax + + and $len,%rax # is input of favourable length? 
+ jz .Lblocks_vpmadd52_4x + + sub %rax,$len + mov \$7,%r10d + mov \$1,%r11d + kmovw %r10d,%k7 + lea .L2_44_inp_permd(%rip),%r10 + kmovw %r11d,%k1 + + vmovq $padbit,%x#$PAD + vmovdqa64 0(%r10),$inp_permd # .L2_44_inp_permd + vmovdqa64 32(%r10),$inp_shift # .L2_44_inp_shift + vpermq \$0xcf,$PAD,$PAD + vmovdqa64 64(%r10),$reduc_mask # .L2_44_mask + + vmovdqu64 0($ctx),${Dlo}{%k7}{z} # load hash value + vmovdqu64 40($ctx),${r2r1r0}{%k7}{z} # load keys + vmovdqu64 32($ctx),${r1r0s2}{%k7}{z} + vmovdqu64 24($ctx),${r0s2s1}{%k7}{z} + + vmovdqa64 96(%r10),$reduc_rght # .L2_44_shift_rgt + vmovdqa64 128(%r10),$reduc_left # .L2_44_shift_lft + + jmp .Loop_vpmadd52 + +.align 32 +.Loop_vpmadd52: + vmovdqu32 0($inp),%x#$T0 # load input as ----3210 + lea 16($inp),$inp + + vpermd $T0,$inp_permd,$T0 # ----3210 -> --322110 + vpsrlvq $inp_shift,$T0,$T0 + vpandq $reduc_mask,$T0,$T0 + vporq $PAD,$T0,$T0 + + vpaddq $T0,$Dlo,$Dlo # accumulate input + + vpermq \$0,$Dlo,${H0}{%k7}{z} # smash hash value + vpermq \$0b01010101,$Dlo,${H1}{%k7}{z} + vpermq \$0b10101010,$Dlo,${H2}{%k7}{z} + + vpxord $Dlo,$Dlo,$Dlo + vpxord $Dhi,$Dhi,$Dhi + + vpmadd52luq $r2r1r0,$H0,$Dlo + vpmadd52huq $r2r1r0,$H0,$Dhi + + vpmadd52luq $r1r0s2,$H1,$Dlo + vpmadd52huq $r1r0s2,$H1,$Dhi + + vpmadd52luq $r0s2s1,$H2,$Dlo + vpmadd52huq $r0s2s1,$H2,$Dhi + + vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword + vpsllvq $reduc_left,$Dhi,$Dhi # 0 in topmost qword + vpandq $reduc_mask,$Dlo,$Dlo + + vpaddq $T0,$Dhi,$Dhi + + vpermq \$0b10010011,$Dhi,$Dhi # 0 in lowest qword + + vpaddq $Dhi,$Dlo,$Dlo # note topmost qword :-) + + vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost word + vpandq $reduc_mask,$Dlo,$Dlo + + vpermq \$0b10010011,$T0,$T0 + + vpaddq $T0,$Dlo,$Dlo + + vpermq \$0b10010011,$Dlo,${T0}{%k1}{z} + + vpaddq $T0,$Dlo,$Dlo + vpsllq \$2,$T0,$T0 + + vpaddq $T0,$Dlo,$Dlo + + dec %rax # len-=16 + jnz .Loop_vpmadd52 + + vmovdqu64 $Dlo,0($ctx){%k7} # store hash value + + test $len,$len + jnz .Lblocks_vpmadd52_4x + +.Lno_data_vpmadd52: + ret +.size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 +___ +} +{ +######################################################################## +# As implied by its name 4x subroutine processes 4 blocks in parallel +# (but handles even 4*n+2 blocks lengths). It takes up to 4th key power +# and is handled in 256-bit %ymm registers. + +my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17)); +my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23)); +my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31)); + +$code.=<<___; +.type poly1305_blocks_vpmadd52_4x,\@function,4 +.align 32 +poly1305_blocks_vpmadd52_4x: + shr \$4,$len + jz .Lno_data_vpmadd52_4x # too short + + shl \$40,$padbit + mov 64($ctx),%r8 # peek on power of the key + +.Lblocks_vpmadd52_4x: + vpbroadcastq $padbit,$PAD + + vmovdqa64 .Lx_mask44(%rip),$mask44 + mov \$5,%eax + vmovdqa64 .Lx_mask42(%rip),$mask42 + kmovw %eax,%k1 # used in 2x path + + test %r8,%r8 # is power value impossible? + js .Linit_vpmadd52 # if it is, then init R[4] + + vmovq 0($ctx),%x#$H0 # load current hash value + vmovq 8($ctx),%x#$H1 + vmovq 16($ctx),%x#$H2 + + test \$3,$len # is length 4*n+2? + jnz .Lblocks_vpmadd52_2x_do + +.Lblocks_vpmadd52_4x_do: + vpbroadcastq 64($ctx),$R0 # load 4th power of the key + vpbroadcastq 96($ctx),$R1 + vpbroadcastq 128($ctx),$R2 + vpbroadcastq 160($ctx),$S1 + +.Lblocks_vpmadd52_4x_key_loaded: + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S2,$S2 + + test \$7,$len # is len 8*n? 
+ jz .Lblocks_vpmadd52_8x + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*2($inp),$T3 + lea 16*4($inp),$inp + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + + # at this point 64-bit lanes are ordered as 3-1-2-0 + + vpsrlq \$24,$T3,$T2 # splat the data + vporq $PAD,$T2,$T2 + vpaddq $T2,$H2,$H2 # accumulate input + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + sub \$4,$len + jz .Ltail_vpmadd52_4x + jmp .Loop_vpmadd52_4x + ud2 + +.align 32 +.Linit_vpmadd52: + vmovq 24($ctx),%x#$S1 # load key + vmovq 56($ctx),%x#$H2 + vmovq 32($ctx),%x#$S2 + vmovq 40($ctx),%x#$R0 + vmovq 48($ctx),%x#$R1 + + vmovdqa $R0,$H0 + vmovdqa $R1,$H1 + vmovdqa $H2,$R2 + + mov \$2,%eax + +.Lmul_init_vpmadd52: + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + dec %eax + jz .Ldone_init_vpmadd52 + + vpunpcklqdq $R1,$H1,$R1 # 1,2 + vpbroadcastq %x#$H1,%x#$H1 # 2,2 + vpunpcklqdq $R2,$H2,$R2 + vpbroadcastq %x#$H2,%x#$H2 + vpunpcklqdq $R0,$H0,$R0 + vpbroadcastq %x#$H0,%x#$H0 + + vpsllq \$2,$R1,$S1 # S1 = R1*5*4 + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R1,$S1,$S1 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S1,$S1 + vpsllq \$2,$S2,$S2 + + jmp .Lmul_init_vpmadd52 + ud2 + +.align 32 +.Ldone_init_vpmadd52: + vinserti128 \$1,%x#$R1,$H1,$R1 # 1,2,3,4 + vinserti128 \$1,%x#$R2,$H2,$R2 + vinserti128 \$1,%x#$R0,$H0,$R0 + + vpermq \$0b11011000,$R1,$R1 # 1,3,2,4 + vpermq \$0b11011000,$R2,$R2 + vpermq \$0b11011000,$R0,$R0 + + vpsllq \$2,$R1,$S1 # S1 = R1*5*4 + vpaddq $R1,$S1,$S1 + vpsllq \$2,$S1,$S1 + + vmovq 0($ctx),%x#$H0 # load current hash value + vmovq 8($ctx),%x#$H1 + vmovq 16($ctx),%x#$H2 + + test \$3,$len # is length 4*n+2? 
+ jnz .Ldone_init_vpmadd52_2x + + vmovdqu64 $R0,64($ctx) # save key powers + vpbroadcastq %x#$R0,$R0 # broadcast 4th power + vmovdqu64 $R1,96($ctx) + vpbroadcastq %x#$R1,$R1 + vmovdqu64 $R2,128($ctx) + vpbroadcastq %x#$R2,$R2 + vmovdqu64 $S1,160($ctx) + vpbroadcastq %x#$S1,$S1 + + jmp .Lblocks_vpmadd52_4x_key_loaded + ud2 + +.align 32 +.Ldone_init_vpmadd52_2x: + vmovdqu64 $R0,64($ctx) # save key powers + vpsrldq \$8,$R0,$R0 # 0-1-0-2 + vmovdqu64 $R1,96($ctx) + vpsrldq \$8,$R1,$R1 + vmovdqu64 $R2,128($ctx) + vpsrldq \$8,$R2,$R2 + vmovdqu64 $S1,160($ctx) + vpsrldq \$8,$S1,$S1 + jmp .Lblocks_vpmadd52_2x_key_loaded + ud2 + +.align 32 +.Lblocks_vpmadd52_2x_do: + vmovdqu64 128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers + vmovdqu64 160+8($ctx),${S1}{%k1}{z} + vmovdqu64 64+8($ctx),${R0}{%k1}{z} + vmovdqu64 96+8($ctx),${R1}{%k1}{z} + +.Lblocks_vpmadd52_2x_key_loaded: + vmovdqu64 16*0($inp),$T2 # load data + vpxorq $T3,$T3,$T3 + lea 16*2($inp),$inp + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + + # at this point 64-bit lanes are ordered as x-1-x-0 + + vpsrlq \$24,$T3,$T2 # splat the data + vporq $PAD,$T2,$T2 + vpaddq $T2,$H2,$H2 # accumulate input + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + jmp .Ltail_vpmadd52_2x + ud2 + +.align 32 +.Loop_vpmadd52_4x: + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*2($inp),$T3 + lea 16*4($inp),$inp + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # partial reduction (interleaved with data splat) + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpsrlq \$24,$T3,$T2 + vporq $PAD,$T2,$T2 + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + sub \$4,$len # len-=64 + jnz .Loop_vpmadd52_4x + +.Ltail_vpmadd52_4x: + vmovdqu64 128($ctx),$R2 # load all key powers + vmovdqu64 160($ctx),$S1 + vmovdqu64 64($ctx),$R0 + vmovdqu64 96($ctx),$R1 + +.Ltail_vpmadd52_2x: + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S2,$S2 + + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + 
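
For reference against poly1305_init_base2_44 above: the key is clamped as usual, split into 44+44+42-bit limbs, and the two "key value*20" entries are precomputed because a cross product landing at weight 2^132 wraps with factor 4*5 = 20 modulo 2^130-5. A scalar sketch; the struct and function are illustrative, not the in-kernel layout (the real code keeps these values in the opaque area described earlier):

#include <stdint.h>
#include <string.h>

struct poly1305_key_44 {
	uint64_t r0, r1, r2;	/* 44, 44, 42 bits */
	uint64_t s1, s2;	/* 20*r1, 20*r2 */
};

static void key_to_base2_44(const uint8_t key[16], struct poly1305_key_44 *k)
{
	uint64_t lo, hi;

	memcpy(&lo, key, 8);	/* assumes little-endian host */
	memcpy(&hi, key + 8, 8);
	lo &= 0x0ffffffc0fffffffULL;	/* standard Poly1305 clamping */
	hi &= 0x0ffffffc0ffffffcULL;

	k->r0 =  lo                       & 0xfffffffffffULL;	/* 44 bits */
	k->r1 = ((lo >> 44) | (hi << 20)) & 0xfffffffffffULL;	/* 44 bits */
	k->r2 =  hi >> 24;					/* 42 bits */
	k->s1 = k->r1 * 20;
	k->s2 = k->r2 * 20;
}
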
vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # horizontal addition + + mov \$1,%eax + kmovw %eax,%k1 + vpsrldq \$8,$D0lo,$T0 + vpsrldq \$8,$D0hi,$H0 + vpsrldq \$8,$D1lo,$T1 + vpsrldq \$8,$D1hi,$H1 + vpaddq $T0,$D0lo,$D0lo + vpaddq $H0,$D0hi,$D0hi + vpsrldq \$8,$D2lo,$T2 + vpsrldq \$8,$D2hi,$H2 + vpaddq $T1,$D1lo,$D1lo + vpaddq $H1,$D1hi,$D1hi + vpermq \$0x2,$D0lo,$T0 + vpermq \$0x2,$D0hi,$H0 + vpaddq $T2,$D2lo,$D2lo + vpaddq $H2,$D2hi,$D2hi + + vpermq \$0x2,$D1lo,$T1 + vpermq \$0x2,$D1hi,$H1 + vpaddq $T0,$D0lo,${D0lo}{%k1}{z} + vpaddq $H0,$D0hi,${D0hi}{%k1}{z} + vpermq \$0x2,$D2lo,$T2 + vpermq \$0x2,$D2hi,$H2 + vpaddq $T1,$D1lo,${D1lo}{%k1}{z} + vpaddq $H1,$D1hi,${D1hi}{%k1}{z} + vpaddq $T2,$D2lo,${D2lo}{%k1}{z} + vpaddq $H2,$D2hi,${D2hi}{%k1}{z} + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + # at this point $len is + # either 4*n+2 or 0... + sub \$2,$len # len-=32 + ja .Lblocks_vpmadd52_4x_do + + vmovq %x#$H0,0($ctx) + vmovq %x#$H1,8($ctx) + vmovq %x#$H2,16($ctx) + vzeroall + +.Lno_data_vpmadd52_4x: + ret +.size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x +___ +} +{ +######################################################################## +# As implied by its name 8x subroutine processes 8 blocks in parallel... +# This is intermediate version, as it's used only in cases when input +# length is either 8*n, 8*n+1 or 8*n+2... + +my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17)); +my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23)); +my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31)); +my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10)); + +$code.=<<___; +.type poly1305_blocks_vpmadd52_8x,\@function,4 +.align 32 +poly1305_blocks_vpmadd52_8x: + shr \$4,$len + jz .Lno_data_vpmadd52_8x # too short + + shl \$40,$padbit + mov 64($ctx),%r8 # peek on power of the key + + vmovdqa64 .Lx_mask44(%rip),$mask44 + vmovdqa64 .Lx_mask42(%rip),$mask42 + + test %r8,%r8 # is power value impossible? 
+ js .Linit_vpmadd52 # if it is, then init R[4] + + vmovq 0($ctx),%x#$H0 # load current hash value + vmovq 8($ctx),%x#$H1 + vmovq 16($ctx),%x#$H2 + +.Lblocks_vpmadd52_8x: + ################################################################ + # fist we calculate more key powers + + vmovdqu64 128($ctx),$R2 # load 1-3-2-4 powers + vmovdqu64 160($ctx),$S1 + vmovdqu64 64($ctx),$R0 + vmovdqu64 96($ctx),$R1 + + vpsllq \$2,$R2,$S2 # S2 = R2*5*4 + vpaddq $R2,$S2,$S2 + vpsllq \$2,$S2,$S2 + + vpbroadcastq %x#$R2,$RR2 # broadcast 4th power + vpbroadcastq %x#$R0,$RR0 + vpbroadcastq %x#$R1,$RR1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $RR2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $RR2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $RR2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $RR2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $RR2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $RR2,$R0,$D2hi + + vpmadd52luq $RR0,$R0,$D0lo + vpmadd52huq $RR0,$R0,$D0hi + vpmadd52luq $RR0,$R1,$D1lo + vpmadd52huq $RR0,$R1,$D1hi + vpmadd52luq $RR0,$R2,$D2lo + vpmadd52huq $RR0,$R2,$D2hi + + vpmadd52luq $RR1,$S2,$D0lo + vpmadd52huq $RR1,$S2,$D0hi + vpmadd52luq $RR1,$R0,$D1lo + vpmadd52huq $RR1,$R0,$D1hi + vpmadd52luq $RR1,$R1,$D2lo + vpmadd52huq $RR1,$R1,$D2hi + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$RR0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$RR1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$RR2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$RR0,$RR0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$RR0,$RR0 + + vpsrlq \$44,$RR0,$tmp # additional step + vpandq $mask44,$RR0,$RR0 + + vpaddq $tmp,$RR1,$RR1 + + ################################################################ + # At this point Rx holds 1324 powers, RRx - 5768, and the goal + # is 15263748, which reflects how data is loaded... 
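+	# (the interleaved per-lane powers RRx/SSx are only consumed by the
+	#  final .Ltail_vpmadd52_8x multiply; the main loop multiplies every
+	#  lane by the broadcast 8th power prepared further below)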
+ + vpunpcklqdq $R2,$RR2,$T2 # 3748 + vpunpckhqdq $R2,$RR2,$R2 # 1526 + vpunpcklqdq $R0,$RR0,$T0 + vpunpckhqdq $R0,$RR0,$R0 + vpunpcklqdq $R1,$RR1,$T1 + vpunpckhqdq $R1,$RR1,$R1 +___ +######## switch to %zmm +map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); +map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); +map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD); +map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2); + +$code.=<<___; + vshufi64x2 \$0x44,$R2,$T2,$RR2 # 15263748 + vshufi64x2 \$0x44,$R0,$T0,$RR0 + vshufi64x2 \$0x44,$R1,$T1,$RR1 + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*4($inp),$T3 + lea 16*8($inp),$inp + + vpsllq \$2,$RR2,$SS2 # S2 = R2*5*4 + vpsllq \$2,$RR1,$SS1 # S1 = R1*5*4 + vpaddq $RR2,$SS2,$SS2 + vpaddq $RR1,$SS1,$SS1 + vpsllq \$2,$SS2,$SS2 + vpsllq \$2,$SS1,$SS1 + + vpbroadcastq $padbit,$PAD + vpbroadcastq %x#$mask44,$mask44 + vpbroadcastq %x#$mask42,$mask42 + + vpbroadcastq %x#$SS1,$S1 # broadcast 8th power + vpbroadcastq %x#$SS2,$S2 + vpbroadcastq %x#$RR0,$R0 + vpbroadcastq %x#$RR1,$R1 + vpbroadcastq %x#$RR2,$R2 + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + + # at this point 64-bit lanes are ordered as 73625140 + + vpsrlq \$24,$T3,$T2 # splat the data + vporq $PAD,$T2,$T2 + vpaddq $T2,$H2,$H2 # accumulate input + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + sub \$8,$len + jz .Ltail_vpmadd52_8x + jmp .Loop_vpmadd52_8x + +.align 32 +.Loop_vpmadd52_8x: + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$S1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$S1,$D0hi + vpxorq $D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$S2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$S2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$R0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$R0,$D2hi + + vmovdqu64 16*0($inp),$T2 # load data + vmovdqu64 16*4($inp),$T3 + lea 16*8($inp),$inp + vpmadd52luq $H0,$R0,$D0lo + vpmadd52huq $H0,$R0,$D0hi + vpmadd52luq $H0,$R1,$D1lo + vpmadd52huq $H0,$R1,$D1hi + vpmadd52luq $H0,$R2,$D2lo + vpmadd52huq $H0,$R2,$D2hi + + vpunpcklqdq $T3,$T2,$T1 # transpose data + vpunpckhqdq $T3,$T2,$T3 + vpmadd52luq $H1,$S2,$D0lo + vpmadd52huq $H1,$S2,$D0hi + vpmadd52luq $H1,$R0,$D1lo + vpmadd52huq $H1,$R0,$D1hi + vpmadd52luq $H1,$R1,$D2lo + vpmadd52huq $H1,$R1,$D2hi + + ################################################################ + # partial reduction (interleaved with data splat) + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpsrlq \$24,$T3,$T2 + vporq $PAD,$T2,$T2 + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpandq $mask44,$T1,$T0 + vpsrlq \$44,$T1,$T1 + vpsllq \$20,$T3,$T3 + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vporq $T3,$T1,$T1 + vpandq $mask44,$T1,$T1 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + sub \$8,$len # len-=128 + jnz .Loop_vpmadd52_8x + +.Ltail_vpmadd52_8x: + #vpaddq $T2,$H2,$H2 # accumulate input + vpaddq $T0,$H0,$H0 + vpaddq $T1,$H1,$H1 + + vpxorq $D0lo,$D0lo,$D0lo + vpmadd52luq $H2,$SS1,$D0lo + vpxorq $D0hi,$D0hi,$D0hi + vpmadd52huq $H2,$SS1,$D0hi + vpxorq 
$D1lo,$D1lo,$D1lo + vpmadd52luq $H2,$SS2,$D1lo + vpxorq $D1hi,$D1hi,$D1hi + vpmadd52huq $H2,$SS2,$D1hi + vpxorq $D2lo,$D2lo,$D2lo + vpmadd52luq $H2,$RR0,$D2lo + vpxorq $D2hi,$D2hi,$D2hi + vpmadd52huq $H2,$RR0,$D2hi + + vpmadd52luq $H0,$RR0,$D0lo + vpmadd52huq $H0,$RR0,$D0hi + vpmadd52luq $H0,$RR1,$D1lo + vpmadd52huq $H0,$RR1,$D1hi + vpmadd52luq $H0,$RR2,$D2lo + vpmadd52huq $H0,$RR2,$D2hi + + vpmadd52luq $H1,$SS2,$D0lo + vpmadd52huq $H1,$SS2,$D0hi + vpmadd52luq $H1,$RR0,$D1lo + vpmadd52huq $H1,$RR0,$D1hi + vpmadd52luq $H1,$RR1,$D2lo + vpmadd52huq $H1,$RR1,$D2hi + + ################################################################ + # horizontal addition + + mov \$1,%eax + kmovw %eax,%k1 + vpsrldq \$8,$D0lo,$T0 + vpsrldq \$8,$D0hi,$H0 + vpsrldq \$8,$D1lo,$T1 + vpsrldq \$8,$D1hi,$H1 + vpaddq $T0,$D0lo,$D0lo + vpaddq $H0,$D0hi,$D0hi + vpsrldq \$8,$D2lo,$T2 + vpsrldq \$8,$D2hi,$H2 + vpaddq $T1,$D1lo,$D1lo + vpaddq $H1,$D1hi,$D1hi + vpermq \$0x2,$D0lo,$T0 + vpermq \$0x2,$D0hi,$H0 + vpaddq $T2,$D2lo,$D2lo + vpaddq $H2,$D2hi,$D2hi + + vpermq \$0x2,$D1lo,$T1 + vpermq \$0x2,$D1hi,$H1 + vpaddq $T0,$D0lo,$D0lo + vpaddq $H0,$D0hi,$D0hi + vpermq \$0x2,$D2lo,$T2 + vpermq \$0x2,$D2hi,$H2 + vpaddq $T1,$D1lo,$D1lo + vpaddq $H1,$D1hi,$D1hi + vextracti64x4 \$1,$D0lo,%y#$T0 + vextracti64x4 \$1,$D0hi,%y#$H0 + vpaddq $T2,$D2lo,$D2lo + vpaddq $H2,$D2hi,$D2hi + + vextracti64x4 \$1,$D1lo,%y#$T1 + vextracti64x4 \$1,$D1hi,%y#$H1 + vextracti64x4 \$1,$D2lo,%y#$T2 + vextracti64x4 \$1,$D2hi,%y#$H2 +___ +######## switch back to %ymm +map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); +map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); +map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD); + +$code.=<<___; + vpaddq $T0,$D0lo,${D0lo}{%k1}{z} + vpaddq $H0,$D0hi,${D0hi}{%k1}{z} + vpaddq $T1,$D1lo,${D1lo}{%k1}{z} + vpaddq $H1,$D1hi,${D1hi}{%k1}{z} + vpaddq $T2,$D2lo,${D2lo}{%k1}{z} + vpaddq $H2,$D2hi,${D2hi}{%k1}{z} + + ################################################################ + # partial reduction + vpsrlq \$44,$D0lo,$tmp + vpsllq \$8,$D0hi,$D0hi + vpandq $mask44,$D0lo,$H0 + vpaddq $tmp,$D0hi,$D0hi + + vpaddq $D0hi,$D1lo,$D1lo + + vpsrlq \$44,$D1lo,$tmp + vpsllq \$8,$D1hi,$D1hi + vpandq $mask44,$D1lo,$H1 + vpaddq $tmp,$D1hi,$D1hi + + vpaddq $D1hi,$D2lo,$D2lo + + vpsrlq \$42,$D2lo,$tmp + vpsllq \$10,$D2hi,$D2hi + vpandq $mask42,$D2lo,$H2 + vpaddq $tmp,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + vpsllq \$2,$D2hi,$D2hi + + vpaddq $D2hi,$H0,$H0 + + vpsrlq \$44,$H0,$tmp # additional step + vpandq $mask44,$H0,$H0 + + vpaddq $tmp,$H1,$H1 + + ################################################################ + + vmovq %x#$H0,0($ctx) + vmovq %x#$H1,8($ctx) + vmovq %x#$H2,16($ctx) + vzeroall + +.Lno_data_vpmadd52_8x: + ret +.size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x +___ +} +$code.=<<___; +.type poly1305_emit_base2_44,\@function,3 +.align 32 +poly1305_emit_base2_44: + mov 0($ctx),%r8 # load hash value + mov 8($ctx),%r9 + mov 16($ctx),%r10 + + mov %r9,%rax + shr \$20,%r9 + shl \$44,%rax + mov %r10,%rcx + shr \$40,%r10 + shl \$24,%rcx + + add %rax,%r8 + adc %rcx,%r9 + adc \$0,%r10 + + mov %r8,%rax + add \$5,%r8 # compare to modulus + mov %r9,%rcx + adc \$0,%r9 + adc \$0,%r10 + shr \$2,%r10 # did 130-bit value overflow? 
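+	# if h+5 carried past bit 130 then h >= p = 2^130-5, and the tag is
+	# the low 128 bits of h-p, which equal the low 128 bits of h+5;
+	# otherwise the original h (saved in %rax:%rcx) is kept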
+ cmovnz %r8,%rax + cmovnz %r9,%rcx + + add 0($nonce),%rax # accumulate nonce + adc 8($nonce),%rcx + mov %rax,0($mac) # write result + mov %rcx,8($mac) + + ret +.size poly1305_emit_base2_44,.-poly1305_emit_base2_44 +___ +} } } +} + +if (!$kernel) +{ # chacha20-poly1305 helpers +my ($out,$inp,$otp,$len)=$win64 ? ("%rcx","%rdx","%r8", "%r9") : # Win64 order + ("%rdi","%rsi","%rdx","%rcx"); # Unix order +$code.=<<___; +.globl xor128_encrypt_n_pad +.type xor128_encrypt_n_pad,\@abi-omnipotent +.align 16 +xor128_encrypt_n_pad: + sub $otp,$inp + sub $otp,$out + mov $len,%r10 # put len aside + shr \$4,$len # len / 16 + jz .Ltail_enc + nop +.Loop_enc_xmm: + movdqu ($inp,$otp),%xmm0 + pxor ($otp),%xmm0 + movdqu %xmm0,($out,$otp) + movdqa %xmm0,($otp) + lea 16($otp),$otp + dec $len + jnz .Loop_enc_xmm + + and \$15,%r10 # len % 16 + jz .Ldone_enc + +.Ltail_enc: + mov \$16,$len + sub %r10,$len + xor %eax,%eax +.Loop_enc_byte: + mov ($inp,$otp),%al + xor ($otp),%al + mov %al,($out,$otp) + mov %al,($otp) + lea 1($otp),$otp + dec %r10 + jnz .Loop_enc_byte + + xor %eax,%eax +.Loop_enc_pad: + mov %al,($otp) + lea 1($otp),$otp + dec $len + jnz .Loop_enc_pad + +.Ldone_enc: + mov $otp,%rax + ret +.size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad + +.globl xor128_decrypt_n_pad +.type xor128_decrypt_n_pad,\@abi-omnipotent +.align 16 +xor128_decrypt_n_pad: + sub $otp,$inp + sub $otp,$out + mov $len,%r10 # put len aside + shr \$4,$len # len / 16 + jz .Ltail_dec + nop +.Loop_dec_xmm: + movdqu ($inp,$otp),%xmm0 + movdqa ($otp),%xmm1 + pxor %xmm0,%xmm1 + movdqu %xmm1,($out,$otp) + movdqa %xmm0,($otp) + lea 16($otp),$otp + dec $len + jnz .Loop_dec_xmm + + pxor %xmm1,%xmm1 + and \$15,%r10 # len % 16 + jz .Ldone_dec + +.Ltail_dec: + mov \$16,$len + sub %r10,$len + xor %eax,%eax + xor %r11,%r11 +.Loop_dec_byte: + mov ($inp,$otp),%r11b + mov ($otp),%al + xor %r11b,%al + mov %al,($out,$otp) + mov %r11b,($otp) + lea 1($otp),$otp + dec %r10 + jnz .Loop_dec_byte + + xor %eax,%eax +.Loop_dec_pad: + mov %al,($otp) + lea 1($otp),$otp + dec $len + jnz .Loop_dec_pad + +.Ldone_dec: + mov $otp,%rax + ret +.size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad +___ +} + +# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, +# CONTEXT *context,DISPATCHER_CONTEXT *disp) +if ($win64) { +$rec="%rcx"; +$frame="%rdx"; +$context="%r8"; +$disp="%r9"; + +$code.=<<___; +.extern __imp_RtlVirtualUnwind +.type se_handler,\@abi-omnipotent +.align 16 +se_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->Rip<.Lprologue + jb .Lcommon_seh_tail + + mov 152($context),%rax # pull context->Rsp + + mov 4(%r11),%r10d # HandlerData[1] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=.Lepilogue + jae .Lcommon_seh_tail + + lea 48(%rax),%rax + + mov -8(%rax),%rbx + mov -16(%rax),%rbp + mov -24(%rax),%r12 + mov -32(%rax),%r13 + mov -40(%rax),%r14 + mov -48(%rax),%r15 + mov %rbx,144($context) # restore context->Rbx + mov %rbp,160($context) # restore context->Rbp + mov %r12,216($context) # restore context->R12 + mov %r13,224($context) # restore context->R13 + mov %r14,232($context) # restore context->R14 + mov %r15,240($context) # restore context->R14 + + jmp .Lcommon_seh_tail 
+.size se_handler,.-se_handler + +.type avx_handler,\@abi-omnipotent +.align 16 +avx_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->Rip<prologue label + jb .Lcommon_seh_tail + + mov 152($context),%rax # pull context->Rsp + + mov 4(%r11),%r10d # HandlerData[1] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=epilogue label + jae .Lcommon_seh_tail + + mov 208($context),%rax # pull context->R11 + + lea 0x50(%rax),%rsi + lea 0xf8(%rax),%rax + lea 512($context),%rdi # &context.Xmm6 + mov \$20,%ecx + .long 0xa548f3fc # cld; rep movsq + +.Lcommon_seh_tail: + mov 8(%rax),%rdi + mov 16(%rax),%rsi + mov %rax,152($context) # restore context->Rsp + mov %rsi,168($context) # restore context->Rsi + mov %rdi,176($context) # restore context->Rdi + + mov 40($disp),%rdi # disp->ContextRecord + mov $context,%rsi # context + mov \$154,%ecx # sizeof(CONTEXT) + .long 0xa548f3fc # cld; rep movsq + + mov $disp,%rsi + xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER + mov 8(%rsi),%rdx # arg2, disp->ImageBase + mov 0(%rsi),%r8 # arg3, disp->ControlPc + mov 16(%rsi),%r9 # arg4, disp->FunctionEntry + mov 40(%rsi),%r10 # disp->ContextRecord + lea 56(%rsi),%r11 # &disp->HandlerData + lea 24(%rsi),%r12 # &disp->EstablisherFrame + mov %r10,32(%rsp) # arg5 + mov %r11,40(%rsp) # arg6 + mov %r12,48(%rsp) # arg7 + mov %rcx,56(%rsp) # arg8, (NULL) + call *__imp_RtlVirtualUnwind(%rip) + + mov \$1,%eax # ExceptionContinueSearch + add \$64,%rsp + popfq + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + pop %rdi + pop %rsi + ret +.size avx_handler,.-avx_handler + +.section .pdata +.align 4 + .rva .LSEH_begin_poly1305_init_x86_64 + .rva .LSEH_end_poly1305_init_x86_64 + .rva .LSEH_info_poly1305_init_x86_64 + + .rva .LSEH_begin_poly1305_blocks_x86_64 + .rva .LSEH_end_poly1305_blocks_x86_64 + .rva .LSEH_info_poly1305_blocks_x86_64 + + .rva .LSEH_begin_poly1305_emit_x86_64 + .rva .LSEH_end_poly1305_emit_x86_64 + .rva .LSEH_info_poly1305_emit_x86_64 +___ +$code.=<<___ if ($avx); + .rva .LSEH_begin_poly1305_blocks_avx + .rva .Lbase2_64_avx + .rva .LSEH_info_poly1305_blocks_avx_1 + + .rva .Lbase2_64_avx + .rva .Leven_avx + .rva .LSEH_info_poly1305_blocks_avx_2 + + .rva .Leven_avx + .rva .LSEH_end_poly1305_blocks_avx + .rva .LSEH_info_poly1305_blocks_avx_3 + + .rva .LSEH_begin_poly1305_emit_avx + .rva .LSEH_end_poly1305_emit_avx + .rva .LSEH_info_poly1305_emit_avx +___ +$code.=<<___ if ($avx>1); + .rva .LSEH_begin_poly1305_blocks_avx2 + .rva .Lbase2_64_avx2 + .rva .LSEH_info_poly1305_blocks_avx2_1 + + .rva .Lbase2_64_avx2 + .rva .Leven_avx2 + .rva .LSEH_info_poly1305_blocks_avx2_2 + + .rva .Leven_avx2 + .rva .LSEH_end_poly1305_blocks_avx2 + .rva .LSEH_info_poly1305_blocks_avx2_3 +___ +$code.=<<___ if ($avx>2); + .rva .LSEH_begin_poly1305_blocks_avx512 + .rva .LSEH_end_poly1305_blocks_avx512 + .rva .LSEH_info_poly1305_blocks_avx512 +___ +$code.=<<___; +.section .xdata +.align 8 +.LSEH_info_poly1305_init_x86_64: + .byte 9,0,0,0 + .rva se_handler + .rva .LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64 + +.LSEH_info_poly1305_blocks_x86_64: + .byte 9,0,0,0 + .rva se_handler + .rva .Lblocks_body,.Lblocks_epilogue + 
+.LSEH_info_poly1305_emit_x86_64: + .byte 9,0,0,0 + .rva se_handler + .rva .LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64 +___ +$code.=<<___ if ($avx); +.LSEH_info_poly1305_blocks_avx_1: + .byte 9,0,0,0 + .rva se_handler + .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx_2: + .byte 9,0,0,0 + .rva se_handler + .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx_3: + .byte 9,0,0,0 + .rva avx_handler + .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[] + +.LSEH_info_poly1305_emit_avx: + .byte 9,0,0,0 + .rva se_handler + .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx +___ +$code.=<<___ if ($avx>1); +.LSEH_info_poly1305_blocks_avx2_1: + .byte 9,0,0,0 + .rva se_handler + .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx2_2: + .byte 9,0,0,0 + .rva se_handler + .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[] + +.LSEH_info_poly1305_blocks_avx2_3: + .byte 9,0,0,0 + .rva avx_handler + .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[] +___ +$code.=<<___ if ($avx>2); +.LSEH_info_poly1305_blocks_avx512: + .byte 9,0,0,0 + .rva avx_handler + .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[] +___ +} + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/\/\// and !/^$/); + print; +} +close SELF; + +foreach (split('\n',$code)) { + s/\`([^\`]*)\`/eval($1)/ge; + s/%r([a-z]+)#d/%e$1/g; + s/%r([0-9]+)#d/%r$1d/g; + s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g; + + if ($kernel) { + s/(^\.type.*),[0-9]+$/\1/; + s/(^\.type.*),\@abi-omnipotent+$/\1,\@function/; + next if /^\.cfi.*/; + } + + print $_,"\n"; +} +close STDOUT; diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305.c b/net/wireguard/crypto/zinc/poly1305/poly1305.c new file mode 100644 index 000000000000..a54bc3309cf2 --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/poly1305.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * Implementation of the Poly1305 message authenticator. 
+ * + * Information: https://cr.yp.to/mac.html + */ + +#include <zinc/poly1305.h> +#include "../selftest/run.h" + +#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/module.h> +#include <linux/init.h> + +#if defined(CONFIG_ZINC_ARCH_X86_64) +#include "poly1305-x86_64-glue.c" +#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64) +#include "poly1305-arm-glue.c" +#elif defined(CONFIG_ZINC_ARCH_MIPS) || defined(CONFIG_ZINC_ARCH_MIPS64) +#include "poly1305-mips-glue.c" +#else +static inline bool poly1305_init_arch(void *ctx, + const u8 key[POLY1305_KEY_SIZE]) +{ + return false; +} +static inline bool poly1305_blocks_arch(void *ctx, const u8 *input, + size_t len, const u32 padbit, + simd_context_t *simd_context) +{ + return false; +} +static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + return false; +} +static bool *const poly1305_nobs[] __initconst = { }; +static void __init poly1305_fpu_init(void) +{ +} +#endif + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) +#include "poly1305-donna64.c" +#else +#include "poly1305-donna32.c" +#endif + +void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE]) +{ + ctx->nonce[0] = get_unaligned_le32(&key[16]); + ctx->nonce[1] = get_unaligned_le32(&key[20]); + ctx->nonce[2] = get_unaligned_le32(&key[24]); + ctx->nonce[3] = get_unaligned_le32(&key[28]); + + if (!poly1305_init_arch(ctx->opaque, key)) + poly1305_init_generic(ctx->opaque, key); + + ctx->num = 0; +} + +static inline void poly1305_blocks(void *ctx, const u8 *input, const size_t len, + const u32 padbit, + simd_context_t *simd_context) +{ + if (!poly1305_blocks_arch(ctx, input, len, padbit, simd_context)) + poly1305_blocks_generic(ctx, input, len, padbit); +} + +static inline void poly1305_emit(void *ctx, u8 mac[POLY1305_KEY_SIZE], + const u32 nonce[4], + simd_context_t *simd_context) +{ + if (!poly1305_emit_arch(ctx, mac, nonce, simd_context)) + poly1305_emit_generic(ctx, mac, nonce); +} + +void poly1305_update(struct poly1305_ctx *ctx, const u8 *input, size_t len, + simd_context_t *simd_context) +{ + const size_t num = ctx->num; + size_t rem; + + if (num) { + rem = POLY1305_BLOCK_SIZE - num; + if (len < rem) { + memcpy(ctx->data + num, input, len); + ctx->num = num + len; + return; + } + memcpy(ctx->data + num, input, rem); + poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1, + simd_context); + input += rem; + len -= rem; + } + + rem = len % POLY1305_BLOCK_SIZE; + len -= rem; + + if (len >= POLY1305_BLOCK_SIZE) { + poly1305_blocks(ctx->opaque, input, len, 1, simd_context); + input += len; + } + + if (rem) + memcpy(ctx->data, input, rem); + + ctx->num = rem; +} + +void poly1305_final(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE], + simd_context_t *simd_context) +{ + size_t num = ctx->num; + + if (num) { + ctx->data[num++] = 1; + while (num < POLY1305_BLOCK_SIZE) + ctx->data[num++] = 0; + poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0, + simd_context); + } + + poly1305_emit(ctx->opaque, mac, ctx->nonce, simd_context); + + memzero_explicit(ctx, sizeof(*ctx)); +} + +#include "../selftest/poly1305.c" + +static bool nosimd __initdata = false; + +#ifndef COMPAT_ZINC_IS_A_MODULE +int __init poly1305_mod_init(void) +#else +static int __init mod_init(void) +#endif +{ + if (!nosimd) + poly1305_fpu_init(); + if (!selftest_run("poly1305", poly1305_selftest, poly1305_nobs, + 
ARRAY_SIZE(poly1305_nobs))) + return -ENOTRECOVERABLE; + return 0; +} + +#ifdef COMPAT_ZINC_IS_A_MODULE +static void __exit mod_exit(void) +{ +} + +module_param(nosimd, bool, 0); +module_init(mod_init); +module_exit(mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Poly1305 one-time authenticator"); +MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); +#endif diff --git a/net/wireguard/crypto/zinc/selftest/blake2s.c b/net/wireguard/crypto/zinc/selftest/blake2s.c new file mode 100644 index 000000000000..1b5c210dc7a8 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/blake2s.c @@ -0,0 +1,2090 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { + { 0x69, 0x21, 0x7a, 0x30, 0x79, 0x90, 0x80, 0x94, + 0xe1, 0x11, 0x21, 0xd0, 0x42, 0x35, 0x4a, 0x7c, + 0x1f, 0x55, 0xb6, 0x48, 0x2c, 0xa1, 0xa5, 0x1e, + 0x1b, 0x25, 0x0d, 0xfd, 0x1e, 0xd0, 0xee, 0xf9 }, + { 0xe3, 0x4d, 0x74, 0xdb, 0xaf, 0x4f, 0xf4, 0xc6, + 0xab, 0xd8, 0x71, 0xcc, 0x22, 0x04, 0x51, 0xd2, + 0xea, 0x26, 0x48, 0x84, 0x6c, 0x77, 0x57, 0xfb, + 0xaa, 0xc8, 0x2f, 0xe5, 0x1a, 0xd6, 0x4b, 0xea }, + { 0xdd, 0xad, 0x9a, 0xb1, 0x5d, 0xac, 0x45, 0x49, + 0xba, 0x42, 0xf4, 0x9d, 0x26, 0x24, 0x96, 0xbe, + 0xf6, 0xc0, 0xba, 0xe1, 0xdd, 0x34, 0x2a, 0x88, + 0x08, 0xf8, 0xea, 0x26, 0x7c, 0x6e, 0x21, 0x0c }, + { 0xe8, 0xf9, 0x1c, 0x6e, 0xf2, 0x32, 0xa0, 0x41, + 0x45, 0x2a, 0xb0, 0xe1, 0x49, 0x07, 0x0c, 0xdd, + 0x7d, 0xd1, 0x76, 0x9e, 0x75, 0xb3, 0xa5, 0x92, + 0x1b, 0xe3, 0x78, 0x76, 0xc4, 0x5c, 0x99, 0x00 }, + { 0x0c, 0xc7, 0x0e, 0x00, 0x34, 0x8b, 0x86, 0xba, + 0x29, 0x44, 0xd0, 0xc3, 0x20, 0x38, 0xb2, 0x5c, + 0x55, 0x58, 0x4f, 0x90, 0xdf, 0x23, 0x04, 0xf5, + 0x5f, 0xa3, 0x32, 0xaf, 0x5f, 0xb0, 0x1e, 0x20 }, + { 0xec, 0x19, 0x64, 0x19, 0x10, 0x87, 0xa4, 0xfe, + 0x9d, 0xf1, 0xc7, 0x95, 0x34, 0x2a, 0x02, 0xff, + 0xc1, 0x91, 0xa5, 0xb2, 0x51, 0x76, 0x48, 0x56, + 0xae, 0x5b, 0x8b, 0x57, 0x69, 0xf0, 0xc6, 0xcd }, + { 0xe1, 0xfa, 0x51, 0x61, 0x8d, 0x7d, 0xf4, 0xeb, + 0x70, 0xcf, 0x0d, 0x5a, 0x9e, 0x90, 0x6f, 0x80, + 0x6e, 0x9d, 0x19, 0xf7, 0xf4, 0xf0, 0x1e, 0x3b, + 0x62, 0x12, 0x88, 0xe4, 0x12, 0x04, 0x05, 0xd6 }, + { 0x59, 0x80, 0x01, 0xfa, 0xfb, 0xe8, 0xf9, 0x4e, + 0xc6, 0x6d, 0xc8, 0x27, 0xd0, 0x12, 0xcf, 0xcb, + 0xba, 0x22, 0x28, 0x56, 0x9f, 0x44, 0x8e, 0x89, + 0xea, 0x22, 0x08, 0xc8, 0xbf, 0x76, 0x92, 0x93 }, + { 0xc7, 0xe8, 0x87, 0xb5, 0x46, 0x62, 0x36, 0x35, + 0xe9, 0x3e, 0x04, 0x95, 0x59, 0x8f, 0x17, 0x26, + 0x82, 0x19, 0x96, 0xc2, 0x37, 0x77, 0x05, 0xb9, + 0x3a, 0x1f, 0x63, 0x6f, 0x87, 0x2b, 0xfa, 0x2d }, + { 0xc3, 0x15, 0xa4, 0x37, 0xdd, 0x28, 0x06, 0x2a, + 0x77, 0x0d, 0x48, 0x19, 0x67, 0x13, 0x6b, 0x1b, + 0x5e, 0xb8, 0x8b, 0x21, 0xee, 0x53, 0xd0, 0x32, + 0x9c, 0x58, 0x97, 0x12, 0x6e, 0x9d, 0xb0, 0x2c }, + { 0xbb, 0x47, 0x3d, 0xed, 0xdc, 0x05, 0x5f, 0xea, + 0x62, 0x28, 0xf2, 0x07, 0xda, 0x57, 0x53, 0x47, + 0xbb, 0x00, 0x40, 0x4c, 0xd3, 0x49, 0xd3, 0x8c, + 0x18, 0x02, 0x63, 0x07, 0xa2, 0x24, 0xcb, 0xff }, + { 0x68, 0x7e, 0x18, 0x73, 0xa8, 0x27, 0x75, 0x91, + 0xbb, 0x33, 0xd9, 0xad, 0xf9, 0xa1, 0x39, 0x12, + 0xef, 0xef, 0xe5, 0x57, 0xca, 0xfc, 0x39, 0xa7, + 0x95, 0x26, 0x23, 0xe4, 0x72, 0x55, 0xf1, 0x6d }, + { 0x1a, 0xc7, 0xba, 0x75, 0x4d, 0x6e, 0x2f, 0x94, + 0xe0, 0xe8, 0x6c, 0x46, 0xbf, 0xb2, 0x62, 0xab, + 0xbb, 0x74, 0xf4, 0x50, 0xef, 0x45, 0x6d, 0x6b, + 0x4d, 0x97, 0xaa, 0x80, 0xce, 0x6d, 0xa7, 0x67 }, + { 0x01, 0x2c, 0x97, 0x80, 0x96, 0x14, 0x81, 0x6b, + 0x5d, 0x94, 0x94, 
0x47, 0x7d, 0x4b, 0x68, 0x7d, + 0x15, 0xb9, 0x6e, 0xb6, 0x9c, 0x0e, 0x80, 0x74, + 0xa8, 0x51, 0x6f, 0x31, 0x22, 0x4b, 0x5c, 0x98 }, + { 0x91, 0xff, 0xd2, 0x6c, 0xfa, 0x4d, 0xa5, 0x13, + 0x4c, 0x7e, 0xa2, 0x62, 0xf7, 0x88, 0x9c, 0x32, + 0x9f, 0x61, 0xf6, 0xa6, 0x57, 0x22, 0x5c, 0xc2, + 0x12, 0xf4, 0x00, 0x56, 0xd9, 0x86, 0xb3, 0xf4 }, + { 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21, + 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67, + 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04, + 0xc0, 0x23, 0x23, 0xfd, 0x45, 0xc0, 0xa5, 0x2d }, + { 0xef, 0xc0, 0x4c, 0xdc, 0x39, 0x1c, 0x7e, 0x91, + 0x19, 0xbd, 0x38, 0x66, 0x8a, 0x53, 0x4e, 0x65, + 0xfe, 0x31, 0x03, 0x6d, 0x6a, 0x62, 0x11, 0x2e, + 0x44, 0xeb, 0xeb, 0x11, 0xf9, 0xc5, 0x70, 0x80 }, + { 0x99, 0x2c, 0xf5, 0xc0, 0x53, 0x44, 0x2a, 0x5f, + 0xbc, 0x4f, 0xaf, 0x58, 0x3e, 0x04, 0xe5, 0x0b, + 0xb7, 0x0d, 0x2f, 0x39, 0xfb, 0xb6, 0xa5, 0x03, + 0xf8, 0x9e, 0x56, 0xa6, 0x3e, 0x18, 0x57, 0x8a }, + { 0x38, 0x64, 0x0e, 0x9f, 0x21, 0x98, 0x3e, 0x67, + 0xb5, 0x39, 0xca, 0xcc, 0xae, 0x5e, 0xcf, 0x61, + 0x5a, 0xe2, 0x76, 0x4f, 0x75, 0xa0, 0x9c, 0x9c, + 0x59, 0xb7, 0x64, 0x83, 0xc1, 0xfb, 0xc7, 0x35 }, + { 0x21, 0x3d, 0xd3, 0x4c, 0x7e, 0xfe, 0x4f, 0xb2, + 0x7a, 0x6b, 0x35, 0xf6, 0xb4, 0x00, 0x0d, 0x1f, + 0xe0, 0x32, 0x81, 0xaf, 0x3c, 0x72, 0x3e, 0x5c, + 0x9f, 0x94, 0x74, 0x7a, 0x5f, 0x31, 0xcd, 0x3b }, + { 0xec, 0x24, 0x6e, 0xee, 0xb9, 0xce, 0xd3, 0xf7, + 0xad, 0x33, 0xed, 0x28, 0x66, 0x0d, 0xd9, 0xbb, + 0x07, 0x32, 0x51, 0x3d, 0xb4, 0xe2, 0xfa, 0x27, + 0x8b, 0x60, 0xcd, 0xe3, 0x68, 0x2a, 0x4c, 0xcd }, + { 0xac, 0x9b, 0x61, 0xd4, 0x46, 0x64, 0x8c, 0x30, + 0x05, 0xd7, 0x89, 0x2b, 0xf3, 0xa8, 0x71, 0x9f, + 0x4c, 0x81, 0x81, 0xcf, 0xdc, 0xbc, 0x2b, 0x79, + 0xfe, 0xf1, 0x0a, 0x27, 0x9b, 0x91, 0x10, 0x95 }, + { 0x7b, 0xf8, 0xb2, 0x29, 0x59, 0xe3, 0x4e, 0x3a, + 0x43, 0xf7, 0x07, 0x92, 0x23, 0xe8, 0x3a, 0x97, + 0x54, 0x61, 0x7d, 0x39, 0x1e, 0x21, 0x3d, 0xfd, + 0x80, 0x8e, 0x41, 0xb9, 0xbe, 0xad, 0x4c, 0xe7 }, + { 0x68, 0xd4, 0xb5, 0xd4, 0xfa, 0x0e, 0x30, 0x2b, + 0x64, 0xcc, 0xc5, 0xaf, 0x79, 0x29, 0x13, 0xac, + 0x4c, 0x88, 0xec, 0x95, 0xc0, 0x7d, 0xdf, 0x40, + 0x69, 0x42, 0x56, 0xeb, 0x88, 0xce, 0x9f, 0x3d }, + { 0xb2, 0xc2, 0x42, 0x0f, 0x05, 0xf9, 0xab, 0xe3, + 0x63, 0x15, 0x91, 0x93, 0x36, 0xb3, 0x7e, 0x4e, + 0x0f, 0xa3, 0x3f, 0xf7, 0xe7, 0x6a, 0x49, 0x27, + 0x67, 0x00, 0x6f, 0xdb, 0x5d, 0x93, 0x54, 0x62 }, + { 0x13, 0x4f, 0x61, 0xbb, 0xd0, 0xbb, 0xb6, 0x9a, + 0xed, 0x53, 0x43, 0x90, 0x45, 0x51, 0xa3, 0xe6, + 0xc1, 0xaa, 0x7d, 0xcd, 0xd7, 0x7e, 0x90, 0x3e, + 0x70, 0x23, 0xeb, 0x7c, 0x60, 0x32, 0x0a, 0xa7 }, + { 0x46, 0x93, 0xf9, 0xbf, 0xf7, 0xd4, 0xf3, 0x98, + 0x6a, 0x7d, 0x17, 0x6e, 0x6e, 0x06, 0xf7, 0x2a, + 0xd1, 0x49, 0x0d, 0x80, 0x5c, 0x99, 0xe2, 0x53, + 0x47, 0xb8, 0xde, 0x77, 0xb4, 0xdb, 0x6d, 0x9b }, + { 0x85, 0x3e, 0x26, 0xf7, 0x41, 0x95, 0x3b, 0x0f, + 0xd5, 0xbd, 0xb4, 0x24, 0xe8, 0xab, 0x9e, 0x8b, + 0x37, 0x50, 0xea, 0xa8, 0xef, 0x61, 0xe4, 0x79, + 0x02, 0xc9, 0x1e, 0x55, 0x4e, 0x9c, 0x73, 0xb9 }, + { 0xf7, 0xde, 0x53, 0x63, 0x61, 0xab, 0xaa, 0x0e, + 0x15, 0x81, 0x56, 0xcf, 0x0e, 0xa4, 0xf6, 0x3a, + 0x99, 0xb5, 0xe4, 0x05, 0x4f, 0x8f, 0xa4, 0xc9, + 0xd4, 0x5f, 0x62, 0x85, 0xca, 0xd5, 0x56, 0x94 }, + { 0x4c, 0x23, 0x06, 0x08, 0x86, 0x0a, 0x99, 0xae, + 0x8d, 0x7b, 0xd5, 0xc2, 0xcc, 0x17, 0xfa, 0x52, + 0x09, 0x6b, 0x9a, 0x61, 0xbe, 0xdb, 0x17, 0xcb, + 0x76, 0x17, 0x86, 0x4a, 0xd2, 0x9c, 0xa7, 0xa6 }, + { 0xae, 0xb9, 0x20, 0xea, 0x87, 0x95, 0x2d, 0xad, + 0xb1, 0xfb, 0x75, 0x92, 0x91, 0xe3, 0x38, 0x81, + 0x39, 0xa8, 0x72, 0x86, 0x50, 0x01, 0x88, 0x6e, + 
0xd8, 0x47, 0x52, 0xe9, 0x3c, 0x25, 0x0c, 0x2a }, + { 0xab, 0xa4, 0xad, 0x9b, 0x48, 0x0b, 0x9d, 0xf3, + 0xd0, 0x8c, 0xa5, 0xe8, 0x7b, 0x0c, 0x24, 0x40, + 0xd4, 0xe4, 0xea, 0x21, 0x22, 0x4c, 0x2e, 0xb4, + 0x2c, 0xba, 0xe4, 0x69, 0xd0, 0x89, 0xb9, 0x31 }, + { 0x05, 0x82, 0x56, 0x07, 0xd7, 0xfd, 0xf2, 0xd8, + 0x2e, 0xf4, 0xc3, 0xc8, 0xc2, 0xae, 0xa9, 0x61, + 0xad, 0x98, 0xd6, 0x0e, 0xdf, 0xf7, 0xd0, 0x18, + 0x98, 0x3e, 0x21, 0x20, 0x4c, 0x0d, 0x93, 0xd1 }, + { 0xa7, 0x42, 0xf8, 0xb6, 0xaf, 0x82, 0xd8, 0xa6, + 0xca, 0x23, 0x57, 0xc5, 0xf1, 0xcf, 0x91, 0xde, + 0xfb, 0xd0, 0x66, 0x26, 0x7d, 0x75, 0xc0, 0x48, + 0xb3, 0x52, 0x36, 0x65, 0x85, 0x02, 0x59, 0x62 }, + { 0x2b, 0xca, 0xc8, 0x95, 0x99, 0x00, 0x0b, 0x42, + 0xc9, 0x5a, 0xe2, 0x38, 0x35, 0xa7, 0x13, 0x70, + 0x4e, 0xd7, 0x97, 0x89, 0xc8, 0x4f, 0xef, 0x14, + 0x9a, 0x87, 0x4f, 0xf7, 0x33, 0xf0, 0x17, 0xa2 }, + { 0xac, 0x1e, 0xd0, 0x7d, 0x04, 0x8f, 0x10, 0x5a, + 0x9e, 0x5b, 0x7a, 0xb8, 0x5b, 0x09, 0xa4, 0x92, + 0xd5, 0xba, 0xff, 0x14, 0xb8, 0xbf, 0xb0, 0xe9, + 0xfd, 0x78, 0x94, 0x86, 0xee, 0xa2, 0xb9, 0x74 }, + { 0xe4, 0x8d, 0x0e, 0xcf, 0xaf, 0x49, 0x7d, 0x5b, + 0x27, 0xc2, 0x5d, 0x99, 0xe1, 0x56, 0xcb, 0x05, + 0x79, 0xd4, 0x40, 0xd6, 0xe3, 0x1f, 0xb6, 0x24, + 0x73, 0x69, 0x6d, 0xbf, 0x95, 0xe0, 0x10, 0xe4 }, + { 0x12, 0xa9, 0x1f, 0xad, 0xf8, 0xb2, 0x16, 0x44, + 0xfd, 0x0f, 0x93, 0x4f, 0x3c, 0x4a, 0x8f, 0x62, + 0xba, 0x86, 0x2f, 0xfd, 0x20, 0xe8, 0xe9, 0x61, + 0x15, 0x4c, 0x15, 0xc1, 0x38, 0x84, 0xed, 0x3d }, + { 0x7c, 0xbe, 0xe9, 0x6e, 0x13, 0x98, 0x97, 0xdc, + 0x98, 0xfb, 0xef, 0x3b, 0xe8, 0x1a, 0xd4, 0xd9, + 0x64, 0xd2, 0x35, 0xcb, 0x12, 0x14, 0x1f, 0xb6, + 0x67, 0x27, 0xe6, 0xe5, 0xdf, 0x73, 0xa8, 0x78 }, + { 0xeb, 0xf6, 0x6a, 0xbb, 0x59, 0x7a, 0xe5, 0x72, + 0xa7, 0x29, 0x7c, 0xb0, 0x87, 0x1e, 0x35, 0x5a, + 0xcc, 0xaf, 0xad, 0x83, 0x77, 0xb8, 0xe7, 0x8b, + 0xf1, 0x64, 0xce, 0x2a, 0x18, 0xde, 0x4b, 0xaf }, + { 0x71, 0xb9, 0x33, 0xb0, 0x7e, 0x4f, 0xf7, 0x81, + 0x8c, 0xe0, 0x59, 0xd0, 0x08, 0x82, 0x9e, 0x45, + 0x3c, 0x6f, 0xf0, 0x2e, 0xc0, 0xa7, 0xdb, 0x39, + 0x3f, 0xc2, 0xd8, 0x70, 0xf3, 0x7a, 0x72, 0x86 }, + { 0x7c, 0xf7, 0xc5, 0x13, 0x31, 0x22, 0x0b, 0x8d, + 0x3e, 0xba, 0xed, 0x9c, 0x29, 0x39, 0x8a, 0x16, + 0xd9, 0x81, 0x56, 0xe2, 0x61, 0x3c, 0xb0, 0x88, + 0xf2, 0xb0, 0xe0, 0x8a, 0x1b, 0xe4, 0xcf, 0x4f }, + { 0x3e, 0x41, 0xa1, 0x08, 0xe0, 0xf6, 0x4a, 0xd2, + 0x76, 0xb9, 0x79, 0xe1, 0xce, 0x06, 0x82, 0x79, + 0xe1, 0x6f, 0x7b, 0xc7, 0xe4, 0xaa, 0x1d, 0x21, + 0x1e, 0x17, 0xb8, 0x11, 0x61, 0xdf, 0x16, 0x02 }, + { 0x88, 0x65, 0x02, 0xa8, 0x2a, 0xb4, 0x7b, 0xa8, + 0xd8, 0x67, 0x10, 0xaa, 0x9d, 0xe3, 0xd4, 0x6e, + 0xa6, 0x5c, 0x47, 0xaf, 0x6e, 0xe8, 0xde, 0x45, + 0x0c, 0xce, 0xb8, 0xb1, 0x1b, 0x04, 0x5f, 0x50 }, + { 0xc0, 0x21, 0xbc, 0x5f, 0x09, 0x54, 0xfe, 0xe9, + 0x4f, 0x46, 0xea, 0x09, 0x48, 0x7e, 0x10, 0xa8, + 0x48, 0x40, 0xd0, 0x2f, 0x64, 0x81, 0x0b, 0xc0, + 0x8d, 0x9e, 0x55, 0x1f, 0x7d, 0x41, 0x68, 0x14 }, + { 0x20, 0x30, 0x51, 0x6e, 0x8a, 0x5f, 0xe1, 0x9a, + 0xe7, 0x9c, 0x33, 0x6f, 0xce, 0x26, 0x38, 0x2a, + 0x74, 0x9d, 0x3f, 0xd0, 0xec, 0x91, 0xe5, 0x37, + 0xd4, 0xbd, 0x23, 0x58, 0xc1, 0x2d, 0xfb, 0x22 }, + { 0x55, 0x66, 0x98, 0xda, 0xc8, 0x31, 0x7f, 0xd3, + 0x6d, 0xfb, 0xdf, 0x25, 0xa7, 0x9c, 0xb1, 0x12, + 0xd5, 0x42, 0x58, 0x60, 0x60, 0x5c, 0xba, 0xf5, + 0x07, 0xf2, 0x3b, 0xf7, 0xe9, 0xf4, 0x2a, 0xfe }, + { 0x2f, 0x86, 0x7b, 0xa6, 0x77, 0x73, 0xfd, 0xc3, + 0xe9, 0x2f, 0xce, 0xd9, 0x9a, 0x64, 0x09, 0xad, + 0x39, 0xd0, 0xb8, 0x80, 0xfd, 0xe8, 0xf1, 0x09, + 0xa8, 0x17, 0x30, 0xc4, 0x45, 0x1d, 0x01, 0x78 }, + { 0x17, 0x2e, 0xc2, 0x18, 0xf1, 
0x19, 0xdf, 0xae, + 0x98, 0x89, 0x6d, 0xff, 0x29, 0xdd, 0x98, 0x76, + 0xc9, 0x4a, 0xf8, 0x74, 0x17, 0xf9, 0xae, 0x4c, + 0x70, 0x14, 0xbb, 0x4e, 0x4b, 0x96, 0xaf, 0xc7 }, + { 0x3f, 0x85, 0x81, 0x4a, 0x18, 0x19, 0x5f, 0x87, + 0x9a, 0xa9, 0x62, 0xf9, 0x5d, 0x26, 0xbd, 0x82, + 0xa2, 0x78, 0xf2, 0xb8, 0x23, 0x20, 0x21, 0x8f, + 0x6b, 0x3b, 0xd6, 0xf7, 0xf6, 0x67, 0xa6, 0xd9 }, + { 0x1b, 0x61, 0x8f, 0xba, 0xa5, 0x66, 0xb3, 0xd4, + 0x98, 0xc1, 0x2e, 0x98, 0x2c, 0x9e, 0xc5, 0x2e, + 0x4d, 0xa8, 0x5a, 0x8c, 0x54, 0xf3, 0x8f, 0x34, + 0xc0, 0x90, 0x39, 0x4f, 0x23, 0xc1, 0x84, 0xc1 }, + { 0x0c, 0x75, 0x8f, 0xb5, 0x69, 0x2f, 0xfd, 0x41, + 0xa3, 0x57, 0x5d, 0x0a, 0xf0, 0x0c, 0xc7, 0xfb, + 0xf2, 0xcb, 0xe5, 0x90, 0x5a, 0x58, 0x32, 0x3a, + 0x88, 0xae, 0x42, 0x44, 0xf6, 0xe4, 0xc9, 0x93 }, + { 0xa9, 0x31, 0x36, 0x0c, 0xad, 0x62, 0x8c, 0x7f, + 0x12, 0xa6, 0xc1, 0xc4, 0xb7, 0x53, 0xb0, 0xf4, + 0x06, 0x2a, 0xef, 0x3c, 0xe6, 0x5a, 0x1a, 0xe3, + 0xf1, 0x93, 0x69, 0xda, 0xdf, 0x3a, 0xe2, 0x3d }, + { 0xcb, 0xac, 0x7d, 0x77, 0x3b, 0x1e, 0x3b, 0x3c, + 0x66, 0x91, 0xd7, 0xab, 0xb7, 0xe9, 0xdf, 0x04, + 0x5c, 0x8b, 0xa1, 0x92, 0x68, 0xde, 0xd1, 0x53, + 0x20, 0x7f, 0x5e, 0x80, 0x43, 0x52, 0xec, 0x5d }, + { 0x23, 0xa1, 0x96, 0xd3, 0x80, 0x2e, 0xd3, 0xc1, + 0xb3, 0x84, 0x01, 0x9a, 0x82, 0x32, 0x58, 0x40, + 0xd3, 0x2f, 0x71, 0x95, 0x0c, 0x45, 0x80, 0xb0, + 0x34, 0x45, 0xe0, 0x89, 0x8e, 0x14, 0x05, 0x3c }, + { 0xf4, 0x49, 0x54, 0x70, 0xf2, 0x26, 0xc8, 0xc2, + 0x14, 0xbe, 0x08, 0xfd, 0xfa, 0xd4, 0xbc, 0x4a, + 0x2a, 0x9d, 0xbe, 0xa9, 0x13, 0x6a, 0x21, 0x0d, + 0xf0, 0xd4, 0xb6, 0x49, 0x29, 0xe6, 0xfc, 0x14 }, + { 0xe2, 0x90, 0xdd, 0x27, 0x0b, 0x46, 0x7f, 0x34, + 0xab, 0x1c, 0x00, 0x2d, 0x34, 0x0f, 0xa0, 0x16, + 0x25, 0x7f, 0xf1, 0x9e, 0x58, 0x33, 0xfd, 0xbb, + 0xf2, 0xcb, 0x40, 0x1c, 0x3b, 0x28, 0x17, 0xde }, + { 0x9f, 0xc7, 0xb5, 0xde, 0xd3, 0xc1, 0x50, 0x42, + 0xb2, 0xa6, 0x58, 0x2d, 0xc3, 0x9b, 0xe0, 0x16, + 0xd2, 0x4a, 0x68, 0x2d, 0x5e, 0x61, 0xad, 0x1e, + 0xff, 0x9c, 0x63, 0x30, 0x98, 0x48, 0xf7, 0x06 }, + { 0x8c, 0xca, 0x67, 0xa3, 0x6d, 0x17, 0xd5, 0xe6, + 0x34, 0x1c, 0xb5, 0x92, 0xfd, 0x7b, 0xef, 0x99, + 0x26, 0xc9, 0xe3, 0xaa, 0x10, 0x27, 0xea, 0x11, + 0xa7, 0xd8, 0xbd, 0x26, 0x0b, 0x57, 0x6e, 0x04 }, + { 0x40, 0x93, 0x92, 0xf5, 0x60, 0xf8, 0x68, 0x31, + 0xda, 0x43, 0x73, 0xee, 0x5e, 0x00, 0x74, 0x26, + 0x05, 0x95, 0xd7, 0xbc, 0x24, 0x18, 0x3b, 0x60, + 0xed, 0x70, 0x0d, 0x45, 0x83, 0xd3, 0xf6, 0xf0 }, + { 0x28, 0x02, 0x16, 0x5d, 0xe0, 0x90, 0x91, 0x55, + 0x46, 0xf3, 0x39, 0x8c, 0xd8, 0x49, 0x16, 0x4a, + 0x19, 0xf9, 0x2a, 0xdb, 0xc3, 0x61, 0xad, 0xc9, + 0x9b, 0x0f, 0x20, 0xc8, 0xea, 0x07, 0x10, 0x54 }, + { 0xad, 0x83, 0x91, 0x68, 0xd9, 0xf8, 0xa4, 0xbe, + 0x95, 0xba, 0x9e, 0xf9, 0xa6, 0x92, 0xf0, 0x72, + 0x56, 0xae, 0x43, 0xfe, 0x6f, 0x98, 0x64, 0xe2, + 0x90, 0x69, 0x1b, 0x02, 0x56, 0xce, 0x50, 0xa9 }, + { 0x75, 0xfd, 0xaa, 0x50, 0x38, 0xc2, 0x84, 0xb8, + 0x6d, 0x6e, 0x8a, 0xff, 0xe8, 0xb2, 0x80, 0x7e, + 0x46, 0x7b, 0x86, 0x60, 0x0e, 0x79, 0xaf, 0x36, + 0x89, 0xfb, 0xc0, 0x63, 0x28, 0xcb, 0xf8, 0x94 }, + { 0xe5, 0x7c, 0xb7, 0x94, 0x87, 0xdd, 0x57, 0x90, + 0x24, 0x32, 0xb2, 0x50, 0x73, 0x38, 0x13, 0xbd, + 0x96, 0xa8, 0x4e, 0xfc, 0xe5, 0x9f, 0x65, 0x0f, + 0xac, 0x26, 0xe6, 0x69, 0x6a, 0xef, 0xaf, 0xc3 }, + { 0x56, 0xf3, 0x4e, 0x8b, 0x96, 0x55, 0x7e, 0x90, + 0xc1, 0xf2, 0x4b, 0x52, 0xd0, 0xc8, 0x9d, 0x51, + 0x08, 0x6a, 0xcf, 0x1b, 0x00, 0xf6, 0x34, 0xcf, + 0x1d, 0xde, 0x92, 0x33, 0xb8, 0xea, 0xaa, 0x3e }, + { 0x1b, 0x53, 0xee, 0x94, 0xaa, 0xf3, 0x4e, 0x4b, + 0x15, 0x9d, 0x48, 0xde, 0x35, 0x2c, 0x7f, 0x06, + 0x61, 0xd0, 
0xa4, 0x0e, 0xdf, 0xf9, 0x5a, 0x0b, + 0x16, 0x39, 0xb4, 0x09, 0x0e, 0x97, 0x44, 0x72 }, + { 0x05, 0x70, 0x5e, 0x2a, 0x81, 0x75, 0x7c, 0x14, + 0xbd, 0x38, 0x3e, 0xa9, 0x8d, 0xda, 0x54, 0x4e, + 0xb1, 0x0e, 0x6b, 0xc0, 0x7b, 0xae, 0x43, 0x5e, + 0x25, 0x18, 0xdb, 0xe1, 0x33, 0x52, 0x53, 0x75 }, + { 0xd8, 0xb2, 0x86, 0x6e, 0x8a, 0x30, 0x9d, 0xb5, + 0x3e, 0x52, 0x9e, 0xc3, 0x29, 0x11, 0xd8, 0x2f, + 0x5c, 0xa1, 0x6c, 0xff, 0x76, 0x21, 0x68, 0x91, + 0xa9, 0x67, 0x6a, 0xa3, 0x1a, 0xaa, 0x6c, 0x42 }, + { 0xf5, 0x04, 0x1c, 0x24, 0x12, 0x70, 0xeb, 0x04, + 0xc7, 0x1e, 0xc2, 0xc9, 0x5d, 0x4c, 0x38, 0xd8, + 0x03, 0xb1, 0x23, 0x7b, 0x0f, 0x29, 0xfd, 0x4d, + 0xb3, 0xeb, 0x39, 0x76, 0x69, 0xe8, 0x86, 0x99 }, + { 0x9a, 0x4c, 0xe0, 0x77, 0xc3, 0x49, 0x32, 0x2f, + 0x59, 0x5e, 0x0e, 0xe7, 0x9e, 0xd0, 0xda, 0x5f, + 0xab, 0x66, 0x75, 0x2c, 0xbf, 0xef, 0x8f, 0x87, + 0xd0, 0xe9, 0xd0, 0x72, 0x3c, 0x75, 0x30, 0xdd }, + { 0x65, 0x7b, 0x09, 0xf3, 0xd0, 0xf5, 0x2b, 0x5b, + 0x8f, 0x2f, 0x97, 0x16, 0x3a, 0x0e, 0xdf, 0x0c, + 0x04, 0xf0, 0x75, 0x40, 0x8a, 0x07, 0xbb, 0xeb, + 0x3a, 0x41, 0x01, 0xa8, 0x91, 0x99, 0x0d, 0x62 }, + { 0x1e, 0x3f, 0x7b, 0xd5, 0xa5, 0x8f, 0xa5, 0x33, + 0x34, 0x4a, 0xa8, 0xed, 0x3a, 0xc1, 0x22, 0xbb, + 0x9e, 0x70, 0xd4, 0xef, 0x50, 0xd0, 0x04, 0x53, + 0x08, 0x21, 0x94, 0x8f, 0x5f, 0xe6, 0x31, 0x5a }, + { 0x80, 0xdc, 0xcf, 0x3f, 0xd8, 0x3d, 0xfd, 0x0d, + 0x35, 0xaa, 0x28, 0x58, 0x59, 0x22, 0xab, 0x89, + 0xd5, 0x31, 0x39, 0x97, 0x67, 0x3e, 0xaf, 0x90, + 0x5c, 0xea, 0x9c, 0x0b, 0x22, 0x5c, 0x7b, 0x5f }, + { 0x8a, 0x0d, 0x0f, 0xbf, 0x63, 0x77, 0xd8, 0x3b, + 0xb0, 0x8b, 0x51, 0x4b, 0x4b, 0x1c, 0x43, 0xac, + 0xc9, 0x5d, 0x75, 0x17, 0x14, 0xf8, 0x92, 0x56, + 0x45, 0xcb, 0x6b, 0xc8, 0x56, 0xca, 0x15, 0x0a }, + { 0x9f, 0xa5, 0xb4, 0x87, 0x73, 0x8a, 0xd2, 0x84, + 0x4c, 0xc6, 0x34, 0x8a, 0x90, 0x19, 0x18, 0xf6, + 0x59, 0xa3, 0xb8, 0x9e, 0x9c, 0x0d, 0xfe, 0xea, + 0xd3, 0x0d, 0xd9, 0x4b, 0xcf, 0x42, 0xef, 0x8e }, + { 0x80, 0x83, 0x2c, 0x4a, 0x16, 0x77, 0xf5, 0xea, + 0x25, 0x60, 0xf6, 0x68, 0xe9, 0x35, 0x4d, 0xd3, + 0x69, 0x97, 0xf0, 0x37, 0x28, 0xcf, 0xa5, 0x5e, + 0x1b, 0x38, 0x33, 0x7c, 0x0c, 0x9e, 0xf8, 0x18 }, + { 0xab, 0x37, 0xdd, 0xb6, 0x83, 0x13, 0x7e, 0x74, + 0x08, 0x0d, 0x02, 0x6b, 0x59, 0x0b, 0x96, 0xae, + 0x9b, 0xb4, 0x47, 0x72, 0x2f, 0x30, 0x5a, 0x5a, + 0xc5, 0x70, 0xec, 0x1d, 0xf9, 0xb1, 0x74, 0x3c }, + { 0x3e, 0xe7, 0x35, 0xa6, 0x94, 0xc2, 0x55, 0x9b, + 0x69, 0x3a, 0xa6, 0x86, 0x29, 0x36, 0x1e, 0x15, + 0xd1, 0x22, 0x65, 0xad, 0x6a, 0x3d, 0xed, 0xf4, + 0x88, 0xb0, 0xb0, 0x0f, 0xac, 0x97, 0x54, 0xba }, + { 0xd6, 0xfc, 0xd2, 0x32, 0x19, 0xb6, 0x47, 0xe4, + 0xcb, 0xd5, 0xeb, 0x2d, 0x0a, 0xd0, 0x1e, 0xc8, + 0x83, 0x8a, 0x4b, 0x29, 0x01, 0xfc, 0x32, 0x5c, + 0xc3, 0x70, 0x19, 0x81, 0xca, 0x6c, 0x88, 0x8b }, + { 0x05, 0x20, 0xec, 0x2f, 0x5b, 0xf7, 0xa7, 0x55, + 0xda, 0xcb, 0x50, 0xc6, 0xbf, 0x23, 0x3e, 0x35, + 0x15, 0x43, 0x47, 0x63, 0xdb, 0x01, 0x39, 0xcc, + 0xd9, 0xfa, 0xef, 0xbb, 0x82, 0x07, 0x61, 0x2d }, + { 0xaf, 0xf3, 0xb7, 0x5f, 0x3f, 0x58, 0x12, 0x64, + 0xd7, 0x66, 0x16, 0x62, 0xb9, 0x2f, 0x5a, 0xd3, + 0x7c, 0x1d, 0x32, 0xbd, 0x45, 0xff, 0x81, 0xa4, + 0xed, 0x8a, 0xdc, 0x9e, 0xf3, 0x0d, 0xd9, 0x89 }, + { 0xd0, 0xdd, 0x65, 0x0b, 0xef, 0xd3, 0xba, 0x63, + 0xdc, 0x25, 0x10, 0x2c, 0x62, 0x7c, 0x92, 0x1b, + 0x9c, 0xbe, 0xb0, 0xb1, 0x30, 0x68, 0x69, 0x35, + 0xb5, 0xc9, 0x27, 0xcb, 0x7c, 0xcd, 0x5e, 0x3b }, + { 0xe1, 0x14, 0x98, 0x16, 0xb1, 0x0a, 0x85, 0x14, + 0xfb, 0x3e, 0x2c, 0xab, 0x2c, 0x08, 0xbe, 0xe9, + 0xf7, 0x3c, 0xe7, 0x62, 0x21, 0x70, 0x12, 0x46, + 0xa5, 0x89, 0xbb, 0xb6, 0x73, 0x02, 0xd8, 0xa9 
}, + { 0x7d, 0xa3, 0xf4, 0x41, 0xde, 0x90, 0x54, 0x31, + 0x7e, 0x72, 0xb5, 0xdb, 0xf9, 0x79, 0xda, 0x01, + 0xe6, 0xbc, 0xee, 0xbb, 0x84, 0x78, 0xea, 0xe6, + 0xa2, 0x28, 0x49, 0xd9, 0x02, 0x92, 0x63, 0x5c }, + { 0x12, 0x30, 0xb1, 0xfc, 0x8a, 0x7d, 0x92, 0x15, + 0xed, 0xc2, 0xd4, 0xa2, 0xde, 0xcb, 0xdd, 0x0a, + 0x6e, 0x21, 0x6c, 0x92, 0x42, 0x78, 0xc9, 0x1f, + 0xc5, 0xd1, 0x0e, 0x7d, 0x60, 0x19, 0x2d, 0x94 }, + { 0x57, 0x50, 0xd7, 0x16, 0xb4, 0x80, 0x8f, 0x75, + 0x1f, 0xeb, 0xc3, 0x88, 0x06, 0xba, 0x17, 0x0b, + 0xf6, 0xd5, 0x19, 0x9a, 0x78, 0x16, 0xbe, 0x51, + 0x4e, 0x3f, 0x93, 0x2f, 0xbe, 0x0c, 0xb8, 0x71 }, + { 0x6f, 0xc5, 0x9b, 0x2f, 0x10, 0xfe, 0xba, 0x95, + 0x4a, 0xa6, 0x82, 0x0b, 0x3c, 0xa9, 0x87, 0xee, + 0x81, 0xd5, 0xcc, 0x1d, 0xa3, 0xc6, 0x3c, 0xe8, + 0x27, 0x30, 0x1c, 0x56, 0x9d, 0xfb, 0x39, 0xce }, + { 0xc7, 0xc3, 0xfe, 0x1e, 0xeb, 0xdc, 0x7b, 0x5a, + 0x93, 0x93, 0x26, 0xe8, 0xdd, 0xb8, 0x3e, 0x8b, + 0xf2, 0xb7, 0x80, 0xb6, 0x56, 0x78, 0xcb, 0x62, + 0xf2, 0x08, 0xb0, 0x40, 0xab, 0xdd, 0x35, 0xe2 }, + { 0x0c, 0x75, 0xc1, 0xa1, 0x5c, 0xf3, 0x4a, 0x31, + 0x4e, 0xe4, 0x78, 0xf4, 0xa5, 0xce, 0x0b, 0x8a, + 0x6b, 0x36, 0x52, 0x8e, 0xf7, 0xa8, 0x20, 0x69, + 0x6c, 0x3e, 0x42, 0x46, 0xc5, 0xa1, 0x58, 0x64 }, + { 0x21, 0x6d, 0xc1, 0x2a, 0x10, 0x85, 0x69, 0xa3, + 0xc7, 0xcd, 0xde, 0x4a, 0xed, 0x43, 0xa6, 0xc3, + 0x30, 0x13, 0x9d, 0xda, 0x3c, 0xcc, 0x4a, 0x10, + 0x89, 0x05, 0xdb, 0x38, 0x61, 0x89, 0x90, 0x50 }, + { 0xa5, 0x7b, 0xe6, 0xae, 0x67, 0x56, 0xf2, 0x8b, + 0x02, 0xf5, 0x9d, 0xad, 0xf7, 0xe0, 0xd7, 0xd8, + 0x80, 0x7f, 0x10, 0xfa, 0x15, 0xce, 0xd1, 0xad, + 0x35, 0x85, 0x52, 0x1a, 0x1d, 0x99, 0x5a, 0x89 }, + { 0x81, 0x6a, 0xef, 0x87, 0x59, 0x53, 0x71, 0x6c, + 0xd7, 0xa5, 0x81, 0xf7, 0x32, 0xf5, 0x3d, 0xd4, + 0x35, 0xda, 0xb6, 0x6d, 0x09, 0xc3, 0x61, 0xd2, + 0xd6, 0x59, 0x2d, 0xe1, 0x77, 0x55, 0xd8, 0xa8 }, + { 0x9a, 0x76, 0x89, 0x32, 0x26, 0x69, 0x3b, 0x6e, + 0xa9, 0x7e, 0x6a, 0x73, 0x8f, 0x9d, 0x10, 0xfb, + 0x3d, 0x0b, 0x43, 0xae, 0x0e, 0x8b, 0x7d, 0x81, + 0x23, 0xea, 0x76, 0xce, 0x97, 0x98, 0x9c, 0x7e }, + { 0x8d, 0xae, 0xdb, 0x9a, 0x27, 0x15, 0x29, 0xdb, + 0xb7, 0xdc, 0x3b, 0x60, 0x7f, 0xe5, 0xeb, 0x2d, + 0x32, 0x11, 0x77, 0x07, 0x58, 0xdd, 0x3b, 0x0a, + 0x35, 0x93, 0xd2, 0xd7, 0x95, 0x4e, 0x2d, 0x5b }, + { 0x16, 0xdb, 0xc0, 0xaa, 0x5d, 0xd2, 0xc7, 0x74, + 0xf5, 0x05, 0x10, 0x0f, 0x73, 0x37, 0x86, 0xd8, + 0xa1, 0x75, 0xfc, 0xbb, 0xb5, 0x9c, 0x43, 0xe1, + 0xfb, 0xff, 0x3e, 0x1e, 0xaf, 0x31, 0xcb, 0x4a }, + { 0x86, 0x06, 0xcb, 0x89, 0x9c, 0x6a, 0xea, 0xf5, + 0x1b, 0x9d, 0xb0, 0xfe, 0x49, 0x24, 0xa9, 0xfd, + 0x5d, 0xab, 0xc1, 0x9f, 0x88, 0x26, 0xf2, 0xbc, + 0x1c, 0x1d, 0x7d, 0xa1, 0x4d, 0x2c, 0x2c, 0x99 }, + { 0x84, 0x79, 0x73, 0x1a, 0xed, 0xa5, 0x7b, 0xd3, + 0x7e, 0xad, 0xb5, 0x1a, 0x50, 0x7e, 0x30, 0x7f, + 0x3b, 0xd9, 0x5e, 0x69, 0xdb, 0xca, 0x94, 0xf3, + 0xbc, 0x21, 0x72, 0x60, 0x66, 0xad, 0x6d, 0xfd }, + { 0x58, 0x47, 0x3a, 0x9e, 0xa8, 0x2e, 0xfa, 0x3f, + 0x3b, 0x3d, 0x8f, 0xc8, 0x3e, 0xd8, 0x86, 0x31, + 0x27, 0xb3, 0x3a, 0xe8, 0xde, 0xae, 0x63, 0x07, + 0x20, 0x1e, 0xdb, 0x6d, 0xde, 0x61, 0xde, 0x29 }, + { 0x9a, 0x92, 0x55, 0xd5, 0x3a, 0xf1, 0x16, 0xde, + 0x8b, 0xa2, 0x7c, 0xe3, 0x5b, 0x4c, 0x7e, 0x15, + 0x64, 0x06, 0x57, 0xa0, 0xfc, 0xb8, 0x88, 0xc7, + 0x0d, 0x95, 0x43, 0x1d, 0xac, 0xd8, 0xf8, 0x30 }, + { 0x9e, 0xb0, 0x5f, 0xfb, 0xa3, 0x9f, 0xd8, 0x59, + 0x6a, 0x45, 0x49, 0x3e, 0x18, 0xd2, 0x51, 0x0b, + 0xf3, 0xef, 0x06, 0x5c, 0x51, 0xd6, 0xe1, 0x3a, + 0xbe, 0x66, 0xaa, 0x57, 0xe0, 0x5c, 0xfd, 0xb7 }, + { 0x81, 0xdc, 0xc3, 0xa5, 0x05, 0xea, 0xce, 0x3f, + 0x87, 0x9d, 0x8f, 0x70, 0x27, 
0x76, 0x77, 0x0f, + 0x9d, 0xf5, 0x0e, 0x52, 0x1d, 0x14, 0x28, 0xa8, + 0x5d, 0xaf, 0x04, 0xf9, 0xad, 0x21, 0x50, 0xe0 }, + { 0xe3, 0xe3, 0xc4, 0xaa, 0x3a, 0xcb, 0xbc, 0x85, + 0x33, 0x2a, 0xf9, 0xd5, 0x64, 0xbc, 0x24, 0x16, + 0x5e, 0x16, 0x87, 0xf6, 0xb1, 0xad, 0xcb, 0xfa, + 0xe7, 0x7a, 0x8f, 0x03, 0xc7, 0x2a, 0xc2, 0x8c }, + { 0x67, 0x46, 0xc8, 0x0b, 0x4e, 0xb5, 0x6a, 0xea, + 0x45, 0xe6, 0x4e, 0x72, 0x89, 0xbb, 0xa3, 0xed, + 0xbf, 0x45, 0xec, 0xf8, 0x20, 0x64, 0x81, 0xff, + 0x63, 0x02, 0x12, 0x29, 0x84, 0xcd, 0x52, 0x6a }, + { 0x2b, 0x62, 0x8e, 0x52, 0x76, 0x4d, 0x7d, 0x62, + 0xc0, 0x86, 0x8b, 0x21, 0x23, 0x57, 0xcd, 0xd1, + 0x2d, 0x91, 0x49, 0x82, 0x2f, 0x4e, 0x98, 0x45, + 0xd9, 0x18, 0xa0, 0x8d, 0x1a, 0xe9, 0x90, 0xc0 }, + { 0xe4, 0xbf, 0xe8, 0x0d, 0x58, 0xc9, 0x19, 0x94, + 0x61, 0x39, 0x09, 0xdc, 0x4b, 0x1a, 0x12, 0x49, + 0x68, 0x96, 0xc0, 0x04, 0xaf, 0x7b, 0x57, 0x01, + 0x48, 0x3d, 0xe4, 0x5d, 0x28, 0x23, 0xd7, 0x8e }, + { 0xeb, 0xb4, 0xba, 0x15, 0x0c, 0xef, 0x27, 0x34, + 0x34, 0x5b, 0x5d, 0x64, 0x1b, 0xbe, 0xd0, 0x3a, + 0x21, 0xea, 0xfa, 0xe9, 0x33, 0xc9, 0x9e, 0x00, + 0x92, 0x12, 0xef, 0x04, 0x57, 0x4a, 0x85, 0x30 }, + { 0x39, 0x66, 0xec, 0x73, 0xb1, 0x54, 0xac, 0xc6, + 0x97, 0xac, 0x5c, 0xf5, 0xb2, 0x4b, 0x40, 0xbd, + 0xb0, 0xdb, 0x9e, 0x39, 0x88, 0x36, 0xd7, 0x6d, + 0x4b, 0x88, 0x0e, 0x3b, 0x2a, 0xf1, 0xaa, 0x27 }, + { 0xef, 0x7e, 0x48, 0x31, 0xb3, 0xa8, 0x46, 0x36, + 0x51, 0x8d, 0x6e, 0x4b, 0xfc, 0xe6, 0x4a, 0x43, + 0xdb, 0x2a, 0x5d, 0xda, 0x9c, 0xca, 0x2b, 0x44, + 0xf3, 0x90, 0x33, 0xbd, 0xc4, 0x0d, 0x62, 0x43 }, + { 0x7a, 0xbf, 0x6a, 0xcf, 0x5c, 0x8e, 0x54, 0x9d, + 0xdb, 0xb1, 0x5a, 0xe8, 0xd8, 0xb3, 0x88, 0xc1, + 0xc1, 0x97, 0xe6, 0x98, 0x73, 0x7c, 0x97, 0x85, + 0x50, 0x1e, 0xd1, 0xf9, 0x49, 0x30, 0xb7, 0xd9 }, + { 0x88, 0x01, 0x8d, 0xed, 0x66, 0x81, 0x3f, 0x0c, + 0xa9, 0x5d, 0xef, 0x47, 0x4c, 0x63, 0x06, 0x92, + 0x01, 0x99, 0x67, 0xb9, 0xe3, 0x68, 0x88, 0xda, + 0xdd, 0x94, 0x12, 0x47, 0x19, 0xb6, 0x82, 0xf6 }, + { 0x39, 0x30, 0x87, 0x6b, 0x9f, 0xc7, 0x52, 0x90, + 0x36, 0xb0, 0x08, 0xb1, 0xb8, 0xbb, 0x99, 0x75, + 0x22, 0xa4, 0x41, 0x63, 0x5a, 0x0c, 0x25, 0xec, + 0x02, 0xfb, 0x6d, 0x90, 0x26, 0xe5, 0x5a, 0x97 }, + { 0x0a, 0x40, 0x49, 0xd5, 0x7e, 0x83, 0x3b, 0x56, + 0x95, 0xfa, 0xc9, 0x3d, 0xd1, 0xfb, 0xef, 0x31, + 0x66, 0xb4, 0x4b, 0x12, 0xad, 0x11, 0x24, 0x86, + 0x62, 0x38, 0x3a, 0xe0, 0x51, 0xe1, 0x58, 0x27 }, + { 0x81, 0xdc, 0xc0, 0x67, 0x8b, 0xb6, 0xa7, 0x65, + 0xe4, 0x8c, 0x32, 0x09, 0x65, 0x4f, 0xe9, 0x00, + 0x89, 0xce, 0x44, 0xff, 0x56, 0x18, 0x47, 0x7e, + 0x39, 0xab, 0x28, 0x64, 0x76, 0xdf, 0x05, 0x2b }, + { 0xe6, 0x9b, 0x3a, 0x36, 0xa4, 0x46, 0x19, 0x12, + 0xdc, 0x08, 0x34, 0x6b, 0x11, 0xdd, 0xcb, 0x9d, + 0xb7, 0x96, 0xf8, 0x85, 0xfd, 0x01, 0x93, 0x6e, + 0x66, 0x2f, 0xe2, 0x92, 0x97, 0xb0, 0x99, 0xa4 }, + { 0x5a, 0xc6, 0x50, 0x3b, 0x0d, 0x8d, 0xa6, 0x91, + 0x76, 0x46, 0xe6, 0xdc, 0xc8, 0x7e, 0xdc, 0x58, + 0xe9, 0x42, 0x45, 0x32, 0x4c, 0xc2, 0x04, 0xf4, + 0xdd, 0x4a, 0xf0, 0x15, 0x63, 0xac, 0xd4, 0x27 }, + { 0xdf, 0x6d, 0xda, 0x21, 0x35, 0x9a, 0x30, 0xbc, + 0x27, 0x17, 0x80, 0x97, 0x1c, 0x1a, 0xbd, 0x56, + 0xa6, 0xef, 0x16, 0x7e, 0x48, 0x08, 0x87, 0x88, + 0x8e, 0x73, 0xa8, 0x6d, 0x3b, 0xf6, 0x05, 0xe9 }, + { 0xe8, 0xe6, 0xe4, 0x70, 0x71, 0xe7, 0xb7, 0xdf, + 0x25, 0x80, 0xf2, 0x25, 0xcf, 0xbb, 0xed, 0xf8, + 0x4c, 0xe6, 0x77, 0x46, 0x62, 0x66, 0x28, 0xd3, + 0x30, 0x97, 0xe4, 0xb7, 0xdc, 0x57, 0x11, 0x07 }, + { 0x53, 0xe4, 0x0e, 0xad, 0x62, 0x05, 0x1e, 0x19, + 0xcb, 0x9b, 0xa8, 0x13, 0x3e, 0x3e, 0x5c, 0x1c, + 0xe0, 0x0d, 0xdc, 0xad, 0x8a, 0xcf, 0x34, 0x2a, + 0x22, 0x43, 
0x60, 0xb0, 0xac, 0xc1, 0x47, 0x77 }, + { 0x9c, 0xcd, 0x53, 0xfe, 0x80, 0xbe, 0x78, 0x6a, + 0xa9, 0x84, 0x63, 0x84, 0x62, 0xfb, 0x28, 0xaf, + 0xdf, 0x12, 0x2b, 0x34, 0xd7, 0x8f, 0x46, 0x87, + 0xec, 0x63, 0x2b, 0xb1, 0x9d, 0xe2, 0x37, 0x1a }, + { 0xcb, 0xd4, 0x80, 0x52, 0xc4, 0x8d, 0x78, 0x84, + 0x66, 0xa3, 0xe8, 0x11, 0x8c, 0x56, 0xc9, 0x7f, + 0xe1, 0x46, 0xe5, 0x54, 0x6f, 0xaa, 0xf9, 0x3e, + 0x2b, 0xc3, 0xc4, 0x7e, 0x45, 0x93, 0x97, 0x53 }, + { 0x25, 0x68, 0x83, 0xb1, 0x4e, 0x2a, 0xf4, 0x4d, + 0xad, 0xb2, 0x8e, 0x1b, 0x34, 0xb2, 0xac, 0x0f, + 0x0f, 0x4c, 0x91, 0xc3, 0x4e, 0xc9, 0x16, 0x9e, + 0x29, 0x03, 0x61, 0x58, 0xac, 0xaa, 0x95, 0xb9 }, + { 0x44, 0x71, 0xb9, 0x1a, 0xb4, 0x2d, 0xb7, 0xc4, + 0xdd, 0x84, 0x90, 0xab, 0x95, 0xa2, 0xee, 0x8d, + 0x04, 0xe3, 0xef, 0x5c, 0x3d, 0x6f, 0xc7, 0x1a, + 0xc7, 0x4b, 0x2b, 0x26, 0x91, 0x4d, 0x16, 0x41 }, + { 0xa5, 0xeb, 0x08, 0x03, 0x8f, 0x8f, 0x11, 0x55, + 0xed, 0x86, 0xe6, 0x31, 0x90, 0x6f, 0xc1, 0x30, + 0x95, 0xf6, 0xbb, 0xa4, 0x1d, 0xe5, 0xd4, 0xe7, + 0x95, 0x75, 0x8e, 0xc8, 0xc8, 0xdf, 0x8a, 0xf1 }, + { 0xdc, 0x1d, 0xb6, 0x4e, 0xd8, 0xb4, 0x8a, 0x91, + 0x0e, 0x06, 0x0a, 0x6b, 0x86, 0x63, 0x74, 0xc5, + 0x78, 0x78, 0x4e, 0x9a, 0xc4, 0x9a, 0xb2, 0x77, + 0x40, 0x92, 0xac, 0x71, 0x50, 0x19, 0x34, 0xac }, + { 0x28, 0x54, 0x13, 0xb2, 0xf2, 0xee, 0x87, 0x3d, + 0x34, 0x31, 0x9e, 0xe0, 0xbb, 0xfb, 0xb9, 0x0f, + 0x32, 0xda, 0x43, 0x4c, 0xc8, 0x7e, 0x3d, 0xb5, + 0xed, 0x12, 0x1b, 0xb3, 0x98, 0xed, 0x96, 0x4b }, + { 0x02, 0x16, 0xe0, 0xf8, 0x1f, 0x75, 0x0f, 0x26, + 0xf1, 0x99, 0x8b, 0xc3, 0x93, 0x4e, 0x3e, 0x12, + 0x4c, 0x99, 0x45, 0xe6, 0x85, 0xa6, 0x0b, 0x25, + 0xe8, 0xfb, 0xd9, 0x62, 0x5a, 0xb6, 0xb5, 0x99 }, + { 0x38, 0xc4, 0x10, 0xf5, 0xb9, 0xd4, 0x07, 0x20, + 0x50, 0x75, 0x5b, 0x31, 0xdc, 0xa8, 0x9f, 0xd5, + 0x39, 0x5c, 0x67, 0x85, 0xee, 0xb3, 0xd7, 0x90, + 0xf3, 0x20, 0xff, 0x94, 0x1c, 0x5a, 0x93, 0xbf }, + { 0xf1, 0x84, 0x17, 0xb3, 0x9d, 0x61, 0x7a, 0xb1, + 0xc1, 0x8f, 0xdf, 0x91, 0xeb, 0xd0, 0xfc, 0x6d, + 0x55, 0x16, 0xbb, 0x34, 0xcf, 0x39, 0x36, 0x40, + 0x37, 0xbc, 0xe8, 0x1f, 0xa0, 0x4c, 0xec, 0xb1 }, + { 0x1f, 0xa8, 0x77, 0xde, 0x67, 0x25, 0x9d, 0x19, + 0x86, 0x3a, 0x2a, 0x34, 0xbc, 0xc6, 0x96, 0x2a, + 0x2b, 0x25, 0xfc, 0xbf, 0x5c, 0xbe, 0xcd, 0x7e, + 0xde, 0x8f, 0x1f, 0xa3, 0x66, 0x88, 0xa7, 0x96 }, + { 0x5b, 0xd1, 0x69, 0xe6, 0x7c, 0x82, 0xc2, 0xc2, + 0xe9, 0x8e, 0xf7, 0x00, 0x8b, 0xdf, 0x26, 0x1f, + 0x2d, 0xdf, 0x30, 0xb1, 0xc0, 0x0f, 0x9e, 0x7f, + 0x27, 0x5b, 0xb3, 0xe8, 0xa2, 0x8d, 0xc9, 0xa2 }, + { 0xc8, 0x0a, 0xbe, 0xeb, 0xb6, 0x69, 0xad, 0x5d, + 0xee, 0xb5, 0xf5, 0xec, 0x8e, 0xa6, 0xb7, 0xa0, + 0x5d, 0xdf, 0x7d, 0x31, 0xec, 0x4c, 0x0a, 0x2e, + 0xe2, 0x0b, 0x0b, 0x98, 0xca, 0xec, 0x67, 0x46 }, + { 0xe7, 0x6d, 0x3f, 0xbd, 0xa5, 0xba, 0x37, 0x4e, + 0x6b, 0xf8, 0xe5, 0x0f, 0xad, 0xc3, 0xbb, 0xb9, + 0xba, 0x5c, 0x20, 0x6e, 0xbd, 0xec, 0x89, 0xa3, + 0xa5, 0x4c, 0xf3, 0xdd, 0x84, 0xa0, 0x70, 0x16 }, + { 0x7b, 0xba, 0x9d, 0xc5, 0xb5, 0xdb, 0x20, 0x71, + 0xd1, 0x77, 0x52, 0xb1, 0x04, 0x4c, 0x1e, 0xce, + 0xd9, 0x6a, 0xaf, 0x2d, 0xd4, 0x6e, 0x9b, 0x43, + 0x37, 0x50, 0xe8, 0xea, 0x0d, 0xcc, 0x18, 0x70 }, + { 0xf2, 0x9b, 0x1b, 0x1a, 0xb9, 0xba, 0xb1, 0x63, + 0x01, 0x8e, 0xe3, 0xda, 0x15, 0x23, 0x2c, 0xca, + 0x78, 0xec, 0x52, 0xdb, 0xc3, 0x4e, 0xda, 0x5b, + 0x82, 0x2e, 0xc1, 0xd8, 0x0f, 0xc2, 0x1b, 0xd0 }, + { 0x9e, 0xe3, 0xe3, 0xe7, 0xe9, 0x00, 0xf1, 0xe1, + 0x1d, 0x30, 0x8c, 0x4b, 0x2b, 0x30, 0x76, 0xd2, + 0x72, 0xcf, 0x70, 0x12, 0x4f, 0x9f, 0x51, 0xe1, + 0xda, 0x60, 0xf3, 0x78, 0x46, 0xcd, 0xd2, 0xf4 }, + { 0x70, 0xea, 0x3b, 0x01, 0x76, 0x92, 0x7d, 
0x90, + 0x96, 0xa1, 0x85, 0x08, 0xcd, 0x12, 0x3a, 0x29, + 0x03, 0x25, 0x92, 0x0a, 0x9d, 0x00, 0xa8, 0x9b, + 0x5d, 0xe0, 0x42, 0x73, 0xfb, 0xc7, 0x6b, 0x85 }, + { 0x67, 0xde, 0x25, 0xc0, 0x2a, 0x4a, 0xab, 0xa2, + 0x3b, 0xdc, 0x97, 0x3c, 0x8b, 0xb0, 0xb5, 0x79, + 0x6d, 0x47, 0xcc, 0x06, 0x59, 0xd4, 0x3d, 0xff, + 0x1f, 0x97, 0xde, 0x17, 0x49, 0x63, 0xb6, 0x8e }, + { 0xb2, 0x16, 0x8e, 0x4e, 0x0f, 0x18, 0xb0, 0xe6, + 0x41, 0x00, 0xb5, 0x17, 0xed, 0x95, 0x25, 0x7d, + 0x73, 0xf0, 0x62, 0x0d, 0xf8, 0x85, 0xc1, 0x3d, + 0x2e, 0xcf, 0x79, 0x36, 0x7b, 0x38, 0x4c, 0xee }, + { 0x2e, 0x7d, 0xec, 0x24, 0x28, 0x85, 0x3b, 0x2c, + 0x71, 0x76, 0x07, 0x45, 0x54, 0x1f, 0x7a, 0xfe, + 0x98, 0x25, 0xb5, 0xdd, 0x77, 0xdf, 0x06, 0x51, + 0x1d, 0x84, 0x41, 0xa9, 0x4b, 0xac, 0xc9, 0x27 }, + { 0xca, 0x9f, 0xfa, 0xc4, 0xc4, 0x3f, 0x0b, 0x48, + 0x46, 0x1d, 0xc5, 0xc2, 0x63, 0xbe, 0xa3, 0xf6, + 0xf0, 0x06, 0x11, 0xce, 0xac, 0xab, 0xf6, 0xf8, + 0x95, 0xba, 0x2b, 0x01, 0x01, 0xdb, 0xb6, 0x8d }, + { 0x74, 0x10, 0xd4, 0x2d, 0x8f, 0xd1, 0xd5, 0xe9, + 0xd2, 0xf5, 0x81, 0x5c, 0xb9, 0x34, 0x17, 0x99, + 0x88, 0x28, 0xef, 0x3c, 0x42, 0x30, 0xbf, 0xbd, + 0x41, 0x2d, 0xf0, 0xa4, 0xa7, 0xa2, 0x50, 0x7a }, + { 0x50, 0x10, 0xf6, 0x84, 0x51, 0x6d, 0xcc, 0xd0, + 0xb6, 0xee, 0x08, 0x52, 0xc2, 0x51, 0x2b, 0x4d, + 0xc0, 0x06, 0x6c, 0xf0, 0xd5, 0x6f, 0x35, 0x30, + 0x29, 0x78, 0xdb, 0x8a, 0xe3, 0x2c, 0x6a, 0x81 }, + { 0xac, 0xaa, 0xb5, 0x85, 0xf7, 0xb7, 0x9b, 0x71, + 0x99, 0x35, 0xce, 0xb8, 0x95, 0x23, 0xdd, 0xc5, + 0x48, 0x27, 0xf7, 0x5c, 0x56, 0x88, 0x38, 0x56, + 0x15, 0x4a, 0x56, 0xcd, 0xcd, 0x5e, 0xe9, 0x88 }, + { 0x66, 0x6d, 0xe5, 0xd1, 0x44, 0x0f, 0xee, 0x73, + 0x31, 0xaa, 0xf0, 0x12, 0x3a, 0x62, 0xef, 0x2d, + 0x8b, 0xa5, 0x74, 0x53, 0xa0, 0x76, 0x96, 0x35, + 0xac, 0x6c, 0xd0, 0x1e, 0x63, 0x3f, 0x77, 0x12 }, + { 0xa6, 0xf9, 0x86, 0x58, 0xf6, 0xea, 0xba, 0xf9, + 0x02, 0xd8, 0xb3, 0x87, 0x1a, 0x4b, 0x10, 0x1d, + 0x16, 0x19, 0x6e, 0x8a, 0x4b, 0x24, 0x1e, 0x15, + 0x58, 0xfe, 0x29, 0x96, 0x6e, 0x10, 0x3e, 0x8d }, + { 0x89, 0x15, 0x46, 0xa8, 0xb2, 0x9f, 0x30, 0x47, + 0xdd, 0xcf, 0xe5, 0xb0, 0x0e, 0x45, 0xfd, 0x55, + 0x75, 0x63, 0x73, 0x10, 0x5e, 0xa8, 0x63, 0x7d, + 0xfc, 0xff, 0x54, 0x7b, 0x6e, 0xa9, 0x53, 0x5f }, + { 0x18, 0xdf, 0xbc, 0x1a, 0xc5, 0xd2, 0x5b, 0x07, + 0x61, 0x13, 0x7d, 0xbd, 0x22, 0xc1, 0x7c, 0x82, + 0x9d, 0x0f, 0x0e, 0xf1, 0xd8, 0x23, 0x44, 0xe9, + 0xc8, 0x9c, 0x28, 0x66, 0x94, 0xda, 0x24, 0xe8 }, + { 0xb5, 0x4b, 0x9b, 0x67, 0xf8, 0xfe, 0xd5, 0x4b, + 0xbf, 0x5a, 0x26, 0x66, 0xdb, 0xdf, 0x4b, 0x23, + 0xcf, 0xf1, 0xd1, 0xb6, 0xf4, 0xaf, 0xc9, 0x85, + 0xb2, 0xe6, 0xd3, 0x30, 0x5a, 0x9f, 0xf8, 0x0f }, + { 0x7d, 0xb4, 0x42, 0xe1, 0x32, 0xba, 0x59, 0xbc, + 0x12, 0x89, 0xaa, 0x98, 0xb0, 0xd3, 0xe8, 0x06, + 0x00, 0x4f, 0x8e, 0xc1, 0x28, 0x11, 0xaf, 0x1e, + 0x2e, 0x33, 0xc6, 0x9b, 0xfd, 0xe7, 0x29, 0xe1 }, + { 0x25, 0x0f, 0x37, 0xcd, 0xc1, 0x5e, 0x81, 0x7d, + 0x2f, 0x16, 0x0d, 0x99, 0x56, 0xc7, 0x1f, 0xe3, + 0xeb, 0x5d, 0xb7, 0x45, 0x56, 0xe4, 0xad, 0xf9, + 0xa4, 0xff, 0xaf, 0xba, 0x74, 0x01, 0x03, 0x96 }, + { 0x4a, 0xb8, 0xa3, 0xdd, 0x1d, 0xdf, 0x8a, 0xd4, + 0x3d, 0xab, 0x13, 0xa2, 0x7f, 0x66, 0xa6, 0x54, + 0x4f, 0x29, 0x05, 0x97, 0xfa, 0x96, 0x04, 0x0e, + 0x0e, 0x1d, 0xb9, 0x26, 0x3a, 0xa4, 0x79, 0xf8 }, + { 0xee, 0x61, 0x72, 0x7a, 0x07, 0x66, 0xdf, 0x93, + 0x9c, 0xcd, 0xc8, 0x60, 0x33, 0x40, 0x44, 0xc7, + 0x9a, 0x3c, 0x9b, 0x15, 0x62, 0x00, 0xbc, 0x3a, + 0xa3, 0x29, 0x73, 0x48, 0x3d, 0x83, 0x41, 0xae }, + { 0x3f, 0x68, 0xc7, 0xec, 0x63, 0xac, 0x11, 0xeb, + 0xb9, 0x8f, 0x94, 0xb3, 0x39, 0xb0, 0x5c, 0x10, + 0x49, 0x84, 0xfd, 0xa5, 
0x01, 0x03, 0x06, 0x01, + 0x44, 0xe5, 0xa2, 0xbf, 0xcc, 0xc9, 0xda, 0x95 }, + { 0x05, 0x6f, 0x29, 0x81, 0x6b, 0x8a, 0xf8, 0xf5, + 0x66, 0x82, 0xbc, 0x4d, 0x7c, 0xf0, 0x94, 0x11, + 0x1d, 0xa7, 0x73, 0x3e, 0x72, 0x6c, 0xd1, 0x3d, + 0x6b, 0x3e, 0x8e, 0xa0, 0x3e, 0x92, 0xa0, 0xd5 }, + { 0xf5, 0xec, 0x43, 0xa2, 0x8a, 0xcb, 0xef, 0xf1, + 0xf3, 0x31, 0x8a, 0x5b, 0xca, 0xc7, 0xc6, 0x6d, + 0xdb, 0x52, 0x30, 0xb7, 0x9d, 0xb2, 0xd1, 0x05, + 0xbc, 0xbe, 0x15, 0xf3, 0xc1, 0x14, 0x8d, 0x69 }, + { 0x2a, 0x69, 0x60, 0xad, 0x1d, 0x8d, 0xd5, 0x47, + 0x55, 0x5c, 0xfb, 0xd5, 0xe4, 0x60, 0x0f, 0x1e, + 0xaa, 0x1c, 0x8e, 0xda, 0x34, 0xde, 0x03, 0x74, + 0xec, 0x4a, 0x26, 0xea, 0xaa, 0xa3, 0x3b, 0x4e }, + { 0xdc, 0xc1, 0xea, 0x7b, 0xaa, 0xb9, 0x33, 0x84, + 0xf7, 0x6b, 0x79, 0x68, 0x66, 0x19, 0x97, 0x54, + 0x74, 0x2f, 0x7b, 0x96, 0xd6, 0xb4, 0xc1, 0x20, + 0x16, 0x5c, 0x04, 0xa6, 0xc4, 0xf5, 0xce, 0x10 }, + { 0x13, 0xd5, 0xdf, 0x17, 0x92, 0x21, 0x37, 0x9c, + 0x6a, 0x78, 0xc0, 0x7c, 0x79, 0x3f, 0xf5, 0x34, + 0x87, 0xca, 0xe6, 0xbf, 0x9f, 0xe8, 0x82, 0x54, + 0x1a, 0xb0, 0xe7, 0x35, 0xe3, 0xea, 0xda, 0x3b }, + { 0x8c, 0x59, 0xe4, 0x40, 0x76, 0x41, 0xa0, 0x1e, + 0x8f, 0xf9, 0x1f, 0x99, 0x80, 0xdc, 0x23, 0x6f, + 0x4e, 0xcd, 0x6f, 0xcf, 0x52, 0x58, 0x9a, 0x09, + 0x9a, 0x96, 0x16, 0x33, 0x96, 0x77, 0x14, 0xe1 }, + { 0x83, 0x3b, 0x1a, 0xc6, 0xa2, 0x51, 0xfd, 0x08, + 0xfd, 0x6d, 0x90, 0x8f, 0xea, 0x2a, 0x4e, 0xe1, + 0xe0, 0x40, 0xbc, 0xa9, 0x3f, 0xc1, 0xa3, 0x8e, + 0xc3, 0x82, 0x0e, 0x0c, 0x10, 0xbd, 0x82, 0xea }, + { 0xa2, 0x44, 0xf9, 0x27, 0xf3, 0xb4, 0x0b, 0x8f, + 0x6c, 0x39, 0x15, 0x70, 0xc7, 0x65, 0x41, 0x8f, + 0x2f, 0x6e, 0x70, 0x8e, 0xac, 0x90, 0x06, 0xc5, + 0x1a, 0x7f, 0xef, 0xf4, 0xaf, 0x3b, 0x2b, 0x9e }, + { 0x3d, 0x99, 0xed, 0x95, 0x50, 0xcf, 0x11, 0x96, + 0xe6, 0xc4, 0xd2, 0x0c, 0x25, 0x96, 0x20, 0xf8, + 0x58, 0xc3, 0xd7, 0x03, 0x37, 0x4c, 0x12, 0x8c, + 0xe7, 0xb5, 0x90, 0x31, 0x0c, 0x83, 0x04, 0x6d }, + { 0x2b, 0x35, 0xc4, 0x7d, 0x7b, 0x87, 0x76, 0x1f, + 0x0a, 0xe4, 0x3a, 0xc5, 0x6a, 0xc2, 0x7b, 0x9f, + 0x25, 0x83, 0x03, 0x67, 0xb5, 0x95, 0xbe, 0x8c, + 0x24, 0x0e, 0x94, 0x60, 0x0c, 0x6e, 0x33, 0x12 }, + { 0x5d, 0x11, 0xed, 0x37, 0xd2, 0x4d, 0xc7, 0x67, + 0x30, 0x5c, 0xb7, 0xe1, 0x46, 0x7d, 0x87, 0xc0, + 0x65, 0xac, 0x4b, 0xc8, 0xa4, 0x26, 0xde, 0x38, + 0x99, 0x1f, 0xf5, 0x9a, 0xa8, 0x73, 0x5d, 0x02 }, + { 0xb8, 0x36, 0x47, 0x8e, 0x1c, 0xa0, 0x64, 0x0d, + 0xce, 0x6f, 0xd9, 0x10, 0xa5, 0x09, 0x62, 0x72, + 0xc8, 0x33, 0x09, 0x90, 0xcd, 0x97, 0x86, 0x4a, + 0xc2, 0xbf, 0x14, 0xef, 0x6b, 0x23, 0x91, 0x4a }, + { 0x91, 0x00, 0xf9, 0x46, 0xd6, 0xcc, 0xde, 0x3a, + 0x59, 0x7f, 0x90, 0xd3, 0x9f, 0xc1, 0x21, 0x5b, + 0xad, 0xdc, 0x74, 0x13, 0x64, 0x3d, 0x85, 0xc2, + 0x1c, 0x3e, 0xee, 0x5d, 0x2d, 0xd3, 0x28, 0x94 }, + { 0xda, 0x70, 0xee, 0xdd, 0x23, 0xe6, 0x63, 0xaa, + 0x1a, 0x74, 0xb9, 0x76, 0x69, 0x35, 0xb4, 0x79, + 0x22, 0x2a, 0x72, 0xaf, 0xba, 0x5c, 0x79, 0x51, + 0x58, 0xda, 0xd4, 0x1a, 0x3b, 0xd7, 0x7e, 0x40 }, + { 0xf0, 0x67, 0xed, 0x6a, 0x0d, 0xbd, 0x43, 0xaa, + 0x0a, 0x92, 0x54, 0xe6, 0x9f, 0xd6, 0x6b, 0xdd, + 0x8a, 0xcb, 0x87, 0xde, 0x93, 0x6c, 0x25, 0x8c, + 0xfb, 0x02, 0x28, 0x5f, 0x2c, 0x11, 0xfa, 0x79 }, + { 0x71, 0x5c, 0x99, 0xc7, 0xd5, 0x75, 0x80, 0xcf, + 0x97, 0x53, 0xb4, 0xc1, 0xd7, 0x95, 0xe4, 0x5a, + 0x83, 0xfb, 0xb2, 0x28, 0xc0, 0xd3, 0x6f, 0xbe, + 0x20, 0xfa, 0xf3, 0x9b, 0xdd, 0x6d, 0x4e, 0x85 }, + { 0xe4, 0x57, 0xd6, 0xad, 0x1e, 0x67, 0xcb, 0x9b, + 0xbd, 0x17, 0xcb, 0xd6, 0x98, 0xfa, 0x6d, 0x7d, + 0xae, 0x0c, 0x9b, 0x7a, 0xd6, 0xcb, 0xd6, 0x53, + 0x96, 0x34, 0xe3, 0x2a, 0x71, 0x9c, 0x84, 0x92 }, + { 0xec, 
0xe3, 0xea, 0x81, 0x03, 0xe0, 0x24, 0x83, + 0xc6, 0x4a, 0x70, 0xa4, 0xbd, 0xce, 0xe8, 0xce, + 0xb6, 0x27, 0x8f, 0x25, 0x33, 0xf3, 0xf4, 0x8d, + 0xbe, 0xed, 0xfb, 0xa9, 0x45, 0x31, 0xd4, 0xae }, + { 0x38, 0x8a, 0xa5, 0xd3, 0x66, 0x7a, 0x97, 0xc6, + 0x8d, 0x3d, 0x56, 0xf8, 0xf3, 0xee, 0x8d, 0x3d, + 0x36, 0x09, 0x1f, 0x17, 0xfe, 0x5d, 0x1b, 0x0d, + 0x5d, 0x84, 0xc9, 0x3b, 0x2f, 0xfe, 0x40, 0xbd }, + { 0x8b, 0x6b, 0x31, 0xb9, 0xad, 0x7c, 0x3d, 0x5c, + 0xd8, 0x4b, 0xf9, 0x89, 0x47, 0xb9, 0xcd, 0xb5, + 0x9d, 0xf8, 0xa2, 0x5f, 0xf7, 0x38, 0x10, 0x10, + 0x13, 0xbe, 0x4f, 0xd6, 0x5e, 0x1d, 0xd1, 0xa3 }, + { 0x06, 0x62, 0x91, 0xf6, 0xbb, 0xd2, 0x5f, 0x3c, + 0x85, 0x3d, 0xb7, 0xd8, 0xb9, 0x5c, 0x9a, 0x1c, + 0xfb, 0x9b, 0xf1, 0xc1, 0xc9, 0x9f, 0xb9, 0x5a, + 0x9b, 0x78, 0x69, 0xd9, 0x0f, 0x1c, 0x29, 0x03 }, + { 0xa7, 0x07, 0xef, 0xbc, 0xcd, 0xce, 0xed, 0x42, + 0x96, 0x7a, 0x66, 0xf5, 0x53, 0x9b, 0x93, 0xed, + 0x75, 0x60, 0xd4, 0x67, 0x30, 0x40, 0x16, 0xc4, + 0x78, 0x0d, 0x77, 0x55, 0xa5, 0x65, 0xd4, 0xc4 }, + { 0x38, 0xc5, 0x3d, 0xfb, 0x70, 0xbe, 0x7e, 0x79, + 0x2b, 0x07, 0xa6, 0xa3, 0x5b, 0x8a, 0x6a, 0x0a, + 0xba, 0x02, 0xc5, 0xc5, 0xf3, 0x8b, 0xaf, 0x5c, + 0x82, 0x3f, 0xdf, 0xd9, 0xe4, 0x2d, 0x65, 0x7e }, + { 0xf2, 0x91, 0x13, 0x86, 0x50, 0x1d, 0x9a, 0xb9, + 0xd7, 0x20, 0xcf, 0x8a, 0xd1, 0x05, 0x03, 0xd5, + 0x63, 0x4b, 0xf4, 0xb7, 0xd1, 0x2b, 0x56, 0xdf, + 0xb7, 0x4f, 0xec, 0xc6, 0xe4, 0x09, 0x3f, 0x68 }, + { 0xc6, 0xf2, 0xbd, 0xd5, 0x2b, 0x81, 0xe6, 0xe4, + 0xf6, 0x59, 0x5a, 0xbd, 0x4d, 0x7f, 0xb3, 0x1f, + 0x65, 0x11, 0x69, 0xd0, 0x0f, 0xf3, 0x26, 0x92, + 0x6b, 0x34, 0x94, 0x7b, 0x28, 0xa8, 0x39, 0x59 }, + { 0x29, 0x3d, 0x94, 0xb1, 0x8c, 0x98, 0xbb, 0x32, + 0x23, 0x36, 0x6b, 0x8c, 0xe7, 0x4c, 0x28, 0xfb, + 0xdf, 0x28, 0xe1, 0xf8, 0x4a, 0x33, 0x50, 0xb0, + 0xeb, 0x2d, 0x18, 0x04, 0xa5, 0x77, 0x57, 0x9b }, + { 0x2c, 0x2f, 0xa5, 0xc0, 0xb5, 0x15, 0x33, 0x16, + 0x5b, 0xc3, 0x75, 0xc2, 0x2e, 0x27, 0x81, 0x76, + 0x82, 0x70, 0xa3, 0x83, 0x98, 0x5d, 0x13, 0xbd, + 0x6b, 0x67, 0xb6, 0xfd, 0x67, 0xf8, 0x89, 0xeb }, + { 0xca, 0xa0, 0x9b, 0x82, 0xb7, 0x25, 0x62, 0xe4, + 0x3f, 0x4b, 0x22, 0x75, 0xc0, 0x91, 0x91, 0x8e, + 0x62, 0x4d, 0x91, 0x16, 0x61, 0xcc, 0x81, 0x1b, + 0xb5, 0xfa, 0xec, 0x51, 0xf6, 0x08, 0x8e, 0xf7 }, + { 0x24, 0x76, 0x1e, 0x45, 0xe6, 0x74, 0x39, 0x53, + 0x79, 0xfb, 0x17, 0x72, 0x9c, 0x78, 0xcb, 0x93, + 0x9e, 0x6f, 0x74, 0xc5, 0xdf, 0xfb, 0x9c, 0x96, + 0x1f, 0x49, 0x59, 0x82, 0xc3, 0xed, 0x1f, 0xe3 }, + { 0x55, 0xb7, 0x0a, 0x82, 0x13, 0x1e, 0xc9, 0x48, + 0x88, 0xd7, 0xab, 0x54, 0xa7, 0xc5, 0x15, 0x25, + 0x5c, 0x39, 0x38, 0xbb, 0x10, 0xbc, 0x78, 0x4d, + 0xc9, 0xb6, 0x7f, 0x07, 0x6e, 0x34, 0x1a, 0x73 }, + { 0x6a, 0xb9, 0x05, 0x7b, 0x97, 0x7e, 0xbc, 0x3c, + 0xa4, 0xd4, 0xce, 0x74, 0x50, 0x6c, 0x25, 0xcc, + 0xcd, 0xc5, 0x66, 0x49, 0x7c, 0x45, 0x0b, 0x54, + 0x15, 0xa3, 0x94, 0x86, 0xf8, 0x65, 0x7a, 0x03 }, + { 0x24, 0x06, 0x6d, 0xee, 0xe0, 0xec, 0xee, 0x15, + 0xa4, 0x5f, 0x0a, 0x32, 0x6d, 0x0f, 0x8d, 0xbc, + 0x79, 0x76, 0x1e, 0xbb, 0x93, 0xcf, 0x8c, 0x03, + 0x77, 0xaf, 0x44, 0x09, 0x78, 0xfc, 0xf9, 0x94 }, + { 0x20, 0x00, 0x0d, 0x3f, 0x66, 0xba, 0x76, 0x86, + 0x0d, 0x5a, 0x95, 0x06, 0x88, 0xb9, 0xaa, 0x0d, + 0x76, 0xcf, 0xea, 0x59, 0xb0, 0x05, 0xd8, 0x59, + 0x91, 0x4b, 0x1a, 0x46, 0x65, 0x3a, 0x93, 0x9b }, + { 0xb9, 0x2d, 0xaa, 0x79, 0x60, 0x3e, 0x3b, 0xdb, + 0xc3, 0xbf, 0xe0, 0xf4, 0x19, 0xe4, 0x09, 0xb2, + 0xea, 0x10, 0xdc, 0x43, 0x5b, 0xee, 0xfe, 0x29, + 0x59, 0xda, 0x16, 0x89, 0x5d, 0x5d, 0xca, 0x1c }, + { 0xe9, 0x47, 0x94, 0x87, 0x05, 0xb2, 0x06, 0xd5, + 0x72, 0xb0, 0xe8, 0xf6, 0x2f, 0x66, 0xa6, 
0x55, + 0x1c, 0xbd, 0x6b, 0xc3, 0x05, 0xd2, 0x6c, 0xe7, + 0x53, 0x9a, 0x12, 0xf9, 0xaa, 0xdf, 0x75, 0x71 }, + { 0x3d, 0x67, 0xc1, 0xb3, 0xf9, 0xb2, 0x39, 0x10, + 0xe3, 0xd3, 0x5e, 0x6b, 0x0f, 0x2c, 0xcf, 0x44, + 0xa0, 0xb5, 0x40, 0xa4, 0x5c, 0x18, 0xba, 0x3c, + 0x36, 0x26, 0x4d, 0xd4, 0x8e, 0x96, 0xaf, 0x6a }, + { 0xc7, 0x55, 0x8b, 0xab, 0xda, 0x04, 0xbc, 0xcb, + 0x76, 0x4d, 0x0b, 0xbf, 0x33, 0x58, 0x42, 0x51, + 0x41, 0x90, 0x2d, 0x22, 0x39, 0x1d, 0x9f, 0x8c, + 0x59, 0x15, 0x9f, 0xec, 0x9e, 0x49, 0xb1, 0x51 }, + { 0x0b, 0x73, 0x2b, 0xb0, 0x35, 0x67, 0x5a, 0x50, + 0xff, 0x58, 0xf2, 0xc2, 0x42, 0xe4, 0x71, 0x0a, + 0xec, 0xe6, 0x46, 0x70, 0x07, 0x9c, 0x13, 0x04, + 0x4c, 0x79, 0xc9, 0xb7, 0x49, 0x1f, 0x70, 0x00 }, + { 0xd1, 0x20, 0xb5, 0xef, 0x6d, 0x57, 0xeb, 0xf0, + 0x6e, 0xaf, 0x96, 0xbc, 0x93, 0x3c, 0x96, 0x7b, + 0x16, 0xcb, 0xe6, 0xe2, 0xbf, 0x00, 0x74, 0x1c, + 0x30, 0xaa, 0x1c, 0x54, 0xba, 0x64, 0x80, 0x1f }, + { 0x58, 0xd2, 0x12, 0xad, 0x6f, 0x58, 0xae, 0xf0, + 0xf8, 0x01, 0x16, 0xb4, 0x41, 0xe5, 0x7f, 0x61, + 0x95, 0xbf, 0xef, 0x26, 0xb6, 0x14, 0x63, 0xed, + 0xec, 0x11, 0x83, 0xcd, 0xb0, 0x4f, 0xe7, 0x6d }, + { 0xb8, 0x83, 0x6f, 0x51, 0xd1, 0xe2, 0x9b, 0xdf, + 0xdb, 0xa3, 0x25, 0x56, 0x53, 0x60, 0x26, 0x8b, + 0x8f, 0xad, 0x62, 0x74, 0x73, 0xed, 0xec, 0xef, + 0x7e, 0xae, 0xfe, 0xe8, 0x37, 0xc7, 0x40, 0x03 }, + { 0xc5, 0x47, 0xa3, 0xc1, 0x24, 0xae, 0x56, 0x85, + 0xff, 0xa7, 0xb8, 0xed, 0xaf, 0x96, 0xec, 0x86, + 0xf8, 0xb2, 0xd0, 0xd5, 0x0c, 0xee, 0x8b, 0xe3, + 0xb1, 0xf0, 0xc7, 0x67, 0x63, 0x06, 0x9d, 0x9c }, + { 0x5d, 0x16, 0x8b, 0x76, 0x9a, 0x2f, 0x67, 0x85, + 0x3d, 0x62, 0x95, 0xf7, 0x56, 0x8b, 0xe4, 0x0b, + 0xb7, 0xa1, 0x6b, 0x8d, 0x65, 0xba, 0x87, 0x63, + 0x5d, 0x19, 0x78, 0xd2, 0xab, 0x11, 0xba, 0x2a }, + { 0xa2, 0xf6, 0x75, 0xdc, 0x73, 0x02, 0x63, 0x8c, + 0xb6, 0x02, 0x01, 0x06, 0x4c, 0xa5, 0x50, 0x77, + 0x71, 0x4d, 0x71, 0xfe, 0x09, 0x6a, 0x31, 0x5f, + 0x2f, 0xe7, 0x40, 0x12, 0x77, 0xca, 0xa5, 0xaf }, + { 0xc8, 0xaa, 0xb5, 0xcd, 0x01, 0x60, 0xae, 0x78, + 0xcd, 0x2e, 0x8a, 0xc5, 0xfb, 0x0e, 0x09, 0x3c, + 0xdb, 0x5c, 0x4b, 0x60, 0x52, 0xa0, 0xa9, 0x7b, + 0xb0, 0x42, 0x16, 0x82, 0x6f, 0xa7, 0xa4, 0x37 }, + { 0xff, 0x68, 0xca, 0x40, 0x35, 0xbf, 0xeb, 0x43, + 0xfb, 0xf1, 0x45, 0xfd, 0xdd, 0x5e, 0x43, 0xf1, + 0xce, 0xa5, 0x4f, 0x11, 0xf7, 0xbe, 0xe1, 0x30, + 0x58, 0xf0, 0x27, 0x32, 0x9a, 0x4a, 0x5f, 0xa4 }, + { 0x1d, 0x4e, 0x54, 0x87, 0xae, 0x3c, 0x74, 0x0f, + 0x2b, 0xa6, 0xe5, 0x41, 0xac, 0x91, 0xbc, 0x2b, + 0xfc, 0xd2, 0x99, 0x9c, 0x51, 0x8d, 0x80, 0x7b, + 0x42, 0x67, 0x48, 0x80, 0x3a, 0x35, 0x0f, 0xd4 }, + { 0x6d, 0x24, 0x4e, 0x1a, 0x06, 0xce, 0x4e, 0xf5, + 0x78, 0xdd, 0x0f, 0x63, 0xaf, 0xf0, 0x93, 0x67, + 0x06, 0x73, 0x51, 0x19, 0xca, 0x9c, 0x8d, 0x22, + 0xd8, 0x6c, 0x80, 0x14, 0x14, 0xab, 0x97, 0x41 }, + { 0xde, 0xcf, 0x73, 0x29, 0xdb, 0xcc, 0x82, 0x7b, + 0x8f, 0xc5, 0x24, 0xc9, 0x43, 0x1e, 0x89, 0x98, + 0x02, 0x9e, 0xce, 0x12, 0xce, 0x93, 0xb7, 0xb2, + 0xf3, 0xe7, 0x69, 0xa9, 0x41, 0xfb, 0x8c, 0xea }, + { 0x2f, 0xaf, 0xcc, 0x0f, 0x2e, 0x63, 0xcb, 0xd0, + 0x77, 0x55, 0xbe, 0x7b, 0x75, 0xec, 0xea, 0x0a, + 0xdf, 0xf9, 0xaa, 0x5e, 0xde, 0x2a, 0x52, 0xfd, + 0xab, 0x4d, 0xfd, 0x03, 0x74, 0xcd, 0x48, 0x3f }, + { 0xaa, 0x85, 0x01, 0x0d, 0xd4, 0x6a, 0x54, 0x6b, + 0x53, 0x5e, 0xf4, 0xcf, 0x5f, 0x07, 0xd6, 0x51, + 0x61, 0xe8, 0x98, 0x28, 0xf3, 0xa7, 0x7d, 0xb7, + 0xb9, 0xb5, 0x6f, 0x0d, 0xf5, 0x9a, 0xae, 0x45 }, + { 0x07, 0xe8, 0xe1, 0xee, 0x73, 0x2c, 0xb0, 0xd3, + 0x56, 0xc9, 0xc0, 0xd1, 0x06, 0x9c, 0x89, 0xd1, + 0x7a, 0xdf, 0x6a, 0x9a, 0x33, 0x4f, 0x74, 0x5e, + 0xc7, 0x86, 0x73, 0x32, 
0x54, 0x8c, 0xa8, 0xe9 }, + { 0x0e, 0x01, 0xe8, 0x1c, 0xad, 0xa8, 0x16, 0x2b, + 0xfd, 0x5f, 0x8a, 0x8c, 0x81, 0x8a, 0x6c, 0x69, + 0xfe, 0xdf, 0x02, 0xce, 0xb5, 0x20, 0x85, 0x23, + 0xcb, 0xe5, 0x31, 0x3b, 0x89, 0xca, 0x10, 0x53 }, + { 0x6b, 0xb6, 0xc6, 0x47, 0x26, 0x55, 0x08, 0x43, + 0x99, 0x85, 0x2e, 0x00, 0x24, 0x9f, 0x8c, 0xb2, + 0x47, 0x89, 0x6d, 0x39, 0x2b, 0x02, 0xd7, 0x3b, + 0x7f, 0x0d, 0xd8, 0x18, 0xe1, 0xe2, 0x9b, 0x07 }, + { 0x42, 0xd4, 0x63, 0x6e, 0x20, 0x60, 0xf0, 0x8f, + 0x41, 0xc8, 0x82, 0xe7, 0x6b, 0x39, 0x6b, 0x11, + 0x2e, 0xf6, 0x27, 0xcc, 0x24, 0xc4, 0x3d, 0xd5, + 0xf8, 0x3a, 0x1d, 0x1a, 0x7e, 0xad, 0x71, 0x1a }, + { 0x48, 0x58, 0xc9, 0xa1, 0x88, 0xb0, 0x23, 0x4f, + 0xb9, 0xa8, 0xd4, 0x7d, 0x0b, 0x41, 0x33, 0x65, + 0x0a, 0x03, 0x0b, 0xd0, 0x61, 0x1b, 0x87, 0xc3, + 0x89, 0x2e, 0x94, 0x95, 0x1f, 0x8d, 0xf8, 0x52 }, + { 0x3f, 0xab, 0x3e, 0x36, 0x98, 0x8d, 0x44, 0x5a, + 0x51, 0xc8, 0x78, 0x3e, 0x53, 0x1b, 0xe3, 0xa0, + 0x2b, 0xe4, 0x0c, 0xd0, 0x47, 0x96, 0xcf, 0xb6, + 0x1d, 0x40, 0x34, 0x74, 0x42, 0xd3, 0xf7, 0x94 }, + { 0xeb, 0xab, 0xc4, 0x96, 0x36, 0xbd, 0x43, 0x3d, + 0x2e, 0xc8, 0xf0, 0xe5, 0x18, 0x73, 0x2e, 0xf8, + 0xfa, 0x21, 0xd4, 0xd0, 0x71, 0xcc, 0x3b, 0xc4, + 0x6c, 0xd7, 0x9f, 0xa3, 0x8a, 0x28, 0xb8, 0x10 }, + { 0xa1, 0xd0, 0x34, 0x35, 0x23, 0xb8, 0x93, 0xfc, + 0xa8, 0x4f, 0x47, 0xfe, 0xb4, 0xa6, 0x4d, 0x35, + 0x0a, 0x17, 0xd8, 0xee, 0xf5, 0x49, 0x7e, 0xce, + 0x69, 0x7d, 0x02, 0xd7, 0x91, 0x78, 0xb5, 0x91 }, + { 0x26, 0x2e, 0xbf, 0xd9, 0x13, 0x0b, 0x7d, 0x28, + 0x76, 0x0d, 0x08, 0xef, 0x8b, 0xfd, 0x3b, 0x86, + 0xcd, 0xd3, 0xb2, 0x11, 0x3d, 0x2c, 0xae, 0xf7, + 0xea, 0x95, 0x1a, 0x30, 0x3d, 0xfa, 0x38, 0x46 }, + { 0xf7, 0x61, 0x58, 0xed, 0xd5, 0x0a, 0x15, 0x4f, + 0xa7, 0x82, 0x03, 0xed, 0x23, 0x62, 0x93, 0x2f, + 0xcb, 0x82, 0x53, 0xaa, 0xe3, 0x78, 0x90, 0x3e, + 0xde, 0xd1, 0xe0, 0x3f, 0x70, 0x21, 0xa2, 0x57 }, + { 0x26, 0x17, 0x8e, 0x95, 0x0a, 0xc7, 0x22, 0xf6, + 0x7a, 0xe5, 0x6e, 0x57, 0x1b, 0x28, 0x4c, 0x02, + 0x07, 0x68, 0x4a, 0x63, 0x34, 0xa1, 0x77, 0x48, + 0xa9, 0x4d, 0x26, 0x0b, 0xc5, 0xf5, 0x52, 0x74 }, + { 0xc3, 0x78, 0xd1, 0xe4, 0x93, 0xb4, 0x0e, 0xf1, + 0x1f, 0xe6, 0xa1, 0x5d, 0x9c, 0x27, 0x37, 0xa3, + 0x78, 0x09, 0x63, 0x4c, 0x5a, 0xba, 0xd5, 0xb3, + 0x3d, 0x7e, 0x39, 0x3b, 0x4a, 0xe0, 0x5d, 0x03 }, + { 0x98, 0x4b, 0xd8, 0x37, 0x91, 0x01, 0xbe, 0x8f, + 0xd8, 0x06, 0x12, 0xd8, 0xea, 0x29, 0x59, 0xa7, + 0x86, 0x5e, 0xc9, 0x71, 0x85, 0x23, 0x55, 0x01, + 0x07, 0xae, 0x39, 0x38, 0xdf, 0x32, 0x01, 0x1b }, + { 0xc6, 0xf2, 0x5a, 0x81, 0x2a, 0x14, 0x48, 0x58, + 0xac, 0x5c, 0xed, 0x37, 0xa9, 0x3a, 0x9f, 0x47, + 0x59, 0xba, 0x0b, 0x1c, 0x0f, 0xdc, 0x43, 0x1d, + 0xce, 0x35, 0xf9, 0xec, 0x1f, 0x1f, 0x4a, 0x99 }, + { 0x92, 0x4c, 0x75, 0xc9, 0x44, 0x24, 0xff, 0x75, + 0xe7, 0x4b, 0x8b, 0x4e, 0x94, 0x35, 0x89, 0x58, + 0xb0, 0x27, 0xb1, 0x71, 0xdf, 0x5e, 0x57, 0x89, + 0x9a, 0xd0, 0xd4, 0xda, 0xc3, 0x73, 0x53, 0xb6 }, + { 0x0a, 0xf3, 0x58, 0x92, 0xa6, 0x3f, 0x45, 0x93, + 0x1f, 0x68, 0x46, 0xed, 0x19, 0x03, 0x61, 0xcd, + 0x07, 0x30, 0x89, 0xe0, 0x77, 0x16, 0x57, 0x14, + 0xb5, 0x0b, 0x81, 0xa2, 0xe3, 0xdd, 0x9b, 0xa1 }, + { 0xcc, 0x80, 0xce, 0xfb, 0x26, 0xc3, 0xb2, 0xb0, + 0xda, 0xef, 0x23, 0x3e, 0x60, 0x6d, 0x5f, 0xfc, + 0x80, 0xfa, 0x17, 0x42, 0x7d, 0x18, 0xe3, 0x04, + 0x89, 0x67, 0x3e, 0x06, 0xef, 0x4b, 0x87, 0xf7 }, + { 0xc2, 0xf8, 0xc8, 0x11, 0x74, 0x47, 0xf3, 0x97, + 0x8b, 0x08, 0x18, 0xdc, 0xf6, 0xf7, 0x01, 0x16, + 0xac, 0x56, 0xfd, 0x18, 0x4d, 0xd1, 0x27, 0x84, + 0x94, 0xe1, 0x03, 0xfc, 0x6d, 0x74, 0xa8, 0x87 }, + { 0xbd, 0xec, 0xf6, 0xbf, 0xc1, 0xba, 0x0d, 0xf6, + 0xe8, 
0x62, 0xc8, 0x31, 0x99, 0x22, 0x07, 0x79, + 0x6a, 0xcc, 0x79, 0x79, 0x68, 0x35, 0x88, 0x28, + 0xc0, 0x6e, 0x7a, 0x51, 0xe0, 0x90, 0x09, 0x8f }, + { 0x24, 0xd1, 0xa2, 0x6e, 0x3d, 0xab, 0x02, 0xfe, + 0x45, 0x72, 0xd2, 0xaa, 0x7d, 0xbd, 0x3e, 0xc3, + 0x0f, 0x06, 0x93, 0xdb, 0x26, 0xf2, 0x73, 0xd0, + 0xab, 0x2c, 0xb0, 0xc1, 0x3b, 0x5e, 0x64, 0x51 }, + { 0xec, 0x56, 0xf5, 0x8b, 0x09, 0x29, 0x9a, 0x30, + 0x0b, 0x14, 0x05, 0x65, 0xd7, 0xd3, 0xe6, 0x87, + 0x82, 0xb6, 0xe2, 0xfb, 0xeb, 0x4b, 0x7e, 0xa9, + 0x7a, 0xc0, 0x57, 0x98, 0x90, 0x61, 0xdd, 0x3f }, + { 0x11, 0xa4, 0x37, 0xc1, 0xab, 0xa3, 0xc1, 0x19, + 0xdd, 0xfa, 0xb3, 0x1b, 0x3e, 0x8c, 0x84, 0x1d, + 0xee, 0xeb, 0x91, 0x3e, 0xf5, 0x7f, 0x7e, 0x48, + 0xf2, 0xc9, 0xcf, 0x5a, 0x28, 0xfa, 0x42, 0xbc }, + { 0x53, 0xc7, 0xe6, 0x11, 0x4b, 0x85, 0x0a, 0x2c, + 0xb4, 0x96, 0xc9, 0xb3, 0xc6, 0x9a, 0x62, 0x3e, + 0xae, 0xa2, 0xcb, 0x1d, 0x33, 0xdd, 0x81, 0x7e, + 0x47, 0x65, 0xed, 0xaa, 0x68, 0x23, 0xc2, 0x28 }, + { 0x15, 0x4c, 0x3e, 0x96, 0xfe, 0xe5, 0xdb, 0x14, + 0xf8, 0x77, 0x3e, 0x18, 0xaf, 0x14, 0x85, 0x79, + 0x13, 0x50, 0x9d, 0xa9, 0x99, 0xb4, 0x6c, 0xdd, + 0x3d, 0x4c, 0x16, 0x97, 0x60, 0xc8, 0x3a, 0xd2 }, + { 0x40, 0xb9, 0x91, 0x6f, 0x09, 0x3e, 0x02, 0x7a, + 0x87, 0x86, 0x64, 0x18, 0x18, 0x92, 0x06, 0x20, + 0x47, 0x2f, 0xbc, 0xf6, 0x8f, 0x70, 0x1d, 0x1b, + 0x68, 0x06, 0x32, 0xe6, 0x99, 0x6b, 0xde, 0xd3 }, + { 0x24, 0xc4, 0xcb, 0xba, 0x07, 0x11, 0x98, 0x31, + 0xa7, 0x26, 0xb0, 0x53, 0x05, 0xd9, 0x6d, 0xa0, + 0x2f, 0xf8, 0xb1, 0x48, 0xf0, 0xda, 0x44, 0x0f, + 0xe2, 0x33, 0xbc, 0xaa, 0x32, 0xc7, 0x2f, 0x6f }, + { 0x5d, 0x20, 0x15, 0x10, 0x25, 0x00, 0x20, 0xb7, + 0x83, 0x68, 0x96, 0x88, 0xab, 0xbf, 0x8e, 0xcf, + 0x25, 0x94, 0xa9, 0x6a, 0x08, 0xf2, 0xbf, 0xec, + 0x6c, 0xe0, 0x57, 0x44, 0x65, 0xdd, 0xed, 0x71 }, + { 0x04, 0x3b, 0x97, 0xe3, 0x36, 0xee, 0x6f, 0xdb, + 0xbe, 0x2b, 0x50, 0xf2, 0x2a, 0xf8, 0x32, 0x75, + 0xa4, 0x08, 0x48, 0x05, 0xd2, 0xd5, 0x64, 0x59, + 0x62, 0x45, 0x4b, 0x6c, 0x9b, 0x80, 0x53, 0xa0 }, + { 0x56, 0x48, 0x35, 0xcb, 0xae, 0xa7, 0x74, 0x94, + 0x85, 0x68, 0xbe, 0x36, 0xcf, 0x52, 0xfc, 0xdd, + 0x83, 0x93, 0x4e, 0xb0, 0xa2, 0x75, 0x12, 0xdb, + 0xe3, 0xe2, 0xdb, 0x47, 0xb9, 0xe6, 0x63, 0x5a }, + { 0xf2, 0x1c, 0x33, 0xf4, 0x7b, 0xde, 0x40, 0xa2, + 0xa1, 0x01, 0xc9, 0xcd, 0xe8, 0x02, 0x7a, 0xaf, + 0x61, 0xa3, 0x13, 0x7d, 0xe2, 0x42, 0x2b, 0x30, + 0x03, 0x5a, 0x04, 0xc2, 0x70, 0x89, 0x41, 0x83 }, + { 0x9d, 0xb0, 0xef, 0x74, 0xe6, 0x6c, 0xbb, 0x84, + 0x2e, 0xb0, 0xe0, 0x73, 0x43, 0xa0, 0x3c, 0x5c, + 0x56, 0x7e, 0x37, 0x2b, 0x3f, 0x23, 0xb9, 0x43, + 0xc7, 0x88, 0xa4, 0xf2, 0x50, 0xf6, 0x78, 0x91 }, + { 0xab, 0x8d, 0x08, 0x65, 0x5f, 0xf1, 0xd3, 0xfe, + 0x87, 0x58, 0xd5, 0x62, 0x23, 0x5f, 0xd2, 0x3e, + 0x7c, 0xf9, 0xdc, 0xaa, 0xd6, 0x58, 0x87, 0x2a, + 0x49, 0xe5, 0xd3, 0x18, 0x3b, 0x6c, 0xce, 0xbd }, + { 0x6f, 0x27, 0xf7, 0x7e, 0x7b, 0xcf, 0x46, 0xa1, + 0xe9, 0x63, 0xad, 0xe0, 0x30, 0x97, 0x33, 0x54, + 0x30, 0x31, 0xdc, 0xcd, 0xd4, 0x7c, 0xaa, 0xc1, + 0x74, 0xd7, 0xd2, 0x7c, 0xe8, 0x07, 0x7e, 0x8b }, + { 0xe3, 0xcd, 0x54, 0xda, 0x7e, 0x44, 0x4c, 0xaa, + 0x62, 0x07, 0x56, 0x95, 0x25, 0xa6, 0x70, 0xeb, + 0xae, 0x12, 0x78, 0xde, 0x4e, 0x3f, 0xe2, 0x68, + 0x4b, 0x3e, 0x33, 0xf5, 0xef, 0x90, 0xcc, 0x1b }, + { 0xb2, 0xc3, 0xe3, 0x3a, 0x51, 0xd2, 0x2c, 0x4c, + 0x08, 0xfc, 0x09, 0x89, 0xc8, 0x73, 0xc9, 0xcc, + 0x41, 0x50, 0x57, 0x9b, 0x1e, 0x61, 0x63, 0xfa, + 0x69, 0x4a, 0xd5, 0x1d, 0x53, 0xd7, 0x12, 0xdc }, + { 0xbe, 0x7f, 0xda, 0x98, 0x3e, 0x13, 0x18, 0x9b, + 0x4c, 0x77, 0xe0, 0xa8, 0x09, 0x20, 0xb6, 0xe0, + 0xe0, 0xea, 0x80, 0xc3, 0xb8, 0x4d, 0xbe, 
0x7e, + 0x71, 0x17, 0xd2, 0x53, 0xf4, 0x81, 0x12, 0xf4 }, + { 0xb6, 0x00, 0x8c, 0x28, 0xfa, 0xe0, 0x8a, 0xa4, + 0x27, 0xe5, 0xbd, 0x3a, 0xad, 0x36, 0xf1, 0x00, + 0x21, 0xf1, 0x6c, 0x77, 0xcf, 0xea, 0xbe, 0xd0, + 0x7f, 0x97, 0xcc, 0x7d, 0xc1, 0xf1, 0x28, 0x4a }, + { 0x6e, 0x4e, 0x67, 0x60, 0xc5, 0x38, 0xf2, 0xe9, + 0x7b, 0x3a, 0xdb, 0xfb, 0xbc, 0xde, 0x57, 0xf8, + 0x96, 0x6b, 0x7e, 0xa8, 0xfc, 0xb5, 0xbf, 0x7e, + 0xfe, 0xc9, 0x13, 0xfd, 0x2a, 0x2b, 0x0c, 0x55 }, + { 0x4a, 0xe5, 0x1f, 0xd1, 0x83, 0x4a, 0xa5, 0xbd, + 0x9a, 0x6f, 0x7e, 0xc3, 0x9f, 0xc6, 0x63, 0x33, + 0x8d, 0xc5, 0xd2, 0xe2, 0x07, 0x61, 0x56, 0x6d, + 0x90, 0xcc, 0x68, 0xb1, 0xcb, 0x87, 0x5e, 0xd8 }, + { 0xb6, 0x73, 0xaa, 0xd7, 0x5a, 0xb1, 0xfd, 0xb5, + 0x40, 0x1a, 0xbf, 0xa1, 0xbf, 0x89, 0xf3, 0xad, + 0xd2, 0xeb, 0xc4, 0x68, 0xdf, 0x36, 0x24, 0xa4, + 0x78, 0xf4, 0xfe, 0x85, 0x9d, 0x8d, 0x55, 0xe2 }, + { 0x13, 0xc9, 0x47, 0x1a, 0x98, 0x55, 0x91, 0x35, + 0x39, 0x83, 0x66, 0x60, 0x39, 0x8d, 0xa0, 0xf3, + 0xf9, 0x9a, 0xda, 0x08, 0x47, 0x9c, 0x69, 0xd1, + 0xb7, 0xfc, 0xaa, 0x34, 0x61, 0xdd, 0x7e, 0x59 }, + { 0x2c, 0x11, 0xf4, 0xa7, 0xf9, 0x9a, 0x1d, 0x23, + 0xa5, 0x8b, 0xb6, 0x36, 0x35, 0x0f, 0xe8, 0x49, + 0xf2, 0x9c, 0xba, 0xc1, 0xb2, 0xa1, 0x11, 0x2d, + 0x9f, 0x1e, 0xd5, 0xbc, 0x5b, 0x31, 0x3c, 0xcd }, + { 0xc7, 0xd3, 0xc0, 0x70, 0x6b, 0x11, 0xae, 0x74, + 0x1c, 0x05, 0xa1, 0xef, 0x15, 0x0d, 0xd6, 0x5b, + 0x54, 0x94, 0xd6, 0xd5, 0x4c, 0x9a, 0x86, 0xe2, + 0x61, 0x78, 0x54, 0xe6, 0xae, 0xee, 0xbb, 0xd9 }, + { 0x19, 0x4e, 0x10, 0xc9, 0x38, 0x93, 0xaf, 0xa0, + 0x64, 0xc3, 0xac, 0x04, 0xc0, 0xdd, 0x80, 0x8d, + 0x79, 0x1c, 0x3d, 0x4b, 0x75, 0x56, 0xe8, 0x9d, + 0x8d, 0x9c, 0xb2, 0x25, 0xc4, 0xb3, 0x33, 0x39 }, + { 0x6f, 0xc4, 0x98, 0x8b, 0x8f, 0x78, 0x54, 0x6b, + 0x16, 0x88, 0x99, 0x18, 0x45, 0x90, 0x8f, 0x13, + 0x4b, 0x6a, 0x48, 0x2e, 0x69, 0x94, 0xb3, 0xd4, + 0x83, 0x17, 0xbf, 0x08, 0xdb, 0x29, 0x21, 0x85 }, + { 0x56, 0x65, 0xbe, 0xb8, 0xb0, 0x95, 0x55, 0x25, + 0x81, 0x3b, 0x59, 0x81, 0xcd, 0x14, 0x2e, 0xd4, + 0xd0, 0x3f, 0xba, 0x38, 0xa6, 0xf3, 0xe5, 0xad, + 0x26, 0x8e, 0x0c, 0xc2, 0x70, 0xd1, 0xcd, 0x11 }, + { 0xb8, 0x83, 0xd6, 0x8f, 0x5f, 0xe5, 0x19, 0x36, + 0x43, 0x1b, 0xa4, 0x25, 0x67, 0x38, 0x05, 0x3b, + 0x1d, 0x04, 0x26, 0xd4, 0xcb, 0x64, 0xb1, 0x6e, + 0x83, 0xba, 0xdc, 0x5e, 0x9f, 0xbe, 0x3b, 0x81 }, + { 0x53, 0xe7, 0xb2, 0x7e, 0xa5, 0x9c, 0x2f, 0x6d, + 0xbb, 0x50, 0x76, 0x9e, 0x43, 0x55, 0x4d, 0xf3, + 0x5a, 0xf8, 0x9f, 0x48, 0x22, 0xd0, 0x46, 0x6b, + 0x00, 0x7d, 0xd6, 0xf6, 0xde, 0xaf, 0xff, 0x02 }, + { 0x1f, 0x1a, 0x02, 0x29, 0xd4, 0x64, 0x0f, 0x01, + 0x90, 0x15, 0x88, 0xd9, 0xde, 0xc2, 0x2d, 0x13, + 0xfc, 0x3e, 0xb3, 0x4a, 0x61, 0xb3, 0x29, 0x38, + 0xef, 0xbf, 0x53, 0x34, 0xb2, 0x80, 0x0a, 0xfa }, + { 0xc2, 0xb4, 0x05, 0xaf, 0xa0, 0xfa, 0x66, 0x68, + 0x85, 0x2a, 0xee, 0x4d, 0x88, 0x04, 0x08, 0x53, + 0xfa, 0xb8, 0x00, 0xe7, 0x2b, 0x57, 0x58, 0x14, + 0x18, 0xe5, 0x50, 0x6f, 0x21, 0x4c, 0x7d, 0x1f }, + { 0xc0, 0x8a, 0xa1, 0xc2, 0x86, 0xd7, 0x09, 0xfd, + 0xc7, 0x47, 0x37, 0x44, 0x97, 0x71, 0x88, 0xc8, + 0x95, 0xba, 0x01, 0x10, 0x14, 0x24, 0x7e, 0x4e, + 0xfa, 0x8d, 0x07, 0xe7, 0x8f, 0xec, 0x69, 0x5c }, + { 0xf0, 0x3f, 0x57, 0x89, 0xd3, 0x33, 0x6b, 0x80, + 0xd0, 0x02, 0xd5, 0x9f, 0xdf, 0x91, 0x8b, 0xdb, + 0x77, 0x5b, 0x00, 0x95, 0x6e, 0xd5, 0x52, 0x8e, + 0x86, 0xaa, 0x99, 0x4a, 0xcb, 0x38, 0xfe, 0x2d } +}; + +static const u8 blake2s_keyed_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { + { 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b, + 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b, + 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 
0x1a, + 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49 }, + { 0x40, 0xd1, 0x5f, 0xee, 0x7c, 0x32, 0x88, 0x30, + 0x16, 0x6a, 0xc3, 0xf9, 0x18, 0x65, 0x0f, 0x80, + 0x7e, 0x7e, 0x01, 0xe1, 0x77, 0x25, 0x8c, 0xdc, + 0x0a, 0x39, 0xb1, 0x1f, 0x59, 0x80, 0x66, 0xf1 }, + { 0x6b, 0xb7, 0x13, 0x00, 0x64, 0x4c, 0xd3, 0x99, + 0x1b, 0x26, 0xcc, 0xd4, 0xd2, 0x74, 0xac, 0xd1, + 0xad, 0xea, 0xb8, 0xb1, 0xd7, 0x91, 0x45, 0x46, + 0xc1, 0x19, 0x8b, 0xbe, 0x9f, 0xc9, 0xd8, 0x03 }, + { 0x1d, 0x22, 0x0d, 0xbe, 0x2e, 0xe1, 0x34, 0x66, + 0x1f, 0xdf, 0x6d, 0x9e, 0x74, 0xb4, 0x17, 0x04, + 0x71, 0x05, 0x56, 0xf2, 0xf6, 0xe5, 0xa0, 0x91, + 0xb2, 0x27, 0x69, 0x74, 0x45, 0xdb, 0xea, 0x6b }, + { 0xf6, 0xc3, 0xfb, 0xad, 0xb4, 0xcc, 0x68, 0x7a, + 0x00, 0x64, 0xa5, 0xbe, 0x6e, 0x79, 0x1b, 0xec, + 0x63, 0xb8, 0x68, 0xad, 0x62, 0xfb, 0xa6, 0x1b, + 0x37, 0x57, 0xef, 0x9c, 0xa5, 0x2e, 0x05, 0xb2 }, + { 0x49, 0xc1, 0xf2, 0x11, 0x88, 0xdf, 0xd7, 0x69, + 0xae, 0xa0, 0xe9, 0x11, 0xdd, 0x6b, 0x41, 0xf1, + 0x4d, 0xab, 0x10, 0x9d, 0x2b, 0x85, 0x97, 0x7a, + 0xa3, 0x08, 0x8b, 0x5c, 0x70, 0x7e, 0x85, 0x98 }, + { 0xfd, 0xd8, 0x99, 0x3d, 0xcd, 0x43, 0xf6, 0x96, + 0xd4, 0x4f, 0x3c, 0xea, 0x0f, 0xf3, 0x53, 0x45, + 0x23, 0x4e, 0xc8, 0xee, 0x08, 0x3e, 0xb3, 0xca, + 0xda, 0x01, 0x7c, 0x7f, 0x78, 0xc1, 0x71, 0x43 }, + { 0xe6, 0xc8, 0x12, 0x56, 0x37, 0x43, 0x8d, 0x09, + 0x05, 0xb7, 0x49, 0xf4, 0x65, 0x60, 0xac, 0x89, + 0xfd, 0x47, 0x1c, 0xf8, 0x69, 0x2e, 0x28, 0xfa, + 0xb9, 0x82, 0xf7, 0x3f, 0x01, 0x9b, 0x83, 0xa9 }, + { 0x19, 0xfc, 0x8c, 0xa6, 0x97, 0x9d, 0x60, 0xe6, + 0xed, 0xd3, 0xb4, 0x54, 0x1e, 0x2f, 0x96, 0x7c, + 0xed, 0x74, 0x0d, 0xf6, 0xec, 0x1e, 0xae, 0xbb, + 0xfe, 0x81, 0x38, 0x32, 0xe9, 0x6b, 0x29, 0x74 }, + { 0xa6, 0xad, 0x77, 0x7c, 0xe8, 0x81, 0xb5, 0x2b, + 0xb5, 0xa4, 0x42, 0x1a, 0xb6, 0xcd, 0xd2, 0xdf, + 0xba, 0x13, 0xe9, 0x63, 0x65, 0x2d, 0x4d, 0x6d, + 0x12, 0x2a, 0xee, 0x46, 0x54, 0x8c, 0x14, 0xa7 }, + { 0xf5, 0xc4, 0xb2, 0xba, 0x1a, 0x00, 0x78, 0x1b, + 0x13, 0xab, 0xa0, 0x42, 0x52, 0x42, 0xc6, 0x9c, + 0xb1, 0x55, 0x2f, 0x3f, 0x71, 0xa9, 0xa3, 0xbb, + 0x22, 0xb4, 0xa6, 0xb4, 0x27, 0x7b, 0x46, 0xdd }, + { 0xe3, 0x3c, 0x4c, 0x9b, 0xd0, 0xcc, 0x7e, 0x45, + 0xc8, 0x0e, 0x65, 0xc7, 0x7f, 0xa5, 0x99, 0x7f, + 0xec, 0x70, 0x02, 0x73, 0x85, 0x41, 0x50, 0x9e, + 0x68, 0xa9, 0x42, 0x38, 0x91, 0xe8, 0x22, 0xa3 }, + { 0xfb, 0xa1, 0x61, 0x69, 0xb2, 0xc3, 0xee, 0x10, + 0x5b, 0xe6, 0xe1, 0xe6, 0x50, 0xe5, 0xcb, 0xf4, + 0x07, 0x46, 0xb6, 0x75, 0x3d, 0x03, 0x6a, 0xb5, + 0x51, 0x79, 0x01, 0x4a, 0xd7, 0xef, 0x66, 0x51 }, + { 0xf5, 0xc4, 0xbe, 0xc6, 0xd6, 0x2f, 0xc6, 0x08, + 0xbf, 0x41, 0xcc, 0x11, 0x5f, 0x16, 0xd6, 0x1c, + 0x7e, 0xfd, 0x3f, 0xf6, 0xc6, 0x56, 0x92, 0xbb, + 0xe0, 0xaf, 0xff, 0xb1, 0xfe, 0xde, 0x74, 0x75 }, + { 0xa4, 0x86, 0x2e, 0x76, 0xdb, 0x84, 0x7f, 0x05, + 0xba, 0x17, 0xed, 0xe5, 0xda, 0x4e, 0x7f, 0x91, + 0xb5, 0x92, 0x5c, 0xf1, 0xad, 0x4b, 0xa1, 0x27, + 0x32, 0xc3, 0x99, 0x57, 0x42, 0xa5, 0xcd, 0x6e }, + { 0x65, 0xf4, 0xb8, 0x60, 0xcd, 0x15, 0xb3, 0x8e, + 0xf8, 0x14, 0xa1, 0xa8, 0x04, 0x31, 0x4a, 0x55, + 0xbe, 0x95, 0x3c, 0xaa, 0x65, 0xfd, 0x75, 0x8a, + 0xd9, 0x89, 0xff, 0x34, 0xa4, 0x1c, 0x1e, 0xea }, + { 0x19, 0xba, 0x23, 0x4f, 0x0a, 0x4f, 0x38, 0x63, + 0x7d, 0x18, 0x39, 0xf9, 0xd9, 0xf7, 0x6a, 0xd9, + 0x1c, 0x85, 0x22, 0x30, 0x71, 0x43, 0xc9, 0x7d, + 0x5f, 0x93, 0xf6, 0x92, 0x74, 0xce, 0xc9, 0xa7 }, + { 0x1a, 0x67, 0x18, 0x6c, 0xa4, 0xa5, 0xcb, 0x8e, + 0x65, 0xfc, 0xa0, 0xe2, 0xec, 0xbc, 0x5d, 0xdc, + 0x14, 0xae, 0x38, 0x1b, 0xb8, 0xbf, 0xfe, 0xb9, + 0xe0, 0xa1, 0x03, 0x44, 0x9e, 0x3e, 0xf0, 0x3c }, + { 0xaf, 0xbe, 0xa3, 0x17, 
0xb5, 0xa2, 0xe8, 0x9c, + 0x0b, 0xd9, 0x0c, 0xcf, 0x5d, 0x7f, 0xd0, 0xed, + 0x57, 0xfe, 0x58, 0x5e, 0x4b, 0xe3, 0x27, 0x1b, + 0x0a, 0x6b, 0xf0, 0xf5, 0x78, 0x6b, 0x0f, 0x26 }, + { 0xf1, 0xb0, 0x15, 0x58, 0xce, 0x54, 0x12, 0x62, + 0xf5, 0xec, 0x34, 0x29, 0x9d, 0x6f, 0xb4, 0x09, + 0x00, 0x09, 0xe3, 0x43, 0x4b, 0xe2, 0xf4, 0x91, + 0x05, 0xcf, 0x46, 0xaf, 0x4d, 0x2d, 0x41, 0x24 }, + { 0x13, 0xa0, 0xa0, 0xc8, 0x63, 0x35, 0x63, 0x5e, + 0xaa, 0x74, 0xca, 0x2d, 0x5d, 0x48, 0x8c, 0x79, + 0x7b, 0xbb, 0x4f, 0x47, 0xdc, 0x07, 0x10, 0x50, + 0x15, 0xed, 0x6a, 0x1f, 0x33, 0x09, 0xef, 0xce }, + { 0x15, 0x80, 0xaf, 0xee, 0xbe, 0xbb, 0x34, 0x6f, + 0x94, 0xd5, 0x9f, 0xe6, 0x2d, 0xa0, 0xb7, 0x92, + 0x37, 0xea, 0xd7, 0xb1, 0x49, 0x1f, 0x56, 0x67, + 0xa9, 0x0e, 0x45, 0xed, 0xf6, 0xca, 0x8b, 0x03 }, + { 0x20, 0xbe, 0x1a, 0x87, 0x5b, 0x38, 0xc5, 0x73, + 0xdd, 0x7f, 0xaa, 0xa0, 0xde, 0x48, 0x9d, 0x65, + 0x5c, 0x11, 0xef, 0xb6, 0xa5, 0x52, 0x69, 0x8e, + 0x07, 0xa2, 0xd3, 0x31, 0xb5, 0xf6, 0x55, 0xc3 }, + { 0xbe, 0x1f, 0xe3, 0xc4, 0xc0, 0x40, 0x18, 0xc5, + 0x4c, 0x4a, 0x0f, 0x6b, 0x9a, 0x2e, 0xd3, 0xc5, + 0x3a, 0xbe, 0x3a, 0x9f, 0x76, 0xb4, 0xd2, 0x6d, + 0xe5, 0x6f, 0xc9, 0xae, 0x95, 0x05, 0x9a, 0x99 }, + { 0xe3, 0xe3, 0xac, 0xe5, 0x37, 0xeb, 0x3e, 0xdd, + 0x84, 0x63, 0xd9, 0xad, 0x35, 0x82, 0xe1, 0x3c, + 0xf8, 0x65, 0x33, 0xff, 0xde, 0x43, 0xd6, 0x68, + 0xdd, 0x2e, 0x93, 0xbb, 0xdb, 0xd7, 0x19, 0x5a }, + { 0x11, 0x0c, 0x50, 0xc0, 0xbf, 0x2c, 0x6e, 0x7a, + 0xeb, 0x7e, 0x43, 0x5d, 0x92, 0xd1, 0x32, 0xab, + 0x66, 0x55, 0x16, 0x8e, 0x78, 0xa2, 0xde, 0xcd, + 0xec, 0x33, 0x30, 0x77, 0x76, 0x84, 0xd9, 0xc1 }, + { 0xe9, 0xba, 0x8f, 0x50, 0x5c, 0x9c, 0x80, 0xc0, + 0x86, 0x66, 0xa7, 0x01, 0xf3, 0x36, 0x7e, 0x6c, + 0xc6, 0x65, 0xf3, 0x4b, 0x22, 0xe7, 0x3c, 0x3c, + 0x04, 0x17, 0xeb, 0x1c, 0x22, 0x06, 0x08, 0x2f }, + { 0x26, 0xcd, 0x66, 0xfc, 0xa0, 0x23, 0x79, 0xc7, + 0x6d, 0xf1, 0x23, 0x17, 0x05, 0x2b, 0xca, 0xfd, + 0x6c, 0xd8, 0xc3, 0xa7, 0xb8, 0x90, 0xd8, 0x05, + 0xf3, 0x6c, 0x49, 0x98, 0x97, 0x82, 0x43, 0x3a }, + { 0x21, 0x3f, 0x35, 0x96, 0xd6, 0xe3, 0xa5, 0xd0, + 0xe9, 0x93, 0x2c, 0xd2, 0x15, 0x91, 0x46, 0x01, + 0x5e, 0x2a, 0xbc, 0x94, 0x9f, 0x47, 0x29, 0xee, + 0x26, 0x32, 0xfe, 0x1e, 0xdb, 0x78, 0xd3, 0x37 }, + { 0x10, 0x15, 0xd7, 0x01, 0x08, 0xe0, 0x3b, 0xe1, + 0xc7, 0x02, 0xfe, 0x97, 0x25, 0x36, 0x07, 0xd1, + 0x4a, 0xee, 0x59, 0x1f, 0x24, 0x13, 0xea, 0x67, + 0x87, 0x42, 0x7b, 0x64, 0x59, 0xff, 0x21, 0x9a }, + { 0x3c, 0xa9, 0x89, 0xde, 0x10, 0xcf, 0xe6, 0x09, + 0x90, 0x94, 0x72, 0xc8, 0xd3, 0x56, 0x10, 0x80, + 0x5b, 0x2f, 0x97, 0x77, 0x34, 0xcf, 0x65, 0x2c, + 0xc6, 0x4b, 0x3b, 0xfc, 0x88, 0x2d, 0x5d, 0x89 }, + { 0xb6, 0x15, 0x6f, 0x72, 0xd3, 0x80, 0xee, 0x9e, + 0xa6, 0xac, 0xd1, 0x90, 0x46, 0x4f, 0x23, 0x07, + 0xa5, 0xc1, 0x79, 0xef, 0x01, 0xfd, 0x71, 0xf9, + 0x9f, 0x2d, 0x0f, 0x7a, 0x57, 0x36, 0x0a, 0xea }, + { 0xc0, 0x3b, 0xc6, 0x42, 0xb2, 0x09, 0x59, 0xcb, + 0xe1, 0x33, 0xa0, 0x30, 0x3e, 0x0c, 0x1a, 0xbf, + 0xf3, 0xe3, 0x1e, 0xc8, 0xe1, 0xa3, 0x28, 0xec, + 0x85, 0x65, 0xc3, 0x6d, 0xec, 0xff, 0x52, 0x65 }, + { 0x2c, 0x3e, 0x08, 0x17, 0x6f, 0x76, 0x0c, 0x62, + 0x64, 0xc3, 0xa2, 0xcd, 0x66, 0xfe, 0xc6, 0xc3, + 0xd7, 0x8d, 0xe4, 0x3f, 0xc1, 0x92, 0x45, 0x7b, + 0x2a, 0x4a, 0x66, 0x0a, 0x1e, 0x0e, 0xb2, 0x2b }, + { 0xf7, 0x38, 0xc0, 0x2f, 0x3c, 0x1b, 0x19, 0x0c, + 0x51, 0x2b, 0x1a, 0x32, 0xde, 0xab, 0xf3, 0x53, + 0x72, 0x8e, 0x0e, 0x9a, 0xb0, 0x34, 0x49, 0x0e, + 0x3c, 0x34, 0x09, 0x94, 0x6a, 0x97, 0xae, 0xec }, + { 0x8b, 0x18, 0x80, 0xdf, 0x30, 0x1c, 0xc9, 0x63, + 0x41, 0x88, 0x11, 0x08, 0x89, 0x64, 0x83, 0x92, + 0x87, 
0xff, 0x7f, 0xe3, 0x1c, 0x49, 0xea, 0x6e, + 0xbd, 0x9e, 0x48, 0xbd, 0xee, 0xe4, 0x97, 0xc5 }, + { 0x1e, 0x75, 0xcb, 0x21, 0xc6, 0x09, 0x89, 0x02, + 0x03, 0x75, 0xf1, 0xa7, 0xa2, 0x42, 0x83, 0x9f, + 0x0b, 0x0b, 0x68, 0x97, 0x3a, 0x4c, 0x2a, 0x05, + 0xcf, 0x75, 0x55, 0xed, 0x5a, 0xae, 0xc4, 0xc1 }, + { 0x62, 0xbf, 0x8a, 0x9c, 0x32, 0xa5, 0xbc, 0xcf, + 0x29, 0x0b, 0x6c, 0x47, 0x4d, 0x75, 0xb2, 0xa2, + 0xa4, 0x09, 0x3f, 0x1a, 0x9e, 0x27, 0x13, 0x94, + 0x33, 0xa8, 0xf2, 0xb3, 0xbc, 0xe7, 0xb8, 0xd7 }, + { 0x16, 0x6c, 0x83, 0x50, 0xd3, 0x17, 0x3b, 0x5e, + 0x70, 0x2b, 0x78, 0x3d, 0xfd, 0x33, 0xc6, 0x6e, + 0xe0, 0x43, 0x27, 0x42, 0xe9, 0xb9, 0x2b, 0x99, + 0x7f, 0xd2, 0x3c, 0x60, 0xdc, 0x67, 0x56, 0xca }, + { 0x04, 0x4a, 0x14, 0xd8, 0x22, 0xa9, 0x0c, 0xac, + 0xf2, 0xf5, 0xa1, 0x01, 0x42, 0x8a, 0xdc, 0x8f, + 0x41, 0x09, 0x38, 0x6c, 0xcb, 0x15, 0x8b, 0xf9, + 0x05, 0xc8, 0x61, 0x8b, 0x8e, 0xe2, 0x4e, 0xc3 }, + { 0x38, 0x7d, 0x39, 0x7e, 0xa4, 0x3a, 0x99, 0x4b, + 0xe8, 0x4d, 0x2d, 0x54, 0x4a, 0xfb, 0xe4, 0x81, + 0xa2, 0x00, 0x0f, 0x55, 0x25, 0x26, 0x96, 0xbb, + 0xa2, 0xc5, 0x0c, 0x8e, 0xbd, 0x10, 0x13, 0x47 }, + { 0x56, 0xf8, 0xcc, 0xf1, 0xf8, 0x64, 0x09, 0xb4, + 0x6c, 0xe3, 0x61, 0x66, 0xae, 0x91, 0x65, 0x13, + 0x84, 0x41, 0x57, 0x75, 0x89, 0xdb, 0x08, 0xcb, + 0xc5, 0xf6, 0x6c, 0xa2, 0x97, 0x43, 0xb9, 0xfd }, + { 0x97, 0x06, 0xc0, 0x92, 0xb0, 0x4d, 0x91, 0xf5, + 0x3d, 0xff, 0x91, 0xfa, 0x37, 0xb7, 0x49, 0x3d, + 0x28, 0xb5, 0x76, 0xb5, 0xd7, 0x10, 0x46, 0x9d, + 0xf7, 0x94, 0x01, 0x66, 0x22, 0x36, 0xfc, 0x03 }, + { 0x87, 0x79, 0x68, 0x68, 0x6c, 0x06, 0x8c, 0xe2, + 0xf7, 0xe2, 0xad, 0xcf, 0xf6, 0x8b, 0xf8, 0x74, + 0x8e, 0xdf, 0x3c, 0xf8, 0x62, 0xcf, 0xb4, 0xd3, + 0x94, 0x7a, 0x31, 0x06, 0x95, 0x80, 0x54, 0xe3 }, + { 0x88, 0x17, 0xe5, 0x71, 0x98, 0x79, 0xac, 0xf7, + 0x02, 0x47, 0x87, 0xec, 0xcd, 0xb2, 0x71, 0x03, + 0x55, 0x66, 0xcf, 0xa3, 0x33, 0xe0, 0x49, 0x40, + 0x7c, 0x01, 0x78, 0xcc, 0xc5, 0x7a, 0x5b, 0x9f }, + { 0x89, 0x38, 0x24, 0x9e, 0x4b, 0x50, 0xca, 0xda, + 0xcc, 0xdf, 0x5b, 0x18, 0x62, 0x13, 0x26, 0xcb, + 0xb1, 0x52, 0x53, 0xe3, 0x3a, 0x20, 0xf5, 0x63, + 0x6e, 0x99, 0x5d, 0x72, 0x47, 0x8d, 0xe4, 0x72 }, + { 0xf1, 0x64, 0xab, 0xba, 0x49, 0x63, 0xa4, 0x4d, + 0x10, 0x72, 0x57, 0xe3, 0x23, 0x2d, 0x90, 0xac, + 0xa5, 0xe6, 0x6a, 0x14, 0x08, 0x24, 0x8c, 0x51, + 0x74, 0x1e, 0x99, 0x1d, 0xb5, 0x22, 0x77, 0x56 }, + { 0xd0, 0x55, 0x63, 0xe2, 0xb1, 0xcb, 0xa0, 0xc4, + 0xa2, 0xa1, 0xe8, 0xbd, 0xe3, 0xa1, 0xa0, 0xd9, + 0xf5, 0xb4, 0x0c, 0x85, 0xa0, 0x70, 0xd6, 0xf5, + 0xfb, 0x21, 0x06, 0x6e, 0xad, 0x5d, 0x06, 0x01 }, + { 0x03, 0xfb, 0xb1, 0x63, 0x84, 0xf0, 0xa3, 0x86, + 0x6f, 0x4c, 0x31, 0x17, 0x87, 0x76, 0x66, 0xef, + 0xbf, 0x12, 0x45, 0x97, 0x56, 0x4b, 0x29, 0x3d, + 0x4a, 0xab, 0x0d, 0x26, 0x9f, 0xab, 0xdd, 0xfa }, + { 0x5f, 0xa8, 0x48, 0x6a, 0xc0, 0xe5, 0x29, 0x64, + 0xd1, 0x88, 0x1b, 0xbe, 0x33, 0x8e, 0xb5, 0x4b, + 0xe2, 0xf7, 0x19, 0x54, 0x92, 0x24, 0x89, 0x20, + 0x57, 0xb4, 0xda, 0x04, 0xba, 0x8b, 0x34, 0x75 }, + { 0xcd, 0xfa, 0xbc, 0xee, 0x46, 0x91, 0x11, 0x11, + 0x23, 0x6a, 0x31, 0x70, 0x8b, 0x25, 0x39, 0xd7, + 0x1f, 0xc2, 0x11, 0xd9, 0xb0, 0x9c, 0x0d, 0x85, + 0x30, 0xa1, 0x1e, 0x1d, 0xbf, 0x6e, 0xed, 0x01 }, + { 0x4f, 0x82, 0xde, 0x03, 0xb9, 0x50, 0x47, 0x93, + 0xb8, 0x2a, 0x07, 0xa0, 0xbd, 0xcd, 0xff, 0x31, + 0x4d, 0x75, 0x9e, 0x7b, 0x62, 0xd2, 0x6b, 0x78, + 0x49, 0x46, 0xb0, 0xd3, 0x6f, 0x91, 0x6f, 0x52 }, + { 0x25, 0x9e, 0xc7, 0xf1, 0x73, 0xbc, 0xc7, 0x6a, + 0x09, 0x94, 0xc9, 0x67, 0xb4, 0xf5, 0xf0, 0x24, + 0xc5, 0x60, 0x57, 0xfb, 0x79, 0xc9, 0x65, 0xc4, + 0xfa, 0xe4, 0x18, 0x75, 0xf0, 0x6a, 0x0e, 
0x4c }, + { 0x19, 0x3c, 0xc8, 0xe7, 0xc3, 0xe0, 0x8b, 0xb3, + 0x0f, 0x54, 0x37, 0xaa, 0x27, 0xad, 0xe1, 0xf1, + 0x42, 0x36, 0x9b, 0x24, 0x6a, 0x67, 0x5b, 0x23, + 0x83, 0xe6, 0xda, 0x9b, 0x49, 0xa9, 0x80, 0x9e }, + { 0x5c, 0x10, 0x89, 0x6f, 0x0e, 0x28, 0x56, 0xb2, + 0xa2, 0xee, 0xe0, 0xfe, 0x4a, 0x2c, 0x16, 0x33, + 0x56, 0x5d, 0x18, 0xf0, 0xe9, 0x3e, 0x1f, 0xab, + 0x26, 0xc3, 0x73, 0xe8, 0xf8, 0x29, 0x65, 0x4d }, + { 0xf1, 0x60, 0x12, 0xd9, 0x3f, 0x28, 0x85, 0x1a, + 0x1e, 0xb9, 0x89, 0xf5, 0xd0, 0xb4, 0x3f, 0x3f, + 0x39, 0xca, 0x73, 0xc9, 0xa6, 0x2d, 0x51, 0x81, + 0xbf, 0xf2, 0x37, 0x53, 0x6b, 0xd3, 0x48, 0xc3 }, + { 0x29, 0x66, 0xb3, 0xcf, 0xae, 0x1e, 0x44, 0xea, + 0x99, 0x6d, 0xc5, 0xd6, 0x86, 0xcf, 0x25, 0xfa, + 0x05, 0x3f, 0xb6, 0xf6, 0x72, 0x01, 0xb9, 0xe4, + 0x6e, 0xad, 0xe8, 0x5d, 0x0a, 0xd6, 0xb8, 0x06 }, + { 0xdd, 0xb8, 0x78, 0x24, 0x85, 0xe9, 0x00, 0xbc, + 0x60, 0xbc, 0xf4, 0xc3, 0x3a, 0x6f, 0xd5, 0x85, + 0x68, 0x0c, 0xc6, 0x83, 0xd5, 0x16, 0xef, 0xa0, + 0x3e, 0xb9, 0x98, 0x5f, 0xad, 0x87, 0x15, 0xfb }, + { 0x4c, 0x4d, 0x6e, 0x71, 0xae, 0xa0, 0x57, 0x86, + 0x41, 0x31, 0x48, 0xfc, 0x7a, 0x78, 0x6b, 0x0e, + 0xca, 0xf5, 0x82, 0xcf, 0xf1, 0x20, 0x9f, 0x5a, + 0x80, 0x9f, 0xba, 0x85, 0x04, 0xce, 0x66, 0x2c }, + { 0xfb, 0x4c, 0x5e, 0x86, 0xd7, 0xb2, 0x22, 0x9b, + 0x99, 0xb8, 0xba, 0x6d, 0x94, 0xc2, 0x47, 0xef, + 0x96, 0x4a, 0xa3, 0xa2, 0xba, 0xe8, 0xed, 0xc7, + 0x75, 0x69, 0xf2, 0x8d, 0xbb, 0xff, 0x2d, 0x4e }, + { 0xe9, 0x4f, 0x52, 0x6d, 0xe9, 0x01, 0x96, 0x33, + 0xec, 0xd5, 0x4a, 0xc6, 0x12, 0x0f, 0x23, 0x95, + 0x8d, 0x77, 0x18, 0xf1, 0xe7, 0x71, 0x7b, 0xf3, + 0x29, 0x21, 0x1a, 0x4f, 0xae, 0xed, 0x4e, 0x6d }, + { 0xcb, 0xd6, 0x66, 0x0a, 0x10, 0xdb, 0x3f, 0x23, + 0xf7, 0xa0, 0x3d, 0x4b, 0x9d, 0x40, 0x44, 0xc7, + 0x93, 0x2b, 0x28, 0x01, 0xac, 0x89, 0xd6, 0x0b, + 0xc9, 0xeb, 0x92, 0xd6, 0x5a, 0x46, 0xc2, 0xa0 }, + { 0x88, 0x18, 0xbb, 0xd3, 0xdb, 0x4d, 0xc1, 0x23, + 0xb2, 0x5c, 0xbb, 0xa5, 0xf5, 0x4c, 0x2b, 0xc4, + 0xb3, 0xfc, 0xf9, 0xbf, 0x7d, 0x7a, 0x77, 0x09, + 0xf4, 0xae, 0x58, 0x8b, 0x26, 0x7c, 0x4e, 0xce }, + { 0xc6, 0x53, 0x82, 0x51, 0x3f, 0x07, 0x46, 0x0d, + 0xa3, 0x98, 0x33, 0xcb, 0x66, 0x6c, 0x5e, 0xd8, + 0x2e, 0x61, 0xb9, 0xe9, 0x98, 0xf4, 0xb0, 0xc4, + 0x28, 0x7c, 0xee, 0x56, 0xc3, 0xcc, 0x9b, 0xcd }, + { 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66, + 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26, + 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab, + 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4 }, + { 0x21, 0xfe, 0x0c, 0xeb, 0x00, 0x52, 0xbe, 0x7f, + 0xb0, 0xf0, 0x04, 0x18, 0x7c, 0xac, 0xd7, 0xde, + 0x67, 0xfa, 0x6e, 0xb0, 0x93, 0x8d, 0x92, 0x76, + 0x77, 0xf2, 0x39, 0x8c, 0x13, 0x23, 0x17, 0xa8 }, + { 0x2e, 0xf7, 0x3f, 0x3c, 0x26, 0xf1, 0x2d, 0x93, + 0x88, 0x9f, 0x3c, 0x78, 0xb6, 0xa6, 0x6c, 0x1d, + 0x52, 0xb6, 0x49, 0xdc, 0x9e, 0x85, 0x6e, 0x2c, + 0x17, 0x2e, 0xa7, 0xc5, 0x8a, 0xc2, 0xb5, 0xe3 }, + { 0x38, 0x8a, 0x3c, 0xd5, 0x6d, 0x73, 0x86, 0x7a, + 0xbb, 0x5f, 0x84, 0x01, 0x49, 0x2b, 0x6e, 0x26, + 0x81, 0xeb, 0x69, 0x85, 0x1e, 0x76, 0x7f, 0xd8, + 0x42, 0x10, 0xa5, 0x60, 0x76, 0xfb, 0x3d, 0xd3 }, + { 0xaf, 0x53, 0x3e, 0x02, 0x2f, 0xc9, 0x43, 0x9e, + 0x4e, 0x3c, 0xb8, 0x38, 0xec, 0xd1, 0x86, 0x92, + 0x23, 0x2a, 0xdf, 0x6f, 0xe9, 0x83, 0x95, 0x26, + 0xd3, 0xc3, 0xdd, 0x1b, 0x71, 0x91, 0x0b, 0x1a }, + { 0x75, 0x1c, 0x09, 0xd4, 0x1a, 0x93, 0x43, 0x88, + 0x2a, 0x81, 0xcd, 0x13, 0xee, 0x40, 0x81, 0x8d, + 0x12, 0xeb, 0x44, 0xc6, 0xc7, 0xf4, 0x0d, 0xf1, + 0x6e, 0x4a, 0xea, 0x8f, 0xab, 0x91, 0x97, 0x2a }, + { 0x5b, 0x73, 0xdd, 0xb6, 0x8d, 0x9d, 0x2b, 0x0a, + 0xa2, 0x65, 0xa0, 0x79, 
0x88, 0xd6, 0xb8, 0x8a, + 0xe9, 0xaa, 0xc5, 0x82, 0xaf, 0x83, 0x03, 0x2f, + 0x8a, 0x9b, 0x21, 0xa2, 0xe1, 0xb7, 0xbf, 0x18 }, + { 0x3d, 0xa2, 0x91, 0x26, 0xc7, 0xc5, 0xd7, 0xf4, + 0x3e, 0x64, 0x24, 0x2a, 0x79, 0xfe, 0xaa, 0x4e, + 0xf3, 0x45, 0x9c, 0xde, 0xcc, 0xc8, 0x98, 0xed, + 0x59, 0xa9, 0x7f, 0x6e, 0xc9, 0x3b, 0x9d, 0xab }, + { 0x56, 0x6d, 0xc9, 0x20, 0x29, 0x3d, 0xa5, 0xcb, + 0x4f, 0xe0, 0xaa, 0x8a, 0xbd, 0xa8, 0xbb, 0xf5, + 0x6f, 0x55, 0x23, 0x13, 0xbf, 0xf1, 0x90, 0x46, + 0x64, 0x1e, 0x36, 0x15, 0xc1, 0xe3, 0xed, 0x3f }, + { 0x41, 0x15, 0xbe, 0xa0, 0x2f, 0x73, 0xf9, 0x7f, + 0x62, 0x9e, 0x5c, 0x55, 0x90, 0x72, 0x0c, 0x01, + 0xe7, 0xe4, 0x49, 0xae, 0x2a, 0x66, 0x97, 0xd4, + 0xd2, 0x78, 0x33, 0x21, 0x30, 0x36, 0x92, 0xf9 }, + { 0x4c, 0xe0, 0x8f, 0x47, 0x62, 0x46, 0x8a, 0x76, + 0x70, 0x01, 0x21, 0x64, 0x87, 0x8d, 0x68, 0x34, + 0x0c, 0x52, 0xa3, 0x5e, 0x66, 0xc1, 0x88, 0x4d, + 0x5c, 0x86, 0x48, 0x89, 0xab, 0xc9, 0x66, 0x77 }, + { 0x81, 0xea, 0x0b, 0x78, 0x04, 0x12, 0x4e, 0x0c, + 0x22, 0xea, 0x5f, 0xc7, 0x11, 0x04, 0xa2, 0xaf, + 0xcb, 0x52, 0xa1, 0xfa, 0x81, 0x6f, 0x3e, 0xcb, + 0x7d, 0xcb, 0x5d, 0x9d, 0xea, 0x17, 0x86, 0xd0 }, + { 0xfe, 0x36, 0x27, 0x33, 0xb0, 0x5f, 0x6b, 0xed, + 0xaf, 0x93, 0x79, 0xd7, 0xf7, 0x93, 0x6e, 0xde, + 0x20, 0x9b, 0x1f, 0x83, 0x23, 0xc3, 0x92, 0x25, + 0x49, 0xd9, 0xe7, 0x36, 0x81, 0xb5, 0xdb, 0x7b }, + { 0xef, 0xf3, 0x7d, 0x30, 0xdf, 0xd2, 0x03, 0x59, + 0xbe, 0x4e, 0x73, 0xfd, 0xf4, 0x0d, 0x27, 0x73, + 0x4b, 0x3d, 0xf9, 0x0a, 0x97, 0xa5, 0x5e, 0xd7, + 0x45, 0x29, 0x72, 0x94, 0xca, 0x85, 0xd0, 0x9f }, + { 0x17, 0x2f, 0xfc, 0x67, 0x15, 0x3d, 0x12, 0xe0, + 0xca, 0x76, 0xa8, 0xb6, 0xcd, 0x5d, 0x47, 0x31, + 0x88, 0x5b, 0x39, 0xce, 0x0c, 0xac, 0x93, 0xa8, + 0x97, 0x2a, 0x18, 0x00, 0x6c, 0x8b, 0x8b, 0xaf }, + { 0xc4, 0x79, 0x57, 0xf1, 0xcc, 0x88, 0xe8, 0x3e, + 0xf9, 0x44, 0x58, 0x39, 0x70, 0x9a, 0x48, 0x0a, + 0x03, 0x6b, 0xed, 0x5f, 0x88, 0xac, 0x0f, 0xcc, + 0x8e, 0x1e, 0x70, 0x3f, 0xfa, 0xac, 0x13, 0x2c }, + { 0x30, 0xf3, 0x54, 0x83, 0x70, 0xcf, 0xdc, 0xed, + 0xa5, 0xc3, 0x7b, 0x56, 0x9b, 0x61, 0x75, 0xe7, + 0x99, 0xee, 0xf1, 0xa6, 0x2a, 0xaa, 0x94, 0x32, + 0x45, 0xae, 0x76, 0x69, 0xc2, 0x27, 0xa7, 0xb5 }, + { 0xc9, 0x5d, 0xcb, 0x3c, 0xf1, 0xf2, 0x7d, 0x0e, + 0xef, 0x2f, 0x25, 0xd2, 0x41, 0x38, 0x70, 0x90, + 0x4a, 0x87, 0x7c, 0x4a, 0x56, 0xc2, 0xde, 0x1e, + 0x83, 0xe2, 0xbc, 0x2a, 0xe2, 0xe4, 0x68, 0x21 }, + { 0xd5, 0xd0, 0xb5, 0xd7, 0x05, 0x43, 0x4c, 0xd4, + 0x6b, 0x18, 0x57, 0x49, 0xf6, 0x6b, 0xfb, 0x58, + 0x36, 0xdc, 0xdf, 0x6e, 0xe5, 0x49, 0xa2, 0xb7, + 0xa4, 0xae, 0xe7, 0xf5, 0x80, 0x07, 0xca, 0xaf }, + { 0xbb, 0xc1, 0x24, 0xa7, 0x12, 0xf1, 0x5d, 0x07, + 0xc3, 0x00, 0xe0, 0x5b, 0x66, 0x83, 0x89, 0xa4, + 0x39, 0xc9, 0x17, 0x77, 0xf7, 0x21, 0xf8, 0x32, + 0x0c, 0x1c, 0x90, 0x78, 0x06, 0x6d, 0x2c, 0x7e }, + { 0xa4, 0x51, 0xb4, 0x8c, 0x35, 0xa6, 0xc7, 0x85, + 0x4c, 0xfa, 0xae, 0x60, 0x26, 0x2e, 0x76, 0x99, + 0x08, 0x16, 0x38, 0x2a, 0xc0, 0x66, 0x7e, 0x5a, + 0x5c, 0x9e, 0x1b, 0x46, 0xc4, 0x34, 0x2d, 0xdf }, + { 0xb0, 0xd1, 0x50, 0xfb, 0x55, 0xe7, 0x78, 0xd0, + 0x11, 0x47, 0xf0, 0xb5, 0xd8, 0x9d, 0x99, 0xec, + 0xb2, 0x0f, 0xf0, 0x7e, 0x5e, 0x67, 0x60, 0xd6, + 0xb6, 0x45, 0xeb, 0x5b, 0x65, 0x4c, 0x62, 0x2b }, + { 0x34, 0xf7, 0x37, 0xc0, 0xab, 0x21, 0x99, 0x51, + 0xee, 0xe8, 0x9a, 0x9f, 0x8d, 0xac, 0x29, 0x9c, + 0x9d, 0x4c, 0x38, 0xf3, 0x3f, 0xa4, 0x94, 0xc5, + 0xc6, 0xee, 0xfc, 0x92, 0xb6, 0xdb, 0x08, 0xbc }, + { 0x1a, 0x62, 0xcc, 0x3a, 0x00, 0x80, 0x0d, 0xcb, + 0xd9, 0x98, 0x91, 0x08, 0x0c, 0x1e, 0x09, 0x84, + 0x58, 0x19, 0x3a, 0x8c, 0xc9, 0xf9, 0x70, 0xea, + 0x99, 
0xfb, 0xef, 0xf0, 0x03, 0x18, 0xc2, 0x89 }, + { 0xcf, 0xce, 0x55, 0xeb, 0xaf, 0xc8, 0x40, 0xd7, + 0xae, 0x48, 0x28, 0x1c, 0x7f, 0xd5, 0x7e, 0xc8, + 0xb4, 0x82, 0xd4, 0xb7, 0x04, 0x43, 0x74, 0x95, + 0x49, 0x5a, 0xc4, 0x14, 0xcf, 0x4a, 0x37, 0x4b }, + { 0x67, 0x46, 0xfa, 0xcf, 0x71, 0x14, 0x6d, 0x99, + 0x9d, 0xab, 0xd0, 0x5d, 0x09, 0x3a, 0xe5, 0x86, + 0x64, 0x8d, 0x1e, 0xe2, 0x8e, 0x72, 0x61, 0x7b, + 0x99, 0xd0, 0xf0, 0x08, 0x6e, 0x1e, 0x45, 0xbf }, + { 0x57, 0x1c, 0xed, 0x28, 0x3b, 0x3f, 0x23, 0xb4, + 0xe7, 0x50, 0xbf, 0x12, 0xa2, 0xca, 0xf1, 0x78, + 0x18, 0x47, 0xbd, 0x89, 0x0e, 0x43, 0x60, 0x3c, + 0xdc, 0x59, 0x76, 0x10, 0x2b, 0x7b, 0xb1, 0x1b }, + { 0xcf, 0xcb, 0x76, 0x5b, 0x04, 0x8e, 0x35, 0x02, + 0x2c, 0x5d, 0x08, 0x9d, 0x26, 0xe8, 0x5a, 0x36, + 0xb0, 0x05, 0xa2, 0xb8, 0x04, 0x93, 0xd0, 0x3a, + 0x14, 0x4e, 0x09, 0xf4, 0x09, 0xb6, 0xaf, 0xd1 }, + { 0x40, 0x50, 0xc7, 0xa2, 0x77, 0x05, 0xbb, 0x27, + 0xf4, 0x20, 0x89, 0xb2, 0x99, 0xf3, 0xcb, 0xe5, + 0x05, 0x4e, 0xad, 0x68, 0x72, 0x7e, 0x8e, 0xf9, + 0x31, 0x8c, 0xe6, 0xf2, 0x5c, 0xd6, 0xf3, 0x1d }, + { 0x18, 0x40, 0x70, 0xbd, 0x5d, 0x26, 0x5f, 0xbd, + 0xc1, 0x42, 0xcd, 0x1c, 0x5c, 0xd0, 0xd7, 0xe4, + 0x14, 0xe7, 0x03, 0x69, 0xa2, 0x66, 0xd6, 0x27, + 0xc8, 0xfb, 0xa8, 0x4f, 0xa5, 0xe8, 0x4c, 0x34 }, + { 0x9e, 0xdd, 0xa9, 0xa4, 0x44, 0x39, 0x02, 0xa9, + 0x58, 0x8c, 0x0d, 0x0c, 0xcc, 0x62, 0xb9, 0x30, + 0x21, 0x84, 0x79, 0xa6, 0x84, 0x1e, 0x6f, 0xe7, + 0xd4, 0x30, 0x03, 0xf0, 0x4b, 0x1f, 0xd6, 0x43 }, + { 0xe4, 0x12, 0xfe, 0xef, 0x79, 0x08, 0x32, 0x4a, + 0x6d, 0xa1, 0x84, 0x16, 0x29, 0xf3, 0x5d, 0x3d, + 0x35, 0x86, 0x42, 0x01, 0x93, 0x10, 0xec, 0x57, + 0xc6, 0x14, 0x83, 0x6b, 0x63, 0xd3, 0x07, 0x63 }, + { 0x1a, 0x2b, 0x8e, 0xdf, 0xf3, 0xf9, 0xac, 0xc1, + 0x55, 0x4f, 0xcb, 0xae, 0x3c, 0xf1, 0xd6, 0x29, + 0x8c, 0x64, 0x62, 0xe2, 0x2e, 0x5e, 0xb0, 0x25, + 0x96, 0x84, 0xf8, 0x35, 0x01, 0x2b, 0xd1, 0x3f }, + { 0x28, 0x8c, 0x4a, 0xd9, 0xb9, 0x40, 0x97, 0x62, + 0xea, 0x07, 0xc2, 0x4a, 0x41, 0xf0, 0x4f, 0x69, + 0xa7, 0xd7, 0x4b, 0xee, 0x2d, 0x95, 0x43, 0x53, + 0x74, 0xbd, 0xe9, 0x46, 0xd7, 0x24, 0x1c, 0x7b }, + { 0x80, 0x56, 0x91, 0xbb, 0x28, 0x67, 0x48, 0xcf, + 0xb5, 0x91, 0xd3, 0xae, 0xbe, 0x7e, 0x6f, 0x4e, + 0x4d, 0xc6, 0xe2, 0x80, 0x8c, 0x65, 0x14, 0x3c, + 0xc0, 0x04, 0xe4, 0xeb, 0x6f, 0xd0, 0x9d, 0x43 }, + { 0xd4, 0xac, 0x8d, 0x3a, 0x0a, 0xfc, 0x6c, 0xfa, + 0x7b, 0x46, 0x0a, 0xe3, 0x00, 0x1b, 0xae, 0xb3, + 0x6d, 0xad, 0xb3, 0x7d, 0xa0, 0x7d, 0x2e, 0x8a, + 0xc9, 0x18, 0x22, 0xdf, 0x34, 0x8a, 0xed, 0x3d }, + { 0xc3, 0x76, 0x61, 0x70, 0x14, 0xd2, 0x01, 0x58, + 0xbc, 0xed, 0x3d, 0x3b, 0xa5, 0x52, 0xb6, 0xec, + 0xcf, 0x84, 0xe6, 0x2a, 0xa3, 0xeb, 0x65, 0x0e, + 0x90, 0x02, 0x9c, 0x84, 0xd1, 0x3e, 0xea, 0x69 }, + { 0xc4, 0x1f, 0x09, 0xf4, 0x3c, 0xec, 0xae, 0x72, + 0x93, 0xd6, 0x00, 0x7c, 0xa0, 0xa3, 0x57, 0x08, + 0x7d, 0x5a, 0xe5, 0x9b, 0xe5, 0x00, 0xc1, 0xcd, + 0x5b, 0x28, 0x9e, 0xe8, 0x10, 0xc7, 0xb0, 0x82 }, + { 0x03, 0xd1, 0xce, 0xd1, 0xfb, 0xa5, 0xc3, 0x91, + 0x55, 0xc4, 0x4b, 0x77, 0x65, 0xcb, 0x76, 0x0c, + 0x78, 0x70, 0x8d, 0xcf, 0xc8, 0x0b, 0x0b, 0xd8, + 0xad, 0xe3, 0xa5, 0x6d, 0xa8, 0x83, 0x0b, 0x29 }, + { 0x09, 0xbd, 0xe6, 0xf1, 0x52, 0x21, 0x8d, 0xc9, + 0x2c, 0x41, 0xd7, 0xf4, 0x53, 0x87, 0xe6, 0x3e, + 0x58, 0x69, 0xd8, 0x07, 0xec, 0x70, 0xb8, 0x21, + 0x40, 0x5d, 0xbd, 0x88, 0x4b, 0x7f, 0xcf, 0x4b }, + { 0x71, 0xc9, 0x03, 0x6e, 0x18, 0x17, 0x9b, 0x90, + 0xb3, 0x7d, 0x39, 0xe9, 0xf0, 0x5e, 0xb8, 0x9c, + 0xc5, 0xfc, 0x34, 0x1f, 0xd7, 0xc4, 0x77, 0xd0, + 0xd7, 0x49, 0x32, 0x85, 0xfa, 0xca, 0x08, 0xa4 }, + { 0x59, 0x16, 0x83, 0x3e, 0xbb, 0x05, 
0xcd, 0x91, + 0x9c, 0xa7, 0xfe, 0x83, 0xb6, 0x92, 0xd3, 0x20, + 0x5b, 0xef, 0x72, 0x39, 0x2b, 0x2c, 0xf6, 0xbb, + 0x0a, 0x6d, 0x43, 0xf9, 0x94, 0xf9, 0x5f, 0x11 }, + { 0xf6, 0x3a, 0xab, 0x3e, 0xc6, 0x41, 0xb3, 0xb0, + 0x24, 0x96, 0x4c, 0x2b, 0x43, 0x7c, 0x04, 0xf6, + 0x04, 0x3c, 0x4c, 0x7e, 0x02, 0x79, 0x23, 0x99, + 0x95, 0x40, 0x19, 0x58, 0xf8, 0x6b, 0xbe, 0x54 }, + { 0xf1, 0x72, 0xb1, 0x80, 0xbf, 0xb0, 0x97, 0x40, + 0x49, 0x31, 0x20, 0xb6, 0x32, 0x6c, 0xbd, 0xc5, + 0x61, 0xe4, 0x77, 0xde, 0xf9, 0xbb, 0xcf, 0xd2, + 0x8c, 0xc8, 0xc1, 0xc5, 0xe3, 0x37, 0x9a, 0x31 }, + { 0xcb, 0x9b, 0x89, 0xcc, 0x18, 0x38, 0x1d, 0xd9, + 0x14, 0x1a, 0xde, 0x58, 0x86, 0x54, 0xd4, 0xe6, + 0xa2, 0x31, 0xd5, 0xbf, 0x49, 0xd4, 0xd5, 0x9a, + 0xc2, 0x7d, 0x86, 0x9c, 0xbe, 0x10, 0x0c, 0xf3 }, + { 0x7b, 0xd8, 0x81, 0x50, 0x46, 0xfd, 0xd8, 0x10, + 0xa9, 0x23, 0xe1, 0x98, 0x4a, 0xae, 0xbd, 0xcd, + 0xf8, 0x4d, 0x87, 0xc8, 0x99, 0x2d, 0x68, 0xb5, + 0xee, 0xb4, 0x60, 0xf9, 0x3e, 0xb3, 0xc8, 0xd7 }, + { 0x60, 0x7b, 0xe6, 0x68, 0x62, 0xfd, 0x08, 0xee, + 0x5b, 0x19, 0xfa, 0xca, 0xc0, 0x9d, 0xfd, 0xbc, + 0xd4, 0x0c, 0x31, 0x21, 0x01, 0xd6, 0x6e, 0x6e, + 0xbd, 0x2b, 0x84, 0x1f, 0x1b, 0x9a, 0x93, 0x25 }, + { 0x9f, 0xe0, 0x3b, 0xbe, 0x69, 0xab, 0x18, 0x34, + 0xf5, 0x21, 0x9b, 0x0d, 0xa8, 0x8a, 0x08, 0xb3, + 0x0a, 0x66, 0xc5, 0x91, 0x3f, 0x01, 0x51, 0x96, + 0x3c, 0x36, 0x05, 0x60, 0xdb, 0x03, 0x87, 0xb3 }, + { 0x90, 0xa8, 0x35, 0x85, 0x71, 0x7b, 0x75, 0xf0, + 0xe9, 0xb7, 0x25, 0xe0, 0x55, 0xee, 0xee, 0xb9, + 0xe7, 0xa0, 0x28, 0xea, 0x7e, 0x6c, 0xbc, 0x07, + 0xb2, 0x09, 0x17, 0xec, 0x03, 0x63, 0xe3, 0x8c }, + { 0x33, 0x6e, 0xa0, 0x53, 0x0f, 0x4a, 0x74, 0x69, + 0x12, 0x6e, 0x02, 0x18, 0x58, 0x7e, 0xbb, 0xde, + 0x33, 0x58, 0xa0, 0xb3, 0x1c, 0x29, 0xd2, 0x00, + 0xf7, 0xdc, 0x7e, 0xb1, 0x5c, 0x6a, 0xad, 0xd8 }, + { 0xa7, 0x9e, 0x76, 0xdc, 0x0a, 0xbc, 0xa4, 0x39, + 0x6f, 0x07, 0x47, 0xcd, 0x7b, 0x74, 0x8d, 0xf9, + 0x13, 0x00, 0x76, 0x26, 0xb1, 0xd6, 0x59, 0xda, + 0x0c, 0x1f, 0x78, 0xb9, 0x30, 0x3d, 0x01, 0xa3 }, + { 0x44, 0xe7, 0x8a, 0x77, 0x37, 0x56, 0xe0, 0x95, + 0x15, 0x19, 0x50, 0x4d, 0x70, 0x38, 0xd2, 0x8d, + 0x02, 0x13, 0xa3, 0x7e, 0x0c, 0xe3, 0x75, 0x37, + 0x17, 0x57, 0xbc, 0x99, 0x63, 0x11, 0xe3, 0xb8 }, + { 0x77, 0xac, 0x01, 0x2a, 0x3f, 0x75, 0x4d, 0xcf, + 0xea, 0xb5, 0xeb, 0x99, 0x6b, 0xe9, 0xcd, 0x2d, + 0x1f, 0x96, 0x11, 0x1b, 0x6e, 0x49, 0xf3, 0x99, + 0x4d, 0xf1, 0x81, 0xf2, 0x85, 0x69, 0xd8, 0x25 }, + { 0xce, 0x5a, 0x10, 0xdb, 0x6f, 0xcc, 0xda, 0xf1, + 0x40, 0xaa, 0xa4, 0xde, 0xd6, 0x25, 0x0a, 0x9c, + 0x06, 0xe9, 0x22, 0x2b, 0xc9, 0xf9, 0xf3, 0x65, + 0x8a, 0x4a, 0xff, 0x93, 0x5f, 0x2b, 0x9f, 0x3a }, + { 0xec, 0xc2, 0x03, 0xa7, 0xfe, 0x2b, 0xe4, 0xab, + 0xd5, 0x5b, 0xb5, 0x3e, 0x6e, 0x67, 0x35, 0x72, + 0xe0, 0x07, 0x8d, 0xa8, 0xcd, 0x37, 0x5e, 0xf4, + 0x30, 0xcc, 0x97, 0xf9, 0xf8, 0x00, 0x83, 0xaf }, + { 0x14, 0xa5, 0x18, 0x6d, 0xe9, 0xd7, 0xa1, 0x8b, + 0x04, 0x12, 0xb8, 0x56, 0x3e, 0x51, 0xcc, 0x54, + 0x33, 0x84, 0x0b, 0x4a, 0x12, 0x9a, 0x8f, 0xf9, + 0x63, 0xb3, 0x3a, 0x3c, 0x4a, 0xfe, 0x8e, 0xbb }, + { 0x13, 0xf8, 0xef, 0x95, 0xcb, 0x86, 0xe6, 0xa6, + 0x38, 0x93, 0x1c, 0x8e, 0x10, 0x76, 0x73, 0xeb, + 0x76, 0xba, 0x10, 0xd7, 0xc2, 0xcd, 0x70, 0xb9, + 0xd9, 0x92, 0x0b, 0xbe, 0xed, 0x92, 0x94, 0x09 }, + { 0x0b, 0x33, 0x8f, 0x4e, 0xe1, 0x2f, 0x2d, 0xfc, + 0xb7, 0x87, 0x13, 0x37, 0x79, 0x41, 0xe0, 0xb0, + 0x63, 0x21, 0x52, 0x58, 0x1d, 0x13, 0x32, 0x51, + 0x6e, 0x4a, 0x2c, 0xab, 0x19, 0x42, 0xcc, 0xa4 }, + { 0xea, 0xab, 0x0e, 0xc3, 0x7b, 0x3b, 0x8a, 0xb7, + 0x96, 0xe9, 0xf5, 0x72, 0x38, 0xde, 0x14, 0xa2, + 0x64, 0xa0, 0x76, 
0xf3, 0x88, 0x7d, 0x86, 0xe2, + 0x9b, 0xb5, 0x90, 0x6d, 0xb5, 0xa0, 0x0e, 0x02 }, + { 0x23, 0xcb, 0x68, 0xb8, 0xc0, 0xe6, 0xdc, 0x26, + 0xdc, 0x27, 0x76, 0x6d, 0xdc, 0x0a, 0x13, 0xa9, + 0x94, 0x38, 0xfd, 0x55, 0x61, 0x7a, 0xa4, 0x09, + 0x5d, 0x8f, 0x96, 0x97, 0x20, 0xc8, 0x72, 0xdf }, + { 0x09, 0x1d, 0x8e, 0xe3, 0x0d, 0x6f, 0x29, 0x68, + 0xd4, 0x6b, 0x68, 0x7d, 0xd6, 0x52, 0x92, 0x66, + 0x57, 0x42, 0xde, 0x0b, 0xb8, 0x3d, 0xcc, 0x00, + 0x04, 0xc7, 0x2c, 0xe1, 0x00, 0x07, 0xa5, 0x49 }, + { 0x7f, 0x50, 0x7a, 0xbc, 0x6d, 0x19, 0xba, 0x00, + 0xc0, 0x65, 0xa8, 0x76, 0xec, 0x56, 0x57, 0x86, + 0x88, 0x82, 0xd1, 0x8a, 0x22, 0x1b, 0xc4, 0x6c, + 0x7a, 0x69, 0x12, 0x54, 0x1f, 0x5b, 0xc7, 0xba }, + { 0xa0, 0x60, 0x7c, 0x24, 0xe1, 0x4e, 0x8c, 0x22, + 0x3d, 0xb0, 0xd7, 0x0b, 0x4d, 0x30, 0xee, 0x88, + 0x01, 0x4d, 0x60, 0x3f, 0x43, 0x7e, 0x9e, 0x02, + 0xaa, 0x7d, 0xaf, 0xa3, 0xcd, 0xfb, 0xad, 0x94 }, + { 0xdd, 0xbf, 0xea, 0x75, 0xcc, 0x46, 0x78, 0x82, + 0xeb, 0x34, 0x83, 0xce, 0x5e, 0x2e, 0x75, 0x6a, + 0x4f, 0x47, 0x01, 0xb7, 0x6b, 0x44, 0x55, 0x19, + 0xe8, 0x9f, 0x22, 0xd6, 0x0f, 0xa8, 0x6e, 0x06 }, + { 0x0c, 0x31, 0x1f, 0x38, 0xc3, 0x5a, 0x4f, 0xb9, + 0x0d, 0x65, 0x1c, 0x28, 0x9d, 0x48, 0x68, 0x56, + 0xcd, 0x14, 0x13, 0xdf, 0x9b, 0x06, 0x77, 0xf5, + 0x3e, 0xce, 0x2c, 0xd9, 0xe4, 0x77, 0xc6, 0x0a }, + { 0x46, 0xa7, 0x3a, 0x8d, 0xd3, 0xe7, 0x0f, 0x59, + 0xd3, 0x94, 0x2c, 0x01, 0xdf, 0x59, 0x9d, 0xef, + 0x78, 0x3c, 0x9d, 0xa8, 0x2f, 0xd8, 0x32, 0x22, + 0xcd, 0x66, 0x2b, 0x53, 0xdc, 0xe7, 0xdb, 0xdf }, + { 0xad, 0x03, 0x8f, 0xf9, 0xb1, 0x4d, 0xe8, 0x4a, + 0x80, 0x1e, 0x4e, 0x62, 0x1c, 0xe5, 0xdf, 0x02, + 0x9d, 0xd9, 0x35, 0x20, 0xd0, 0xc2, 0xfa, 0x38, + 0xbf, 0xf1, 0x76, 0xa8, 0xb1, 0xd1, 0x69, 0x8c }, + { 0xab, 0x70, 0xc5, 0xdf, 0xbd, 0x1e, 0xa8, 0x17, + 0xfe, 0xd0, 0xcd, 0x06, 0x72, 0x93, 0xab, 0xf3, + 0x19, 0xe5, 0xd7, 0x90, 0x1c, 0x21, 0x41, 0xd5, + 0xd9, 0x9b, 0x23, 0xf0, 0x3a, 0x38, 0xe7, 0x48 }, + { 0x1f, 0xff, 0xda, 0x67, 0x93, 0x2b, 0x73, 0xc8, + 0xec, 0xaf, 0x00, 0x9a, 0x34, 0x91, 0xa0, 0x26, + 0x95, 0x3b, 0xab, 0xfe, 0x1f, 0x66, 0x3b, 0x06, + 0x97, 0xc3, 0xc4, 0xae, 0x8b, 0x2e, 0x7d, 0xcb }, + { 0xb0, 0xd2, 0xcc, 0x19, 0x47, 0x2d, 0xd5, 0x7f, + 0x2b, 0x17, 0xef, 0xc0, 0x3c, 0x8d, 0x58, 0xc2, + 0x28, 0x3d, 0xbb, 0x19, 0xda, 0x57, 0x2f, 0x77, + 0x55, 0x85, 0x5a, 0xa9, 0x79, 0x43, 0x17, 0xa0 }, + { 0xa0, 0xd1, 0x9a, 0x6e, 0xe3, 0x39, 0x79, 0xc3, + 0x25, 0x51, 0x0e, 0x27, 0x66, 0x22, 0xdf, 0x41, + 0xf7, 0x15, 0x83, 0xd0, 0x75, 0x01, 0xb8, 0x70, + 0x71, 0x12, 0x9a, 0x0a, 0xd9, 0x47, 0x32, 0xa5 }, + { 0x72, 0x46, 0x42, 0xa7, 0x03, 0x2d, 0x10, 0x62, + 0xb8, 0x9e, 0x52, 0xbe, 0xa3, 0x4b, 0x75, 0xdf, + 0x7d, 0x8f, 0xe7, 0x72, 0xd9, 0xfe, 0x3c, 0x93, + 0xdd, 0xf3, 0xc4, 0x54, 0x5a, 0xb5, 0xa9, 0x9b }, + { 0xad, 0xe5, 0xea, 0xa7, 0xe6, 0x1f, 0x67, 0x2d, + 0x58, 0x7e, 0xa0, 0x3d, 0xae, 0x7d, 0x7b, 0x55, + 0x22, 0x9c, 0x01, 0xd0, 0x6b, 0xc0, 0xa5, 0x70, + 0x14, 0x36, 0xcb, 0xd1, 0x83, 0x66, 0xa6, 0x26 }, + { 0x01, 0x3b, 0x31, 0xeb, 0xd2, 0x28, 0xfc, 0xdd, + 0xa5, 0x1f, 0xab, 0xb0, 0x3b, 0xb0, 0x2d, 0x60, + 0xac, 0x20, 0xca, 0x21, 0x5a, 0xaf, 0xa8, 0x3b, + 0xdd, 0x85, 0x5e, 0x37, 0x55, 0xa3, 0x5f, 0x0b }, + { 0x33, 0x2e, 0xd4, 0x0b, 0xb1, 0x0d, 0xde, 0x3c, + 0x95, 0x4a, 0x75, 0xd7, 0xb8, 0x99, 0x9d, 0x4b, + 0x26, 0xa1, 0xc0, 0x63, 0xc1, 0xdc, 0x6e, 0x32, + 0xc1, 0xd9, 0x1b, 0xab, 0x7b, 0xbb, 0x7d, 0x16 }, + { 0xc7, 0xa1, 0x97, 0xb3, 0xa0, 0x5b, 0x56, 0x6b, + 0xcc, 0x9f, 0xac, 0xd2, 0x0e, 0x44, 0x1d, 0x6f, + 0x6c, 0x28, 0x60, 0xac, 0x96, 0x51, 0xcd, 0x51, + 0xd6, 0xb9, 0xd2, 0xcd, 0xee, 0xea, 0x03, 0x90 }, + { 
0xbd, 0x9c, 0xf6, 0x4e, 0xa8, 0x95, 0x3c, 0x03, + 0x71, 0x08, 0xe6, 0xf6, 0x54, 0x91, 0x4f, 0x39, + 0x58, 0xb6, 0x8e, 0x29, 0xc1, 0x67, 0x00, 0xdc, + 0x18, 0x4d, 0x94, 0xa2, 0x17, 0x08, 0xff, 0x60 }, + { 0x88, 0x35, 0xb0, 0xac, 0x02, 0x11, 0x51, 0xdf, + 0x71, 0x64, 0x74, 0xce, 0x27, 0xce, 0x4d, 0x3c, + 0x15, 0xf0, 0xb2, 0xda, 0xb4, 0x80, 0x03, 0xcf, + 0x3f, 0x3e, 0xfd, 0x09, 0x45, 0x10, 0x6b, 0x9a }, + { 0x3b, 0xfe, 0xfa, 0x33, 0x01, 0xaa, 0x55, 0xc0, + 0x80, 0x19, 0x0c, 0xff, 0xda, 0x8e, 0xae, 0x51, + 0xd9, 0xaf, 0x48, 0x8b, 0x4c, 0x1f, 0x24, 0xc3, + 0xd9, 0xa7, 0x52, 0x42, 0xfd, 0x8e, 0xa0, 0x1d }, + { 0x08, 0x28, 0x4d, 0x14, 0x99, 0x3c, 0xd4, 0x7d, + 0x53, 0xeb, 0xae, 0xcf, 0x0d, 0xf0, 0x47, 0x8c, + 0xc1, 0x82, 0xc8, 0x9c, 0x00, 0xe1, 0x85, 0x9c, + 0x84, 0x85, 0x16, 0x86, 0xdd, 0xf2, 0xc1, 0xb7 }, + { 0x1e, 0xd7, 0xef, 0x9f, 0x04, 0xc2, 0xac, 0x8d, + 0xb6, 0xa8, 0x64, 0xdb, 0x13, 0x10, 0x87, 0xf2, + 0x70, 0x65, 0x09, 0x8e, 0x69, 0xc3, 0xfe, 0x78, + 0x71, 0x8d, 0x9b, 0x94, 0x7f, 0x4a, 0x39, 0xd0 }, + { 0xc1, 0x61, 0xf2, 0xdc, 0xd5, 0x7e, 0x9c, 0x14, + 0x39, 0xb3, 0x1a, 0x9d, 0xd4, 0x3d, 0x8f, 0x3d, + 0x7d, 0xd8, 0xf0, 0xeb, 0x7c, 0xfa, 0xc6, 0xfb, + 0x25, 0xa0, 0xf2, 0x8e, 0x30, 0x6f, 0x06, 0x61 }, + { 0xc0, 0x19, 0x69, 0xad, 0x34, 0xc5, 0x2c, 0xaf, + 0x3d, 0xc4, 0xd8, 0x0d, 0x19, 0x73, 0x5c, 0x29, + 0x73, 0x1a, 0xc6, 0xe7, 0xa9, 0x20, 0x85, 0xab, + 0x92, 0x50, 0xc4, 0x8d, 0xea, 0x48, 0xa3, 0xfc }, + { 0x17, 0x20, 0xb3, 0x65, 0x56, 0x19, 0xd2, 0xa5, + 0x2b, 0x35, 0x21, 0xae, 0x0e, 0x49, 0xe3, 0x45, + 0xcb, 0x33, 0x89, 0xeb, 0xd6, 0x20, 0x8a, 0xca, + 0xf9, 0xf1, 0x3f, 0xda, 0xcc, 0xa8, 0xbe, 0x49 }, + { 0x75, 0x62, 0x88, 0x36, 0x1c, 0x83, 0xe2, 0x4c, + 0x61, 0x7c, 0xf9, 0x5c, 0x90, 0x5b, 0x22, 0xd0, + 0x17, 0xcd, 0xc8, 0x6f, 0x0b, 0xf1, 0xd6, 0x58, + 0xf4, 0x75, 0x6c, 0x73, 0x79, 0x87, 0x3b, 0x7f }, + { 0xe7, 0xd0, 0xed, 0xa3, 0x45, 0x26, 0x93, 0xb7, + 0x52, 0xab, 0xcd, 0xa1, 0xb5, 0x5e, 0x27, 0x6f, + 0x82, 0x69, 0x8f, 0x5f, 0x16, 0x05, 0x40, 0x3e, + 0xff, 0x83, 0x0b, 0xea, 0x00, 0x71, 0xa3, 0x94 }, + { 0x2c, 0x82, 0xec, 0xaa, 0x6b, 0x84, 0x80, 0x3e, + 0x04, 0x4a, 0xf6, 0x31, 0x18, 0xaf, 0xe5, 0x44, + 0x68, 0x7c, 0xb6, 0xe6, 0xc7, 0xdf, 0x49, 0xed, + 0x76, 0x2d, 0xfd, 0x7c, 0x86, 0x93, 0xa1, 0xbc }, + { 0x61, 0x36, 0xcb, 0xf4, 0xb4, 0x41, 0x05, 0x6f, + 0xa1, 0xe2, 0x72, 0x24, 0x98, 0x12, 0x5d, 0x6d, + 0xed, 0x45, 0xe1, 0x7b, 0x52, 0x14, 0x39, 0x59, + 0xc7, 0xf4, 0xd4, 0xe3, 0x95, 0x21, 0x8a, 0xc2 }, + { 0x72, 0x1d, 0x32, 0x45, 0xaa, 0xfe, 0xf2, 0x7f, + 0x6a, 0x62, 0x4f, 0x47, 0x95, 0x4b, 0x6c, 0x25, + 0x50, 0x79, 0x52, 0x6f, 0xfa, 0x25, 0xe9, 0xff, + 0x77, 0xe5, 0xdc, 0xff, 0x47, 0x3b, 0x15, 0x97 }, + { 0x9d, 0xd2, 0xfb, 0xd8, 0xce, 0xf1, 0x6c, 0x35, + 0x3c, 0x0a, 0xc2, 0x11, 0x91, 0xd5, 0x09, 0xeb, + 0x28, 0xdd, 0x9e, 0x3e, 0x0d, 0x8c, 0xea, 0x5d, + 0x26, 0xca, 0x83, 0x93, 0x93, 0x85, 0x1c, 0x3a }, + { 0xb2, 0x39, 0x4c, 0xea, 0xcd, 0xeb, 0xf2, 0x1b, + 0xf9, 0xdf, 0x2c, 0xed, 0x98, 0xe5, 0x8f, 0x1c, + 0x3a, 0x4b, 0xbb, 0xff, 0x66, 0x0d, 0xd9, 0x00, + 0xf6, 0x22, 0x02, 0xd6, 0x78, 0x5c, 0xc4, 0x6e }, + { 0x57, 0x08, 0x9f, 0x22, 0x27, 0x49, 0xad, 0x78, + 0x71, 0x76, 0x5f, 0x06, 0x2b, 0x11, 0x4f, 0x43, + 0xba, 0x20, 0xec, 0x56, 0x42, 0x2a, 0x8b, 0x1e, + 0x3f, 0x87, 0x19, 0x2c, 0x0e, 0xa7, 0x18, 0xc6 }, + { 0xe4, 0x9a, 0x94, 0x59, 0x96, 0x1c, 0xd3, 0x3c, + 0xdf, 0x4a, 0xae, 0x1b, 0x10, 0x78, 0xa5, 0xde, + 0xa7, 0xc0, 0x40, 0xe0, 0xfe, 0xa3, 0x40, 0xc9, + 0x3a, 0x72, 0x48, 0x72, 0xfc, 0x4a, 0xf8, 0x06 }, + { 0xed, 0xe6, 0x7f, 0x72, 0x0e, 0xff, 0xd2, 0xca, + 0x9c, 0x88, 0x99, 0x41, 0x52, 0xd0, 
0x20, 0x1d, + 0xee, 0x6b, 0x0a, 0x2d, 0x2c, 0x07, 0x7a, 0xca, + 0x6d, 0xae, 0x29, 0xf7, 0x3f, 0x8b, 0x63, 0x09 }, + { 0xe0, 0xf4, 0x34, 0xbf, 0x22, 0xe3, 0x08, 0x80, + 0x39, 0xc2, 0x1f, 0x71, 0x9f, 0xfc, 0x67, 0xf0, + 0xf2, 0xcb, 0x5e, 0x98, 0xa7, 0xa0, 0x19, 0x4c, + 0x76, 0xe9, 0x6b, 0xf4, 0xe8, 0xe1, 0x7e, 0x61 }, + { 0x27, 0x7c, 0x04, 0xe2, 0x85, 0x34, 0x84, 0xa4, + 0xeb, 0xa9, 0x10, 0xad, 0x33, 0x6d, 0x01, 0xb4, + 0x77, 0xb6, 0x7c, 0xc2, 0x00, 0xc5, 0x9f, 0x3c, + 0x8d, 0x77, 0xee, 0xf8, 0x49, 0x4f, 0x29, 0xcd }, + { 0x15, 0x6d, 0x57, 0x47, 0xd0, 0xc9, 0x9c, 0x7f, + 0x27, 0x09, 0x7d, 0x7b, 0x7e, 0x00, 0x2b, 0x2e, + 0x18, 0x5c, 0xb7, 0x2d, 0x8d, 0xd7, 0xeb, 0x42, + 0x4a, 0x03, 0x21, 0x52, 0x81, 0x61, 0x21, 0x9f }, + { 0x20, 0xdd, 0xd1, 0xed, 0x9b, 0x1c, 0xa8, 0x03, + 0x94, 0x6d, 0x64, 0xa8, 0x3a, 0xe4, 0x65, 0x9d, + 0xa6, 0x7f, 0xba, 0x7a, 0x1a, 0x3e, 0xdd, 0xb1, + 0xe1, 0x03, 0xc0, 0xf5, 0xe0, 0x3e, 0x3a, 0x2c }, + { 0xf0, 0xaf, 0x60, 0x4d, 0x3d, 0xab, 0xbf, 0x9a, + 0x0f, 0x2a, 0x7d, 0x3d, 0xda, 0x6b, 0xd3, 0x8b, + 0xba, 0x72, 0xc6, 0xd0, 0x9b, 0xe4, 0x94, 0xfc, + 0xef, 0x71, 0x3f, 0xf1, 0x01, 0x89, 0xb6, 0xe6 }, + { 0x98, 0x02, 0xbb, 0x87, 0xde, 0xf4, 0xcc, 0x10, + 0xc4, 0xa5, 0xfd, 0x49, 0xaa, 0x58, 0xdf, 0xe2, + 0xf3, 0xfd, 0xdb, 0x46, 0xb4, 0x70, 0x88, 0x14, + 0xea, 0xd8, 0x1d, 0x23, 0xba, 0x95, 0x13, 0x9b }, + { 0x4f, 0x8c, 0xe1, 0xe5, 0x1d, 0x2f, 0xe7, 0xf2, + 0x40, 0x43, 0xa9, 0x04, 0xd8, 0x98, 0xeb, 0xfc, + 0x91, 0x97, 0x54, 0x18, 0x75, 0x34, 0x13, 0xaa, + 0x09, 0x9b, 0x79, 0x5e, 0xcb, 0x35, 0xce, 0xdb }, + { 0xbd, 0xdc, 0x65, 0x14, 0xd7, 0xee, 0x6a, 0xce, + 0x0a, 0x4a, 0xc1, 0xd0, 0xe0, 0x68, 0x11, 0x22, + 0x88, 0xcb, 0xcf, 0x56, 0x04, 0x54, 0x64, 0x27, + 0x05, 0x63, 0x01, 0x77, 0xcb, 0xa6, 0x08, 0xbd }, + { 0xd6, 0x35, 0x99, 0x4f, 0x62, 0x91, 0x51, 0x7b, + 0x02, 0x81, 0xff, 0xdd, 0x49, 0x6a, 0xfa, 0x86, + 0x27, 0x12, 0xe5, 0xb3, 0xc4, 0xe5, 0x2e, 0x4c, + 0xd5, 0xfd, 0xae, 0x8c, 0x0e, 0x72, 0xfb, 0x08 }, + { 0x87, 0x8d, 0x9c, 0xa6, 0x00, 0xcf, 0x87, 0xe7, + 0x69, 0xcc, 0x30, 0x5c, 0x1b, 0x35, 0x25, 0x51, + 0x86, 0x61, 0x5a, 0x73, 0xa0, 0xda, 0x61, 0x3b, + 0x5f, 0x1c, 0x98, 0xdb, 0xf8, 0x12, 0x83, 0xea }, + { 0xa6, 0x4e, 0xbe, 0x5d, 0xc1, 0x85, 0xde, 0x9f, + 0xdd, 0xe7, 0x60, 0x7b, 0x69, 0x98, 0x70, 0x2e, + 0xb2, 0x34, 0x56, 0x18, 0x49, 0x57, 0x30, 0x7d, + 0x2f, 0xa7, 0x2e, 0x87, 0xa4, 0x77, 0x02, 0xd6 }, + { 0xce, 0x50, 0xea, 0xb7, 0xb5, 0xeb, 0x52, 0xbd, + 0xc9, 0xad, 0x8e, 0x5a, 0x48, 0x0a, 0xb7, 0x80, + 0xca, 0x93, 0x20, 0xe4, 0x43, 0x60, 0xb1, 0xfe, + 0x37, 0xe0, 0x3f, 0x2f, 0x7a, 0xd7, 0xde, 0x01 }, + { 0xee, 0xdd, 0xb7, 0xc0, 0xdb, 0x6e, 0x30, 0xab, + 0xe6, 0x6d, 0x79, 0xe3, 0x27, 0x51, 0x1e, 0x61, + 0xfc, 0xeb, 0xbc, 0x29, 0xf1, 0x59, 0xb4, 0x0a, + 0x86, 0xb0, 0x46, 0xec, 0xf0, 0x51, 0x38, 0x23 }, + { 0x78, 0x7f, 0xc9, 0x34, 0x40, 0xc1, 0xec, 0x96, + 0xb5, 0xad, 0x01, 0xc1, 0x6c, 0xf7, 0x79, 0x16, + 0xa1, 0x40, 0x5f, 0x94, 0x26, 0x35, 0x6e, 0xc9, + 0x21, 0xd8, 0xdf, 0xf3, 0xea, 0x63, 0xb7, 0xe0 }, + { 0x7f, 0x0d, 0x5e, 0xab, 0x47, 0xee, 0xfd, 0xa6, + 0x96, 0xc0, 0xbf, 0x0f, 0xbf, 0x86, 0xab, 0x21, + 0x6f, 0xce, 0x46, 0x1e, 0x93, 0x03, 0xab, 0xa6, + 0xac, 0x37, 0x41, 0x20, 0xe8, 0x90, 0xe8, 0xdf }, + { 0xb6, 0x80, 0x04, 0xb4, 0x2f, 0x14, 0xad, 0x02, + 0x9f, 0x4c, 0x2e, 0x03, 0xb1, 0xd5, 0xeb, 0x76, + 0xd5, 0x71, 0x60, 0xe2, 0x64, 0x76, 0xd2, 0x11, + 0x31, 0xbe, 0xf2, 0x0a, 0xda, 0x7d, 0x27, 0xf4 }, + { 0xb0, 0xc4, 0xeb, 0x18, 0xae, 0x25, 0x0b, 0x51, + 0xa4, 0x13, 0x82, 0xea, 0xd9, 0x2d, 0x0d, 0xc7, + 0x45, 0x5f, 0x93, 0x79, 0xfc, 0x98, 0x84, 0x42, + 0x8e, 0x47, 0x70, 
0x60, 0x8d, 0xb0, 0xfa, 0xec }, + { 0xf9, 0x2b, 0x7a, 0x87, 0x0c, 0x05, 0x9f, 0x4d, + 0x46, 0x46, 0x4c, 0x82, 0x4e, 0xc9, 0x63, 0x55, + 0x14, 0x0b, 0xdc, 0xe6, 0x81, 0x32, 0x2c, 0xc3, + 0xa9, 0x92, 0xff, 0x10, 0x3e, 0x3f, 0xea, 0x52 }, + { 0x53, 0x64, 0x31, 0x26, 0x14, 0x81, 0x33, 0x98, + 0xcc, 0x52, 0x5d, 0x4c, 0x4e, 0x14, 0x6e, 0xde, + 0xb3, 0x71, 0x26, 0x5f, 0xba, 0x19, 0x13, 0x3a, + 0x2c, 0x3d, 0x21, 0x59, 0x29, 0x8a, 0x17, 0x42 }, + { 0xf6, 0x62, 0x0e, 0x68, 0xd3, 0x7f, 0xb2, 0xaf, + 0x50, 0x00, 0xfc, 0x28, 0xe2, 0x3b, 0x83, 0x22, + 0x97, 0xec, 0xd8, 0xbc, 0xe9, 0x9e, 0x8b, 0xe4, + 0xd0, 0x4e, 0x85, 0x30, 0x9e, 0x3d, 0x33, 0x74 }, + { 0x53, 0x16, 0xa2, 0x79, 0x69, 0xd7, 0xfe, 0x04, + 0xff, 0x27, 0xb2, 0x83, 0x96, 0x1b, 0xff, 0xc3, + 0xbf, 0x5d, 0xfb, 0x32, 0xfb, 0x6a, 0x89, 0xd1, + 0x01, 0xc6, 0xc3, 0xb1, 0x93, 0x7c, 0x28, 0x71 }, + { 0x81, 0xd1, 0x66, 0x4f, 0xdf, 0x3c, 0xb3, 0x3c, + 0x24, 0xee, 0xba, 0xc0, 0xbd, 0x64, 0x24, 0x4b, + 0x77, 0xc4, 0xab, 0xea, 0x90, 0xbb, 0xe8, 0xb5, + 0xee, 0x0b, 0x2a, 0xaf, 0xcf, 0x2d, 0x6a, 0x53 }, + { 0x34, 0x57, 0x82, 0xf2, 0x95, 0xb0, 0x88, 0x03, + 0x52, 0xe9, 0x24, 0xa0, 0x46, 0x7b, 0x5f, 0xbc, + 0x3e, 0x8f, 0x3b, 0xfb, 0xc3, 0xc7, 0xe4, 0x8b, + 0x67, 0x09, 0x1f, 0xb5, 0xe8, 0x0a, 0x94, 0x42 }, + { 0x79, 0x41, 0x11, 0xea, 0x6c, 0xd6, 0x5e, 0x31, + 0x1f, 0x74, 0xee, 0x41, 0xd4, 0x76, 0xcb, 0x63, + 0x2c, 0xe1, 0xe4, 0xb0, 0x51, 0xdc, 0x1d, 0x9e, + 0x9d, 0x06, 0x1a, 0x19, 0xe1, 0xd0, 0xbb, 0x49 }, + { 0x2a, 0x85, 0xda, 0xf6, 0x13, 0x88, 0x16, 0xb9, + 0x9b, 0xf8, 0xd0, 0x8b, 0xa2, 0x11, 0x4b, 0x7a, + 0xb0, 0x79, 0x75, 0xa7, 0x84, 0x20, 0xc1, 0xa3, + 0xb0, 0x6a, 0x77, 0x7c, 0x22, 0xdd, 0x8b, 0xcb }, + { 0x89, 0xb0, 0xd5, 0xf2, 0x89, 0xec, 0x16, 0x40, + 0x1a, 0x06, 0x9a, 0x96, 0x0d, 0x0b, 0x09, 0x3e, + 0x62, 0x5d, 0xa3, 0xcf, 0x41, 0xee, 0x29, 0xb5, + 0x9b, 0x93, 0x0c, 0x58, 0x20, 0x14, 0x54, 0x55 }, + { 0xd0, 0xfd, 0xcb, 0x54, 0x39, 0x43, 0xfc, 0x27, + 0xd2, 0x08, 0x64, 0xf5, 0x21, 0x81, 0x47, 0x1b, + 0x94, 0x2c, 0xc7, 0x7c, 0xa6, 0x75, 0xbc, 0xb3, + 0x0d, 0xf3, 0x1d, 0x35, 0x8e, 0xf7, 0xb1, 0xeb }, + { 0xb1, 0x7e, 0xa8, 0xd7, 0x70, 0x63, 0xc7, 0x09, + 0xd4, 0xdc, 0x6b, 0x87, 0x94, 0x13, 0xc3, 0x43, + 0xe3, 0x79, 0x0e, 0x9e, 0x62, 0xca, 0x85, 0xb7, + 0x90, 0x0b, 0x08, 0x6f, 0x6b, 0x75, 0xc6, 0x72 }, + { 0xe7, 0x1a, 0x3e, 0x2c, 0x27, 0x4d, 0xb8, 0x42, + 0xd9, 0x21, 0x14, 0xf2, 0x17, 0xe2, 0xc0, 0xea, + 0xc8, 0xb4, 0x50, 0x93, 0xfd, 0xfd, 0x9d, 0xf4, + 0xca, 0x71, 0x62, 0x39, 0x48, 0x62, 0xd5, 0x01 }, + { 0xc0, 0x47, 0x67, 0x59, 0xab, 0x7a, 0xa3, 0x33, + 0x23, 0x4f, 0x6b, 0x44, 0xf5, 0xfd, 0x85, 0x83, + 0x90, 0xec, 0x23, 0x69, 0x4c, 0x62, 0x2c, 0xb9, + 0x86, 0xe7, 0x69, 0xc7, 0x8e, 0xdd, 0x73, 0x3e }, + { 0x9a, 0xb8, 0xea, 0xbb, 0x14, 0x16, 0x43, 0x4d, + 0x85, 0x39, 0x13, 0x41, 0xd5, 0x69, 0x93, 0xc5, + 0x54, 0x58, 0x16, 0x7d, 0x44, 0x18, 0xb1, 0x9a, + 0x0f, 0x2a, 0xd8, 0xb7, 0x9a, 0x83, 0xa7, 0x5b }, + { 0x79, 0x92, 0xd0, 0xbb, 0xb1, 0x5e, 0x23, 0x82, + 0x6f, 0x44, 0x3e, 0x00, 0x50, 0x5d, 0x68, 0xd3, + 0xed, 0x73, 0x72, 0x99, 0x5a, 0x5c, 0x3e, 0x49, + 0x86, 0x54, 0x10, 0x2f, 0xbc, 0xd0, 0x96, 0x4e }, + { 0xc0, 0x21, 0xb3, 0x00, 0x85, 0x15, 0x14, 0x35, + 0xdf, 0x33, 0xb0, 0x07, 0xcc, 0xec, 0xc6, 0x9d, + 0xf1, 0x26, 0x9f, 0x39, 0xba, 0x25, 0x09, 0x2b, + 0xed, 0x59, 0xd9, 0x32, 0xac, 0x0f, 0xdc, 0x28 }, + { 0x91, 0xa2, 0x5e, 0xc0, 0xec, 0x0d, 0x9a, 0x56, + 0x7f, 0x89, 0xc4, 0xbf, 0xe1, 0xa6, 0x5a, 0x0e, + 0x43, 0x2d, 0x07, 0x06, 0x4b, 0x41, 0x90, 0xe2, + 0x7d, 0xfb, 0x81, 0x90, 0x1f, 0xd3, 0x13, 0x9b }, + { 0x59, 0x50, 0xd3, 0x9a, 0x23, 0xe1, 0x54, 0x5f, + 
0x30, 0x12, 0x70, 0xaa, 0x1a, 0x12, 0xf2, 0xe6, + 0xc4, 0x53, 0x77, 0x6e, 0x4d, 0x63, 0x55, 0xde, + 0x42, 0x5c, 0xc1, 0x53, 0xf9, 0x81, 0x88, 0x67 }, + { 0xd7, 0x9f, 0x14, 0x72, 0x0c, 0x61, 0x0a, 0xf1, + 0x79, 0xa3, 0x76, 0x5d, 0x4b, 0x7c, 0x09, 0x68, + 0xf9, 0x77, 0x96, 0x2d, 0xbf, 0x65, 0x5b, 0x52, + 0x12, 0x72, 0xb6, 0xf1, 0xe1, 0x94, 0x48, 0x8e }, + { 0xe9, 0x53, 0x1b, 0xfc, 0x8b, 0x02, 0x99, 0x5a, + 0xea, 0xa7, 0x5b, 0xa2, 0x70, 0x31, 0xfa, 0xdb, + 0xcb, 0xf4, 0xa0, 0xda, 0xb8, 0x96, 0x1d, 0x92, + 0x96, 0xcd, 0x7e, 0x84, 0xd2, 0x5d, 0x60, 0x06 }, + { 0x34, 0xe9, 0xc2, 0x6a, 0x01, 0xd7, 0xf1, 0x61, + 0x81, 0xb4, 0x54, 0xa9, 0xd1, 0x62, 0x3c, 0x23, + 0x3c, 0xb9, 0x9d, 0x31, 0xc6, 0x94, 0x65, 0x6e, + 0x94, 0x13, 0xac, 0xa3, 0xe9, 0x18, 0x69, 0x2f }, + { 0xd9, 0xd7, 0x42, 0x2f, 0x43, 0x7b, 0xd4, 0x39, + 0xdd, 0xd4, 0xd8, 0x83, 0xda, 0xe2, 0xa0, 0x83, + 0x50, 0x17, 0x34, 0x14, 0xbe, 0x78, 0x15, 0x51, + 0x33, 0xff, 0xf1, 0x96, 0x4c, 0x3d, 0x79, 0x72 }, + { 0x4a, 0xee, 0x0c, 0x7a, 0xaf, 0x07, 0x54, 0x14, + 0xff, 0x17, 0x93, 0xea, 0xd7, 0xea, 0xca, 0x60, + 0x17, 0x75, 0xc6, 0x15, 0xdb, 0xd6, 0x0b, 0x64, + 0x0b, 0x0a, 0x9f, 0x0c, 0xe5, 0x05, 0xd4, 0x35 }, + { 0x6b, 0xfd, 0xd1, 0x54, 0x59, 0xc8, 0x3b, 0x99, + 0xf0, 0x96, 0xbf, 0xb4, 0x9e, 0xe8, 0x7b, 0x06, + 0x3d, 0x69, 0xc1, 0x97, 0x4c, 0x69, 0x28, 0xac, + 0xfc, 0xfb, 0x40, 0x99, 0xf8, 0xc4, 0xef, 0x67 }, + { 0x9f, 0xd1, 0xc4, 0x08, 0xfd, 0x75, 0xc3, 0x36, + 0x19, 0x3a, 0x2a, 0x14, 0xd9, 0x4f, 0x6a, 0xf5, + 0xad, 0xf0, 0x50, 0xb8, 0x03, 0x87, 0xb4, 0xb0, + 0x10, 0xfb, 0x29, 0xf4, 0xcc, 0x72, 0x70, 0x7c }, + { 0x13, 0xc8, 0x84, 0x80, 0xa5, 0xd0, 0x0d, 0x6c, + 0x8c, 0x7a, 0xd2, 0x11, 0x0d, 0x76, 0xa8, 0x2d, + 0x9b, 0x70, 0xf4, 0xfa, 0x66, 0x96, 0xd4, 0xe5, + 0xdd, 0x42, 0xa0, 0x66, 0xdc, 0xaf, 0x99, 0x20 }, + { 0x82, 0x0e, 0x72, 0x5e, 0xe2, 0x5f, 0xe8, 0xfd, + 0x3a, 0x8d, 0x5a, 0xbe, 0x4c, 0x46, 0xc3, 0xba, + 0x88, 0x9d, 0xe6, 0xfa, 0x91, 0x91, 0xaa, 0x22, + 0xba, 0x67, 0xd5, 0x70, 0x54, 0x21, 0x54, 0x2b }, + { 0x32, 0xd9, 0x3a, 0x0e, 0xb0, 0x2f, 0x42, 0xfb, + 0xbc, 0xaf, 0x2b, 0xad, 0x00, 0x85, 0xb2, 0x82, + 0xe4, 0x60, 0x46, 0xa4, 0xdf, 0x7a, 0xd1, 0x06, + 0x57, 0xc9, 0xd6, 0x47, 0x63, 0x75, 0xb9, 0x3e }, + { 0xad, 0xc5, 0x18, 0x79, 0x05, 0xb1, 0x66, 0x9c, + 0xd8, 0xec, 0x9c, 0x72, 0x1e, 0x19, 0x53, 0x78, + 0x6b, 0x9d, 0x89, 0xa9, 0xba, 0xe3, 0x07, 0x80, + 0xf1, 0xe1, 0xea, 0xb2, 0x4a, 0x00, 0x52, 0x3c }, + { 0xe9, 0x07, 0x56, 0xff, 0x7f, 0x9a, 0xd8, 0x10, + 0xb2, 0x39, 0xa1, 0x0c, 0xed, 0x2c, 0xf9, 0xb2, + 0x28, 0x43, 0x54, 0xc1, 0xf8, 0xc7, 0xe0, 0xac, + 0xcc, 0x24, 0x61, 0xdc, 0x79, 0x6d, 0x6e, 0x89 }, + { 0x12, 0x51, 0xf7, 0x6e, 0x56, 0x97, 0x84, 0x81, + 0x87, 0x53, 0x59, 0x80, 0x1d, 0xb5, 0x89, 0xa0, + 0xb2, 0x2f, 0x86, 0xd8, 0xd6, 0x34, 0xdc, 0x04, + 0x50, 0x6f, 0x32, 0x2e, 0xd7, 0x8f, 0x17, 0xe8 }, + { 0x3a, 0xfa, 0x89, 0x9f, 0xd9, 0x80, 0xe7, 0x3e, + 0xcb, 0x7f, 0x4d, 0x8b, 0x8f, 0x29, 0x1d, 0xc9, + 0xaf, 0x79, 0x6b, 0xc6, 0x5d, 0x27, 0xf9, 0x74, + 0xc6, 0xf1, 0x93, 0xc9, 0x19, 0x1a, 0x09, 0xfd }, + { 0xaa, 0x30, 0x5b, 0xe2, 0x6e, 0x5d, 0xed, 0xdc, + 0x3c, 0x10, 0x10, 0xcb, 0xc2, 0x13, 0xf9, 0x5f, + 0x05, 0x1c, 0x78, 0x5c, 0x5b, 0x43, 0x1e, 0x6a, + 0x7c, 0xd0, 0x48, 0xf1, 0x61, 0x78, 0x75, 0x28 }, + { 0x8e, 0xa1, 0x88, 0x4f, 0xf3, 0x2e, 0x9d, 0x10, + 0xf0, 0x39, 0xb4, 0x07, 0xd0, 0xd4, 0x4e, 0x7e, + 0x67, 0x0a, 0xbd, 0x88, 0x4a, 0xee, 0xe0, 0xfb, + 0x75, 0x7a, 0xe9, 0x4e, 0xaa, 0x97, 0x37, 0x3d }, + { 0xd4, 0x82, 0xb2, 0x15, 0x5d, 0x4d, 0xec, 0x6b, + 0x47, 0x36, 0xa1, 0xf1, 0x61, 0x7b, 0x53, 0xaa, + 0xa3, 0x73, 0x10, 0x27, 0x7d, 0x3f, 
0xef, 0x0c, + 0x37, 0xad, 0x41, 0x76, 0x8f, 0xc2, 0x35, 0xb4 }, + { 0x4d, 0x41, 0x39, 0x71, 0x38, 0x7e, 0x7a, 0x88, + 0x98, 0xa8, 0xdc, 0x2a, 0x27, 0x50, 0x07, 0x78, + 0x53, 0x9e, 0xa2, 0x14, 0xa2, 0xdf, 0xe9, 0xb3, + 0xd7, 0xe8, 0xeb, 0xdc, 0xe5, 0xcf, 0x3d, 0xb3 }, + { 0x69, 0x6e, 0x5d, 0x46, 0xe6, 0xc5, 0x7e, 0x87, + 0x96, 0xe4, 0x73, 0x5d, 0x08, 0x91, 0x6e, 0x0b, + 0x79, 0x29, 0xb3, 0xcf, 0x29, 0x8c, 0x29, 0x6d, + 0x22, 0xe9, 0xd3, 0x01, 0x96, 0x53, 0x37, 0x1c }, + { 0x1f, 0x56, 0x47, 0xc1, 0xd3, 0xb0, 0x88, 0x22, + 0x88, 0x85, 0x86, 0x5c, 0x89, 0x40, 0x90, 0x8b, + 0xf4, 0x0d, 0x1a, 0x82, 0x72, 0x82, 0x19, 0x73, + 0xb1, 0x60, 0x00, 0x8e, 0x7a, 0x3c, 0xe2, 0xeb }, + { 0xb6, 0xe7, 0x6c, 0x33, 0x0f, 0x02, 0x1a, 0x5b, + 0xda, 0x65, 0x87, 0x50, 0x10, 0xb0, 0xed, 0xf0, + 0x91, 0x26, 0xc0, 0xf5, 0x10, 0xea, 0x84, 0x90, + 0x48, 0x19, 0x20, 0x03, 0xae, 0xf4, 0xc6, 0x1c }, + { 0x3c, 0xd9, 0x52, 0xa0, 0xbe, 0xad, 0xa4, 0x1a, + 0xbb, 0x42, 0x4c, 0xe4, 0x7f, 0x94, 0xb4, 0x2b, + 0xe6, 0x4e, 0x1f, 0xfb, 0x0f, 0xd0, 0x78, 0x22, + 0x76, 0x80, 0x79, 0x46, 0xd0, 0xd0, 0xbc, 0x55 }, + { 0x98, 0xd9, 0x26, 0x77, 0x43, 0x9b, 0x41, 0xb7, + 0xbb, 0x51, 0x33, 0x12, 0xaf, 0xb9, 0x2b, 0xcc, + 0x8e, 0xe9, 0x68, 0xb2, 0xe3, 0xb2, 0x38, 0xce, + 0xcb, 0x9b, 0x0f, 0x34, 0xc9, 0xbb, 0x63, 0xd0 }, + { 0xec, 0xbc, 0xa2, 0xcf, 0x08, 0xae, 0x57, 0xd5, + 0x17, 0xad, 0x16, 0x15, 0x8a, 0x32, 0xbf, 0xa7, + 0xdc, 0x03, 0x82, 0xea, 0xed, 0xa1, 0x28, 0xe9, + 0x18, 0x86, 0x73, 0x4c, 0x24, 0xa0, 0xb2, 0x9d }, + { 0x94, 0x2c, 0xc7, 0xc0, 0xb5, 0x2e, 0x2b, 0x16, + 0xa4, 0xb8, 0x9f, 0xa4, 0xfc, 0x7e, 0x0b, 0xf6, + 0x09, 0xe2, 0x9a, 0x08, 0xc1, 0xa8, 0x54, 0x34, + 0x52, 0xb7, 0x7c, 0x7b, 0xfd, 0x11, 0xbb, 0x28 }, + { 0x8a, 0x06, 0x5d, 0x8b, 0x61, 0xa0, 0xdf, 0xfb, + 0x17, 0x0d, 0x56, 0x27, 0x73, 0x5a, 0x76, 0xb0, + 0xe9, 0x50, 0x60, 0x37, 0x80, 0x8c, 0xba, 0x16, + 0xc3, 0x45, 0x00, 0x7c, 0x9f, 0x79, 0xcf, 0x8f }, + { 0x1b, 0x9f, 0xa1, 0x97, 0x14, 0x65, 0x9c, 0x78, + 0xff, 0x41, 0x38, 0x71, 0x84, 0x92, 0x15, 0x36, + 0x10, 0x29, 0xac, 0x80, 0x2b, 0x1c, 0xbc, 0xd5, + 0x4e, 0x40, 0x8b, 0xd8, 0x72, 0x87, 0xf8, 0x1f }, + { 0x8d, 0xab, 0x07, 0x1b, 0xcd, 0x6c, 0x72, 0x92, + 0xa9, 0xef, 0x72, 0x7b, 0x4a, 0xe0, 0xd8, 0x67, + 0x13, 0x30, 0x1d, 0xa8, 0x61, 0x8d, 0x9a, 0x48, + 0xad, 0xce, 0x55, 0xf3, 0x03, 0xa8, 0x69, 0xa1 }, + { 0x82, 0x53, 0xe3, 0xe7, 0xc7, 0xb6, 0x84, 0xb9, + 0xcb, 0x2b, 0xeb, 0x01, 0x4c, 0xe3, 0x30, 0xff, + 0x3d, 0x99, 0xd1, 0x7a, 0xbb, 0xdb, 0xab, 0xe4, + 0xf4, 0xd6, 0x74, 0xde, 0xd5, 0x3f, 0xfc, 0x6b }, + { 0xf1, 0x95, 0xf3, 0x21, 0xe9, 0xe3, 0xd6, 0xbd, + 0x7d, 0x07, 0x45, 0x04, 0xdd, 0x2a, 0xb0, 0xe6, + 0x24, 0x1f, 0x92, 0xe7, 0x84, 0xb1, 0xaa, 0x27, + 0x1f, 0xf6, 0x48, 0xb1, 0xca, 0xb6, 0xd7, 0xf6 }, + { 0x27, 0xe4, 0xcc, 0x72, 0x09, 0x0f, 0x24, 0x12, + 0x66, 0x47, 0x6a, 0x7c, 0x09, 0x49, 0x5f, 0x2d, + 0xb1, 0x53, 0xd5, 0xbc, 0xbd, 0x76, 0x19, 0x03, + 0xef, 0x79, 0x27, 0x5e, 0xc5, 0x6b, 0x2e, 0xd8 }, + { 0x89, 0x9c, 0x24, 0x05, 0x78, 0x8e, 0x25, 0xb9, + 0x9a, 0x18, 0x46, 0x35, 0x5e, 0x64, 0x6d, 0x77, + 0xcf, 0x40, 0x00, 0x83, 0x41, 0x5f, 0x7d, 0xc5, + 0xaf, 0xe6, 0x9d, 0x6e, 0x17, 0xc0, 0x00, 0x23 }, + { 0xa5, 0x9b, 0x78, 0xc4, 0x90, 0x57, 0x44, 0x07, + 0x6b, 0xfe, 0xe8, 0x94, 0xde, 0x70, 0x7d, 0x4f, + 0x12, 0x0b, 0x5c, 0x68, 0x93, 0xea, 0x04, 0x00, + 0x29, 0x7d, 0x0b, 0xb8, 0x34, 0x72, 0x76, 0x32 }, + { 0x59, 0xdc, 0x78, 0xb1, 0x05, 0x64, 0x97, 0x07, + 0xa2, 0xbb, 0x44, 0x19, 0xc4, 0x8f, 0x00, 0x54, + 0x00, 0xd3, 0x97, 0x3d, 0xe3, 0x73, 0x66, 0x10, + 0x23, 0x04, 0x35, 0xb1, 0x04, 0x24, 0xb2, 0x4f }, + { 0xc0, 0x14, 0x9d, 
0x1d, 0x7e, 0x7a, 0x63, 0x53, + 0xa6, 0xd9, 0x06, 0xef, 0xe7, 0x28, 0xf2, 0xf3, + 0x29, 0xfe, 0x14, 0xa4, 0x14, 0x9a, 0x3e, 0xa7, + 0x76, 0x09, 0xbc, 0x42, 0xb9, 0x75, 0xdd, 0xfa }, + { 0xa3, 0x2f, 0x24, 0x14, 0x74, 0xa6, 0xc1, 0x69, + 0x32, 0xe9, 0x24, 0x3b, 0xe0, 0xcf, 0x09, 0xbc, + 0xdc, 0x7e, 0x0c, 0xa0, 0xe7, 0xa6, 0xa1, 0xb9, + 0xb1, 0xa0, 0xf0, 0x1e, 0x41, 0x50, 0x23, 0x77 }, + { 0xb2, 0x39, 0xb2, 0xe4, 0xf8, 0x18, 0x41, 0x36, + 0x1c, 0x13, 0x39, 0xf6, 0x8e, 0x2c, 0x35, 0x9f, + 0x92, 0x9a, 0xf9, 0xad, 0x9f, 0x34, 0xe0, 0x1a, + 0xab, 0x46, 0x31, 0xad, 0x6d, 0x55, 0x00, 0xb0 }, + { 0x85, 0xfb, 0x41, 0x9c, 0x70, 0x02, 0xa3, 0xe0, + 0xb4, 0xb6, 0xea, 0x09, 0x3b, 0x4c, 0x1a, 0xc6, + 0x93, 0x66, 0x45, 0xb6, 0x5d, 0xac, 0x5a, 0xc1, + 0x5a, 0x85, 0x28, 0xb7, 0xb9, 0x4c, 0x17, 0x54 }, + { 0x96, 0x19, 0x72, 0x06, 0x25, 0xf1, 0x90, 0xb9, + 0x3a, 0x3f, 0xad, 0x18, 0x6a, 0xb3, 0x14, 0x18, + 0x96, 0x33, 0xc0, 0xd3, 0xa0, 0x1e, 0x6f, 0x9b, + 0xc8, 0xc4, 0xa8, 0xf8, 0x2f, 0x38, 0x3d, 0xbf }, + { 0x7d, 0x62, 0x0d, 0x90, 0xfe, 0x69, 0xfa, 0x46, + 0x9a, 0x65, 0x38, 0x38, 0x89, 0x70, 0xa1, 0xaa, + 0x09, 0xbb, 0x48, 0xa2, 0xd5, 0x9b, 0x34, 0x7b, + 0x97, 0xe8, 0xce, 0x71, 0xf4, 0x8c, 0x7f, 0x46 }, + { 0x29, 0x43, 0x83, 0x56, 0x85, 0x96, 0xfb, 0x37, + 0xc7, 0x5b, 0xba, 0xcd, 0x97, 0x9c, 0x5f, 0xf6, + 0xf2, 0x0a, 0x55, 0x6b, 0xf8, 0x87, 0x9c, 0xc7, + 0x29, 0x24, 0x85, 0x5d, 0xf9, 0xb8, 0x24, 0x0e }, + { 0x16, 0xb1, 0x8a, 0xb3, 0x14, 0x35, 0x9c, 0x2b, + 0x83, 0x3c, 0x1c, 0x69, 0x86, 0xd4, 0x8c, 0x55, + 0xa9, 0xfc, 0x97, 0xcd, 0xe9, 0xa3, 0xc1, 0xf1, + 0x0a, 0x31, 0x77, 0x14, 0x0f, 0x73, 0xf7, 0x38 }, + { 0x8c, 0xbb, 0xdd, 0x14, 0xbc, 0x33, 0xf0, 0x4c, + 0xf4, 0x58, 0x13, 0xe4, 0xa1, 0x53, 0xa2, 0x73, + 0xd3, 0x6a, 0xda, 0xd5, 0xce, 0x71, 0xf4, 0x99, + 0xee, 0xb8, 0x7f, 0xb8, 0xac, 0x63, 0xb7, 0x29 }, + { 0x69, 0xc9, 0xa4, 0x98, 0xdb, 0x17, 0x4e, 0xca, + 0xef, 0xcc, 0x5a, 0x3a, 0xc9, 0xfd, 0xed, 0xf0, + 0xf8, 0x13, 0xa5, 0xbe, 0xc7, 0x27, 0xf1, 0xe7, + 0x75, 0xba, 0xbd, 0xec, 0x77, 0x18, 0x81, 0x6e }, + { 0xb4, 0x62, 0xc3, 0xbe, 0x40, 0x44, 0x8f, 0x1d, + 0x4f, 0x80, 0x62, 0x62, 0x54, 0xe5, 0x35, 0xb0, + 0x8b, 0xc9, 0xcd, 0xcf, 0xf5, 0x99, 0xa7, 0x68, + 0x57, 0x8d, 0x4b, 0x28, 0x81, 0xa8, 0xe3, 0xf0 }, + { 0x55, 0x3e, 0x9d, 0x9c, 0x5f, 0x36, 0x0a, 0xc0, + 0xb7, 0x4a, 0x7d, 0x44, 0xe5, 0xa3, 0x91, 0xda, + 0xd4, 0xce, 0xd0, 0x3e, 0x0c, 0x24, 0x18, 0x3b, + 0x7e, 0x8e, 0xca, 0xbd, 0xf1, 0x71, 0x5a, 0x64 }, + { 0x7a, 0x7c, 0x55, 0xa5, 0x6f, 0xa9, 0xae, 0x51, + 0xe6, 0x55, 0xe0, 0x19, 0x75, 0xd8, 0xa6, 0xff, + 0x4a, 0xe9, 0xe4, 0xb4, 0x86, 0xfc, 0xbe, 0x4e, + 0xac, 0x04, 0x45, 0x88, 0xf2, 0x45, 0xeb, 0xea }, + { 0x2a, 0xfd, 0xf3, 0xc8, 0x2a, 0xbc, 0x48, 0x67, + 0xf5, 0xde, 0x11, 0x12, 0x86, 0xc2, 0xb3, 0xbe, + 0x7d, 0x6e, 0x48, 0x65, 0x7b, 0xa9, 0x23, 0xcf, + 0xbf, 0x10, 0x1a, 0x6d, 0xfc, 0xf9, 0xdb, 0x9a }, + { 0x41, 0x03, 0x7d, 0x2e, 0xdc, 0xdc, 0xe0, 0xc4, + 0x9b, 0x7f, 0xb4, 0xa6, 0xaa, 0x09, 0x99, 0xca, + 0x66, 0x97, 0x6c, 0x74, 0x83, 0xaf, 0xe6, 0x31, + 0xd4, 0xed, 0xa2, 0x83, 0x14, 0x4f, 0x6d, 0xfc }, + { 0xc4, 0x46, 0x6f, 0x84, 0x97, 0xca, 0x2e, 0xeb, + 0x45, 0x83, 0xa0, 0xb0, 0x8e, 0x9d, 0x9a, 0xc7, + 0x43, 0x95, 0x70, 0x9f, 0xda, 0x10, 0x9d, 0x24, + 0xf2, 0xe4, 0x46, 0x21, 0x96, 0x77, 0x9c, 0x5d }, + { 0x75, 0xf6, 0x09, 0x33, 0x8a, 0xa6, 0x7d, 0x96, + 0x9a, 0x2a, 0xe2, 0xa2, 0x36, 0x2b, 0x2d, 0xa9, + 0xd7, 0x7c, 0x69, 0x5d, 0xfd, 0x1d, 0xf7, 0x22, + 0x4a, 0x69, 0x01, 0xdb, 0x93, 0x2c, 0x33, 0x64 }, + { 0x68, 0x60, 0x6c, 0xeb, 0x98, 0x9d, 0x54, 0x88, + 0xfc, 0x7c, 0xf6, 0x49, 0xf3, 0xd7, 0xc2, 0x72, + 
0xef, 0x05, 0x5d, 0xa1, 0xa9, 0x3f, 0xae, 0xcd, + 0x55, 0xfe, 0x06, 0xf6, 0x96, 0x70, 0x98, 0xca }, + { 0x44, 0x34, 0x6b, 0xde, 0xb7, 0xe0, 0x52, 0xf6, + 0x25, 0x50, 0x48, 0xf0, 0xd9, 0xb4, 0x2c, 0x42, + 0x5b, 0xab, 0x9c, 0x3d, 0xd2, 0x41, 0x68, 0x21, + 0x2c, 0x3e, 0xcf, 0x1e, 0xbf, 0x34, 0xe6, 0xae }, + { 0x8e, 0x9c, 0xf6, 0xe1, 0xf3, 0x66, 0x47, 0x1f, + 0x2a, 0xc7, 0xd2, 0xee, 0x9b, 0x5e, 0x62, 0x66, + 0xfd, 0xa7, 0x1f, 0x8f, 0x2e, 0x41, 0x09, 0xf2, + 0x23, 0x7e, 0xd5, 0xf8, 0x81, 0x3f, 0xc7, 0x18 }, + { 0x84, 0xbb, 0xeb, 0x84, 0x06, 0xd2, 0x50, 0x95, + 0x1f, 0x8c, 0x1b, 0x3e, 0x86, 0xa7, 0xc0, 0x10, + 0x08, 0x29, 0x21, 0x83, 0x3d, 0xfd, 0x95, 0x55, + 0xa2, 0xf9, 0x09, 0xb1, 0x08, 0x6e, 0xb4, 0xb8 }, + { 0xee, 0x66, 0x6f, 0x3e, 0xef, 0x0f, 0x7e, 0x2a, + 0x9c, 0x22, 0x29, 0x58, 0xc9, 0x7e, 0xaf, 0x35, + 0xf5, 0x1c, 0xed, 0x39, 0x3d, 0x71, 0x44, 0x85, + 0xab, 0x09, 0xa0, 0x69, 0x34, 0x0f, 0xdf, 0x88 }, + { 0xc1, 0x53, 0xd3, 0x4a, 0x65, 0xc4, 0x7b, 0x4a, + 0x62, 0xc5, 0xca, 0xcf, 0x24, 0x01, 0x09, 0x75, + 0xd0, 0x35, 0x6b, 0x2f, 0x32, 0xc8, 0xf5, 0xda, + 0x53, 0x0d, 0x33, 0x88, 0x16, 0xad, 0x5d, 0xe6 }, + { 0x9f, 0xc5, 0x45, 0x01, 0x09, 0xe1, 0xb7, 0x79, + 0xf6, 0xc7, 0xae, 0x79, 0xd5, 0x6c, 0x27, 0x63, + 0x5c, 0x8d, 0xd4, 0x26, 0xc5, 0xa9, 0xd5, 0x4e, + 0x25, 0x78, 0xdb, 0x98, 0x9b, 0x8c, 0x3b, 0x4e }, + { 0xd1, 0x2b, 0xf3, 0x73, 0x2e, 0xf4, 0xaf, 0x5c, + 0x22, 0xfa, 0x90, 0x35, 0x6a, 0xf8, 0xfc, 0x50, + 0xfc, 0xb4, 0x0f, 0x8f, 0x2e, 0xa5, 0xc8, 0x59, + 0x47, 0x37, 0xa3, 0xb3, 0xd5, 0xab, 0xdb, 0xd7 }, + { 0x11, 0x03, 0x0b, 0x92, 0x89, 0xbb, 0xa5, 0xaf, + 0x65, 0x26, 0x06, 0x72, 0xab, 0x6f, 0xee, 0x88, + 0xb8, 0x74, 0x20, 0xac, 0xef, 0x4a, 0x17, 0x89, + 0xa2, 0x07, 0x3b, 0x7e, 0xc2, 0xf2, 0xa0, 0x9e }, + { 0x69, 0xcb, 0x19, 0x2b, 0x84, 0x44, 0x00, 0x5c, + 0x8c, 0x0c, 0xeb, 0x12, 0xc8, 0x46, 0x86, 0x07, + 0x68, 0x18, 0x8c, 0xda, 0x0a, 0xec, 0x27, 0xa9, + 0xc8, 0xa5, 0x5c, 0xde, 0xe2, 0x12, 0x36, 0x32 }, + { 0xdb, 0x44, 0x4c, 0x15, 0x59, 0x7b, 0x5f, 0x1a, + 0x03, 0xd1, 0xf9, 0xed, 0xd1, 0x6e, 0x4a, 0x9f, + 0x43, 0xa6, 0x67, 0xcc, 0x27, 0x51, 0x75, 0xdf, + 0xa2, 0xb7, 0x04, 0xe3, 0xbb, 0x1a, 0x9b, 0x83 }, + { 0x3f, 0xb7, 0x35, 0x06, 0x1a, 0xbc, 0x51, 0x9d, + 0xfe, 0x97, 0x9e, 0x54, 0xc1, 0xee, 0x5b, 0xfa, + 0xd0, 0xa9, 0xd8, 0x58, 0xb3, 0x31, 0x5b, 0xad, + 0x34, 0xbd, 0xe9, 0x99, 0xef, 0xd7, 0x24, 0xdd } +}; + +static bool __init blake2s_selftest(void) +{ + u8 key[BLAKE2S_KEY_SIZE]; + u8 buf[ARRAY_SIZE(blake2s_testvecs)]; + u8 hash[BLAKE2S_HASH_SIZE]; + size_t i; + bool success = true; + + for (i = 0; i < BLAKE2S_KEY_SIZE; ++i) + key[i] = (u8)i; + + for (i = 0; i < ARRAY_SIZE(blake2s_testvecs); ++i) + buf[i] = (u8)i; + + for (i = 0; i < ARRAY_SIZE(blake2s_keyed_testvecs); ++i) { + blake2s(hash, buf, key, BLAKE2S_HASH_SIZE, i, BLAKE2S_KEY_SIZE); + if (memcmp(hash, blake2s_keyed_testvecs[i], BLAKE2S_HASH_SIZE)) { + pr_err("blake2s keyed self-test %zu: FAIL\n", i + 1); + success = false; + } + } + + for (i = 0; i < ARRAY_SIZE(blake2s_testvecs); ++i) { + blake2s(hash, buf, NULL, BLAKE2S_HASH_SIZE, i, 0); + if (memcmp(hash, blake2s_testvecs[i], BLAKE2S_HASH_SIZE)) { + pr_err("blake2s unkeyed self-test %zu: FAIL\n", i + 1); + success = false; + } + } + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/chacha20.c b/net/wireguard/crypto/zinc/selftest/chacha20.c new file mode 100644 index 000000000000..1a2390aaf6c2 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/chacha20.c @@ -0,0 +1,2698 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A.
Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +struct chacha20_testvec { + const u8 *input, *output, *key; + u64 nonce; + size_t ilen; +}; + +struct hchacha20_testvec { + u8 key[HCHACHA20_KEY_SIZE]; + u8 nonce[HCHACHA20_NONCE_SIZE]; + u8 output[CHACHA20_KEY_SIZE]; +}; + +/* These test vectors are generated by reference implementations and are + * designed to check chacha20 implementation block handling, as well as from + * the draft-arciszewski-xchacha-01 document. + */ + +static const u8 input01[] __initconst = { }; +static const u8 output01[] __initconst = { }; +static const u8 key01[] __initconst = { + 0x09, 0xf4, 0xe8, 0x57, 0x10, 0xf2, 0x12, 0xc3, + 0xc6, 0x91, 0xc4, 0x09, 0x97, 0x46, 0xef, 0xfe, + 0x02, 0x00, 0xe4, 0x5c, 0x82, 0xed, 0x16, 0xf3, + 0x32, 0xbe, 0xec, 0x7a, 0xe6, 0x68, 0x12, 0x26 +}; +enum { nonce01 = 0x3834e2afca3c66d3ULL }; + +static const u8 input02[] __initconst = { + 0x9d +}; +static const u8 output02[] __initconst = { + 0x94 +}; +static const u8 key02[] __initconst = { + 0x8c, 0x01, 0xac, 0xaf, 0x62, 0x63, 0x56, 0x7a, + 0xad, 0x23, 0x4c, 0x58, 0x29, 0x29, 0xbe, 0xab, + 0xe9, 0xf8, 0xdf, 0x6c, 0x8c, 0x74, 0x4d, 0x7d, + 0x13, 0x94, 0x10, 0x02, 0x3d, 0x8e, 0x9f, 0x94 +}; +enum { nonce02 = 0x5d1b3bfdedd9f73aULL }; + +static const u8 input03[] __initconst = { + 0x04, 0x16 +}; +static const u8 output03[] __initconst = { + 0x92, 0x07 +}; +static const u8 key03[] __initconst = { + 0x22, 0x0c, 0x79, 0x2c, 0x38, 0x51, 0xbe, 0x99, + 0xa9, 0x59, 0x24, 0x50, 0xef, 0x87, 0x38, 0xa6, + 0xa0, 0x97, 0x20, 0xcb, 0xb4, 0x0c, 0x94, 0x67, + 0x1f, 0x98, 0xdc, 0xc4, 0x83, 0xbc, 0x35, 0x4d +}; +enum { nonce03 = 0x7a3353ad720a3e2eULL }; + +static const u8 input04[] __initconst = { + 0xc7, 0xcc, 0xd0 +}; +static const u8 output04[] __initconst = { + 0xd8, 0x41, 0x80 +}; +static const u8 key04[] __initconst = { + 0x81, 0x5e, 0x12, 0x01, 0xc4, 0x36, 0x15, 0x03, + 0x11, 0xa0, 0xe9, 0x86, 0xbb, 0x5a, 0xdc, 0x45, + 0x7d, 0x5e, 0x98, 0xf8, 0x06, 0x76, 0x1c, 0xec, + 0xc0, 0xf7, 0xca, 0x4e, 0x99, 0xd9, 0x42, 0x38 +}; +enum { nonce04 = 0x6816e2fc66176da2ULL }; + +static const u8 input05[] __initconst = { + 0x48, 0xf1, 0x31, 0x5f +}; +static const u8 output05[] __initconst = { + 0x48, 0xf7, 0x13, 0x67 +}; +static const u8 key05[] __initconst = { + 0x3f, 0xd6, 0xb6, 0x5e, 0x2f, 0xda, 0x82, 0x39, + 0x97, 0x06, 0xd3, 0x62, 0x4f, 0xbd, 0xcb, 0x9b, + 0x1d, 0xe6, 0x4a, 0x76, 0xab, 0xdd, 0x14, 0x50, + 0x59, 0x21, 0xe3, 0xb2, 0xc7, 0x95, 0xbc, 0x45 +}; +enum { nonce05 = 0xc41a7490e228cc42ULL }; + +static const u8 input06[] __initconst = { + 0xae, 0xa2, 0x85, 0x1d, 0xc8 +}; +static const u8 output06[] __initconst = { + 0xfa, 0xff, 0x45, 0x6b, 0x6f +}; +static const u8 key06[] __initconst = { + 0x04, 0x8d, 0xea, 0x67, 0x20, 0x78, 0xfb, 0x8f, + 0x49, 0x80, 0x35, 0xb5, 0x7b, 0xe4, 0x31, 0x74, + 0x57, 0x43, 0x3a, 0x64, 0x64, 0xb9, 0xe6, 0x23, + 0x4d, 0xfe, 0xb8, 0x7b, 0x71, 0x4d, 0x9d, 0x21 +}; +enum { nonce06 = 0x251366db50b10903ULL }; + +static const u8 input07[] __initconst = { + 0x1a, 0x32, 0x85, 0xb6, 0xe8, 0x52 +}; +static const u8 output07[] __initconst = { + 0xd3, 0x5f, 0xf0, 0x07, 0x69, 0xec +}; +static const u8 key07[] __initconst = { + 0xbf, 0x2d, 0x42, 0x99, 0x97, 0x76, 0x04, 0xad, + 0xd3, 0x8f, 0x6e, 0x6a, 0x34, 0x85, 0xaf, 0x81, + 0xef, 0x36, 0x33, 0xd5, 0x43, 0xa2, 0xaa, 0x08, + 0x0f, 0x77, 0x42, 0x83, 0x58, 0xc5, 0x42, 0x2a +}; +enum { nonce07 = 0xe0796da17dba9b58ULL }; + +static const u8 input08[] __initconst = { + 0x40, 0xae, 0xcd, 0xe4, 0x3d, 0x22, 0xe0 +}; +static const u8 
output08[] __initconst = { + 0xfd, 0x8a, 0x9f, 0x3d, 0x05, 0xc9, 0xd3 +}; +static const u8 key08[] __initconst = { + 0xdc, 0x3f, 0x41, 0xe3, 0x23, 0x2a, 0x8d, 0xf6, + 0x41, 0x2a, 0xa7, 0x66, 0x05, 0x68, 0xe4, 0x7b, + 0xc4, 0x58, 0xd6, 0xcc, 0xdf, 0x0d, 0xc6, 0x25, + 0x1b, 0x61, 0x32, 0x12, 0x4e, 0xf1, 0xe6, 0x29 +}; +enum { nonce08 = 0xb1d2536d9e159832ULL }; + +static const u8 input09[] __initconst = { + 0xba, 0x1d, 0x14, 0x16, 0x9f, 0x83, 0x67, 0x24 +}; +static const u8 output09[] __initconst = { + 0x7c, 0xe3, 0x78, 0x1d, 0xa2, 0xe7, 0xe9, 0x39 +}; +static const u8 key09[] __initconst = { + 0x17, 0x55, 0x90, 0x52, 0xa4, 0xce, 0x12, 0xae, + 0xd4, 0xfd, 0xd4, 0xfb, 0xd5, 0x18, 0x59, 0x50, + 0x4e, 0x51, 0x99, 0x32, 0x09, 0x31, 0xfc, 0xf7, + 0x27, 0x10, 0x8e, 0xa2, 0x4b, 0xa5, 0xf5, 0x62 +}; +enum { nonce09 = 0x495fc269536d003ULL }; + +static const u8 input10[] __initconst = { + 0x09, 0xfd, 0x3c, 0x0b, 0x3d, 0x0e, 0xf3, 0x9d, + 0x27 +}; +static const u8 output10[] __initconst = { + 0xdc, 0xe4, 0x33, 0x60, 0x0c, 0x07, 0xcb, 0x51, + 0x6b +}; +static const u8 key10[] __initconst = { + 0x4e, 0x00, 0x72, 0x37, 0x0f, 0x52, 0x4d, 0x6f, + 0x37, 0x50, 0x3c, 0xb3, 0x51, 0x81, 0x49, 0x16, + 0x7e, 0xfd, 0xb1, 0x51, 0x72, 0x2e, 0xe4, 0x16, + 0x68, 0x5c, 0x5b, 0x8a, 0xc3, 0x90, 0x70, 0x04 +}; +enum { nonce10 = 0x1ad9d1114d88cbbdULL }; + +static const u8 input11[] __initconst = { + 0x70, 0x18, 0x52, 0x85, 0xba, 0x66, 0xff, 0x2c, + 0x9a, 0x46 +}; +static const u8 output11[] __initconst = { + 0xf5, 0x2a, 0x7a, 0xfd, 0x31, 0x7c, 0x91, 0x41, + 0xb1, 0xcf +}; +static const u8 key11[] __initconst = { + 0x48, 0xb4, 0xd0, 0x7c, 0x88, 0xd1, 0x96, 0x0d, + 0x80, 0x33, 0xb4, 0xd5, 0x31, 0x9a, 0x88, 0xca, + 0x14, 0xdc, 0xf0, 0xa8, 0xf3, 0xac, 0xb8, 0x47, + 0x75, 0x86, 0x7c, 0x88, 0x50, 0x11, 0x43, 0x40 +}; +enum { nonce11 = 0x47c35dd1f4f8aa4fULL }; + +static const u8 input12[] __initconst = { + 0x9e, 0x8e, 0x3d, 0x2a, 0x05, 0xfd, 0xe4, 0x90, + 0x24, 0x1c, 0xd3 +}; +static const u8 output12[] __initconst = { + 0x97, 0x72, 0x40, 0x9f, 0xc0, 0x6b, 0x05, 0x33, + 0x42, 0x7e, 0x28 +}; +static const u8 key12[] __initconst = { + 0xee, 0xff, 0x33, 0x33, 0xe0, 0x28, 0xdf, 0xa2, + 0xb6, 0x5e, 0x25, 0x09, 0x52, 0xde, 0xa5, 0x9c, + 0x8f, 0x95, 0xa9, 0x03, 0x77, 0x0f, 0xbe, 0xa1, + 0xd0, 0x7d, 0x73, 0x2f, 0xf8, 0x7e, 0x51, 0x44 +}; +enum { nonce12 = 0xc22d044dc6ea4af3ULL }; + +static const u8 input13[] __initconst = { + 0x9c, 0x16, 0xa2, 0x22, 0x4d, 0xbe, 0x04, 0x9a, + 0xb3, 0xb5, 0xc6, 0x58 +}; +static const u8 output13[] __initconst = { + 0xf0, 0x81, 0xdb, 0x6d, 0xa3, 0xe9, 0xb2, 0xc6, + 0x32, 0x50, 0x16, 0x9f +}; +static const u8 key13[] __initconst = { + 0x96, 0xb3, 0x01, 0xd2, 0x7a, 0x8c, 0x94, 0x09, + 0x4f, 0x58, 0xbe, 0x80, 0xcc, 0xa9, 0x7e, 0x2d, + 0xad, 0x58, 0x3b, 0x63, 0xb8, 0x5c, 0x17, 0xce, + 0xbf, 0x43, 0x33, 0x7a, 0x7b, 0x82, 0x28, 0x2f +}; +enum { nonce13 = 0x2a5d05d88cd7b0daULL }; + +static const u8 input14[] __initconst = { + 0x57, 0x4f, 0xaa, 0x30, 0xe6, 0x23, 0x50, 0x86, + 0x91, 0xa5, 0x60, 0x96, 0x2b +}; +static const u8 output14[] __initconst = { + 0x6c, 0x1f, 0x3b, 0x42, 0xb6, 0x2f, 0xf0, 0xbd, + 0x76, 0x60, 0xc7, 0x7e, 0x8d +}; +static const u8 key14[] __initconst = { + 0x22, 0x85, 0xaf, 0x8f, 0xa3, 0x53, 0xa0, 0xc4, + 0xb5, 0x75, 0xc0, 0xba, 0x30, 0x92, 0xc3, 0x32, + 0x20, 0x5a, 0x8f, 0x7e, 0x93, 0xda, 0x65, 0x18, + 0xd1, 0xf6, 0x9a, 0x9b, 0x8f, 0x85, 0x30, 0xe6 +}; +enum { nonce14 = 0xf9946c166aa4475fULL }; + +static const u8 input15[] __initconst = { + 0x89, 0x81, 0xc7, 0xe2, 0x00, 0xac, 0x52, 0x70, + 0xa4, 
0x79, 0xab, 0xeb, 0x74, 0xf7 +}; +static const u8 output15[] __initconst = { + 0xb4, 0xd0, 0xa9, 0x9d, 0x15, 0x5f, 0x48, 0xd6, + 0x00, 0x7e, 0x4c, 0x77, 0x5a, 0x46 +}; +static const u8 key15[] __initconst = { + 0x0a, 0x66, 0x36, 0xca, 0x5d, 0x82, 0x23, 0xb6, + 0xe4, 0x9b, 0xad, 0x5e, 0xd0, 0x7f, 0xf6, 0x7a, + 0x7b, 0x03, 0xa7, 0x4c, 0xfd, 0xec, 0xd5, 0xa1, + 0xfc, 0x25, 0x54, 0xda, 0x5a, 0x5c, 0xf0, 0x2c +}; +enum { nonce15 = 0x9ab2b87a35e772c8ULL }; + +static const u8 input16[] __initconst = { + 0x5f, 0x09, 0xc0, 0x8b, 0x1e, 0xde, 0xca, 0xd9, + 0xb7, 0x5c, 0x23, 0xc9, 0x55, 0x1e, 0xcf +}; +static const u8 output16[] __initconst = { + 0x76, 0x9b, 0x53, 0xf3, 0x66, 0x88, 0x28, 0x60, + 0x98, 0x80, 0x2c, 0xa8, 0x80, 0xa6, 0x48 +}; +static const u8 key16[] __initconst = { + 0x80, 0xb5, 0x51, 0xdf, 0x17, 0x5b, 0xb0, 0xef, + 0x8b, 0x5b, 0x2e, 0x3e, 0xc5, 0xe3, 0xa5, 0x86, + 0xac, 0x0d, 0x8e, 0x32, 0x90, 0x9d, 0x82, 0x27, + 0xf1, 0x23, 0x26, 0xc3, 0xea, 0x55, 0xb6, 0x63 +}; +enum { nonce16 = 0xa82e9d39e4d02ef5ULL }; + +static const u8 input17[] __initconst = { + 0x87, 0x0b, 0x36, 0x71, 0x7c, 0xb9, 0x0b, 0x80, + 0x4d, 0x77, 0x5c, 0x4f, 0xf5, 0x51, 0x0e, 0x1a +}; +static const u8 output17[] __initconst = { + 0xf1, 0x12, 0x4a, 0x8a, 0xd9, 0xd0, 0x08, 0x67, + 0x66, 0xd7, 0x34, 0xea, 0x32, 0x3b, 0x54, 0x0e +}; +static const u8 key17[] __initconst = { + 0xfb, 0x71, 0x5f, 0x3f, 0x7a, 0xc0, 0x9a, 0xc8, + 0xc8, 0xcf, 0xe8, 0xbc, 0xfb, 0x09, 0xbf, 0x89, + 0x6a, 0xef, 0xd5, 0xe5, 0x36, 0x87, 0x14, 0x76, + 0x00, 0xb9, 0x32, 0x28, 0xb2, 0x00, 0x42, 0x53 +}; +enum { nonce17 = 0x229b87e73d557b96ULL }; + +static const u8 input18[] __initconst = { + 0x38, 0x42, 0xb5, 0x37, 0xb4, 0x3d, 0xfe, 0x59, + 0x38, 0x68, 0x88, 0xfa, 0x89, 0x8a, 0x5f, 0x90, + 0x3c +}; +static const u8 output18[] __initconst = { + 0xac, 0xad, 0x14, 0xe8, 0x7e, 0xd7, 0xce, 0x96, + 0x3d, 0xb3, 0x78, 0x85, 0x22, 0x5a, 0xcb, 0x39, + 0xd4 +}; +static const u8 key18[] __initconst = { + 0xe1, 0xc1, 0xa8, 0xe0, 0x91, 0xe7, 0x38, 0x66, + 0x80, 0x17, 0x12, 0x3c, 0x5e, 0x2d, 0xbb, 0xea, + 0xeb, 0x6c, 0x8b, 0xc8, 0x1b, 0x6f, 0x7c, 0xea, + 0x50, 0x57, 0x23, 0x1e, 0x65, 0x6f, 0x6d, 0x81 +}; +enum { nonce18 = 0xfaf5fcf8f30e57a9ULL }; + +static const u8 input19[] __initconst = { + 0x1c, 0x4a, 0x30, 0x26, 0xef, 0x9a, 0x32, 0xa7, + 0x8f, 0xe5, 0xc0, 0x0f, 0x30, 0x3a, 0xbf, 0x38, + 0x54, 0xba +}; +static const u8 output19[] __initconst = { + 0x57, 0x67, 0x54, 0x4f, 0x31, 0xd6, 0xef, 0x35, + 0x0b, 0xd9, 0x52, 0xa7, 0x46, 0x7d, 0x12, 0x17, + 0x1e, 0xe3 +}; +static const u8 key19[] __initconst = { + 0x5a, 0x79, 0xc1, 0xea, 0x33, 0xb3, 0xc7, 0x21, + 0xec, 0xf8, 0xcb, 0xd2, 0x58, 0x96, 0x23, 0xd6, + 0x4d, 0xed, 0x2f, 0xdf, 0x8a, 0x79, 0xe6, 0x8b, + 0x38, 0xa3, 0xc3, 0x7a, 0x33, 0xda, 0x02, 0xc7 +}; +enum { nonce19 = 0x2b23b61840429604ULL }; + +static const u8 input20[] __initconst = { + 0xab, 0xe9, 0x32, 0xbb, 0x35, 0x17, 0xe0, 0x60, + 0x80, 0xb1, 0x27, 0xdc, 0xe6, 0x62, 0x9e, 0x0c, + 0x77, 0xf4, 0x50 +}; +static const u8 output20[] __initconst = { + 0x54, 0x6d, 0xaa, 0xfc, 0x08, 0xfb, 0x71, 0xa8, + 0xd6, 0x1d, 0x7d, 0xf3, 0x45, 0x10, 0xb5, 0x4c, + 0xcc, 0x4b, 0x45 +}; +static const u8 key20[] __initconst = { + 0xa3, 0xfd, 0x3d, 0xa9, 0xeb, 0xea, 0x2c, 0x69, + 0xcf, 0x59, 0x38, 0x13, 0x5b, 0xa7, 0x53, 0x8f, + 0x5e, 0xa2, 0x33, 0x86, 0x4c, 0x75, 0x26, 0xaf, + 0x35, 0x12, 0x09, 0x71, 0x81, 0xea, 0x88, 0x66 +}; +enum { nonce20 = 0x7459667a8fadff58ULL }; + +static const u8 input21[] __initconst = { + 0xa6, 0x82, 0x21, 0x23, 0xad, 0x27, 0x3f, 0xc6, + 0xd7, 0x16, 0x0d, 
0x6d, 0x24, 0x15, 0x54, 0xc5, + 0x96, 0x72, 0x59, 0x8a +}; +static const u8 output21[] __initconst = { + 0x5f, 0x34, 0x32, 0xea, 0x06, 0xd4, 0x9e, 0x01, + 0xdc, 0x32, 0x32, 0x40, 0x66, 0x73, 0x6d, 0x4a, + 0x6b, 0x12, 0x20, 0xe8 +}; +static const u8 key21[] __initconst = { + 0x96, 0xfd, 0x13, 0x23, 0xa9, 0x89, 0x04, 0xe6, + 0x31, 0xa5, 0x2c, 0xc1, 0x40, 0xd5, 0x69, 0x5c, + 0x32, 0x79, 0x56, 0xe0, 0x29, 0x93, 0x8f, 0xe8, + 0x5f, 0x65, 0x53, 0x7f, 0xc1, 0xe9, 0xaf, 0xaf +}; +enum { nonce21 = 0xba8defee9d8e13b5ULL }; + +static const u8 input22[] __initconst = { + 0xb8, 0x32, 0x1a, 0x81, 0xd8, 0x38, 0x89, 0x5a, + 0xb0, 0x05, 0xbe, 0xf4, 0xd2, 0x08, 0xc6, 0xee, + 0x79, 0x7b, 0x3a, 0x76, 0x59 +}; +static const u8 output22[] __initconst = { + 0xb7, 0xba, 0xae, 0x80, 0xe4, 0x9f, 0x79, 0x84, + 0x5a, 0x48, 0x50, 0x6d, 0xcb, 0xd0, 0x06, 0x0c, + 0x15, 0x63, 0xa7, 0x5e, 0xbd +}; +static const u8 key22[] __initconst = { + 0x0f, 0x35, 0x3d, 0xeb, 0x5f, 0x0a, 0x82, 0x0d, + 0x24, 0x59, 0x71, 0xd8, 0xe6, 0x2d, 0x5f, 0xe1, + 0x7e, 0x0c, 0xae, 0xf6, 0xdc, 0x2c, 0xc5, 0x4a, + 0x38, 0x88, 0xf2, 0xde, 0xd9, 0x5f, 0x76, 0x7c +}; +enum { nonce22 = 0xe77f1760e9f5e192ULL }; + +static const u8 input23[] __initconst = { + 0x4b, 0x1e, 0x79, 0x99, 0xcf, 0xef, 0x64, 0x4b, + 0xb0, 0x66, 0xae, 0x99, 0x2e, 0x68, 0x97, 0xf5, + 0x5d, 0x9b, 0x3f, 0x7a, 0xa9, 0xd9 +}; +static const u8 output23[] __initconst = { + 0x5f, 0xa4, 0x08, 0x39, 0xca, 0xfa, 0x2b, 0x83, + 0x5d, 0x95, 0x70, 0x7c, 0x2e, 0xd4, 0xae, 0xfa, + 0x45, 0x4a, 0x77, 0x7f, 0xa7, 0x65 +}; +static const u8 key23[] __initconst = { + 0x4a, 0x06, 0x83, 0x64, 0xaa, 0xe3, 0x38, 0x32, + 0x28, 0x5d, 0xa4, 0xb2, 0x5a, 0xee, 0xcf, 0x8e, + 0x19, 0x67, 0xf1, 0x09, 0xe8, 0xc9, 0xf6, 0x40, + 0x02, 0x6d, 0x0b, 0xde, 0xfa, 0x81, 0x03, 0xb1 +}; +enum { nonce23 = 0x9b3f349158709849ULL }; + +static const u8 input24[] __initconst = { + 0xc6, 0xfc, 0x47, 0x5e, 0xd8, 0xed, 0xa9, 0xe5, + 0x4f, 0x82, 0x79, 0x35, 0xee, 0x3e, 0x7e, 0x3e, + 0x35, 0x70, 0x6e, 0xfa, 0x6d, 0x08, 0xe8 +}; +static const u8 output24[] __initconst = { + 0x3b, 0xc5, 0xf8, 0xc2, 0xbf, 0x2b, 0x90, 0x33, + 0xa6, 0xae, 0xf5, 0x5a, 0x65, 0xb3, 0x3d, 0xe1, + 0xcd, 0x5f, 0x55, 0xfa, 0xe7, 0xa5, 0x4a +}; +static const u8 key24[] __initconst = { + 0x00, 0x24, 0xc3, 0x65, 0x5f, 0xe6, 0x31, 0xbb, + 0x6d, 0xfc, 0x20, 0x7b, 0x1b, 0xa8, 0x96, 0x26, + 0x55, 0x21, 0x62, 0x25, 0x7e, 0xba, 0x23, 0x97, + 0xc9, 0xb8, 0x53, 0xa8, 0xef, 0xab, 0xad, 0x61 +}; +enum { nonce24 = 0x13ee0b8f526177c3ULL }; + +static const u8 input25[] __initconst = { + 0x33, 0x07, 0x16, 0xb1, 0x34, 0x33, 0x67, 0x04, + 0x9b, 0x0a, 0xce, 0x1b, 0xe9, 0xde, 0x1a, 0xec, + 0xd0, 0x55, 0xfb, 0xc6, 0x33, 0xaf, 0x2d, 0xe3 +}; +static const u8 output25[] __initconst = { + 0x05, 0x93, 0x10, 0xd1, 0x58, 0x6f, 0x68, 0x62, + 0x45, 0xdb, 0x91, 0xae, 0x70, 0xcf, 0xd4, 0x5f, + 0xee, 0xdf, 0xd5, 0xba, 0x9e, 0xde, 0x68, 0xe6 +}; +static const u8 key25[] __initconst = { + 0x83, 0xa9, 0x4f, 0x5d, 0x74, 0xd5, 0x91, 0xb3, + 0xc9, 0x97, 0x19, 0x15, 0xdb, 0x0d, 0x0b, 0x4a, + 0x3d, 0x55, 0xcf, 0xab, 0xb2, 0x05, 0x21, 0x35, + 0x45, 0x50, 0xeb, 0xf8, 0xf5, 0xbf, 0x36, 0x35 +}; +enum { nonce25 = 0x7c6f459e49ebfebcULL }; + +static const u8 input26[] __initconst = { + 0xc2, 0xd4, 0x7a, 0xa3, 0x92, 0xe1, 0xac, 0x46, + 0x1a, 0x15, 0x38, 0xc9, 0xb5, 0xfd, 0xdf, 0x84, + 0x38, 0xbc, 0x6b, 0x1d, 0xb0, 0x83, 0x43, 0x04, + 0x39 +}; +static const u8 output26[] __initconst = { + 0x7f, 0xde, 0xd6, 0x87, 0xcc, 0x34, 0xf4, 0x12, + 0xae, 0x55, 0xa5, 0x89, 0x95, 0x29, 0xfc, 0x18, + 0xd8, 0xc7, 0x7c, 0xd3, 
0xcb, 0x85, 0x95, 0x21, + 0xd2 +}; +static const u8 key26[] __initconst = { + 0xe4, 0xd0, 0x54, 0x1d, 0x7d, 0x47, 0xa8, 0xc1, + 0x08, 0xca, 0xe2, 0x42, 0x52, 0x95, 0x16, 0x43, + 0xa3, 0x01, 0x23, 0x03, 0xcc, 0x3b, 0x81, 0x78, + 0x23, 0xcc, 0xa7, 0x36, 0xd7, 0xa0, 0x97, 0x8d +}; +enum { nonce26 = 0x524401012231683ULL }; + +static const u8 input27[] __initconst = { + 0x0d, 0xb0, 0xcf, 0xec, 0xfc, 0x38, 0x9d, 0x9d, + 0x89, 0x00, 0x96, 0xf2, 0x79, 0x8a, 0xa1, 0x8d, + 0x32, 0x5e, 0xc6, 0x12, 0x22, 0xec, 0xf6, 0x52, + 0xc1, 0x0b +}; +static const u8 output27[] __initconst = { + 0xef, 0xe1, 0xf2, 0x67, 0x8e, 0x2c, 0x00, 0x9f, + 0x1d, 0x4c, 0x66, 0x1f, 0x94, 0x58, 0xdc, 0xbb, + 0xb9, 0x11, 0x8f, 0x74, 0xfd, 0x0e, 0x14, 0x01, + 0xa8, 0x21 +}; +static const u8 key27[] __initconst = { + 0x78, 0x71, 0xa4, 0xe6, 0xb2, 0x95, 0x44, 0x12, + 0x81, 0xaa, 0x7e, 0x94, 0xa7, 0x8d, 0x44, 0xea, + 0xc4, 0xbc, 0x01, 0xb7, 0x9e, 0xf7, 0x82, 0x9e, + 0x3b, 0x23, 0x9f, 0x31, 0xdd, 0xb8, 0x0d, 0x18 +}; +enum { nonce27 = 0xd58fe0e58fb254d6ULL }; + +static const u8 input28[] __initconst = { + 0xaa, 0xb7, 0xaa, 0xd9, 0xa8, 0x91, 0xd7, 0x8a, + 0x97, 0x9b, 0xdb, 0x7c, 0x47, 0x2b, 0xdb, 0xd2, + 0xda, 0x77, 0xb1, 0xfa, 0x2d, 0x12, 0xe3, 0xe9, + 0xc4, 0x7f, 0x54 +}; +static const u8 output28[] __initconst = { + 0x87, 0x84, 0xa9, 0xa6, 0xad, 0x8f, 0xe6, 0x0f, + 0x69, 0xf8, 0x21, 0xc3, 0x54, 0x95, 0x0f, 0xb0, + 0x4e, 0xc7, 0x02, 0xe4, 0x04, 0xb0, 0x6c, 0x42, + 0x8c, 0x63, 0xe3 +}; +static const u8 key28[] __initconst = { + 0x12, 0x23, 0x37, 0x95, 0x04, 0xb4, 0x21, 0xe8, + 0xbc, 0x65, 0x46, 0x7a, 0xf4, 0x01, 0x05, 0x3f, + 0xb1, 0x34, 0x73, 0xd2, 0x49, 0xbf, 0x6f, 0x20, + 0xbd, 0x23, 0x58, 0x5f, 0xd1, 0x73, 0x57, 0xa6 +}; +enum { nonce28 = 0x3a04d51491eb4e07ULL }; + +static const u8 input29[] __initconst = { + 0x55, 0xd0, 0xd4, 0x4b, 0x17, 0xc8, 0xc4, 0x2b, + 0xc0, 0x28, 0xbd, 0x9d, 0x65, 0x4d, 0xaf, 0x77, + 0x72, 0x7c, 0x36, 0x68, 0xa7, 0xb6, 0x87, 0x4d, + 0xb9, 0x27, 0x25, 0x6c +}; +static const u8 output29[] __initconst = { + 0x0e, 0xac, 0x4c, 0xf5, 0x12, 0xb5, 0x56, 0xa5, + 0x00, 0x9a, 0xd6, 0xe5, 0x1a, 0x59, 0x2c, 0xf6, + 0x42, 0x22, 0xcf, 0x23, 0x98, 0x34, 0x29, 0xac, + 0x6e, 0xe3, 0x37, 0x6d +}; +static const u8 key29[] __initconst = { + 0xda, 0x9d, 0x05, 0x0c, 0x0c, 0xba, 0x75, 0xb9, + 0x9e, 0xb1, 0x8d, 0xd9, 0x73, 0x26, 0x2c, 0xa9, + 0x3a, 0xb5, 0xcb, 0x19, 0x49, 0xa7, 0x4f, 0xf7, + 0x64, 0x35, 0x23, 0x20, 0x2a, 0x45, 0x78, 0xc7 +}; +enum { nonce29 = 0xc25ac9982431cbfULL }; + +static const u8 input30[] __initconst = { + 0x4e, 0xd6, 0x85, 0xbb, 0xe7, 0x99, 0xfa, 0x04, + 0x33, 0x24, 0xfd, 0x75, 0x18, 0xe3, 0xd3, 0x25, + 0xcd, 0xca, 0xae, 0x00, 0xbe, 0x52, 0x56, 0x4a, + 0x31, 0xe9, 0x4f, 0xae, 0x8a +}; +static const u8 output30[] __initconst = { + 0x30, 0x36, 0x32, 0xa2, 0x3c, 0xb6, 0xf9, 0xf9, + 0x76, 0x70, 0xad, 0xa6, 0x10, 0x41, 0x00, 0x4a, + 0xfa, 0xce, 0x1b, 0x86, 0x05, 0xdb, 0x77, 0x96, + 0xb3, 0xb7, 0x8f, 0x61, 0x24 +}; +static const u8 key30[] __initconst = { + 0x49, 0x35, 0x4c, 0x15, 0x98, 0xfb, 0xc6, 0x57, + 0x62, 0x6d, 0x06, 0xc3, 0xd4, 0x79, 0x20, 0x96, + 0x05, 0x2a, 0x31, 0x63, 0xc0, 0x44, 0x42, 0x09, + 0x13, 0x13, 0xff, 0x1b, 0xc8, 0x63, 0x1f, 0x0b +}; +enum { nonce30 = 0x4967f9c08e41568bULL }; + +static const u8 input31[] __initconst = { + 0x91, 0x04, 0x20, 0x47, 0x59, 0xee, 0xa6, 0x0f, + 0x04, 0x75, 0xc8, 0x18, 0x95, 0x44, 0x01, 0x28, + 0x20, 0x6f, 0x73, 0x68, 0x66, 0xb5, 0x03, 0xb3, + 0x58, 0x27, 0x6e, 0x7a, 0x76, 0xb8 +}; +static const u8 output31[] __initconst = { + 0xe8, 0x03, 0x78, 0x9d, 0x13, 0x15, 
0x98, 0xef, + 0x64, 0x68, 0x12, 0x41, 0xb0, 0x29, 0x94, 0x0c, + 0x83, 0x35, 0x46, 0xa9, 0x74, 0xe1, 0x75, 0xf0, + 0xb6, 0x96, 0xc3, 0x6f, 0xd7, 0x70 +}; +static const u8 key31[] __initconst = { + 0xef, 0xcd, 0x5a, 0x4a, 0xf4, 0x7e, 0x6a, 0x3a, + 0x11, 0x88, 0x72, 0x94, 0xb8, 0xae, 0x84, 0xc3, + 0x66, 0xe0, 0xde, 0x4b, 0x00, 0xa5, 0xd6, 0x2d, + 0x50, 0xb7, 0x28, 0xff, 0x76, 0x57, 0x18, 0x1f +}; +enum { nonce31 = 0xcb6f428fa4192e19ULL }; + +static const u8 input32[] __initconst = { + 0x90, 0x06, 0x50, 0x4b, 0x98, 0x14, 0x30, 0xf1, + 0xb8, 0xd7, 0xf0, 0xa4, 0x3e, 0x4e, 0xd8, 0x00, + 0xea, 0xdb, 0x4f, 0x93, 0x05, 0xef, 0x02, 0x71, + 0x1a, 0xcd, 0xa3, 0xb1, 0xae, 0xd3, 0x18 +}; +static const u8 output32[] __initconst = { + 0xcb, 0x4a, 0x37, 0x3f, 0xea, 0x40, 0xab, 0x86, + 0xfe, 0xcc, 0x07, 0xd5, 0xdc, 0xb2, 0x25, 0xb6, + 0xfd, 0x2a, 0x72, 0xbc, 0x5e, 0xd4, 0x75, 0xff, + 0x71, 0xfc, 0xce, 0x1e, 0x6f, 0x22, 0xc1 +}; +static const u8 key32[] __initconst = { + 0xfc, 0x6d, 0xc3, 0x80, 0xce, 0xa4, 0x31, 0xa1, + 0xcc, 0xfa, 0x9d, 0x10, 0x0b, 0xc9, 0x11, 0x77, + 0x34, 0xdb, 0xad, 0x1b, 0xc4, 0xfc, 0xeb, 0x79, + 0x91, 0xda, 0x59, 0x3b, 0x0d, 0xb1, 0x19, 0x3b +}; +enum { nonce32 = 0x88551bf050059467ULL }; + +static const u8 input33[] __initconst = { + 0x88, 0x94, 0x71, 0x92, 0xe8, 0xd7, 0xf9, 0xbd, + 0x55, 0xe3, 0x22, 0xdb, 0x99, 0x51, 0xfb, 0x50, + 0xbf, 0x82, 0xb5, 0x70, 0x8b, 0x2b, 0x6a, 0x03, + 0x37, 0xa0, 0xc6, 0x19, 0x5d, 0xc9, 0xbc, 0xcc +}; +static const u8 output33[] __initconst = { + 0xb6, 0x17, 0x51, 0xc8, 0xea, 0x8a, 0x14, 0xdc, + 0x23, 0x1b, 0xd4, 0xed, 0xbf, 0x50, 0xb9, 0x38, + 0x00, 0xc2, 0x3f, 0x78, 0x3d, 0xbf, 0xa0, 0x84, + 0xef, 0x45, 0xb2, 0x7d, 0x48, 0x7b, 0x62, 0xa7 +}; +static const u8 key33[] __initconst = { + 0xb9, 0x8f, 0x6a, 0xad, 0xb4, 0x6f, 0xb5, 0xdc, + 0x48, 0xfa, 0x43, 0x57, 0x62, 0x97, 0xef, 0x89, + 0x4c, 0x5a, 0x7b, 0x67, 0xb8, 0x9d, 0xf0, 0x42, + 0x2b, 0x8f, 0xf3, 0x18, 0x05, 0x2e, 0x48, 0xd0 +}; +enum { nonce33 = 0x31f16488fe8447f5ULL }; + +static const u8 input34[] __initconst = { + 0xda, 0x2b, 0x3d, 0x63, 0x9e, 0x4f, 0xc2, 0xb8, + 0x7f, 0xc2, 0x1a, 0x8b, 0x0d, 0x95, 0x65, 0x55, + 0x52, 0xba, 0x51, 0x51, 0xc0, 0x61, 0x9f, 0x0a, + 0x5d, 0xb0, 0x59, 0x8c, 0x64, 0x6a, 0xab, 0xf5, + 0x57 +}; +static const u8 output34[] __initconst = { + 0x5c, 0xf6, 0x62, 0x24, 0x8c, 0x45, 0xa3, 0x26, + 0xd0, 0xe4, 0x88, 0x1c, 0xed, 0xc4, 0x26, 0x58, + 0xb5, 0x5d, 0x92, 0xc4, 0x17, 0x44, 0x1c, 0xb8, + 0x2c, 0xf3, 0x55, 0x7e, 0xd6, 0xe5, 0xb3, 0x65, + 0xa8 +}; +static const u8 key34[] __initconst = { + 0xde, 0xd1, 0x27, 0xb7, 0x7c, 0xfa, 0xa6, 0x78, + 0x39, 0x80, 0xdf, 0xb7, 0x46, 0xac, 0x71, 0x26, + 0xd0, 0x2a, 0x56, 0x79, 0x12, 0xeb, 0x26, 0x37, + 0x01, 0x0d, 0x30, 0xe0, 0xe3, 0x66, 0xb2, 0xf4 +}; +enum { nonce34 = 0x92d0d9b252c24149ULL }; + +static const u8 input35[] __initconst = { + 0x3a, 0x15, 0x5b, 0x75, 0x6e, 0xd0, 0x52, 0x20, + 0x6c, 0x82, 0xfa, 0xce, 0x5b, 0xea, 0xf5, 0x43, + 0xc1, 0x81, 0x7c, 0xb2, 0xac, 0x16, 0x3f, 0xd3, + 0x5a, 0xaf, 0x55, 0x98, 0xf4, 0xc6, 0xba, 0x71, + 0x25, 0x8b +}; +static const u8 output35[] __initconst = { + 0xb3, 0xaf, 0xac, 0x6d, 0x4d, 0xc7, 0x68, 0x56, + 0x50, 0x5b, 0x69, 0x2a, 0xe5, 0x90, 0xf9, 0x5f, + 0x99, 0x88, 0xff, 0x0c, 0xa6, 0xb1, 0x83, 0xd6, + 0x80, 0xa6, 0x1b, 0xde, 0x94, 0xa4, 0x2c, 0xc3, + 0x74, 0xfa +}; +static const u8 key35[] __initconst = { + 0xd8, 0x24, 0xe2, 0x06, 0xd7, 0x7a, 0xce, 0x81, + 0x52, 0x72, 0x02, 0x69, 0x89, 0xc4, 0xe9, 0x53, + 0x3b, 0x08, 0x5f, 0x98, 0x1e, 0x1b, 0x99, 0x6e, + 0x28, 0x17, 0x6d, 0xba, 0xc0, 0x96, 0xf9, 0x3c 
+}; +enum { nonce35 = 0x7baf968c4c8e3a37ULL }; + +static const u8 input36[] __initconst = { + 0x31, 0x5d, 0x4f, 0xe3, 0xac, 0xad, 0x17, 0xa6, + 0xb5, 0x01, 0xe2, 0xc6, 0xd4, 0x7e, 0xc4, 0x80, + 0xc0, 0x59, 0x72, 0xbb, 0x4b, 0x74, 0x6a, 0x41, + 0x0f, 0x9c, 0xf6, 0xca, 0x20, 0xb3, 0x73, 0x07, + 0x6b, 0x02, 0x2a +}; +static const u8 output36[] __initconst = { + 0xf9, 0x09, 0x92, 0x94, 0x7e, 0x31, 0xf7, 0x53, + 0xe8, 0x8a, 0x5b, 0x20, 0xef, 0x9b, 0x45, 0x81, + 0xba, 0x5e, 0x45, 0x63, 0xc1, 0xc7, 0x9e, 0x06, + 0x0e, 0xd9, 0x62, 0x8e, 0x96, 0xf9, 0xfa, 0x43, + 0x4d, 0xd4, 0x28 +}; +static const u8 key36[] __initconst = { + 0x13, 0x30, 0x4c, 0x06, 0xae, 0x18, 0xde, 0x03, + 0x1d, 0x02, 0x40, 0xf5, 0xbb, 0x19, 0xe3, 0x88, + 0x41, 0xb1, 0x29, 0x15, 0x97, 0xc2, 0x69, 0x3f, + 0x32, 0x2a, 0x0c, 0x8b, 0xcf, 0x83, 0x8b, 0x6c +}; +enum { nonce36 = 0x226d251d475075a0ULL }; + +static const u8 input37[] __initconst = { + 0x10, 0x18, 0xbe, 0xfd, 0x66, 0xc9, 0x77, 0xcc, + 0x43, 0xe5, 0x46, 0x0b, 0x08, 0x8b, 0xae, 0x11, + 0x86, 0x15, 0xc2, 0xf6, 0x45, 0xd4, 0x5f, 0xd6, + 0xb6, 0x5f, 0x9f, 0x3e, 0x97, 0xb7, 0xd4, 0xad, + 0x0b, 0xe8, 0x31, 0x94 +}; +static const u8 output37[] __initconst = { + 0x03, 0x2c, 0x1c, 0xee, 0xc6, 0xdd, 0xed, 0x38, + 0x80, 0x6d, 0x84, 0x16, 0xc3, 0xc2, 0x04, 0x63, + 0xcd, 0xa7, 0x6e, 0x36, 0x8b, 0xed, 0x78, 0x63, + 0x95, 0xfc, 0x69, 0x7a, 0x3f, 0x8d, 0x75, 0x6b, + 0x6c, 0x26, 0x56, 0x4d +}; +static const u8 key37[] __initconst = { + 0xac, 0x84, 0x4d, 0xa9, 0x29, 0x49, 0x3c, 0x39, + 0x7f, 0xd9, 0xa6, 0x01, 0xf3, 0x7e, 0xfa, 0x4a, + 0x14, 0x80, 0x22, 0x74, 0xf0, 0x29, 0x30, 0x2d, + 0x07, 0x21, 0xda, 0xc0, 0x4d, 0x70, 0x56, 0xa2 +}; +enum { nonce37 = 0x167823ce3b64925aULL }; + +static const u8 input38[] __initconst = { + 0x30, 0x8f, 0xfa, 0x24, 0x29, 0xb1, 0xfb, 0xce, + 0x31, 0x62, 0xdc, 0xd0, 0x46, 0xab, 0xe1, 0x31, + 0xd9, 0xae, 0x60, 0x0d, 0xca, 0x0a, 0x49, 0x12, + 0x3d, 0x92, 0xe9, 0x91, 0x67, 0x12, 0x62, 0x18, + 0x89, 0xe2, 0xf9, 0x1c, 0xcc +}; +static const u8 output38[] __initconst = { + 0x56, 0x9c, 0xc8, 0x7a, 0xc5, 0x98, 0xa3, 0x0f, + 0xba, 0xd5, 0x3e, 0xe1, 0xc9, 0x33, 0x64, 0x33, + 0xf0, 0xd5, 0xf7, 0x43, 0x66, 0x0e, 0x08, 0x9a, + 0x6e, 0x09, 0xe4, 0x01, 0x0d, 0x1e, 0x2f, 0x4b, + 0xed, 0x9c, 0x08, 0x8c, 0x03 +}; +static const u8 key38[] __initconst = { + 0x77, 0x52, 0x2a, 0x23, 0xf1, 0xc5, 0x96, 0x2b, + 0x89, 0x4f, 0x3e, 0xf3, 0xff, 0x0e, 0x94, 0xce, + 0xf1, 0xbd, 0x53, 0xf5, 0x77, 0xd6, 0x9e, 0x47, + 0x49, 0x3d, 0x16, 0x64, 0xff, 0x95, 0x42, 0x42 +}; +enum { nonce38 = 0xff629d7b82cef357ULL }; + +static const u8 input39[] __initconst = { + 0x38, 0x26, 0x27, 0xd0, 0xc2, 0xf5, 0x34, 0xba, + 0xda, 0x0f, 0x1c, 0x1c, 0x9a, 0x70, 0xe5, 0x8a, + 0x78, 0x2d, 0x8f, 0x9a, 0xbf, 0x89, 0x6a, 0xfd, + 0xd4, 0x9c, 0x33, 0xf1, 0xb6, 0x89, 0x16, 0xe3, + 0x6a, 0x00, 0xfa, 0x3a, 0x0f, 0x26 +}; +static const u8 output39[] __initconst = { + 0x0f, 0xaf, 0x91, 0x6d, 0x9c, 0x99, 0xa4, 0xf7, + 0x3b, 0x9d, 0x9a, 0x98, 0xca, 0xbb, 0x50, 0x48, + 0xee, 0xcb, 0x5d, 0xa1, 0x37, 0x2d, 0x36, 0x09, + 0x2a, 0xe2, 0x1c, 0x3d, 0x98, 0x40, 0x1c, 0x16, + 0x56, 0xa7, 0x98, 0xe9, 0x7d, 0x2b +}; +static const u8 key39[] __initconst = { + 0x6e, 0x83, 0x15, 0x4d, 0xf8, 0x78, 0xa8, 0x0e, + 0x71, 0x37, 0xd4, 0x6e, 0x28, 0x5c, 0x06, 0xa1, + 0x2d, 0x6c, 0x72, 0x7a, 0xfd, 0xf8, 0x65, 0x1a, + 0xb8, 0xe6, 0x29, 0x7b, 0xe5, 0xb3, 0x23, 0x79 +}; +enum { nonce39 = 0xa4d8c491cf093e9dULL }; + +static const u8 input40[] __initconst = { + 0x8f, 0x32, 0x7c, 0x40, 0x37, 0x95, 0x08, 0x00, + 0x00, 0xfe, 0x2f, 0x95, 0x20, 0x12, 0x40, 0x18, + 
0x5e, 0x7e, 0x5e, 0x99, 0xee, 0x8d, 0x91, 0x7d, + 0x50, 0x7d, 0x21, 0x45, 0x27, 0xe1, 0x7f, 0xd4, + 0x73, 0x10, 0xe1, 0x33, 0xbc, 0xf8, 0xdd +}; +static const u8 output40[] __initconst = { + 0x78, 0x7c, 0xdc, 0x55, 0x2b, 0xd9, 0x2b, 0x3a, + 0xdd, 0x56, 0x11, 0x52, 0xd3, 0x2e, 0xe0, 0x0d, + 0x23, 0x20, 0x8a, 0xf1, 0x4f, 0xee, 0xf1, 0x68, + 0xf6, 0xdc, 0x53, 0xcf, 0x17, 0xd4, 0xf0, 0x6c, + 0xdc, 0x80, 0x5f, 0x1c, 0xa4, 0x91, 0x05 +}; +static const u8 key40[] __initconst = { + 0x0d, 0x86, 0xbf, 0x8a, 0xba, 0x9e, 0x39, 0x91, + 0xa8, 0xe7, 0x22, 0xf0, 0x0c, 0x43, 0x18, 0xe4, + 0x1f, 0xb0, 0xaf, 0x8a, 0x34, 0x31, 0xf4, 0x41, + 0xf0, 0x89, 0x85, 0xca, 0x5d, 0x05, 0x3b, 0x94 +}; +enum { nonce40 = 0xae7acc4f5986439eULL }; + +static const u8 input41[] __initconst = { + 0x20, 0x5f, 0xc1, 0x83, 0x36, 0x02, 0x76, 0x96, + 0xf0, 0xbf, 0x8e, 0x0e, 0x1a, 0xd1, 0xc7, 0x88, + 0x18, 0xc7, 0x09, 0xc4, 0x15, 0xd9, 0x4f, 0x5e, + 0x1f, 0xb3, 0xb4, 0x6d, 0xcb, 0xa0, 0xd6, 0x8a, + 0x3b, 0x40, 0x8e, 0x80, 0xf1, 0xe8, 0x8f, 0x5f +}; +static const u8 output41[] __initconst = { + 0x0b, 0xd1, 0x49, 0x9a, 0x9d, 0xe8, 0x97, 0xb8, + 0xd1, 0xeb, 0x90, 0x62, 0x37, 0xd2, 0x99, 0x15, + 0x67, 0x6d, 0x27, 0x93, 0xce, 0x37, 0x65, 0xa2, + 0x94, 0x88, 0xd6, 0x17, 0xbc, 0x1c, 0x6e, 0xa2, + 0xcc, 0xfb, 0x81, 0x0e, 0x30, 0x60, 0x5a, 0x6f +}; +static const u8 key41[] __initconst = { + 0x36, 0x27, 0x57, 0x01, 0x21, 0x68, 0x97, 0xc7, + 0x00, 0x67, 0x7b, 0xe9, 0x0f, 0x55, 0x49, 0xbb, + 0x92, 0x18, 0x98, 0xf5, 0x5e, 0xbc, 0xe7, 0x5a, + 0x9d, 0x3d, 0xc7, 0xbd, 0x59, 0xec, 0x82, 0x8e +}; +enum { nonce41 = 0x5da05e4c8dfab464ULL }; + +static const u8 input42[] __initconst = { + 0xca, 0x30, 0xcd, 0x63, 0xf0, 0x2d, 0xf1, 0x03, + 0x4d, 0x0d, 0xf2, 0xf7, 0x6f, 0xae, 0xd6, 0x34, + 0xea, 0xf6, 0x13, 0xcf, 0x1c, 0xa0, 0xd0, 0xe8, + 0xa4, 0x78, 0x80, 0x3b, 0x1e, 0xa5, 0x32, 0x4c, + 0x73, 0x12, 0xd4, 0x6a, 0x94, 0xbc, 0xba, 0x80, + 0x5e +}; +static const u8 output42[] __initconst = { + 0xec, 0x3f, 0x18, 0x31, 0xc0, 0x7b, 0xb5, 0xe2, + 0xad, 0xf3, 0xec, 0xa0, 0x16, 0x9d, 0xef, 0xce, + 0x05, 0x65, 0x59, 0x9d, 0x5a, 0xca, 0x3e, 0x13, + 0xb9, 0x5d, 0x5d, 0xb5, 0xeb, 0xae, 0xc0, 0x87, + 0xbb, 0xfd, 0xe7, 0xe4, 0x89, 0x5b, 0xd2, 0x6c, + 0x56 +}; +static const u8 key42[] __initconst = { + 0x7c, 0x6b, 0x7e, 0x77, 0xcc, 0x8c, 0x1b, 0x03, + 0x8b, 0x2a, 0xb3, 0x7c, 0x5a, 0x73, 0xcc, 0xac, + 0xdd, 0x53, 0x54, 0x0c, 0x85, 0xed, 0xcd, 0x47, + 0x24, 0xc1, 0xb8, 0x9b, 0x2e, 0x41, 0x92, 0x36 +}; +enum { nonce42 = 0xe4d7348b09682c9cULL }; + +static const u8 input43[] __initconst = { + 0x52, 0xf2, 0x4b, 0x7c, 0xe5, 0x58, 0xe8, 0xd2, + 0xb7, 0xf3, 0xa1, 0x29, 0x68, 0xa2, 0x50, 0x50, + 0xae, 0x9c, 0x1b, 0xe2, 0x67, 0x77, 0xe2, 0xdb, + 0x85, 0x55, 0x7e, 0x84, 0x8a, 0x12, 0x3c, 0xb6, + 0x2e, 0xed, 0xd3, 0xec, 0x47, 0x68, 0xfa, 0x52, + 0x46, 0x9d +}; +static const u8 output43[] __initconst = { + 0x1b, 0xf0, 0x05, 0xe4, 0x1c, 0xd8, 0x74, 0x9a, + 0xf0, 0xee, 0x00, 0x54, 0xce, 0x02, 0x83, 0x15, + 0xfb, 0x23, 0x35, 0x78, 0xc3, 0xda, 0x98, 0xd8, + 0x9d, 0x1b, 0xb2, 0x51, 0x82, 0xb0, 0xff, 0xbe, + 0x05, 0xa9, 0xa4, 0x04, 0xba, 0xea, 0x4b, 0x73, + 0x47, 0x6e +}; +static const u8 key43[] __initconst = { + 0xeb, 0xec, 0x0e, 0xa1, 0x65, 0xe2, 0x99, 0x46, + 0xd8, 0x54, 0x8c, 0x4a, 0x93, 0xdf, 0x6d, 0xbf, + 0x93, 0x34, 0x94, 0x57, 0xc9, 0x12, 0x9d, 0x68, + 0x05, 0xc5, 0x05, 0xad, 0x5a, 0xc9, 0x2a, 0x3b +}; +enum { nonce43 = 0xe14f6a902b7827fULL }; + +static const u8 input44[] __initconst = { + 0x3e, 0x22, 0x3e, 0x8e, 0xcd, 0x18, 0xe2, 0xa3, + 0x8d, 0x8b, 0x38, 0xc3, 0x02, 0xa3, 0x31, 
0x48, + 0xc6, 0x0e, 0xec, 0x99, 0x51, 0x11, 0x6d, 0x8b, + 0x32, 0x35, 0x3b, 0x08, 0x58, 0x76, 0x25, 0x30, + 0xe2, 0xfc, 0xa2, 0x46, 0x7d, 0x6e, 0x34, 0x87, + 0xac, 0x42, 0xbf +}; +static const u8 output44[] __initconst = { + 0x08, 0x92, 0x58, 0x02, 0x1a, 0xf4, 0x1f, 0x3d, + 0x38, 0x7b, 0x6b, 0xf6, 0x84, 0x07, 0xa3, 0x19, + 0x17, 0x2a, 0xed, 0x57, 0x1c, 0xf9, 0x55, 0x37, + 0x4e, 0xf4, 0x68, 0x68, 0x82, 0x02, 0x4f, 0xca, + 0x21, 0x00, 0xc6, 0x66, 0x79, 0x53, 0x19, 0xef, + 0x7f, 0xdd, 0x74 +}; +static const u8 key44[] __initconst = { + 0x73, 0xb6, 0x3e, 0xf4, 0x57, 0x52, 0xa6, 0x43, + 0x51, 0xd8, 0x25, 0x00, 0xdb, 0xb4, 0x52, 0x69, + 0xd6, 0x27, 0x49, 0xeb, 0x9b, 0xf1, 0x7b, 0xa0, + 0xd6, 0x7c, 0x9c, 0xd8, 0x95, 0x03, 0x69, 0x26 +}; +enum { nonce44 = 0xf5e6dc4f35ce24e5ULL }; + +static const u8 input45[] __initconst = { + 0x55, 0x76, 0xc0, 0xf1, 0x74, 0x03, 0x7a, 0x6d, + 0x14, 0xd8, 0x36, 0x2c, 0x9f, 0x9a, 0x59, 0x7a, + 0x2a, 0xf5, 0x77, 0x84, 0x70, 0x7c, 0x1d, 0x04, + 0x90, 0x45, 0xa4, 0xc1, 0x5e, 0xdd, 0x2e, 0x07, + 0x18, 0x34, 0xa6, 0x85, 0x56, 0x4f, 0x09, 0xaf, + 0x2f, 0x83, 0xe1, 0xc6 +}; +static const u8 output45[] __initconst = { + 0x22, 0x46, 0xe4, 0x0b, 0x3a, 0x55, 0xcc, 0x9b, + 0xf0, 0xc0, 0x53, 0xcd, 0x95, 0xc7, 0x57, 0x6c, + 0x77, 0x46, 0x41, 0x72, 0x07, 0xbf, 0xa8, 0xe5, + 0x68, 0x69, 0xd8, 0x1e, 0x45, 0xc1, 0xa2, 0x50, + 0xa5, 0xd1, 0x62, 0xc9, 0x5a, 0x7d, 0x08, 0x14, + 0xae, 0x44, 0x16, 0xb9 +}; +static const u8 key45[] __initconst = { + 0x41, 0xf3, 0x88, 0xb2, 0x51, 0x25, 0x47, 0x02, + 0x39, 0xe8, 0x15, 0x3a, 0x22, 0x78, 0x86, 0x0b, + 0xf9, 0x1e, 0x8d, 0x98, 0xb2, 0x22, 0x82, 0xac, + 0x42, 0x94, 0xde, 0x64, 0xf0, 0xfd, 0xb3, 0x6c +}; +enum { nonce45 = 0xf51a582daf4aa01aULL }; + +static const u8 input46[] __initconst = { + 0xf6, 0xff, 0x20, 0xf9, 0x26, 0x7e, 0x0f, 0xa8, + 0x6a, 0x45, 0x5a, 0x91, 0x73, 0xc4, 0x4c, 0x63, + 0xe5, 0x61, 0x59, 0xca, 0xec, 0xc0, 0x20, 0x35, + 0xbc, 0x9f, 0x58, 0x9c, 0x5e, 0xa1, 0x17, 0x46, + 0xcc, 0xab, 0x6e, 0xd0, 0x4f, 0x24, 0xeb, 0x05, + 0x4d, 0x40, 0x41, 0xe0, 0x9d +}; +static const u8 output46[] __initconst = { + 0x31, 0x6e, 0x63, 0x3f, 0x9c, 0xe6, 0xb1, 0xb7, + 0xef, 0x47, 0x46, 0xd7, 0xb1, 0x53, 0x42, 0x2f, + 0x2c, 0xc8, 0x01, 0xae, 0x8b, 0xec, 0x42, 0x2c, + 0x6b, 0x2c, 0x9c, 0xb2, 0xf0, 0x29, 0x06, 0xa5, + 0xcd, 0x7e, 0xc7, 0x3a, 0x38, 0x98, 0x8a, 0xde, + 0x03, 0x29, 0x14, 0x8f, 0xf9 +}; +static const u8 key46[] __initconst = { + 0xac, 0xa6, 0x44, 0x4a, 0x0d, 0x42, 0x10, 0xbc, + 0xd3, 0xc9, 0x8e, 0x9e, 0x71, 0xa3, 0x1c, 0x14, + 0x9d, 0x65, 0x0d, 0x49, 0x4d, 0x8c, 0xec, 0x46, + 0xe1, 0x41, 0xcd, 0xf5, 0xfc, 0x82, 0x75, 0x34 +}; +enum { nonce46 = 0x25f85182df84dec5ULL }; + +static const u8 input47[] __initconst = { + 0xa1, 0xd2, 0xf2, 0x52, 0x2f, 0x79, 0x50, 0xb2, + 0x42, 0x29, 0x5b, 0x44, 0x20, 0xf9, 0xbd, 0x85, + 0xb7, 0x65, 0x77, 0x86, 0xce, 0x3e, 0x1c, 0xe4, + 0x70, 0x80, 0xdd, 0x72, 0x07, 0x48, 0x0f, 0x84, + 0x0d, 0xfd, 0x97, 0xc0, 0xb7, 0x48, 0x9b, 0xb4, + 0xec, 0xff, 0x73, 0x14, 0x99, 0xe4 +}; +static const u8 output47[] __initconst = { + 0xe5, 0x3c, 0x78, 0x66, 0x31, 0x1e, 0xd6, 0xc4, + 0x9e, 0x71, 0xb3, 0xd7, 0xd5, 0xad, 0x84, 0xf2, + 0x78, 0x61, 0x77, 0xf8, 0x31, 0xf0, 0x13, 0xad, + 0x66, 0xf5, 0x31, 0x7d, 0xeb, 0xdf, 0xaf, 0xcb, + 0xac, 0x28, 0x6c, 0xc2, 0x9e, 0xe7, 0x78, 0xa2, + 0xa2, 0x58, 0xce, 0x84, 0x76, 0x70 +}; +static const u8 key47[] __initconst = { + 0x05, 0x7f, 0xc0, 0x7f, 0x37, 0x20, 0x71, 0x02, + 0x3a, 0xe7, 0x20, 0x5a, 0x0a, 0x8f, 0x79, 0x5a, + 0xfe, 0xbb, 0x43, 0x4d, 0x2f, 0xcb, 0xf6, 0x9e, + 0xa2, 0x97, 0x00, 0xad, 
0x0d, 0x51, 0x7e, 0x17 +}; +enum { nonce47 = 0xae707c60f54de32bULL }; + +static const u8 input48[] __initconst = { + 0x80, 0x93, 0x77, 0x2e, 0x8d, 0xe8, 0xe6, 0xc1, + 0x27, 0xe6, 0xf2, 0x89, 0x5b, 0x33, 0x62, 0x18, + 0x80, 0x6e, 0x17, 0x22, 0x8e, 0x83, 0x31, 0x40, + 0x8f, 0xc9, 0x5c, 0x52, 0x6c, 0x0e, 0xa5, 0xe9, + 0x6c, 0x7f, 0xd4, 0x6a, 0x27, 0x56, 0x99, 0xce, + 0x8d, 0x37, 0x59, 0xaf, 0xc0, 0x0e, 0xe1 +}; +static const u8 output48[] __initconst = { + 0x02, 0xa4, 0x2e, 0x33, 0xb7, 0x7c, 0x2b, 0x9a, + 0x18, 0x5a, 0xba, 0x53, 0x38, 0xaf, 0x00, 0xeb, + 0xd8, 0x3d, 0x02, 0x77, 0x43, 0x45, 0x03, 0x91, + 0xe2, 0x5e, 0x4e, 0xeb, 0x50, 0xd5, 0x5b, 0xe0, + 0xf3, 0x33, 0xa7, 0xa2, 0xac, 0x07, 0x6f, 0xeb, + 0x3f, 0x6c, 0xcd, 0xf2, 0x6c, 0x61, 0x64 +}; +static const u8 key48[] __initconst = { + 0xf3, 0x79, 0xe7, 0xf8, 0x0e, 0x02, 0x05, 0x6b, + 0x83, 0x1a, 0xe7, 0x86, 0x6b, 0xe6, 0x8f, 0x3f, + 0xd3, 0xa3, 0xe4, 0x6e, 0x29, 0x06, 0xad, 0xbc, + 0xe8, 0x33, 0x56, 0x39, 0xdf, 0xb0, 0xe2, 0xfe +}; +enum { nonce48 = 0xd849b938c6569da0ULL }; + +static const u8 input49[] __initconst = { + 0x89, 0x3b, 0x88, 0x9e, 0x7b, 0x38, 0x16, 0x9f, + 0xa1, 0x28, 0xf6, 0xf5, 0x23, 0x74, 0x28, 0xb0, + 0xdf, 0x6c, 0x9e, 0x8a, 0x71, 0xaf, 0xed, 0x7a, + 0x39, 0x21, 0x57, 0x7d, 0x31, 0x6c, 0xee, 0x0d, + 0x11, 0x8d, 0x41, 0x9a, 0x5f, 0xb7, 0x27, 0x40, + 0x08, 0xad, 0xc6, 0xe0, 0x00, 0x43, 0x9e, 0xae +}; +static const u8 output49[] __initconst = { + 0x4d, 0xfd, 0xdb, 0x4c, 0x77, 0xc1, 0x05, 0x07, + 0x4d, 0x6d, 0x32, 0xcb, 0x2e, 0x0e, 0xff, 0x65, + 0xc9, 0x27, 0xeb, 0xa9, 0x46, 0x5b, 0xab, 0x06, + 0xe6, 0xb6, 0x5a, 0x1e, 0x00, 0xfb, 0xcf, 0xe4, + 0xb9, 0x71, 0x40, 0x10, 0xef, 0x12, 0x39, 0xf0, + 0xea, 0x40, 0xb8, 0x9a, 0xa2, 0x85, 0x38, 0x48 +}; +static const u8 key49[] __initconst = { + 0xe7, 0x10, 0x40, 0xd9, 0x66, 0xc0, 0xa8, 0x6d, + 0xa3, 0xcc, 0x8b, 0xdd, 0x93, 0xf2, 0x6e, 0xe0, + 0x90, 0x7f, 0xd0, 0xf4, 0x37, 0x0c, 0x8b, 0x9b, + 0x4c, 0x4d, 0xe6, 0xf2, 0x1f, 0xe9, 0x95, 0x24 +}; +enum { nonce49 = 0xf269817bdae01bc0ULL }; + +static const u8 input50[] __initconst = { + 0xda, 0x5b, 0x60, 0xcd, 0xed, 0x58, 0x8e, 0x7f, + 0xae, 0xdd, 0xc8, 0x2e, 0x16, 0x90, 0xea, 0x4b, + 0x0c, 0x74, 0x14, 0x35, 0xeb, 0xee, 0x2c, 0xff, + 0x46, 0x99, 0x97, 0x6e, 0xae, 0xa7, 0x8e, 0x6e, + 0x38, 0xfe, 0x63, 0xe7, 0x51, 0xd9, 0xaa, 0xce, + 0x7b, 0x1e, 0x7e, 0x5d, 0xc0, 0xe8, 0x10, 0x06, + 0x14 +}; +static const u8 output50[] __initconst = { + 0xe4, 0xe5, 0x86, 0x1b, 0x66, 0x19, 0xac, 0x49, + 0x1c, 0xbd, 0xee, 0x03, 0xaf, 0x11, 0xfc, 0x1f, + 0x6a, 0xd2, 0x50, 0x5c, 0xea, 0x2c, 0xa5, 0x75, + 0xfd, 0xb7, 0x0e, 0x80, 0x8f, 0xed, 0x3f, 0x31, + 0x47, 0xac, 0x67, 0x43, 0xb8, 0x2e, 0xb4, 0x81, + 0x6d, 0xe4, 0x1e, 0xb7, 0x8b, 0x0c, 0x53, 0xa9, + 0x26 +}; +static const u8 key50[] __initconst = { + 0xd7, 0xb2, 0x04, 0x76, 0x30, 0xcc, 0x38, 0x45, + 0xef, 0xdb, 0xc5, 0x86, 0x08, 0x61, 0xf0, 0xee, + 0x6d, 0xd8, 0x22, 0x04, 0x8c, 0xfb, 0xcb, 0x37, + 0xa6, 0xfb, 0x95, 0x22, 0xe1, 0x87, 0xb7, 0x6f +}; +enum { nonce50 = 0x3b44d09c45607d38ULL }; + +static const u8 input51[] __initconst = { + 0xa9, 0x41, 0x02, 0x4b, 0xd7, 0xd5, 0xd1, 0xf1, + 0x21, 0x55, 0xb2, 0x75, 0x6d, 0x77, 0x1b, 0x86, + 0xa9, 0xc8, 0x90, 0xfd, 0xed, 0x4a, 0x7b, 0x6c, + 0xb2, 0x5f, 0x9b, 0x5f, 0x16, 0xa1, 0x54, 0xdb, + 0xd6, 0x3f, 0x6a, 0x7f, 0x2e, 0x51, 0x9d, 0x49, + 0x5b, 0xa5, 0x0e, 0xf9, 0xfb, 0x2a, 0x38, 0xff, + 0x20, 0x8c +}; +static const u8 output51[] __initconst = { + 0x18, 0xf7, 0x88, 0xc1, 0x72, 0xfd, 0x90, 0x4b, + 0xa9, 0x2d, 0xdb, 0x47, 0xb0, 0xa5, 0xc4, 0x37, + 0x01, 0x95, 0xc4, 0xb1, 
0xab, 0xc5, 0x5b, 0xcd, + 0xe1, 0x97, 0x78, 0x13, 0xde, 0x6a, 0xff, 0x36, + 0xce, 0xa4, 0x67, 0xc5, 0x4a, 0x45, 0x2b, 0xd9, + 0xff, 0x8f, 0x06, 0x7c, 0x63, 0xbb, 0x83, 0x17, + 0xb4, 0x6b +}; +static const u8 key51[] __initconst = { + 0x82, 0x1a, 0x79, 0xab, 0x9a, 0xb5, 0x49, 0x6a, + 0x30, 0x6b, 0x99, 0x19, 0x11, 0xc7, 0xa2, 0xf4, + 0xca, 0x55, 0xb9, 0xdd, 0xe7, 0x2f, 0xe7, 0xc1, + 0xdd, 0x27, 0xad, 0x80, 0xf2, 0x56, 0xad, 0xf3 +}; +enum { nonce51 = 0xe93aff94ca71a4a6ULL }; + +static const u8 input52[] __initconst = { + 0x89, 0xdd, 0xf3, 0xfa, 0xb6, 0xc1, 0xaa, 0x9a, + 0xc8, 0xad, 0x6b, 0x00, 0xa1, 0x65, 0xea, 0x14, + 0x55, 0x54, 0x31, 0x8f, 0xf0, 0x03, 0x84, 0x51, + 0x17, 0x1e, 0x0a, 0x93, 0x6e, 0x79, 0x96, 0xa3, + 0x2a, 0x85, 0x9c, 0x89, 0xf8, 0xd1, 0xe2, 0x15, + 0x95, 0x05, 0xf4, 0x43, 0x4d, 0x6b, 0xf0, 0x71, + 0x3b, 0x3e, 0xba +}; +static const u8 output52[] __initconst = { + 0x0c, 0x42, 0x6a, 0xb3, 0x66, 0x63, 0x5d, 0x2c, + 0x9f, 0x3d, 0xa6, 0x6e, 0xc7, 0x5f, 0x79, 0x2f, + 0x50, 0xe3, 0xd6, 0x07, 0x56, 0xa4, 0x2b, 0x2d, + 0x8d, 0x10, 0xc0, 0x6c, 0xa2, 0xfc, 0x97, 0xec, + 0x3f, 0x5c, 0x8d, 0x59, 0xbe, 0x84, 0xf1, 0x3e, + 0x38, 0x47, 0x4f, 0x75, 0x25, 0x66, 0x88, 0x14, + 0x03, 0xdd, 0xde +}; +static const u8 key52[] __initconst = { + 0x4f, 0xb0, 0x27, 0xb6, 0xdd, 0x24, 0x0c, 0xdb, + 0x6b, 0x71, 0x2e, 0xac, 0xfc, 0x3f, 0xa6, 0x48, + 0x5d, 0xd5, 0xff, 0x53, 0xb5, 0x62, 0xf1, 0xe0, + 0x93, 0xfe, 0x39, 0x4c, 0x9f, 0x03, 0x11, 0xa7 +}; +enum { nonce52 = 0xed8becec3bdf6f25ULL }; + +static const u8 input53[] __initconst = { + 0x68, 0xd1, 0xc7, 0x74, 0x44, 0x1c, 0x84, 0xde, + 0x27, 0x27, 0x35, 0xf0, 0x18, 0x0b, 0x57, 0xaa, + 0xd0, 0x1a, 0xd3, 0x3b, 0x5e, 0x5c, 0x62, 0x93, + 0xd7, 0x6b, 0x84, 0x3b, 0x71, 0x83, 0x77, 0x01, + 0x3e, 0x59, 0x45, 0xf4, 0x77, 0x6c, 0x6b, 0xcb, + 0x88, 0x45, 0x09, 0x1d, 0xc6, 0x45, 0x6e, 0xdc, + 0x6e, 0x51, 0xb8, 0x28 +}; +static const u8 output53[] __initconst = { + 0xc5, 0x90, 0x96, 0x78, 0x02, 0xf5, 0xc4, 0x3c, + 0xde, 0xd4, 0xd4, 0xc6, 0xa7, 0xad, 0x12, 0x47, + 0x45, 0xce, 0xcd, 0x8c, 0x35, 0xcc, 0xa6, 0x9e, + 0x5a, 0xc6, 0x60, 0xbb, 0xe3, 0xed, 0xec, 0x68, + 0x3f, 0x64, 0xf7, 0x06, 0x63, 0x9c, 0x8c, 0xc8, + 0x05, 0x3a, 0xad, 0x32, 0x79, 0x8b, 0x45, 0x96, + 0x93, 0x73, 0x4c, 0xe0 +}; +static const u8 key53[] __initconst = { + 0x42, 0x4b, 0x20, 0x81, 0x49, 0x50, 0xe9, 0xc2, + 0x43, 0x69, 0x36, 0xe7, 0x68, 0xae, 0xd5, 0x7e, + 0x42, 0x1a, 0x1b, 0xb4, 0x06, 0x4d, 0xa7, 0x17, + 0xb5, 0x31, 0xd6, 0x0c, 0xb0, 0x5c, 0x41, 0x0b +}; +enum { nonce53 = 0xf44ce1931fbda3d7ULL }; + +static const u8 input54[] __initconst = { + 0x7b, 0xf6, 0x8b, 0xae, 0xc0, 0xcb, 0x10, 0x8e, + 0xe8, 0xd8, 0x2e, 0x3b, 0x14, 0xba, 0xb4, 0xd2, + 0x58, 0x6b, 0x2c, 0xec, 0xc1, 0x81, 0x71, 0xb4, + 0xc6, 0xea, 0x08, 0xc5, 0xc9, 0x78, 0xdb, 0xa2, + 0xfa, 0x44, 0x50, 0x9b, 0xc8, 0x53, 0x8d, 0x45, + 0x42, 0xe7, 0x09, 0xc4, 0x29, 0xd8, 0x75, 0x02, + 0xbb, 0xb2, 0x78, 0xcf, 0xe7 +}; +static const u8 output54[] __initconst = { + 0xaf, 0x2c, 0x83, 0x26, 0x6e, 0x7f, 0xa6, 0xe9, + 0x03, 0x75, 0xfe, 0xfe, 0x87, 0x58, 0xcf, 0xb5, + 0xbc, 0x3c, 0x9d, 0xa1, 0x6e, 0x13, 0xf1, 0x0f, + 0x9e, 0xbc, 0xe0, 0x54, 0x24, 0x32, 0xce, 0x95, + 0xe6, 0xa5, 0x59, 0x3d, 0x24, 0x1d, 0x8f, 0xb1, + 0x74, 0x6c, 0x56, 0xe7, 0x96, 0xc1, 0x91, 0xc8, + 0x2d, 0x0e, 0xb7, 0x51, 0x10 +}; +static const u8 key54[] __initconst = { + 0x00, 0x68, 0x74, 0xdc, 0x30, 0x9e, 0xe3, 0x52, + 0xa9, 0xae, 0xb6, 0x7c, 0xa1, 0xdc, 0x12, 0x2d, + 0x98, 0x32, 0x7a, 0x77, 0xe1, 0xdd, 0xa3, 0x76, + 0x72, 0x34, 0x83, 0xd8, 0xb7, 0x69, 0xba, 0x77 +}; +enum { nonce54 = 
0xbea57d79b798b63aULL }; + +static const u8 input55[] __initconst = { + 0xb5, 0xf4, 0x2f, 0xc1, 0x5e, 0x10, 0xa7, 0x4e, + 0x74, 0x3d, 0xa3, 0x96, 0xc0, 0x4d, 0x7b, 0x92, + 0x8f, 0xdb, 0x2d, 0x15, 0x52, 0x6a, 0x95, 0x5e, + 0x40, 0x81, 0x4f, 0x70, 0x73, 0xea, 0x84, 0x65, + 0x3d, 0x9a, 0x4e, 0x03, 0x95, 0xf8, 0x5d, 0x2f, + 0x07, 0x02, 0x13, 0x13, 0xdd, 0x82, 0xe6, 0x3b, + 0xe1, 0x5f, 0xb3, 0x37, 0x9b, 0x88 +}; +static const u8 output55[] __initconst = { + 0xc1, 0x88, 0xbd, 0x92, 0x77, 0xad, 0x7c, 0x5f, + 0xaf, 0xa8, 0x57, 0x0e, 0x40, 0x0a, 0xdc, 0x70, + 0xfb, 0xc6, 0x71, 0xfd, 0xc4, 0x74, 0x60, 0xcc, + 0xa0, 0x89, 0x8e, 0x99, 0xf0, 0x06, 0xa6, 0x7c, + 0x97, 0x42, 0x21, 0x81, 0x6a, 0x07, 0xe7, 0xb3, + 0xf7, 0xa5, 0x03, 0x71, 0x50, 0x05, 0x63, 0x17, + 0xa9, 0x46, 0x0b, 0xff, 0x30, 0x78 +}; +static const u8 key55[] __initconst = { + 0x19, 0x8f, 0xe7, 0xd7, 0x6b, 0x7f, 0x6f, 0x69, + 0x86, 0x91, 0x0f, 0xa7, 0x4a, 0x69, 0x8e, 0x34, + 0xf3, 0xdb, 0xde, 0xaf, 0xf2, 0x66, 0x1d, 0x64, + 0x97, 0x0c, 0xcf, 0xfa, 0x33, 0x84, 0xfd, 0x0c +}; +enum { nonce55 = 0x80aa3d3e2c51ef06ULL }; + +static const u8 input56[] __initconst = { + 0x6b, 0xe9, 0x73, 0x42, 0x27, 0x5e, 0x12, 0xcd, + 0xaa, 0x45, 0x12, 0x8b, 0xb3, 0xe6, 0x54, 0x33, + 0x31, 0x7d, 0xe2, 0x25, 0xc6, 0x86, 0x47, 0x67, + 0x86, 0x83, 0xe4, 0x46, 0xb5, 0x8f, 0x2c, 0xbb, + 0xe4, 0xb8, 0x9f, 0xa2, 0xa4, 0xe8, 0x75, 0x96, + 0x92, 0x51, 0x51, 0xac, 0x8e, 0x2e, 0x6f, 0xfc, + 0xbd, 0x0d, 0xa3, 0x9f, 0x16, 0x55, 0x3e +}; +static const u8 output56[] __initconst = { + 0x42, 0x99, 0x73, 0x6c, 0xd9, 0x4b, 0x16, 0xe5, + 0x18, 0x63, 0x1a, 0xd9, 0x0e, 0xf1, 0x15, 0x2e, + 0x0f, 0x4b, 0xe4, 0x5f, 0xa0, 0x4d, 0xde, 0x9f, + 0xa7, 0x18, 0xc1, 0x0c, 0x0b, 0xae, 0x55, 0xe4, + 0x89, 0x18, 0xa4, 0x78, 0x9d, 0x25, 0x0d, 0xd5, + 0x94, 0x0f, 0xf9, 0x78, 0xa3, 0xa6, 0xe9, 0x9e, + 0x2c, 0x73, 0xf0, 0xf7, 0x35, 0xf3, 0x2b +}; +static const u8 key56[] __initconst = { + 0x7d, 0x12, 0xad, 0x51, 0xd5, 0x6f, 0x8f, 0x96, + 0xc0, 0x5d, 0x9a, 0xd1, 0x7e, 0x20, 0x98, 0x0e, + 0x3c, 0x0a, 0x67, 0x6b, 0x1b, 0x88, 0x69, 0xd4, + 0x07, 0x8c, 0xaf, 0x0f, 0x3a, 0x28, 0xe4, 0x5d +}; +enum { nonce56 = 0x70f4c372fb8b5984ULL }; + +static const u8 input57[] __initconst = { + 0x28, 0xa3, 0x06, 0xe8, 0xe7, 0x08, 0xb9, 0xef, + 0x0d, 0x63, 0x15, 0x99, 0xb2, 0x78, 0x7e, 0xaf, + 0x30, 0x50, 0xcf, 0xea, 0xc9, 0x91, 0x41, 0x2f, + 0x3b, 0x38, 0x70, 0xc4, 0x87, 0xb0, 0x3a, 0xee, + 0x4a, 0xea, 0xe3, 0x83, 0x68, 0x8b, 0xcf, 0xda, + 0x04, 0xa5, 0xbd, 0xb2, 0xde, 0x3c, 0x55, 0x13, + 0xfe, 0x96, 0xad, 0xc1, 0x61, 0x1b, 0x98, 0xde +}; +static const u8 output57[] __initconst = { + 0xf4, 0x44, 0xe9, 0xd2, 0x6d, 0xc2, 0x5a, 0xe9, + 0xfd, 0x7e, 0x41, 0x54, 0x3f, 0xf4, 0x12, 0xd8, + 0x55, 0x0d, 0x12, 0x9b, 0xd5, 0x2e, 0x95, 0xe5, + 0x77, 0x42, 0x3f, 0x2c, 0xfb, 0x28, 0x9d, 0x72, + 0x6d, 0x89, 0x82, 0x27, 0x64, 0x6f, 0x0d, 0x57, + 0xa1, 0x25, 0xa3, 0x6b, 0x88, 0x9a, 0xac, 0x0c, + 0x76, 0x19, 0x90, 0xe2, 0x50, 0x5a, 0xf8, 0x12 +}; +static const u8 key57[] __initconst = { + 0x08, 0x26, 0xb8, 0xac, 0xf3, 0xa5, 0xc6, 0xa3, + 0x7f, 0x09, 0x87, 0xf5, 0x6c, 0x5a, 0x85, 0x6c, + 0x3d, 0xbd, 0xde, 0xd5, 0x87, 0xa3, 0x98, 0x7a, + 0xaa, 0x40, 0x3e, 0xf7, 0xff, 0x44, 0x5d, 0xee +}; +enum { nonce57 = 0xc03a6130bf06b089ULL }; + +static const u8 input58[] __initconst = { + 0x82, 0xa5, 0x38, 0x6f, 0xaa, 0xb4, 0xaf, 0xb2, + 0x42, 0x01, 0xa8, 0x39, 0x3f, 0x15, 0x51, 0xa8, + 0x11, 0x1b, 0x93, 0xca, 0x9c, 0xa0, 0x57, 0x68, + 0x8f, 0xdb, 0x68, 0x53, 0x51, 0x6d, 0x13, 0x22, + 0x12, 0x9b, 0xbd, 0x33, 0xa8, 0x52, 0x40, 0x57, + 0x80, 0x9b, 0x98, 
0xef, 0x56, 0x70, 0x11, 0xfa, + 0x36, 0x69, 0x7d, 0x15, 0x48, 0xf9, 0x3b, 0xeb, + 0x42 +}; +static const u8 output58[] __initconst = { + 0xff, 0x3a, 0x74, 0xc3, 0x3e, 0x44, 0x64, 0x4d, + 0x0e, 0x5f, 0x9d, 0xa8, 0xdb, 0xbe, 0x12, 0xef, + 0xba, 0x56, 0x65, 0x50, 0x76, 0xaf, 0xa4, 0x4e, + 0x01, 0xc1, 0xd3, 0x31, 0x14, 0xe2, 0xbe, 0x7b, + 0xa5, 0x67, 0xb4, 0xe3, 0x68, 0x40, 0x9c, 0xb0, + 0xb1, 0x78, 0xef, 0x49, 0x03, 0x0f, 0x2d, 0x56, + 0xb4, 0x37, 0xdb, 0xbc, 0x2d, 0x68, 0x1c, 0x3c, + 0xf1 +}; +static const u8 key58[] __initconst = { + 0x7e, 0xf1, 0x7c, 0x20, 0x65, 0xed, 0xcd, 0xd7, + 0x57, 0xe8, 0xdb, 0x90, 0x87, 0xdb, 0x5f, 0x63, + 0x3d, 0xdd, 0xb8, 0x2b, 0x75, 0x8e, 0x04, 0xb5, + 0xf4, 0x12, 0x79, 0xa9, 0x4d, 0x42, 0x16, 0x7f +}; +enum { nonce58 = 0x92838183f80d2f7fULL }; + +static const u8 input59[] __initconst = { + 0x37, 0xf1, 0x9d, 0xdd, 0xd7, 0x08, 0x9f, 0x13, + 0xc5, 0x21, 0x82, 0x75, 0x08, 0x9e, 0x25, 0x16, + 0xb1, 0xd1, 0x71, 0x42, 0x28, 0x63, 0xac, 0x47, + 0x71, 0x54, 0xb1, 0xfc, 0x39, 0xf0, 0x61, 0x4f, + 0x7c, 0x6d, 0x4f, 0xc8, 0x33, 0xef, 0x7e, 0xc8, + 0xc0, 0x97, 0xfc, 0x1a, 0x61, 0xb4, 0x87, 0x6f, + 0xdd, 0x5a, 0x15, 0x7b, 0x1b, 0x95, 0x50, 0x94, + 0x1d, 0xba +}; +static const u8 output59[] __initconst = { + 0x73, 0x67, 0xc5, 0x07, 0xbb, 0x57, 0x79, 0xd5, + 0xc9, 0x04, 0xdd, 0x88, 0xf3, 0x86, 0xe5, 0x70, + 0x49, 0x31, 0xe0, 0xcc, 0x3b, 0x1d, 0xdf, 0xb0, + 0xaf, 0xf4, 0x2d, 0xe0, 0x06, 0x10, 0x91, 0x8d, + 0x1c, 0xcf, 0x31, 0x0b, 0xf6, 0x73, 0xda, 0x1c, + 0xf0, 0x17, 0x52, 0x9e, 0x20, 0x2e, 0x9f, 0x8c, + 0xb3, 0x59, 0xce, 0xd4, 0xd3, 0xc1, 0x81, 0xe9, + 0x11, 0x36 +}; +static const u8 key59[] __initconst = { + 0xbd, 0x07, 0xd0, 0x53, 0x2c, 0xb3, 0xcc, 0x3f, + 0xc4, 0x95, 0xfd, 0xe7, 0x81, 0xb3, 0x29, 0x99, + 0x05, 0x45, 0xd6, 0x95, 0x25, 0x0b, 0x72, 0xd3, + 0xcd, 0xbb, 0x73, 0xf8, 0xfa, 0xc0, 0x9b, 0x7a +}; +enum { nonce59 = 0x4a0db819b0d519e2ULL }; + +static const u8 input60[] __initconst = { + 0x58, 0x4e, 0xdf, 0x94, 0x3c, 0x76, 0x0a, 0x79, + 0x47, 0xf1, 0xbe, 0x88, 0xd3, 0xba, 0x94, 0xd8, + 0xe2, 0x8f, 0xe3, 0x2f, 0x2f, 0x74, 0x82, 0x55, + 0xc3, 0xda, 0xe2, 0x4e, 0x2c, 0x8c, 0x45, 0x1d, + 0x72, 0x8f, 0x54, 0x41, 0xb5, 0xb7, 0x69, 0xe4, + 0xdc, 0xd2, 0x36, 0x21, 0x5c, 0x28, 0x52, 0xf7, + 0x98, 0x8e, 0x72, 0xa7, 0x6d, 0x57, 0xed, 0xdc, + 0x3c, 0xe6, 0x6a +}; +static const u8 output60[] __initconst = { + 0xda, 0xaf, 0xb5, 0xe3, 0x30, 0x65, 0x5c, 0xb1, + 0x48, 0x08, 0x43, 0x7b, 0x9e, 0xd2, 0x6a, 0x62, + 0x56, 0x7c, 0xad, 0xd9, 0xe5, 0xf6, 0x09, 0x71, + 0xcd, 0xe6, 0x05, 0x6b, 0x3f, 0x44, 0x3a, 0x5c, + 0xf6, 0xf8, 0xd7, 0xce, 0x7d, 0xd1, 0xe0, 0x4f, + 0x88, 0x15, 0x04, 0xd8, 0x20, 0xf0, 0x3e, 0xef, + 0xae, 0xa6, 0x27, 0xa3, 0x0e, 0xfc, 0x18, 0x90, + 0x33, 0xcd, 0xd3 +}; +static const u8 key60[] __initconst = { + 0xbf, 0xfd, 0x25, 0xb5, 0xb2, 0xfc, 0x78, 0x0c, + 0x8e, 0xb9, 0x57, 0x2f, 0x26, 0x4a, 0x7e, 0x71, + 0xcc, 0xf2, 0xe0, 0xfd, 0x24, 0x11, 0x20, 0x23, + 0x57, 0x00, 0xff, 0x80, 0x11, 0x0c, 0x1e, 0xff +}; +enum { nonce60 = 0xf18df56fdb7954adULL }; + +static const u8 input61[] __initconst = { + 0xb0, 0xf3, 0x06, 0xbc, 0x22, 0xae, 0x49, 0x40, + 0xae, 0xff, 0x1b, 0x31, 0xa7, 0x98, 0xab, 0x1d, + 0xe7, 0x40, 0x23, 0x18, 0x4f, 0xab, 0x8e, 0x93, + 0x82, 0xf4, 0x56, 0x61, 0xfd, 0x2b, 0xcf, 0xa7, + 0xc4, 0xb4, 0x0a, 0xf4, 0xcb, 0xc7, 0x8c, 0x40, + 0x57, 0xac, 0x0b, 0x3e, 0x2a, 0x0a, 0x67, 0x83, + 0x50, 0xbf, 0xec, 0xb0, 0xc7, 0xf1, 0x32, 0x26, + 0x98, 0x80, 0x33, 0xb4 +}; +static const u8 output61[] __initconst = { + 0x9d, 0x23, 0x0e, 0xff, 0xcc, 0x7c, 0xd5, 0xcf, + 0x1a, 0xb8, 0x59, 
0x1e, 0x92, 0xfd, 0x7f, 0xca, + 0xca, 0x3c, 0x18, 0x81, 0xde, 0xfa, 0x59, 0xc8, + 0x6f, 0x9c, 0x24, 0x3f, 0x3a, 0xe6, 0x0b, 0xb4, + 0x34, 0x48, 0x69, 0xfc, 0xb6, 0xea, 0xb2, 0xde, + 0x9f, 0xfd, 0x92, 0x36, 0x18, 0x98, 0x99, 0xaa, + 0x65, 0xe2, 0xea, 0xf4, 0xb1, 0x47, 0x8e, 0xb0, + 0xe7, 0xd4, 0x7a, 0x2c +}; +static const u8 key61[] __initconst = { + 0xd7, 0xfd, 0x9b, 0xbd, 0x8f, 0x65, 0x0d, 0x00, + 0xca, 0xa1, 0x6c, 0x85, 0x85, 0xa4, 0x6d, 0xf1, + 0xb1, 0x68, 0x0c, 0x8b, 0x5d, 0x37, 0x72, 0xd0, + 0xd8, 0xd2, 0x25, 0xab, 0x9f, 0x7b, 0x7d, 0x95 +}; +enum { nonce61 = 0xd82caf72a9c4864fULL }; + +static const u8 input62[] __initconst = { + 0x10, 0x77, 0xf3, 0x2f, 0xc2, 0x50, 0xd6, 0x0c, + 0xba, 0xa8, 0x8d, 0xce, 0x0d, 0x58, 0x9e, 0x87, + 0xb1, 0x59, 0x66, 0x0a, 0x4a, 0xb3, 0xd8, 0xca, + 0x0a, 0x6b, 0xf8, 0xc6, 0x2b, 0x3f, 0x8e, 0x09, + 0xe0, 0x0a, 0x15, 0x85, 0xfe, 0xaa, 0xc6, 0xbd, + 0x30, 0xef, 0xe4, 0x10, 0x78, 0x03, 0xc1, 0xc7, + 0x8a, 0xd9, 0xde, 0x0b, 0x51, 0x07, 0xc4, 0x7b, + 0xe2, 0x2e, 0x36, 0x3a, 0xc2 +}; +static const u8 output62[] __initconst = { + 0xa0, 0x0c, 0xfc, 0xc1, 0xf6, 0xaf, 0xc2, 0xb8, + 0x5c, 0xef, 0x6e, 0xf3, 0xce, 0x15, 0x48, 0x05, + 0xb5, 0x78, 0x49, 0x51, 0x1f, 0x9d, 0xf4, 0xbf, + 0x2f, 0x53, 0xa2, 0xd1, 0x15, 0x20, 0x82, 0x6b, + 0xd2, 0x22, 0x6c, 0x4e, 0x14, 0x87, 0xe3, 0xd7, + 0x49, 0x45, 0x84, 0xdb, 0x5f, 0x68, 0x60, 0xc4, + 0xb3, 0xe6, 0x3f, 0xd1, 0xfc, 0xa5, 0x73, 0xf3, + 0xfc, 0xbb, 0xbe, 0xc8, 0x9d +}; +static const u8 key62[] __initconst = { + 0x6e, 0xc9, 0xaf, 0xce, 0x35, 0xb9, 0x86, 0xd1, + 0xce, 0x5f, 0xd9, 0xbb, 0xd5, 0x1f, 0x7c, 0xcd, + 0xfe, 0x19, 0xaa, 0x3d, 0xea, 0x64, 0xc1, 0x28, + 0x40, 0xba, 0xa1, 0x28, 0xcd, 0x40, 0xb6, 0xf2 +}; +enum { nonce62 = 0xa1c0c265f900cde8ULL }; + +static const u8 input63[] __initconst = { + 0x7a, 0x70, 0x21, 0x2c, 0xef, 0xa6, 0x36, 0xd4, + 0xe0, 0xab, 0x8c, 0x25, 0x73, 0x34, 0xc8, 0x94, + 0x6c, 0x81, 0xcb, 0x19, 0x8d, 0x5a, 0x49, 0xaa, + 0x6f, 0xba, 0x83, 0x72, 0x02, 0x5e, 0xf5, 0x89, + 0xce, 0x79, 0x7e, 0x13, 0x3d, 0x5b, 0x98, 0x60, + 0x5d, 0xd9, 0xfb, 0x15, 0x93, 0x4c, 0xf3, 0x51, + 0x49, 0x55, 0xd1, 0x58, 0xdd, 0x7e, 0x6d, 0xfe, + 0xdd, 0x84, 0x23, 0x05, 0xba, 0xe9 +}; +static const u8 output63[] __initconst = { + 0x20, 0xb3, 0x5c, 0x03, 0x03, 0x78, 0x17, 0xfc, + 0x3b, 0x35, 0x30, 0x9a, 0x00, 0x18, 0xf5, 0xc5, + 0x06, 0x53, 0xf5, 0x04, 0x24, 0x9d, 0xd1, 0xb2, + 0xac, 0x5a, 0xb6, 0x2a, 0xa5, 0xda, 0x50, 0x00, + 0xec, 0xff, 0xa0, 0x7a, 0x14, 0x7b, 0xe4, 0x6b, + 0x63, 0xe8, 0x66, 0x86, 0x34, 0xfd, 0x74, 0x44, + 0xa2, 0x50, 0x97, 0x0d, 0xdc, 0xc3, 0x84, 0xf8, + 0x71, 0x02, 0x31, 0x95, 0xed, 0x54 +}; +static const u8 key63[] __initconst = { + 0x7d, 0x64, 0xb4, 0x12, 0x81, 0xe4, 0xe6, 0x8f, + 0xcc, 0xe7, 0xd1, 0x1f, 0x70, 0x20, 0xfd, 0xb8, + 0x3a, 0x7d, 0xa6, 0x53, 0x65, 0x30, 0x5d, 0xe3, + 0x1a, 0x44, 0xbe, 0x62, 0xed, 0x90, 0xc4, 0xd1 +}; +enum { nonce63 = 0xe8e849596c942276ULL }; + +static const u8 input64[] __initconst = { + 0x84, 0xf8, 0xda, 0x87, 0x23, 0x39, 0x60, 0xcf, + 0xc5, 0x50, 0x7e, 0xc5, 0x47, 0x29, 0x7c, 0x05, + 0xc2, 0xb4, 0xf4, 0xb2, 0xec, 0x5d, 0x48, 0x36, + 0xbf, 0xfc, 0x06, 0x8c, 0xf2, 0x0e, 0x88, 0xe7, + 0xc9, 0xc5, 0xa4, 0xa2, 0x83, 0x20, 0xa1, 0x6f, + 0x37, 0xe5, 0x2d, 0xa1, 0x72, 0xa1, 0x19, 0xef, + 0x05, 0x42, 0x08, 0xf2, 0x57, 0x47, 0x31, 0x1e, + 0x17, 0x76, 0x13, 0xd3, 0xcc, 0x75, 0x2c +}; +static const u8 output64[] __initconst = { + 0xcb, 0xec, 0x90, 0x88, 0xeb, 0x31, 0x69, 0x20, + 0xa6, 0xdc, 0xff, 0x76, 0x98, 0xb0, 0x24, 0x49, + 0x7b, 0x20, 0xd9, 0xd1, 0x1b, 0xe3, 0x61, 0xdc, + 0xcf, 0x51, 
0xf6, 0x70, 0x72, 0x33, 0x28, 0x94, + 0xac, 0x73, 0x18, 0xcf, 0x93, 0xfd, 0xca, 0x08, + 0x0d, 0xa2, 0xb9, 0x57, 0x1e, 0x51, 0xb6, 0x07, + 0x5c, 0xc1, 0x13, 0x64, 0x1d, 0x18, 0x6f, 0xe6, + 0x0b, 0xb7, 0x14, 0x03, 0x43, 0xb6, 0xaf +}; +static const u8 key64[] __initconst = { + 0xbf, 0x82, 0x65, 0xe4, 0x50, 0xf9, 0x5e, 0xea, + 0x28, 0x91, 0xd1, 0xd2, 0x17, 0x7c, 0x13, 0x7e, + 0xf5, 0xd5, 0x6b, 0x06, 0x1c, 0x20, 0xc2, 0x82, + 0xa1, 0x7a, 0xa2, 0x14, 0xa1, 0xb0, 0x54, 0x58 +}; +enum { nonce64 = 0xe57c5095aa5723c9ULL }; + +static const u8 input65[] __initconst = { + 0x1c, 0xfb, 0xd3, 0x3f, 0x85, 0xd7, 0xba, 0x7b, + 0xae, 0xb1, 0xa5, 0xd2, 0xe5, 0x40, 0xce, 0x4d, + 0x3e, 0xab, 0x17, 0x9d, 0x7d, 0x9f, 0x03, 0x98, + 0x3f, 0x9f, 0xc8, 0xdd, 0x36, 0x17, 0x43, 0x5c, + 0x34, 0xd1, 0x23, 0xe0, 0x77, 0xbf, 0x35, 0x5d, + 0x8f, 0xb1, 0xcb, 0x82, 0xbb, 0x39, 0x69, 0xd8, + 0x90, 0x45, 0x37, 0xfd, 0x98, 0x25, 0xf7, 0x5b, + 0xce, 0x06, 0x43, 0xba, 0x61, 0xa8, 0x47, 0xb9 +}; +static const u8 output65[] __initconst = { + 0x73, 0xa5, 0x68, 0xab, 0x8b, 0xa5, 0xc3, 0x7e, + 0x74, 0xf8, 0x9d, 0xf5, 0x93, 0x6e, 0xf2, 0x71, + 0x6d, 0xde, 0x82, 0xc5, 0x40, 0xa0, 0x46, 0xb3, + 0x9a, 0x78, 0xa8, 0xf7, 0xdf, 0xb1, 0xc3, 0xdd, + 0x8d, 0x90, 0x00, 0x68, 0x21, 0x48, 0xe8, 0xba, + 0x56, 0x9f, 0x8f, 0xe7, 0xa4, 0x4d, 0x36, 0x55, + 0xd0, 0x34, 0x99, 0xa6, 0x1c, 0x4c, 0xc1, 0xe2, + 0x65, 0x98, 0x14, 0x8e, 0x6a, 0x05, 0xb1, 0x2b +}; +static const u8 key65[] __initconst = { + 0xbd, 0x5c, 0x8a, 0xb0, 0x11, 0x29, 0xf3, 0x00, + 0x7a, 0x78, 0x32, 0x63, 0x34, 0x00, 0xe6, 0x7d, + 0x30, 0x54, 0xde, 0x37, 0xda, 0xc2, 0xc4, 0x3d, + 0x92, 0x6b, 0x4c, 0xc2, 0x92, 0xe9, 0x9e, 0x2a +}; +enum { nonce65 = 0xf654a3031de746f2ULL }; + +static const u8 input66[] __initconst = { + 0x4b, 0x27, 0x30, 0x8f, 0x28, 0xd8, 0x60, 0x46, + 0x39, 0x06, 0x49, 0xea, 0x1b, 0x71, 0x26, 0xe0, + 0x99, 0x2b, 0xd4, 0x8f, 0x64, 0x64, 0xcd, 0xac, + 0x1d, 0x78, 0x88, 0x90, 0xe1, 0x5c, 0x24, 0x4b, + 0xdc, 0x2d, 0xb7, 0xee, 0x3a, 0xe6, 0x86, 0x2c, + 0x21, 0xe4, 0x2b, 0xfc, 0xe8, 0x19, 0xca, 0x65, + 0xe7, 0xdd, 0x6f, 0x52, 0xb3, 0x11, 0xe1, 0xe2, + 0xbf, 0xe8, 0x70, 0xe3, 0x0d, 0x45, 0xb8, 0xa5, + 0x20, 0xb7, 0xb5, 0xaf, 0xff, 0x08, 0xcf, 0x23, + 0x65, 0xdf, 0x8d, 0xc3, 0x31, 0xf3, 0x1e, 0x6a, + 0x58, 0x8d, 0xcc, 0x45, 0x16, 0x86, 0x1f, 0x31, + 0x5c, 0x27, 0xcd, 0xc8, 0x6b, 0x19, 0x1e, 0xec, + 0x44, 0x75, 0x63, 0x97, 0xfd, 0x79, 0xf6, 0x62, + 0xc5, 0xba, 0x17, 0xc7, 0xab, 0x8f, 0xbb, 0xed, + 0x85, 0x2a, 0x98, 0x79, 0x21, 0xec, 0x6e, 0x4d, + 0xdc, 0xfa, 0x72, 0x52, 0xba, 0xc8, 0x4c +}; +static const u8 output66[] __initconst = { + 0x76, 0x5b, 0x2c, 0xa7, 0x62, 0xb9, 0x08, 0x4a, + 0xc6, 0x4a, 0x92, 0xc3, 0xbb, 0x10, 0xb3, 0xee, + 0xff, 0xb9, 0x07, 0xc7, 0x27, 0xcb, 0x1e, 0xcf, + 0x58, 0x6f, 0xa1, 0x64, 0xe8, 0xf1, 0x4e, 0xe1, + 0xef, 0x18, 0x96, 0xab, 0x97, 0x28, 0xd1, 0x7c, + 0x71, 0x6c, 0xd1, 0xe2, 0xfa, 0xd9, 0x75, 0xcb, + 0xeb, 0xea, 0x0c, 0x86, 0x82, 0xd8, 0xf4, 0xcc, + 0xea, 0xa3, 0x00, 0xfa, 0x82, 0xd2, 0xcd, 0xcb, + 0xdb, 0x63, 0x28, 0xe2, 0x82, 0xe9, 0x01, 0xed, + 0x31, 0xe6, 0x71, 0x45, 0x08, 0x89, 0x8a, 0x23, + 0xa8, 0xb5, 0xc2, 0xe2, 0x9f, 0xe9, 0xb8, 0x9a, + 0xc4, 0x79, 0x6d, 0x71, 0x52, 0x61, 0x74, 0x6c, + 0x1b, 0xd7, 0x65, 0x6d, 0x03, 0xc4, 0x1a, 0xc0, + 0x50, 0xba, 0xd6, 0xc9, 0x43, 0x50, 0xbe, 0x09, + 0x09, 0x8a, 0xdb, 0xaa, 0x76, 0x4e, 0x3b, 0x61, + 0x3c, 0x7c, 0x44, 0xe7, 0xdb, 0x10, 0xa7 +}; +static const u8 key66[] __initconst = { + 0x88, 0xdf, 0xca, 0x68, 0xaf, 0x4f, 0xb3, 0xfd, + 0x6e, 0xa7, 0x95, 0x35, 0x8a, 0xe8, 0x37, 0xe8, + 0xc8, 0x55, 0xa2, 0x2a, 
0x6d, 0x77, 0xf8, 0x93, + 0x7a, 0x41, 0xf3, 0x7b, 0x95, 0xdf, 0x89, 0xf5 +}; +enum { nonce66 = 0x1024b4fdd415cf82ULL }; + +static const u8 input67[] __initconst = { + 0xd4, 0x2e, 0xfa, 0x92, 0xe9, 0x29, 0x68, 0xb7, + 0x54, 0x2c, 0xf7, 0xa4, 0x2d, 0xb7, 0x50, 0xb5, + 0xc5, 0xb2, 0x9d, 0x17, 0x5e, 0x0a, 0xca, 0x37, + 0xbf, 0x60, 0xae, 0xd2, 0x98, 0xe9, 0xfa, 0x59, + 0x67, 0x62, 0xe6, 0x43, 0x0c, 0x77, 0x80, 0x82, + 0x33, 0x61, 0xa3, 0xff, 0xc1, 0xa0, 0x8f, 0x56, + 0xbc, 0xec, 0x65, 0x43, 0x88, 0xa5, 0xff, 0x51, + 0x64, 0x30, 0xee, 0x34, 0xb7, 0x5c, 0x28, 0x68, + 0xc3, 0x52, 0xd2, 0xac, 0x78, 0x2a, 0xa6, 0x10, + 0xb8, 0xb2, 0x4c, 0x80, 0x4f, 0x99, 0xb2, 0x36, + 0x94, 0x8f, 0x66, 0xcb, 0xa1, 0x91, 0xed, 0x06, + 0x42, 0x6d, 0xc1, 0xae, 0x55, 0x93, 0xdd, 0x93, + 0x9e, 0x88, 0x34, 0x7f, 0x98, 0xeb, 0xbe, 0x61, + 0xf9, 0xa9, 0x0f, 0xd9, 0xc4, 0x87, 0xd5, 0xef, + 0xcc, 0x71, 0x8c, 0x0e, 0xce, 0xad, 0x02, 0xcf, + 0xa2, 0x61, 0xdf, 0xb1, 0xfe, 0x3b, 0xdc, 0xc0, + 0x58, 0xb5, 0x71, 0xa1, 0x83, 0xc9, 0xb4, 0xaf, + 0x9d, 0x54, 0x12, 0xcd, 0xea, 0x06, 0xd6, 0x4e, + 0xe5, 0x27, 0x0c, 0xc3, 0xbb, 0xa8, 0x0a, 0x81, + 0x75, 0xc3, 0xc9, 0xd4, 0x35, 0x3e, 0x53, 0x9f, + 0xaa, 0x20, 0xc0, 0x68, 0x39, 0x2c, 0x96, 0x39, + 0x53, 0x81, 0xda, 0x07, 0x0f, 0x44, 0xa5, 0x47, + 0x0e, 0xb3, 0x87, 0x0d, 0x1b, 0xc1, 0xe5, 0x41, + 0x35, 0x12, 0x58, 0x96, 0x69, 0x8a, 0x1a, 0xa3, + 0x9d, 0x3d, 0xd4, 0xb1, 0x8e, 0x1f, 0x96, 0x87, + 0xda, 0xd3, 0x19, 0xe2, 0xb1, 0x3a, 0x19, 0x74, + 0xa0, 0x00, 0x9f, 0x4d, 0xbc, 0xcb, 0x0c, 0xe9, + 0xec, 0x10, 0xdf, 0x2a, 0x88, 0xdc, 0x30, 0x51, + 0x46, 0x56, 0x53, 0x98, 0x6a, 0x26, 0x14, 0x05, + 0x54, 0x81, 0x55, 0x0b, 0x3c, 0x85, 0xdd, 0x33, + 0x81, 0x11, 0x29, 0x82, 0x46, 0x35, 0xe1, 0xdb, + 0x59, 0x7b +}; +static const u8 output67[] __initconst = { + 0x64, 0x6c, 0xda, 0x7f, 0xd4, 0xa9, 0x2a, 0x5e, + 0x22, 0xae, 0x8d, 0x67, 0xdb, 0xee, 0xfd, 0xd0, + 0x44, 0x80, 0x17, 0xb2, 0xe3, 0x87, 0xad, 0x57, + 0x15, 0xcb, 0x88, 0x64, 0xc0, 0xf1, 0x49, 0x3d, + 0xfa, 0xbe, 0xa8, 0x9f, 0x12, 0xc3, 0x57, 0x56, + 0x70, 0xa5, 0xc5, 0x6b, 0xf1, 0xab, 0xd5, 0xde, + 0x77, 0x92, 0x6a, 0x56, 0x03, 0xf5, 0x21, 0x0d, + 0xb6, 0xc4, 0xcc, 0x62, 0x44, 0x3f, 0xb1, 0xc1, + 0x61, 0x41, 0x90, 0xb2, 0xd5, 0xb8, 0xf3, 0x57, + 0xfb, 0xc2, 0x6b, 0x25, 0x58, 0xc8, 0x45, 0x20, + 0x72, 0x29, 0x6f, 0x9d, 0xb5, 0x81, 0x4d, 0x2b, + 0xb2, 0x89, 0x9e, 0x91, 0x53, 0x97, 0x1c, 0xd9, + 0x3d, 0x79, 0xdc, 0x14, 0xae, 0x01, 0x73, 0x75, + 0xf0, 0xca, 0xd5, 0xab, 0x62, 0x5c, 0x7a, 0x7d, + 0x3f, 0xfe, 0x22, 0x7d, 0xee, 0xe2, 0xcb, 0x76, + 0x55, 0xec, 0x06, 0xdd, 0x41, 0x47, 0x18, 0x62, + 0x1d, 0x57, 0xd0, 0xd6, 0xb6, 0x0f, 0x4b, 0xfc, + 0x79, 0x19, 0xf4, 0xd6, 0x37, 0x86, 0x18, 0x1f, + 0x98, 0x0d, 0x9e, 0x15, 0x2d, 0xb6, 0x9a, 0x8a, + 0x8c, 0x80, 0x22, 0x2f, 0x82, 0xc4, 0xc7, 0x36, + 0xfa, 0xfa, 0x07, 0xbd, 0xc2, 0x2a, 0xe2, 0xea, + 0x93, 0xc8, 0xb2, 0x90, 0x33, 0xf2, 0xee, 0x4b, + 0x1b, 0xf4, 0x37, 0x92, 0x13, 0xbb, 0xe2, 0xce, + 0xe3, 0x03, 0xcf, 0x07, 0x94, 0xab, 0x9a, 0xc9, + 0xff, 0x83, 0x69, 0x3a, 0xda, 0x2c, 0xd0, 0x47, + 0x3d, 0x6c, 0x1a, 0x60, 0x68, 0x47, 0xb9, 0x36, + 0x52, 0xdd, 0x16, 0xef, 0x6c, 0xbf, 0x54, 0x11, + 0x72, 0x62, 0xce, 0x8c, 0x9d, 0x90, 0xa0, 0x25, + 0x06, 0x92, 0x3e, 0x12, 0x7e, 0x1a, 0x1d, 0xe5, + 0xa2, 0x71, 0xce, 0x1c, 0x4c, 0x6a, 0x7c, 0xdc, + 0x3d, 0xe3, 0x6e, 0x48, 0x9d, 0xb3, 0x64, 0x7d, + 0x78, 0x40 +}; +static const u8 key67[] __initconst = { + 0xa9, 0x20, 0x75, 0x89, 0x7e, 0x37, 0x85, 0x48, + 0xa3, 0xfb, 0x7b, 0xe8, 0x30, 0xa7, 0xe3, 0x6e, + 0xa6, 0xc1, 0x71, 0x17, 0xc1, 0x6c, 0x9b, 0xc2, + 0xde, 0xf0, 0xa7, 
0x19, 0xec, 0xce, 0xc6, 0x53 +}; +enum { nonce67 = 0x4adc4d1f968c8a10ULL }; + +static const u8 input68[] __initconst = { + 0x99, 0xae, 0x72, 0xfb, 0x16, 0xe1, 0xf1, 0x59, + 0x43, 0x15, 0x4e, 0x33, 0xa0, 0x95, 0xe7, 0x6c, + 0x74, 0x24, 0x31, 0xca, 0x3b, 0x2e, 0xeb, 0xd7, + 0x11, 0xd8, 0xe0, 0x56, 0x92, 0x91, 0x61, 0x57, + 0xe2, 0x82, 0x9f, 0x8f, 0x37, 0xf5, 0x3d, 0x24, + 0x92, 0x9d, 0x87, 0x00, 0x8d, 0x89, 0xe0, 0x25, + 0x8b, 0xe4, 0x20, 0x5b, 0x8a, 0x26, 0x2c, 0x61, + 0x78, 0xb0, 0xa6, 0x3e, 0x82, 0x18, 0xcf, 0xdc, + 0x2d, 0x24, 0xdd, 0x81, 0x42, 0xc4, 0x95, 0xf0, + 0x48, 0x60, 0x71, 0xe3, 0xe3, 0xac, 0xec, 0xbe, + 0x98, 0x6b, 0x0c, 0xb5, 0x6a, 0xa9, 0xc8, 0x79, + 0x23, 0x2e, 0x38, 0x0b, 0x72, 0x88, 0x8c, 0xe7, + 0x71, 0x8b, 0x36, 0xe3, 0x58, 0x3d, 0x9c, 0xa0, + 0xa2, 0xea, 0xcf, 0x0c, 0x6a, 0x6c, 0x64, 0xdf, + 0x97, 0x21, 0x8f, 0x93, 0xfb, 0xba, 0xf3, 0x5a, + 0xd7, 0x8f, 0xa6, 0x37, 0x15, 0x50, 0x43, 0x02, + 0x46, 0x7f, 0x93, 0x46, 0x86, 0x31, 0xe2, 0xaa, + 0x24, 0xa8, 0x26, 0xae, 0xe6, 0xc0, 0x05, 0x73, + 0x0b, 0x4f, 0x7e, 0xed, 0x65, 0xeb, 0x56, 0x1e, + 0xb6, 0xb3, 0x0b, 0xc3, 0x0e, 0x31, 0x95, 0xa9, + 0x18, 0x4d, 0xaf, 0x38, 0xd7, 0xec, 0xc6, 0x44, + 0x72, 0x77, 0x4e, 0x25, 0x4b, 0x25, 0xdd, 0x1e, + 0x8c, 0xa2, 0xdf, 0xf6, 0x2a, 0x97, 0x1a, 0x88, + 0x2c, 0x8a, 0x5d, 0xfe, 0xe8, 0xfb, 0x35, 0xe8, + 0x0f, 0x2b, 0x7a, 0x18, 0x69, 0x43, 0x31, 0x1d, + 0x38, 0x6a, 0x62, 0x95, 0x0f, 0x20, 0x4b, 0xbb, + 0x97, 0x3c, 0xe0, 0x64, 0x2f, 0x52, 0xc9, 0x2d, + 0x4d, 0x9d, 0x54, 0x04, 0x3d, 0xc9, 0xea, 0xeb, + 0xd0, 0x86, 0x52, 0xff, 0x42, 0xe1, 0x0d, 0x7a, + 0xad, 0x88, 0xf9, 0x9b, 0x1e, 0x5e, 0x12, 0x27, + 0x95, 0x3e, 0x0c, 0x2c, 0x13, 0x00, 0x6f, 0x8e, + 0x93, 0x69, 0x0e, 0x01, 0x8c, 0xc1, 0xfd, 0xb3 +}; +static const u8 output68[] __initconst = { + 0x26, 0x3e, 0xf2, 0xb1, 0xf5, 0xef, 0x81, 0xa4, + 0xb7, 0x42, 0xd4, 0x26, 0x18, 0x4b, 0xdd, 0x6a, + 0x47, 0x15, 0xcb, 0x0e, 0x57, 0xdb, 0xa7, 0x29, + 0x7e, 0x7b, 0x3f, 0x47, 0x89, 0x57, 0xab, 0xea, + 0x14, 0x7b, 0xcf, 0x37, 0xdb, 0x1c, 0xe1, 0x11, + 0x77, 0xae, 0x2e, 0x4c, 0xd2, 0x08, 0x3f, 0xa6, + 0x62, 0x86, 0xa6, 0xb2, 0x07, 0xd5, 0x3f, 0x9b, + 0xdc, 0xc8, 0x50, 0x4b, 0x7b, 0xb9, 0x06, 0xe6, + 0xeb, 0xac, 0x98, 0x8c, 0x36, 0x0c, 0x1e, 0xb2, + 0xc8, 0xfb, 0x24, 0x60, 0x2c, 0x08, 0x17, 0x26, + 0x5b, 0xc8, 0xc2, 0xdf, 0x9c, 0x73, 0x67, 0x4a, + 0xdb, 0xcf, 0xd5, 0x2c, 0x2b, 0xca, 0x24, 0xcc, + 0xdb, 0xc9, 0xa8, 0xf2, 0x5d, 0x67, 0xdf, 0x5c, + 0x62, 0x0b, 0x58, 0xc0, 0x83, 0xde, 0x8b, 0xf6, + 0x15, 0x0a, 0xd6, 0x32, 0xd8, 0xf5, 0xf2, 0x5f, + 0x33, 0xce, 0x7e, 0xab, 0x76, 0xcd, 0x14, 0x91, + 0xd8, 0x41, 0x90, 0x93, 0xa1, 0xaf, 0xf3, 0x45, + 0x6c, 0x1b, 0x25, 0xbd, 0x48, 0x51, 0x6d, 0x15, + 0x47, 0xe6, 0x23, 0x50, 0x32, 0x69, 0x1e, 0xb5, + 0x94, 0xd3, 0x97, 0xba, 0xd7, 0x37, 0x4a, 0xba, + 0xb9, 0xcd, 0xfb, 0x96, 0x9a, 0x90, 0xe0, 0x37, + 0xf8, 0xdf, 0x91, 0x6c, 0x62, 0x13, 0x19, 0x21, + 0x4b, 0xa9, 0xf1, 0x12, 0x66, 0xe2, 0x74, 0xd7, + 0x81, 0xa0, 0x74, 0x8d, 0x7e, 0x7e, 0xc9, 0xb1, + 0x69, 0x8f, 0xed, 0xb3, 0xf6, 0x97, 0xcd, 0x72, + 0x78, 0x93, 0xd3, 0x54, 0x6b, 0x43, 0xac, 0x29, + 0xb4, 0xbc, 0x7d, 0xa4, 0x26, 0x4b, 0x7b, 0xab, + 0xd6, 0x67, 0x22, 0xff, 0x03, 0x92, 0xb6, 0xd4, + 0x96, 0x94, 0x5a, 0xe5, 0x02, 0x35, 0x77, 0xfa, + 0x3f, 0x54, 0x1d, 0xdd, 0x35, 0x39, 0xfe, 0x03, + 0xdd, 0x8e, 0x3c, 0x8c, 0xc2, 0x69, 0x2a, 0xb1, + 0xb7, 0xb3, 0xa1, 0x89, 0x84, 0xea, 0x16, 0xe2 +}; +static const u8 key68[] __initconst = { + 0xd2, 0x49, 0x7f, 0xd7, 0x49, 0x66, 0x0d, 0xb3, + 0x5a, 0x7e, 0x3c, 0xfc, 0x37, 0x83, 0x0e, 0xf7, + 0x96, 0xd8, 0xd6, 0x33, 0x79, 0x2b, 
0x84, 0x53, + 0x06, 0xbc, 0x6c, 0x0a, 0x55, 0x84, 0xfe, 0xab +}; +enum { nonce68 = 0x6a6df7ff0a20de06ULL }; + +static const u8 input69[] __initconst = { + 0xf9, 0x18, 0x4c, 0xd2, 0x3f, 0xf7, 0x22, 0xd9, + 0x58, 0xb6, 0x3b, 0x38, 0x69, 0x79, 0xf4, 0x71, + 0x5f, 0x38, 0x52, 0x1f, 0x17, 0x6f, 0x6f, 0xd9, + 0x09, 0x2b, 0xfb, 0x67, 0xdc, 0xc9, 0xe8, 0x4a, + 0x70, 0x9f, 0x2e, 0x3c, 0x06, 0xe5, 0x12, 0x20, + 0x25, 0x29, 0xd0, 0xdc, 0x81, 0xc5, 0xc6, 0x0f, + 0xd2, 0xa8, 0x81, 0x15, 0x98, 0xb2, 0x71, 0x5a, + 0x9a, 0xe9, 0xfb, 0xaf, 0x0e, 0x5f, 0x8a, 0xf3, + 0x16, 0x4a, 0x47, 0xf2, 0x5c, 0xbf, 0xda, 0x52, + 0x9a, 0xa6, 0x36, 0xfd, 0xc6, 0xf7, 0x66, 0x00, + 0xcc, 0x6c, 0xd4, 0xb3, 0x07, 0x6d, 0xeb, 0xfe, + 0x92, 0x71, 0x25, 0xd0, 0xcf, 0x9c, 0xe8, 0x65, + 0x45, 0x10, 0xcf, 0x62, 0x74, 0x7d, 0xf2, 0x1b, + 0x57, 0xa0, 0xf1, 0x6b, 0xa4, 0xd5, 0xfa, 0x12, + 0x27, 0x5a, 0xf7, 0x99, 0xfc, 0xca, 0xf3, 0xb8, + 0x2c, 0x8b, 0xba, 0x28, 0x74, 0xde, 0x8f, 0x78, + 0xa2, 0x8c, 0xaf, 0x89, 0x4b, 0x05, 0xe2, 0xf3, + 0xf8, 0xd2, 0xef, 0xac, 0xa4, 0xc4, 0xe2, 0xe2, + 0x36, 0xbb, 0x5e, 0xae, 0xe6, 0x87, 0x3d, 0x88, + 0x9f, 0xb8, 0x11, 0xbb, 0xcf, 0x57, 0xce, 0xd0, + 0xba, 0x62, 0xf4, 0xf8, 0x9b, 0x95, 0x04, 0xc9, + 0xcf, 0x01, 0xe9, 0xf1, 0xc8, 0xc6, 0x22, 0xa4, + 0xf2, 0x8b, 0x2f, 0x24, 0x0a, 0xf5, 0x6e, 0xb7, + 0xd4, 0x2c, 0xb6, 0xf7, 0x5c, 0x97, 0x61, 0x0b, + 0xd9, 0xb5, 0x06, 0xcd, 0xed, 0x3e, 0x1f, 0xc5, + 0xb2, 0x6c, 0xa3, 0xea, 0xb8, 0xad, 0xa6, 0x42, + 0x88, 0x7a, 0x52, 0xd5, 0x64, 0xba, 0xb5, 0x20, + 0x10, 0xa0, 0x0f, 0x0d, 0xea, 0xef, 0x5a, 0x9b, + 0x27, 0xb8, 0xca, 0x20, 0x19, 0x6d, 0xa8, 0xc4, + 0x46, 0x04, 0xb3, 0xe8, 0xf8, 0x66, 0x1b, 0x0a, + 0xce, 0x76, 0x5d, 0x59, 0x58, 0x05, 0xee, 0x3e, + 0x3c, 0x86, 0x5b, 0x49, 0x1c, 0x72, 0x18, 0x01, + 0x62, 0x92, 0x0f, 0x3e, 0xd1, 0x57, 0x5e, 0x20, + 0x7b, 0xfb, 0x4d, 0x3c, 0xc5, 0x35, 0x43, 0x2f, + 0xb0, 0xc5, 0x7c, 0xe4, 0xa2, 0x84, 0x13, 0x77 +}; +static const u8 output69[] __initconst = { + 0xbb, 0x4a, 0x7f, 0x7c, 0xd5, 0x2f, 0x89, 0x06, + 0xec, 0x20, 0xf1, 0x9a, 0x11, 0x09, 0x14, 0x2e, + 0x17, 0x50, 0xf9, 0xd5, 0xf5, 0x48, 0x7c, 0x7a, + 0x55, 0xc0, 0x57, 0x03, 0xe3, 0xc4, 0xb2, 0xb7, + 0x18, 0x47, 0x95, 0xde, 0xaf, 0x80, 0x06, 0x3c, + 0x5a, 0xf2, 0xc3, 0x53, 0xe3, 0x29, 0x92, 0xf8, + 0xff, 0x64, 0x85, 0xb9, 0xf7, 0xd3, 0x80, 0xd2, + 0x0c, 0x5d, 0x7b, 0x57, 0x0c, 0x51, 0x79, 0x86, + 0xf3, 0x20, 0xd2, 0xb8, 0x6e, 0x0c, 0x5a, 0xce, + 0xeb, 0x88, 0x02, 0x8b, 0x82, 0x1b, 0x7f, 0xf5, + 0xde, 0x7f, 0x48, 0x48, 0xdf, 0xa0, 0x55, 0xc6, + 0x0c, 0x22, 0xa1, 0x80, 0x8d, 0x3b, 0xcb, 0x40, + 0x2d, 0x3d, 0x0b, 0xf2, 0xe0, 0x22, 0x13, 0x99, + 0xe1, 0xa7, 0x27, 0x68, 0x31, 0xe1, 0x24, 0x5d, + 0xd2, 0xee, 0x16, 0xc1, 0xd7, 0xa8, 0x14, 0x19, + 0x23, 0x72, 0x67, 0x27, 0xdc, 0x5e, 0xb9, 0xc7, + 0xd8, 0xe3, 0x55, 0x50, 0x40, 0x98, 0x7b, 0xe7, + 0x34, 0x1c, 0x3b, 0x18, 0x14, 0xd8, 0x62, 0xc1, + 0x93, 0x84, 0xf3, 0x5b, 0xdd, 0x9e, 0x1f, 0x3b, + 0x0b, 0xbc, 0x4e, 0x5b, 0x79, 0xa3, 0xca, 0x74, + 0x2a, 0x98, 0xe8, 0x04, 0x39, 0xef, 0xc6, 0x76, + 0x6d, 0xee, 0x9f, 0x67, 0x5b, 0x59, 0x3a, 0xe5, + 0xf2, 0x3b, 0xca, 0x89, 0xe8, 0x9b, 0x03, 0x3d, + 0x11, 0xd2, 0x4a, 0x70, 0xaf, 0x88, 0xb0, 0x94, + 0x96, 0x26, 0xab, 0x3c, 0xc1, 0xb8, 0xe4, 0xe7, + 0x14, 0x61, 0x64, 0x3a, 0x61, 0x08, 0x0f, 0xa9, + 0xce, 0x64, 0xb2, 0x40, 0xf8, 0x20, 0x3a, 0xa9, + 0x31, 0xbd, 0x7e, 0x16, 0xca, 0xf5, 0x62, 0x0f, + 0x91, 0x9f, 0x8e, 0x1d, 0xa4, 0x77, 0xf3, 0x87, + 0x61, 0xe8, 0x14, 0xde, 0x18, 0x68, 0x4e, 0x9d, + 0x73, 0xcd, 0x8a, 0xe4, 0x80, 0x84, 0x23, 0xaa, + 0x9d, 0x64, 0x1c, 0x80, 0x41, 0xca, 0x82, 0x40, + 
0x94, 0x55, 0xe3, 0x28, 0xa1, 0x97, 0x71, 0xba, + 0xf2, 0x2c, 0x39, 0x62, 0x29, 0x56, 0xd0, 0xff, + 0xb2, 0x82, 0x20, 0x59, 0x1f, 0xc3, 0x64, 0x57 +}; +static const u8 key69[] __initconst = { + 0x19, 0x09, 0xe9, 0x7c, 0xd9, 0x02, 0x4a, 0x0c, + 0x52, 0x25, 0xad, 0x5c, 0x2e, 0x8d, 0x86, 0x10, + 0x85, 0x2b, 0xba, 0xa4, 0x44, 0x5b, 0x39, 0x3e, + 0x18, 0xaa, 0xce, 0x0e, 0xe2, 0x69, 0x3c, 0xcf +}; +enum { nonce69 = 0xdb925a1948f0f060ULL }; + +static const u8 input70[] __initconst = { + 0x10, 0xe7, 0x83, 0xcf, 0x42, 0x9f, 0xf2, 0x41, + 0xc7, 0xe4, 0xdb, 0xf9, 0xa3, 0x02, 0x1d, 0x8d, + 0x50, 0x81, 0x2c, 0x6b, 0x92, 0xe0, 0x4e, 0xea, + 0x26, 0x83, 0x2a, 0xd0, 0x31, 0xf1, 0x23, 0xf3, + 0x0e, 0x88, 0x14, 0x31, 0xf9, 0x01, 0x63, 0x59, + 0x21, 0xd1, 0x8b, 0xdd, 0x06, 0xd0, 0xc6, 0xab, + 0x91, 0x71, 0x82, 0x4d, 0xd4, 0x62, 0x37, 0x17, + 0xf9, 0x50, 0xf9, 0xb5, 0x74, 0xce, 0x39, 0x80, + 0x80, 0x78, 0xf8, 0xdc, 0x1c, 0xdb, 0x7c, 0x3d, + 0xd4, 0x86, 0x31, 0x00, 0x75, 0x7b, 0xd1, 0x42, + 0x9f, 0x1b, 0x97, 0x88, 0x0e, 0x14, 0x0e, 0x1e, + 0x7d, 0x7b, 0xc4, 0xd2, 0xf3, 0xc1, 0x6d, 0x17, + 0x5d, 0xc4, 0x75, 0x54, 0x0f, 0x38, 0x65, 0x89, + 0xd8, 0x7d, 0xab, 0xc9, 0xa7, 0x0a, 0x21, 0x0b, + 0x37, 0x12, 0x05, 0x07, 0xb5, 0x68, 0x32, 0x32, + 0xb9, 0xf8, 0x97, 0x17, 0x03, 0xed, 0x51, 0x8f, + 0x3d, 0x5a, 0xd0, 0x12, 0x01, 0x6e, 0x2e, 0x91, + 0x1c, 0xbe, 0x6b, 0xa3, 0xcc, 0x75, 0x62, 0x06, + 0x8e, 0x65, 0xbb, 0xe2, 0x29, 0x71, 0x4b, 0x89, + 0x6a, 0x9d, 0x85, 0x8c, 0x8c, 0xdf, 0x94, 0x95, + 0x23, 0x66, 0xf8, 0x92, 0xee, 0x56, 0xeb, 0xb3, + 0xeb, 0xd2, 0x4a, 0x3b, 0x77, 0x8a, 0x6e, 0xf6, + 0xca, 0xd2, 0x34, 0x00, 0xde, 0xbe, 0x1d, 0x7a, + 0x73, 0xef, 0x2b, 0x80, 0x56, 0x16, 0x29, 0xbf, + 0x6e, 0x33, 0xed, 0x0d, 0xe2, 0x02, 0x60, 0x74, + 0xe9, 0x0a, 0xbc, 0xd1, 0xc5, 0xe8, 0x53, 0x02, + 0x79, 0x0f, 0x25, 0x0c, 0xef, 0xab, 0xd3, 0xbc, + 0xb7, 0xfc, 0xf3, 0xb0, 0x34, 0xd1, 0x07, 0xd2, + 0x5a, 0x31, 0x1f, 0xec, 0x1f, 0x87, 0xed, 0xdd, + 0x6a, 0xc1, 0xe8, 0xb3, 0x25, 0x4c, 0xc6, 0x9b, + 0x91, 0x73, 0xec, 0x06, 0x73, 0x9e, 0x57, 0x65, + 0x32, 0x75, 0x11, 0x74, 0x6e, 0xa4, 0x7d, 0x0d, + 0x74, 0x9f, 0x51, 0x10, 0x10, 0x47, 0xc9, 0x71, + 0x6e, 0x97, 0xae, 0x44, 0x41, 0xef, 0x98, 0x78, + 0xf4, 0xc5, 0xbd, 0x5e, 0x00, 0xe5, 0xfd, 0xe2, + 0xbe, 0x8c, 0xc2, 0xae, 0xc2, 0xee, 0x59, 0xf6, + 0xcb, 0x20, 0x54, 0x84, 0xc3, 0x31, 0x7e, 0x67, + 0x71, 0xb6, 0x76, 0xbe, 0x81, 0x8f, 0x82, 0xad, + 0x01, 0x8f, 0xc4, 0x00, 0x04, 0x3d, 0x8d, 0x34, + 0xaa, 0xea, 0xc0, 0xea, 0x91, 0x42, 0xb6, 0xb8, + 0x43, 0xf3, 0x17, 0xb2, 0x73, 0x64, 0x82, 0x97, + 0xd5, 0xc9, 0x07, 0x77, 0xb1, 0x26, 0xe2, 0x00, + 0x6a, 0xae, 0x70, 0x0b, 0xbe, 0xe6, 0xb8, 0x42, + 0x81, 0x55, 0xf7, 0xb8, 0x96, 0x41, 0x9d, 0xd4, + 0x2c, 0x27, 0x00, 0xcc, 0x91, 0x28, 0x22, 0xa4, + 0x7b, 0x42, 0x51, 0x9e, 0xd6, 0xec, 0xf3, 0x6b, + 0x00, 0xff, 0x5c, 0xa2, 0xac, 0x47, 0x33, 0x2d, + 0xf8, 0x11, 0x65, 0x5f, 0x4d, 0x79, 0x8b, 0x4f, + 0xad, 0xf0, 0x9d, 0xcd, 0xb9, 0x7b, 0x08, 0xf7, + 0x32, 0x51, 0xfa, 0x39, 0xaa, 0x78, 0x05, 0xb1, + 0xf3, 0x5d, 0xe8, 0x7c, 0x8e, 0x4f, 0xa2, 0xe0, + 0x98, 0x0c, 0xb2, 0xa7, 0xf0, 0x35, 0x8e, 0x70, + 0x7c, 0x82, 0xf3, 0x1b, 0x26, 0x28, 0x12, 0xe5, + 0x23, 0x57, 0xe4, 0xb4, 0x9b, 0x00, 0x39, 0x97, + 0xef, 0x7c, 0x46, 0x9b, 0x34, 0x6b, 0xe7, 0x0e, + 0xa3, 0x2a, 0x18, 0x11, 0x64, 0xc6, 0x7c, 0x8b, + 0x06, 0x02, 0xf5, 0x69, 0x76, 0xf9, 0xaa, 0x09, + 0x5f, 0x68, 0xf8, 0x4a, 0x79, 0x58, 0xec, 0x37, + 0xcf, 0x3a, 0xcc, 0x97, 0x70, 0x1d, 0x3e, 0x52, + 0x18, 0x0a, 0xad, 0x28, 0x5b, 0x3b, 0xe9, 0x03, + 0x84, 0xe9, 0x68, 0x50, 0xce, 0xc4, 0xbc, 0x3e, + 0x21, 0xad, 0x63, 
0xfe, 0xc6, 0xfd, 0x6e, 0x69, + 0x84, 0xa9, 0x30, 0xb1, 0x7a, 0xc4, 0x31, 0x10, + 0xc1, 0x1f, 0x6e, 0xeb, 0xa5, 0xa6, 0x01 +}; +static const u8 output70[] __initconst = { + 0x0f, 0x93, 0x2a, 0x20, 0xb3, 0x87, 0x2d, 0xce, + 0xd1, 0x3b, 0x30, 0xfd, 0x06, 0x6d, 0x0a, 0xaa, + 0x3e, 0xc4, 0x29, 0x02, 0x8a, 0xde, 0xa6, 0x4b, + 0x45, 0x1b, 0x4f, 0x25, 0x59, 0xd5, 0x56, 0x6a, + 0x3b, 0x37, 0xbd, 0x3e, 0x47, 0x12, 0x2c, 0x4e, + 0x60, 0x5f, 0x05, 0x75, 0x61, 0x23, 0x05, 0x74, + 0xcb, 0xfc, 0x5a, 0xb3, 0xac, 0x5c, 0x3d, 0xab, + 0x52, 0x5f, 0x05, 0xbc, 0x57, 0xc0, 0x7e, 0xcf, + 0x34, 0x5d, 0x7f, 0x41, 0xa3, 0x17, 0x78, 0xd5, + 0x9f, 0xec, 0x0f, 0x1e, 0xf9, 0xfe, 0xa3, 0xbd, + 0x28, 0xb0, 0xba, 0x4d, 0x84, 0xdb, 0xae, 0x8f, + 0x1d, 0x98, 0xb7, 0xdc, 0xf9, 0xad, 0x55, 0x9c, + 0x89, 0xfe, 0x9b, 0x9c, 0xa9, 0x89, 0xf6, 0x97, + 0x9c, 0x3f, 0x09, 0x3e, 0xc6, 0x02, 0xc2, 0x55, + 0x58, 0x09, 0x54, 0x66, 0xe4, 0x36, 0x81, 0x35, + 0xca, 0x88, 0x17, 0x89, 0x80, 0x24, 0x2b, 0x21, + 0x89, 0xee, 0x45, 0x5a, 0xe7, 0x1f, 0xd5, 0xa5, + 0x16, 0xa4, 0xda, 0x70, 0x7e, 0xe9, 0x4f, 0x24, + 0x61, 0x97, 0xab, 0xa0, 0xe0, 0xe7, 0xb8, 0x5c, + 0x0f, 0x25, 0x17, 0x37, 0x75, 0x12, 0xb5, 0x40, + 0xde, 0x1c, 0x0d, 0x8a, 0x77, 0x62, 0x3c, 0x86, + 0xd9, 0x70, 0x2e, 0x96, 0x30, 0xd2, 0x55, 0xb3, + 0x6b, 0xc3, 0xf2, 0x9c, 0x47, 0xf3, 0x3a, 0x24, + 0x52, 0xc6, 0x38, 0xd8, 0x22, 0xb3, 0x0c, 0xfd, + 0x2f, 0xa3, 0x3c, 0xb5, 0xe8, 0x26, 0xe1, 0xa3, + 0xad, 0xb0, 0x82, 0x17, 0xc1, 0x53, 0xb8, 0x34, + 0x48, 0xee, 0x39, 0xae, 0x51, 0x43, 0xec, 0x82, + 0xce, 0x87, 0xc6, 0x76, 0xb9, 0x76, 0xd3, 0x53, + 0xfe, 0x49, 0x24, 0x7d, 0x02, 0x42, 0x2b, 0x72, + 0xfb, 0xcb, 0xd8, 0x96, 0x02, 0xc6, 0x9a, 0x20, + 0xf3, 0x5a, 0x67, 0xe8, 0x13, 0xf8, 0xb2, 0xcb, + 0xa2, 0xec, 0x18, 0x20, 0x4a, 0xb0, 0x73, 0x53, + 0x21, 0xb0, 0x77, 0x53, 0xd8, 0x76, 0xa1, 0x30, + 0x17, 0x72, 0x2e, 0x33, 0x5f, 0x33, 0x6b, 0x28, + 0xfb, 0xb0, 0xf4, 0xec, 0x8e, 0xed, 0x20, 0x7d, + 0x57, 0x8c, 0x74, 0x28, 0x64, 0x8b, 0xeb, 0x59, + 0x38, 0x3f, 0xe7, 0x83, 0x2e, 0xe5, 0x64, 0x4d, + 0x5c, 0x1f, 0xe1, 0x3b, 0xd9, 0x84, 0xdb, 0xc9, + 0xec, 0xd8, 0xc1, 0x7c, 0x1f, 0x1b, 0x68, 0x35, + 0xc6, 0x34, 0x10, 0xef, 0x19, 0xc9, 0x0a, 0xd6, + 0x43, 0x7f, 0xa6, 0xcb, 0x9d, 0xf4, 0xf0, 0x16, + 0xb1, 0xb1, 0x96, 0x64, 0xec, 0x8d, 0x22, 0x4c, + 0x4b, 0xe8, 0x1a, 0xba, 0x6f, 0xb7, 0xfc, 0xa5, + 0x69, 0x3e, 0xad, 0x78, 0x79, 0x19, 0xb5, 0x04, + 0x69, 0xe5, 0x3f, 0xff, 0x60, 0x8c, 0xda, 0x0b, + 0x7b, 0xf7, 0xe7, 0xe6, 0x29, 0x3a, 0x85, 0xba, + 0xb5, 0xb0, 0x35, 0xbd, 0x38, 0xce, 0x34, 0x5e, + 0xf2, 0xdc, 0xd1, 0x8f, 0xc3, 0x03, 0x24, 0xa2, + 0x03, 0xf7, 0x4e, 0x49, 0x5b, 0xcf, 0x6d, 0xb0, + 0xeb, 0xe3, 0x30, 0x28, 0xd5, 0x5b, 0x82, 0x5f, + 0xe4, 0x7c, 0x1e, 0xec, 0xd2, 0x39, 0xf9, 0x6f, + 0x2e, 0xb3, 0xcd, 0x01, 0xb1, 0x67, 0xaa, 0xea, + 0xaa, 0xb3, 0x63, 0xaf, 0xd9, 0xb2, 0x1f, 0xba, + 0x05, 0x20, 0xeb, 0x19, 0x32, 0xf0, 0x6c, 0x3f, + 0x40, 0xcc, 0x93, 0xb3, 0xd8, 0x25, 0xa6, 0xe4, + 0xce, 0xd7, 0x7e, 0x48, 0x99, 0x65, 0x7f, 0x86, + 0xc5, 0xd4, 0x79, 0x6b, 0xab, 0x43, 0xb8, 0x6b, + 0xf1, 0x2f, 0xea, 0x4c, 0x5e, 0xf0, 0x3b, 0xb4, + 0xb8, 0xb0, 0x94, 0x0c, 0x6b, 0xe7, 0x22, 0x93, + 0xaa, 0x01, 0xcb, 0xf1, 0x11, 0x60, 0xf6, 0x69, + 0xcf, 0x14, 0xde, 0xfb, 0x90, 0x05, 0x27, 0x0c, + 0x1a, 0x9e, 0xf0, 0xb4, 0xc6, 0xa1, 0xe8, 0xdd, + 0xd0, 0x4c, 0x25, 0x4f, 0x9c, 0xb7, 0xb1, 0xb0, + 0x21, 0xdb, 0x87, 0x09, 0x03, 0xf2, 0xb3 +}; +static const u8 key70[] __initconst = { + 0x3b, 0x5b, 0x59, 0x36, 0x44, 0xd1, 0xba, 0x71, + 0x55, 0x87, 0x4d, 0x62, 0x3d, 0xc2, 0xfc, 0xaa, + 0x3f, 0x4e, 0x1a, 0xe4, 0xca, 0x09, 0xfc, 
0x6a, + 0xb2, 0xd6, 0x5d, 0x79, 0xf9, 0x1a, 0x91, 0xa7 +}; +enum { nonce70 = 0x3fd6786dd147a85ULL }; + +static const u8 input71[] __initconst = { + 0x18, 0x78, 0xd6, 0x79, 0xe4, 0x9a, 0x6c, 0x73, + 0x17, 0xd4, 0x05, 0x0f, 0x1e, 0x9f, 0xd9, 0x2b, + 0x86, 0x48, 0x7d, 0xf4, 0xd9, 0x1c, 0x76, 0xfc, + 0x8e, 0x22, 0x34, 0xe1, 0x48, 0x4a, 0x8d, 0x79, + 0xb7, 0xbb, 0x88, 0xab, 0x90, 0xde, 0xc5, 0xb4, + 0xb4, 0xe7, 0x85, 0x49, 0xda, 0x57, 0xeb, 0xc9, + 0xcd, 0x21, 0xfc, 0x45, 0x6e, 0x32, 0x67, 0xf2, + 0x4f, 0xa6, 0x54, 0xe5, 0x20, 0xed, 0xcf, 0xc6, + 0x62, 0x25, 0x8e, 0x00, 0xf8, 0x6b, 0xa2, 0x80, + 0xac, 0x88, 0xa6, 0x59, 0x27, 0x83, 0x95, 0x11, + 0x3f, 0x70, 0x5e, 0x3f, 0x11, 0xfb, 0x26, 0xbf, + 0xe1, 0x48, 0x75, 0xf9, 0x86, 0xbf, 0xa6, 0x5d, + 0x15, 0x61, 0x66, 0xbf, 0x78, 0x8f, 0x6b, 0x9b, + 0xda, 0x98, 0xb7, 0x19, 0xe2, 0xf2, 0xa3, 0x9c, + 0x7c, 0x6a, 0x9a, 0xd8, 0x3d, 0x4c, 0x2c, 0xe1, + 0x09, 0xb4, 0x28, 0x82, 0x4e, 0xab, 0x0c, 0x75, + 0x63, 0xeb, 0xbc, 0xd0, 0x71, 0xa2, 0x73, 0x85, + 0xed, 0x53, 0x7a, 0x3f, 0x68, 0x9f, 0xd0, 0xa9, + 0x00, 0x5a, 0x9e, 0x80, 0x55, 0x00, 0xe6, 0xae, + 0x0c, 0x03, 0x40, 0xed, 0xfc, 0x68, 0x4a, 0xb7, + 0x1e, 0x09, 0x65, 0x30, 0x5a, 0x3d, 0x97, 0x4d, + 0x5e, 0x51, 0x8e, 0xda, 0xc3, 0x55, 0x8c, 0xfb, + 0xcf, 0x83, 0x05, 0x35, 0x0d, 0x08, 0x1b, 0xf3, + 0x3a, 0x57, 0x96, 0xac, 0x58, 0x8b, 0xfa, 0x00, + 0x49, 0x15, 0x78, 0xd2, 0x4b, 0xed, 0xb8, 0x59, + 0x78, 0x9b, 0x7f, 0xaa, 0xfc, 0xe7, 0x46, 0xdc, + 0x7b, 0x34, 0xd0, 0x34, 0xe5, 0x10, 0xff, 0x4d, + 0x5a, 0x4d, 0x60, 0xa7, 0x16, 0x54, 0xc4, 0xfd, + 0xca, 0x5d, 0x68, 0xc7, 0x4a, 0x01, 0x8d, 0x7f, + 0x74, 0x5d, 0xff, 0xb8, 0x37, 0x15, 0x62, 0xfa, + 0x44, 0x45, 0xcf, 0x77, 0x3b, 0x1d, 0xb2, 0xd2, + 0x0d, 0x42, 0x00, 0x39, 0x68, 0x1f, 0xcc, 0x89, + 0x73, 0x5d, 0xa9, 0x2e, 0xfd, 0x58, 0x62, 0xca, + 0x35, 0x8e, 0x70, 0x70, 0xaa, 0x6e, 0x14, 0xe9, + 0xa4, 0xe2, 0x10, 0x66, 0x71, 0xdc, 0x4c, 0xfc, + 0xa9, 0xdc, 0x8f, 0x57, 0x4d, 0xc5, 0xac, 0xd7, + 0xa9, 0xf3, 0xf3, 0xa1, 0xff, 0x62, 0xa0, 0x8f, + 0xe4, 0x96, 0x3e, 0xcb, 0x9f, 0x76, 0x42, 0x39, + 0x1f, 0x24, 0xfd, 0xfd, 0x79, 0xe8, 0x27, 0xdf, + 0xa8, 0xf6, 0x33, 0x8b, 0x31, 0x59, 0x69, 0xcf, + 0x6a, 0xef, 0x89, 0x4d, 0xa7, 0xf6, 0x7e, 0x97, + 0x14, 0xbd, 0xda, 0xdd, 0xb4, 0x84, 0x04, 0x24, + 0xe0, 0x17, 0xe1, 0x0f, 0x1f, 0x8a, 0x6a, 0x71, + 0x74, 0x41, 0xdc, 0x59, 0x5c, 0x8f, 0x01, 0x25, + 0x92, 0xf0, 0x2e, 0x15, 0x62, 0x71, 0x9a, 0x9f, + 0x87, 0xdf, 0x62, 0x49, 0x7f, 0x86, 0x62, 0xfc, + 0x20, 0x84, 0xd7, 0xe3, 0x3a, 0xd9, 0x37, 0x85, + 0xb7, 0x84, 0x5a, 0xf9, 0xed, 0x21, 0x32, 0x94, + 0x3e, 0x04, 0xe7, 0x8c, 0x46, 0x76, 0x21, 0x67, + 0xf6, 0x95, 0x64, 0x92, 0xb7, 0x15, 0xf6, 0xe3, + 0x41, 0x27, 0x9d, 0xd7, 0xe3, 0x79, 0x75, 0x92, + 0xd0, 0xc1, 0xf3, 0x40, 0x92, 0x08, 0xde, 0x90, + 0x22, 0x82, 0xb2, 0x69, 0xae, 0x1a, 0x35, 0x11, + 0x89, 0xc8, 0x06, 0x82, 0x95, 0x23, 0x44, 0x08, + 0x22, 0xf2, 0x71, 0x73, 0x1b, 0x88, 0x11, 0xcf, + 0x1c, 0x7e, 0x8a, 0x2e, 0xdc, 0x79, 0x57, 0xce, + 0x1f, 0xe7, 0x6c, 0x07, 0xd8, 0x06, 0xbe, 0xec, + 0xa3, 0xcf, 0xf9, 0x68, 0xa5, 0xb8, 0xf0, 0xe3, + 0x3f, 0x01, 0x92, 0xda, 0xf1, 0xa0, 0x2d, 0x7b, + 0xab, 0x57, 0x58, 0x2a, 0xaf, 0xab, 0xbd, 0xf2, + 0xe5, 0xaf, 0x7e, 0x1f, 0x46, 0x24, 0x9e, 0x20, + 0x22, 0x0f, 0x84, 0x4c, 0xb7, 0xd8, 0x03, 0xe8, + 0x09, 0x73, 0x6c, 0xc6, 0x9b, 0x90, 0xe0, 0xdb, + 0xf2, 0x71, 0xba, 0xad, 0xb3, 0xec, 0xda, 0x7a +}; +static const u8 output71[] __initconst = { + 0x28, 0xc5, 0x9b, 0x92, 0xf9, 0x21, 0x4f, 0xbb, + 0xef, 0x3b, 0xf0, 0xf5, 0x3a, 0x6d, 0x7f, 0xd6, + 0x6a, 0x8d, 0xa1, 0x01, 0x5c, 0x62, 0x20, 0x8b, + 0x5b, 
0x39, 0xd5, 0xd3, 0xc2, 0xf6, 0x9d, 0x5e, + 0xcc, 0xe1, 0xa2, 0x61, 0x16, 0xe2, 0xce, 0xe9, + 0x86, 0xd0, 0xfc, 0xce, 0x9a, 0x28, 0x27, 0xc4, + 0x0c, 0xb9, 0xaa, 0x8d, 0x48, 0xdb, 0xbf, 0x82, + 0x7d, 0xd0, 0x35, 0xc4, 0x06, 0x34, 0xb4, 0x19, + 0x51, 0x73, 0xf4, 0x7a, 0xf4, 0xfd, 0xe9, 0x1d, + 0xdc, 0x0f, 0x7e, 0xf7, 0x96, 0x03, 0xe3, 0xb1, + 0x2e, 0x22, 0x59, 0xb7, 0x6d, 0x1c, 0x97, 0x8c, + 0xd7, 0x31, 0x08, 0x26, 0x4c, 0x6d, 0xc6, 0x14, + 0xa5, 0xeb, 0x45, 0x6a, 0x88, 0xa3, 0xa2, 0x36, + 0xc4, 0x35, 0xb1, 0x5a, 0xa0, 0xad, 0xf7, 0x06, + 0x9b, 0x5d, 0xc1, 0x15, 0xc1, 0xce, 0x0a, 0xb0, + 0x57, 0x2e, 0x3f, 0x6f, 0x0d, 0x10, 0xd9, 0x11, + 0x2c, 0x9c, 0xad, 0x2d, 0xa5, 0x81, 0xfb, 0x4e, + 0x8f, 0xd5, 0x32, 0x4e, 0xaf, 0x5c, 0xc1, 0x86, + 0xde, 0x56, 0x5a, 0x33, 0x29, 0xf7, 0x67, 0xc6, + 0x37, 0x6f, 0xb2, 0x37, 0x4e, 0xd4, 0x69, 0x79, + 0xaf, 0xd5, 0x17, 0x79, 0xe0, 0xba, 0x62, 0xa3, + 0x68, 0xa4, 0x87, 0x93, 0x8d, 0x7e, 0x8f, 0xa3, + 0x9c, 0xef, 0xda, 0xe3, 0xa5, 0x1f, 0xcd, 0x30, + 0xa6, 0x55, 0xac, 0x4c, 0x69, 0x74, 0x02, 0xc7, + 0x5d, 0x95, 0x81, 0x4a, 0x68, 0x11, 0xd3, 0xa9, + 0x98, 0xb1, 0x0b, 0x0d, 0xae, 0x40, 0x86, 0x65, + 0xbf, 0xcc, 0x2d, 0xef, 0x57, 0xca, 0x1f, 0xe4, + 0x34, 0x4e, 0xa6, 0x5e, 0x82, 0x6e, 0x61, 0xad, + 0x0b, 0x3c, 0xf8, 0xeb, 0x01, 0x43, 0x7f, 0x87, + 0xa2, 0xa7, 0x6a, 0xe9, 0x62, 0x23, 0x24, 0x61, + 0xf1, 0xf7, 0x36, 0xdb, 0x10, 0xe5, 0x57, 0x72, + 0x3a, 0xc2, 0xae, 0xcc, 0x75, 0xc7, 0x80, 0x05, + 0x0a, 0x5c, 0x4c, 0x95, 0xda, 0x02, 0x01, 0x14, + 0x06, 0x6b, 0x5c, 0x65, 0xc2, 0xb8, 0x4a, 0xd6, + 0xd3, 0xb4, 0xd8, 0x12, 0x52, 0xb5, 0x60, 0xd3, + 0x8e, 0x5f, 0x5c, 0x76, 0x33, 0x7a, 0x05, 0xe5, + 0xcb, 0xef, 0x4f, 0x89, 0xf1, 0xba, 0x32, 0x6f, + 0x33, 0xcd, 0x15, 0x8d, 0xa3, 0x0c, 0x3f, 0x63, + 0x11, 0xe7, 0x0e, 0xe0, 0x00, 0x01, 0xe9, 0xe8, + 0x8e, 0x36, 0x34, 0x8d, 0x96, 0xb5, 0x03, 0xcf, + 0x55, 0x62, 0x49, 0x7a, 0x34, 0x44, 0xa5, 0xee, + 0x8c, 0x46, 0x06, 0x22, 0xab, 0x1d, 0x53, 0x9c, + 0xa1, 0xf9, 0x67, 0x18, 0x57, 0x89, 0xf9, 0xc2, + 0xd1, 0x7e, 0xbe, 0x36, 0x40, 0xcb, 0xe9, 0x04, + 0xde, 0xb1, 0x3b, 0x29, 0x52, 0xc5, 0x9a, 0xb5, + 0xa2, 0x7c, 0x7b, 0xfe, 0xe5, 0x92, 0x73, 0xea, + 0xea, 0x7b, 0xba, 0x0a, 0x8c, 0x88, 0x15, 0xe6, + 0x53, 0xbf, 0x1c, 0x33, 0xf4, 0x9b, 0x9a, 0x5e, + 0x8d, 0xae, 0x60, 0xdc, 0xcb, 0x5d, 0xfa, 0xbe, + 0x06, 0xc3, 0x3f, 0x06, 0xe7, 0x00, 0x40, 0x7b, + 0xaa, 0x94, 0xfa, 0x6d, 0x1f, 0xe4, 0xc5, 0xa9, + 0x1b, 0x5f, 0x36, 0xea, 0x5a, 0xdd, 0xa5, 0x48, + 0x6a, 0x55, 0xd2, 0x47, 0x28, 0xbf, 0x96, 0xf1, + 0x9f, 0xb6, 0x11, 0x4b, 0xd3, 0x44, 0x7d, 0x48, + 0x41, 0x61, 0xdb, 0x12, 0xd4, 0xc2, 0x59, 0x82, + 0x4c, 0x47, 0x5c, 0x04, 0xf6, 0x7b, 0xd3, 0x92, + 0x2e, 0xe8, 0x40, 0xef, 0x15, 0x32, 0x97, 0xdc, + 0x35, 0x4c, 0x6e, 0xa4, 0x97, 0xe9, 0x24, 0xde, + 0x63, 0x8b, 0xb1, 0x6b, 0x48, 0xbb, 0x46, 0x1f, + 0x84, 0xd6, 0x17, 0xb0, 0x5a, 0x4a, 0x4e, 0xd5, + 0x31, 0xd7, 0xcf, 0xa0, 0x39, 0xc6, 0x2e, 0xfc, + 0xa6, 0xa3, 0xd3, 0x0f, 0xa4, 0x28, 0xac, 0xb2, + 0xf4, 0x48, 0x8d, 0x50, 0xa5, 0x1c, 0x44, 0x5d, + 0x6e, 0x38, 0xb7, 0x2b, 0x8a, 0x45, 0xa7, 0x3d +}; +static const u8 key71[] __initconst = { + 0x8b, 0x68, 0xc4, 0xb7, 0x0d, 0x81, 0xef, 0x52, + 0x1e, 0x05, 0x96, 0x72, 0x62, 0x89, 0x27, 0x83, + 0xd0, 0xc7, 0x33, 0x6d, 0xf2, 0xcc, 0x69, 0xf9, + 0x23, 0xae, 0x99, 0xb1, 0xd1, 0x05, 0x4e, 0x54 +}; +enum { nonce71 = 0x983f03656d64b5f6ULL }; + +static const u8 input72[] __initconst = { + 0x6b, 0x09, 0xc9, 0x57, 0x3d, 0x79, 0x04, 0x8c, + 0x65, 0xad, 0x4a, 0x0f, 0xa1, 0x31, 0x3a, 0xdd, + 0x14, 0x8e, 0xe8, 0xfe, 0xbf, 0x42, 0x87, 0x98, + 0x2e, 0x8d, 0x83, 0xa3, 
0xf8, 0x55, 0x3d, 0x84, + 0x1e, 0x0e, 0x05, 0x4a, 0x38, 0x9e, 0xe7, 0xfe, + 0xd0, 0x4d, 0x79, 0x74, 0x3a, 0x0b, 0x9b, 0xe1, + 0xfd, 0x51, 0x84, 0x4e, 0xb2, 0x25, 0xe4, 0x64, + 0x4c, 0xda, 0xcf, 0x46, 0xec, 0xba, 0x12, 0xeb, + 0x5a, 0x33, 0x09, 0x6e, 0x78, 0x77, 0x8f, 0x30, + 0xb1, 0x7d, 0x3f, 0x60, 0x8c, 0xf2, 0x1d, 0x8e, + 0xb4, 0x70, 0xa2, 0x90, 0x7c, 0x79, 0x1a, 0x2c, + 0xf6, 0x28, 0x79, 0x7c, 0x53, 0xc5, 0xfa, 0xcc, + 0x65, 0x9b, 0xe1, 0x51, 0xd1, 0x7f, 0x1d, 0xc4, + 0xdb, 0xd4, 0xd9, 0x04, 0x61, 0x7d, 0xbe, 0x12, + 0xfc, 0xcd, 0xaf, 0xe4, 0x0f, 0x9c, 0x20, 0xb5, + 0x22, 0x40, 0x18, 0xda, 0xe4, 0xda, 0x8c, 0x2d, + 0x84, 0xe3, 0x5f, 0x53, 0x17, 0xed, 0x78, 0xdc, + 0x2f, 0xe8, 0x31, 0xc7, 0xe6, 0x39, 0x71, 0x40, + 0xb4, 0x0f, 0xc9, 0xa9, 0x7e, 0x78, 0x87, 0xc1, + 0x05, 0x78, 0xbb, 0x01, 0xf2, 0x8f, 0x33, 0xb0, + 0x6e, 0x84, 0xcd, 0x36, 0x33, 0x5c, 0x5b, 0x8e, + 0xf1, 0xac, 0x30, 0xfe, 0x33, 0xec, 0x08, 0xf3, + 0x7e, 0xf2, 0xf0, 0x4c, 0xf2, 0xad, 0xd8, 0xc1, + 0xd4, 0x4e, 0x87, 0x06, 0xd4, 0x75, 0xe7, 0xe3, + 0x09, 0xd3, 0x4d, 0xe3, 0x21, 0x32, 0xba, 0xb4, + 0x68, 0x68, 0xcb, 0x4c, 0xa3, 0x1e, 0xb3, 0x87, + 0x7b, 0xd3, 0x0c, 0x63, 0x37, 0x71, 0x79, 0xfb, + 0x58, 0x36, 0x57, 0x0f, 0x34, 0x1d, 0xc1, 0x42, + 0x02, 0x17, 0xe7, 0xed, 0xe8, 0xe7, 0x76, 0xcb, + 0x42, 0xc4, 0x4b, 0xe2, 0xb2, 0x5e, 0x42, 0xd5, + 0xec, 0x9d, 0xc1, 0x32, 0x71, 0xe4, 0xeb, 0x10, + 0x68, 0x1a, 0x6e, 0x99, 0x8e, 0x73, 0x12, 0x1f, + 0x97, 0x0c, 0x9e, 0xcd, 0x02, 0x3e, 0x4c, 0xa0, + 0xf2, 0x8d, 0xe5, 0x44, 0xca, 0x6d, 0xfe, 0x07, + 0xe3, 0xe8, 0x9b, 0x76, 0xc1, 0x6d, 0xb7, 0x6e, + 0x0d, 0x14, 0x00, 0x6f, 0x8a, 0xfd, 0x43, 0xc6, + 0x43, 0xa5, 0x9c, 0x02, 0x47, 0x10, 0xd4, 0xb4, + 0x9b, 0x55, 0x67, 0xc8, 0x7f, 0xc1, 0x8a, 0x1f, + 0x1e, 0xd1, 0xbc, 0x99, 0x5d, 0x50, 0x4f, 0x89, + 0xf1, 0xe6, 0x5d, 0x91, 0x40, 0xdc, 0x20, 0x67, + 0x56, 0xc2, 0xef, 0xbd, 0x2c, 0xa2, 0x99, 0x38, + 0xe0, 0x45, 0xec, 0x44, 0x05, 0x52, 0x65, 0x11, + 0xfc, 0x3b, 0x19, 0xcb, 0x71, 0xc2, 0x8e, 0x0e, + 0x03, 0x2a, 0x03, 0x3b, 0x63, 0x06, 0x31, 0x9a, + 0xac, 0x53, 0x04, 0x14, 0xd4, 0x80, 0x9d, 0x6b, + 0x42, 0x7e, 0x7e, 0x4e, 0xdc, 0xc7, 0x01, 0x49, + 0x9f, 0xf5, 0x19, 0x86, 0x13, 0x28, 0x2b, 0xa6, + 0xa6, 0xbe, 0xa1, 0x7e, 0x71, 0x05, 0x00, 0xff, + 0x59, 0x2d, 0xb6, 0x63, 0xf0, 0x1e, 0x2e, 0x69, + 0x9b, 0x85, 0xf1, 0x1e, 0x8a, 0x64, 0x39, 0xab, + 0x00, 0x12, 0xe4, 0x33, 0x4b, 0xb5, 0xd8, 0xb3, + 0x6b, 0x5b, 0x8b, 0x5c, 0xd7, 0x6f, 0x23, 0xcf, + 0x3f, 0x2e, 0x5e, 0x47, 0xb9, 0xb8, 0x1f, 0xf0, + 0x1d, 0xda, 0xe7, 0x4f, 0x6e, 0xab, 0xc3, 0x36, + 0xb4, 0x74, 0x6b, 0xeb, 0xc7, 0x5d, 0x91, 0xe5, + 0xda, 0xf2, 0xc2, 0x11, 0x17, 0x48, 0xf8, 0x9c, + 0xc9, 0x8b, 0xc1, 0xa2, 0xf4, 0xcd, 0x16, 0xf8, + 0x27, 0xd9, 0x6c, 0x6f, 0xb5, 0x8f, 0x77, 0xca, + 0x1b, 0xd8, 0xef, 0x84, 0x68, 0x71, 0x53, 0xc1, + 0x43, 0x0f, 0x9f, 0x98, 0xae, 0x7e, 0x31, 0xd2, + 0x98, 0xfb, 0x20, 0xa2, 0xad, 0x00, 0x10, 0x83, + 0x00, 0x8b, 0xeb, 0x56, 0xd2, 0xc4, 0xcc, 0x7f, + 0x2f, 0x4e, 0xfa, 0x88, 0x13, 0xa4, 0x2c, 0xde, + 0x6b, 0x77, 0x86, 0x10, 0x6a, 0xab, 0x43, 0x0a, + 0x02 +}; +static const u8 output72[] __initconst = { + 0x42, 0x89, 0xa4, 0x80, 0xd2, 0xcb, 0x5f, 0x7f, + 0x2a, 0x1a, 0x23, 0x00, 0xa5, 0x6a, 0x95, 0xa3, + 0x9a, 0x41, 0xa1, 0xd0, 0x2d, 0x1e, 0xd6, 0x13, + 0x34, 0x40, 0x4e, 0x7f, 0x1a, 0xbe, 0xa0, 0x3d, + 0x33, 0x9c, 0x56, 0x2e, 0x89, 0x25, 0x45, 0xf9, + 0xf0, 0xba, 0x9c, 0x6d, 0xd1, 0xd1, 0xde, 0x51, + 0x47, 0x63, 0xc9, 0xbd, 0xfa, 0xa2, 0x9e, 0xad, + 0x6a, 0x7b, 0x21, 0x1a, 0x6c, 0x3e, 0xff, 0x46, + 0xbe, 0xf3, 0x35, 0x7a, 0x6e, 0xb3, 0xb9, 0xf7, + 0xda, 0x5e, 0xf0, 0x14, 
0xb5, 0x70, 0xa4, 0x2b, + 0xdb, 0xbb, 0xc7, 0x31, 0x4b, 0x69, 0x5a, 0x83, + 0x70, 0xd9, 0x58, 0xd4, 0x33, 0x84, 0x23, 0xf0, + 0xae, 0xbb, 0x6d, 0x26, 0x7c, 0xc8, 0x30, 0xf7, + 0x24, 0xad, 0xbd, 0xe4, 0x2c, 0x38, 0x38, 0xac, + 0xe1, 0x4a, 0x9b, 0xac, 0x33, 0x0e, 0x4a, 0xf4, + 0x93, 0xed, 0x07, 0x82, 0x81, 0x4f, 0x8f, 0xb1, + 0xdd, 0x73, 0xd5, 0x50, 0x6d, 0x44, 0x1e, 0xbe, + 0xa7, 0xcd, 0x17, 0x57, 0xd5, 0x3b, 0x62, 0x36, + 0xcf, 0x7d, 0xc8, 0xd8, 0xd1, 0x78, 0xd7, 0x85, + 0x46, 0x76, 0x5d, 0xcc, 0xfe, 0xe8, 0x94, 0xc5, + 0xad, 0xbc, 0x5e, 0xbc, 0x8d, 0x1d, 0xdf, 0x03, + 0xc9, 0x6b, 0x1b, 0x81, 0xd1, 0xb6, 0x5a, 0x24, + 0xe3, 0xdc, 0x3f, 0x20, 0xc9, 0x07, 0x73, 0x4c, + 0x43, 0x13, 0x87, 0x58, 0x34, 0x0d, 0x14, 0x63, + 0x0f, 0x6f, 0xad, 0x8d, 0xac, 0x7c, 0x67, 0x68, + 0xa3, 0x9d, 0x7f, 0x00, 0xdf, 0x28, 0xee, 0x67, + 0xf4, 0x5c, 0x26, 0xcb, 0xef, 0x56, 0x71, 0xc8, + 0xc6, 0x67, 0x5f, 0x38, 0xbb, 0xa0, 0xb1, 0x5c, + 0x1f, 0xb3, 0x08, 0xd9, 0x38, 0xcf, 0x74, 0x54, + 0xc6, 0xa4, 0xc4, 0xc0, 0x9f, 0xb3, 0xd0, 0xda, + 0x62, 0x67, 0x8b, 0x81, 0x33, 0xf0, 0xa9, 0x73, + 0xa4, 0xd1, 0x46, 0x88, 0x8d, 0x85, 0x12, 0x40, + 0xba, 0x1a, 0xcd, 0x82, 0xd8, 0x8d, 0xc4, 0x52, + 0xe7, 0x01, 0x94, 0x2e, 0x0e, 0xd0, 0xaf, 0xe7, + 0x2d, 0x3f, 0x3c, 0xaa, 0xf4, 0xf5, 0xa7, 0x01, + 0x4c, 0x14, 0xe2, 0xc2, 0x96, 0x76, 0xbe, 0x05, + 0xaa, 0x19, 0xb1, 0xbd, 0x95, 0xbb, 0x5a, 0xf9, + 0xa5, 0xa7, 0xe6, 0x16, 0x38, 0x34, 0xf7, 0x9d, + 0x19, 0x66, 0x16, 0x8e, 0x7f, 0x2b, 0x5a, 0xfb, + 0xb5, 0x29, 0x79, 0xbf, 0x52, 0xae, 0x30, 0x95, + 0x3f, 0x31, 0x33, 0x28, 0xde, 0xc5, 0x0d, 0x55, + 0x89, 0xec, 0x21, 0x11, 0x0f, 0x8b, 0xfe, 0x63, + 0x3a, 0xf1, 0x95, 0x5c, 0xcd, 0x50, 0xe4, 0x5d, + 0x8f, 0xa7, 0xc8, 0xca, 0x93, 0xa0, 0x67, 0x82, + 0x63, 0x5c, 0xd0, 0xed, 0xe7, 0x08, 0xc5, 0x60, + 0xf8, 0xb4, 0x47, 0xf0, 0x1a, 0x65, 0x4e, 0xa3, + 0x51, 0x68, 0xc7, 0x14, 0xa1, 0xd9, 0x39, 0x72, + 0xa8, 0x6f, 0x7c, 0x7e, 0xf6, 0x03, 0x0b, 0x25, + 0x9b, 0xf2, 0xca, 0x49, 0xae, 0x5b, 0xf8, 0x0f, + 0x71, 0x51, 0x01, 0xa6, 0x23, 0xa9, 0xdf, 0xd0, + 0x7a, 0x39, 0x19, 0xf5, 0xc5, 0x26, 0x44, 0x7b, + 0x0a, 0x4a, 0x41, 0xbf, 0xf2, 0x8e, 0x83, 0x50, + 0x91, 0x96, 0x72, 0x02, 0xf6, 0x80, 0xbf, 0x95, + 0x41, 0xac, 0xda, 0xb0, 0xba, 0xe3, 0x76, 0xb1, + 0x9d, 0xff, 0x1f, 0x33, 0x02, 0x85, 0xfc, 0x2a, + 0x29, 0xe6, 0xe3, 0x9d, 0xd0, 0xef, 0xc2, 0xd6, + 0x9c, 0x4a, 0x62, 0xac, 0xcb, 0xea, 0x8b, 0xc3, + 0x08, 0x6e, 0x49, 0x09, 0x26, 0x19, 0xc1, 0x30, + 0xcc, 0x27, 0xaa, 0xc6, 0x45, 0x88, 0xbd, 0xae, + 0xd6, 0x79, 0xff, 0x4e, 0xfc, 0x66, 0x4d, 0x02, + 0xa5, 0xee, 0x8e, 0xa5, 0xb6, 0x15, 0x72, 0x24, + 0xb1, 0xbf, 0xbf, 0x64, 0xcf, 0xcc, 0x93, 0xe9, + 0xb6, 0xfd, 0xb4, 0xb6, 0x21, 0xb5, 0x48, 0x08, + 0x0f, 0x11, 0x65, 0xe1, 0x47, 0xee, 0x93, 0x29, + 0xad +}; +static const u8 key72[] __initconst = { + 0xb9, 0xa2, 0xfc, 0x59, 0x06, 0x3f, 0x77, 0xa5, + 0x66, 0xd0, 0x2b, 0x22, 0x74, 0x22, 0x4c, 0x1e, + 0x6a, 0x39, 0xdf, 0xe1, 0x0d, 0x4c, 0x64, 0x99, + 0x54, 0x8a, 0xba, 0x1d, 0x2c, 0x21, 0x5f, 0xc3 +}; +enum { nonce72 = 0x3d069308fa3db04bULL }; + +static const u8 input73[] __initconst = { + 0xe4, 0xdd, 0x36, 0xd4, 0xf5, 0x70, 0x51, 0x73, + 0x97, 0x1d, 0x45, 0x05, 0x92, 0xe7, 0xeb, 0xb7, + 0x09, 0x82, 0x6e, 0x25, 0x6c, 0x50, 0xf5, 0x40, + 0x19, 0xba, 0xbc, 0xf4, 0x39, 0x14, 0xc5, 0x15, + 0x83, 0x40, 0xbd, 0x26, 0xe0, 0xff, 0x3b, 0x22, + 0x7c, 0x7c, 0xd7, 0x0b, 0xe9, 0x25, 0x0c, 0x3d, + 0x92, 0x38, 0xbe, 0xe4, 0x22, 0x75, 0x65, 0xf1, + 0x03, 0x85, 0x34, 0x09, 0xb8, 0x77, 0xfb, 0x48, + 0xb1, 0x2e, 0x21, 0x67, 0x9b, 0x9d, 0xad, 0x18, + 0x82, 0x0d, 0x6b, 0xc3, 0xcf, 0x00, 
0x61, 0x6e, + 0xda, 0xdc, 0xa7, 0x0b, 0x5c, 0x02, 0x1d, 0xa6, + 0x4e, 0x0d, 0x7f, 0x37, 0x01, 0x5a, 0x37, 0xf3, + 0x2b, 0xbf, 0xba, 0xe2, 0x1c, 0xb3, 0xa3, 0xbc, + 0x1c, 0x93, 0x1a, 0xb1, 0x71, 0xaf, 0xe2, 0xdd, + 0x17, 0xee, 0x53, 0xfa, 0xfb, 0x02, 0x40, 0x3e, + 0x03, 0xca, 0xe7, 0xc3, 0x51, 0x81, 0xcc, 0x8c, + 0xca, 0xcf, 0x4e, 0xc5, 0x78, 0x99, 0xfd, 0xbf, + 0xea, 0xab, 0x38, 0x81, 0xfc, 0xd1, 0x9e, 0x41, + 0x0b, 0x84, 0x25, 0xf1, 0x6b, 0x3c, 0xf5, 0x40, + 0x0d, 0xc4, 0x3e, 0xb3, 0x6a, 0xec, 0x6e, 0x75, + 0xdc, 0x9b, 0xdf, 0x08, 0x21, 0x16, 0xfb, 0x7a, + 0x8e, 0x19, 0x13, 0x02, 0xa7, 0xfc, 0x58, 0x21, + 0xc3, 0xb3, 0x59, 0x5a, 0x9c, 0xef, 0x38, 0xbd, + 0x87, 0x55, 0xd7, 0x0d, 0x1f, 0x84, 0xdc, 0x98, + 0x22, 0xca, 0x87, 0x96, 0x71, 0x6d, 0x68, 0x00, + 0xcb, 0x4f, 0x2f, 0xc4, 0x64, 0x0c, 0xc1, 0x53, + 0x0c, 0x90, 0xe7, 0x3c, 0x88, 0xca, 0xc5, 0x85, + 0xa3, 0x2a, 0x96, 0x7c, 0x82, 0x6d, 0x45, 0xf5, + 0xb7, 0x8d, 0x17, 0x69, 0xd6, 0xcd, 0x3c, 0xd3, + 0xe7, 0x1c, 0xce, 0x93, 0x50, 0xd4, 0x59, 0xa2, + 0xd8, 0x8b, 0x72, 0x60, 0x5b, 0x25, 0x14, 0xcd, + 0x5a, 0xe8, 0x8c, 0xdb, 0x23, 0x8d, 0x2b, 0x59, + 0x12, 0x13, 0x10, 0x47, 0xa4, 0xc8, 0x3c, 0xc1, + 0x81, 0x89, 0x6c, 0x98, 0xec, 0x8f, 0x7b, 0x32, + 0xf2, 0x87, 0xd9, 0xa2, 0x0d, 0xc2, 0x08, 0xf9, + 0xd5, 0xf3, 0x91, 0xe7, 0xb3, 0x87, 0xa7, 0x0b, + 0x64, 0x8f, 0xb9, 0x55, 0x1c, 0x81, 0x96, 0x6c, + 0xa1, 0xc9, 0x6e, 0x3b, 0xcd, 0x17, 0x1b, 0xfc, + 0xa6, 0x05, 0xba, 0x4a, 0x7d, 0x03, 0x3c, 0x59, + 0xc8, 0xee, 0x50, 0xb2, 0x5b, 0xe1, 0x4d, 0x6a, + 0x1f, 0x09, 0xdc, 0xa2, 0x51, 0xd1, 0x93, 0x3a, + 0x5f, 0x72, 0x1d, 0x26, 0x14, 0x62, 0xa2, 0x41, + 0x3d, 0x08, 0x70, 0x7b, 0x27, 0x3d, 0xbc, 0xdf, + 0x15, 0xfa, 0xb9, 0x5f, 0xb5, 0x38, 0x84, 0x0b, + 0x58, 0x3d, 0xee, 0x3f, 0x32, 0x65, 0x6d, 0xd7, + 0xce, 0x97, 0x3c, 0x8d, 0xfb, 0x63, 0xb9, 0xb0, + 0xa8, 0x4a, 0x72, 0x99, 0x97, 0x58, 0xc8, 0xa7, + 0xf9, 0x4c, 0xae, 0xc1, 0x63, 0xb9, 0x57, 0x18, + 0x8a, 0xfa, 0xab, 0xe9, 0xf3, 0x67, 0xe6, 0xfd, + 0xd2, 0x9d, 0x5c, 0xa9, 0x8e, 0x11, 0x0a, 0xf4, + 0x4b, 0xf1, 0xec, 0x1a, 0xaf, 0x50, 0x5d, 0x16, + 0x13, 0x69, 0x2e, 0xbd, 0x0d, 0xe6, 0xf0, 0xb2, + 0xed, 0xb4, 0x4c, 0x59, 0x77, 0x37, 0x00, 0x0b, + 0xc7, 0xa7, 0x9e, 0x37, 0xf3, 0x60, 0x70, 0xef, + 0xf3, 0xc1, 0x74, 0x52, 0x87, 0xc6, 0xa1, 0x81, + 0xbd, 0x0a, 0x2c, 0x5d, 0x2c, 0x0c, 0x6a, 0x81, + 0xa1, 0xfe, 0x26, 0x78, 0x6c, 0x03, 0x06, 0x07, + 0x34, 0xaa, 0xd1, 0x1b, 0x40, 0x03, 0x39, 0x56, + 0xcf, 0x2a, 0x92, 0xc1, 0x4e, 0xdf, 0x29, 0x24, + 0x83, 0x22, 0x7a, 0xea, 0x67, 0x1e, 0xe7, 0x54, + 0x64, 0xd3, 0xbd, 0x3a, 0x5d, 0xae, 0xca, 0xf0, + 0x9c, 0xd6, 0x5a, 0x9a, 0x62, 0xc8, 0xc7, 0x83, + 0xf9, 0x89, 0xde, 0x2d, 0x53, 0x64, 0x61, 0xf7, + 0xa3, 0xa7, 0x31, 0x38, 0xc6, 0x22, 0x9c, 0xb4, + 0x87, 0xe0 +}; +static const u8 output73[] __initconst = { + 0x34, 0xed, 0x05, 0xb0, 0x14, 0xbc, 0x8c, 0xcc, + 0x95, 0xbd, 0x99, 0x0f, 0xb1, 0x98, 0x17, 0x10, + 0xae, 0xe0, 0x08, 0x53, 0xa3, 0x69, 0xd2, 0xed, + 0x66, 0xdb, 0x2a, 0x34, 0x8d, 0x0c, 0x6e, 0xce, + 0x63, 0x69, 0xc9, 0xe4, 0x57, 0xc3, 0x0c, 0x8b, + 0xa6, 0x2c, 0xa7, 0xd2, 0x08, 0xff, 0x4f, 0xec, + 0x61, 0x8c, 0xee, 0x0d, 0xfa, 0x6b, 0xe0, 0xe8, + 0x71, 0xbc, 0x41, 0x46, 0xd7, 0x33, 0x1d, 0xc0, + 0xfd, 0xad, 0xca, 0x8b, 0x34, 0x56, 0xa4, 0x86, + 0x71, 0x62, 0xae, 0x5e, 0x3d, 0x2b, 0x66, 0x3e, + 0xae, 0xd8, 0xc0, 0xe1, 0x21, 0x3b, 0xca, 0xd2, + 0x6b, 0xa2, 0xb8, 0xc7, 0x98, 0x4a, 0xf3, 0xcf, + 0xb8, 0x62, 0xd8, 0x33, 0xe6, 0x80, 0xdb, 0x2f, + 0x0a, 0xaf, 0x90, 0x3c, 0xe1, 0xec, 0xe9, 0x21, + 0x29, 0x42, 0x9e, 0xa5, 0x50, 0xe9, 0x93, 0xd3, + 0x53, 0x1f, 0xac, 0x2a, 0x24, 
0x07, 0xb8, 0xed, + 0xed, 0x38, 0x2c, 0xc4, 0xa1, 0x2b, 0x31, 0x5d, + 0x9c, 0x24, 0x7b, 0xbf, 0xd9, 0xbb, 0x4e, 0x87, + 0x8f, 0x32, 0x30, 0xf1, 0x11, 0x29, 0x54, 0x94, + 0x00, 0x95, 0x1d, 0x1d, 0x24, 0xc0, 0xd4, 0x34, + 0x49, 0x1d, 0xd5, 0xe3, 0xa6, 0xde, 0x8b, 0xbf, + 0x5a, 0x9f, 0x58, 0x5a, 0x9b, 0x70, 0xe5, 0x9b, + 0xb3, 0xdb, 0xe8, 0xb8, 0xca, 0x1b, 0x43, 0xe3, + 0xc6, 0x6f, 0x0a, 0xd6, 0x32, 0x11, 0xd4, 0x04, + 0xef, 0xa3, 0xe4, 0x3f, 0x12, 0xd8, 0xc1, 0x73, + 0x51, 0x87, 0x03, 0xbd, 0xba, 0x60, 0x79, 0xee, + 0x08, 0xcc, 0xf7, 0xc0, 0xaa, 0x4c, 0x33, 0xc4, + 0xc7, 0x09, 0xf5, 0x91, 0xcb, 0x74, 0x57, 0x08, + 0x1b, 0x90, 0xa9, 0x1b, 0x60, 0x02, 0xd2, 0x3f, + 0x7a, 0xbb, 0xfd, 0x78, 0xf0, 0x15, 0xf9, 0x29, + 0x82, 0x8f, 0xc4, 0xb2, 0x88, 0x1f, 0xbc, 0xcc, + 0x53, 0x27, 0x8b, 0x07, 0x5f, 0xfc, 0x91, 0x29, + 0x82, 0x80, 0x59, 0x0a, 0x3c, 0xea, 0xc4, 0x7e, + 0xad, 0xd2, 0x70, 0x46, 0xbd, 0x9e, 0x3b, 0x1c, + 0x8a, 0x62, 0xea, 0x69, 0xbd, 0xf6, 0x96, 0x15, + 0xb5, 0x57, 0xe8, 0x63, 0x5f, 0x65, 0x46, 0x84, + 0x58, 0x50, 0x87, 0x4b, 0x0e, 0x5b, 0x52, 0x90, + 0xb0, 0xae, 0x37, 0x0f, 0xdd, 0x7e, 0xa2, 0xa0, + 0x8b, 0x78, 0xc8, 0x5a, 0x1f, 0x53, 0xdb, 0xc5, + 0xbf, 0x73, 0x20, 0xa9, 0x44, 0xfb, 0x1e, 0xc7, + 0x97, 0xb2, 0x3a, 0x5a, 0x17, 0xe6, 0x8b, 0x9b, + 0xe8, 0xf8, 0x2a, 0x01, 0x27, 0xa3, 0x71, 0x28, + 0xe3, 0x19, 0xc6, 0xaf, 0xf5, 0x3a, 0x26, 0xc0, + 0x5c, 0x69, 0x30, 0x78, 0x75, 0x27, 0xf2, 0x0c, + 0x22, 0x71, 0x65, 0xc6, 0x8e, 0x7b, 0x47, 0xe3, + 0x31, 0xaf, 0x7b, 0xc6, 0xc2, 0x55, 0x68, 0x81, + 0xaa, 0x1b, 0x21, 0x65, 0xfb, 0x18, 0x35, 0x45, + 0x36, 0x9a, 0x44, 0xba, 0x5c, 0xff, 0x06, 0xde, + 0x3a, 0xc8, 0x44, 0x0b, 0xaa, 0x8e, 0x34, 0xe2, + 0x84, 0xac, 0x18, 0xfe, 0x9b, 0xe1, 0x4f, 0xaa, + 0xb6, 0x90, 0x0b, 0x1c, 0x2c, 0xd9, 0x9a, 0x10, + 0x18, 0xf9, 0x49, 0x41, 0x42, 0x1b, 0xb5, 0xe1, + 0x26, 0xac, 0x2d, 0x38, 0x00, 0x00, 0xe4, 0xb4, + 0x50, 0x6f, 0x14, 0x18, 0xd6, 0x3d, 0x00, 0x59, + 0x3c, 0x45, 0xf3, 0x42, 0x13, 0x44, 0xb8, 0x57, + 0xd4, 0x43, 0x5c, 0x8a, 0x2a, 0xb4, 0xfc, 0x0a, + 0x25, 0x5a, 0xdc, 0x8f, 0x11, 0x0b, 0x11, 0x44, + 0xc7, 0x0e, 0x54, 0x8b, 0x22, 0x01, 0x7e, 0x67, + 0x2e, 0x15, 0x3a, 0xb9, 0xee, 0x84, 0x10, 0xd4, + 0x80, 0x57, 0xd7, 0x75, 0xcf, 0x8b, 0xcb, 0x03, + 0xc9, 0x92, 0x2b, 0x69, 0xd8, 0x5a, 0x9b, 0x06, + 0x85, 0x47, 0xaa, 0x4c, 0x28, 0xde, 0x49, 0x58, + 0xe6, 0x11, 0x1e, 0x5e, 0x64, 0x8e, 0x3b, 0xe0, + 0x40, 0x2e, 0xac, 0x96, 0x97, 0x15, 0x37, 0x1e, + 0x30, 0xdd +}; +static const u8 key73[] __initconst = { + 0x96, 0x06, 0x1e, 0xc1, 0x6d, 0xba, 0x49, 0x5b, + 0x65, 0x80, 0x79, 0xdd, 0xf3, 0x67, 0xa8, 0x6e, + 0x2d, 0x9c, 0x54, 0x46, 0xd8, 0x4a, 0xeb, 0x7e, + 0x23, 0x86, 0x51, 0xd8, 0x49, 0x49, 0x56, 0xe0 +}; +enum { nonce73 = 0xbefb83cb67e11ffdULL }; + +static const u8 input74[] __initconst = { + 0x47, 0x22, 0x70, 0xe5, 0x2f, 0x41, 0x18, 0x45, + 0x07, 0xd3, 0x6d, 0x32, 0x0d, 0x43, 0x92, 0x2b, + 0x9b, 0x65, 0x73, 0x13, 0x1a, 0x4f, 0x49, 0x8f, + 0xff, 0xf8, 0xcc, 0xae, 0x15, 0xab, 0x9d, 0x7d, + 0xee, 0x22, 0x5d, 0x8b, 0xde, 0x81, 0x5b, 0x81, + 0x83, 0x49, 0x35, 0x9b, 0xb4, 0xbc, 0x4e, 0x01, + 0xc2, 0x29, 0xa7, 0xf1, 0xca, 0x3a, 0xce, 0x3f, + 0xf5, 0x31, 0x93, 0xa8, 0xe2, 0xc9, 0x7d, 0x03, + 0x26, 0xa4, 0xbc, 0xa8, 0x9c, 0xb9, 0x68, 0xf3, + 0xb3, 0x91, 0xe8, 0xe6, 0xc7, 0x2b, 0x1a, 0xce, + 0xd2, 0x41, 0x53, 0xbd, 0xa3, 0x2c, 0x54, 0x94, + 0x21, 0xa1, 0x40, 0xae, 0xc9, 0x0c, 0x11, 0x92, + 0xfd, 0x91, 0xa9, 0x40, 0xca, 0xde, 0x21, 0x4e, + 0x1e, 0x3d, 0xcc, 0x2c, 0x87, 0x11, 0xef, 0x46, + 0xed, 0x52, 0x03, 0x11, 0x19, 0x43, 0x25, 0xc7, + 0x0d, 0xc3, 0x37, 0x5f, 0xd3, 0x6f, 
0x0c, 0x6a, + 0x45, 0x30, 0x88, 0xec, 0xf0, 0x21, 0xef, 0x1d, + 0x7b, 0x38, 0x63, 0x4b, 0x49, 0x0c, 0x72, 0xf6, + 0x4c, 0x40, 0xc3, 0xcc, 0x03, 0xa7, 0xae, 0xa8, + 0x8c, 0x37, 0x03, 0x1c, 0x11, 0xae, 0x0d, 0x1b, + 0x62, 0x97, 0x27, 0xfc, 0x56, 0x4b, 0xb7, 0xfd, + 0xbc, 0xfb, 0x0e, 0xfc, 0x61, 0xad, 0xc6, 0xb5, + 0x9c, 0x8c, 0xc6, 0x38, 0x27, 0x91, 0x29, 0x3d, + 0x29, 0xc8, 0x37, 0xc9, 0x96, 0x69, 0xe3, 0xdc, + 0x3e, 0x61, 0x35, 0x9b, 0x99, 0x4f, 0xb9, 0x4e, + 0x5a, 0x29, 0x1c, 0x2e, 0xcf, 0x16, 0xcb, 0x69, + 0x87, 0xe4, 0x1a, 0xc4, 0x6e, 0x78, 0x43, 0x00, + 0x03, 0xb2, 0x8b, 0x03, 0xd0, 0xb4, 0xf1, 0xd2, + 0x7d, 0x2d, 0x7e, 0xfc, 0x19, 0x66, 0x5b, 0xa3, + 0x60, 0x3f, 0x9d, 0xbd, 0xfa, 0x3e, 0xca, 0x7b, + 0x26, 0x08, 0x19, 0x16, 0x93, 0x5d, 0x83, 0xfd, + 0xf9, 0x21, 0xc6, 0x31, 0x34, 0x6f, 0x0c, 0xaa, + 0x28, 0xf9, 0x18, 0xa2, 0xc4, 0x78, 0x3b, 0x56, + 0xc0, 0x88, 0x16, 0xba, 0x22, 0x2c, 0x07, 0x2f, + 0x70, 0xd0, 0xb0, 0x46, 0x35, 0xc7, 0x14, 0xdc, + 0xbb, 0x56, 0x23, 0x1e, 0x36, 0x36, 0x2d, 0x73, + 0x78, 0xc7, 0xce, 0xf3, 0x58, 0xf7, 0x58, 0xb5, + 0x51, 0xff, 0x33, 0x86, 0x0e, 0x3b, 0x39, 0xfb, + 0x1a, 0xfd, 0xf8, 0x8b, 0x09, 0x33, 0x1b, 0x83, + 0xf2, 0xe6, 0x38, 0x37, 0xef, 0x47, 0x84, 0xd9, + 0x82, 0x77, 0x2b, 0x82, 0xcc, 0xf9, 0xee, 0x94, + 0x71, 0x78, 0x81, 0xc8, 0x4d, 0x91, 0xd7, 0x35, + 0x29, 0x31, 0x30, 0x5c, 0x4a, 0x23, 0x23, 0xb1, + 0x38, 0x6b, 0xac, 0x22, 0x3f, 0x80, 0xc7, 0xe0, + 0x7d, 0xfa, 0x76, 0x47, 0xd4, 0x6f, 0x93, 0xa0, + 0xa0, 0x93, 0x5d, 0x68, 0xf7, 0x43, 0x25, 0x8f, + 0x1b, 0xc7, 0x87, 0xea, 0x59, 0x0c, 0xa2, 0xfa, + 0xdb, 0x2f, 0x72, 0x43, 0xcf, 0x90, 0xf1, 0xd6, + 0x58, 0xf3, 0x17, 0x6a, 0xdf, 0xb3, 0x4e, 0x0e, + 0x38, 0x24, 0x48, 0x1f, 0xb7, 0x01, 0xec, 0x81, + 0xb1, 0x87, 0x5b, 0xec, 0x9c, 0x11, 0x1a, 0xff, + 0xa5, 0xca, 0x5a, 0x63, 0x31, 0xb2, 0xe4, 0xc6, + 0x3c, 0x1d, 0xaf, 0x27, 0xb2, 0xd4, 0x19, 0xa2, + 0xcc, 0x04, 0x92, 0x42, 0xd2, 0xc1, 0x8c, 0x3b, + 0xce, 0xf5, 0x74, 0xc1, 0x81, 0xf8, 0x20, 0x23, + 0x6f, 0x20, 0x6d, 0x78, 0x36, 0x72, 0x2c, 0x52, + 0xdf, 0x5e, 0xe8, 0x75, 0xce, 0x1c, 0x49, 0x9d, + 0x93, 0x6f, 0x65, 0xeb, 0xb1, 0xbd, 0x8e, 0x5e, + 0xe5, 0x89, 0xc4, 0x8a, 0x81, 0x3d, 0x9a, 0xa7, + 0x11, 0x82, 0x8e, 0x38, 0x5b, 0x5b, 0xca, 0x7d, + 0x4b, 0x72, 0xc2, 0x9c, 0x30, 0x5e, 0x7f, 0xc0, + 0x6f, 0x91, 0xd5, 0x67, 0x8c, 0x3e, 0xae, 0xda, + 0x2b, 0x3c, 0x53, 0xcc, 0x50, 0x97, 0x36, 0x0b, + 0x79, 0xd6, 0x73, 0x6e, 0x7d, 0x42, 0x56, 0xe1, + 0xaa, 0xfc, 0xb3, 0xa7, 0xc8, 0x01, 0xaa, 0xc1, + 0xfc, 0x5c, 0x72, 0x8e, 0x63, 0xa8, 0x46, 0x18, + 0xee, 0x11, 0xe7, 0x30, 0x09, 0x83, 0x6c, 0xd9, + 0xf4, 0x7a, 0x7b, 0xb5, 0x1f, 0x6d, 0xc7, 0xbc, + 0xcb, 0x55, 0xea, 0x40, 0x58, 0x7a, 0x00, 0x00, + 0x90, 0x60, 0xc5, 0x64, 0x69, 0x05, 0x99, 0xd2, + 0x49, 0x62, 0x4f, 0xcb, 0x97, 0xdf, 0xdd, 0x6b, + 0x60, 0x75, 0xe2, 0xe0, 0x6f, 0x76, 0xd0, 0x37, + 0x67, 0x0a, 0xcf, 0xff, 0xc8, 0x61, 0x84, 0x14, + 0x80, 0x7c, 0x1d, 0x31, 0x8d, 0x90, 0xde, 0x0b, + 0x1c, 0x74, 0x9f, 0x82, 0x96, 0x80, 0xda, 0xaf, + 0x8d, 0x99, 0x86, 0x9f, 0x24, 0x99, 0x28, 0x3e, + 0xe0, 0xa3, 0xc3, 0x90, 0x2d, 0x14, 0x65, 0x1e, + 0x3b, 0xb9, 0xba, 0x13, 0xa5, 0x77, 0x73, 0x63, + 0x9a, 0x06, 0x3d, 0xa9, 0x28, 0x9b, 0xba, 0x25, + 0x61, 0xc9, 0xcd, 0xcf, 0x7a, 0x4d, 0x96, 0x09, + 0xcb, 0xca, 0x03, 0x9c, 0x54, 0x34, 0x31, 0x85, + 0xa0, 0x3d, 0xe5, 0xbc, 0xa5, 0x5f, 0x1b, 0xd3, + 0x10, 0x63, 0x74, 0x9d, 0x01, 0x92, 0x88, 0xf0, + 0x27, 0x9c, 0x28, 0xd9, 0xfd, 0xe2, 0x4e, 0x01, + 0x8d, 0x61, 0x79, 0x60, 0x61, 0x5b, 0x76, 0xab, + 0x06, 0xd3, 0x44, 0x87, 0x43, 0x52, 0xcd, 0x06, + 0x68, 0x1e, 0x2d, 0xc5, 0xb0, 0x07, 
0x25, 0xdf, + 0x0a, 0x50, 0xd7, 0xd9, 0x08, 0x53, 0x65, 0xf1, + 0x0c, 0x2c, 0xde, 0x3f, 0x9d, 0x03, 0x1f, 0xe1, + 0x49, 0x43, 0x3c, 0x83, 0x81, 0x37, 0xf8, 0xa2, + 0x0b, 0xf9, 0x61, 0x1c, 0xc1, 0xdb, 0x79, 0xbc, + 0x64, 0xce, 0x06, 0x4e, 0x87, 0x89, 0x62, 0x73, + 0x51, 0xbc, 0xa4, 0x32, 0xd4, 0x18, 0x62, 0xab, + 0x65, 0x7e, 0xad, 0x1e, 0x91, 0xa3, 0xfa, 0x2d, + 0x58, 0x9e, 0x2a, 0xe9, 0x74, 0x44, 0x64, 0x11, + 0xe6, 0xb6, 0xb3, 0x00, 0x7e, 0xa3, 0x16, 0xef, + 0x72 +}; +static const u8 output74[] __initconst = { + 0xf5, 0xca, 0x45, 0x65, 0x50, 0x35, 0x47, 0x67, + 0x6f, 0x4f, 0x67, 0xff, 0x34, 0xd9, 0xc3, 0x37, + 0x2a, 0x26, 0xb0, 0x4f, 0x08, 0x1e, 0x45, 0x13, + 0xc7, 0x2c, 0x14, 0x75, 0x33, 0xd8, 0x8e, 0x1e, + 0x1b, 0x11, 0x0d, 0x97, 0x04, 0x33, 0x8a, 0xe4, + 0xd8, 0x8d, 0x0e, 0x12, 0x8d, 0xdb, 0x6e, 0x02, + 0xfa, 0xe5, 0xbd, 0x3a, 0xb5, 0x28, 0x07, 0x7d, + 0x20, 0xf0, 0x12, 0x64, 0x83, 0x2f, 0x59, 0x79, + 0x17, 0x88, 0x3c, 0x2d, 0x08, 0x2f, 0x55, 0xda, + 0xcc, 0x02, 0x3a, 0x82, 0xcd, 0x03, 0x94, 0xdf, + 0xdf, 0xab, 0x8a, 0x13, 0xf5, 0xe6, 0x74, 0xdf, + 0x7b, 0xe2, 0xab, 0x34, 0xbc, 0x00, 0x85, 0xbf, + 0x5a, 0x48, 0xc8, 0xff, 0x8d, 0x6c, 0x27, 0x48, + 0x19, 0x2d, 0x08, 0xfa, 0x82, 0x62, 0x39, 0x55, + 0x32, 0x11, 0xa8, 0xd7, 0xb9, 0x08, 0x2c, 0xd6, + 0x7a, 0xd9, 0x83, 0x9f, 0x9b, 0xfb, 0xec, 0x3a, + 0xd1, 0x08, 0xc7, 0xad, 0xdc, 0x98, 0x4c, 0xbc, + 0x98, 0xeb, 0x36, 0xb0, 0x39, 0xf4, 0x3a, 0xd6, + 0x53, 0x02, 0xa0, 0xa9, 0x73, 0xa1, 0xca, 0xef, + 0xd8, 0xd2, 0xec, 0x0e, 0xf8, 0xf5, 0xac, 0x8d, + 0x34, 0x41, 0x06, 0xa8, 0xc6, 0xc3, 0x31, 0xbc, + 0xe5, 0xcc, 0x7e, 0x72, 0x63, 0x59, 0x3e, 0x63, + 0xc2, 0x8d, 0x2b, 0xd5, 0xb9, 0xfd, 0x1e, 0x31, + 0x69, 0x32, 0x05, 0xd6, 0xde, 0xc9, 0xe6, 0x4c, + 0xac, 0x68, 0xf7, 0x1f, 0x9d, 0xcd, 0x0e, 0xa2, + 0x15, 0x3d, 0xd6, 0x47, 0x99, 0xab, 0x08, 0x5f, + 0x28, 0xc3, 0x4c, 0xc2, 0xd5, 0xdd, 0x10, 0xb7, + 0xbd, 0xdb, 0x9b, 0xcf, 0x85, 0x27, 0x29, 0x76, + 0x98, 0xeb, 0xad, 0x31, 0x64, 0xe7, 0xfb, 0x61, + 0xe0, 0xd8, 0x1a, 0xa6, 0xe2, 0xe7, 0x43, 0x42, + 0x77, 0xc9, 0x82, 0x00, 0xac, 0x85, 0xe0, 0xa2, + 0xd4, 0x62, 0xe3, 0xb7, 0x17, 0x6e, 0xb2, 0x9e, + 0x21, 0x58, 0x73, 0xa9, 0x53, 0x2d, 0x3c, 0xe1, + 0xdd, 0xd6, 0x6e, 0x92, 0xf2, 0x1d, 0xc2, 0x22, + 0x5f, 0x9a, 0x7e, 0xd0, 0x52, 0xbf, 0x54, 0x19, + 0xd7, 0x80, 0x63, 0x3e, 0xd0, 0x08, 0x2d, 0x37, + 0x0c, 0x15, 0xf7, 0xde, 0xab, 0x2b, 0xe3, 0x16, + 0x21, 0x3a, 0xee, 0xa5, 0xdc, 0xdf, 0xde, 0xa3, + 0x69, 0xcb, 0xfd, 0x92, 0x89, 0x75, 0xcf, 0xc9, + 0x8a, 0xa4, 0xc8, 0xdd, 0xcc, 0x21, 0xe6, 0xfe, + 0x9e, 0x43, 0x76, 0xb2, 0x45, 0x22, 0xb9, 0xb5, + 0xac, 0x7e, 0x3d, 0x26, 0xb0, 0x53, 0xc8, 0xab, + 0xfd, 0xea, 0x2c, 0xd1, 0x44, 0xc5, 0x60, 0x1b, + 0x8a, 0x99, 0x0d, 0xa5, 0x0e, 0x67, 0x6e, 0x3a, + 0x96, 0x55, 0xec, 0xe8, 0xcc, 0xbe, 0x49, 0xd9, + 0xf2, 0x72, 0x9f, 0x30, 0x21, 0x97, 0x57, 0x19, + 0xbe, 0x5e, 0x33, 0x0c, 0xee, 0xc0, 0x72, 0x0d, + 0x2e, 0xd1, 0xe1, 0x52, 0xc2, 0xea, 0x41, 0xbb, + 0xe1, 0x6d, 0xd4, 0x17, 0xa9, 0x8d, 0x89, 0xa9, + 0xd6, 0x4b, 0xc6, 0x4c, 0xf2, 0x88, 0x97, 0x54, + 0x3f, 0x4f, 0x57, 0xb7, 0x37, 0xf0, 0x2c, 0x11, + 0x15, 0x56, 0xdb, 0x28, 0xb5, 0x16, 0x84, 0x66, + 0xce, 0x45, 0x3f, 0x61, 0x75, 0xb6, 0xbe, 0x00, + 0xd1, 0xe4, 0xf5, 0x27, 0x54, 0x7f, 0xc2, 0xf1, + 0xb3, 0x32, 0x9a, 0xe8, 0x07, 0x02, 0xf3, 0xdb, + 0xa9, 0xd1, 0xc2, 0xdf, 0xee, 0xad, 0xe5, 0x8a, + 0x3c, 0xfa, 0x67, 0xec, 0x6b, 0xa4, 0x08, 0xfe, + 0xba, 0x5a, 0x58, 0x0b, 0x78, 0x11, 0x91, 0x76, + 0xe3, 0x1a, 0x28, 0x54, 0x5e, 0xbd, 0x71, 0x1b, + 0x8b, 0xdc, 0x6c, 0xf4, 0x6f, 0xd7, 0xf4, 0xf3, + 0xe1, 0x03, 0xa4, 0x3c, 0x8d, 0x91, 
0x2e, 0xba, + 0x5f, 0x7f, 0x8c, 0xaf, 0x69, 0x89, 0x29, 0x0a, + 0x5b, 0x25, 0x13, 0xc4, 0x2e, 0x16, 0xc2, 0x15, + 0x07, 0x5d, 0x58, 0x33, 0x7c, 0xe0, 0xf0, 0x55, + 0x5f, 0xbf, 0x5e, 0xf0, 0x71, 0x48, 0x8f, 0xf7, + 0x48, 0xb3, 0xf7, 0x0d, 0xa1, 0xd0, 0x63, 0xb1, + 0xad, 0xae, 0xb5, 0xb0, 0x5f, 0x71, 0xaf, 0x24, + 0x8b, 0xb9, 0x1c, 0x44, 0xd2, 0x1a, 0x53, 0xd1, + 0xd5, 0xb4, 0xa9, 0xff, 0x88, 0x73, 0xb5, 0xaa, + 0x15, 0x32, 0x5f, 0x59, 0x9d, 0x2e, 0xb5, 0xcb, + 0xde, 0x21, 0x2e, 0xe9, 0x35, 0xed, 0xfd, 0x0f, + 0xb6, 0xbb, 0xe6, 0x4b, 0x16, 0xf1, 0x45, 0x1e, + 0xb4, 0x84, 0xe9, 0x58, 0x1c, 0x0c, 0x95, 0xc0, + 0xcf, 0x49, 0x8b, 0x59, 0xa1, 0x78, 0xe6, 0x80, + 0x12, 0x49, 0x7a, 0xd4, 0x66, 0x62, 0xdf, 0x9c, + 0x18, 0xc8, 0x8c, 0xda, 0xc1, 0xa6, 0xbc, 0x65, + 0x28, 0xd2, 0xa4, 0xe8, 0xf1, 0x35, 0xdb, 0x5a, + 0x75, 0x1f, 0x73, 0x60, 0xec, 0xa8, 0xda, 0x5a, + 0x43, 0x15, 0x83, 0x9b, 0xe7, 0xb1, 0xa6, 0x81, + 0xbb, 0xef, 0xf3, 0x8f, 0x0f, 0xd3, 0x79, 0xa2, + 0xe5, 0xaa, 0x42, 0xef, 0xa0, 0x13, 0x4e, 0x91, + 0x2d, 0xcb, 0x61, 0x7a, 0x9a, 0x33, 0x14, 0x50, + 0x77, 0x4a, 0xd0, 0x91, 0x48, 0xe0, 0x0c, 0xe0, + 0x11, 0xcb, 0xdf, 0xb0, 0xce, 0x06, 0xd2, 0x79, + 0x4d, 0x69, 0xb9, 0xc9, 0x36, 0x74, 0x8f, 0x81, + 0x72, 0x73, 0xf3, 0x17, 0xb7, 0x13, 0xcb, 0x5b, + 0xd2, 0x5c, 0x33, 0x61, 0xb7, 0x61, 0x79, 0xb0, + 0xc0, 0x4d, 0xa1, 0xc7, 0x5d, 0x98, 0xc9, 0xe1, + 0x98, 0xbd, 0x78, 0x5a, 0x2c, 0x64, 0x53, 0xaf, + 0xaf, 0x66, 0x51, 0x47, 0xe4, 0x48, 0x66, 0x8b, + 0x07, 0x52, 0xa3, 0x03, 0x93, 0x28, 0xad, 0xcc, + 0xa3, 0x86, 0xad, 0x63, 0x04, 0x35, 0x6c, 0x49, + 0xd5, 0x28, 0x0e, 0x00, 0x47, 0xf4, 0xd4, 0x32, + 0x27, 0x19, 0xb3, 0x29, 0xe7, 0xbc, 0xbb, 0xce, + 0x3e, 0x3e, 0xd5, 0x67, 0x20, 0xe4, 0x0b, 0x75, + 0x95, 0x24, 0xe0, 0x6c, 0xb6, 0x29, 0x0c, 0x14, + 0xfd +}; +static const u8 key74[] __initconst = { + 0xf0, 0x41, 0x5b, 0x00, 0x56, 0xc4, 0xac, 0xf6, + 0xa2, 0x4c, 0x33, 0x41, 0x16, 0x09, 0x1b, 0x8e, + 0x4d, 0xe8, 0x8c, 0xd9, 0x48, 0xab, 0x3e, 0x60, + 0xcb, 0x49, 0x3e, 0xaf, 0x2b, 0x8b, 0xc8, 0xf0 +}; +enum { nonce74 = 0xcbdb0ffd0e923384ULL }; + +static const struct chacha20_testvec chacha20_testvecs[] __initconst = { + { input01, output01, key01, nonce01, sizeof(input01) }, + { input02, output02, key02, nonce02, sizeof(input02) }, + { input03, output03, key03, nonce03, sizeof(input03) }, + { input04, output04, key04, nonce04, sizeof(input04) }, + { input05, output05, key05, nonce05, sizeof(input05) }, + { input06, output06, key06, nonce06, sizeof(input06) }, + { input07, output07, key07, nonce07, sizeof(input07) }, + { input08, output08, key08, nonce08, sizeof(input08) }, + { input09, output09, key09, nonce09, sizeof(input09) }, + { input10, output10, key10, nonce10, sizeof(input10) }, + { input11, output11, key11, nonce11, sizeof(input11) }, + { input12, output12, key12, nonce12, sizeof(input12) }, + { input13, output13, key13, nonce13, sizeof(input13) }, + { input14, output14, key14, nonce14, sizeof(input14) }, + { input15, output15, key15, nonce15, sizeof(input15) }, + { input16, output16, key16, nonce16, sizeof(input16) }, + { input17, output17, key17, nonce17, sizeof(input17) }, + { input18, output18, key18, nonce18, sizeof(input18) }, + { input19, output19, key19, nonce19, sizeof(input19) }, + { input20, output20, key20, nonce20, sizeof(input20) }, + { input21, output21, key21, nonce21, sizeof(input21) }, + { input22, output22, key22, nonce22, sizeof(input22) }, + { input23, output23, key23, nonce23, sizeof(input23) }, + { input24, output24, key24, nonce24, sizeof(input24) }, + { input25, output25, 
key25, nonce25, sizeof(input25) }, + { input26, output26, key26, nonce26, sizeof(input26) }, + { input27, output27, key27, nonce27, sizeof(input27) }, + { input28, output28, key28, nonce28, sizeof(input28) }, + { input29, output29, key29, nonce29, sizeof(input29) }, + { input30, output30, key30, nonce30, sizeof(input30) }, + { input31, output31, key31, nonce31, sizeof(input31) }, + { input32, output32, key32, nonce32, sizeof(input32) }, + { input33, output33, key33, nonce33, sizeof(input33) }, + { input34, output34, key34, nonce34, sizeof(input34) }, + { input35, output35, key35, nonce35, sizeof(input35) }, + { input36, output36, key36, nonce36, sizeof(input36) }, + { input37, output37, key37, nonce37, sizeof(input37) }, + { input38, output38, key38, nonce38, sizeof(input38) }, + { input39, output39, key39, nonce39, sizeof(input39) }, + { input40, output40, key40, nonce40, sizeof(input40) }, + { input41, output41, key41, nonce41, sizeof(input41) }, + { input42, output42, key42, nonce42, sizeof(input42) }, + { input43, output43, key43, nonce43, sizeof(input43) }, + { input44, output44, key44, nonce44, sizeof(input44) }, + { input45, output45, key45, nonce45, sizeof(input45) }, + { input46, output46, key46, nonce46, sizeof(input46) }, + { input47, output47, key47, nonce47, sizeof(input47) }, + { input48, output48, key48, nonce48, sizeof(input48) }, + { input49, output49, key49, nonce49, sizeof(input49) }, + { input50, output50, key50, nonce50, sizeof(input50) }, + { input51, output51, key51, nonce51, sizeof(input51) }, + { input52, output52, key52, nonce52, sizeof(input52) }, + { input53, output53, key53, nonce53, sizeof(input53) }, + { input54, output54, key54, nonce54, sizeof(input54) }, + { input55, output55, key55, nonce55, sizeof(input55) }, + { input56, output56, key56, nonce56, sizeof(input56) }, + { input57, output57, key57, nonce57, sizeof(input57) }, + { input58, output58, key58, nonce58, sizeof(input58) }, + { input59, output59, key59, nonce59, sizeof(input59) }, + { input60, output60, key60, nonce60, sizeof(input60) }, + { input61, output61, key61, nonce61, sizeof(input61) }, + { input62, output62, key62, nonce62, sizeof(input62) }, + { input63, output63, key63, nonce63, sizeof(input63) }, + { input64, output64, key64, nonce64, sizeof(input64) }, + { input65, output65, key65, nonce65, sizeof(input65) }, + { input66, output66, key66, nonce66, sizeof(input66) }, + { input67, output67, key67, nonce67, sizeof(input67) }, + { input68, output68, key68, nonce68, sizeof(input68) }, + { input69, output69, key69, nonce69, sizeof(input69) }, + { input70, output70, key70, nonce70, sizeof(input70) }, + { input71, output71, key71, nonce71, sizeof(input71) }, + { input72, output72, key72, nonce72, sizeof(input72) }, + { input73, output73, key73, nonce73, sizeof(input73) }, + { input74, output74, key74, nonce74, sizeof(input74) } +}; + +static const struct hchacha20_testvec hchacha20_testvecs[] __initconst = {{ + .key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .nonce = { 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x4a, + 0x00, 0x00, 0x00, 0x00, 0x31, 0x41, 0x59, 0x27 }, + .output = { 0x82, 0x41, 0x3b, 0x42, 0x27, 0xb2, 0x7b, 0xfe, + 0xd3, 0x0e, 0x42, 0x50, 0x8a, 0x87, 0x7d, 0x73, + 0xa0, 0xf9, 0xe4, 0xd5, 0x8a, 0x74, 0xa8, 0x53, + 0xc1, 0x2e, 0xc4, 0x13, 0x26, 0xd3, 0xec, 0xdc } +}}; + +static bool __init chacha20_selftest(void) +{ + 
enum { + MAXIMUM_TEST_BUFFER_LEN = 1UL << 10, + OUTRAGEOUSLY_HUGE_BUFFER_LEN = PAGE_SIZE * 35 + 17 /* 143k */ + }; + size_t i, j, k; + u32 derived_key[CHACHA20_KEY_WORDS]; + u8 *offset_input = NULL, *computed_output = NULL, *massive_input = NULL; + u8 offset_key[CHACHA20_KEY_SIZE + 1] + __aligned(__alignof__(unsigned long)); + struct chacha20_ctx state; + bool success = true; + simd_context_t simd_context; + + offset_input = kmalloc(MAXIMUM_TEST_BUFFER_LEN + 1, GFP_KERNEL); + computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN + 1, GFP_KERNEL); + massive_input = vzalloc(OUTRAGEOUSLY_HUGE_BUFFER_LEN); + if (!computed_output || !offset_input || !massive_input) { + pr_err("chacha20 self-test malloc: FAIL\n"); + success = false; + goto out; + } + + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20_testvecs); ++i) { + /* Boring case */ + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + chacha20_init(&state, chacha20_testvecs[i].key, + chacha20_testvecs[i].nonce); + chacha20(&state, computed_output, chacha20_testvecs[i].input, + chacha20_testvecs[i].ilen, &simd_context); + if (memcmp(computed_output, chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu: FAIL\n", i + 1); + success = false; + } + for (k = chacha20_testvecs[i].ilen; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (zero check): FAIL\n", + i + 1); + success = false; + break; + } + } + + /* Unaligned case */ + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + memcpy(offset_input + 1, chacha20_testvecs[i].input, + chacha20_testvecs[i].ilen); + memcpy(offset_key + 1, chacha20_testvecs[i].key, + CHACHA20_KEY_SIZE); + chacha20_init(&state, offset_key + 1, chacha20_testvecs[i].nonce); + chacha20(&state, computed_output + 1, offset_input + 1, + chacha20_testvecs[i].ilen, &simd_context); + if (memcmp(computed_output + 1, chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu (unaligned): FAIL\n", + i + 1); + success = false; + } + if (computed_output[0]) { + pr_err("chacha20 self-test %zu (unaligned, zero check): FAIL\n", + i + 1); + success = false; + } + for (k = chacha20_testvecs[i].ilen + 1; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (unaligned, zero check): FAIL\n", + i + 1); + success = false; + break; + } + } + + /* Chunked case */ + if (chacha20_testvecs[i].ilen <= CHACHA20_BLOCK_SIZE) + goto next_test; + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1); + memset(&state, 0, sizeof(state)); + chacha20_init(&state, chacha20_testvecs[i].key, + chacha20_testvecs[i].nonce); + chacha20(&state, computed_output, chacha20_testvecs[i].input, + CHACHA20_BLOCK_SIZE, &simd_context); + chacha20(&state, computed_output + CHACHA20_BLOCK_SIZE, + chacha20_testvecs[i].input + CHACHA20_BLOCK_SIZE, + chacha20_testvecs[i].ilen - CHACHA20_BLOCK_SIZE, + &simd_context); + if (memcmp(computed_output, chacha20_testvecs[i].output, + chacha20_testvecs[i].ilen)) { + pr_err("chacha20 self-test %zu (chunked): FAIL\n", + i + 1); + success = false; + } + for (k = chacha20_testvecs[i].ilen; + k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) { + if (computed_output[k]) { + pr_err("chacha20 self-test %zu (chunked, zero check): FAIL\n", + i + 1); + success = false; + break; + } + } + +next_test: + /* Sliding unaligned case */ + if (chacha20_testvecs[i].ilen > CHACHA20_BLOCK_SIZE + 1 
||
+		    !chacha20_testvecs[i].ilen)
+			continue;
+		for (j = 1; j < CHACHA20_BLOCK_SIZE; ++j) {
+			memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN + 1);
+			memset(&state, 0, sizeof(state));
+			memcpy(offset_input + j, chacha20_testvecs[i].input,
+			       chacha20_testvecs[i].ilen);
+			chacha20_init(&state, chacha20_testvecs[i].key,
+				      chacha20_testvecs[i].nonce);
+			chacha20(&state, computed_output + j, offset_input + j,
+				 chacha20_testvecs[i].ilen, &simd_context);
+			if (memcmp(computed_output + j,
+				   chacha20_testvecs[i].output,
+				   chacha20_testvecs[i].ilen)) {
+				pr_err("chacha20 self-test %zu (unaligned, slide %zu): FAIL\n",
+				       i + 1, j);
+				success = false;
+			}
+			for (k = 0; k < j; ++k) {
+				if (computed_output[k]) {
+					pr_err("chacha20 self-test %zu (unaligned, slide %zu, zero check): FAIL\n",
+					       i + 1, j);
+					success = false;
+					break;
+				}
+			}
+			for (k = chacha20_testvecs[i].ilen + j;
+			     k < MAXIMUM_TEST_BUFFER_LEN + 1; ++k) {
+				if (computed_output[k]) {
+					pr_err("chacha20 self-test %zu (unaligned, slide %zu, zero check): FAIL\n",
+					       i + 1, j);
+					success = false;
+					break;
+				}
+			}
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(hchacha20_testvecs); ++i) {
+		memset(&derived_key, 0, sizeof(derived_key));
+		hchacha20(derived_key, hchacha20_testvecs[i].nonce,
+			  hchacha20_testvecs[i].key, &simd_context);
+		cpu_to_le32_array(derived_key, ARRAY_SIZE(derived_key));
+		if (memcmp(derived_key, hchacha20_testvecs[i].output,
+			   CHACHA20_KEY_SIZE)) {
+			pr_err("hchacha20 self-test %zu: FAIL\n", i + 1);
+			success = false;
+		}
+	}
+	memset(&state, 0, sizeof(state));
+	chacha20_init(&state, chacha20_testvecs[0].key,
+		      chacha20_testvecs[0].nonce);
+	chacha20(&state, massive_input, massive_input,
+		 OUTRAGEOUSLY_HUGE_BUFFER_LEN, &simd_context);
+	chacha20_init(&state, chacha20_testvecs[0].key,
+		      chacha20_testvecs[0].nonce);
+	chacha20(&state, massive_input, massive_input,
+		 OUTRAGEOUSLY_HUGE_BUFFER_LEN, DONT_USE_SIMD);
+	for (k = 0; k < OUTRAGEOUSLY_HUGE_BUFFER_LEN; ++k) {
+		if (massive_input[k]) {
+			pr_err("chacha20 self-test massive: FAIL\n");
+			success = false;
+			break;
+		}
+	}
+
+	simd_put(&simd_context);
+
+out:
+	kfree(offset_input);
+	kfree(computed_output);
+	vfree(massive_input);
+	return success;
+}
diff --git a/net/wireguard/crypto/zinc/selftest/chacha20poly1305.c b/net/wireguard/crypto/zinc/selftest/chacha20poly1305.c
new file mode 100644
index 000000000000..c58ac6e69d54
--- /dev/null
+++ b/net/wireguard/crypto/zinc/selftest/chacha20poly1305.c
@@ -0,0 +1,9076 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+struct chacha20poly1305_testvec {
+	const u8 *input, *output, *assoc, *nonce, *key;
+	size_t ilen, alen, nlen;
+	bool failure;
+};
+
+/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539
+ * 2.8.2. After that, they are generated by reference implementations. And the
+ * final marked ones are taken from wycheproof, but we only do these for the
+ * encrypt side, because mostly we're stressing the primitives rather than the
+ * actual chapoly construction. This also requires adding a 96-bit nonce
+ * construction, just for the purpose of the tests.
+ */ + +static const u8 enc_input001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 enc_output001[] __initconst = { + 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4, + 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd, + 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89, + 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2, + 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee, + 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0, + 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00, + 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf, + 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce, + 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81, + 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd, + 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55, + 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61, + 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38, + 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0, + 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4, + 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46, + 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9, + 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e, + 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e, + 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15, + 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a, + 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea, + 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a, + 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99, + 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e, + 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10, + 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10, + 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94, + 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30, + 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf, + 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29, + 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70, + 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, + 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, + 0x38 +}; +static const u8 
enc_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 enc_nonce001[] __initconst = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 +}; +static const u8 enc_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const u8 enc_input002[] __initconst = { }; +static const u8 enc_output002[] __initconst = { + 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1, + 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92 +}; +static const u8 enc_assoc002[] __initconst = { }; +static const u8 enc_nonce002[] __initconst = { + 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e +}; +static const u8 enc_key002[] __initconst = { + 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f, + 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86, + 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef, + 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68 +}; + +static const u8 enc_input003[] __initconst = { }; +static const u8 enc_output003[] __initconst = { + 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6, + 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77 +}; +static const u8 enc_assoc003[] __initconst = { + 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b +}; +static const u8 enc_nonce003[] __initconst = { + 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d +}; +static const u8 enc_key003[] __initconst = { + 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88, + 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a, + 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08, + 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d +}; + +static const u8 enc_input004[] __initconst = { + 0xa4 +}; +static const u8 enc_output004[] __initconst = { + 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2, + 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac, + 0x89 +}; +static const u8 enc_assoc004[] __initconst = { + 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40 +}; +static const u8 enc_nonce004[] __initconst = { + 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4 +}; +static const u8 enc_key004[] __initconst = { + 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8, + 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1, + 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d, + 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e +}; + +static const u8 enc_input005[] __initconst = { + 0x2d +}; +static const u8 enc_output005[] __initconst = { + 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e, + 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c, + 0xac +}; +static const u8 enc_assoc005[] __initconst = { }; +static const u8 enc_nonce005[] __initconst = { + 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30 +}; +static const u8 enc_key005[] __initconst = { + 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31, + 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87, + 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01, + 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87 +}; + +static const u8 enc_input006[] __initconst = { + 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a, + 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92, + 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37, + 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50, + 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec, + 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb, + 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66, + 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb, + 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b, + 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 
0x85, 0x6e, + 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3, + 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0, + 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb, + 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41, + 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc, + 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde, + 0x8f +}; +static const u8 enc_output006[] __initconst = { + 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1, + 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15, + 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c, + 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda, + 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11, + 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8, + 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc, + 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3, + 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5, + 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02, + 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93, + 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78, + 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1, + 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66, + 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc, + 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0, + 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d, + 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a, + 0xeb +}; +static const u8 enc_assoc006[] __initconst = { + 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b +}; +static const u8 enc_nonce006[] __initconst = { + 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c +}; +static const u8 enc_key006[] __initconst = { + 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae, + 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78, + 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9, + 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01 +}; + +static const u8 enc_input007[] __initconst = { + 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5, + 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a, + 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1, + 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17, + 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c, + 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1, + 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51, + 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1, + 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86, + 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a, + 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a, + 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98, + 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36, + 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34, + 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57, + 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84, + 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4, + 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80, + 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82, + 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5, + 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d, + 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c, + 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf, + 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc, + 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3, + 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14, + 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81, + 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77, + 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3, + 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2, + 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b, + 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3 +}; +static const u8 enc_output007[] __initconst = { + 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c, + 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8, + 0xc9, 0x50, 0xc3, 
0xc6, 0xa5, 0xe3, 0xa4, 0x7c, + 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb, + 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0, + 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21, + 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70, + 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac, + 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99, + 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9, + 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f, + 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7, + 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53, + 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12, + 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6, + 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0, + 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54, + 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6, + 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e, + 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb, + 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30, + 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f, + 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2, + 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e, + 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34, + 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39, + 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7, + 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9, + 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82, + 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04, + 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34, + 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef, + 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42, + 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53 +}; +static const u8 enc_assoc007[] __initconst = { }; +static const u8 enc_nonce007[] __initconst = { + 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0 +}; +static const u8 enc_key007[] __initconst = { + 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd, + 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c, + 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80, + 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01 +}; + +static const u8 enc_input008[] __initconst = { + 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10, + 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2, + 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c, + 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb, + 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12, + 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa, + 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6, + 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4, + 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91, + 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb, + 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47, + 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15, + 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f, + 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a, + 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3, + 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97, + 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80, + 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e, + 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f, + 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10, + 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a, + 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0, + 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35, + 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d, + 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d, + 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57, + 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4, + 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f, + 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39, + 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda, + 0x90, 0xda, 0xf4, 
0x95, 0x66, 0x8b, 0x17, 0x17, + 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43, + 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19, + 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09, + 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21, + 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07, + 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f, + 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b, + 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a, + 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed, + 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2, + 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca, + 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff, + 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b, + 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b, + 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b, + 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6, + 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04, + 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48, + 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b, + 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13, + 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8, + 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f, + 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0, + 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92, + 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a, + 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41, + 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17, + 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30, + 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20, + 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49, + 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a, + 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b, + 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3 +}; +static const u8 enc_output008[] __initconst = { + 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd, + 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1, + 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93, + 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d, + 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c, + 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6, + 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4, + 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5, + 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84, + 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd, + 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed, + 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab, + 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13, + 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49, + 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6, + 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8, + 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2, + 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94, + 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18, + 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60, + 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8, + 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b, + 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f, + 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c, + 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20, + 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff, + 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9, + 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c, + 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9, + 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6, + 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea, + 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e, + 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82, + 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1, + 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70, + 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1, + 0x8b, 0x74, 0x9d, 
0x4c, 0x62, 0x0d, 0x10, 0x0c, + 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7, + 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc, + 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc, + 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3, + 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb, + 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97, + 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f, + 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39, + 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f, + 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d, + 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2, + 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d, + 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96, + 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b, + 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20, + 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95, + 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb, + 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35, + 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62, + 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9, + 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6, + 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8, + 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a, + 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93, + 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14, + 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99, + 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86, + 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f, + 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54 +}; +static const u8 enc_assoc008[] __initconst = { }; +static const u8 enc_nonce008[] __initconst = { + 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02 +}; +static const u8 enc_key008[] __initconst = { + 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53, + 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0, + 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86, + 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba +}; + +static const u8 enc_input009[] __initconst = { + 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b, + 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8, + 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca, + 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09, + 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5, + 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85, + 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44, + 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97, + 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77, + 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41, + 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c, + 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00, + 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82, + 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f, + 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e, + 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55, + 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab, + 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17, + 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e, + 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f, + 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82, + 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3, + 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f, + 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0, + 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08, + 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b, + 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85, + 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28, + 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c, + 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62, + 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2, + 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3, + 0xd1, 0xbc, 0x18, 
0x22, 0xba, 0x02, 0x56, 0x62, + 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40, + 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f, + 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b, + 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91, + 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5, + 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c, + 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4, + 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49, + 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04, + 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03, + 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa, + 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec, + 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6, + 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69, + 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36, + 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8, + 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf, + 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe, + 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82, + 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab, + 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d, + 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3, + 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5, + 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34, + 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49, + 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f, + 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d, + 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42, + 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef, + 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27, + 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52, + 0x65 +}; +static const u8 enc_output009[] __initconst = { + 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf, + 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66, + 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72, + 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd, + 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28, + 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe, + 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06, + 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5, + 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7, + 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09, + 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a, + 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00, + 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62, + 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb, + 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2, + 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28, + 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e, + 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a, + 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6, + 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83, + 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9, + 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a, + 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79, + 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a, + 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea, + 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b, + 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52, + 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb, + 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89, + 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad, + 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19, + 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71, + 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d, + 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54, + 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a, + 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d, + 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95, + 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42, + 0x89, 0x16, 
0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16, + 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6, + 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf, + 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d, + 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f, + 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b, + 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e, + 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4, + 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c, + 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4, + 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1, + 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb, + 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff, + 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2, + 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06, + 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66, + 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90, + 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55, + 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc, + 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8, + 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62, + 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba, + 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2, + 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89, + 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06, + 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90, + 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf, + 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8, + 0xae +}; +static const u8 enc_assoc009[] __initconst = { + 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e, + 0xef +}; +static const u8 enc_nonce009[] __initconst = { + 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78 +}; +static const u8 enc_key009[] __initconst = { + 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5, + 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86, + 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2, + 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b +}; + +static const u8 enc_input010[] __initconst = { + 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf, + 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c, + 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22, + 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc, + 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16, + 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7, + 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4, + 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d, + 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5, + 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46, + 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82, + 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b, + 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a, + 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf, + 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca, + 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95, + 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09, + 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3, + 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3, + 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f, + 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58, + 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad, + 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde, + 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44, + 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a, + 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9, + 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26, + 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc, + 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74, + 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b, + 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93, + 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37, + 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 
0x6f, + 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d, + 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca, + 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73, + 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f, + 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1, + 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9, + 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76, + 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac, + 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7, + 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce, + 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30, + 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb, + 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa, + 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd, + 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f, + 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb, + 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34, + 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e, + 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f, + 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53, + 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41, + 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e, + 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d, + 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27, + 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e, + 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8, + 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a, + 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12, + 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3, + 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66, + 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0, + 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c, + 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4, + 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49, + 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90, + 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11, + 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c, + 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b, + 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74, + 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c, + 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27, + 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1, + 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27, + 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88, + 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27, + 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b, + 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39, + 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7, + 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc, + 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe, + 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5, + 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf, + 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05, + 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73, + 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda, + 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe, + 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71, + 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed, + 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d, + 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33, + 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f, + 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a, + 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa, + 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e, + 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e, + 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87, + 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5, + 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4, + 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38, + 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34, + 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 
0x3f, + 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36, + 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69, + 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44, + 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5, + 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce, + 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd, + 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27, + 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f, + 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8, + 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a, + 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5, + 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca, + 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e, + 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92, + 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13, + 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf, + 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6, + 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3, + 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b, + 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d, + 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f, + 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40, + 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c, + 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f +}; +static const u8 enc_output010[] __initconst = { + 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b, + 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74, + 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1, + 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd, + 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6, + 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5, + 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96, + 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02, + 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30, + 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57, + 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53, + 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65, + 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71, + 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9, + 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18, + 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce, + 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a, + 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69, + 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2, + 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95, + 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49, + 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e, + 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a, + 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a, + 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e, + 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19, + 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b, + 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75, + 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d, + 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d, + 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f, + 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a, + 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d, + 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5, + 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c, + 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77, + 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46, + 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43, + 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe, + 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8, + 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76, + 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47, + 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8, + 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32, + 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59, + 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 
0xae, + 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a, + 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3, + 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74, + 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75, + 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2, + 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e, + 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2, + 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9, + 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1, + 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07, + 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79, + 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71, + 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad, + 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a, + 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c, + 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9, + 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79, + 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27, + 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90, + 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe, + 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99, + 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1, + 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9, + 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0, + 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28, + 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e, + 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20, + 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60, + 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47, + 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68, + 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe, + 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33, + 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8, + 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38, + 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7, + 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04, + 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c, + 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f, + 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c, + 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77, + 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54, + 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5, + 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4, + 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2, + 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e, + 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27, + 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f, + 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92, + 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55, + 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe, + 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04, + 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4, + 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56, + 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02, + 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2, + 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8, + 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27, + 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47, + 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10, + 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43, + 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0, + 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee, + 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47, + 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6, + 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d, + 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c, + 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3, + 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b, + 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09, + 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d, + 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 
0xf1, + 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd, + 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4, + 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63, + 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87, + 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd, + 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e, + 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a, + 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c, + 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38, + 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a, + 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5, + 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9, + 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0 +}; +static const u8 enc_assoc010[] __initconst = { + 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27, + 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2 +}; +static const u8 enc_nonce010[] __initconst = { + 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30 +}; +static const u8 enc_key010[] __initconst = { + 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44, + 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf, + 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74, + 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7 +}; + +static const u8 enc_input011[] __initconst = { + 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b, + 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b, + 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d, + 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee, + 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30, + 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20, + 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f, + 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e, + 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66, + 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46, + 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35, + 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6, + 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0, + 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15, + 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13, + 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7, + 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3, + 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37, + 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc, + 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95, + 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8, + 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac, + 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45, + 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf, + 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d, + 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc, + 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45, + 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a, + 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec, + 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e, + 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10, + 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8, + 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66, + 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0, + 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62, + 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b, + 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4, + 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96, + 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7, + 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74, + 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8, + 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b, + 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70, + 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95, + 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3, + 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9, + 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 
0x8d, + 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e, + 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32, + 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5, + 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80, + 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3, + 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad, + 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d, + 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20, + 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17, + 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6, + 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d, + 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82, + 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c, + 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9, + 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb, + 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96, + 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9, + 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f, + 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40, + 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc, + 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce, + 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71, + 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f, + 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35, + 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90, + 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8, + 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01, + 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1, + 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe, + 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4, + 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf, + 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9, + 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f, + 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04, + 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7, + 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15, + 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc, + 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0, + 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae, + 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb, + 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed, + 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51, + 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52, + 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84, + 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5, + 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4, + 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e, + 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74, + 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f, + 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13, + 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea, + 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b, + 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef, + 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09, + 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe, + 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1, + 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9, + 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15, + 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a, + 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab, + 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36, + 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd, + 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde, + 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd, + 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47, + 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5, + 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69, + 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21, + 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98, + 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07, + 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 
0x57, + 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd, + 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03, + 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11, + 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96, + 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91, + 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d, + 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0, + 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9, + 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42, + 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a, + 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18, + 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc, + 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce, + 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc, + 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0, + 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf, + 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7, + 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80, + 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c, + 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82, + 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9, + 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20, + 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58, + 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6, + 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc, + 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50, + 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86, + 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a, + 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80, + 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec, + 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08, + 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c, + 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde, + 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d, + 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17, + 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f, + 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26, + 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96, + 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97, + 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6, + 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55, + 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e, + 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88, + 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5, + 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b, + 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15, + 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1, + 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4, + 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3, + 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf, + 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e, + 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb, + 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76, + 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5, + 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c, + 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde, + 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f, + 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51, + 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9, + 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99, + 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6, + 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04, + 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31, + 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a, + 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56, + 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e, + 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78, + 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a, + 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7, + 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb, + 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 
0xe6, + 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8, + 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc, + 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84, + 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86, + 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76, + 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a, + 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73, + 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8, + 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6, + 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2, + 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56, + 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb, + 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab, + 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76, + 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69, + 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d, + 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc, + 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22, + 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39, + 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6, + 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9, + 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f, + 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1, + 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83, + 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc, + 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4, + 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59, + 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68, + 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef, + 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1, + 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3, + 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44, + 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09, + 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8, + 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a, + 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d, + 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae, + 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2, + 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10, + 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a, + 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34, + 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f, + 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9, + 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b, + 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d, + 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57, + 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03, + 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87, + 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca, + 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53, + 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f, + 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61, + 0x10, 0x1e, 0xbf, 0xec, 0xa8 +}; +static const u8 enc_output011[] __initconst = { + 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8, + 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc, + 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74, + 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73, + 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e, + 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9, + 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e, + 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd, + 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57, + 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19, + 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f, + 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45, + 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e, + 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39, + 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03, + 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f, + 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0, + 0x53, 0xdd, 
0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce, + 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb, + 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52, + 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21, + 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a, + 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35, + 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91, + 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b, + 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e, + 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19, + 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07, + 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18, + 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96, + 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68, + 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4, + 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57, + 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c, + 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23, + 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8, + 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6, + 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40, + 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab, + 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb, + 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea, + 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8, + 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31, + 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0, + 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc, + 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94, + 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1, + 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46, + 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6, + 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7, + 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71, + 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a, + 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33, + 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38, + 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23, + 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb, + 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65, + 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73, + 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8, + 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb, + 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a, + 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca, + 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5, + 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71, + 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8, + 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d, + 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6, + 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d, + 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7, + 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5, + 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8, + 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd, + 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29, + 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22, + 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5, + 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67, + 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11, + 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e, + 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09, + 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4, + 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f, + 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa, + 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec, + 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b, + 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d, + 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b, + 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48, + 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3, + 0x23, 0xfd, 
0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63, + 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd, + 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78, + 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed, + 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82, + 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f, + 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3, + 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9, + 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72, + 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74, + 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40, + 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b, + 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a, + 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5, + 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98, + 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71, + 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e, + 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4, + 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46, + 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e, + 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f, + 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93, + 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0, + 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5, + 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61, + 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64, + 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85, + 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20, + 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6, + 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc, + 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8, + 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50, + 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4, + 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80, + 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0, + 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a, + 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35, + 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43, + 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12, + 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7, + 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34, + 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42, + 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0, + 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95, + 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74, + 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5, + 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12, + 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6, + 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86, + 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97, + 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45, + 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19, + 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86, + 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c, + 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba, + 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29, + 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6, + 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6, + 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09, + 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31, + 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99, + 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b, + 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca, + 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00, + 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93, + 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3, + 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07, + 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda, + 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90, + 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b, + 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a, + 0xba, 0xf7, 
0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6, + 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c, + 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57, + 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15, + 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e, + 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51, + 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75, + 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19, + 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08, + 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14, + 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba, + 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff, + 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90, + 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e, + 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93, + 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad, + 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2, + 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac, + 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d, + 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06, + 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c, + 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91, + 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17, + 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20, + 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7, + 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf, + 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c, + 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2, + 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e, + 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a, + 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05, + 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58, + 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8, + 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d, + 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71, + 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3, + 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe, + 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62, + 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16, + 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66, + 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4, + 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2, + 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35, + 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3, + 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4, + 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f, + 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe, + 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56, + 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b, + 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37, + 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3, + 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f, + 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f, + 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0, + 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70, + 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd, + 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f, + 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e, + 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67, + 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51, + 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23, + 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3, + 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5, + 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09, + 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7, + 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed, + 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb, + 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6, + 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5, + 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96, + 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe, + 0x55, 0xfb, 
0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44, + 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6, + 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e, + 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0, + 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79, + 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f, + 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d, + 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82, + 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47, + 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93, + 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6, + 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69, + 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e, + 0x2b, 0xdf, 0xcd, 0xf9, 0x3c +}; +static const u8 enc_assoc011[] __initconst = { + 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7 +}; +static const u8 enc_nonce011[] __initconst = { + 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa +}; +static const u8 enc_key011[] __initconst = { + 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85, + 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca, + 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52, + 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38 +}; + +static const u8 enc_input012[] __initconst = { + 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, + 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, + 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, + 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, + 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, + 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, + 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, + 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, + 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, + 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, + 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, + 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, + 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, + 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, + 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, + 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, + 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, + 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, + 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, + 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, + 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, + 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, + 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, + 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, + 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, + 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, + 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, + 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, + 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, + 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, + 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, + 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, + 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, + 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, + 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, + 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, + 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, + 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, + 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, + 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, + 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, + 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, + 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, + 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, + 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, + 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, + 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, + 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 
0x26, 0xe9, + 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, + 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, + 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, + 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, + 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, + 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, + 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, + 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, + 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, + 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, + 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, + 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, + 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, + 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, + 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, + 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, + 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, + 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, + 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, + 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, + 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, + 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, + 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, + 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, + 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, + 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, + 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, + 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, + 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, + 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, + 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, + 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, + 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, + 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, + 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, + 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, + 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, + 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, + 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, + 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, + 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, + 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, + 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, + 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, + 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, + 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, + 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, + 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, + 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, + 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, + 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, + 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, + 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, + 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, + 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, + 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, + 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, + 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, + 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, + 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, + 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, + 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, + 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, + 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, + 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, + 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, + 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, + 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, + 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, + 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, + 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 
0x9c, 0x0c, + 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, + 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, + 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, + 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, + 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, + 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, + 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, + 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, + 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, + 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, + 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, + 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, + 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, + 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, + 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, + 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, + 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, + 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, + 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, + 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, + 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, + 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, + 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, + 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, + 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, + 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, + 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, + 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, + 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, + 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, + 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, + 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, + 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, + 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, + 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, + 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, + 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, + 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, + 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, + 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, + 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, + 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, + 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, + 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, + 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, + 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, + 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, + 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, + 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, + 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, + 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, + 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, + 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, + 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, + 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, + 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, + 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, + 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, + 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, + 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, + 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, + 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, + 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, + 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, + 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, + 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, + 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, + 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, + 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, + 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, + 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 
0x30, 0xbf, + 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, + 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, + 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, + 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, + 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, + 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, + 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, + 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, + 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, + 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, + 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, + 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, + 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, + 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, + 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, + 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, + 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, + 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, + 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, + 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, + 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, + 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, + 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, + 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, + 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, + 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, + 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, + 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, + 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, + 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, + 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, + 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, + 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, + 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, + 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, + 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, + 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, + 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, + 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, + 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, + 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, + 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, + 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, + 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, + 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, + 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, + 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, + 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, + 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, + 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, + 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, + 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, + 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, + 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, + 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, + 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, + 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, + 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, + 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, + 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, + 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, + 0x78, 0xec, 0x00 +}; +static const u8 enc_output012[] __initconst = { + 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, + 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, + 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, + 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, + 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, + 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, + 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, + 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, + 0x34, 0x38, 0xf8, 
0x75, 0x5f, 0xa0, 0x74, 0xe2, + 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, + 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, + 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, + 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, + 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, + 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, + 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, + 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, + 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, + 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, + 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, + 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, + 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, + 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, + 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, + 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, + 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, + 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, + 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, + 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, + 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, + 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, + 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, + 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, + 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, + 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, + 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, + 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, + 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, + 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, + 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, + 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, + 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, + 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, + 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, + 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, + 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, + 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, + 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, + 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, + 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, + 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, + 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, + 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, + 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, + 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, + 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, + 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, + 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, + 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, + 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, + 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, + 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, + 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, + 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, + 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, + 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, + 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, + 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, + 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, + 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, + 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, + 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, + 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, + 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, + 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, + 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, + 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, + 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, + 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, + 0x9f, 0xd7, 0x43, 
0x2b, 0x23, 0xdf, 0xbf, 0xaa, + 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, + 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, + 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, + 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, + 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, + 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, + 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, + 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, + 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, + 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, + 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, + 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, + 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, + 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, + 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, + 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, + 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, + 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, + 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, + 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, + 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, + 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, + 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, + 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, + 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, + 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, + 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, + 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, + 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, + 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, + 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, + 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, + 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, + 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, + 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, + 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, + 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, + 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, + 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, + 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, + 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, + 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, + 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, + 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, + 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, + 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, + 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, + 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, + 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, + 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, + 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, + 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, + 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, + 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, + 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, + 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, + 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, + 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, + 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, + 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, + 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, + 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, + 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, + 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, + 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, + 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, + 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, + 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, + 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, + 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, + 0xea, 0xe5, 0x29, 
0x1c, 0x40, 0x56, 0xcf, 0xd7, + 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, + 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, + 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, + 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, + 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, + 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, + 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, + 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, + 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, + 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, + 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, + 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, + 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, + 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, + 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, + 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, + 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, + 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, + 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, + 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, + 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, + 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, + 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, + 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, + 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, + 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, + 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, + 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, + 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, + 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, + 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, + 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, + 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, + 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, + 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, + 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, + 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, + 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, + 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, + 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, + 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, + 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, + 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, + 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, + 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, + 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, + 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, + 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, + 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, + 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, + 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, + 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, + 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, + 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, + 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, + 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, + 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, + 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, + 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, + 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, + 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, + 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, + 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, + 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, + 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, + 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, + 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, + 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, + 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, + 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, + 0x50, 0x68, 0xdc, 
0x28, 0x18, 0xc4, 0x61, 0x38, + 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, + 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, + 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, + 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, + 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, + 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, + 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, + 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, + 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, + 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, + 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, + 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, + 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, + 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, + 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, + 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, + 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, + 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, + 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, + 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, + 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, + 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, + 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, + 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, + 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, + 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, + 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, + 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, + 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, + 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, + 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, + 0x70, 0xcf, 0xd6 +}; +static const u8 enc_assoc012[] __initconst = { + 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, + 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, + 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, + 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, + 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, + 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, + 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, + 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 +}; +static const u8 enc_nonce012[] __initconst = { + 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 +}; +static const u8 enc_key012[] __initconst = { + 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, + 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, + 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, + 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 +}; + +/* wycheproof - rfc7539 */ +static const u8 enc_input013[] __initconst = { + 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61, + 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39, + 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63, + 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66, + 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f, + 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20, + 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, + 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73, + 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f, + 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69, + 0x74, 0x2e +}; +static const u8 enc_output013[] __initconst = { + 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb, + 0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2, + 0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe, + 0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6, + 0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12, + 0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b, + 0x1a, 
0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29, + 0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36, + 0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c, + 0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58, + 0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94, + 0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc, + 0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d, + 0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b, + 0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09, + 0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60, + 0x06, 0x91 +}; +static const u8 enc_assoc013[] __initconst = { + 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, + 0xc4, 0xc5, 0xc6, 0xc7 +}; +static const u8 enc_nonce013[] __initconst = { + 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47 +}; +static const u8 enc_key013[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input014[] __initconst = { }; +static const u8 enc_output014[] __initconst = { + 0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5, + 0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d +}; +static const u8 enc_assoc014[] __initconst = { }; +static const u8 enc_nonce014[] __initconst = { + 0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1, + 0xea, 0x12, 0x37, 0x9d +}; +static const u8 enc_key014[] __initconst = { + 0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96, + 0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0, + 0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08, + 0x42, 0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0 +}; + +/* wycheproof - misc */ +static const u8 enc_input015[] __initconst = { }; +static const u8 enc_output015[] __initconst = { + 0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b, + 0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09 +}; +static const u8 enc_assoc015[] __initconst = { + 0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10 +}; +static const u8 enc_nonce015[] __initconst = { + 0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16, + 0xa3, 0xc6, 0xf6, 0x89 +}; +static const u8 enc_key015[] __initconst = { + 0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb, + 0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22, + 0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63, + 0x91, 0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42 +}; + +/* wycheproof - misc */ +static const u8 enc_input016[] __initconst = { + 0x2a +}; +static const u8 enc_output016[] __initconst = { + 0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80, + 0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75, + 0x22 +}; +static const u8 enc_assoc016[] __initconst = { }; +static const u8 enc_nonce016[] __initconst = { + 0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd, + 0xee, 0xab, 0x60, 0xf1 +}; +static const u8 enc_key016[] __initconst = { + 0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50, + 0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa, + 0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a, + 0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73 +}; + +/* wycheproof - misc */ +static const u8 enc_input017[] __initconst = { + 0x51 +}; +static const u8 enc_output017[] __initconst = { + 0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7, + 0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72, + 0xb9 +}; +static const u8 enc_assoc017[] __initconst = { + 0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53 +}; +static const u8 enc_nonce017[] __initconst = { + 0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2, + 0x78, 0x2f, 0x44, 0x03 +}; +static const u8 enc_key017[] 
__initconst = { + 0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5, + 0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f, + 0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5, + 0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee +}; + +/* wycheproof - misc */ +static const u8 enc_input018[] __initconst = { + 0x5c, 0x60 +}; +static const u8 enc_output018[] __initconst = { + 0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39, + 0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22, + 0xe5, 0xe2 +}; +static const u8 enc_assoc018[] __initconst = { }; +static const u8 enc_nonce018[] __initconst = { + 0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34, + 0x7e, 0x03, 0xf2, 0xdb +}; +static const u8 enc_key018[] __initconst = { + 0x2f, 0x7f, 0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89, + 0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e, + 0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f, + 0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00 +}; + +/* wycheproof - misc */ +static const u8 enc_input019[] __initconst = { + 0xdd, 0xf2 +}; +static const u8 enc_output019[] __initconst = { + 0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec, + 0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24, + 0x37, 0xa2 +}; +static const u8 enc_assoc019[] __initconst = { + 0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf +}; +static const u8 enc_nonce019[] __initconst = { + 0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90, + 0xb6, 0x04, 0x0a, 0xc6 +}; +static const u8 enc_key019[] __initconst = { + 0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48, + 0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff, + 0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44, + 0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58 +}; + +/* wycheproof - misc */ +static const u8 enc_input020[] __initconst = { + 0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31 +}; +static const u8 enc_output020[] __initconst = { + 0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed, + 0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28, + 0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42 +}; +static const u8 enc_assoc020[] __initconst = { }; +static const u8 enc_nonce020[] __initconst = { + 0x3c, 0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59, + 0x6d, 0xc5, 0x5b, 0xb7 +}; +static const u8 enc_key020[] __initconst = { + 0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f, + 0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f, + 0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3, + 0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7 +}; + +/* wycheproof - misc */ +static const u8 enc_input021[] __initconst = { + 0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90 +}; +static const u8 enc_output021[] __initconst = { + 0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18, + 0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5, + 0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba +}; +static const u8 enc_assoc021[] __initconst = { + 0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53 +}; +static const u8 enc_nonce021[] __initconst = { + 0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98, + 0xde, 0x94, 0x83, 0x96 +}; +static const u8 enc_key021[] __initconst = { + 0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5, + 0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4, + 0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda, + 0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f +}; + +/* wycheproof - misc */ +static const u8 enc_input022[] __initconst = { + 0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed +}; +static const u8 enc_output022[] __initconst = { + 0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17, + 0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93, + 0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21 +}; +static const u8 enc_assoc022[] __initconst = 
{ }; +static const u8 enc_nonce022[] __initconst = { + 0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2, + 0x36, 0x18, 0x23, 0xd3 +}; +static const u8 enc_key022[] __initconst = { + 0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf, + 0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d, + 0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7, + 0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52 +}; + +/* wycheproof - misc */ +static const u8 enc_input023[] __initconst = { + 0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf +}; +static const u8 enc_output023[] __initconst = { + 0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0, + 0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0, + 0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18 +}; +static const u8 enc_assoc023[] __initconst = { + 0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d +}; +static const u8 enc_nonce023[] __initconst = { + 0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33, + 0x66, 0x48, 0x4a, 0x78 +}; +static const u8 enc_key023[] __initconst = { + 0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54, + 0xde, 0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a, + 0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe, + 0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd +}; + +/* wycheproof - misc */ +static const u8 enc_input024[] __initconst = { + 0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c, + 0x36, 0x8d, 0x14, 0xe0 +}; +static const u8 enc_output024[] __initconst = { + 0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a, + 0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27, + 0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10, + 0x6d, 0xcb, 0x29, 0xb4 +}; +static const u8 enc_assoc024[] __initconst = { }; +static const u8 enc_nonce024[] __initconst = { + 0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e, + 0x07, 0x53, 0x86, 0x56 +}; +static const u8 enc_key024[] __initconst = { + 0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0, + 0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39, + 0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5, + 0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe +}; + +/* wycheproof - misc */ +static const u8 enc_input025[] __initconst = { + 0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7, + 0x59, 0xb1, 0xa0, 0xda +}; +static const u8 enc_output025[] __initconst = { + 0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38, + 0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c, + 0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7, + 0xcf, 0x05, 0x07, 0x2f +}; +static const u8 enc_assoc025[] __initconst = { + 0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9 +}; +static const u8 enc_nonce025[] __initconst = { + 0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d, + 0xd9, 0x06, 0xe9, 0xce +}; +static const u8 enc_key025[] __initconst = { + 0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40, + 0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70, + 0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e, + 0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57 +}; + +/* wycheproof - misc */ +static const u8 enc_input026[] __initconst = { + 0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac, + 0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07 +}; +static const u8 enc_output026[] __initconst = { + 0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf, + 0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a, + 0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79, + 0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2 +}; +static const u8 enc_assoc026[] __initconst = { }; +static const u8 enc_nonce026[] __initconst = { + 0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda, + 0xd4, 0x61, 0xd2, 0x3c +}; +static const u8 enc_key026[] __initconst = { + 0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda, + 
0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77, + 0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0, + 0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb +}; + +/* wycheproof - misc */ +static const u8 enc_input027[] __initconst = { + 0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f, + 0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73 +}; +static const u8 enc_output027[] __initconst = { + 0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81, + 0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe, + 0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9, + 0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17 +}; +static const u8 enc_assoc027[] __initconst = { + 0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12 +}; +static const u8 enc_nonce027[] __initconst = { + 0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14, + 0xc5, 0x03, 0x5b, 0x6a +}; +static const u8 enc_key027[] __initconst = { + 0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f, + 0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38, + 0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b, + 0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d +}; + +/* wycheproof - misc */ +static const u8 enc_input028[] __initconst = { + 0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0, + 0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88 +}; +static const u8 enc_output028[] __initconst = { + 0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4, + 0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13, + 0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a, + 0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c +}; +static const u8 enc_assoc028[] __initconst = { }; +static const u8 enc_nonce028[] __initconst = { + 0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8, + 0x47, 0x40, 0xad, 0x9b +}; +static const u8 enc_key028[] __initconst = { + 0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7, + 0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7, + 0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63, + 0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a +}; + +/* wycheproof - misc */ +static const u8 enc_input029[] __initconst = { + 0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09, + 0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6 +}; +static const u8 enc_output029[] __initconst = { + 0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3, + 0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c, + 0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc, + 0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d +}; +static const u8 enc_assoc029[] __initconst = { + 0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff +}; +static const u8 enc_nonce029[] __initconst = { + 0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80, + 0x07, 0x1f, 0x52, 0x66 +}; +static const u8 enc_key029[] __initconst = { + 0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8, + 0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14, + 0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d, + 0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e +}; + +/* wycheproof - misc */ +static const u8 enc_input030[] __initconst = { + 0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15, + 0x91, 0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e, + 0x1f +}; +static const u8 enc_output030[] __initconst = { + 0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd, + 0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74, + 0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80, + 0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08, + 0x13 +}; +static const u8 enc_assoc030[] __initconst = { }; +static const u8 enc_nonce030[] __initconst = { + 0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42, + 0xdc, 0x03, 0x44, 0x5d +}; +static const u8 enc_key030[] __initconst = { + 0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21, + 0xc0, 0xcc, 0x24, 
0xc0, 0xc0, 0xe2, 0x4c, 0x1e, + 0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b, + 0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11 +}; + +/* wycheproof - misc */ +static const u8 enc_input031[] __initconst = { + 0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88, + 0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c, + 0x9c +}; +static const u8 enc_output031[] __initconst = { + 0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b, + 0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e, + 0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38, + 0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c, + 0xeb +}; +static const u8 enc_assoc031[] __initconst = { + 0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79 +}; +static const u8 enc_nonce031[] __initconst = { + 0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10, + 0x63, 0x3d, 0x99, 0x3d +}; +static const u8 enc_key031[] __initconst = { + 0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7, + 0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f, + 0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82, + 0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70 +}; + +/* wycheproof - misc */ +static const u8 enc_input032[] __initconst = { + 0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66, + 0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7, + 0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11 +}; +static const u8 enc_output032[] __initconst = { + 0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d, + 0x9d, 0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0, + 0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64, + 0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d, + 0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a +}; +static const u8 enc_assoc032[] __initconst = { }; +static const u8 enc_nonce032[] __initconst = { + 0xe6, 0xb1, 0xad, 0xf2, 0xfd, 0x58, 0xa8, 0x76, + 0x2c, 0x65, 0xf3, 0x1b +}; +static const u8 enc_key032[] __initconst = { + 0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87, + 0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f, + 0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2, + 0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7 +}; + +/* wycheproof - misc */ +static const u8 enc_input033[] __initconst = { + 0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d, + 0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c, + 0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93 +}; +static const u8 enc_output033[] __initconst = { + 0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69, + 0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4, + 0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00, + 0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63, + 0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65 +}; +static const u8 enc_assoc033[] __initconst = { + 0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08 +}; +static const u8 enc_nonce033[] __initconst = { + 0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd, + 0x78, 0x34, 0xed, 0x55 +}; +static const u8 enc_key033[] __initconst = { + 0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed, + 0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda, + 0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d, + 0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3 +}; + +/* wycheproof - misc */ +static const u8 enc_input034[] __initconst = { + 0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f, + 0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c, + 0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26, + 0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29 +}; +static const u8 enc_output034[] __initconst = { + 0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab, + 0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb, + 0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1, + 0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56, + 0x6d, 0x3c, 
0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f, + 0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93 +}; +static const u8 enc_assoc034[] __initconst = { }; +static const u8 enc_nonce034[] __initconst = { + 0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31, + 0x37, 0x1a, 0x6f, 0xd2 +}; +static const u8 enc_key034[] __initconst = { + 0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e, + 0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2, + 0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15, + 0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71 +}; + +/* wycheproof - misc */ +static const u8 enc_input035[] __initconst = { + 0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2, + 0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f, + 0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56, + 0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb +}; +static const u8 enc_output035[] __initconst = { + 0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20, + 0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5, + 0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e, + 0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53, + 0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d, + 0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f +}; +static const u8 enc_assoc035[] __initconst = { + 0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48 +}; +static const u8 enc_nonce035[] __initconst = { + 0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8, + 0xb8, 0x1a, 0x1f, 0x8b +}; +static const u8 enc_key035[] __initconst = { + 0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f, + 0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40, + 0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4, + 0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4 +}; + +/* wycheproof - misc */ +static const u8 enc_input036[] __initconst = { + 0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a, + 0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb, + 0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce, + 0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78 +}; +static const u8 enc_output036[] __initconst = { + 0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76, + 0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e, + 0xd5, 0xd5, 0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50, + 0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21, + 0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33, + 0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea +}; +static const u8 enc_assoc036[] __initconst = { }; +static const u8 enc_nonce036[] __initconst = { + 0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2, + 0x2b, 0x7e, 0x6e, 0x6a +}; +static const u8 enc_key036[] __initconst = { + 0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c, + 0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb, + 0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9, + 0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3 +}; + +/* wycheproof - misc */ +static const u8 enc_input037[] __initconst = { + 0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14, + 0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3, + 0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79, + 0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e +}; +static const u8 enc_output037[] __initconst = { + 0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b, + 0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2, + 0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29, + 0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4, + 0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d, + 0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32 +}; +static const u8 enc_assoc037[] __initconst = { + 0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59 +}; +static const u8 enc_nonce037[] __initconst = { + 0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5, + 0x34, 0x0d, 0xd1, 0xb8 +}; +static const u8 enc_key037[] 
__initconst = { + 0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4, + 0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f, + 0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd, + 0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45 +}; + +/* wycheproof - misc */ +static const u8 enc_input038[] __initconst = { + 0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4, + 0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e, + 0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11, + 0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04, + 0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46, + 0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a +}; +static const u8 enc_output038[] __initconst = { + 0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6, + 0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00, + 0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6, + 0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27, + 0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24, + 0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f, + 0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08, + 0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9 +}; +static const u8 enc_assoc038[] __initconst = { }; +static const u8 enc_nonce038[] __initconst = { + 0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9, + 0x0b, 0xef, 0x55, 0xd2 +}; +static const u8 enc_key038[] __initconst = { + 0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51, + 0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63, + 0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3, + 0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64 +}; + +/* wycheproof - misc */ +static const u8 enc_input039[] __initconst = { + 0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74, + 0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3, + 0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d, + 0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59, + 0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c, + 0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9 +}; +static const u8 enc_output039[] __initconst = { + 0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec, + 0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04, + 0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e, + 0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d, + 0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3, + 0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4, + 0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42, + 0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3 +}; +static const u8 enc_assoc039[] __initconst = { + 0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84 +}; +static const u8 enc_nonce039[] __initconst = { + 0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b, + 0x31, 0xcd, 0x4d, 0x95 +}; +static const u8 enc_key039[] __initconst = { + 0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c, + 0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25, + 0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4, + 0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac +}; + +/* wycheproof - misc */ +static const u8 enc_input040[] __initconst = { + 0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e, + 0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd, + 0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f, + 0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f, + 0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88, + 0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76, + 0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d, + 0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82 +}; +static const u8 enc_output040[] __initconst = { + 0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5, + 0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8, + 0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c, + 0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8, + 0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc, + 0xbb, 0x0f, 
0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33, + 0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd, + 0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50, + 0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56, + 0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13 +}; +static const u8 enc_assoc040[] __initconst = { }; +static const u8 enc_nonce040[] __initconst = { + 0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28, + 0x23, 0xcc, 0x06, 0x5b +}; +static const u8 enc_key040[] __initconst = { + 0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0, + 0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8, + 0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30, + 0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01 +}; + +/* wycheproof - misc */ +static const u8 enc_input041[] __initconst = { + 0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88, + 0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d, + 0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19, + 0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44, + 0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec, + 0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d, + 0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c, + 0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17 +}; +static const u8 enc_output041[] __initconst = { + 0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16, + 0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1, + 0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8, + 0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97, + 0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84, + 0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3, + 0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20, + 0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e, + 0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14, + 0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec +}; +static const u8 enc_assoc041[] __initconst = { + 0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2 +}; +static const u8 enc_nonce041[] __initconst = { + 0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d, + 0xe4, 0xeb, 0xb1, 0x9c +}; +static const u8 enc_key041[] __initconst = { + 0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9, + 0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2, + 0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5, + 0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e +}; + +/* wycheproof - misc */ +static const u8 enc_input042[] __initconst = { + 0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba, + 0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58, + 0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06, + 0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64, + 0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5, + 0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74, + 0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61, + 0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e, + 0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6, + 0xd3, 0xab, 0x49, 0xe3, 0x91, 0xda, 0x29, 0xcd, + 0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4, + 0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5, + 0x92 +}; +static const u8 enc_output042[] __initconst = { + 0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97, + 0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf, + 0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98, + 0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5, + 0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45, + 0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10, + 0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28, + 0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe, + 0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c, + 0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a, + 0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2, + 0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3, + 0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 
0x2b, + 0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b, + 0xb0 +}; +static const u8 enc_assoc042[] __initconst = { }; +static const u8 enc_nonce042[] __initconst = { + 0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03, + 0xac, 0xde, 0x27, 0x99 +}; +static const u8 enc_key042[] __initconst = { + 0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28, + 0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d, + 0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a, + 0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01 +}; + +/* wycheproof - misc */ +static const u8 enc_input043[] __initconst = { + 0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff, + 0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc, + 0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc, + 0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5, + 0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd, + 0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55, + 0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85, + 0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b, + 0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3, + 0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad, + 0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92, + 0xdc, 0x47, 0x97, 0x0d, 0x84, 0xa6, 0xad, 0x31, + 0x76 +}; +static const u8 enc_output043[] __initconst = { + 0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5, + 0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06, + 0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e, + 0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae, + 0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37, + 0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8, + 0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3, + 0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d, + 0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70, + 0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07, + 0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6, + 0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54, + 0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5, + 0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac, + 0xf6 +}; +static const u8 enc_assoc043[] __initconst = { + 0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30 +}; +static const u8 enc_nonce043[] __initconst = { + 0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63, + 0xe5, 0x22, 0x94, 0x60 +}; +static const u8 enc_key043[] __initconst = { + 0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c, + 0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4, + 0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b, + 0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e +}; + +/* wycheproof - misc */ +static const u8 enc_input044[] __initconst = { + 0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68, + 0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2 +}; +static const u8 enc_output044[] __initconst = { + 0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6, + 0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b, + 0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02, + 0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4 +}; +static const u8 enc_assoc044[] __initconst = { + 0x02 +}; +static const u8 enc_nonce044[] __initconst = { + 0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21, + 0x02, 0xd5, 0x06, 0x56 +}; +static const u8 enc_key044[] __initconst = { + 0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32, + 0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 0x18, 0x50, + 0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0, + 0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c +}; + +/* wycheproof - misc */ +static const u8 enc_input045[] __initconst = { + 0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44, + 0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8 +}; +static const u8 enc_output045[] __initconst = { + 0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 
0xd1, + 0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60, + 0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1, + 0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58 +}; +static const u8 enc_assoc045[] __initconst = { + 0xb6, 0x48 +}; +static const u8 enc_nonce045[] __initconst = { + 0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9, + 0x5b, 0x3a, 0xa7, 0x13 +}; +static const u8 enc_key045[] __initconst = { + 0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41, + 0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0, + 0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44, + 0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc +}; + +/* wycheproof - misc */ +static const u8 enc_input046[] __initconst = { + 0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23, + 0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47 +}; +static const u8 enc_output046[] __initconst = { + 0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda, + 0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1, + 0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b, + 0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8 +}; +static const u8 enc_assoc046[] __initconst = { + 0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd, + 0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0 +}; +static const u8 enc_nonce046[] __initconst = { + 0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b, + 0xa4, 0x65, 0x96, 0xdf +}; +static const u8 enc_key046[] __initconst = { + 0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08, + 0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96, + 0xc4, 0x94, 0x64, 0x67, 0xbc, 0xb3, 0xd5, 0x89, + 0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f +}; + +/* wycheproof - misc */ +static const u8 enc_input047[] __initconst = { + 0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf, + 0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2 +}; +static const u8 enc_output047[] __initconst = { + 0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb, + 0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95, + 0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34, + 0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f +}; +static const u8 enc_assoc047[] __initconst = { + 0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07, + 0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b +}; +static const u8 enc_nonce047[] __initconst = { + 0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6, + 0x80, 0x92, 0x66, 0xd9 +}; +static const u8 enc_key047[] __initconst = { + 0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91, + 0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23, + 0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6, + 0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16 +}; + +/* wycheproof - misc */ +static const u8 enc_input048[] __initconst = { + 0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32, + 0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3 +}; +static const u8 enc_output048[] __initconst = { + 0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b, + 0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68, + 0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84, + 0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3 +}; +static const u8 enc_assoc048[] __initconst = { + 0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab, + 0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c, + 0x0e +}; +static const u8 enc_nonce048[] __initconst = { + 0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40, + 0xfc, 0x10, 0x68, 0xc3 +}; +static const u8 enc_key048[] __initconst = { + 0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b, + 0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97, + 0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b, + 0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57 +}; + +/* wycheproof - misc */ +static const u8 enc_input049[] __initconst = { + 
0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2, + 0xf8, 0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6 +}; +static const u8 enc_output049[] __initconst = { + 0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87, + 0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11, + 0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e, + 0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6 +}; +static const u8 enc_assoc049[] __initconst = { + 0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8, + 0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94, + 0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5, + 0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda +}; +static const u8 enc_nonce049[] __initconst = { + 0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf, + 0xad, 0x14, 0xd5, 0x3e +}; +static const u8 enc_key049[] __initconst = { + 0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d, + 0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc, + 0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47, + 0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0 +}; + +/* wycheproof - misc */ +static const u8 enc_input050[] __initconst = { + 0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18, + 0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c +}; +static const u8 enc_output050[] __initconst = { + 0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6, + 0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca, + 0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b, + 0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04 +}; +static const u8 enc_assoc050[] __initconst = { + 0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22, + 0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07, + 0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90, + 0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37 +}; +static const u8 enc_nonce050[] __initconst = { + 0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28, + 0x6a, 0x7b, 0x76, 0x51 +}; +static const u8 enc_key050[] __initconst = { + 0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1, + 0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c, + 0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a, + 0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30 +}; + +/* wycheproof - misc */ +static const u8 enc_input051[] __initconst = { + 0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59, + 0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6 +}; +static const u8 enc_output051[] __initconst = { + 0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70, + 0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd, + 0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4, + 0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43 +}; +static const u8 enc_assoc051[] __initconst = { + 0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92, + 0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57, + 0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75, + 0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88, + 0x73 +}; +static const u8 enc_nonce051[] __initconst = { + 0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0, + 0xa8, 0xfa, 0x89, 0x49 +}; +static const u8 enc_key051[] __initconst = { + 0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27, + 0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74, + 0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c, + 0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99 +}; + +/* wycheproof - misc */ +static const u8 enc_input052[] __initconst = { + 0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3, + 0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed +}; +static const u8 enc_output052[] __initconst = { + 0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc, + 0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b, + 0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed, + 0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32 +}; +static const u8 enc_assoc052[] 
__initconst = { + 0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a, + 0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14, + 0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae, + 0x84, 0x58, 0x3d, 0xa7, 0x3d, 0x7f, 0xf8, 0x5c, + 0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92, + 0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61 +}; +static const u8 enc_nonce052[] __initconst = { + 0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73, + 0x0e, 0xc3, 0x5d, 0x12 +}; +static const u8 enc_key052[] __initconst = { + 0x9f, 0x14, 0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5, + 0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf, + 0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c, + 0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4 +}; + +/* wycheproof - misc */ +static const u8 enc_input053[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, + 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, + 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, + 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe +}; +static const u8 enc_output053[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7, + 0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07 +}; +static const u8 enc_assoc053[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce053[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key053[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input054[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, + 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, + 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, + 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe, + 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe, + 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b, + 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5, + 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd +}; +static const u8 enc_output054[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2, + 0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce +}; +static const u8 enc_assoc054[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce054[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key054[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input055[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, + 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, + 0xdf, 0x99, 0x47, 0x03, 
0x03, 0xfb, 0x3b, 0x7b, + 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe, + 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe, + 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b, + 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5, + 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd, + 0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa, + 0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02, + 0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80, + 0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4, + 0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7, + 0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed, + 0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38, + 0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7 +}; +static const u8 enc_output055[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3, + 0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e +}; +static const u8 enc_assoc055[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce055[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key055[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input056[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, + 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, + 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, + 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41 +}; +static const u8 enc_output056[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27, + 0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6 +}; +static const u8 enc_assoc056[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce056[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key056[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input057[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, + 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, + 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, + 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41, + 0x1c, 0x43, 
0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01, + 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4, + 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a, + 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42 +}; +static const u8 enc_output057[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b, + 0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68 +}; +static const u8 enc_assoc057[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce057[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key057[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input058[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, + 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, + 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, + 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41, + 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01, + 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4, + 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a, + 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42, + 0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05, + 0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd, + 0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f, + 0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b, + 0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38, + 0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12, + 0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7, + 0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 0x38 +}; +static const u8 enc_output058[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91, + 0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62 +}; +static const u8 enc_assoc058[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce058[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key058[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 
0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input059[] __initconst = { + 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, + 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, + 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, + 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e +}; +static const u8 enc_output059[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75, + 0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4 +}; +static const u8 enc_assoc059[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_nonce059[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key059[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input060[] __initconst = { + 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, + 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, + 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, + 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e, + 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e, + 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab, + 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65, + 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d +}; +static const u8 enc_output060[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba, + 0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a +}; +static const u8 enc_assoc060[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_nonce060[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key060[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input061[] __initconst = { + 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, + 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, + 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, + 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e, + 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e, + 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab, + 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65, + 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d, + 0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a, + 0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82, + 0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00, + 0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44, + 0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47, + 0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d, + 0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8, + 0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 
0x47 +}; +static const u8 enc_output061[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0xfe, 0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f, + 0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a +}; +static const u8 enc_assoc061[] __initconst = { + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_nonce061[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key061[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input062[] __initconst = { + 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, + 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, + 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, + 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1 +}; +static const u8 enc_output062[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59, + 0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19 +}; +static const u8 enc_assoc062[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_nonce062[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key062[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input063[] __initconst = { + 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, + 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, + 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, + 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1, + 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81, + 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54, + 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a, + 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2 +}; +static const u8 enc_output063[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xc0, 0x3d, 0x9f, 0x67, 0x35, 
0x4a, 0x97, 0xb2, + 0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c +}; +static const u8 enc_assoc063[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_nonce063[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key063[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input064[] __initconst = { + 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, + 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, + 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, + 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1, + 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81, + 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54, + 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a, + 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2, + 0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85, + 0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d, + 0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff, + 0x67, 0xe5, 0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb, + 0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8, + 0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92, + 0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47, + 0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8 +}; +static const u8 enc_output064[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5, + 0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06 +}; +static const u8 enc_assoc064[] __initconst = { + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_nonce064[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key064[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input065[] __initconst = { + 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, + 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, + 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, + 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41 +}; +static const u8 enc_output065[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf, + 0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67 +}; +static const u8 
enc_assoc065[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce065[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key065[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input066[] __initconst = { + 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, + 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, + 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, + 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41, + 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01, + 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4, + 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a, + 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42 +}; +static const u8 enc_output066[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89, + 0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90 +}; +static const u8 enc_assoc066[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce066[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key066[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input067[] __initconst = { + 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, + 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, + 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, + 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41, + 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01, + 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4, + 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a, + 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42, + 0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05, + 0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd, + 0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f, + 0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b, + 0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38, + 0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12, + 0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7, + 0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38 +}; +static const u8 enc_output067[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94, + 0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22 +}; +static const u8 enc_assoc067[] __initconst = { + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, + 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce067[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key067[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input068[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, + 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, + 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, + 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41 +}; +static const u8 enc_output068[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9, + 0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d +}; +static const u8 enc_assoc068[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce068[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key068[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input069[] __initconst = { + 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, + 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, + 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, + 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41, + 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01, + 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4, + 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a, + 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42 +}; +static const u8 enc_output069[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde, + 0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82 +}; +static const u8 enc_assoc069[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce069[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key069[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input070[] __initconst = { + 0x25, 0x6d, 0x40, 
0x88, 0x7f, 0x6b, 0xe8, 0x7c, + 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, + 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, + 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41, + 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01, + 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4, + 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a, + 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42, + 0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05, + 0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd, + 0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f, + 0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b, + 0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38, + 0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12, + 0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7, + 0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38 +}; +static const u8 enc_output070[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3, + 0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71 +}; +static const u8 enc_assoc070[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce070[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key070[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input071[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, + 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, + 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, + 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe +}; +static const u8 enc_output071[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5, + 0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20 +}; +static const u8 enc_assoc071[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce071[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key071[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input072[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, + 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, + 0x20, 
0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, + 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe, + 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe, + 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b, + 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5, + 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd +}; +static const u8 enc_output072[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e, + 0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4 +}; +static const u8 enc_assoc072[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce072[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key072[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - misc */ +static const u8 enc_input073[] __initconst = { + 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, + 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, + 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, + 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe, + 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe, + 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b, + 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5, + 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd, + 0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa, + 0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02, + 0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80, + 0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4, + 0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7, + 0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed, + 0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38, + 0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7 +}; +static const u8 enc_output073[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51, + 0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f +}; +static const u8 enc_assoc073[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_nonce073[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 +}; +static const u8 enc_key073[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 
0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input074[] __initconst = { + 0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51, + 0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69, + 0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f, + 0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90, + 0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0, + 0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac, + 0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2, + 0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5, + 0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b, + 0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49, + 0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16, + 0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58, + 0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73, + 0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3, + 0x93, 0x76, 0x71, 0xa0, 0x62, 0x9b, 0xd9, 0x5c, + 0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a +}; +static const u8 enc_output074[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85, + 0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca +}; +static const u8 enc_assoc074[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce074[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x00, 0x02, 0x50, 0x6e +}; +static const u8 enc_key074[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input075[] __initconst = { + 0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 0x52, 0x75, + 0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56, + 0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2, + 0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed, + 0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f, + 0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f, + 0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0, + 0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8, + 0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 0x32, + 0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8, + 0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b, + 0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b, + 0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd, + 
0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f, + 0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1, + 0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94 +}; +static const u8 enc_output075[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7, + 0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5 +}; +static const u8 enc_assoc075[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce075[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x00, 0x03, 0x18, 0xa5 +}; +static const u8 enc_key075[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input076[] __initconst = { + 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85, + 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02, + 0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09, + 0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8, + 0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd, + 0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85, + 0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39, + 0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae, + 0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed, + 0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4, + 0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c, + 0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33, + 0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19, + 0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11, + 0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9, + 0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5 +}; +static const u8 enc_output076[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d, + 0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63 +}; +static const u8 enc_assoc076[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce076[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0 +}; +static const u8 enc_key076[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input077[] __initconst = { + 0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae, + 0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16, + 0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7, + 0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61, + 0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2, + 0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf, + 0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf, + 0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48, + 0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8, + 0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa, + 0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87, + 0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32, + 0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1, + 0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b, + 0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c, + 0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f +}; +static const u8 enc_output077[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4, + 0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa +}; +static const u8 enc_assoc077[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce077[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66 +}; +static const u8 enc_key077[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input078[] __initconst = { + 0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 0xef, + 0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5, + 0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c, + 0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d, + 0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf, + 0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6, + 0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe, + 0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5, + 0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62, + 0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c, + 0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f, + 0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2, + 0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33, + 0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e, + 0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9, + 0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62 +}; +static const u8 enc_output078[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7, + 0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d +}; +static const u8 enc_assoc078[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce078[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90 +}; +static const u8 enc_key078[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input079[] __initconst = { + 0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9, + 0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6, + 0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c, + 0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82, + 0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21, + 0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12, + 0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c, + 0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d, + 0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32, + 0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84, + 0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3, + 0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24, + 0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94, + 0x90, 0xc1, 0x4f, 0xb1, 
0xd7, 0x55, 0xfe, 0x53, + 0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71, + 0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd +}; +static const u8 enc_output079[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2, + 0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e +}; +static const u8 enc_assoc079[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce079[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a +}; +static const u8 enc_key079[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input080[] __initconst = { + 0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5, + 0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9, + 0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b, + 0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc, + 0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38, + 0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27, + 0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83, + 0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b, + 0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda, + 0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4, + 0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc, + 0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b, + 0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4, + 0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00, + 0x18, 0x01, 0xce, 0x33, 0xc4, 0xe4, 0xa7, 0x7d, + 0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1 +}; +static const u8 enc_output080[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a, + 0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02 +}; +static const u8 enc_assoc080[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce080[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40 +}; +static const u8 enc_key080[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input081[] __initconst = { + 0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92, + 0x15, 0xf5, 0x03, 0xdf, 0x9c, 0xe6, 0x3c, 0x5c, + 0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9, + 0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06, + 0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a, + 0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa, + 0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07, + 0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6, + 0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06, + 0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8, + 0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5, + 0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5, + 0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f, + 0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86, + 0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23, + 0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1 +}; +static const u8 enc_output081[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba, + 0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0 +}; +static const u8 enc_assoc081[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce081[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35 +}; +static const u8 enc_key081[] __initconst = { + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input082[] __initconst = { + 0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb, + 0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49, + 0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16, + 0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d, + 0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9, + 0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e, + 0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d, + 0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc, + 0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6, + 0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca, + 0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83, + 0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08, + 0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95, + 0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66, + 0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83, + 0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07 +}; +static const u8 enc_output082[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42, + 0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae +}; +static const u8 enc_assoc082[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce082[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5 +}; +static const u8 enc_key082[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input083[] __initconst = { + 0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58, + 0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84, + 0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5, + 0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf, + 0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39, + 0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03, + 0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f, + 0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa, + 0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08, + 0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59, + 0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56, + 0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9, + 0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43, + 0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa, + 0xb3, 0x06, 0xa2, 0x1b, 
0x42, 0xd4, 0xc3, 0xba, + 0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a +}; +static const u8 enc_output083[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b, + 0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19 +}; +static const u8 enc_assoc083[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce083[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4 +}; +static const u8 enc_key083[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input084[] __initconst = { + 0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18, + 0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 0x2b, + 0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99, + 0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c, + 0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde, + 0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2, + 0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1, + 0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8, + 0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b, + 0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f, + 0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77, + 0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8, + 0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c, + 0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf, + 0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97, + 0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee +}; +static const u8 enc_output084[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab, + 0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87 +}; +static const u8 enc_assoc084[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce084[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8 +}; +static const u8 enc_key084[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - checking for int overflows */ +static const u8 enc_input085[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6, + 0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed, + 0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7, + 0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37, + 0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a, + 0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b +}; +static const u8 enc_output085[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68, + 0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43 +}; +static const u8 enc_assoc085[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce085[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key085[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 
0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input086[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output086[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f +}; +static const u8 enc_assoc086[] __initconst = { + 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1, + 0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00 +}; +static const u8 enc_nonce086[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key086[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input087[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output087[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_assoc087[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x24, 0x7e, 0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f, + 0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58 +}; +static const u8 enc_nonce087[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key087[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input088[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output088[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_assoc088[] __initconst = { + 0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f, + 0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00 +}; +static const u8 enc_nonce088[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key088[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input089[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output089[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, + 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 +}; +static const u8 enc_assoc089[] __initconst = { + 0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae, + 0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02 +}; +static const u8 enc_nonce089[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key089[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input090[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 
0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output090[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f +}; +static const u8 enc_assoc090[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe, + 0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3 +}; +static const u8 enc_nonce090[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key090[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input091[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output091[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 +}; +static const u8 enc_assoc091[] __initconst = { + 0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30, + 0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01 +}; +static const u8 enc_nonce091[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key091[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - special case tag */ +static const u8 enc_input092[] __initconst = { + 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, + 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, + 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, + 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, + 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, + 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, + 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, + 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d +}; +static const u8 enc_output092[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 enc_assoc092[] __initconst = { + 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11, + 0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01 +}; +static const u8 enc_nonce092[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b +}; +static const u8 enc_key092[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input093[] __initconst = { + 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d, + 0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44, + 0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3, + 0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b, + 0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb, + 0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c +}; +static const u8 enc_output093[] __initconst = { + 0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14, + 0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7, + 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96, + 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00, + 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96, + 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00, + 0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77, + 0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a +}; +static const u8 enc_assoc093[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce093[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key093[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input094[] __initconst = { + 0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90, + 0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45, + 0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef, + 0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09, + 0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20, + 0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10 +}; +static const u8 enc_output094[] __initconst = { + 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66, + 0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02, + 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29, + 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02, + 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29, + 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02, + 0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6, + 0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc +}; +static const u8 enc_assoc094[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce094[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key094[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input095[] __initconst = { + 0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b, + 0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46, + 0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6, + 0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a, + 0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79, + 0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13 +}; +static const u8 enc_output095[] __initconst = { + 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad, + 0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01, + 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70, + 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01, + 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70, + 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01, + 0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02, + 0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0 +}; +static const u8 enc_assoc095[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce095[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key095[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input096[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60, + 0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c, + 0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67, + 0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94, + 0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94, + 0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04 +}; +static const u8 enc_output096[] __initconst = { + 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96, + 0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b, + 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52, + 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f, + 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52, + 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f, + 0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73, + 0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8 +}; +static const u8 enc_assoc096[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce096[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key096[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input097[] __initconst = { + 0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6, + 0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47, + 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01, + 0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96, + 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce, + 0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f +}; +static const u8 enc_output097[] __initconst = { + 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00, + 0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7, + 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7, + 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d, + 0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b, + 0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26 +}; +static const u8 enc_assoc097[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce097[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key097[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input098[] __initconst = { + 0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62, + 0x9e, 0xfe, 0xff, 0x78, 0xd7, 0xd8, 0x6b, 0x45, + 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0, + 0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8, + 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 0x65, 0x6f, + 0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1 +}; +static const u8 enc_output098[] 
__initconst = { + 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94, + 0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66, + 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66, + 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3, + 0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e, + 0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b +}; +static const u8 enc_assoc098[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce098[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key098[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input099[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12, + 0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98, + 0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95, + 0x9a, 0x72, 0x91, 0xb7, 0xa5, 0xaf, 0x13, 0x48, + 0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66, + 0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8 +}; +static const u8 enc_output099[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 0xe4, + 0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf, + 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0, + 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3, + 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0, + 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3, + 0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56, + 0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1 +}; +static const u8 enc_assoc099[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce099[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key099[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input100[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70, + 0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd, + 0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77, + 0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75, + 0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84, + 0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5 +}; +static const u8 enc_output100[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86, + 0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa, + 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42, + 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee, + 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42, + 
0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee, + 0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59, + 0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6 +}; +static const u8 enc_assoc100[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce100[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key100[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input101[] __initconst = { + 0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c, + 0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde, + 0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92, + 0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f, + 0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea, + 0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88 +}; +static const u8 enc_output101[] __initconst = { + 0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5, + 0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d, + 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7, + 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04, + 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7, + 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04, + 0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65, + 0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05 +}; +static const u8 enc_assoc101[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce101[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key101[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input102[] __initconst = { + 0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0, + 0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d, + 0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73, + 0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b, + 0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b, + 0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c +}; +static const u8 enc_output102[] __initconst = { + 0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39, + 0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e, + 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46, + 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00, + 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46, + 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00, + 0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56, + 0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04 +}; +static const u8 
enc_assoc102[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce102[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key102[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input103[] __initconst = { + 0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d, + 0x44, 0xa1, 0xaf, 0x1e, 0xab, 0xde, 0xb4, 0xce, + 0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c, + 0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a, + 0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14, + 0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d +}; +static const u8 enc_output103[] __initconst = { + 0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94, + 0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d, + 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59, + 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01, + 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59, + 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01, + 0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a, + 0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08 +}; +static const u8 enc_assoc103[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce103[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key103[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input104[] __initconst = { + 0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac, + 0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b, + 0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9, + 0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98, + 0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1, + 0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f +}; +static const u8 enc_output104[] __initconst = { + 0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35, + 0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec, + 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec, + 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03, + 0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d, + 0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d +}; +static const u8 enc_assoc104[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce104[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static 
const u8 enc_key104[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input105[] __initconst = { + 0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5, + 0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99, + 0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e, + 0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a, + 0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46, + 0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d +}; +static const u8 enc_output105[] __initconst = { + 0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 0x3c, + 0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b, + 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b, + 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01, + 0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88, + 0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd +}; +static const u8 enc_assoc105[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce105[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key105[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input106[] __initconst = { + 0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06, + 0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5, + 0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68, + 0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a, + 0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10, + 0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d +}; +static const u8 enc_output106[] __initconst = { + 0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f, + 0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76, + 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d, + 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01, + 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d, + 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01, + 0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88, + 0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a +}; +static const u8 enc_assoc106[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce106[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key106[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 
0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input107[] __initconst = { + 0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71, + 0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44, + 0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c, + 0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a, + 0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3, + 0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13 +}; +static const u8 enc_output107[] __initconst = { + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87, + 0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03, + 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa, + 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01, + 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa, + 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01, + 0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02, + 0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88 +}; +static const u8 enc_assoc107[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce107[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key107[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input108[] __initconst = { + 0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43, + 0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6, + 0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11, + 0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99, + 0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69, + 0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e +}; +static const u8 enc_output108[] __initconst = { + 0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda, + 0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15, + 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24, + 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02, + 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24, + 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02, + 0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96, + 0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a +}; +static const u8 enc_assoc108[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce108[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key108[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input109[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a, + 0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb, + 0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96, + 0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f, + 0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59, + 0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16 +}; +static const u8 enc_output109[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac, + 0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c, + 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50, + 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04, + 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50, + 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04, + 0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1, + 0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb +}; +static const u8 enc_assoc109[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce109[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key109[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input110[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5, + 0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9, + 0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b, + 0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b, + 0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4, + 0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12 +}; +static const u8 enc_output110[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43, + 0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd, + 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00, + 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd, + 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00, + 0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6, + 0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe +}; +static const u8 enc_assoc110[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 
enc_nonce110[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key110[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input111[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda, + 0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55, + 0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5, + 0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f, + 0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a, + 0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16 +}; +static const u8 enc_output111[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c, + 0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12, + 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13, + 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04, + 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13, + 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04, + 0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4, + 0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9 +}; +static const u8 enc_assoc111[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce111[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key111[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input112[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38, + 0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a, + 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65, + 0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a, + 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa, + 0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13 +}; +static const u8 enc_output112[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce, + 0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3, + 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3, + 0x31, 0xf2, 
0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01, + 0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7, + 0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce +}; +static const u8 enc_assoc112[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce112[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key112[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input113[] __initconst = { + 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86, + 0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f, + 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1, + 0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f, + 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e, + 0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16 +}; +static const u8 enc_output113[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70, + 0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37, + 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37, + 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04, + 0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa, + 0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3 +}; +static const u8 enc_assoc113[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce113[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key113[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input114[] __initconst = { + 0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73, + 0x74, 0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc, + 0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a, + 0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f, + 0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2, + 0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88 +}; +static const u8 enc_output114[] __initconst = { + 0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea, + 0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f, + 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf, + 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04, + 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xde, 0xba, 0xb4, 0xa1, 0x58, 
0x42, 0x50, 0xbf, + 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04, + 0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69, + 0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1 +}; +static const u8 enc_assoc114[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce114[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key114[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input115[] __initconst = { + 0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 0xda, 0xeb, + 0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68, + 0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, + 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, + 0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33, + 0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98, + 0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, + 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, + 0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b, + 0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f +}; +static const u8 enc_output115[] __initconst = { + 0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72, + 0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb, + 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06, + 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03, + 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06, + 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03, + 0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74, + 0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05 +}; +static const u8 enc_assoc115[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce115[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key115[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input116[] __initconst = { + 0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd, + 0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44, + 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea, + 0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a, + 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25, + 0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13 +}; +static const u8 enc_output116[] __initconst = { + 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b, + 0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c, + 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01, + 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c, + 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01, + 0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50, + 0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2 +}; +static const u8 enc_assoc116[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce116[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key116[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input117[] __initconst = { + 0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d, + 0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47, + 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06, + 0x6a, 0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08, + 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9, + 0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11 +}; +static const u8 enc_output117[] __initconst = { + 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf2, 0x17, 0xae, 0x33, 0x49, 0xb6, 0xb5, 0xbb, + 0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0, + 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03, + 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0, + 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03, + 0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53, + 0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7 +}; +static const u8 enc_assoc117[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce117[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key117[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +/* wycheproof - edge case intermediate sums in poly1305 */ +static const u8 enc_input118[] __initconst = { + 0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, + 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, + 0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03, + 0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43, + 0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, + 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, + 0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a, + 0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f, + 0x4f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, + 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, + 0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85, + 0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16 +}; +static const u8 enc_output118[] __initconst = { + 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5, + 0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04, + 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c, + 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04, + 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c, + 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04, + 0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0, + 0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1 +}; +static const u8 enc_assoc118[] __initconst = { + 0xff, 0xff, 0xff, 0xff +}; +static const u8 enc_nonce118[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 +}; +static const u8 enc_key118[] __initconst = { + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f +}; + +static const struct chacha20poly1305_testvec +chacha20poly1305_enc_vectors[] __initconst = { + { enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001, + sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) }, + { enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002, + sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) }, + { enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003, + sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) }, + { enc_input004, enc_output004, enc_assoc004, enc_nonce004, enc_key004, + sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) }, + { enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005, + sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) }, + { enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006, + sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) }, + { enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007, + sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) }, + { enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008, + sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) }, + { enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009, + sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) }, + { enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010, + sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) }, + { enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011, + sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) }, + { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012, + sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) }, + { enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013, + sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) }, + { enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014, + sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) }, + { enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015, + sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) }, + { enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016, + sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) }, + { enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017, + sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) }, + { enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018, + sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) }, + { enc_input019, 
enc_output019, enc_assoc019, enc_nonce019, enc_key019, + sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) }, + { enc_input020, enc_output020, enc_assoc020, enc_nonce020, enc_key020, + sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) }, + { enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021, + sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) }, + { enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022, + sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) }, + { enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023, + sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) }, + { enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024, + sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) }, + { enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025, + sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) }, + { enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026, + sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) }, + { enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027, + sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) }, + { enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028, + sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) }, + { enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029, + sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) }, + { enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030, + sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) }, + { enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031, + sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) }, + { enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032, + sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) }, + { enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033, + sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) }, + { enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034, + sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) }, + { enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035, + sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) }, + { enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036, + sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) }, + { enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037, + sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) }, + { enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038, + sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) }, + { enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039, + sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) }, + { enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040, + sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) }, + { enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041, + sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) }, + { enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042, + sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) }, + { enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043, + sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) 
}, + { enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044, + sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) }, + { enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045, + sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) }, + { enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046, + sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) }, + { enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047, + sizeof(enc_input047), sizeof(enc_assoc047), sizeof(enc_nonce047) }, + { enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048, + sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) }, + { enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049, + sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) }, + { enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050, + sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) }, + { enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051, + sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) }, + { enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052, + sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) }, + { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053, + sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) }, + { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054, + sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) }, + { enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055, + sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) }, + { enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056, + sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) }, + { enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057, + sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) }, + { enc_input058, enc_output058, enc_assoc058, enc_nonce058, enc_key058, + sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) }, + { enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059, + sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) }, + { enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060, + sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) }, + { enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061, + sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) }, + { enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062, + sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) }, + { enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063, + sizeof(enc_input063), sizeof(enc_assoc063), sizeof(enc_nonce063) }, + { enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064, + sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) }, + { enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065, + sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) }, + { enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066, + sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) }, + { enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067, + sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) }, + { enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068, + sizeof(enc_input068), sizeof(enc_assoc068), 
sizeof(enc_nonce068) }, + { enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069, + sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) }, + { enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070, + sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) }, + { enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071, + sizeof(enc_input071), sizeof(enc_assoc071), sizeof(enc_nonce071) }, + { enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072, + sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) }, + { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073, + sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) }, + { enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074, + sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) }, + { enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075, + sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) }, + { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076, + sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) }, + { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077, + sizeof(enc_input077), sizeof(enc_assoc077), sizeof(enc_nonce077) }, + { enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078, + sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) }, + { enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079, + sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) }, + { enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080, + sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) }, + { enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081, + sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) }, + { enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082, + sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) }, + { enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083, + sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) }, + { enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084, + sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) }, + { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085, + sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) }, + { enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086, + sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) }, + { enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087, + sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) }, + { enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088, + sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) }, + { enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089, + sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) }, + { enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090, + sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) }, + { enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091, + sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) }, + { enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092, + sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) }, + { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093, + sizeof(enc_input093), 
sizeof(enc_assoc093), sizeof(enc_nonce093) }, + { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094, + sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) }, + { enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095, + sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) }, + { enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096, + sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) }, + { enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097, + sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) }, + { enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098, + sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) }, + { enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099, + sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) }, + { enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100, + sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) }, + { enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101, + sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) }, + { enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102, + sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) }, + { enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103, + sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) }, + { enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104, + sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) }, + { enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105, + sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) }, + { enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106, + sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) }, + { enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107, + sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) }, + { enc_input108, enc_output108, enc_assoc108, enc_nonce108, enc_key108, + sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) }, + { enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109, + sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) }, + { enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110, + sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) }, + { enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111, + sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) }, + { enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112, + sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) }, + { enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113, + sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) }, + { enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114, + sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) }, + { enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115, + sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) }, + { enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116, + sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) }, + { enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117, + sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) }, + { enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118, + 
sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) } +}; + +static const u8 dec_input001[] __initconst = { + 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4, + 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd, + 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89, + 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2, + 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee, + 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0, + 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00, + 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf, + 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce, + 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81, + 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd, + 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55, + 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61, + 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38, + 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0, + 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4, + 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46, + 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9, + 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e, + 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e, + 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15, + 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a, + 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea, + 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a, + 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99, + 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e, + 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10, + 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10, + 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94, + 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30, + 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf, + 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29, + 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70, + 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, + 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, + 0x38 +}; +static const u8 dec_output001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 
0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 dec_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 dec_nonce001[] __initconst = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 +}; +static const u8 dec_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const u8 dec_input002[] __initconst = { + 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1, + 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92 +}; +static const u8 dec_output002[] __initconst = { }; +static const u8 dec_assoc002[] __initconst = { }; +static const u8 dec_nonce002[] __initconst = { + 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e +}; +static const u8 dec_key002[] __initconst = { + 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f, + 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86, + 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef, + 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68 +}; + +static const u8 dec_input003[] __initconst = { + 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6, + 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77 +}; +static const u8 dec_output003[] __initconst = { }; +static const u8 dec_assoc003[] __initconst = { + 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b +}; +static const u8 dec_nonce003[] __initconst = { + 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d +}; +static const u8 dec_key003[] __initconst = { + 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88, + 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a, + 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08, + 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d +}; + +static const u8 dec_input004[] __initconst = { + 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2, + 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac, + 0x89 +}; +static const u8 dec_output004[] __initconst = { + 0xa4 +}; +static const u8 dec_assoc004[] __initconst = { + 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40 +}; +static const u8 dec_nonce004[] __initconst = { + 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4 +}; +static const u8 dec_key004[] __initconst = { + 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8, + 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1, + 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d, + 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e +}; + +static const u8 dec_input005[] __initconst = { + 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e, + 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c, + 0xac +}; +static const u8 dec_output005[] __initconst = { + 0x2d +}; +static const u8 dec_assoc005[] __initconst = { }; +static const u8 dec_nonce005[] __initconst = { + 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30 +}; +static const u8 dec_key005[] __initconst = { + 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31, + 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87, + 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01, + 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87 +}; + +static const u8 dec_input006[] __initconst = { + 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1, + 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15, + 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c, + 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda, + 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11, + 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8, + 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc, + 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3, + 0x66, 0xd7, 0x5e, 0xbc, 
0x44, 0xbf, 0x53, 0xa5, + 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02, + 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93, + 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78, + 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1, + 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66, + 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc, + 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0, + 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d, + 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a, + 0xeb +}; +static const u8 dec_output006[] __initconst = { + 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a, + 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92, + 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37, + 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50, + 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec, + 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb, + 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66, + 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb, + 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b, + 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e, + 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3, + 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0, + 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb, + 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41, + 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc, + 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde, + 0x8f +}; +static const u8 dec_assoc006[] __initconst = { + 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b +}; +static const u8 dec_nonce006[] __initconst = { + 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c +}; +static const u8 dec_key006[] __initconst = { + 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae, + 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78, + 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9, + 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01 +}; + +static const u8 dec_input007[] __initconst = { + 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c, + 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8, + 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c, + 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb, + 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0, + 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21, + 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70, + 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac, + 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99, + 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9, + 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f, + 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7, + 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53, + 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12, + 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6, + 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0, + 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54, + 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6, + 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e, + 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb, + 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30, + 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f, + 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2, + 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e, + 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34, + 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39, + 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7, + 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9, + 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82, + 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04, + 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34, + 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef, + 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42, + 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53 +}; +static 
const u8 dec_output007[] __initconst = { + 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5, + 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a, + 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1, + 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17, + 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c, + 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1, + 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51, + 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1, + 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86, + 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a, + 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a, + 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98, + 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36, + 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34, + 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57, + 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84, + 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4, + 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80, + 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82, + 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5, + 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d, + 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c, + 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf, + 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc, + 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3, + 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14, + 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81, + 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77, + 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3, + 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2, + 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b, + 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3 +}; +static const u8 dec_assoc007[] __initconst = { }; +static const u8 dec_nonce007[] __initconst = { + 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0 +}; +static const u8 dec_key007[] __initconst = { + 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd, + 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c, + 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80, + 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01 +}; + +static const u8 dec_input008[] __initconst = { + 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd, + 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1, + 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93, + 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d, + 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c, + 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6, + 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4, + 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5, + 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84, + 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd, + 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed, + 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab, + 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13, + 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49, + 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6, + 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8, + 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2, + 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94, + 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18, + 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60, + 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8, + 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b, + 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f, + 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c, + 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20, + 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff, + 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9, + 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c, + 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9, + 0x07, 
0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6, + 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea, + 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e, + 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82, + 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1, + 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70, + 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1, + 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c, + 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7, + 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc, + 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc, + 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3, + 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb, + 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97, + 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f, + 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39, + 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f, + 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d, + 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2, + 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d, + 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96, + 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b, + 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20, + 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95, + 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb, + 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35, + 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62, + 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9, + 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6, + 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8, + 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a, + 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93, + 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14, + 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99, + 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86, + 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f, + 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54 +}; +static const u8 dec_output008[] __initconst = { + 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10, + 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2, + 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c, + 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb, + 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12, + 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa, + 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6, + 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4, + 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91, + 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb, + 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47, + 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15, + 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f, + 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a, + 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3, + 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97, + 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80, + 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e, + 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f, + 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10, + 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a, + 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0, + 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35, + 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d, + 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d, + 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57, + 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4, + 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f, + 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39, + 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda, + 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17, + 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43, + 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19, + 0x59, 
0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09, + 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21, + 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07, + 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f, + 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b, + 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a, + 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed, + 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2, + 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca, + 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff, + 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b, + 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b, + 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b, + 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6, + 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04, + 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48, + 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b, + 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13, + 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8, + 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f, + 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0, + 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92, + 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a, + 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41, + 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17, + 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30, + 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20, + 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49, + 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a, + 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b, + 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3 +}; +static const u8 dec_assoc008[] __initconst = { }; +static const u8 dec_nonce008[] __initconst = { + 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02 +}; +static const u8 dec_key008[] __initconst = { + 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53, + 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0, + 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86, + 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba +}; + +static const u8 dec_input009[] __initconst = { + 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf, + 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66, + 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72, + 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd, + 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28, + 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe, + 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06, + 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5, + 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7, + 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09, + 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a, + 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00, + 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62, + 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb, + 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2, + 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28, + 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e, + 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a, + 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6, + 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83, + 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9, + 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a, + 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79, + 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a, + 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea, + 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b, + 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52, + 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb, + 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89, + 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad, + 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19, + 0xfb, 
0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71, + 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d, + 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54, + 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a, + 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d, + 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95, + 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42, + 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16, + 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6, + 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf, + 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d, + 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f, + 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b, + 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e, + 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4, + 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c, + 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4, + 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1, + 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb, + 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff, + 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2, + 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06, + 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66, + 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90, + 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55, + 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc, + 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8, + 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62, + 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba, + 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2, + 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89, + 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06, + 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90, + 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf, + 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8, + 0xae +}; +static const u8 dec_output009[] __initconst = { + 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b, + 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8, + 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca, + 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09, + 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5, + 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85, + 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44, + 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97, + 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77, + 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41, + 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c, + 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00, + 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82, + 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f, + 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e, + 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55, + 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab, + 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17, + 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e, + 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f, + 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82, + 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3, + 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f, + 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0, + 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08, + 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b, + 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85, + 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28, + 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c, + 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62, + 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2, + 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3, + 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62, + 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40, + 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f, + 
0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b, + 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91, + 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5, + 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c, + 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4, + 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49, + 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04, + 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03, + 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa, + 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec, + 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6, + 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69, + 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36, + 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8, + 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf, + 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe, + 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82, + 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab, + 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d, + 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3, + 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5, + 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34, + 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49, + 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f, + 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d, + 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42, + 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef, + 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27, + 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52, + 0x65 +}; +static const u8 dec_assoc009[] __initconst = { + 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e, + 0xef +}; +static const u8 dec_nonce009[] __initconst = { + 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78 +}; +static const u8 dec_key009[] __initconst = { + 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5, + 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86, + 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2, + 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b +}; + +static const u8 dec_input010[] __initconst = { + 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b, + 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74, + 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1, + 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd, + 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6, + 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5, + 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96, + 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02, + 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30, + 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57, + 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53, + 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65, + 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71, + 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9, + 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18, + 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce, + 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a, + 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69, + 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2, + 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95, + 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49, + 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e, + 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a, + 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a, + 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e, + 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19, + 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b, + 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75, + 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d, + 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d, + 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f, + 0xf7, 0x04, 0xf7, 0xa1, 0x56, 
0xac, 0x4f, 0x1a, + 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d, + 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5, + 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c, + 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77, + 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46, + 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43, + 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe, + 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8, + 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76, + 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47, + 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8, + 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32, + 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59, + 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae, + 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a, + 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3, + 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74, + 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75, + 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2, + 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e, + 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2, + 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9, + 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1, + 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07, + 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79, + 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71, + 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad, + 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a, + 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c, + 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9, + 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79, + 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27, + 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90, + 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe, + 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99, + 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1, + 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9, + 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0, + 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28, + 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e, + 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20, + 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60, + 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47, + 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68, + 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe, + 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33, + 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8, + 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38, + 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7, + 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04, + 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c, + 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f, + 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c, + 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77, + 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54, + 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5, + 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4, + 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2, + 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e, + 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27, + 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f, + 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92, + 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55, + 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe, + 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04, + 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4, + 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56, + 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02, + 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2, + 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8, + 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 
0x1a, 0x02, 0x27, + 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47, + 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10, + 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43, + 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0, + 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee, + 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47, + 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6, + 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d, + 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c, + 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3, + 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b, + 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09, + 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d, + 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1, + 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd, + 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4, + 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63, + 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87, + 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd, + 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e, + 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a, + 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c, + 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38, + 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a, + 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5, + 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9, + 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0 +}; +static const u8 dec_output010[] __initconst = { + 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf, + 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c, + 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22, + 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc, + 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16, + 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7, + 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4, + 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d, + 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5, + 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46, + 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82, + 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b, + 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a, + 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf, + 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca, + 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95, + 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09, + 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3, + 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3, + 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f, + 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58, + 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad, + 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde, + 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44, + 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a, + 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9, + 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26, + 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc, + 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74, + 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b, + 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93, + 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37, + 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f, + 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d, + 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca, + 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73, + 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f, + 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1, + 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9, + 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76, + 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac, + 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7, + 0x43, 0x0c, 0xb9, 0xde, 0xb9, 
0x68, 0xfb, 0xce, + 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30, + 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb, + 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa, + 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd, + 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f, + 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb, + 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34, + 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e, + 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f, + 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53, + 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41, + 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e, + 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d, + 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27, + 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e, + 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8, + 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a, + 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12, + 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3, + 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66, + 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0, + 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c, + 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4, + 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49, + 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90, + 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11, + 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c, + 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b, + 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74, + 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c, + 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27, + 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1, + 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27, + 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88, + 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27, + 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b, + 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39, + 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7, + 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc, + 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe, + 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5, + 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf, + 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05, + 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73, + 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda, + 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe, + 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71, + 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed, + 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d, + 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33, + 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f, + 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a, + 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa, + 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e, + 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e, + 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87, + 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5, + 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4, + 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38, + 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34, + 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f, + 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36, + 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69, + 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44, + 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5, + 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce, + 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd, + 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27, + 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f, + 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8, + 0xd6, 0x11, 0x0c, 0x84, 0xa9, 
0x03, 0xee, 0x2a, + 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5, + 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca, + 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e, + 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92, + 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13, + 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf, + 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6, + 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3, + 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b, + 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d, + 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f, + 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40, + 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c, + 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f +}; +static const u8 dec_assoc010[] __initconst = { + 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27, + 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2 +}; +static const u8 dec_nonce010[] __initconst = { + 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30 +}; +static const u8 dec_key010[] __initconst = { + 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44, + 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf, + 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74, + 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7 +}; + +static const u8 dec_input011[] __initconst = { + 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8, + 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc, + 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74, + 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73, + 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e, + 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9, + 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e, + 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd, + 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57, + 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19, + 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f, + 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45, + 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e, + 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39, + 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03, + 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f, + 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0, + 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce, + 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb, + 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52, + 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21, + 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a, + 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35, + 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91, + 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b, + 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e, + 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19, + 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07, + 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18, + 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96, + 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68, + 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4, + 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57, + 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c, + 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23, + 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8, + 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6, + 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40, + 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab, + 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb, + 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea, + 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8, + 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31, + 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0, + 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc, + 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 
0x99, 0xa4, 0x94, + 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1, + 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46, + 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6, + 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7, + 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71, + 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a, + 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33, + 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38, + 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23, + 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb, + 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65, + 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73, + 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8, + 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb, + 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a, + 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca, + 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5, + 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71, + 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8, + 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d, + 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6, + 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d, + 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7, + 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5, + 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8, + 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd, + 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29, + 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22, + 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5, + 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67, + 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11, + 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e, + 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09, + 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4, + 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f, + 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa, + 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec, + 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b, + 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d, + 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b, + 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48, + 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3, + 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63, + 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd, + 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78, + 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed, + 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82, + 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f, + 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3, + 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9, + 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72, + 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74, + 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40, + 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b, + 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a, + 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5, + 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98, + 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71, + 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e, + 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4, + 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46, + 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e, + 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f, + 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93, + 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0, + 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5, + 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61, + 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64, + 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85, + 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20, + 0x72, 0xc9, 0x28, 0x7b, 0x79, 
0x00, 0xa1, 0xa6, + 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc, + 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8, + 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50, + 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4, + 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80, + 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0, + 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a, + 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35, + 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43, + 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12, + 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7, + 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34, + 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42, + 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0, + 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95, + 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74, + 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5, + 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12, + 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6, + 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86, + 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97, + 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45, + 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19, + 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86, + 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c, + 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba, + 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29, + 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6, + 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6, + 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09, + 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31, + 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99, + 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b, + 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca, + 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00, + 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93, + 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3, + 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07, + 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda, + 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90, + 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b, + 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a, + 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6, + 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c, + 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57, + 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15, + 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e, + 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51, + 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75, + 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19, + 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08, + 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14, + 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba, + 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff, + 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90, + 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e, + 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93, + 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad, + 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2, + 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac, + 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d, + 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06, + 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c, + 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91, + 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17, + 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20, + 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7, + 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf, + 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c, + 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2, + 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 
0xa2, 0x8c, 0x7e, + 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a, + 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05, + 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58, + 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8, + 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d, + 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71, + 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3, + 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe, + 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62, + 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16, + 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66, + 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4, + 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2, + 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35, + 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3, + 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4, + 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f, + 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe, + 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56, + 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b, + 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37, + 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3, + 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f, + 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f, + 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0, + 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70, + 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd, + 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f, + 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e, + 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67, + 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51, + 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23, + 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3, + 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5, + 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09, + 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7, + 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed, + 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb, + 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6, + 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5, + 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96, + 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe, + 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44, + 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6, + 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e, + 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0, + 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79, + 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f, + 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d, + 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82, + 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47, + 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93, + 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6, + 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69, + 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e, + 0x2b, 0xdf, 0xcd, 0xf9, 0x3c +}; +static const u8 dec_output011[] __initconst = { + 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b, + 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b, + 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d, + 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee, + 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30, + 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20, + 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f, + 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e, + 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66, + 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46, + 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35, + 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6, + 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0, + 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15, + 
0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13, + 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7, + 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3, + 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37, + 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc, + 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95, + 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8, + 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac, + 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45, + 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf, + 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d, + 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc, + 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45, + 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a, + 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec, + 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e, + 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10, + 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8, + 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66, + 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0, + 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62, + 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b, + 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4, + 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96, + 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7, + 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74, + 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8, + 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b, + 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70, + 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95, + 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3, + 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9, + 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d, + 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e, + 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32, + 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5, + 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80, + 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3, + 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad, + 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d, + 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20, + 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17, + 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6, + 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d, + 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82, + 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c, + 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9, + 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb, + 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96, + 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9, + 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f, + 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40, + 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc, + 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce, + 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71, + 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f, + 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35, + 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90, + 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8, + 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01, + 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1, + 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe, + 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4, + 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf, + 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9, + 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f, + 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04, + 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7, + 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15, + 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc, + 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0, + 
0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae, + 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb, + 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed, + 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51, + 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52, + 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84, + 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5, + 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4, + 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e, + 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74, + 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f, + 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13, + 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea, + 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b, + 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef, + 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09, + 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe, + 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1, + 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9, + 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15, + 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a, + 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab, + 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36, + 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd, + 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde, + 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd, + 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47, + 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5, + 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69, + 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21, + 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98, + 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07, + 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57, + 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd, + 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03, + 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11, + 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96, + 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91, + 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d, + 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0, + 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9, + 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42, + 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a, + 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18, + 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc, + 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce, + 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc, + 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0, + 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf, + 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7, + 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80, + 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c, + 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82, + 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9, + 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20, + 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58, + 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6, + 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc, + 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50, + 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86, + 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a, + 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80, + 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec, + 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08, + 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c, + 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde, + 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d, + 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17, + 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f, + 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26, + 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96, + 
0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97, + 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6, + 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55, + 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e, + 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88, + 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5, + 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b, + 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15, + 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1, + 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4, + 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3, + 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf, + 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e, + 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb, + 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76, + 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5, + 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c, + 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde, + 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f, + 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51, + 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9, + 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99, + 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6, + 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04, + 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31, + 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a, + 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56, + 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e, + 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78, + 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a, + 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7, + 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb, + 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6, + 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8, + 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc, + 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84, + 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86, + 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76, + 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a, + 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73, + 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8, + 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6, + 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2, + 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56, + 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb, + 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab, + 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76, + 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69, + 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d, + 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc, + 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22, + 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39, + 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6, + 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9, + 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f, + 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1, + 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83, + 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc, + 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4, + 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59, + 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68, + 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef, + 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1, + 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3, + 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44, + 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09, + 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8, + 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a, + 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d, + 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae, + 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2, + 
0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10, + 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a, + 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34, + 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f, + 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9, + 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b, + 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d, + 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57, + 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03, + 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87, + 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca, + 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53, + 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f, + 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61, + 0x10, 0x1e, 0xbf, 0xec, 0xa8 +}; +static const u8 dec_assoc011[] __initconst = { + 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7 +}; +static const u8 dec_nonce011[] __initconst = { + 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa +}; +static const u8 dec_key011[] __initconst = { + 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85, + 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca, + 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52, + 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38 +}; + +static const u8 dec_input012[] __initconst = { + 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, + 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, + 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, + 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, + 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, + 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, + 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, + 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, + 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, + 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, + 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, + 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, + 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, + 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, + 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, + 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, + 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, + 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, + 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, + 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, + 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, + 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, + 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, + 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, + 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, + 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, + 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, + 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, + 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, + 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, + 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, + 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, + 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, + 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, + 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, + 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, + 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, + 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, + 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, + 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, + 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, + 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, + 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, + 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, + 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, + 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, + 0xfa, 0x24, 0xc1, 0x63, 
0x75, 0xbb, 0x47, 0x75, + 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, + 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, + 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, + 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, + 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, + 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, + 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, + 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, + 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, + 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, + 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, + 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, + 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, + 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, + 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, + 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, + 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, + 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, + 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, + 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, + 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, + 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, + 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, + 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, + 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, + 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, + 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, + 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, + 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, + 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, + 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, + 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, + 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, + 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, + 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, + 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, + 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, + 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, + 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, + 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, + 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, + 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, + 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, + 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, + 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, + 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, + 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, + 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, + 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, + 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, + 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, + 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, + 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, + 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, + 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, + 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, + 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, + 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, + 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, + 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, + 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, + 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, + 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, + 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, + 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, + 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, + 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, + 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, + 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, + 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, + 0x87, 0x7f, 0x84, 0xb4, 
0xcc, 0x14, 0xa9, 0x53, + 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, + 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, + 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, + 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, + 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, + 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, + 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, + 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, + 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, + 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, + 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, + 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, + 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, + 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, + 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, + 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, + 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, + 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, + 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, + 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, + 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, + 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, + 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, + 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, + 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, + 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, + 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, + 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, + 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, + 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, + 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, + 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, + 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, + 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, + 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, + 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, + 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, + 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, + 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, + 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, + 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, + 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, + 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, + 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, + 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, + 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, + 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, + 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, + 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, + 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, + 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, + 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, + 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, + 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, + 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, + 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, + 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, + 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, + 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, + 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, + 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, + 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, + 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, + 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, + 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, + 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, + 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, + 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, + 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, + 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, + 0xa9, 0xcf, 0x5c, 0x4a, 
0xe5, 0x09, 0x4e, 0xde, + 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, + 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, + 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, + 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, + 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, + 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, + 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, + 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, + 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, + 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, + 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, + 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, + 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, + 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, + 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, + 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, + 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, + 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, + 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, + 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, + 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, + 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, + 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, + 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, + 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, + 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, + 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, + 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, + 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, + 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, + 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, + 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, + 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, + 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, + 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, + 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, + 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, + 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, + 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, + 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, + 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, + 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, + 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, + 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, + 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, + 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, + 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, + 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, + 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, + 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, + 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, + 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, + 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, + 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, + 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, + 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, + 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, + 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, + 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, + 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, + 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, + 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, + 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, + 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, + 0x70, 0xcf, 0xd6 +}; +static const u8 dec_output012[] __initconst = { + 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, + 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, + 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, + 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, + 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, + 0x78, 
0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, + 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, + 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, + 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, + 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, + 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, + 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, + 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, + 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, + 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, + 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, + 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, + 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, + 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, + 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, + 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, + 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, + 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, + 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, + 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, + 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, + 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, + 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, + 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, + 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, + 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, + 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, + 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, + 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, + 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, + 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, + 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, + 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, + 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, + 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, + 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, + 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, + 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, + 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, + 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, + 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, + 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, + 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, + 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, + 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, + 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, + 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, + 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, + 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, + 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, + 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, + 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, + 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, + 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, + 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, + 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, + 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, + 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, + 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, + 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, + 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, + 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, + 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, + 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, + 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, + 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, + 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, + 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, + 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, + 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, + 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, + 0x3d, 
0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, + 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, + 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, + 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, + 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, + 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, + 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, + 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, + 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, + 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, + 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, + 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, + 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, + 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, + 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, + 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, + 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, + 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, + 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, + 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, + 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, + 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, + 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, + 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, + 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, + 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, + 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, + 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, + 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, + 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, + 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, + 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, + 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, + 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, + 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, + 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, + 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, + 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, + 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, + 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, + 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, + 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, + 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, + 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, + 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, + 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, + 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, + 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, + 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, + 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, + 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, + 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, + 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, + 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, + 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, + 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, + 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, + 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, + 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, + 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, + 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, + 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, + 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, + 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, + 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, + 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, + 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, + 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, + 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, + 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, + 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, + 0x4b, 
0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, + 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, + 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, + 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, + 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, + 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, + 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, + 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, + 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, + 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, + 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, + 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, + 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, + 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, + 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, + 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, + 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, + 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, + 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, + 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, + 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, + 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, + 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, + 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, + 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, + 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, + 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, + 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, + 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, + 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, + 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, + 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, + 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, + 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, + 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, + 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, + 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, + 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, + 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, + 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, + 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, + 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, + 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, + 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, + 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, + 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, + 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, + 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, + 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, + 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, + 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, + 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, + 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, + 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, + 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, + 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, + 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, + 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, + 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, + 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, + 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, + 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, + 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, + 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, + 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, + 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, + 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, + 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, + 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, + 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, + 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, + 0x46, 
0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, + 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, + 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, + 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, + 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, + 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, + 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, + 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, + 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, + 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, + 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, + 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, + 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, + 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, + 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, + 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, + 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, + 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, + 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, + 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, + 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, + 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, + 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, + 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, + 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, + 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, + 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, + 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, + 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, + 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, + 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, + 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, + 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, + 0x78, 0xec, 0x00 +}; +static const u8 dec_assoc012[] __initconst = { + 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, + 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, + 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, + 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, + 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, + 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, + 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, + 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 +}; +static const u8 dec_nonce012[] __initconst = { + 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 +}; +static const u8 dec_key012[] __initconst = { + 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, + 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, + 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, + 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 +}; + +static const u8 dec_input013[] __initconst = { + 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, + 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, + 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, + 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, + 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, + 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, + 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, + 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, + 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, + 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, + 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, + 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, + 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, + 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, + 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, + 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, + 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, + 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, + 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, + 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, + 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 
0x3f, + 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, + 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, + 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, + 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, + 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, + 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, + 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, + 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, + 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, + 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, + 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, + 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, + 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, + 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, + 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, + 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, + 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, + 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, + 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, + 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, + 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, + 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, + 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, + 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, + 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, + 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, + 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, + 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, + 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, + 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, + 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, + 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, + 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, + 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, + 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, + 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, + 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, + 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, + 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, + 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, + 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, + 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, + 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, + 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, + 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, + 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, + 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, + 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, + 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, + 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, + 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, + 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, + 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, + 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, + 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, + 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, + 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, + 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, + 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, + 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, + 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, + 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, + 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, + 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, + 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, + 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, + 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, + 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, + 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, + 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, + 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 
0x3b, + 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, + 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, + 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, + 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, + 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, + 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, + 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, + 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, + 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, + 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, + 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, + 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, + 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, + 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, + 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, + 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, + 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, + 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, + 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, + 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, + 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, + 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, + 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, + 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, + 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, + 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, + 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, + 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, + 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, + 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, + 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, + 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, + 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, + 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, + 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, + 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, + 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, + 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, + 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, + 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, + 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, + 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, + 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, + 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, + 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, + 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, + 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, + 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, + 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, + 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, + 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, + 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, + 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, + 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, + 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, + 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, + 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, + 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, + 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, + 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, + 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, + 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, + 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, + 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, + 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, + 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, + 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, + 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, + 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, + 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, + 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 
0x5f, + 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, + 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, + 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, + 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, + 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, + 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, + 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, + 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, + 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, + 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, + 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, + 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, + 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, + 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, + 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, + 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, + 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, + 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, + 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, + 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, + 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, + 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, + 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, + 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, + 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, + 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, + 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, + 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, + 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, + 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, + 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, + 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, + 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, + 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, + 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, + 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, + 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, + 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, + 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, + 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, + 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, + 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, + 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, + 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, + 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, + 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, + 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, + 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, + 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, + 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, + 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, + 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, + 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, + 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, + 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, + 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, + 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, + 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, + 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, + 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, + 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, + 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, + 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, + 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, + 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, + 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, + 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, + 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, + 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, + 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, + 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 
0xcc, + 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, + 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, + 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, + 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, + 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, + 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, + 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, + 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, + 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, + 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, + 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, + 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, + 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, + 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, + 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, + 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, + 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, + 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, + 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, + 0x70, 0xcf, 0xd7 +}; +static const u8 dec_output013[] __initconst = { + 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, + 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, + 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, + 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, + 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, + 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, + 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, + 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, + 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, + 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, + 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, + 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, + 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, + 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, + 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, + 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, + 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, + 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, + 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, + 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, + 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, + 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, + 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, + 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, + 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, + 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, + 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, + 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, + 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, + 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, + 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, + 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, + 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, + 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, + 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, + 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, + 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, + 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, + 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, + 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, + 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, + 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, + 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, + 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, + 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, + 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, + 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, + 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, + 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, + 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, + 0x23, 0xb1, 0x9e, 0x9f, 
0x7c, 0x67, 0x66, 0xd9, + 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, + 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, + 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, + 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, + 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, + 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, + 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, + 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, + 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, + 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, + 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, + 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, + 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, + 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, + 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, + 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, + 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, + 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, + 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, + 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, + 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, + 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, + 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, + 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, + 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, + 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, + 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, + 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, + 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, + 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, + 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, + 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, + 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, + 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, + 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, + 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, + 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, + 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, + 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, + 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, + 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, + 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, + 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, + 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, + 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, + 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, + 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, + 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, + 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, + 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, + 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, + 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, + 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, + 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, + 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, + 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, + 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, + 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, + 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, + 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, + 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, + 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, + 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, + 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, + 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, + 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, + 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, + 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, + 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, + 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, + 0x4b, 0xdf, 0x13, 0xd3, 
0x2a, 0x22, 0xb4, 0x03, + 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, + 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, + 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, + 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, + 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, + 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, + 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, + 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, + 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, + 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, + 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, + 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, + 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, + 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, + 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, + 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, + 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, + 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, + 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, + 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, + 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, + 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, + 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, + 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, + 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, + 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, + 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, + 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, + 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, + 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, + 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, + 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, + 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, + 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, + 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, + 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, + 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, + 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, + 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, + 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, + 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, + 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, + 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, + 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, + 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, + 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, + 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, + 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, + 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, + 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, + 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, + 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, + 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, + 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, + 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, + 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, + 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, + 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, + 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, + 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, + 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, + 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, + 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, + 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, + 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, + 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, + 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, + 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, + 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, + 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, + 0x18, 0xd6, 0xb9, 0xba, 
0x65, 0x2b, 0xa3, 0x91, + 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, + 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, + 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, + 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, + 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, + 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, + 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, + 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, + 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, + 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, + 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, + 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, + 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, + 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, + 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, + 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, + 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, + 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, + 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, + 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, + 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, + 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, + 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, + 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, + 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, + 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, + 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, + 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, + 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, + 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, + 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, + 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, + 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, + 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, + 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, + 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, + 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, + 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, + 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, + 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, + 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, + 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, + 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, + 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, + 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, + 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, + 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, + 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, + 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, + 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, + 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, + 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, + 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, + 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, + 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, + 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, + 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, + 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, + 0x78, 0xec, 0x00 +}; +static const u8 dec_assoc013[] __initconst = { + 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, + 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, + 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, + 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, + 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, + 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, + 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, + 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 +}; +static const u8 dec_nonce013[] __initconst = { + 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 +}; +static const u8 dec_key013[] __initconst = { + 0xb3, 0x35, 
0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, + 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, + 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, + 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 +}; + +static const struct chacha20poly1305_testvec +chacha20poly1305_dec_vectors[] __initconst = { + { dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001, + sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) }, + { dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002, + sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) }, + { dec_input003, dec_output003, dec_assoc003, dec_nonce003, dec_key003, + sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) }, + { dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004, + sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) }, + { dec_input005, dec_output005, dec_assoc005, dec_nonce005, dec_key005, + sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) }, + { dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006, + sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) }, + { dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007, + sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) }, + { dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008, + sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) }, + { dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009, + sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) }, + { dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010, + sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) }, + { dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011, + sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) }, + { dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012, + sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) }, + { dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013, + sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013), + true } +}; + +static const u8 xenc_input001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 
0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 xenc_output001[] __initconst = { + 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77, + 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92, + 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18, + 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d, + 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e, + 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86, + 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2, + 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85, + 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09, + 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49, + 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd, + 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8, + 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f, + 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79, + 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8, + 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0, + 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88, + 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71, + 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91, + 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf, + 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89, + 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46, + 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e, + 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90, + 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b, + 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58, + 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54, + 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1, + 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73, + 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69, + 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05, + 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83, + 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13, + 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8, + 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5, + 0x9c +}; +static const u8 xenc_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 xenc_nonce001[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 +}; +static const u8 xenc_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const struct chacha20poly1305_testvec +xchacha20poly1305_enc_vectors[] __initconst = { + { xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001, + sizeof(xenc_input001), sizeof(xenc_assoc001), sizeof(xenc_nonce001) } +}; + +static const u8 xdec_input001[] __initconst = { + 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77, + 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92, + 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18, + 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d, + 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e, + 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86, + 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2, + 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85, + 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09, + 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49, + 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 
0xfd, + 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8, + 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f, + 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79, + 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8, + 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0, + 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88, + 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71, + 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91, + 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf, + 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89, + 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46, + 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e, + 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90, + 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b, + 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58, + 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54, + 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1, + 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73, + 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69, + 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05, + 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83, + 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13, + 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8, + 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5, + 0x9c +}; +static const u8 xdec_output001[] __initconst = { + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, + 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, + 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, + 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, + 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, + 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, + 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, + 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, + 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, + 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, + 0x9d +}; +static const u8 xdec_assoc001[] __initconst = { + 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x4e, 0x91 +}; +static const u8 xdec_nonce001[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 +}; +static const u8 xdec_key001[] __initconst = { + 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, + 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, + 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, + 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 +}; + +static const struct 
chacha20poly1305_testvec +xchacha20poly1305_dec_vectors[] __initconst = { + { xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001, + sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) } +}; + +static void __init +chacha20poly1305_selftest_encrypt_bignonce(u8 *dst, const u8 *src, + const size_t src_len, const u8 *ad, + const size_t ad_len, + const u8 nonce[12], + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + simd_context_t simd_context; + struct poly1305_ctx poly1305_state; + struct chacha20_ctx chacha20_state; + union { + u8 block0[POLY1305_KEY_SIZE]; + __le64 lens[2]; + } b = {{ 0 }}; + + simd_get(&simd_context); + chacha20_init(&chacha20_state, key, 0); + chacha20_state.counter[1] = get_unaligned_le32(nonce + 0); + chacha20_state.counter[2] = get_unaligned_le32(nonce + 4); + chacha20_state.counter[3] = get_unaligned_le32(nonce + 8); + chacha20(&chacha20_state, b.block0, b.block0, sizeof(b.block0), + &simd_context); + poly1305_init(&poly1305_state, b.block0); + poly1305_update(&poly1305_state, ad, ad_len, &simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, + &simd_context); + chacha20(&chacha20_state, dst, src, src_len, &simd_context); + poly1305_update(&poly1305_state, dst, src_len, &simd_context); + poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, + &simd_context); + b.lens[0] = cpu_to_le64(ad_len); + b.lens[1] = cpu_to_le64(src_len); + poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens), + &simd_context); + poly1305_final(&poly1305_state, dst + src_len, &simd_context); + simd_put(&simd_context); + memzero_explicit(&chacha20_state, sizeof(chacha20_state)); + memzero_explicit(&b, sizeof(b)); +} + +static void __init +chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 *nonce, const size_t nonce_len, + const u8 key[CHACHA20POLY1305_KEY_SIZE]) +{ + if (nonce_len == 8) + chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + get_unaligned_le64(nonce), key); + else if (nonce_len == 12) + chacha20poly1305_selftest_encrypt_bignonce(dst, src, src_len, + ad, ad_len, nonce, + key); + else + BUG(); +} + +static bool __init +decryption_success(bool func_ret, bool expect_failure, int memcmp_result) +{ + if (expect_failure) + return !func_ret; + return func_ret && !memcmp_result; +} + +static bool __init chacha20poly1305_selftest(void) +{ + enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 }; + size_t i, j, k, total_len; + u8 *computed_output = NULL, *input = NULL; + bool success = true, ret; + simd_context_t simd_context; + struct scatterlist sg_src[3]; + + computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); + input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); + if (!computed_output || !input) { + pr_err("chacha20poly1305 self-test malloc: FAIL\n"); + success = false; + goto out; + } + + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + chacha20poly1305_selftest_encrypt(computed_output, + chacha20poly1305_enc_vectors[i].input, + chacha20poly1305_enc_vectors[i].ilen, + chacha20poly1305_enc_vectors[i].assoc, + chacha20poly1305_enc_vectors[i].alen, + chacha20poly1305_enc_vectors[i].nonce, + chacha20poly1305_enc_vectors[i].nlen, + chacha20poly1305_enc_vectors[i].key); + if (memcmp(computed_output, + chacha20poly1305_enc_vectors[i].output, + chacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE)) { + pr_err("chacha20poly1305 encryption self-test %zu: 
FAIL\n", + i + 1); + success = false; + } + } + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { + if (chacha20poly1305_enc_vectors[i].nlen != 8) + continue; + memcpy(computed_output, chacha20poly1305_enc_vectors[i].input, + chacha20poly1305_enc_vectors[i].ilen); + sg_init_one(sg_src, computed_output, + chacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE); + ret = chacha20poly1305_encrypt_sg_inplace(sg_src, + chacha20poly1305_enc_vectors[i].ilen, + chacha20poly1305_enc_vectors[i].assoc, + chacha20poly1305_enc_vectors[i].alen, + get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce), + chacha20poly1305_enc_vectors[i].key, + &simd_context); + if (!ret || memcmp(computed_output, + chacha20poly1305_enc_vectors[i].output, + chacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE)) { + pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_put(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + ret = chacha20poly1305_decrypt(computed_output, + chacha20poly1305_dec_vectors[i].input, + chacha20poly1305_dec_vectors[i].ilen, + chacha20poly1305_dec_vectors[i].assoc, + chacha20poly1305_dec_vectors[i].alen, + get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce), + chacha20poly1305_dec_vectors[i].key); + if (!decryption_success(ret, + chacha20poly1305_dec_vectors[i].failure, + memcmp(computed_output, + chacha20poly1305_dec_vectors[i].output, + chacha20poly1305_dec_vectors[i].ilen - + POLY1305_MAC_SIZE))) { + pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { + memcpy(computed_output, chacha20poly1305_dec_vectors[i].input, + chacha20poly1305_dec_vectors[i].ilen); + sg_init_one(sg_src, computed_output, + chacha20poly1305_dec_vectors[i].ilen); + ret = chacha20poly1305_decrypt_sg_inplace(sg_src, + chacha20poly1305_dec_vectors[i].ilen, + chacha20poly1305_dec_vectors[i].assoc, + chacha20poly1305_dec_vectors[i].alen, + get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce), + chacha20poly1305_dec_vectors[i].key, &simd_context); + if (!decryption_success(ret, + chacha20poly1305_dec_vectors[i].failure, + memcmp(computed_output, chacha20poly1305_dec_vectors[i].output, + chacha20poly1305_dec_vectors[i].ilen - + POLY1305_MAC_SIZE))) { + pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + simd_put(&simd_context); + for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + xchacha20poly1305_encrypt(computed_output, + xchacha20poly1305_enc_vectors[i].input, + xchacha20poly1305_enc_vectors[i].ilen, + xchacha20poly1305_enc_vectors[i].assoc, + xchacha20poly1305_enc_vectors[i].alen, + xchacha20poly1305_enc_vectors[i].nonce, + xchacha20poly1305_enc_vectors[i].key); + if (memcmp(computed_output, + xchacha20poly1305_enc_vectors[i].output, + xchacha20poly1305_enc_vectors[i].ilen + + POLY1305_MAC_SIZE)) { + pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) { + memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); + ret = xchacha20poly1305_decrypt(computed_output, + xchacha20poly1305_dec_vectors[i].input, + xchacha20poly1305_dec_vectors[i].ilen, + 
xchacha20poly1305_dec_vectors[i].assoc, + xchacha20poly1305_dec_vectors[i].alen, + xchacha20poly1305_dec_vectors[i].nonce, + xchacha20poly1305_dec_vectors[i].key); + if (!decryption_success(ret, + xchacha20poly1305_dec_vectors[i].failure, + memcmp(computed_output, + xchacha20poly1305_dec_vectors[i].output, + xchacha20poly1305_dec_vectors[i].ilen - + POLY1305_MAC_SIZE))) { + pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n", + i + 1); + success = false; + } + } + + simd_get(&simd_context); + for (total_len = POLY1305_MAC_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST) + && total_len <= 1 << 10; ++total_len) { + for (i = 0; i <= total_len; ++i) { + for (j = i; j <= total_len; ++j) { + sg_init_table(sg_src, 3); + sg_set_buf(&sg_src[0], input, i); + sg_set_buf(&sg_src[1], input + i, j - i); + sg_set_buf(&sg_src[2], input + j, total_len - j); + memset(computed_output, 0, total_len); + memset(input, 0, total_len); + + if (!chacha20poly1305_encrypt_sg_inplace(sg_src, + total_len - POLY1305_MAC_SIZE, NULL, 0, + 0, enc_key001, &simd_context)) + goto chunkfail; + chacha20poly1305_encrypt(computed_output, + computed_output, + total_len - POLY1305_MAC_SIZE, NULL, 0, 0, + enc_key001); + if (memcmp(computed_output, input, total_len)) + goto chunkfail;; + if (!chacha20poly1305_decrypt(computed_output, + input, total_len, NULL, 0, 0, enc_key001)) + goto chunkfail; + for (k = 0; k < total_len - POLY1305_MAC_SIZE; ++k) { + if (computed_output[k]) + goto chunkfail; + } + if (!chacha20poly1305_decrypt_sg_inplace(sg_src, + total_len, NULL, 0, 0, enc_key001, + &simd_context)) + goto chunkfail; + for (k = 0; k < total_len - POLY1305_MAC_SIZE; ++k) { + if (input[k]) + goto chunkfail; + } + continue; + + chunkfail: + pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n", + total_len, i, j); + success = false; + } + + } + } + simd_put(&simd_context); + +out: + kfree(computed_output); + kfree(input); + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/curve25519.c b/net/wireguard/crypto/zinc/selftest/curve25519.c new file mode 100644 index 000000000000..0e3e3af06ba4 --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/curve25519.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +struct curve25519_test_vector { + u8 private[CURVE25519_KEY_SIZE]; + u8 public[CURVE25519_KEY_SIZE]; + u8 result[CURVE25519_KEY_SIZE]; + bool valid; +}; +static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = { + { + .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, + 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, + 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, + 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a }, + .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, + 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, + 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, + 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f }, + .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, + 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, + 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, + 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, + .valid = true + }, + { + .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, + 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, + 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, + 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb }, + .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, + 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, + 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, + 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a }, + .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, + 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, + 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, + 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, + .valid = true + }, + { + .private = { 1 }, + .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, + 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, + 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98, + 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f }, + .valid = true + }, + { + .private = { 1 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f, + 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, + 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3, + 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 }, + .valid = true + }, + { + .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, + 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, + 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, + 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 }, + .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, + 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, + 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, + 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, + .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, + 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, + 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, + 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, + .valid = true + }, + { + .private = { 1, 2, 3, 4 }, + .public = { 0 }, + .result = { 0 }, + .valid = false + }, + { + .private = { 2, 4, 6, 8 }, + .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 }, + .result = { 0 }, + 
.valid = false + }, + { + .private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f }, + .result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2, + 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57, + 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05, + 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 }, + .valid = true + }, + { + .private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 }, + .result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d, + 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12, + 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99, + 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c }, + .valid = true + }, + /* wycheproof - normal case */ + { + .private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda, + 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66, + 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3, + 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba }, + .public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5, + 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9, + 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e, + 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a }, + .result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5, + 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38, + 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e, + 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4, + 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5, + 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49, + 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 }, + .public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5, + 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8, + 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3, + 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 }, + .result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff, + 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d, + 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe, + 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9, + 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39, + 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5, + 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 }, + .public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f, + 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b, + 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c, + 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 }, + .result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53, + 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57, + 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0, + 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0x70, 0xe3, 0x4b, 
0xcb, 0xe1, 0xf4, 0x7f, 0xbc, + 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d, + 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67, + 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c }, + .public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97, + 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f, + 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45, + 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a }, + .result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93, + 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2, + 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44, + 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1, + 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95, + 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99, + 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d }, + .public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27, + 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07, + 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae, + 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c }, + .result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73, + 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2, + 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f, + 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 }, + .valid = true + }, + /* wycheproof - public key on twist */ + { + .private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9, + 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd, + 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b, + 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 }, + .public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5, + 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52, + 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8, + 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 }, + .result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86, + 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4, + 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6, + 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 }, + .valid = true + }, + /* wycheproof - public key = 0 */ + { + .private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11, + 0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac, + 0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b, + 0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc }, + .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key = 1 */ + { + .private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61, + 0x93, 0x0e, 0xb9, 0x45, 0x0b, 0x50, 0x70, 0xea, + 0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f, + 0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab }, + .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 
0x99, 0x04, + 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77, + 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90, + 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 }, + .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97, + 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9, + 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7, + 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36, + 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd, + 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c, + 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 }, + .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e, + 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b, + 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e, + 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed, + 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e, + 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd, + 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 }, + .public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff, + 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00, + 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 }, + .result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f, + 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1, + 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10, + 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3, + 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d, + 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00, + 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 }, + .public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00, + 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff, + 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f }, + .result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8, + 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4, + 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70, + 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3, + 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a, + 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e, + 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 }, + .public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57, + 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c, + 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59, + 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 }, + .valid = true + }, + /* wycheproof - edge case on twist */ + { + .private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f, + 
0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42, + 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9, + 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 }, + .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c, + 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5, + 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65, + 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6, + 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4, + 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8, + 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe }, + .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7, + 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca, + 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f, + 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa, + 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3, + 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52, + 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }, + .result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3, + 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e, + 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75, + 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26, + 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea, + 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00, + 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, + .result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8, + 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32, + 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87, + 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c, + 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6, + 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb, + 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 }, + .public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, + 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff, + 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f }, + .result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85, + 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f, + 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0, + 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 
0xc4, 0x4b, 0x38, + 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b, + 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c, + 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b, + 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81, + 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3, + 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d, + 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42, + 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98, + 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f }, + .result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c, + 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9, + 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89, + 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 }, + .valid = true + }, + /* wycheproof - edge case for public key */ + { + .private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29, + 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6, + 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c, + 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f }, + .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75, + 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89, + 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c, + 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f }, + .valid = true + }, + /* wycheproof - public key with low order */ + { + .private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30, + 0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69, + 0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14, + 0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 }, + .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3, + 0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b, + 0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef, + 0xf5, 0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 }, + .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0xa0, 
0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20, + 0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf, + 0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43, + 0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 }, + .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f, + 0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65, + 0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06, + 0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 }, + .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe, + 0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9, + 0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f, + 0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f }, + .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8, + 0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85, + 0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c, + 0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c }, + .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8, + 0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d, + 0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0, + 0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 }, + .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ 
+ { + .private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a, + 0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b, + 0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67, + 0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 }, + .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46, + 0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02, + 0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3, + 0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 }, + .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30, + 0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1, + 0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6, + 0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe }, + .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f, + 0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77, + 0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0, + 0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c }, + .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - public key with low order */ + { + .private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e, + 0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f, + 0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77, + 0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b }, + .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = false + }, + /* wycheproof - 
public key >= p */ + { + .private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc, + 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1, + 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d, + 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae }, + .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09, + 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde, + 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1, + 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81, + 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a, + 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99, + 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d }, + .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17, + 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35, + 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55, + 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11, + 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b, + 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9, + 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 }, + .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53, + 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e, + 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6, + 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78, + 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2, + 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd, + 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb, + 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40, + 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2, + 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9, + 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60, + 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13, + 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 }, + .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c, + 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3, + 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65, + 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + 
.private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a, + 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7, + 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11, + 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e }, + .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82, + 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4, + 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c, + 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e, + 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a, + 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d, + 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f }, + .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, + .result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2, + 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60, + 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25, + 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb, + 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97, + 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c, + 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 }, + .public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23, + 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8, + 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69, + 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a, + 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23, + 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b, + 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 }, + .public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b, + 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44, + 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37, + 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80, + 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d, + 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b, + 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 }, + .public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63, + 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae, + 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f, + 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x90, 0x1b, 
0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0, + 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd, + 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49, + 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 }, + .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41, + 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0, + 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf, + 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9, + 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa, + 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5, + 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e }, + .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47, + 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3, + 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b, + 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8, + 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98, + 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0, + 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 }, + .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0, + 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1, + 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a, + 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02, + 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4, + 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68, + 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d }, + .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f, + 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2, + 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95, + 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7, + 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06, + 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9, + 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 }, + .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5, + 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0, + 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80, + 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 }, + .valid = true + }, + /* wycheproof - public key >= p */ + { + .private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 
0xdd, + 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4, + 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04, + 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 }, + .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0, + 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac, + 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48, + 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 }, + .valid = true + }, + /* wycheproof - RFC 7748 */ + { + .private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, + 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, + 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, + 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 }, + .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, + 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, + 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, + 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, + .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, + 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, + 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, + 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, + .valid = true + }, + /* wycheproof - RFC 7748 */ + { + .private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, + 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5, + 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, + 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d }, + .public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, + 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c, + 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, + 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 }, + .result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, + 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, + 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, + 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde, + 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8, + 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4, + 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 }, + .result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d, + 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64, + 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd, + 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 }, + .result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8, + 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf, + 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94, + 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d }, + .result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84, + 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62, + 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e, + 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 }, + .result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8, + 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58, + 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02, + 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 }, + .result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9, + 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a, + 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44, + 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b }, + .result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd, + 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22, + 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56, + 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b }, + .result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 
0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53, + 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f, + 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18, + 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f }, + .result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55, + 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b, + 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79, + 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f }, + .result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39, + 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c, + 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb, + 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e }, + .result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04, + 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10, + 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58, + 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c }, + .result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3, + 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c, + 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88, + 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 }, + .result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + 
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a, + 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49, + 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a, + 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }, + .valid = true + }, + /* wycheproof - edge case for shared secret */ + { + .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, + 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, + 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, + 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, + .public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca, + 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c, + 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb, + 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 }, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58, + 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7, + 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01, + 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d }, + .result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d, + 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27, + 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b, + 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26, + 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2, + 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44, + 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e }, + .result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6, + 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d, + 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e, + 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61, + 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67, + 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e, + 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c }, + .result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65, + 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce, + 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0, + 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 }, + .valid = true + }, + /* wycheproof - checking for overflow */ 
+ { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee, + 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d, + 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14, + 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 }, + .result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e, + 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc, + 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5, + 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b }, + .valid = true + }, + /* wycheproof - checking for overflow */ + { + .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, + 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, + 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, + 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, + .public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4, + 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5, + 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c, + 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 }, + .result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b, + 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93, + 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f, + 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 }, + .valid = true + }, + /* wycheproof - private key == -1 (mod order) */ + { + .private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8, + 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 }, + .public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, + 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, + 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, + 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, + .result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, + 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, + 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, + 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, + .valid = true + }, + /* wycheproof - private key == 1 (mod order) on twist */ + { + .private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef, + 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f }, + .public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, + 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, + 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, + 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, + .result = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, + 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, + 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, + 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, + .valid = true + } +}; + +static bool __init curve25519_selftest(void) +{ + bool success = true, ret, ret2; + size_t i = 0, j; + u8 in[CURVE25519_KEY_SIZE]; + u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE]; + + for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) { + memset(out, 0, CURVE25519_KEY_SIZE); + ret = curve25519(out, curve25519_test_vectors[i].private, + curve25519_test_vectors[i].public); + if (ret != curve25519_test_vectors[i].valid || + memcmp(out, curve25519_test_vectors[i].result, + CURVE25519_KEY_SIZE)) { + pr_err("curve25519 self-test %zu: FAIL\n", i + 1); + success = false; + } + } + + for (i = 0; i < 5; ++i) { + get_random_bytes(in, sizeof(in)); + ret = curve25519_generate_public(out, in); + ret2 = 
curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 }); + if (ret != ret2 || memcmp(out, out2, CURVE25519_KEY_SIZE)) { + pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x", + i + 1); + for (j = CURVE25519_KEY_SIZE; j-- > 0;) + printk(KERN_CONT "%02x", in[j]); + printk(KERN_CONT "\n"); + success = false; + } + } + + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/poly1305.c b/net/wireguard/crypto/zinc/selftest/poly1305.c new file mode 100644 index 000000000000..b4d7a9c2f6ec --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/poly1305.c @@ -0,0 +1,1107 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +struct poly1305_testvec { + const u8 *input, *output, *key; + size_t ilen; +}; + +/* RFC7539 */ +static const u8 input01[] __initconst = { + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x70, 0x68, 0x69, 0x63, 0x20, 0x46, 0x6f, + 0x72, 0x75, 0x6d, 0x20, 0x52, 0x65, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x20, 0x47, 0x72, 0x6f, + 0x75, 0x70 +}; +static const u8 output01[] __initconst = { + 0xa8, 0x06, 0x1d, 0xc1, 0x30, 0x51, 0x36, 0xc6, + 0xc2, 0x2b, 0x8b, 0xaf, 0x0c, 0x01, 0x27, 0xa9 +}; +static const u8 key01[] __initconst = { + 0x85, 0xd6, 0xbe, 0x78, 0x57, 0x55, 0x6d, 0x33, + 0x7f, 0x44, 0x52, 0xfe, 0x42, 0xd5, 0x06, 0xa8, + 0x01, 0x03, 0x80, 0x8a, 0xfb, 0x0d, 0xb2, 0xfd, + 0x4a, 0xbf, 0xf6, 0xaf, 0x41, 0x49, 0xf5, 0x1b +}; + +/* "The Poly1305-AES message-authentication code" */ +static const u8 input02[] __initconst = { + 0xf3, 0xf6 +}; +static const u8 output02[] __initconst = { + 0xf4, 0xc6, 0x33, 0xc3, 0x04, 0x4f, 0xc1, 0x45, + 0xf8, 0x4f, 0x33, 0x5c, 0xb8, 0x19, 0x53, 0xde +}; +static const u8 key02[] __initconst = { + 0x85, 0x1f, 0xc4, 0x0c, 0x34, 0x67, 0xac, 0x0b, + 0xe0, 0x5c, 0xc2, 0x04, 0x04, 0xf3, 0xf7, 0x00, + 0x58, 0x0b, 0x3b, 0x0f, 0x94, 0x47, 0xbb, 0x1e, + 0x69, 0xd0, 0x95, 0xb5, 0x92, 0x8b, 0x6d, 0xbc +}; + +static const u8 input03[] __initconst = { }; +static const u8 output03[] __initconst = { + 0xdd, 0x3f, 0xab, 0x22, 0x51, 0xf1, 0x1a, 0xc7, + 0x59, 0xf0, 0x88, 0x71, 0x29, 0xcc, 0x2e, 0xe7 +}; +static const u8 key03[] __initconst = { + 0xa0, 0xf3, 0x08, 0x00, 0x00, 0xf4, 0x64, 0x00, + 0xd0, 0xc7, 0xe9, 0x07, 0x6c, 0x83, 0x44, 0x03, + 0xdd, 0x3f, 0xab, 0x22, 0x51, 0xf1, 0x1a, 0xc7, + 0x59, 0xf0, 0x88, 0x71, 0x29, 0xcc, 0x2e, 0xe7 +}; + +static const u8 input04[] __initconst = { + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output04[] __initconst = { + 0x0e, 0xe1, 0xc1, 0x6b, 0xb7, 0x3f, 0x0f, 0x4f, + 0xd1, 0x98, 0x81, 0x75, 0x3c, 0x01, 0xcd, 0xbe +}; +static const u8 key04[] __initconst = { + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef +}; + +static const u8 input05[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9 +}; +static const u8 output05[] __initconst = { + 
0x51, 0x54, 0xad, 0x0d, 0x2c, 0xb2, 0x6e, 0x01, + 0x27, 0x4f, 0xc5, 0x11, 0x48, 0x49, 0x1f, 0x1b +}; +static const u8 key05[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +/* self-generated vectors exercise "significant" lengths, such that they + * are handled by different code paths */ +static const u8 input06[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf +}; +static const u8 output06[] __initconst = { + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66 +}; +static const u8 key06[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input07[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67 +}; +static const u8 output07[] __initconst = { + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61 +}; +static const u8 key07[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input08[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output08[] __initconst = { + 0xbb, 0xb6, 0x13, 0xb2, 0xb6, 0xd7, 0x53, 0xba, + 0x07, 0x39, 0x5b, 0x91, 0x6a, 0xae, 0xce, 0x15 +}; +static const u8 key08[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input09[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 
0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24 +}; +static const u8 output09[] __initconst = { + 0xc7, 0x94, 0xd7, 0x05, 0x7d, 0x17, 0x78, 0xc4, + 0xbb, 0xee, 0x0a, 0x39, 0xb3, 0xd9, 0x73, 0x42 +}; +static const u8 key09[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input10[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output10[] __initconst = { + 0xff, 0xbc, 0xb9, 0xb3, 0x71, 0x42, 0x31, 0x52, + 0xd7, 0xfc, 0xa5, 0xad, 0x04, 0x2f, 0xba, 0xa9 +}; +static const u8 key10[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input11[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66 +}; +static const u8 output11[] __initconst = { + 0x06, 0x9e, 0xd6, 0xb8, 0xef, 0x0f, 0x20, 0x7b, + 0x3e, 0x24, 0x3b, 0xb1, 0x01, 0x9f, 0xe6, 0x32 +}; +static const u8 key11[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input12[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 
0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61 +}; +static const u8 output12[] __initconst = { + 0xcc, 0xa3, 0x39, 0xd9, 0xa4, 0x5f, 0xa2, 0x36, + 0x8c, 0x2c, 0x68, 0xb3, 0xa4, 0x17, 0x91, 0x33 +}; +static const u8 key12[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input13[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61, + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36 +}; +static const u8 output13[] __initconst = { + 0x53, 0xf6, 0xe8, 0x28, 0xa2, 0xf0, 0xfe, 0x0e, + 0xe8, 0x15, 0xbf, 0x0b, 0xd5, 0x84, 0x1a, 0x34 +}; +static const u8 key13[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +static const u8 input14[] __initconst = { + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 
0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61, + 0xab, 0x08, 0x12, 0x72, 0x4a, 0x7f, 0x1e, 0x34, + 0x27, 0x42, 0xcb, 0xed, 0x37, 0x4d, 0x94, 0xd1, + 0x36, 0xc6, 0xb8, 0x79, 0x5d, 0x45, 0xb3, 0x81, + 0x98, 0x30, 0xf2, 0xc0, 0x44, 0x91, 0xfa, 0xf0, + 0x99, 0x0c, 0x62, 0xe4, 0x8b, 0x80, 0x18, 0xb2, + 0xc3, 0xe4, 0xa0, 0xfa, 0x31, 0x34, 0xcb, 0x67, + 0xfa, 0x83, 0xe1, 0x58, 0xc9, 0x94, 0xd9, 0x61, + 0xc4, 0xcb, 0x21, 0x09, 0x5c, 0x1b, 0xf9, 0xaf, + 0x48, 0x44, 0x3d, 0x0b, 0xb0, 0xd2, 0x11, 0x09, + 0xc8, 0x9a, 0x10, 0x0b, 0x5c, 0xe2, 0xc2, 0x08, + 0x83, 0x14, 0x9c, 0x69, 0xb5, 0x61, 0xdd, 0x88, + 0x29, 0x8a, 0x17, 0x98, 0xb1, 0x07, 0x16, 0xef, + 0x66, 0x3c, 0xea, 0x19, 0x0f, 0xfb, 0x83, 0xd8, + 0x95, 0x93, 0xf3, 0xf4, 0x76, 0xb6, 0xbc, 0x24, + 0xd7, 0xe6, 0x79, 0x10, 0x7e, 0xa2, 0x6a, 0xdb, + 0x8c, 0xaf, 0x66, 0x52, 0xd0, 0x65, 0x61, 0x36, + 0x81, 0x20, 0x59, 0xa5, 0xda, 0x19, 0x86, 0x37, + 0xca, 0xc7, 0xc4, 0xa6, 0x31, 0xbe, 0xe4, 0x66, + 0x5b, 0x88, 0xd7, 0xf6, 0x22, 0x8b, 0x11, 0xe2, + 0xe2, 0x85, 0x79, 0xa5, 0xc0, 0xc1, 0xf7, 0x61 +}; +static const u8 output14[] __initconst = { + 0xb8, 0x46, 0xd4, 0x4e, 0x9b, 0xbd, 0x53, 0xce, + 0xdf, 0xfb, 0xfb, 0xb6, 0xb7, 0xfa, 0x49, 0x33 +}; +static const u8 key14[] __initconst = { + 0x12, 0x97, 0x6a, 0x08, 0xc4, 0x42, 0x6d, 0x0c, + 0xe8, 0xa8, 0x24, 0x07, 0xc4, 0xf4, 0x82, 0x07, + 0x80, 0xf8, 0xc2, 0x0a, 0xa7, 0x12, 0x02, 0xd1, + 0xe2, 0x91, 0x79, 0xcb, 0xcb, 0x55, 0x5a, 0x57 +}; + +/* 4th power of the key spills to 131th bit in SIMD key setup */ +static const u8 input15[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 output15[] __initconst = { + 0x07, 0x14, 0x5a, 0x4c, 0x02, 0xfe, 0x5f, 0xa3, + 0x20, 0x36, 0xde, 0x68, 0xfa, 0xbe, 0x90, 0x66 +}; +static const u8 key15[] __initconst = { + 0xad, 0x62, 0x81, 0x07, 0xe8, 0x35, 0x1d, 0x0f, + 0x2c, 0x23, 0x1a, 0x05, 0xdc, 0x4a, 0x41, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* OpenSSL's poly1305_ieee754.c failed this in final stage */ +static const u8 input16[] __initconst = { + 0x84, 0x23, 0x64, 0xe1, 0x56, 0x33, 0x6c, 0x09, + 0x98, 0xb9, 0x33, 0xa6, 0x23, 0x77, 0x26, 0x18, + 0x0d, 0x9e, 0x3f, 0xdc, 0xbd, 0xe4, 0xcd, 0x5d, + 0x17, 0x08, 0x0f, 0xc3, 0xbe, 0xb4, 0x96, 0x14, + 0xd7, 0x12, 0x2c, 0x03, 0x74, 0x63, 0xff, 0x10, + 0x4d, 0x73, 0xf1, 0x9c, 0x12, 0x70, 0x46, 0x28, + 0xd4, 0x17, 0xc4, 0xc5, 0x4a, 0x3f, 0xe3, 0x0d, + 0x3c, 0x3d, 0x77, 0x14, 0x38, 0x2d, 0x43, 0xb0, + 0x38, 0x2a, 0x50, 0xa5, 0xde, 0xe5, 0x4b, 0xe8, + 0x44, 0xb0, 0x76, 0xe8, 0xdf, 0x88, 0x20, 0x1a, + 0x1c, 0xd4, 0x3b, 0x90, 0xeb, 0x21, 0x64, 0x3f, + 0xa9, 0x6f, 0x39, 0xb5, 0x18, 0xaa, 0x83, 0x40, + 0xc9, 0x42, 0xff, 0x3c, 0x31, 0xba, 0xf7, 0xc9, + 0xbd, 0xbf, 0x0f, 0x31, 0xae, 0x3f, 0xa0, 0x96, + 0xbf, 0x8c, 0x63, 0x03, 0x06, 0x09, 0x82, 0x9f, + 0xe7, 0x2e, 0x17, 0x98, 0x24, 0x89, 0x0b, 0xc8, + 0xe0, 0x8c, 0x31, 0x5c, 0x1c, 0xce, 0x2a, 0x83, + 0x14, 0x4d, 0xbb, 0xff, 0x09, 0xf7, 0x4e, 0x3e, + 0xfc, 0x77, 0x0b, 0x54, 0xd0, 0x98, 0x4a, 0x8f, + 0x19, 0xb1, 0x47, 0x19, 0xe6, 0x36, 0x35, 0x64, + 0x1d, 0x6b, 0x1e, 0xed, 0xf6, 0x3e, 0xfb, 0xf0, + 0x80, 0xe1, 0x78, 0x3d, 0x32, 0x44, 0x54, 0x12, + 0x11, 0x4c, 0x20, 0xde, 0x0b, 0x83, 0x7a, 0x0d, + 0xfa, 0x33, 0xd6, 0xb8, 0x28, 0x25, 0xff, 0xf4, + 0x4c, 0x9a, 0x70, 0xea, 0x54, 0xce, 0x47, 0xf0, + 0x7d, 0xf6, 0x98, 0xe6, 0xb0, 0x33, 0x23, 0xb5, + 0x30, 0x79, 0x36, 0x4a, 0x5f, 0xc3, 0xe9, 0xdd, + 0x03, 0x43, 0x92, 0xbd, 0xde, 0x86, 0xdc, 0xcd, + 0xda, 0x94, 0x32, 0x1c, 0x5e, 0x44, 0x06, 0x04, + 0x89, 0x33, 0x6c, 0xb6, 0x5b, 0xf3, 0x98, 0x9c, + 0x36, 0xf7, 0x28, 0x2c, 0x2f, 0x5d, 0x2b, 0x88, + 0x2c, 0x17, 0x1e, 0x74 +}; +static const u8 output16[] __initconst = { + 0xf2, 0x48, 0x31, 0x2e, 0x57, 0x8d, 0x9d, 0x58, + 0xf8, 0xb7, 0xbb, 0x4d, 0x19, 0x10, 0x54, 0x31 +}; +static const u8 key16[] __initconst = { + 0x95, 0xd5, 0xc0, 0x05, 0x50, 0x3e, 0x51, 0x0d, + 0x8c, 0xd0, 0xaa, 0x07, 0x2c, 0x4a, 0x4d, 0x06, + 0x6e, 0xab, 0xc5, 0x2d, 0x11, 0x65, 0x3d, 0xf4, + 0x7f, 0xbf, 0x63, 0xab, 0x19, 0x8b, 0xcc, 0x26 +}; + +/* AVX2 in OpenSSL's poly1305-x86.pl failed this with 176+32 split */ +static const u8 input17[] __initconst = { + 0x24, 0x8a, 0xc3, 0x10, 0x85, 0xb6, 0xc2, 0xad, + 0xaa, 0xa3, 0x82, 0x59, 0xa0, 0xd7, 0x19, 0x2c, + 0x5c, 0x35, 0xd1, 0xbb, 0x4e, 0xf3, 0x9a, 0xd9, + 0x4c, 0x38, 0xd1, 0xc8, 0x24, 0x79, 0xe2, 0xdd, + 0x21, 0x59, 0xa0, 0x77, 0x02, 0x4b, 0x05, 0x89, + 0xbc, 0x8a, 0x20, 0x10, 0x1b, 0x50, 0x6f, 0x0a, + 0x1a, 0xd0, 0xbb, 0xab, 0x76, 0xe8, 0x3a, 0x83, + 0xf1, 0xb9, 0x4b, 0xe6, 0xbe, 0xae, 0x74, 0xe8, + 0x74, 0xca, 0xb6, 0x92, 0xc5, 0x96, 0x3a, 0x75, + 0x43, 0x6b, 0x77, 0x61, 0x21, 0xec, 0x9f, 0x62, + 0x39, 0x9a, 0x3e, 0x66, 0xb2, 0xd2, 0x27, 0x07, + 
0xda, 0xe8, 0x19, 0x33, 0xb6, 0x27, 0x7f, 0x3c, + 0x85, 0x16, 0xbc, 0xbe, 0x26, 0xdb, 0xbd, 0x86, + 0xf3, 0x73, 0x10, 0x3d, 0x7c, 0xf4, 0xca, 0xd1, + 0x88, 0x8c, 0x95, 0x21, 0x18, 0xfb, 0xfb, 0xd0, + 0xd7, 0xb4, 0xbe, 0xdc, 0x4a, 0xe4, 0x93, 0x6a, + 0xff, 0x91, 0x15, 0x7e, 0x7a, 0xa4, 0x7c, 0x54, + 0x44, 0x2e, 0xa7, 0x8d, 0x6a, 0xc2, 0x51, 0xd3, + 0x24, 0xa0, 0xfb, 0xe4, 0x9d, 0x89, 0xcc, 0x35, + 0x21, 0xb6, 0x6d, 0x16, 0xe9, 0xc6, 0x6a, 0x37, + 0x09, 0x89, 0x4e, 0x4e, 0xb0, 0xa4, 0xee, 0xdc, + 0x4a, 0xe1, 0x94, 0x68, 0xe6, 0x6b, 0x81, 0xf2, + 0x71, 0x35, 0x1b, 0x1d, 0x92, 0x1e, 0xa5, 0x51, + 0x04, 0x7a, 0xbc, 0xc6, 0xb8, 0x7a, 0x90, 0x1f, + 0xde, 0x7d, 0xb7, 0x9f, 0xa1, 0x81, 0x8c, 0x11, + 0x33, 0x6d, 0xbc, 0x07, 0x24, 0x4a, 0x40, 0xeb +}; +static const u8 output17[] __initconst = { + 0xbc, 0x93, 0x9b, 0xc5, 0x28, 0x14, 0x80, 0xfa, + 0x99, 0xc6, 0xd6, 0x8c, 0x25, 0x8e, 0xc4, 0x2f +}; +static const u8 key17[] __initconst = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* test vectors from Google */ +static const u8 input18[] __initconst = { }; +static const u8 output18[] __initconst = { + 0x47, 0x10, 0x13, 0x0e, 0x9f, 0x6f, 0xea, 0x8d, + 0x72, 0x29, 0x38, 0x50, 0xa6, 0x67, 0xd8, 0x6c +}; +static const u8 key18[] __initconst = { + 0xc8, 0xaf, 0xaa, 0xc3, 0x31, 0xee, 0x37, 0x2c, + 0xd6, 0x08, 0x2d, 0xe1, 0x34, 0x94, 0x3b, 0x17, + 0x47, 0x10, 0x13, 0x0e, 0x9f, 0x6f, 0xea, 0x8d, + 0x72, 0x29, 0x38, 0x50, 0xa6, 0x67, 0xd8, 0x6c +}; + +static const u8 input19[] __initconst = { + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, + 0x72, 0x6c, 0x64, 0x21 +}; +static const u8 output19[] __initconst = { + 0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, + 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0 +}; +static const u8 key19[] __initconst = { + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, + 0x33, 0x32, 0x2d, 0x62, 0x79, 0x74, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x50, 0x6f, 0x6c, 0x79, 0x31, 0x33, 0x30, 0x35 +}; + +static const u8 input20[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output20[] __initconst = { + 0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, + 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07 +}; +static const u8 key20[] __initconst = { + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, + 0x33, 0x32, 0x2d, 0x62, 0x79, 0x74, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x50, 0x6f, 0x6c, 0x79, 0x31, 0x33, 0x30, 0x35 +}; + +static const u8 input21[] __initconst = { + 0x89, 0xda, 0xb8, 0x0b, 0x77, 0x17, 0xc1, 0xdb, + 0x5d, 0xb4, 0x37, 0x86, 0x0a, 0x3f, 0x70, 0x21, + 0x8e, 0x93, 0xe1, 0xb8, 0xf4, 0x61, 0xfb, 0x67, + 0x7f, 0x16, 0xf3, 0x5f, 0x6f, 0x87, 0xe2, 0xa9, + 0x1c, 0x99, 0xbc, 0x3a, 0x47, 0xac, 0xe4, 0x76, + 0x40, 0xcc, 0x95, 0xc3, 0x45, 0xbe, 0x5e, 0xcc, + 0xa5, 0xa3, 0x52, 0x3c, 0x35, 0xcc, 0x01, 0x89, + 0x3a, 0xf0, 0xb6, 0x4a, 0x62, 0x03, 0x34, 0x27, + 0x03, 0x72, 0xec, 0x12, 0x48, 0x2d, 0x1b, 0x1e, + 0x36, 0x35, 0x61, 0x69, 0x8a, 0x57, 0x8b, 0x35, + 0x98, 0x03, 0x49, 0x5b, 0xb4, 0xe2, 0xef, 0x19, + 0x30, 0xb1, 0x7a, 0x51, 0x90, 0xb5, 0x80, 0xf1, + 0x41, 0x30, 0x0d, 0xf3, 0x0a, 0xdb, 0xec, 0xa2, + 0x8f, 0x64, 0x27, 0xa8, 0xbc, 0x1a, 0x99, 0x9f, + 0xd5, 0x1c, 0x55, 0x4a, 0x01, 0x7d, 
0x09, 0x5d, + 0x8c, 0x3e, 0x31, 0x27, 0xda, 0xf9, 0xf5, 0x95 +}; +static const u8 output21[] __initconst = { + 0xc8, 0x5d, 0x15, 0xed, 0x44, 0xc3, 0x78, 0xd6, + 0xb0, 0x0e, 0x23, 0x06, 0x4c, 0x7b, 0xcd, 0x51 +}; +static const u8 key21[] __initconst = { + 0x2d, 0x77, 0x3b, 0xe3, 0x7a, 0xdb, 0x1e, 0x4d, + 0x68, 0x3b, 0xf0, 0x07, 0x5e, 0x79, 0xc4, 0xee, + 0x03, 0x79, 0x18, 0x53, 0x5a, 0x7f, 0x99, 0xcc, + 0xb7, 0x04, 0x0f, 0xb5, 0xf5, 0xf4, 0x3a, 0xea +}; + +static const u8 input22[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x17, 0x03, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x06, 0xdb, 0x1f, 0x1f, 0x36, 0x8d, 0x69, 0x6a, + 0x81, 0x0a, 0x34, 0x9c, 0x0c, 0x71, 0x4c, 0x9a, + 0x5e, 0x78, 0x50, 0xc2, 0x40, 0x7d, 0x72, 0x1a, + 0xcd, 0xed, 0x95, 0xe0, 0x18, 0xd7, 0xa8, 0x52, + 0x66, 0xa6, 0xe1, 0x28, 0x9c, 0xdb, 0x4a, 0xeb, + 0x18, 0xda, 0x5a, 0xc8, 0xa2, 0xb0, 0x02, 0x6d, + 0x24, 0xa5, 0x9a, 0xd4, 0x85, 0x22, 0x7f, 0x3e, + 0xae, 0xdb, 0xb2, 0xe7, 0xe3, 0x5e, 0x1c, 0x66, + 0xcd, 0x60, 0xf9, 0xab, 0xf7, 0x16, 0xdc, 0xc9, + 0xac, 0x42, 0x68, 0x2d, 0xd7, 0xda, 0xb2, 0x87, + 0xa7, 0x02, 0x4c, 0x4e, 0xef, 0xc3, 0x21, 0xcc, + 0x05, 0x74, 0xe1, 0x67, 0x93, 0xe3, 0x7c, 0xec, + 0x03, 0xc5, 0xbd, 0xa4, 0x2b, 0x54, 0xc1, 0x14, + 0xa8, 0x0b, 0x57, 0xaf, 0x26, 0x41, 0x6c, 0x7b, + 0xe7, 0x42, 0x00, 0x5e, 0x20, 0x85, 0x5c, 0x73, + 0xe2, 0x1d, 0xc8, 0xe2, 0xed, 0xc9, 0xd4, 0x35, + 0xcb, 0x6f, 0x60, 0x59, 0x28, 0x00, 0x11, 0xc2, + 0x70, 0xb7, 0x15, 0x70, 0x05, 0x1c, 0x1c, 0x9b, + 0x30, 0x52, 0x12, 0x66, 0x20, 0xbc, 0x1e, 0x27, + 0x30, 0xfa, 0x06, 0x6c, 0x7a, 0x50, 0x9d, 0x53, + 0xc6, 0x0e, 0x5a, 0xe1, 0xb4, 0x0a, 0xa6, 0xe3, + 0x9e, 0x49, 0x66, 0x92, 0x28, 0xc9, 0x0e, 0xec, + 0xb4, 0xa5, 0x0d, 0xb3, 0x2a, 0x50, 0xbc, 0x49, + 0xe9, 0x0b, 0x4f, 0x4b, 0x35, 0x9a, 0x1d, 0xfd, + 0x11, 0x74, 0x9c, 0xd3, 0x86, 0x7f, 0xcf, 0x2f, + 0xb7, 0xbb, 0x6c, 0xd4, 0x73, 0x8f, 0x6a, 0x4a, + 0xd6, 0xf7, 0xca, 0x50, 0x58, 0xf7, 0x61, 0x88, + 0x45, 0xaf, 0x9f, 0x02, 0x0f, 0x6c, 0x3b, 0x96, + 0x7b, 0x8f, 0x4c, 0xd4, 0xa9, 0x1e, 0x28, 0x13, + 0xb5, 0x07, 0xae, 0x66, 0xf2, 0xd3, 0x5c, 0x18, + 0x28, 0x4f, 0x72, 0x92, 0x18, 0x60, 0x62, 0xe1, + 0x0f, 0xd5, 0x51, 0x0d, 0x18, 0x77, 0x53, 0x51, + 0xef, 0x33, 0x4e, 0x76, 0x34, 0xab, 0x47, 0x43, + 0xf5, 0xb6, 0x8f, 0x49, 0xad, 0xca, 0xb3, 0x84, + 0xd3, 0xfd, 0x75, 0xf7, 0x39, 0x0f, 0x40, 0x06, + 0xef, 0x2a, 0x29, 0x5c, 0x8c, 0x7a, 0x07, 0x6a, + 0xd5, 0x45, 0x46, 0xcd, 0x25, 0xd2, 0x10, 0x7f, + 0xbe, 0x14, 0x36, 0xc8, 0x40, 0x92, 0x4a, 0xae, + 0xbe, 0x5b, 0x37, 0x08, 0x93, 0xcd, 0x63, 0xd1, + 0x32, 0x5b, 0x86, 0x16, 0xfc, 0x48, 0x10, 0x88, + 0x6b, 0xc1, 0x52, 0xc5, 0x32, 0x21, 0xb6, 0xdf, + 0x37, 0x31, 0x19, 0x39, 0x32, 0x55, 0xee, 0x72, + 0xbc, 0xaa, 0x88, 0x01, 0x74, 0xf1, 0x71, 0x7f, + 0x91, 0x84, 0xfa, 0x91, 0x64, 0x6f, 0x17, 0xa2, + 0x4a, 0xc5, 0x5d, 0x16, 0xbf, 0xdd, 0xca, 0x95, + 0x81, 0xa9, 0x2e, 0xda, 0x47, 0x92, 0x01, 0xf0, + 0xed, 0xbf, 0x63, 0x36, 0x00, 0xd6, 0x06, 0x6d, + 0x1a, 0xb3, 0x6d, 0x5d, 0x24, 0x15, 0xd7, 0x13, + 0x51, 0xbb, 0xcd, 0x60, 0x8a, 0x25, 0x10, 0x8d, + 0x25, 0x64, 0x19, 0x92, 0xc1, 0xf2, 0x6c, 0x53, + 0x1c, 0xf9, 0xf9, 0x02, 0x03, 0xbc, 0x4c, 0xc1, + 0x9f, 0x59, 0x27, 0xd8, 0x34, 0xb0, 0xa4, 0x71, + 0x16, 0xd3, 0x88, 0x4b, 0xbb, 0x16, 0x4b, 0x8e, + 0xc8, 0x83, 0xd1, 0xac, 0x83, 0x2e, 0x56, 0xb3, + 0x91, 0x8a, 0x98, 0x60, 0x1a, 0x08, 0xd1, 0x71, + 0x88, 0x15, 0x41, 0xd5, 0x94, 0xdb, 0x39, 0x9c, + 0x6a, 0xe6, 0x15, 0x12, 0x21, 0x74, 0x5a, 0xec, + 0x81, 0x4c, 0x45, 0xb0, 0xb0, 0x5b, 0x56, 0x54, + 0x36, 0xfd, 0x6f, 0x13, 0x7a, 0xa1, 0x0a, 0x0c, + 
0x0b, 0x64, 0x37, 0x61, 0xdb, 0xd6, 0xf9, 0xa9, + 0xdc, 0xb9, 0x9b, 0x1a, 0x6e, 0x69, 0x08, 0x54, + 0xce, 0x07, 0x69, 0xcd, 0xe3, 0x97, 0x61, 0xd8, + 0x2f, 0xcd, 0xec, 0x15, 0xf0, 0xd9, 0x2d, 0x7d, + 0x8e, 0x94, 0xad, 0xe8, 0xeb, 0x83, 0xfb, 0xe0 +}; +static const u8 output22[] __initconst = { + 0x26, 0x37, 0x40, 0x8f, 0xe1, 0x30, 0x86, 0xea, + 0x73, 0xf9, 0x71, 0xe3, 0x42, 0x5e, 0x28, 0x20 +}; +static const u8 key22[] __initconst = { + 0x99, 0xe5, 0x82, 0x2d, 0xd4, 0x17, 0x3c, 0x99, + 0x5e, 0x3d, 0xae, 0x0d, 0xde, 0xfb, 0x97, 0x74, + 0x3f, 0xde, 0x3b, 0x08, 0x01, 0x34, 0xb3, 0x9f, + 0x76, 0xe9, 0xbf, 0x8d, 0x0e, 0x88, 0xd5, 0x46 +}; + +/* test vectors from Hanno Böck */ +static const u8 input23[] __initconst = { + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0x80, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xc5, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xe3, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xac, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xe6, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x00, 0x00, 0x00, + 0xaf, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xff, 0xff, 0xff, 0xf5, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xff, 0xe7, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x71, 0x92, 0x05, 0xa8, 0x52, 0x1d, + 0xfc +}; +static const u8 output23[] __initconst = { + 0x85, 0x59, 0xb8, 0x76, 0xec, 0xee, 0xd6, 0x6e, + 0xb3, 0x77, 0x98, 0xc0, 0x45, 0x7b, 0xaf, 0xf9 +}; +static const u8 key23[] __initconst = { + 0x7f, 0x1b, 0x02, 0x64, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc +}; + +static const u8 input24[] __initconst = { + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x64 +}; +static const u8 output24[] __initconst = { + 0x00, 0xbd, 0x12, 0x58, 0x97, 0x8e, 0x20, 0x54, + 0x44, 0xc9, 0xaa, 0xaa, 0x82, 0x00, 0x6f, 0xed +}; +static const u8 key24[] __initconst = { + 0xe0, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa +}; + +static const u8 input25[] __initconst = { + 0x02, 0xfc +}; +static const u8 output25[] __initconst = { + 0x06, 0x12, 0x0c, 0x0c, 0x0c, 0x0c, 
0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c +}; +static const u8 key25[] __initconst = { + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c +}; + +static const u8 input26[] __initconst = { + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7a, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x5c, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x6e, 0x7b, 0x00, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x5c, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, + 0x7b, 0x6e, 0x7b, 0x00, 0x13, 0x00, 0x00, 0x00, + 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x20, 0x00, 0xef, 0xff, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x64, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, + 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0xef, 0xff, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x7a, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc +}; +static const u8 output26[] __initconst = { + 0x33, 0x20, 0x5b, 0xbf, 0x9e, 0x9f, 0x8f, 0x72, + 0x12, 0xab, 0x9e, 0x2a, 0xb9, 0xb7, 0xe4, 0xa5 +}; +static const u8 key26[] __initconst = { + 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x7b +}; + +static const u8 input27[] __initconst = { + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 
0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0xff, 0xff, 0xff, 0xe9, + 0xe9, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, + 0xac, 0xac, 0xac, 0xac, 0x00, 0x00, 0xac, 0xac, + 0xec, 0x01, 0x00, 0xac, 0xac, 0xac, 0x2c, 0xac, + 0xa2, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, + 0xac, 0xac, 0xac, 0xac, 0x64, 0xf2 +}; +static const u8 output27[] __initconst = { + 0x02, 0xee, 0x7c, 0x8c, 0x54, 0x6d, 0xde, 0xb1, + 0xa4, 0x67, 0xe4, 0xc3, 0x98, 0x11, 0x58, 0xb9 +}; +static const u8 key27[] __initconst = { + 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x7f, + 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xcf, 0x77, 0x77, 0x77, 0x77, 0x77, + 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77 +}; + +/* nacl */ +static const u8 input28[] __initconst = { + 0x8e, 0x99, 0x3b, 0x9f, 0x48, 0x68, 0x12, 0x73, + 0xc2, 0x96, 0x50, 0xba, 0x32, 0xfc, 0x76, 0xce, + 0x48, 0x33, 0x2e, 0xa7, 0x16, 0x4d, 0x96, 0xa4, + 0x47, 0x6f, 0xb8, 0xc5, 0x31, 0xa1, 0x18, 0x6a, + 0xc0, 0xdf, 0xc1, 0x7c, 0x98, 0xdc, 0xe8, 0x7b, + 0x4d, 0xa7, 0xf0, 0x11, 0xec, 0x48, 0xc9, 0x72, + 0x71, 0xd2, 0xc2, 0x0f, 0x9b, 0x92, 0x8f, 0xe2, + 0x27, 0x0d, 0x6f, 0xb8, 0x63, 0xd5, 0x17, 0x38, + 0xb4, 0x8e, 0xee, 0xe3, 0x14, 0xa7, 0xcc, 0x8a, + 0xb9, 0x32, 0x16, 0x45, 0x48, 0xe5, 0x26, 0xae, + 0x90, 0x22, 0x43, 0x68, 0x51, 0x7a, 0xcf, 0xea, + 0xbd, 0x6b, 0xb3, 0x73, 0x2b, 0xc0, 0xe9, 0xda, + 0x99, 0x83, 0x2b, 0x61, 0xca, 0x01, 0xb6, 0xde, + 0x56, 0x24, 0x4a, 0x9e, 0x88, 0xd5, 0xf9, 0xb3, + 0x79, 0x73, 0xf6, 0x22, 0xa4, 0x3d, 0x14, 0xa6, + 0x59, 0x9b, 0x1f, 0x65, 0x4c, 0xb4, 0x5a, 0x74, + 0xe3, 0x55, 0xa5 +}; +static const u8 output28[] __initconst = { + 0xf3, 0xff, 0xc7, 0x70, 0x3f, 0x94, 0x00, 0xe5, + 0x2a, 0x7d, 0xfb, 0x4b, 0x3d, 0x33, 0x05, 0xd9 +}; +static const u8 key28[] __initconst = { + 0xee, 0xa6, 0xa7, 0x25, 0x1c, 0x1e, 0x72, 0x91, + 0x6d, 0x11, 0xc2, 0xcb, 0x21, 0x4d, 0x3c, 0x25, + 0x25, 0x39, 0x12, 0x1d, 0x8e, 0x23, 0x4e, 0x65, + 0x2d, 0x65, 0x1f, 0xa4, 0xc8, 0xcf, 0xf8, 0x80 +}; + +/* wrap 2^130-5 */ +static const u8 input29[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 output29[] __initconst = { + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key29[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* wrap 2^128 */ +static const u8 input30[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output30[] __initconst = { + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key30[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; + +/* limb carry */ +static const u8 input31[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf0, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output31[] __initconst = { + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key31[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 2^130-5 */ +static const u8 input32[] __initconst = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfb, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 +}; +static const u8 output32[] __initconst = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key32[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 2^130-6 */ +static const u8 input33[] __initconst = { + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 output33[] __initconst = { + 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; +static const u8 key33[] __initconst = { + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 5*H+L reduction intermediate */ +static const u8 input34[] __initconst = { + 0xe3, 0x35, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0xb9, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x33, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0x79, 0xcd, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output34[] __initconst = { + 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key34[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* 5*H+L reduction final */ +static const u8 input35[] __initconst = { + 0xe3, 0x35, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0xb9, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x33, 0x94, 0xd7, 0x50, 0x5e, 0x43, 0x79, 0xcd, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 output35[] __initconst = { + 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8 key35[] __initconst = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const struct poly1305_testvec 
poly1305_testvecs[] __initconst = { + { input01, output01, key01, sizeof(input01) }, + { input02, output02, key02, sizeof(input02) }, + { input03, output03, key03, sizeof(input03) }, + { input04, output04, key04, sizeof(input04) }, + { input05, output05, key05, sizeof(input05) }, + { input06, output06, key06, sizeof(input06) }, + { input07, output07, key07, sizeof(input07) }, + { input08, output08, key08, sizeof(input08) }, + { input09, output09, key09, sizeof(input09) }, + { input10, output10, key10, sizeof(input10) }, + { input11, output11, key11, sizeof(input11) }, + { input12, output12, key12, sizeof(input12) }, + { input13, output13, key13, sizeof(input13) }, + { input14, output14, key14, sizeof(input14) }, + { input15, output15, key15, sizeof(input15) }, + { input16, output16, key16, sizeof(input16) }, + { input17, output17, key17, sizeof(input17) }, + { input18, output18, key18, sizeof(input18) }, + { input19, output19, key19, sizeof(input19) }, + { input20, output20, key20, sizeof(input20) }, + { input21, output21, key21, sizeof(input21) }, + { input22, output22, key22, sizeof(input22) }, + { input23, output23, key23, sizeof(input23) }, + { input24, output24, key24, sizeof(input24) }, + { input25, output25, key25, sizeof(input25) }, + { input26, output26, key26, sizeof(input26) }, + { input27, output27, key27, sizeof(input27) }, + { input28, output28, key28, sizeof(input28) }, + { input29, output29, key29, sizeof(input29) }, + { input30, output30, key30, sizeof(input30) }, + { input31, output31, key31, sizeof(input31) }, + { input32, output32, key32, sizeof(input32) }, + { input33, output33, key33, sizeof(input33) }, + { input34, output34, key34, sizeof(input34) }, + { input35, output35, key35, sizeof(input35) } +}; + +static bool __init poly1305_selftest(void) +{ + simd_context_t simd_context; + bool success = true; + size_t i, j; + + simd_get(&simd_context); + for (i = 0; i < ARRAY_SIZE(poly1305_testvecs); ++i) { + struct poly1305_ctx poly1305; + u8 out[POLY1305_MAC_SIZE]; + + memset(out, 0, sizeof(out)); + memset(&poly1305, 0, sizeof(poly1305)); + poly1305_init(&poly1305, poly1305_testvecs[i].key); + poly1305_update(&poly1305, poly1305_testvecs[i].input, + poly1305_testvecs[i].ilen, &simd_context); + poly1305_final(&poly1305, out, &simd_context); + if (memcmp(out, poly1305_testvecs[i].output, + POLY1305_MAC_SIZE)) { + pr_err("poly1305 self-test %zu: FAIL\n", i + 1); + success = false; + } + simd_relax(&simd_context); + + if (poly1305_testvecs[i].ilen <= 1) + continue; + + for (j = 1; j < poly1305_testvecs[i].ilen - 1; ++j) { + memset(out, 0, sizeof(out)); + memset(&poly1305, 0, sizeof(poly1305)); + poly1305_init(&poly1305, poly1305_testvecs[i].key); + poly1305_update(&poly1305, poly1305_testvecs[i].input, + j, &simd_context); + poly1305_update(&poly1305, + poly1305_testvecs[i].input + j, + poly1305_testvecs[i].ilen - j, + &simd_context); + poly1305_final(&poly1305, out, &simd_context); + if (memcmp(out, poly1305_testvecs[i].output, + POLY1305_MAC_SIZE)) { + pr_err("poly1305 self-test %zu (split %zu): FAIL\n", + i + 1, j); + success = false; + } + + memset(out, 0, sizeof(out)); + memset(&poly1305, 0, sizeof(poly1305)); + poly1305_init(&poly1305, poly1305_testvecs[i].key); + poly1305_update(&poly1305, poly1305_testvecs[i].input, + j, &simd_context); + poly1305_update(&poly1305, + poly1305_testvecs[i].input + j, + poly1305_testvecs[i].ilen - j, + DONT_USE_SIMD); + poly1305_final(&poly1305, out, &simd_context); + if (memcmp(out, poly1305_testvecs[i].output, + POLY1305_MAC_SIZE)) { + 
pr_err("poly1305 self-test %zu (split %zu, mixed simd): FAIL\n", + i + 1, j); + success = false; + } + simd_relax(&simd_context); + } + } + simd_put(&simd_context); + + return success; +} diff --git a/net/wireguard/crypto/zinc/selftest/run.h b/net/wireguard/crypto/zinc/selftest/run.h new file mode 100644 index 000000000000..4ffaf6089eea --- /dev/null +++ b/net/wireguard/crypto/zinc/selftest/run.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _ZINC_SELFTEST_RUN_H +#define _ZINC_SELFTEST_RUN_H + +#include <linux/kernel.h> +#include <linux/printk.h> +#include <linux/bug.h> + +static inline bool selftest_run(const char *name, bool (*selftest)(void), + bool *const nobs[], unsigned int nobs_len) +{ + unsigned long set = 0, subset = 0, largest_subset = 0; + unsigned int i; + + BUILD_BUG_ON(!__builtin_constant_p(nobs_len) || + nobs_len >= BITS_PER_LONG); + + if (!IS_ENABLED(CONFIG_ZINC_SELFTEST)) + return true; + + for (i = 0; i < nobs_len; ++i) + set |= ((unsigned long)*nobs[i]) << i; + + do { + for (i = 0; i < nobs_len; ++i) + *nobs[i] = BIT(i) & subset; + if (selftest()) + largest_subset = max(subset, largest_subset); + else + pr_err("%s self-test combination 0x%lx: FAIL\n", name, + subset); + subset = (subset - set) & set; + } while (subset); + + for (i = 0; i < nobs_len; ++i) + *nobs[i] = BIT(i) & largest_subset; + + if (largest_subset == set) + pr_info("%s self-tests: pass\n", name); + + return !WARN_ON(largest_subset != set); +} + +#endif diff --git a/net/wireguard/device.c b/net/wireguard/device.c new file mode 100644 index 000000000000..062490f1b8a7 --- /dev/null +++ b/net/wireguard/device.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "queueing.h" +#include "socket.h" +#include "timers.h" +#include "device.h" +#include "ratelimiter.h" +#include "peer.h" +#include "messages.h" + +#include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/inet.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/if_arp.h> +#include <linux/icmp.h> +#include <linux/suspend.h> +#include <net/dst_metadata.h> +#include <net/icmp.h> +#include <net/rtnetlink.h> +#include <net/ip_tunnels.h> +#include <net/addrconf.h> + +static LIST_HEAD(device_list); + +static int wg_open(struct net_device *dev) +{ + struct in_device *dev_v4 = __in_dev_get_rtnl(dev); +#ifndef COMPAT_CANNOT_USE_IN6_DEV_GET + struct inet6_dev *dev_v6 = __in6_dev_get(dev); +#endif + struct wg_device *wg = netdev_priv(dev); + struct wg_peer *peer; + int ret; + + if (dev_v4) { + /* At some point we might put this check near the ip_rt_send_ + * redirect call of ip_forward in net/ipv4/ip_forward.c, similar + * to the current secpath check. 
+ */ + IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); + IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false; + } +#ifndef COMPAT_CANNOT_USE_IN6_DEV_GET + if (dev_v6) +#ifndef COMPAT_CANNOT_USE_DEV_CNF + dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; +#else + dev_v6->addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; +#endif +#endif + + mutex_lock(&wg->device_update_lock); + ret = wg_socket_init(wg, wg->incoming_port); + if (ret < 0) + goto out; + list_for_each_entry(peer, &wg->peer_list, peer_list) { + wg_packet_send_staged_packets(peer); + if (peer->persistent_keepalive_interval) + wg_packet_send_keepalive(peer); + } +out: + mutex_unlock(&wg->device_update_lock); + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int wg_pm_notification(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct wg_device *wg; + struct wg_peer *peer; + + /* If the machine is constantly suspending and resuming, as part of + * its normal operation rather than as a somewhat rare event, then we + * don't actually want to clear keys. + */ + if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID)) + return 0; + + if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE) + return 0; + + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + mutex_lock(&wg->device_update_lock); + list_for_each_entry(peer, &wg->peer_list, peer_list) { + del_timer(&peer->timer_zero_key_material); + wg_noise_handshake_clear(&peer->handshake); + wg_noise_keypairs_clear(&peer->keypairs); + } + mutex_unlock(&wg->device_update_lock); + } + rtnl_unlock(); + rcu_barrier(); + return 0; +} + +static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification }; +#endif + +static int wg_stop(struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + struct wg_peer *peer; + struct sk_buff *skb; + + mutex_lock(&wg->device_update_lock); + list_for_each_entry(peer, &wg->peer_list, peer_list) { + wg_packet_purge_staged_packets(peer); + wg_timers_stop(peer); + wg_noise_handshake_clear(&peer->handshake); + wg_noise_keypairs_clear(&peer->keypairs); + wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); + } + mutex_unlock(&wg->device_update_lock); + while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL) + kfree_skb(skb); + atomic_set(&wg->handshake_queue_len, 0); + wg_socket_reinit(wg, NULL, NULL); + return 0; +} + +static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + struct sk_buff_head packets; + struct wg_peer *peer; + struct sk_buff *next; + sa_family_t family; + u32 mtu; + int ret; + + if (unlikely(!wg_check_packet_protocol(skb))) { + ret = -EPROTONOSUPPORT; + net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); + goto err; + } + + peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb); + if (unlikely(!peer)) { + ret = -ENOKEY; + if (skb->protocol == htons(ETH_P_IP)) + net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n", + dev->name, &ip_hdr(skb)->daddr); + else if (skb->protocol == htons(ETH_P_IPV6)) + net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", + dev->name, &ipv6_hdr(skb)->daddr); + goto err_icmp; + } + + family = READ_ONCE(peer->endpoint.addr.sa_family); + if (unlikely(family != AF_INET && family != AF_INET6)) { + ret = -EDESTADDRREQ; + net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n", + dev->name, peer->internal_id); + goto err_peer; + } + + mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; + + __skb_queue_head_init(&packets); + if (!skb_is_gso(skb)) { + skb_mark_not_on_list(skb); + } else { + struct sk_buff *segs = skb_gso_segment(skb, 0); + + if (IS_ERR(segs)) { + ret = PTR_ERR(segs); + goto err_peer; + } + dev_kfree_skb(skb); + skb = segs; + } + + skb_list_walk_safe(skb, skb, next) { + skb_mark_not_on_list(skb); + + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + continue; + + /* We only need to keep the original dst around for icmp, + * so at this point we're in a position to drop it. + */ + skb_dst_drop(skb); + + PACKET_CB(skb)->mtu = mtu; + + __skb_queue_tail(&packets, skb); + } + + spin_lock_bh(&peer->staged_packet_queue.lock); + /* If the queue is getting too big, we start removing the oldest packets + * until it's small again. We do this before adding the new packet, so + * we don't remove GSO segments that are in excess. + */ + while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { + dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); + ++dev->stats.tx_dropped; + } + skb_queue_splice_tail(&packets, &peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); + + wg_packet_send_staged_packets(peer); + + wg_peer_put(peer); + return NETDEV_TX_OK; + +err_peer: + wg_peer_put(peer); +err_icmp: + if (skb->protocol == htons(ETH_P_IP)) + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); +err: + ++dev->stats.tx_errors; + kfree_skb(skb); + return ret; +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = wg_open, + .ndo_stop = wg_stop, + .ndo_start_xmit = wg_xmit, + .ndo_get_stats64 = ip_tunnel_get_stats64 +}; + +static void wg_destruct(struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + + rtnl_lock(); + list_del(&wg->device_list); + rtnl_unlock(); + mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); + wg->incoming_port = 0; + wg_socket_reinit(wg, NULL, NULL); + /* The final references are cleared in the below calls to destroy_workqueue. */ + wg_peer_remove_all(wg); + destroy_workqueue(wg->handshake_receive_wq); + destroy_workqueue(wg->handshake_send_wq); + destroy_workqueue(wg->packet_crypt_wq); + wg_packet_queue_free(&wg->handshake_queue, true); + wg_packet_queue_free(&wg->decrypt_queue, false); + wg_packet_queue_free(&wg->encrypt_queue, false); + rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ + wg_ratelimiter_uninit(); + memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); + free_percpu(dev->tstats); + kvfree(wg->index_hashtable); + kvfree(wg->peer_hashtable); + mutex_unlock(&wg->device_update_lock); + + pr_debug("%s: Interface destroyed\n", dev->name); + free_netdev(dev); +} + +static const struct device_type device_type = { .name = KBUILD_MODNAME }; + +static void wg_setup(struct net_device *dev) +{ + struct wg_device *wg = netdev_priv(dev); + enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; + const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) + + max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); + + dev->netdev_ops = &netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->needed_headroom = DATA_PACKET_HEAD_ROOM; + dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE); + dev->type = ARPHRD_NONE; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; +#ifndef COMPAT_CANNOT_USE_IFF_NO_QUEUE + dev->priv_flags |= IFF_NO_QUEUE; +#else + dev->tx_queue_len = 0; +#endif + dev->features |= NETIF_F_LLTX; + dev->features |= WG_NETDEV_FEATURES; + dev->hw_features |= WG_NETDEV_FEATURES; + dev->hw_enc_features |= WG_NETDEV_FEATURES; + dev->mtu = ETH_DATA_LEN - overhead; +#ifndef COMPAT_CANNOT_USE_MAX_MTU + dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead; +#endif + + SET_NETDEV_DEVTYPE(dev, &device_type); + + /* We need to keep the dst around in case of icmp replies. */ + netif_keep_dst(dev); + + memset(wg, 0, sizeof(*wg)); + wg->dev = dev; +} + +static int wg_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct wg_device *wg = netdev_priv(dev); + int ret = -ENOMEM; + + rcu_assign_pointer(wg->creating_net, src_net); + init_rwsem(&wg->static_identity.lock); + mutex_init(&wg->socket_update_lock); + mutex_init(&wg->device_update_lock); + wg_allowedips_init(&wg->peer_allowedips); + wg_cookie_checker_init(&wg->cookie_checker, wg); + INIT_LIST_HEAD(&wg->peer_list); + wg->device_update_gen = 1; + + wg->peer_hashtable = wg_pubkey_hashtable_alloc(); + if (!wg->peer_hashtable) + return ret; + + wg->index_hashtable = wg_index_hashtable_alloc(); + if (!wg->index_hashtable) + goto err_free_peer_hashtable; + + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + goto err_free_index_hashtable; + + wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", + WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); + if (!wg->handshake_receive_wq) + goto err_free_tstats; + + wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", + WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); + if (!wg->handshake_send_wq) + goto err_destroy_handshake_receive; + + wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s", + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name); + if (!wg->packet_crypt_wq) + goto err_destroy_handshake_send; + + ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, + MAX_QUEUED_PACKETS); + if (ret < 0) + goto err_destroy_packet_crypt; + + ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, + MAX_QUEUED_PACKETS); + if (ret < 0) + goto err_free_encrypt_queue; + + ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker, + MAX_QUEUED_INCOMING_HANDSHAKES); + if (ret < 0) + goto err_free_decrypt_queue; + + ret = wg_ratelimiter_init(); 
+ if (ret < 0) + goto err_free_handshake_queue; + + ret = register_netdevice(dev); + if (ret < 0) + goto err_uninit_ratelimiter; + + list_add(&wg->device_list, &device_list); + + /* We wait until the end to assign priv_destructor, so that + * register_netdevice doesn't call it for us if it fails. + */ + dev->priv_destructor = wg_destruct; + + pr_debug("%s: Interface created\n", dev->name); + return ret; + +err_uninit_ratelimiter: + wg_ratelimiter_uninit(); +err_free_handshake_queue: + wg_packet_queue_free(&wg->handshake_queue, false); +err_free_decrypt_queue: + wg_packet_queue_free(&wg->decrypt_queue, false); +err_free_encrypt_queue: + wg_packet_queue_free(&wg->encrypt_queue, false); +err_destroy_packet_crypt: + destroy_workqueue(wg->packet_crypt_wq); +err_destroy_handshake_send: + destroy_workqueue(wg->handshake_send_wq); +err_destroy_handshake_receive: + destroy_workqueue(wg->handshake_receive_wq); +err_free_tstats: + free_percpu(dev->tstats); +err_free_index_hashtable: + kvfree(wg->index_hashtable); +err_free_peer_hashtable: + kvfree(wg->peer_hashtable); + return ret; +} + +static struct rtnl_link_ops link_ops __read_mostly = { + .kind = KBUILD_MODNAME, + .priv_size = sizeof(struct wg_device), + .setup = wg_setup, + .newlink = wg_newlink, +}; + +static void wg_netns_pre_exit(struct net *net) +{ + struct wg_device *wg; + struct wg_peer *peer; + + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + if (rcu_access_pointer(wg->creating_net) == net) { + pr_debug("%s: Creating namespace exiting\n", wg->dev->name); + netif_carrier_off(wg->dev); + mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); + wg_socket_reinit(wg, NULL, NULL); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); + mutex_unlock(&wg->device_update_lock); + } + } + rtnl_unlock(); +} + +static struct pernet_operations pernet_ops = { + .pre_exit = wg_netns_pre_exit +}; + +int __init wg_device_init(void) +{ + int ret; + +#ifdef CONFIG_PM_SLEEP + ret = register_pm_notifier(&pm_notifier); + if (ret) + return ret; +#endif + + ret = register_pernet_device(&pernet_ops); + if (ret) + goto error_pm; + + ret = rtnl_link_register(&link_ops); + if (ret) + goto error_pernet; + + return 0; + +error_pernet: + unregister_pernet_device(&pernet_ops); +error_pm: +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&pm_notifier); +#endif + return ret; +} + +void wg_device_uninit(void) +{ + rtnl_link_unregister(&link_ops); + unregister_pernet_device(&pernet_ops); +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&pm_notifier); +#endif + rcu_barrier(); +} diff --git a/net/wireguard/device.h b/net/wireguard/device.h new file mode 100644 index 000000000000..43c7cebbf50b --- /dev/null +++ b/net/wireguard/device.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _WG_DEVICE_H +#define _WG_DEVICE_H + +#include "noise.h" +#include "allowedips.h" +#include "peerlookup.h" +#include "cookie.h" + +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> +#include <linux/net.h> +#include <linux/ptr_ring.h> + +struct wg_device; + +struct multicore_worker { + void *ptr; + struct work_struct work; +}; + +struct crypt_queue { + struct ptr_ring ring; + struct multicore_worker __percpu *worker; + int last_cpu; +}; + +struct prev_queue { + struct sk_buff *head, *tail, *peeked; + struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. + atomic_t count; +}; + +struct wg_device { + struct net_device *dev; + struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue; + struct sock __rcu *sock4, *sock6; + struct net __rcu *creating_net; + struct noise_static_identity static_identity; + struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq; + struct cookie_checker cookie_checker; + struct pubkey_hashtable *peer_hashtable; + struct index_hashtable *index_hashtable; + struct allowedips peer_allowedips; + struct mutex device_update_lock, socket_update_lock; + struct list_head device_list, peer_list; + atomic_t handshake_queue_len; + unsigned int num_peers, device_update_gen; + u32 fwmark; + u16 incoming_port; +}; + +int wg_device_init(void); +void wg_device_uninit(void); + +#endif /* _WG_DEVICE_H */ diff --git a/net/wireguard/main.c b/net/wireguard/main.c new file mode 100644 index 000000000000..d5ce491e822e --- /dev/null +++ b/net/wireguard/main.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "version.h" +#include "device.h" +#include "noise.h" +#include "queueing.h" +#include "ratelimiter.h" +#include "netlink.h" +#include "uapi/wireguard.h" +#include "crypto/zinc.h" + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/genetlink.h> +#include <net/rtnetlink.h> + +static int __init wg_mod_init(void) +{ + int ret; + + if ((ret = chacha20_mod_init()) || (ret = poly1305_mod_init()) || + (ret = chacha20poly1305_mod_init()) || (ret = blake2s_mod_init()) || + (ret = curve25519_mod_init())) + return ret; + + ret = wg_allowedips_slab_init(); + if (ret < 0) + goto err_allowedips; + +#ifdef DEBUG + ret = -ENOTRECOVERABLE; + if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || + !wg_ratelimiter_selftest()) + goto err_peer; +#endif + wg_noise_init(); + + ret = wg_peer_init(); + if (ret < 0) + goto err_peer; + + ret = wg_device_init(); + if (ret < 0) + goto err_device; + + ret = wg_genetlink_init(); + if (ret < 0) + goto err_netlink; + + pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n"); + pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.\n"); + + return 0; + +err_netlink: + wg_device_uninit(); +err_device: + wg_peer_uninit(); +err_peer: + wg_allowedips_slab_uninit(); +err_allowedips: + return ret; +} + +static void __exit wg_mod_exit(void) +{ + wg_genetlink_uninit(); + wg_device_uninit(); + wg_peer_uninit(); + wg_allowedips_slab_uninit(); +} + +module_init(wg_mod_init); +module_exit(wg_mod_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("WireGuard secure network tunnel"); +MODULE_AUTHOR("Jason A. 
Donenfeld <Jason@zx2c4.com>"); +MODULE_VERSION(WIREGUARD_VERSION); +MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME); +MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME); +MODULE_INFO(intree, "Y"); diff --git a/net/wireguard/messages.h b/net/wireguard/messages.h new file mode 100644 index 000000000000..1d1ed18f11f8 --- /dev/null +++ b/net/wireguard/messages.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_MESSAGES_H +#define _WG_MESSAGES_H + +#include <zinc/curve25519.h> +#include <zinc/chacha20poly1305.h> +#include <zinc/blake2s.h> + +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/skbuff.h> + +enum noise_lengths { + NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE, + NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE, + NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32), + NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE, + NOISE_HASH_LEN = BLAKE2S_HASH_SIZE +}; + +#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN) + +enum cookie_values { + COOKIE_SECRET_MAX_AGE = 2 * 60, + COOKIE_SECRET_LATENCY = 5, + COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE, + COOKIE_LEN = 16 +}; + +enum counter_values { + COUNTER_BITS_TOTAL = 8192, + COUNTER_REDUNDANT_BITS = BITS_PER_LONG, + COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS +}; + +enum limits { + REKEY_AFTER_MESSAGES = 1ULL << 60, + REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1, + REKEY_TIMEOUT = 5, + REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3, + REKEY_AFTER_TIME = 120, + REJECT_AFTER_TIME = 180, + INITIATIONS_PER_SECOND = 50, + MAX_PEERS_PER_DEVICE = 1U << 20, + KEEPALIVE_TIMEOUT = 10, + MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT, + MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */ + MAX_STAGED_PACKETS = 128, + MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */ +}; + +enum message_type { + MESSAGE_INVALID = 0, + MESSAGE_HANDSHAKE_INITIATION = 1, + MESSAGE_HANDSHAKE_RESPONSE = 2, + MESSAGE_HANDSHAKE_COOKIE = 3, + MESSAGE_DATA = 4 +}; + +struct message_header { + /* The actual layout of this that we want is: + * u8 type + * u8 reserved_zero[3] + * + * But it turns out that by encoding this as little endian, + * we achieve the same thing, and it makes checking faster. 
+ */ + __le32 type; +}; + +struct message_macs { + u8 mac1[COOKIE_LEN]; + u8 mac2[COOKIE_LEN]; +}; + +struct message_handshake_initiation { + struct message_header header; + __le32 sender_index; + u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; + u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)]; + u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)]; + struct message_macs macs; +}; + +struct message_handshake_response { + struct message_header header; + __le32 sender_index; + __le32 receiver_index; + u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; + u8 encrypted_nothing[noise_encrypted_len(0)]; + struct message_macs macs; +}; + +struct message_handshake_cookie { + struct message_header header; + __le32 receiver_index; + u8 nonce[COOKIE_NONCE_LEN]; + u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)]; +}; + +struct message_data { + struct message_header header; + __le32 key_idx; + __le64 counter; + u8 encrypted_data[]; +}; + +#define message_data_len(plain_len) \ + (noise_encrypted_len(plain_len) + sizeof(struct message_data)) + +enum message_alignments { + MESSAGE_PADDING_MULTIPLE = 16, + MESSAGE_MINIMUM_LENGTH = message_data_len(0) +}; + +#define SKB_HEADER_LEN \ + (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ + sizeof(struct udphdr) + NET_SKB_PAD) +#define DATA_PACKET_HEAD_ROOM \ + ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4) + +enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ }; + +#endif /* _WG_MESSAGES_H */ diff --git a/net/wireguard/netlink.c b/net/wireguard/netlink.c new file mode 100644 index 000000000000..ef239ab1e2d6 --- /dev/null +++ b/net/wireguard/netlink.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#include "netlink.h" +#include "device.h" +#include "peer.h" +#include "socket.h" +#include "queueing.h" +#include "messages.h" +#include "uapi/wireguard.h" +#include <linux/if.h> +#include <net/genetlink.h> +#include <net/sock.h> +#include <crypto/algapi.h> + +static struct genl_family genl_family; + +static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { + [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, + [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, + [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, + [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, + [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, + [WGDEVICE_A_PEERS] = { .type = NLA_NESTED } +}; + +static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { + [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), + [WGPEER_A_FLAGS] = { .type = NLA_U32 }, + [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)), + [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, + [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), + [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, + [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, + [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, + [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 } +}; + +static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { + [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, + [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)), + [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } +}; + +static struct wg_device *lookup_interface(struct nlattr **attrs, + struct sk_buff *skb) +{ + struct net_device *dev = NULL; + + if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME]) + return ERR_PTR(-EBADR); + if (attrs[WGDEVICE_A_IFINDEX]) + dev = dev_get_by_index(sock_net(skb->sk), + nla_get_u32(attrs[WGDEVICE_A_IFINDEX])); + else if (attrs[WGDEVICE_A_IFNAME]) + dev = dev_get_by_name(sock_net(skb->sk), + nla_data(attrs[WGDEVICE_A_IFNAME])); + if (!dev) + return ERR_PTR(-ENODEV); + if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind || + strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) { + dev_put(dev); + return ERR_PTR(-EOPNOTSUPP); + } + return netdev_priv(dev); +} + +static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr, + int family) +{ + struct nlattr *allowedip_nest; + + allowedip_nest = nla_nest_start(skb, 0); + if (!allowedip_nest) + return -EMSGSIZE; + + if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) || + nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) || + nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ? 
+ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) { + nla_nest_cancel(skb, allowedip_nest); + return -EMSGSIZE; + } + + nla_nest_end(skb, allowedip_nest); + return 0; +} + +struct dump_ctx { + struct wg_device *wg; + struct wg_peer *next_peer; + u64 allowedips_seq; + struct allowedips_node *next_allowedip; +}; + +#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args) + +static int +get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) +{ + + struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0); + struct allowedips_node *allowedips_node = ctx->next_allowedip; + bool fail; + + if (!peer_nest) + return -EMSGSIZE; + + down_read(&peer->handshake.lock); + fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN, + peer->handshake.remote_static); + up_read(&peer->handshake.lock); + if (fail) + goto err; + + if (!allowedips_node) { + const struct __kernel_timespec last_handshake = { + .tv_sec = peer->walltime_last_handshake.tv_sec, + .tv_nsec = peer->walltime_last_handshake.tv_nsec + }; + + down_read(&peer->handshake.lock); + fail = nla_put(skb, WGPEER_A_PRESHARED_KEY, + NOISE_SYMMETRIC_KEY_LEN, + peer->handshake.preshared_key); + up_read(&peer->handshake.lock); + if (fail) + goto err; + + if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME, + sizeof(last_handshake), &last_handshake) || + nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, + peer->persistent_keepalive_interval) || + nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes, + WGPEER_A_UNSPEC) || + nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes, + WGPEER_A_UNSPEC) || + nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1)) + goto err; + + read_lock_bh(&peer->endpoint_lock); + if (peer->endpoint.addr.sa_family == AF_INET) + fail = nla_put(skb, WGPEER_A_ENDPOINT, + sizeof(peer->endpoint.addr4), + &peer->endpoint.addr4); + else if (peer->endpoint.addr.sa_family == AF_INET6) + fail = nla_put(skb, WGPEER_A_ENDPOINT, + sizeof(peer->endpoint.addr6), + &peer->endpoint.addr6); + read_unlock_bh(&peer->endpoint_lock); + if (fail) + goto err; + allowedips_node = + list_first_entry_or_null(&peer->allowedips_list, + struct allowedips_node, peer_list); + } + if (!allowedips_node) + goto no_allowedips; + if (!ctx->allowedips_seq) + ctx->allowedips_seq = peer->device->peer_allowedips.seq; + else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) + goto no_allowedips; + + allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); + if (!allowedips_nest) + goto err; + + list_for_each_entry_from(allowedips_node, &peer->allowedips_list, + peer_list) { + u8 cidr, ip[16] __aligned(__alignof(u64)); + int family; + + family = wg_allowedips_read_node(allowedips_node, ip, &cidr); + if (get_allowedips(skb, ip, cidr, family)) { + nla_nest_end(skb, allowedips_nest); + nla_nest_end(skb, peer_nest); + ctx->next_allowedip = allowedips_node; + return -EMSGSIZE; + } + } + nla_nest_end(skb, allowedips_nest); +no_allowedips: + nla_nest_end(skb, peer_nest); + ctx->next_allowedip = NULL; + ctx->allowedips_seq = 0; + return 0; +err: + nla_nest_cancel(skb, peer_nest); + return -EMSGSIZE; +} + +static int wg_get_device_start(struct netlink_callback *cb) +{ + struct wg_device *wg; + + wg = lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb); + if (IS_ERR(wg)) + return PTR_ERR(wg); + DUMP_CTX(cb)->wg = wg; + return 0; +} + +static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct wg_peer *peer, *next_peer_cursor; + struct dump_ctx *ctx = DUMP_CTX(cb); + struct wg_device *wg = 
ctx->wg; + struct nlattr *peers_nest; + int ret = -EMSGSIZE; + bool done = true; + void *hdr; + + rtnl_lock(); + mutex_lock(&wg->device_update_lock); + cb->seq = wg->device_update_gen; + next_peer_cursor = ctx->next_peer; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE); + if (!hdr) + goto out; + genl_dump_check_consistent(cb, hdr); + + if (!ctx->next_peer) { + if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT, + wg->incoming_port) || + nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) || + nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) || + nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name)) + goto out; + + down_read(&wg->static_identity.lock); + if (wg->static_identity.has_identity) { + if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY, + NOISE_PUBLIC_KEY_LEN, + wg->static_identity.static_private) || + nla_put(skb, WGDEVICE_A_PUBLIC_KEY, + NOISE_PUBLIC_KEY_LEN, + wg->static_identity.static_public)) { + up_read(&wg->static_identity.lock); + goto out; + } + } + up_read(&wg->static_identity.lock); + } + + peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS); + if (!peers_nest) + goto out; + ret = 0; + /* If the last cursor was removed via list_del_init in peer_remove, then + * we just treat this the same as there being no more peers left. The + * reason is that seq_nr should indicate to userspace that this isn't a + * coherent dump anyway, so they'll try again. + */ + if (list_empty(&wg->peer_list) || + (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { + nla_nest_cancel(skb, peers_nest); + goto out; + } + lockdep_assert_held(&wg->device_update_lock); + peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); + list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { + if (get_peer(peer, skb, ctx)) { + done = false; + break; + } + next_peer_cursor = peer; + } + nla_nest_end(skb, peers_nest); + +out: + if (!ret && !done && next_peer_cursor) + wg_peer_get(next_peer_cursor); + wg_peer_put(ctx->next_peer); + mutex_unlock(&wg->device_update_lock); + rtnl_unlock(); + + if (ret) { + genlmsg_cancel(skb, hdr); + return ret; + } + genlmsg_end(skb, hdr); + if (done) { + ctx->next_peer = NULL; + return 0; + } + ctx->next_peer = next_peer_cursor; + return skb->len; + + /* At this point, we can't really deal ourselves with safely zeroing out + * the private key material after usage. This will need an additional API + * in the kernel for marking skbs as zero_on_free. 
+ */ +} + +static int wg_get_device_done(struct netlink_callback *cb) +{ + struct dump_ctx *ctx = DUMP_CTX(cb); + + if (ctx->wg) + dev_put(ctx->wg->dev); + wg_peer_put(ctx->next_peer); + return 0; +} + +static int set_port(struct wg_device *wg, u16 port) +{ + struct wg_peer *peer; + + if (wg->incoming_port == port) + return 0; + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); + if (!netif_running(wg->dev)) { + wg->incoming_port = port; + return 0; + } + return wg_socket_init(wg, port); +} + +static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs) +{ + int ret = -EINVAL; + u16 family; + u8 cidr; + + if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] || + !attrs[WGALLOWEDIP_A_CIDR_MASK]) + return ret; + family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]); + cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]); + + if (family == AF_INET && cidr <= 32 && + nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) + ret = wg_allowedips_insert_v4( + &peer->device->peer_allowedips, + nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, + &peer->device->device_update_lock); + else if (family == AF_INET6 && cidr <= 128 && + nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) + ret = wg_allowedips_insert_v6( + &peer->device->peer_allowedips, + nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, + &peer->device->device_update_lock); + + return ret; +} + +static int set_peer(struct wg_device *wg, struct nlattr **attrs) +{ + u8 *public_key = NULL, *preshared_key = NULL; + struct wg_peer *peer = NULL; + u32 flags = 0; + int ret; + + ret = -EINVAL; + if (attrs[WGPEER_A_PUBLIC_KEY] && + nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN) + public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]); + else + goto out; + if (attrs[WGPEER_A_PRESHARED_KEY] && + nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN) + preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]); + + if (attrs[WGPEER_A_FLAGS]) + flags = nla_get_u32(attrs[WGPEER_A_FLAGS]); + ret = -EOPNOTSUPP; + if (flags & ~__WGPEER_F_ALL) + goto out; + + ret = -EPFNOSUPPORT; + if (attrs[WGPEER_A_PROTOCOL_VERSION]) { + if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1) + goto out; + } + + peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, + nla_data(attrs[WGPEER_A_PUBLIC_KEY])); + ret = 0; + if (!peer) { /* Peer doesn't exist yet. Add a new one. */ + if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY)) + goto out; + + /* The peer is new, so there aren't allowed IPs to remove. */ + flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS; + + down_read(&wg->static_identity.lock); + if (wg->static_identity.has_identity && + !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]), + wg->static_identity.static_public, + NOISE_PUBLIC_KEY_LEN)) { + /* We silently ignore peers that have the same public + * key as the device. The reason we do it silently is + * that we'd like for people to be able to reuse the + * same set of API calls across peers. + */ + up_read(&wg->static_identity.lock); + ret = 0; + goto out; + } + up_read(&wg->static_identity.lock); + + peer = wg_peer_create(wg, public_key, preshared_key); + if (IS_ERR(peer)) { + ret = PTR_ERR(peer); + peer = NULL; + goto out; + } + /* Take additional reference, as though we've just been + * looked up. 
+ */ + wg_peer_get(peer); + } + + if (flags & WGPEER_F_REMOVE_ME) { + wg_peer_remove(peer); + goto out; + } + + if (preshared_key) { + down_write(&peer->handshake.lock); + memcpy(&peer->handshake.preshared_key, preshared_key, + NOISE_SYMMETRIC_KEY_LEN); + up_write(&peer->handshake.lock); + } + + if (attrs[WGPEER_A_ENDPOINT]) { + struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); + size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + + if ((len == sizeof(struct sockaddr_in) && + addr->sa_family == AF_INET) || + (len == sizeof(struct sockaddr_in6) && + addr->sa_family == AF_INET6)) { + struct endpoint endpoint = { { { 0 } } }; + + memcpy(&endpoint.addr, addr, len); + wg_socket_set_peer_endpoint(peer, &endpoint); + } + } + + if (flags & WGPEER_F_REPLACE_ALLOWEDIPS) + wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer, + &wg->device_update_lock); + + if (attrs[WGPEER_A_ALLOWEDIPS]) { + struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1]; + int rem; + + nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) { + ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX, + attr, allowedip_policy, NULL); + if (ret < 0) + goto out; + ret = set_allowedip(peer, allowedip); + if (ret < 0) + goto out; + } + } + + if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) { + const u16 persistent_keepalive_interval = nla_get_u16( + attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]); + const bool send_keepalive = + !peer->persistent_keepalive_interval && + persistent_keepalive_interval && + netif_running(wg->dev); + + peer->persistent_keepalive_interval = persistent_keepalive_interval; + if (send_keepalive) + wg_packet_send_keepalive(peer); + } + + if (netif_running(wg->dev)) + wg_packet_send_staged_packets(peer); + +out: + wg_peer_put(peer); + if (attrs[WGPEER_A_PRESHARED_KEY]) + memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]), + nla_len(attrs[WGPEER_A_PRESHARED_KEY])); + return ret; +} + +static int wg_set_device(struct sk_buff *skb, struct genl_info *info) +{ + struct wg_device *wg = lookup_interface(info->attrs, skb); + u32 flags = 0; + int ret; + + if (IS_ERR(wg)) { + ret = PTR_ERR(wg); + goto out_nodev; + } + + rtnl_lock(); + mutex_lock(&wg->device_update_lock); + + if (info->attrs[WGDEVICE_A_FLAGS]) + flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]); + ret = -EOPNOTSUPP; + if (flags & ~__WGDEVICE_F_ALL) + goto out; + + if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { + struct net *net; + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? 
-EPERM : 0; + rcu_read_unlock(); + if (ret) + goto out; + } + + ++wg->device_update_gen; + + if (info->attrs[WGDEVICE_A_FWMARK]) { + struct wg_peer *peer; + + wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); + } + + if (info->attrs[WGDEVICE_A_LISTEN_PORT]) { + ret = set_port(wg, + nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT])); + if (ret) + goto out; + } + + if (flags & WGDEVICE_F_REPLACE_PEERS) + wg_peer_remove_all(wg); + + if (info->attrs[WGDEVICE_A_PRIVATE_KEY] && + nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) == + NOISE_PUBLIC_KEY_LEN) { + u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]); + u8 public_key[NOISE_PUBLIC_KEY_LEN]; + struct wg_peer *peer, *temp; + + if (!crypto_memneq(wg->static_identity.static_private, + private_key, NOISE_PUBLIC_KEY_LEN)) + goto skip_set_private_key; + + /* We remove before setting, to prevent race, which means doing + * two 25519-genpub ops. + */ + if (curve25519_generate_public(public_key, private_key)) { + peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, + public_key); + if (peer) { + wg_peer_put(peer); + wg_peer_remove(peer); + } + } + + down_write(&wg->static_identity.lock); + wg_noise_set_static_identity_private_key(&wg->static_identity, + private_key); + list_for_each_entry_safe(peer, temp, &wg->peer_list, + peer_list) { + wg_noise_precompute_static_static(peer); + wg_noise_expire_current_peer_keypairs(peer); + } + wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); + up_write(&wg->static_identity.lock); + } +skip_set_private_key: + + if (info->attrs[WGDEVICE_A_PEERS]) { + struct nlattr *attr, *peer[WGPEER_A_MAX + 1]; + int rem; + + nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) { + ret = nla_parse_nested(peer, WGPEER_A_MAX, attr, + peer_policy, NULL); + if (ret < 0) + goto out; + ret = set_peer(wg, peer); + if (ret < 0) + goto out; + } + } + ret = 0; + +out: + mutex_unlock(&wg->device_update_lock); + rtnl_unlock(); + dev_put(wg->dev); +out_nodev: + if (info->attrs[WGDEVICE_A_PRIVATE_KEY]) + memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]), + nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY])); + return ret; +} + +#ifndef COMPAT_CANNOT_USE_CONST_GENL_OPS +static const +#else +static +#endif +struct genl_ops genl_ops[] = { + { + .cmd = WG_CMD_GET_DEVICE, +#ifndef COMPAT_CANNOT_USE_NETLINK_START + .start = wg_get_device_start, +#endif + .dumpit = wg_get_device_dump, + .done = wg_get_device_done, +#ifdef COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY + .policy = device_policy, +#endif + .flags = GENL_UNS_ADMIN_PERM + }, { + .cmd = WG_CMD_SET_DEVICE, + .doit = wg_set_device, +#ifdef COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY + .policy = device_policy, +#endif + .flags = GENL_UNS_ADMIN_PERM + } +}; + +static struct genl_family genl_family +#ifndef COMPAT_CANNOT_USE_GENL_NOPS +__ro_after_init = { + .ops = genl_ops, + .n_ops = ARRAY_SIZE(genl_ops), +#else += { +#endif + .name = WG_GENL_NAME, + .version = WG_GENL_VERSION, + .maxattr = WGDEVICE_A_MAX, + .module = THIS_MODULE, +#ifndef COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY + .policy = device_policy, +#endif + .netnsok = true +}; + +int __init wg_genetlink_init(void) +{ + return genl_register_family(&genl_family); +} + +void __exit wg_genetlink_uninit(void) +{ + genl_unregister_family(&genl_family); +} diff --git a/net/wireguard/netlink.h b/net/wireguard/netlink.h new file mode 100644 index 000000000000..15100d92e2e3 --- /dev/null +++ 
b/net/wireguard/netlink.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_NETLINK_H +#define _WG_NETLINK_H + +int wg_genetlink_init(void); +void wg_genetlink_uninit(void); + +#endif /* _WG_NETLINK_H */ diff --git a/net/wireguard/noise.c b/net/wireguard/noise.c new file mode 100644 index 000000000000..baf455e21e79 --- /dev/null +++ b/net/wireguard/noise.c @@ -0,0 +1,830 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "noise.h" +#include "device.h" +#include "peer.h" +#include "messages.h" +#include "queueing.h" +#include "peerlookup.h" + +#include <linux/rcupdate.h> +#include <linux/slab.h> +#include <linux/bitmap.h> +#include <linux/scatterlist.h> +#include <linux/highmem.h> +#include <crypto/algapi.h> + +/* This implements Noise_IKpsk2: + * + * <- s + * ****** + * -> e, es, s, ss, {t} + * <- e, ee, se, psk, {} + */ + +static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; +static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com"; +static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init; +static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init; +static atomic64_t keypair_counter = ATOMIC64_INIT(0); + +void __init wg_noise_init(void) +{ + struct blake2s_state blake; + + blake2s(handshake_init_chaining_key, handshake_name, NULL, + NOISE_HASH_LEN, sizeof(handshake_name), 0); + blake2s_init(&blake, NOISE_HASH_LEN); + blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN); + blake2s_update(&blake, identifier_name, sizeof(identifier_name)); + blake2s_final(&blake, handshake_init_hash); +} + +/* Must hold peer->handshake.static_identity->lock */ +void wg_noise_precompute_static_static(struct wg_peer *peer) +{ + down_write(&peer->handshake.lock); + if (!peer->handshake.static_identity->has_identity || + !curve25519(peer->handshake.precomputed_static_static, + peer->handshake.static_identity->static_private, + peer->handshake.remote_static)) + memset(peer->handshake.precomputed_static_static, 0, + NOISE_PUBLIC_KEY_LEN); + up_write(&peer->handshake.lock); +} + +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer) +{ + memset(handshake, 0, sizeof(*handshake)); + init_rwsem(&handshake->lock); + handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE; + handshake->entry.peer = peer; + memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN); + if (peer_preshared_key) + memcpy(handshake->preshared_key, peer_preshared_key, + NOISE_SYMMETRIC_KEY_LEN); + handshake->static_identity = static_identity; + handshake->state = HANDSHAKE_ZEROED; + wg_noise_precompute_static_static(peer); +} + +static void handshake_zero(struct noise_handshake *handshake) +{ + memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN); + memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN); + memset(&handshake->hash, 0, NOISE_HASH_LEN); + memset(&handshake->chaining_key, 0, NOISE_HASH_LEN); + handshake->remote_index = 0; + handshake->state = HANDSHAKE_ZEROED; +} + +void wg_noise_handshake_clear(struct noise_handshake *handshake) +{ + down_write(&handshake->lock); + wg_index_hashtable_remove( + 
handshake->entry.peer->device->index_hashtable, + &handshake->entry); + handshake_zero(handshake); + up_write(&handshake->lock); +} + +static struct noise_keypair *keypair_create(struct wg_peer *peer) +{ + struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL); + + if (unlikely(!keypair)) + return NULL; + spin_lock_init(&keypair->receiving_counter.lock); + keypair->internal_id = atomic64_inc_return(&keypair_counter); + keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; + keypair->entry.peer = peer; + kref_init(&keypair->refcount); + return keypair; +} + +static void keypair_free_rcu(struct rcu_head *rcu) +{ + kfree_sensitive(container_of(rcu, struct noise_keypair, rcu)); +} + +static void keypair_free_kref(struct kref *kref) +{ + struct noise_keypair *keypair = + container_of(kref, struct noise_keypair, refcount); + + net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n", + keypair->entry.peer->device->dev->name, + keypair->internal_id, + keypair->entry.peer->internal_id); + wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable, + &keypair->entry); + call_rcu(&keypair->rcu, keypair_free_rcu); +} + +void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now) +{ + if (unlikely(!keypair)) + return; + if (unlikely(unreference_now)) + wg_index_hashtable_remove( + keypair->entry.peer->device->index_hashtable, + &keypair->entry); + kref_put(&keypair->refcount, keypair_free_kref); +} + +struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), + "Taking noise keypair reference without holding the RCU BH read lock"); + if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount))) + return NULL; + return keypair; +} + +void wg_noise_keypairs_clear(struct noise_keypairs *keypairs) +{ + struct noise_keypair *old; + + spin_lock_bh(&keypairs->keypair_update_lock); + + /* We zero the next_keypair before zeroing the others, so that + * wg_noise_received_with_keypair returns early before subsequent ones + * are zeroed. 
+ */ + old = rcu_dereference_protected(keypairs->next_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + RCU_INIT_POINTER(keypairs->next_keypair, NULL); + wg_noise_keypair_put(old, true); + + old = rcu_dereference_protected(keypairs->previous_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + RCU_INIT_POINTER(keypairs->previous_keypair, NULL); + wg_noise_keypair_put(old, true); + + old = rcu_dereference_protected(keypairs->current_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + RCU_INIT_POINTER(keypairs->current_keypair, NULL); + wg_noise_keypair_put(old, true); + + spin_unlock_bh(&keypairs->keypair_update_lock); +} + +void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + + wg_noise_handshake_clear(&peer->handshake); + wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); + + spin_lock_bh(&peer->keypairs.keypair_update_lock); + keypair = rcu_dereference_protected(peer->keypairs.next_keypair, + lockdep_is_held(&peer->keypairs.keypair_update_lock)); + if (keypair) + keypair->sending.is_valid = false; + keypair = rcu_dereference_protected(peer->keypairs.current_keypair, + lockdep_is_held(&peer->keypairs.keypair_update_lock)); + if (keypair) + keypair->sending.is_valid = false; + spin_unlock_bh(&peer->keypairs.keypair_update_lock); +} + +static void add_new_keypair(struct noise_keypairs *keypairs, + struct noise_keypair *new_keypair) +{ + struct noise_keypair *previous_keypair, *next_keypair, *current_keypair; + + spin_lock_bh(&keypairs->keypair_update_lock); + previous_keypair = rcu_dereference_protected(keypairs->previous_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + next_keypair = rcu_dereference_protected(keypairs->next_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + current_keypair = rcu_dereference_protected(keypairs->current_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + if (new_keypair->i_am_the_initiator) { + /* If we're the initiator, it means we've sent a handshake, and + * received a confirmation response, which means this new + * keypair can now be used. + */ + if (next_keypair) { + /* If there already was a next keypair pending, we + * demote it to be the previous keypair, and free the + * existing current. Note that this means KCI can result + * in this transition. It would perhaps be more sound to + * always just get rid of the unused next keypair + * instead of putting it in the previous slot, but this + * might be a bit less robust. Something to think about + * for the future. + */ + RCU_INIT_POINTER(keypairs->next_keypair, NULL); + rcu_assign_pointer(keypairs->previous_keypair, + next_keypair); + wg_noise_keypair_put(current_keypair, true); + } else /* If there wasn't an existing next keypair, we replace + * the previous with the current one. + */ + rcu_assign_pointer(keypairs->previous_keypair, + current_keypair); + /* At this point we can get rid of the old previous keypair, and + * set up the new keypair. + */ + wg_noise_keypair_put(previous_keypair, true); + rcu_assign_pointer(keypairs->current_keypair, new_keypair); + } else { + /* If we're the responder, it means we can't use the new keypair + * until we receive confirmation via the first data packet, so + * we get rid of the existing previous one, the possibly + * existing next one, and slide in the new next one. 
+ */ + rcu_assign_pointer(keypairs->next_keypair, new_keypair); + wg_noise_keypair_put(next_keypair, true); + RCU_INIT_POINTER(keypairs->previous_keypair, NULL); + wg_noise_keypair_put(previous_keypair, true); + } + spin_unlock_bh(&keypairs->keypair_update_lock); +} + +bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, + struct noise_keypair *received_keypair) +{ + struct noise_keypair *old_keypair; + bool key_is_new; + + /* We first check without taking the spinlock. */ + key_is_new = received_keypair == + rcu_access_pointer(keypairs->next_keypair); + if (likely(!key_is_new)) + return false; + + spin_lock_bh(&keypairs->keypair_update_lock); + /* After locking, we double check that things didn't change from + * beneath us. + */ + if (unlikely(received_keypair != + rcu_dereference_protected(keypairs->next_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)))) { + spin_unlock_bh(&keypairs->keypair_update_lock); + return false; + } + + /* When we've finally received the confirmation, we slide the next + * into the current, the current into the previous, and get rid of + * the old previous. + */ + old_keypair = rcu_dereference_protected(keypairs->previous_keypair, + lockdep_is_held(&keypairs->keypair_update_lock)); + rcu_assign_pointer(keypairs->previous_keypair, + rcu_dereference_protected(keypairs->current_keypair, + lockdep_is_held(&keypairs->keypair_update_lock))); + wg_noise_keypair_put(old_keypair, true); + rcu_assign_pointer(keypairs->current_keypair, received_keypair); + RCU_INIT_POINTER(keypairs->next_keypair, NULL); + + spin_unlock_bh(&keypairs->keypair_update_lock); + return true; +} + +/* Must hold static_identity->lock */ +void wg_noise_set_static_identity_private_key( + struct noise_static_identity *static_identity, + const u8 private_key[NOISE_PUBLIC_KEY_LEN]) +{ + memcpy(static_identity->static_private, private_key, + NOISE_PUBLIC_KEY_LEN); + curve25519_clamp_secret(static_identity->static_private); + static_identity->has_identity = curve25519_generate_public( + static_identity->static_public, private_key); +} + +/* This is Hugo Krawczyk's HKDF: + * - https://eprint.iacr.org/2010/264.pdf + * - https://tools.ietf.org/html/rfc5869 + */ +static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, + size_t first_len, size_t second_len, size_t third_len, + size_t data_len, const u8 chaining_key[NOISE_HASH_LEN]) +{ + u8 output[BLAKE2S_HASH_SIZE + 1]; + u8 secret[BLAKE2S_HASH_SIZE]; + + WARN_ON(IS_ENABLED(DEBUG) && + (first_len > BLAKE2S_HASH_SIZE || + second_len > BLAKE2S_HASH_SIZE || + third_len > BLAKE2S_HASH_SIZE || + ((second_len || second_dst || third_len || third_dst) && + (!first_len || !first_dst)) || + ((third_len || third_dst) && (!second_len || !second_dst)))); + + /* Extract entropy from data into secret */ + blake2s_hmac(secret, data, chaining_key, BLAKE2S_HASH_SIZE, data_len, + NOISE_HASH_LEN); + + if (!first_dst || !first_len) + goto out; + + /* Expand first key: key = secret, data = 0x1 */ + output[0] = 1; + blake2s_hmac(output, output, secret, BLAKE2S_HASH_SIZE, 1, + BLAKE2S_HASH_SIZE); + memcpy(first_dst, output, first_len); + + if (!second_dst || !second_len) + goto out; + + /* Expand second key: key = secret, data = first-key || 0x2 */ + output[BLAKE2S_HASH_SIZE] = 2; + blake2s_hmac(output, output, secret, BLAKE2S_HASH_SIZE, + BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); + memcpy(second_dst, output, second_len); + + if (!third_dst || !third_len) + goto out; + + /* Expand third key: key = secret, data = second-key || 0x3 */ 
+ output[BLAKE2S_HASH_SIZE] = 3; + blake2s_hmac(output, output, secret, BLAKE2S_HASH_SIZE, + BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); + memcpy(third_dst, output, third_len); + +out: + /* Clear sensitive data from stack */ + memzero_explicit(secret, BLAKE2S_HASH_SIZE); + memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); +} + +static void derive_keys(struct noise_symmetric_key *first_dst, + struct noise_symmetric_key *second_dst, + const u8 chaining_key[NOISE_HASH_LEN]) +{ + u64 birthdate = ktime_get_coarse_boottime_ns(); + kdf(first_dst->key, second_dst->key, NULL, NULL, + NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, + chaining_key); + first_dst->birthdate = second_dst->birthdate = birthdate; + first_dst->is_valid = second_dst->is_valid = true; +} + +static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 private[NOISE_PUBLIC_KEY_LEN], + const u8 public[NOISE_PUBLIC_KEY_LEN]) +{ + u8 dh_calculation[NOISE_PUBLIC_KEY_LEN]; + + if (unlikely(!curve25519(dh_calculation, private, public))) + return false; + kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key); + memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN); + return true; +} + +static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) +{ + static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; + if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN))) + return false; + kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, + chaining_key); + return true; +} + +static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) +{ + struct blake2s_state blake; + + blake2s_init(&blake, NOISE_HASH_LEN); + blake2s_update(&blake, hash, NOISE_HASH_LEN); + blake2s_update(&blake, src, src_len); + blake2s_final(&blake, hash); +} + +static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN], + u8 key[NOISE_SYMMETRIC_KEY_LEN], + const u8 psk[NOISE_SYMMETRIC_KEY_LEN]) +{ + u8 temp_hash[NOISE_HASH_LEN]; + + kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN, + NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key); + mix_hash(hash, temp_hash, NOISE_HASH_LEN); + memzero_explicit(temp_hash, NOISE_HASH_LEN); +} + +static void handshake_init(u8 chaining_key[NOISE_HASH_LEN], + u8 hash[NOISE_HASH_LEN], + const u8 remote_static[NOISE_PUBLIC_KEY_LEN]) +{ + memcpy(hash, handshake_init_hash, NOISE_HASH_LEN); + memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN); + mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN); +} + +static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext, + size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], + u8 hash[NOISE_HASH_LEN]) +{ + chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash, + NOISE_HASH_LEN, + 0 /* Always zero for Noise_IK */, key); + mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len)); +} + +static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext, + size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], + u8 hash[NOISE_HASH_LEN]) +{ + if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len, + hash, NOISE_HASH_LEN, + 0 /* Always zero for Noise_IK */, key)) + return false; + mix_hash(hash, src_ciphertext, src_len); + return true; +} + +static void message_ephemeral(u8 
ephemeral_dst[NOISE_PUBLIC_KEY_LEN], + const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN], + u8 chaining_key[NOISE_HASH_LEN], + u8 hash[NOISE_HASH_LEN]) +{ + if (ephemeral_dst != ephemeral_src) + memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN); + mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN); + kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0, + NOISE_PUBLIC_KEY_LEN, chaining_key); +} + +static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN]) +{ + struct timespec64 now; + + ktime_get_real_ts64(&now); + + /* In order to prevent some sort of infoleak from precise timers, we + * round down the nanoseconds part to the closest rounded-down power of + * two to the maximum initiations per second allowed anyway by the + * implementation. + */ + now.tv_nsec = ALIGN_DOWN(now.tv_nsec, + rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND)); + + /* https://cr.yp.to/libtai/tai64.html */ + *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec); + *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec); +} + +bool +wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, + struct noise_handshake *handshake) +{ + u8 timestamp[NOISE_TIMESTAMP_LEN]; + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + bool ret = false; + + /* We need to wait for crng _before_ taking any locks, since + * curve25519_generate_secret uses get_random_bytes_wait. + */ + wait_for_random_bytes(); + + down_read(&handshake->static_identity->lock); + down_write(&handshake->lock); + + if (unlikely(!handshake->static_identity->has_identity)) + goto out; + + dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION); + + handshake_init(handshake->chaining_key, handshake->hash, + handshake->remote_static); + + /* e */ + curve25519_generate_secret(handshake->ephemeral_private); + if (!curve25519_generate_public(dst->unencrypted_ephemeral, + handshake->ephemeral_private)) + goto out; + message_ephemeral(dst->unencrypted_ephemeral, + dst->unencrypted_ephemeral, handshake->chaining_key, + handshake->hash); + + /* es */ + if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private, + handshake->remote_static)) + goto out; + + /* s */ + message_encrypt(dst->encrypted_static, + handshake->static_identity->static_public, + NOISE_PUBLIC_KEY_LEN, key, handshake->hash); + + /* ss */ + if (!mix_precomputed_dh(handshake->chaining_key, key, + handshake->precomputed_static_static)) + goto out; + + /* {t} */ + tai64n_now(timestamp); + message_encrypt(dst->encrypted_timestamp, timestamp, + NOISE_TIMESTAMP_LEN, key, handshake->hash); + + dst->sender_index = wg_index_hashtable_insert( + handshake->entry.peer->device->index_hashtable, + &handshake->entry); + + handshake->state = HANDSHAKE_CREATED_INITIATION; + ret = true; + +out: + up_write(&handshake->lock); + up_read(&handshake->static_identity->lock); + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + return ret; +} + +struct wg_peer * +wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, + struct wg_device *wg) +{ + struct wg_peer *peer = NULL, *ret_peer = NULL; + struct noise_handshake *handshake; + bool replay_attack, flood_attack; + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + u8 chaining_key[NOISE_HASH_LEN]; + u8 hash[NOISE_HASH_LEN]; + u8 s[NOISE_PUBLIC_KEY_LEN]; + u8 e[NOISE_PUBLIC_KEY_LEN]; + u8 t[NOISE_TIMESTAMP_LEN]; + u64 initiation_consumption; + + down_read(&wg->static_identity.lock); + if (unlikely(!wg->static_identity.has_identity)) + goto out; + + handshake_init(chaining_key, hash, 
wg->static_identity.static_public); + + /* e */ + message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); + + /* es */ + if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e)) + goto out; + + /* s */ + if (!message_decrypt(s, src->encrypted_static, + sizeof(src->encrypted_static), key, hash)) + goto out; + + /* Lookup which peer we're actually talking to */ + peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s); + if (!peer) + goto out; + handshake = &peer->handshake; + + /* ss */ + if (!mix_precomputed_dh(chaining_key, key, + handshake->precomputed_static_static)) + goto out; + + /* {t} */ + if (!message_decrypt(t, src->encrypted_timestamp, + sizeof(src->encrypted_timestamp), key, hash)) + goto out; + + down_read(&handshake->lock); + replay_attack = memcmp(t, handshake->latest_timestamp, + NOISE_TIMESTAMP_LEN) <= 0; + flood_attack = (s64)handshake->last_initiation_consumption + + NSEC_PER_SEC / INITIATIONS_PER_SECOND > + (s64)ktime_get_coarse_boottime_ns(); + up_read(&handshake->lock); + if (replay_attack || flood_attack) + goto out; + + /* Success! Copy everything to peer */ + down_write(&handshake->lock); + memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); + if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0) + memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN); + memcpy(handshake->hash, hash, NOISE_HASH_LEN); + memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); + handshake->remote_index = src->sender_index; + initiation_consumption = ktime_get_coarse_boottime_ns(); + if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) + handshake->last_initiation_consumption = initiation_consumption; + handshake->state = HANDSHAKE_CONSUMED_INITIATION; + up_write(&handshake->lock); + ret_peer = peer; + +out: + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + memzero_explicit(hash, NOISE_HASH_LEN); + memzero_explicit(chaining_key, NOISE_HASH_LEN); + up_read(&wg->static_identity.lock); + if (!ret_peer) + wg_peer_put(peer); + return ret_peer; +} + +bool wg_noise_handshake_create_response(struct message_handshake_response *dst, + struct noise_handshake *handshake) +{ + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + bool ret = false; + + /* We need to wait for crng _before_ taking any locks, since + * curve25519_generate_secret uses get_random_bytes_wait. 
+ */ + wait_for_random_bytes(); + + down_read(&handshake->static_identity->lock); + down_write(&handshake->lock); + + if (handshake->state != HANDSHAKE_CONSUMED_INITIATION) + goto out; + + dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE); + dst->receiver_index = handshake->remote_index; + + /* e */ + curve25519_generate_secret(handshake->ephemeral_private); + if (!curve25519_generate_public(dst->unencrypted_ephemeral, + handshake->ephemeral_private)) + goto out; + message_ephemeral(dst->unencrypted_ephemeral, + dst->unencrypted_ephemeral, handshake->chaining_key, + handshake->hash); + + /* ee */ + if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, + handshake->remote_ephemeral)) + goto out; + + /* se */ + if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, + handshake->remote_static)) + goto out; + + /* psk */ + mix_psk(handshake->chaining_key, handshake->hash, key, + handshake->preshared_key); + + /* {} */ + message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash); + + dst->sender_index = wg_index_hashtable_insert( + handshake->entry.peer->device->index_hashtable, + &handshake->entry); + + handshake->state = HANDSHAKE_CREATED_RESPONSE; + ret = true; + +out: + up_write(&handshake->lock); + up_read(&handshake->static_identity->lock); + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + return ret; +} + +struct wg_peer * +wg_noise_handshake_consume_response(struct message_handshake_response *src, + struct wg_device *wg) +{ + enum noise_handshake_state state = HANDSHAKE_ZEROED; + struct wg_peer *peer = NULL, *ret_peer = NULL; + struct noise_handshake *handshake; + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + u8 hash[NOISE_HASH_LEN]; + u8 chaining_key[NOISE_HASH_LEN]; + u8 e[NOISE_PUBLIC_KEY_LEN]; + u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; + u8 static_private[NOISE_PUBLIC_KEY_LEN]; + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; + + down_read(&wg->static_identity.lock); + + if (unlikely(!wg->static_identity.has_identity)) + goto out; + + handshake = (struct noise_handshake *)wg_index_hashtable_lookup( + wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE, + src->receiver_index, &peer); + if (unlikely(!handshake)) + goto out; + + down_read(&handshake->lock); + state = handshake->state; + memcpy(hash, handshake->hash, NOISE_HASH_LEN); + memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); + memcpy(ephemeral_private, handshake->ephemeral_private, + NOISE_PUBLIC_KEY_LEN); + memcpy(preshared_key, handshake->preshared_key, + NOISE_SYMMETRIC_KEY_LEN); + up_read(&handshake->lock); + + if (state != HANDSHAKE_CREATED_INITIATION) + goto fail; + + /* e */ + message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); + + /* ee */ + if (!mix_dh(chaining_key, NULL, ephemeral_private, e)) + goto fail; + + /* se */ + if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e)) + goto fail; + + /* psk */ + mix_psk(chaining_key, hash, key, preshared_key); + + /* {} */ + if (!message_decrypt(NULL, src->encrypted_nothing, + sizeof(src->encrypted_nothing), key, hash)) + goto fail; + + /* Success! Copy everything to peer */ + down_write(&handshake->lock); + /* It's important to check that the state is still the same, while we + * have an exclusive lock. 
+ */ + if (handshake->state != state) { + up_write(&handshake->lock); + goto fail; + } + memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); + memcpy(handshake->hash, hash, NOISE_HASH_LEN); + memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); + handshake->remote_index = src->sender_index; + handshake->state = HANDSHAKE_CONSUMED_RESPONSE; + up_write(&handshake->lock); + ret_peer = peer; + goto out; + +fail: + wg_peer_put(peer); +out: + memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); + memzero_explicit(hash, NOISE_HASH_LEN); + memzero_explicit(chaining_key, NOISE_HASH_LEN); + memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); + memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); + memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); + up_read(&wg->static_identity.lock); + return ret_peer; +} + +bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, + struct noise_keypairs *keypairs) +{ + struct noise_keypair *new_keypair; + bool ret = false; + + down_write(&handshake->lock); + if (handshake->state != HANDSHAKE_CREATED_RESPONSE && + handshake->state != HANDSHAKE_CONSUMED_RESPONSE) + goto out; + + new_keypair = keypair_create(handshake->entry.peer); + if (!new_keypair) + goto out; + new_keypair->i_am_the_initiator = handshake->state == + HANDSHAKE_CONSUMED_RESPONSE; + new_keypair->remote_index = handshake->remote_index; + + if (new_keypair->i_am_the_initiator) + derive_keys(&new_keypair->sending, &new_keypair->receiving, + handshake->chaining_key); + else + derive_keys(&new_keypair->receiving, &new_keypair->sending, + handshake->chaining_key); + + handshake_zero(handshake); + rcu_read_lock_bh(); + if (likely(!READ_ONCE(container_of(handshake, struct wg_peer, + handshake)->is_dead))) { + add_new_keypair(keypairs, new_keypair); + net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n", + handshake->entry.peer->device->dev->name, + new_keypair->internal_id, + handshake->entry.peer->internal_id); + ret = wg_index_hashtable_replace( + handshake->entry.peer->device->index_hashtable, + &handshake->entry, &new_keypair->entry); + } else { + kfree_sensitive(new_keypair); + } + rcu_read_unlock_bh(); + +out: + up_write(&handshake->lock); + return ret; +} diff --git a/net/wireguard/noise.h b/net/wireguard/noise.h new file mode 100644 index 000000000000..c527253dba80 --- /dev/null +++ b/net/wireguard/noise.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ +#ifndef _WG_NOISE_H +#define _WG_NOISE_H + +#include "messages.h" +#include "peerlookup.h" + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/atomic.h> +#include <linux/rwsem.h> +#include <linux/mutex.h> +#include <linux/kref.h> + +struct noise_replay_counter { + u64 counter; + spinlock_t lock; + unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; +}; + +struct noise_symmetric_key { + u8 key[NOISE_SYMMETRIC_KEY_LEN]; + u64 birthdate; + bool is_valid; +}; + +struct noise_keypair { + struct index_hashtable_entry entry; + struct noise_symmetric_key sending; + atomic64_t sending_counter; + struct noise_symmetric_key receiving; + struct noise_replay_counter receiving_counter; + __le32 remote_index; + bool i_am_the_initiator; + struct kref refcount; + struct rcu_head rcu; + u64 internal_id; +}; + +struct noise_keypairs { + struct noise_keypair __rcu *current_keypair; + struct noise_keypair __rcu *previous_keypair; + struct noise_keypair __rcu *next_keypair; + spinlock_t keypair_update_lock; +}; + +struct noise_static_identity { + u8 static_public[NOISE_PUBLIC_KEY_LEN]; + u8 static_private[NOISE_PUBLIC_KEY_LEN]; + struct rw_semaphore lock; + bool has_identity; +}; + +enum noise_handshake_state { + HANDSHAKE_ZEROED, + HANDSHAKE_CREATED_INITIATION, + HANDSHAKE_CONSUMED_INITIATION, + HANDSHAKE_CREATED_RESPONSE, + HANDSHAKE_CONSUMED_RESPONSE +}; + +struct noise_handshake { + struct index_hashtable_entry entry; + + enum noise_handshake_state state; + u64 last_initiation_consumption; + + struct noise_static_identity *static_identity; + + u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; + u8 remote_static[NOISE_PUBLIC_KEY_LEN]; + u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN]; + u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN]; + + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; + + u8 hash[NOISE_HASH_LEN]; + u8 chaining_key[NOISE_HASH_LEN]; + + u8 latest_timestamp[NOISE_TIMESTAMP_LEN]; + __le32 remote_index; + + /* Protects all members except the immutable (after noise_handshake_ + * init): remote_static, precomputed_static_static, static_identity. 
+ */ + struct rw_semaphore lock; +}; + +struct wg_device; + +void wg_noise_init(void); +void wg_noise_handshake_init(struct noise_handshake *handshake, + struct noise_static_identity *static_identity, + const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], + const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], + struct wg_peer *peer); +void wg_noise_handshake_clear(struct noise_handshake *handshake); +static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) +{ + atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() - + (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC); +} + +void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now); +struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair); +void wg_noise_keypairs_clear(struct noise_keypairs *keypairs); +bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, + struct noise_keypair *received_keypair); +void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); + +void wg_noise_set_static_identity_private_key( + struct noise_static_identity *static_identity, + const u8 private_key[NOISE_PUBLIC_KEY_LEN]); +void wg_noise_precompute_static_static(struct wg_peer *peer); + +bool +wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, + struct noise_handshake *handshake); +struct wg_peer * +wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, + struct wg_device *wg); + +bool wg_noise_handshake_create_response(struct message_handshake_response *dst, + struct noise_handshake *handshake); +struct wg_peer * +wg_noise_handshake_consume_response(struct message_handshake_response *src, + struct wg_device *wg); + +bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, + struct noise_keypairs *keypairs); + +#endif /* _WG_NOISE_H */ diff --git a/net/wireguard/peer.c b/net/wireguard/peer.c new file mode 100644 index 000000000000..1acd00ab2fbc --- /dev/null +++ b/net/wireguard/peer.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#include "peer.h" +#include "device.h" +#include "queueing.h" +#include "timers.h" +#include "peerlookup.h" +#include "noise.h" + +#include <linux/kref.h> +#include <linux/lockdep.h> +#include <linux/rcupdate.h> +#include <linux/list.h> + +static struct kmem_cache *peer_cache; +static atomic64_t peer_counter = ATOMIC64_INIT(0); + +struct wg_peer *wg_peer_create(struct wg_device *wg, + const u8 public_key[NOISE_PUBLIC_KEY_LEN], + const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]) +{ + struct wg_peer *peer; + int ret = -ENOMEM; + + lockdep_assert_held(&wg->device_update_lock); + + if (wg->num_peers >= MAX_PEERS_PER_DEVICE) + return ERR_PTR(ret); + + peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); + if (unlikely(!peer)) + return ERR_PTR(ret); + if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) + goto err; + + peer->device = wg; + wg_noise_handshake_init(&peer->handshake, &wg->static_identity, + public_key, preshared_key, peer); + peer->internal_id = atomic64_inc_return(&peer_counter); + peer->serial_work_cpu = nr_cpumask_bits; + wg_cookie_init(&peer->latest_cookie); + wg_timers_init(peer); + wg_cookie_checker_precompute_peer_keys(peer); + spin_lock_init(&peer->keypairs.keypair_update_lock); + INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); + INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); + wg_prev_queue_init(&peer->tx_queue); + wg_prev_queue_init(&peer->rx_queue); + rwlock_init(&peer->endpoint_lock); + kref_init(&peer->refcount); + skb_queue_head_init(&peer->staged_packet_queue); + wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); + set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state); + netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll, + NAPI_POLL_WEIGHT); + napi_enable(&peer->napi); + list_add_tail(&peer->peer_list, &wg->peer_list); + INIT_LIST_HEAD(&peer->allowedips_list); + wg_pubkey_hashtable_add(wg->peer_hashtable, peer); + ++wg->num_peers; + pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); + return peer; + +err: + kmem_cache_free(peer_cache, peer); + return ERR_PTR(ret); +} + +struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), + "Taking peer reference without holding the RCU read lock"); + if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount))) + return NULL; + return peer; +} + +static void peer_make_dead(struct wg_peer *peer) +{ + /* Remove from configuration-time lookup structures. */ + list_del_init(&peer->peer_list); + wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer, + &peer->device->device_update_lock); + wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer); + + /* Mark as dead, so that we don't allow jumping contexts after. */ + WRITE_ONCE(peer->is_dead, true); + + /* The caller must now synchronize_net() for this to take effect. */ +} + +static void peer_remove_after_dead(struct wg_peer *peer) +{ + WARN_ON(!peer->is_dead); + + /* No more keypairs can be created for this peer, since is_dead protects + * add_new_keypair, so we can now destroy existing ones. + */ + wg_noise_keypairs_clear(&peer->keypairs); + + /* Destroy all ongoing timers that were in-flight at the beginning of + * this function. 
+ */ + wg_timers_stop(peer); + + /* The transition between packet encryption/decryption queues isn't + * guarded by is_dead, but each reference's life is strictly bounded by + * two generations: once for parallel crypto and once for serial + * ingestion, so we can simply flush twice, and be sure that we no + * longer have references inside these queues. + */ + + /* a) For encrypt/decrypt. */ + flush_workqueue(peer->device->packet_crypt_wq); + /* b.1) For send (but not receive, since that's napi). */ + flush_workqueue(peer->device->packet_crypt_wq); + /* b.2.1) For receive (but not send, since that's wq). */ + napi_disable(&peer->napi); + /* b.2.1) It's now safe to remove the napi struct, which must be done + * here from process context. + */ + netif_napi_del(&peer->napi); + + /* Ensure any workstructs we own (like transmit_handshake_work or + * clear_peer_work) no longer are in use. + */ + flush_workqueue(peer->device->handshake_send_wq); + + /* After the above flushes, a peer might still be active in a few + * different contexts: 1) from xmit(), before hitting is_dead and + * returning, 2) from wg_packet_consume_data(), before hitting is_dead + * and returning, 3) from wg_receive_handshake_packet() after a point + * where it has processed an incoming handshake packet, but where + * all calls to pass it off to timers fails because of is_dead. We won't + * have new references in (1) eventually, because we're removed from + * allowedips; we won't have new references in (2) eventually, because + * wg_index_hashtable_lookup will always return NULL, since we removed + * all existing keypairs and no more can be created; we won't have new + * references in (3) eventually, because we're removed from the pubkey + * hash table, which allows for a maximum of one handshake response, + * via the still-uncleared index hashtable entry, but not more than one, + * and in wg_cookie_message_consume, the lookup eventually gets a peer + * with a refcount of zero, so no new reference is taken. + */ + + --peer->device->num_peers; + wg_peer_put(peer); +} + +/* We have a separate "remove" function make sure that all active places where + * a peer is currently operating will eventually come to an end and not pass + * their reference onto another context. + */ +void wg_peer_remove(struct wg_peer *peer) +{ + if (unlikely(!peer)) + return; + lockdep_assert_held(&peer->device->device_update_lock); + + peer_make_dead(peer); + synchronize_net(); + peer_remove_after_dead(peer); +} + +void wg_peer_remove_all(struct wg_device *wg) +{ + struct wg_peer *peer, *temp; + LIST_HEAD(dead_peers); + + lockdep_assert_held(&wg->device_update_lock); + + /* Avoid having to traverse individually for each one. */ + wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock); + + list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { + peer_make_dead(peer); + list_add_tail(&peer->peer_list, &dead_peers); + } + synchronize_net(); + list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) + peer_remove_after_dead(peer); +} + +static void rcu_release(struct rcu_head *rcu) +{ + struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); + + dst_cache_destroy(&peer->endpoint_cache); + WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); + + /* The final zeroing takes care of clearing any remaining handshake key + * material and other potentially sensitive information. 
+ */ + memzero_explicit(peer, sizeof(*peer)); + kmem_cache_free(peer_cache, peer); +} + +static void kref_release(struct kref *refcount) +{ + struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount); + + pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + + /* Remove ourself from dynamic runtime lookup structures, now that the + * last reference is gone. + */ + wg_index_hashtable_remove(peer->device->index_hashtable, + &peer->handshake.entry); + + /* Remove any lingering packets that didn't have a chance to be + * transmitted. + */ + wg_packet_purge_staged_packets(peer); + + /* Free the memory used. */ + call_rcu(&peer->rcu, rcu_release); +} + +void wg_peer_put(struct wg_peer *peer) +{ + if (unlikely(!peer)) + return; + kref_put(&peer->refcount, kref_release); +} + +int __init wg_peer_init(void) +{ + peer_cache = KMEM_CACHE(wg_peer, 0); + return peer_cache ? 0 : -ENOMEM; +} + +void wg_peer_uninit(void) +{ + kmem_cache_destroy(peer_cache); +} diff --git a/net/wireguard/peer.h b/net/wireguard/peer.h new file mode 100644 index 000000000000..76e4d3128ad4 --- /dev/null +++ b/net/wireguard/peer.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_PEER_H +#define _WG_PEER_H + +#include "device.h" +#include "noise.h" +#include "cookie.h" + +#include <linux/types.h> +#include <linux/netfilter.h> +#include <linux/spinlock.h> +#include <linux/kref.h> +#include <net/dst_cache.h> + +struct wg_device; + +struct endpoint { + union { + struct sockaddr addr; + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + }; + union { + struct { + struct in_addr src4; + /* Essentially the same as addr6->scope_id */ + int src_if4; + }; + struct in6_addr src6; + }; +}; + +struct wg_peer { + struct wg_device *device; + struct prev_queue tx_queue, rx_queue; + struct sk_buff_head staged_packet_queue; + int serial_work_cpu; + bool is_dead; + struct noise_keypairs keypairs; + struct endpoint endpoint; + struct dst_cache endpoint_cache; + rwlock_t endpoint_lock; + struct noise_handshake handshake; + atomic64_t last_sent_handshake; + struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; + struct cookie latest_cookie; + struct hlist_node pubkey_hash; + u64 rx_bytes, tx_bytes; + struct timer_list timer_retransmit_handshake, timer_send_keepalive; + struct timer_list timer_new_handshake, timer_zero_key_material; + struct timer_list timer_persistent_keepalive; + unsigned int timer_handshake_attempts; + u16 persistent_keepalive_interval; + bool timer_need_another_keepalive; + bool sent_lastminute_handshake; + struct timespec64 walltime_last_handshake; + struct kref refcount; + struct rcu_head rcu; + struct list_head peer_list; + struct list_head allowedips_list; + struct napi_struct napi; + u64 internal_id; +}; + +struct wg_peer *wg_peer_create(struct wg_device *wg, + const u8 public_key[NOISE_PUBLIC_KEY_LEN], + const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]); + +struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer); +static inline struct wg_peer *wg_peer_get(struct wg_peer *peer) +{ + kref_get(&peer->refcount); + return peer; +} +void wg_peer_put(struct wg_peer *peer); +void wg_peer_remove(struct wg_peer *peer); +void wg_peer_remove_all(struct wg_device *wg); + +int wg_peer_init(void); +void wg_peer_uninit(void); + +#endif /* _WG_PEER_H */ diff --git a/net/wireguard/peerlookup.c 
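A note on the memzero_explicit() call that rcu_release() ends with: a plain memset() of memory that is freed immediately afterwards may legally be optimized away, which would leave handshake key material behind in freed memory. A small user-space equivalent of the same idea routes the clear through a volatile function pointer; secure_zero() and free_key() are invented names for this sketch.

#include <string.h>
#include <stdlib.h>

/* The volatile function pointer prevents the compiler from proving the
 * memset is dead and deleting it. */
static void *(*const volatile memset_ptr)(void *, int, size_t) = memset;

static void secure_zero(void *p, size_t len)
{
        memset_ptr(p, 0, len);
}

static void free_key(unsigned char *key, size_t len)
{
        secure_zero(key, len);  /* wipe secret material first ... */
        free(key);              /* ... then release the memory    */
}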
b/net/wireguard/peerlookup.c new file mode 100644 index 000000000000..f2783aa7a88f --- /dev/null +++ b/net/wireguard/peerlookup.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "peerlookup.h" +#include "peer.h" +#include "noise.h" + +static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, + const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) +{ + /* siphash gives us a secure 64bit number based on a random key. Since + * the bits are uniformly distributed, we can then mask off to get the + * bits we need. + */ + const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key); + + return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)]; +} + +struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void) +{ + struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); + + if (!table) + return NULL; + + get_random_bytes(&table->key, sizeof(table->key)); + hash_init(table->hashtable); + mutex_init(&table->lock); + return table; +} + +void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, + struct wg_peer *peer) +{ + mutex_lock(&table->lock); + hlist_add_head_rcu(&peer->pubkey_hash, + pubkey_bucket(table, peer->handshake.remote_static)); + mutex_unlock(&table->lock); +} + +void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, + struct wg_peer *peer) +{ + mutex_lock(&table->lock); + hlist_del_init_rcu(&peer->pubkey_hash); + mutex_unlock(&table->lock); +} + +/* Returns a strong reference to a peer */ +struct wg_peer * +wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, + const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) +{ + struct wg_peer *iter_peer, *peer = NULL; + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), + pubkey_hash) { + if (!memcmp(pubkey, iter_peer->handshake.remote_static, + NOISE_PUBLIC_KEY_LEN)) { + peer = iter_peer; + break; + } + } + peer = wg_peer_get_maybe_zero(peer); + rcu_read_unlock_bh(); + return peer; +} + +static struct hlist_head *index_bucket(struct index_hashtable *table, + const __le32 index) +{ + /* Since the indices are random and thus all bits are uniformly + * distributed, we can find its bucket simply by masking. + */ + return &table->hashtable[(__force u32)index & + (HASH_SIZE(table->hashtable) - 1)]; +} + +struct index_hashtable *wg_index_hashtable_alloc(void) +{ + struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); + + if (!table) + return NULL; + + hash_init(table->hashtable); + spin_lock_init(&table->lock); + return table; +} + +/* At the moment, we limit ourselves to 2^20 total peers, which generally might + * amount to 2^20*3 items in this hashtable. The algorithm below works by + * picking a random number and testing it. We can see that these limits mean we + * usually succeed pretty quickly: + * + * >>> def calculation(tries, size): + * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32)) + * ... + * >>> calculation(1, 2**20 * 3) + * 0.999267578125 + * >>> calculation(2, 2**20 * 3) + * 0.0007318854331970215 + * >>> calculation(3, 2**20 * 3) + * 5.360489012673497e-07 + * >>> calculation(4, 2**20 * 3) + * 3.9261394135792216e-10 + * + * At the moment, we don't do any masking, so this algorithm isn't exactly + * constant time in either the random guessing or in the hash list lookup. We + * could require a minimum of 3 tries, which would successfully mask the + * guessing. 
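The bucket functions above reduce a uniformly distributed hash to an index with a mask rather than a modulo, which only works because DECLARE_HASHTABLE() gives a power-of-two number of buckets. A tiny user-space sketch of that reduction follows; the hash value is a placeholder rather than a real siphash, and 2^11 buckets matches the pubkey table declared in peerlookup.h below.

#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS 11u                          /* 2^11 = 2048 buckets */
#define TABLE_SIZE (1u << TABLE_BITS)

static unsigned int bucket_of(uint64_t hash)
{
        /* Valid only because TABLE_SIZE is a power of two. */
        return (unsigned int)(hash & (TABLE_SIZE - 1));
}

int main(void)
{
        uint64_t fake_hash = 0x9e3779b97f4a7c15ull;     /* stand-in for siphash() */

        printf("bucket %u of %u\n", bucket_of(fake_hash), TABLE_SIZE);
        return 0;
}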
this would not, however, help with the growing hash lengths, which + * is another thing to consider moving forward. + */ + +__le32 wg_index_hashtable_insert(struct index_hashtable *table, + struct index_hashtable_entry *entry) +{ + struct index_hashtable_entry *existing_entry; + + spin_lock_bh(&table->lock); + hlist_del_init_rcu(&entry->index_hash); + spin_unlock_bh(&table->lock); + + rcu_read_lock_bh(); + +search_unused_slot: + /* First we try to find an unused slot, randomly, while unlocked. */ + entry->index = (__force __le32)get_random_u32(); + hlist_for_each_entry_rcu_bh(existing_entry, + index_bucket(table, entry->index), + index_hash) { + if (existing_entry->index == entry->index) + /* If it's already in use, we continue searching. */ + goto search_unused_slot; + } + + /* Once we've found an unused slot, we lock it, and then double-check + * that nobody else stole it from us. + */ + spin_lock_bh(&table->lock); + hlist_for_each_entry_rcu_bh(existing_entry, + index_bucket(table, entry->index), + index_hash) { + if (existing_entry->index == entry->index) { + spin_unlock_bh(&table->lock); + /* If it was stolen, we start over. */ + goto search_unused_slot; + } + } + /* Otherwise, we know we have it exclusively (since we're locked), + * so we insert. + */ + hlist_add_head_rcu(&entry->index_hash, + index_bucket(table, entry->index)); + spin_unlock_bh(&table->lock); + + rcu_read_unlock_bh(); + + return entry->index; +} + +bool wg_index_hashtable_replace(struct index_hashtable *table, + struct index_hashtable_entry *old, + struct index_hashtable_entry *new) +{ + bool ret; + + spin_lock_bh(&table->lock); + ret = !hlist_unhashed(&old->index_hash); + if (unlikely(!ret)) + goto out; + + new->index = old->index; + hlist_replace_rcu(&old->index_hash, &new->index_hash); + + /* Calling init here NULLs out index_hash, and in fact after this + * function returns, it's theoretically possible for this to get + * reinserted elsewhere. That means the RCU lookup below might either + * terminate early or jump between buckets, in which case the packet + * simply gets dropped, which isn't terrible. + */ + INIT_HLIST_NODE(&old->index_hash); +out: + spin_unlock_bh(&table->lock); + return ret; +} + +void wg_index_hashtable_remove(struct index_hashtable *table, + struct index_hashtable_entry *entry) +{ + spin_lock_bh(&table->lock); + hlist_del_init_rcu(&entry->index_hash); + spin_unlock_bh(&table->lock); +} + +/* Returns a strong reference to a entry->peer */ +struct index_hashtable_entry * +wg_index_hashtable_lookup(struct index_hashtable *table, + const enum index_hashtable_type type_mask, + const __le32 index, struct wg_peer **peer) +{ + struct index_hashtable_entry *iter_entry, *entry = NULL; + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), + index_hash) { + if (iter_entry->index == index) { + if (likely(iter_entry->type & type_mask)) + entry = iter_entry; + break; + } + } + if (likely(entry)) { + entry->peer = wg_peer_get_maybe_zero(entry->peer); + if (likely(entry->peer)) + *peer = entry->peer; + else + entry = NULL; + } + rcu_read_unlock_bh(); + return entry; +} diff --git a/net/wireguard/peerlookup.h b/net/wireguard/peerlookup.h new file mode 100644 index 000000000000..ced811797680 --- /dev/null +++ b/net/wireguard/peerlookup.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
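The probabilities worked out in the comment above can be reproduced directly; this is the same arithmetic as the quoted Python, in C, with at most 3 * 2^20 indices in use out of 2^32 possible values.

#include <stdio.h>
#include <math.h>

int main(void)
{
        /* Fraction of the 32-bit index space that can already be occupied. */
        const double load = (3.0 * (1 << 20)) / 4294967296.0;
        int k;

        /* Probability that the first k-1 random picks collide and pick k
         * succeeds: load^(k-1) * (1 - load). */
        for (k = 1; k <= 4; ++k)
                printf("first success on try %d: %.12g\n",
                       k, pow(load, k - 1) * (1.0 - load));
        return 0;
}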
+ */ + +#ifndef _WG_PEERLOOKUP_H +#define _WG_PEERLOOKUP_H + +#include "messages.h" + +#include <linux/hashtable.h> +#include <linux/mutex.h> +#include <linux/siphash.h> + +struct wg_peer; + +struct pubkey_hashtable { + /* TODO: move to rhashtable */ + DECLARE_HASHTABLE(hashtable, 11); + siphash_key_t key; + struct mutex lock; +}; + +struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void); +void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, + struct wg_peer *peer); +void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, + struct wg_peer *peer); +struct wg_peer * +wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, + const u8 pubkey[NOISE_PUBLIC_KEY_LEN]); + +struct index_hashtable { + /* TODO: move to rhashtable */ + DECLARE_HASHTABLE(hashtable, 13); + spinlock_t lock; +}; + +enum index_hashtable_type { + INDEX_HASHTABLE_HANDSHAKE = 1U << 0, + INDEX_HASHTABLE_KEYPAIR = 1U << 1 +}; + +struct index_hashtable_entry { + struct wg_peer *peer; + struct hlist_node index_hash; + enum index_hashtable_type type; + __le32 index; +}; + +struct index_hashtable *wg_index_hashtable_alloc(void); +__le32 wg_index_hashtable_insert(struct index_hashtable *table, + struct index_hashtable_entry *entry); +bool wg_index_hashtable_replace(struct index_hashtable *table, + struct index_hashtable_entry *old, + struct index_hashtable_entry *new); +void wg_index_hashtable_remove(struct index_hashtable *table, + struct index_hashtable_entry *entry); +struct index_hashtable_entry * +wg_index_hashtable_lookup(struct index_hashtable *table, + const enum index_hashtable_type type_mask, + const __le32 index, struct wg_peer **peer); + +#endif /* _WG_PEERLOOKUP_H */ diff --git a/net/wireguard/queueing.c b/net/wireguard/queueing.c new file mode 100644 index 000000000000..8084e7408c0a --- /dev/null +++ b/net/wireguard/queueing.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "queueing.h" +#include <linux/skb_array.h> + +struct multicore_worker __percpu * +wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) +{ + int cpu; + struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); + + if (!worker) + return NULL; + + for_each_possible_cpu(cpu) { + per_cpu_ptr(worker, cpu)->ptr = ptr; + INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function); + } + return worker; +} + +int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, + unsigned int len) +{ + int ret; + + memset(queue, 0, sizeof(*queue)); + ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); + if (ret) + return ret; + queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); + if (!queue->worker) { + ptr_ring_cleanup(&queue->ring, NULL); + return -ENOMEM; + } + return 0; +} + +void wg_packet_queue_free(struct crypt_queue *queue, bool purge) +{ + free_percpu(queue->worker); + WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, purge ? 
__skb_array_destroy_skb : NULL); +} + +#define NEXT(skb) ((skb)->prev) +#define STUB(queue) ((struct sk_buff *)&queue->empty) + +void wg_prev_queue_init(struct prev_queue *queue) +{ + NEXT(STUB(queue)) = NULL; + queue->head = queue->tail = STUB(queue); + queue->peeked = NULL; + atomic_set(&queue->count, 0); + BUILD_BUG_ON( + offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - + offsetof(struct prev_queue, empty) || + offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - + offsetof(struct prev_queue, empty)); +} + +static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) +{ + WRITE_ONCE(NEXT(skb), NULL); + WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); +} + +bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) +{ + if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) + return false; + __wg_prev_queue_enqueue(queue, skb); + return true; +} + +struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) +{ + struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); + + if (tail == STUB(queue)) { + if (!next) + return NULL; + queue->tail = next; + tail = next; + next = smp_load_acquire(&NEXT(next)); + } + if (next) { + queue->tail = next; + atomic_dec(&queue->count); + return tail; + } + if (tail != READ_ONCE(queue->head)) + return NULL; + __wg_prev_queue_enqueue(queue, STUB(queue)); + next = smp_load_acquire(&NEXT(tail)); + if (next) { + queue->tail = next; + atomic_dec(&queue->count); + return tail; + } + return NULL; +} + +#undef NEXT +#undef STUB diff --git a/net/wireguard/queueing.h b/net/wireguard/queueing.h new file mode 100644 index 000000000000..03850c43ebaf --- /dev/null +++ b/net/wireguard/queueing.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
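wg_prev_queue_enqueue() above bounds the queue with atomic_add_unless(): it takes a slot by incrementing the counter, unless the counter has already reached MAX_QUEUED_PACKETS. A user-space equivalent of that primitive, with an invented helper name, is sketched here.

#include <stdatomic.h>
#include <stdbool.h>

/* Add 'delta' to *v unless *v already equals 'unless'; returns true if the
 * add happened. This mirrors the kernel's atomic_add_unless() semantics. */
static bool add_unless(atomic_int *v, int delta, int unless)
{
        int cur = atomic_load(v);

        do {
                if (cur == unless)
                        return false;
        } while (!atomic_compare_exchange_weak(v, &cur, cur + delta));
        return true;
}

/* Usage in the spirit of the enqueue path above:
 *
 *     if (!add_unless(&queue_count, 1, MAX_QUEUED_PACKETS))
 *             return false;            // queue full, drop the packet
 *     // ... actually link the skb into the queue ...
 */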
+ */ + +#ifndef _WG_QUEUEING_H +#define _WG_QUEUEING_H + +#include "peer.h" +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <net/ip_tunnels.h> + +struct wg_device; +struct wg_peer; +struct multicore_worker; +struct crypt_queue; +struct prev_queue; +struct sk_buff; + +/* queueing.c APIs: */ +int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, + unsigned int len); +void wg_packet_queue_free(struct crypt_queue *queue, bool purge); +struct multicore_worker __percpu * +wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); + +/* receive.c APIs: */ +void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb); +void wg_packet_handshake_receive_worker(struct work_struct *work); +/* NAPI poll function: */ +int wg_packet_rx_poll(struct napi_struct *napi, int budget); +/* Workqueue worker: */ +void wg_packet_decrypt_worker(struct work_struct *work); + +/* send.c APIs: */ +void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, + bool is_retry); +void wg_packet_send_handshake_response(struct wg_peer *peer); +void wg_packet_send_handshake_cookie(struct wg_device *wg, + struct sk_buff *initiating_skb, + __le32 sender_index); +void wg_packet_send_keepalive(struct wg_peer *peer); +void wg_packet_purge_staged_packets(struct wg_peer *peer); +void wg_packet_send_staged_packets(struct wg_peer *peer); +/* Workqueue workers: */ +void wg_packet_handshake_send_worker(struct work_struct *work); +void wg_packet_tx_worker(struct work_struct *work); +void wg_packet_encrypt_worker(struct work_struct *work); + +enum packet_state { + PACKET_STATE_UNCRYPTED, + PACKET_STATE_CRYPTED, + PACKET_STATE_DEAD +}; + +struct packet_cb { + u64 nonce; + struct noise_keypair *keypair; + atomic_t state; + u32 mtu; + u8 ds; +}; + +#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) +#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) + +static inline bool wg_check_packet_protocol(struct sk_buff *skb) +{ + __be16 real_protocol = ip_tunnel_parse_protocol(skb); + return real_protocol && skb->protocol == real_protocol; +} + +static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) +{ + const int pfmemalloc = skb->pfmemalloc; + u32 hash = skb->hash; + u8 l4_hash = skb->l4_hash; + u8 sw_hash = skb->sw_hash; + + skb_scrub_packet(skb, true); + memset(&skb->headers_start, 0, + offsetof(struct sk_buff, headers_end) - + offsetof(struct sk_buff, headers_start)); + skb->pfmemalloc = pfmemalloc; + if (encapsulating) { + skb->hash = hash; + skb->l4_hash = l4_hash; + skb->sw_hash = sw_hash; + } + skb->queue_mapping = 0; + skb->nohdr = 0; + skb->peeked = 0; + skb->mac_len = 0; + skb->dev = NULL; +#ifdef CONFIG_NET_SCHED + skb->tc_index = 0; +#endif + skb_reset_redirect(skb); + skb->hdr_len = skb_headroom(skb); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + skb_probe_transport_header(skb); + skb_reset_inner_headers(skb); +} + +static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) +{ + unsigned int cpu = *stored_cpu, cpu_index, i; + + if (unlikely(cpu == nr_cpumask_bits || + !cpumask_test_cpu(cpu, cpu_online_mask))) { + cpu_index = id % cpumask_weight(cpu_online_mask); + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < cpu_index; ++i) + cpu = cpumask_next(cpu, cpu_online_mask); + *stored_cpu = cpu; + } + return cpu; +} + +/* This function is racy, in the sense that next is unlocked, so it could return + * the same CPU 
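wg_cpumask_choose_online() above gives each peer a stable CPU for its serial work: it derives a CPU from the peer's id and only recomputes when the cached choice goes offline. A user-space sketch of the same selection follows, with a 64-bit mask standing in for cpu_online_mask; nth_set_bit() and choose_online() are invented names.

#include <stdint.h>

#define CPU_NONE (-1)

static int nth_set_bit(uint64_t mask, unsigned int n)
{
        int cpu;

        for (cpu = 0; cpu < 64; ++cpu) {
                if (!(mask & (1ull << cpu)))
                        continue;
                if (n-- == 0)
                        return cpu;
        }
        return CPU_NONE;
}

static int choose_online(int *stored_cpu, unsigned int id, uint64_t online_mask)
{
        unsigned int weight = (unsigned int)__builtin_popcountll(online_mask);

        if (weight == 0)
                return CPU_NONE;
        /* Keep the cached CPU as long as it is still online. */
        if (*stored_cpu == CPU_NONE ||
            !(online_mask & (1ull << *stored_cpu)))
                *stored_cpu = nth_set_bit(online_mask, id % weight);
        return *stored_cpu;
}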
twice. A race-free version of this would be to instead store an + * atomic sequence number, do an increment-and-return, and then iterate through + * every possible CPU until we get to that index -- choose_cpu. However that's + * a bit slower, and it doesn't seem like this potential race actually + * introduces any performance loss, so we live with it. + */ +static inline int wg_cpumask_next_online(int *next) +{ + int cpu = *next; + + while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask))) + cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; + *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; + return cpu; +} + +void wg_prev_queue_init(struct prev_queue *queue); + +/* Multi producer */ +bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); + +/* Single consumer */ +struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); + +/* Single consumer */ +static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) +{ + if (queue->peeked) + return queue->peeked; + queue->peeked = wg_prev_queue_dequeue(queue); + return queue->peeked; +} + +/* Single consumer */ +static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) +{ + queue->peeked = NULL; +} + +static inline int wg_queue_enqueue_per_device_and_peer( + struct crypt_queue *device_queue, struct prev_queue *peer_queue, + struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) +{ + int cpu; + + atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); + /* We first queue this up for the peer ingestion, but the consumer + * will wait for the state to change to CRYPTED or DEAD before. + */ + if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) + return -ENOSPC; + + /* Then we queue it up in the device queue, which consumes the + * packet as soon as it can. + */ + cpu = wg_cpumask_next_online(next_cpu); + if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) + return -EPIPE; + queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); + return 0; +} + +static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) +{ + /* We take a reference, because as soon as we call atomic_set, the + * peer can be freed from below us. + */ + struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); + + atomic_set_release(&PACKET_CB(skb)->state, state); + queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), + peer->device->packet_crypt_wq, &peer->transmit_packet_work); + wg_peer_put(peer); +} + +static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) +{ + /* We take a reference, because as soon as we call atomic_set, the + * peer can be freed from below us. + */ + struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); + + atomic_set_release(&PACKET_CB(skb)->state, state); + napi_schedule(&peer->napi); + wg_peer_put(peer); +} + +#ifdef DEBUG +bool wg_packet_counter_selftest(void); +#endif + +#endif /* _WG_QUEUEING_H */ diff --git a/net/wireguard/ratelimiter.c b/net/wireguard/ratelimiter.c new file mode 100644 index 000000000000..ecee41f528a5 --- /dev/null +++ b/net/wireguard/ratelimiter.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifdef COMPAT_CANNOT_DEPRECIATE_BH_RCU +/* We normally alias all non-_bh functions to the _bh ones in the compat layer, + * but that's not appropriate here, where we actually do want non-_bh ones. 
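The enqueue helpers above publish a packet's fate with atomic_set_release(), and the consumers (the tx worker and the NAPI poll function in receive.c) read it back with acquire semantics, so observing CRYPTED or DEAD also guarantees that the in-place crypto results are visible. A C11 sketch of that release/acquire handoff; struct pkt and the function names are invented for the illustration.

#include <stdatomic.h>
#include <stdbool.h>

enum { PKT_UNCRYPTED, PKT_CRYPTED, PKT_DEAD };

struct pkt {
        unsigned char payload[1500];
        _Atomic int state;
};

static void worker_finish(struct pkt *p, bool ok)
{
        /* ... encrypt or decrypt p->payload in place ... */
        atomic_store_explicit(&p->state, ok ? PKT_CRYPTED : PKT_DEAD,
                              memory_order_release);
}

static bool consumer_ready(struct pkt *p)
{
        /* Pairs with the release store above: if CRYPTED or DEAD is
         * observed, all earlier writes to payload are visible here. */
        return atomic_load_explicit(&p->state, memory_order_acquire) !=
               PKT_UNCRYPTED;
}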
+ */ +#undef synchronize_rcu +#define synchronize_rcu old_synchronize_rcu +#undef call_rcu +#define call_rcu old_call_rcu +#undef rcu_barrier +#define rcu_barrier old_rcu_barrier +#endif + +#include "ratelimiter.h" +#include <linux/siphash.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <net/ip.h> + +static struct kmem_cache *entry_cache; +static hsiphash_key_t key; +static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock"); +static DEFINE_MUTEX(init_lock); +static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */ +static atomic_t total_entries = ATOMIC_INIT(0); +static unsigned int max_entries, table_size; +static void wg_ratelimiter_gc_entries(struct work_struct *); +static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries); +static struct hlist_head *table_v4; +#if IS_ENABLED(CONFIG_IPV6) +static struct hlist_head *table_v6; +#endif + +struct ratelimiter_entry { + u64 last_time_ns, tokens, ip; + void *net; + spinlock_t lock; + struct hlist_node hash; + struct rcu_head rcu; +}; + +enum { + PACKETS_PER_SECOND = 20, + PACKETS_BURSTABLE = 5, + PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND, + TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE +}; + +static void entry_free(struct rcu_head *rcu) +{ + kmem_cache_free(entry_cache, + container_of(rcu, struct ratelimiter_entry, rcu)); + atomic_dec(&total_entries); +} + +static void entry_uninit(struct ratelimiter_entry *entry) +{ + hlist_del_rcu(&entry->hash); + call_rcu(&entry->rcu, entry_free); +} + +/* Calling this function with a NULL work uninits all entries. */ +static void wg_ratelimiter_gc_entries(struct work_struct *work) +{ + const u64 now = ktime_get_coarse_boottime_ns(); + struct ratelimiter_entry *entry; + struct hlist_node *temp; + unsigned int i; + + for (i = 0; i < table_size; ++i) { + spin_lock(&table_lock); + hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) { + if (unlikely(!work) || + now - entry->last_time_ns > NSEC_PER_SEC) + entry_uninit(entry); + } +#if IS_ENABLED(CONFIG_IPV6) + hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) { + if (unlikely(!work) || + now - entry->last_time_ns > NSEC_PER_SEC) + entry_uninit(entry); + } +#endif + spin_unlock(&table_lock); + if (likely(work)) + cond_resched(); + } + if (likely(work)) + queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); +} + +bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net) +{ + /* We only take the bottom half of the net pointer, so that we can hash + * 3 words in the end. This way, siphash's len param fits into the final + * u32, and we don't incur an extra round. + */ + const u32 net_word = (unsigned long)net; + struct ratelimiter_entry *entry; + struct hlist_head *bucket; + u64 ip; + + if (skb->protocol == htons(ETH_P_IP)) { + ip = (u64 __force)ip_hdr(skb)->saddr; + bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) & + (table_size - 1)]; + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) { + /* Only use 64 bits, so as to ratelimit the whole /64. */ + memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip)); + bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) & + (table_size - 1)]; + } +#endif + else + return false; + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, bucket, hash) { + if (entry->net == net && entry->ip == ip) { + u64 now, tokens; + bool ret; + /* Quasi-inspired by nft_limit.c, but this is actually a + * slightly different algorithm. 
Namely, we incorporate + * the burst as part of the maximum tokens, rather than + * as part of the rate. + */ + spin_lock(&entry->lock); + now = ktime_get_coarse_boottime_ns(); + tokens = min_t(u64, TOKEN_MAX, + entry->tokens + now - + entry->last_time_ns); + entry->last_time_ns = now; + ret = tokens >= PACKET_COST; + entry->tokens = ret ? tokens - PACKET_COST : tokens; + spin_unlock(&entry->lock); + rcu_read_unlock(); + return ret; + } + } + rcu_read_unlock(); + + if (atomic_inc_return(&total_entries) > max_entries) + goto err_oom; + + entry = kmem_cache_alloc(entry_cache, GFP_KERNEL); + if (unlikely(!entry)) + goto err_oom; + + entry->net = net; + entry->ip = ip; + INIT_HLIST_NODE(&entry->hash); + spin_lock_init(&entry->lock); + entry->last_time_ns = ktime_get_coarse_boottime_ns(); + entry->tokens = TOKEN_MAX - PACKET_COST; + spin_lock(&table_lock); + hlist_add_head_rcu(&entry->hash, bucket); + spin_unlock(&table_lock); + return true; + +err_oom: + atomic_dec(&total_entries); + return false; +} + +int wg_ratelimiter_init(void) +{ + mutex_lock(&init_lock); + if (++init_refcnt != 1) + goto out; + + entry_cache = KMEM_CACHE(ratelimiter_entry, 0); + if (!entry_cache) + goto err; + + /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting, + * but what it shares in common is that it uses a massive hashtable. So, + * we borrow their wisdom about good table sizes on different systems + * dependent on RAM. This calculation here comes from there. + */ + table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 : + max_t(unsigned long, 16, roundup_pow_of_two( + (totalram_pages() << PAGE_SHIFT) / + (1U << 14) / sizeof(struct hlist_head))); + max_entries = table_size * 8; + + table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL); + if (unlikely(!table_v4)) + goto err_kmemcache; + +#if IS_ENABLED(CONFIG_IPV6) + table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL); + if (unlikely(!table_v6)) { + kvfree(table_v4); + goto err_kmemcache; + } +#endif + + queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); + get_random_bytes(&key, sizeof(key)); +out: + mutex_unlock(&init_lock); + return 0; + +err_kmemcache: + kmem_cache_destroy(entry_cache); +err: + --init_refcnt; + mutex_unlock(&init_lock); + return -ENOMEM; +} + +void wg_ratelimiter_uninit(void) +{ + mutex_lock(&init_lock); + if (!init_refcnt || --init_refcnt) + goto out; + + cancel_delayed_work_sync(&gc_work); + wg_ratelimiter_gc_entries(NULL); + rcu_barrier(); + kvfree(table_v4); +#if IS_ENABLED(CONFIG_IPV6) + kvfree(table_v6); +#endif + kmem_cache_destroy(entry_cache); +out: + mutex_unlock(&init_lock); +} + +#include "selftest/ratelimiter.c" diff --git a/net/wireguard/ratelimiter.h b/net/wireguard/ratelimiter.h new file mode 100644 index 000000000000..83067f71ea99 --- /dev/null +++ b/net/wireguard/ratelimiter.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
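The rate limiter above is a token bucket whose tokens are nanoseconds of elapsed time: each admitted packet costs NSEC_PER_SEC / 20 tokens and the bucket never holds more than five packets' worth, so it sustains 20 packets per second per source with bursts of up to 5. The same arithmetic as a self-contained user-space sketch (single-threaded, so without the per-entry spinlock):

#include <stdint.h>
#include <stdbool.h>

#define NSEC_PER_SEC        1000000000ull
#define PACKETS_PER_SECOND  20
#define PACKETS_BURSTABLE   5
#define PACKET_COST         (NSEC_PER_SEC / PACKETS_PER_SECOND)   /* 50 ms of tokens  */
#define TOKEN_MAX           (PACKET_COST * PACKETS_BURSTABLE)     /* 250 ms of tokens */

struct bucket {
        uint64_t tokens;
        uint64_t last_time_ns;
};

static bool bucket_allow(struct bucket *b, uint64_t now_ns)
{
        uint64_t tokens = b->tokens + (now_ns - b->last_time_ns);

        if (tokens > TOKEN_MAX)
                tokens = TOKEN_MAX;      /* the burst is capped, not the rate */
        b->last_time_ns = now_ns;

        if (tokens < PACKET_COST) {
                b->tokens = tokens;      /* not enough credit: drop */
                return false;
        }
        b->tokens = tokens - PACKET_COST;
        return true;
}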
+ */ + +#ifndef _WG_RATELIMITER_H +#define _WG_RATELIMITER_H + +#include <linux/skbuff.h> + +int wg_ratelimiter_init(void); +void wg_ratelimiter_uninit(void); +bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net); + +#ifdef DEBUG +bool wg_ratelimiter_selftest(void); +#endif + +#endif /* _WG_RATELIMITER_H */ diff --git a/net/wireguard/receive.c b/net/wireguard/receive.c new file mode 100644 index 000000000000..214889edb48e --- /dev/null +++ b/net/wireguard/receive.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "queueing.h" +#include "device.h" +#include "peer.h" +#include "timers.h" +#include "messages.h" +#include "cookie.h" +#include "socket.h" + +#include <linux/simd.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/udp.h> +#include <net/ip_tunnels.h> + +/* Must be called with bh disabled. */ +static void update_rx_stats(struct wg_peer *peer, size_t len) +{ + struct pcpu_sw_netstats *tstats = + get_cpu_ptr(peer->device->dev->tstats); + + u64_stats_update_begin(&tstats->syncp); + ++tstats->rx_packets; + tstats->rx_bytes += len; + peer->rx_bytes += len; + u64_stats_update_end(&tstats->syncp); + put_cpu_ptr(tstats); +} + +#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type) + +static size_t validate_header_len(struct sk_buff *skb) +{ + if (unlikely(skb->len < sizeof(struct message_header))) + return 0; + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && + skb->len >= MESSAGE_MINIMUM_LENGTH) + return sizeof(struct message_data); + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && + skb->len == sizeof(struct message_handshake_initiation)) + return sizeof(struct message_handshake_initiation); + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && + skb->len == sizeof(struct message_handshake_response)) + return sizeof(struct message_handshake_response); + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && + skb->len == sizeof(struct message_handshake_cookie)) + return sizeof(struct message_handshake_cookie); + return 0; +} + +static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) +{ + size_t data_offset, data_len, header_len; + struct udphdr *udp; + + if (unlikely(!wg_check_packet_protocol(skb) || + skb_transport_header(skb) < skb->head || + (skb_transport_header(skb) + sizeof(struct udphdr)) > + skb_tail_pointer(skb))) + return -EINVAL; /* Bogus IP header */ + udp = udp_hdr(skb); + data_offset = (u8 *)udp - skb->data; + if (unlikely(data_offset > U16_MAX || + data_offset + sizeof(struct udphdr) > skb->len)) + /* Packet has offset at impossible location or isn't big enough + * to have UDP fields. + */ + return -EINVAL; + data_len = ntohs(udp->len); + if (unlikely(data_len < sizeof(struct udphdr) || + data_len > skb->len - data_offset)) + /* UDP packet is reporting too small of a size or lying about + * its size. 
+ */ + return -EINVAL; + data_len -= sizeof(struct udphdr); + data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; + if (unlikely(!pskb_may_pull(skb, + data_offset + sizeof(struct message_header)) || + pskb_trim(skb, data_len + data_offset) < 0)) + return -EINVAL; + skb_pull(skb, data_offset); + if (unlikely(skb->len != data_len)) + /* Final len does not agree with calculated len */ + return -EINVAL; + header_len = validate_header_len(skb); + if (unlikely(!header_len)) + return -EINVAL; + __skb_push(skb, data_offset); + if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) + return -EINVAL; + __skb_pull(skb, data_offset); + return 0; +} + +static void wg_receive_handshake_packet(struct wg_device *wg, + struct sk_buff *skb) +{ + enum cookie_mac_state mac_state; + struct wg_peer *peer = NULL; + /* This is global, so that our load calculation applies to the whole + * system. We don't care about races with it at all. + */ + static u64 last_under_load; + bool packet_needs_cookie; + bool under_load; + + if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { + net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n", + wg->dev->name, skb); + wg_cookie_message_consume( + (struct message_handshake_cookie *)skb->data, wg); + return; + } + + under_load = atomic_read(&wg->handshake_queue_len) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; + if (under_load) { + last_under_load = ktime_get_coarse_boottime_ns(); + } else if (last_under_load) { + under_load = !wg_birthdate_has_expired(last_under_load, 1); + if (!under_load) + last_under_load = 0; + } + mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, + under_load); + if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || + (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) { + packet_needs_cookie = false; + } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) { + packet_needs_cookie = true; + } else { + net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n", + wg->dev->name, skb); + return; + } + + switch (SKB_TYPE_LE32(skb)) { + case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): { + struct message_handshake_initiation *message = + (struct message_handshake_initiation *)skb->data; + + if (packet_needs_cookie) { + wg_packet_send_handshake_cookie(wg, skb, + message->sender_index); + return; + } + peer = wg_noise_handshake_consume_initiation(message, wg); + if (unlikely(!peer)) { + net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n", + wg->dev->name, skb); + return; + } + wg_socket_set_peer_endpoint_from_skb(peer, skb); + net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n", + wg->dev->name, peer->internal_id, + &peer->endpoint.addr); + wg_packet_send_handshake_response(peer); + break; + } + case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): { + struct message_handshake_response *message = + (struct message_handshake_response *)skb->data; + + if (packet_needs_cookie) { + wg_packet_send_handshake_cookie(wg, skb, + message->sender_index); + return; + } + peer = wg_noise_handshake_consume_response(message, wg); + if (unlikely(!peer)) { + net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n", + wg->dev->name, skb); + return; + } + wg_socket_set_peer_endpoint_from_skb(peer, skb); + net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n", + wg->dev->name, peer->internal_id, + &peer->endpoint.addr); + if (wg_noise_handshake_begin_session(&peer->handshake, + &peer->keypairs)) 
{ + wg_timers_session_derived(peer); + wg_timers_handshake_complete(peer); + /* Calling this function will either send any existing + * packets in the queue and not send a keepalive, which + * is the best case, Or, if there's nothing in the + * queue, it will send a keepalive, in order to give + * immediate confirmation of the session. + */ + wg_packet_send_keepalive(peer); + } + break; + } + } + + if (unlikely(!peer)) { + WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n"); + return; + } + + local_bh_disable(); + update_rx_stats(peer, skb->len); + local_bh_enable(); + + wg_timers_any_authenticated_packet_received(peer); + wg_timers_any_authenticated_packet_traversal(peer); + wg_peer_put(peer); +} + +void wg_packet_handshake_receive_worker(struct work_struct *work) +{ + struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; + struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); + struct sk_buff *skb; + + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { + wg_receive_handshake_packet(wg, skb); + dev_kfree_skb(skb); + atomic_dec(&wg->handshake_queue_len); + cond_resched(); + } +} + +static void keep_key_fresh(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + bool send; + + if (peer->sent_lastminute_handshake) + return; + + rcu_read_lock_bh(); + keypair = rcu_dereference_bh(peer->keypairs.current_keypair); + send = keypair && READ_ONCE(keypair->sending.is_valid) && + keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, + REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); + rcu_read_unlock_bh(); + + if (unlikely(send)) { + peer->sent_lastminute_handshake = true; + wg_packet_send_queued_handshake_initiation(peer, false); + } +} + +static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair, + simd_context_t *simd_context) +{ + struct scatterlist sg[MAX_SKB_FRAGS + 8]; + struct sk_buff *trailer; + unsigned int offset; + int num_frags; + + if (unlikely(!keypair)) + return false; + + if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || + wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || + keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { + WRITE_ONCE(keypair->receiving.is_valid, false); + return false; + } + + PACKET_CB(skb)->nonce = + le64_to_cpu(((struct message_data *)skb->data)->counter); + + /* We ensure that the network header is part of the packet before we + * call skb_cow_data, so that there's no chance that data is removed + * from the skb, so that later we can extract the original endpoint. + */ + offset = skb->data - skb_network_header(skb); + skb_push(skb, offset); + num_frags = skb_cow_data(skb, 0, &trailer); + offset += sizeof(struct message_data); + skb_pull(skb, offset); + if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) + return false; + + sg_init_table(sg, num_frags); + if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) + return false; + + if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, + PACKET_CB(skb)->nonce, + keypair->receiving.key, + simd_context)) + return false; + + /* Another ugly situation of pushing and pulling the header so as to + * keep endpoint information intact. 
+ */ + skb_push(skb, offset); + if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) + return false; + skb_pull(skb, offset); + + return true; +} + +/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ +static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) +{ + unsigned long index, index_current, top, i; + bool ret = false; + + spin_lock_bh(&counter->lock); + + if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || + their_counter >= REJECT_AFTER_MESSAGES)) + goto out; + + ++their_counter; + + if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < + counter->counter)) + goto out; + + index = their_counter >> ilog2(BITS_PER_LONG); + + if (likely(their_counter > counter->counter)) { + index_current = counter->counter >> ilog2(BITS_PER_LONG); + top = min_t(unsigned long, index - index_current, + COUNTER_BITS_TOTAL / BITS_PER_LONG); + for (i = 1; i <= top; ++i) + counter->backtrack[(i + index_current) & + ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; + counter->counter = their_counter; + } + + index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; + ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), + &counter->backtrack[index]); + +out: + spin_unlock_bh(&counter->lock); + return ret; +} + +#include "selftest/counter.c" + +static void wg_packet_consume_data_done(struct wg_peer *peer, + struct sk_buff *skb, + struct endpoint *endpoint) +{ + struct net_device *dev = peer->device->dev; + unsigned int len, len_before_trim; + struct wg_peer *routed_peer; + + wg_socket_set_peer_endpoint(peer, endpoint); + + if (unlikely(wg_noise_received_with_keypair(&peer->keypairs, + PACKET_CB(skb)->keypair))) { + wg_timers_handshake_complete(peer); + wg_packet_send_staged_packets(peer); + } + + keep_key_fresh(peer); + + wg_timers_any_authenticated_packet_received(peer); + wg_timers_any_authenticated_packet_traversal(peer); + + /* A packet with length 0 is a keepalive packet */ + if (unlikely(!skb->len)) { + update_rx_stats(peer, message_data_len(0)); + net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n", + dev->name, peer->internal_id, + &peer->endpoint.addr); + goto packet_processed; + } + + wg_timers_data_received(peer); + + if (unlikely(skb_network_header(skb) < skb->head)) + goto dishonest_packet_size; + if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && + (ip_hdr(skb)->version == 4 || + (ip_hdr(skb)->version == 6 && + pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) + goto dishonest_packet_type; + + skb->dev = dev; + /* We've already verified the Poly1305 auth tag, which means this packet + * was not modified in transit. We can therefore tell the networking + * stack that all checksums of every layer of encapsulation have already + * been checked "by the hardware" and therefore is unnecessary to check + * again in software. 
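counter_validate() above is the RFC 6479 sliding-window replay filter: a bitmap records which recent nonces have been seen, and the window only ever advances a whole word at a time, which is why the accept window is one word smaller than the bitmap. A simplified user-space sketch with a deliberately tiny window follows; the driver's window is much larger, and the real function additionally holds a spinlock and enforces REJECT_AFTER_MESSAGES.

#include <stdint.h>
#include <stdbool.h>

#define WORD_BITS     64u
#define WINDOW_WORDS  4u                        /* tiny, for illustration */
#define WINDOW_BITS   (WINDOW_WORDS * WORD_BITS)

struct replay_window {
        uint64_t greatest;                      /* highest nonce accepted so far */
        uint64_t bitmap[WINDOW_WORDS];
};

/* Returns true if seq is new (and records it), false if replayed or too old. */
static bool replay_check(struct replay_window *w, uint64_t seq)
{
        uint64_t word = (seq / WORD_BITS) % WINDOW_WORDS;
        uint64_t bit = seq % WORD_BITS;

        /* Word-granular clearing shrinks the usable window by one word. */
        if (seq + (WINDOW_BITS - WORD_BITS) < w->greatest)
                return false;                   /* too old */

        if (seq > w->greatest) {
                uint64_t cur = w->greatest / WORD_BITS;
                uint64_t adv = seq / WORD_BITS - cur;
                uint64_t i;

                if (adv > WINDOW_WORDS)
                        adv = WINDOW_WORDS;
                /* Slide forward, clearing only the words we pass over. */
                for (i = 1; i <= adv; ++i)
                        w->bitmap[(cur + i) % WINDOW_WORDS] = 0;
                w->greatest = seq;
        }

        if (w->bitmap[word] & (1ull << bit))
                return false;                   /* already seen: replay */
        w->bitmap[word] |= 1ull << bit;
        return true;
}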
+ */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifndef COMPAT_CANNOT_USE_CSUM_LEVEL + skb->csum_level = ~0; /* All levels */ +#endif + skb->protocol = ip_tunnel_parse_protocol(skb); + if (skb->protocol == htons(ETH_P_IP)) { + len = ntohs(ip_hdr(skb)->tot_len); + if (unlikely(len < sizeof(struct iphdr))) + goto dishonest_packet_size; + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + len = ntohs(ipv6_hdr(skb)->payload_len) + + sizeof(struct ipv6hdr); + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); + } else { + goto dishonest_packet_type; + } + + if (unlikely(len > skb->len)) + goto dishonest_packet_size; + len_before_trim = skb->len; + if (unlikely(pskb_trim(skb, len))) + goto packet_processed; + + routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips, + skb); + wg_peer_put(routed_peer); /* We don't need the extra reference. */ + + if (unlikely(routed_peer != peer)) + goto dishonest_packet_peer; + + napi_gro_receive(&peer->napi, skb); + update_rx_stats(peer, message_data_len(len_before_trim)); + return; + +dishonest_packet_peer: + net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", + dev->name, skb, peer->internal_id, + &peer->endpoint.addr); + ++dev->stats.rx_errors; + ++dev->stats.rx_frame_errors; + goto packet_processed; +dishonest_packet_type: + net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", + dev->name, peer->internal_id, &peer->endpoint.addr); + ++dev->stats.rx_errors; + ++dev->stats.rx_frame_errors; + goto packet_processed; +dishonest_packet_size: + net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", + dev->name, peer->internal_id, &peer->endpoint.addr); + ++dev->stats.rx_errors; + ++dev->stats.rx_length_errors; + goto packet_processed; +packet_processed: + dev_kfree_skb(skb); +} + +int wg_packet_rx_poll(struct napi_struct *napi, int budget) +{ + struct wg_peer *peer = container_of(napi, struct wg_peer, napi); + struct noise_keypair *keypair; + struct endpoint endpoint; + enum packet_state state; + struct sk_buff *skb; + int work_done = 0; + bool free; + + if (unlikely(budget <= 0)) + return 0; + + while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && + (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != + PACKET_STATE_UNCRYPTED) { + wg_prev_queue_drop_peeked(&peer->rx_queue); + keypair = PACKET_CB(skb)->keypair; + free = true; + + if (unlikely(state != PACKET_STATE_CRYPTED)) + goto next; + + if (unlikely(!counter_validate(&keypair->receiving_counter, + PACKET_CB(skb)->nonce))) { + net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", + peer->device->dev->name, + PACKET_CB(skb)->nonce, + keypair->receiving_counter.counter); + goto next; + } + + if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) + goto next; + + wg_reset_packet(skb, false); + wg_packet_consume_data_done(peer, skb, &endpoint); + free = false; + +next: + wg_noise_keypair_put(keypair, false); + wg_peer_put(peer); + if (unlikely(free)) + dev_kfree_skb(skb); + + if (++work_done >= budget) + break; + } + + if (work_done < budget) + napi_complete_done(napi, work_done); + + return work_done; +} + +void wg_packet_decrypt_worker(struct work_struct *work) +{ + struct crypt_queue *queue = container_of(work, struct multicore_worker, + work)->ptr; + simd_context_t simd_context; + struct sk_buff *skb; + + simd_get(&simd_context); + while ((skb = 
ptr_ring_consume_bh(&queue->ring)) != NULL) { + enum packet_state state = + likely(decrypt_packet(skb, PACKET_CB(skb)->keypair, + &simd_context)) ? + PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; + wg_queue_enqueue_per_peer_rx(skb, state); + simd_relax(&simd_context); + } + + simd_put(&simd_context); +} + +static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) +{ + __le32 idx = ((struct message_data *)skb->data)->key_idx; + struct wg_peer *peer = NULL; + int ret; + + rcu_read_lock_bh(); + PACKET_CB(skb)->keypair = + (struct noise_keypair *)wg_index_hashtable_lookup( + wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, + &peer); + if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) + goto err_keypair; + + if (unlikely(READ_ONCE(peer->is_dead))) + goto err; + + ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, + wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); + if (unlikely(ret == -EPIPE)) + wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); + if (likely(!ret || ret == -EPIPE)) { + rcu_read_unlock_bh(); + return; + } +err: + wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); +err_keypair: + rcu_read_unlock_bh(); + wg_peer_put(peer); + dev_kfree_skb(skb); +} + +void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) +{ + if (unlikely(prepare_skb_header(skb, wg) < 0)) + goto err; + switch (SKB_TYPE_LE32(skb)) { + case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): + case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): + case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { + int cpu, ret = -EBUSY; + + if (unlikely(!rng_is_initialized())) + goto drop; + if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) { + if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { + ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); + spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); + } + } else + ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); + if (ret) { + drop: + net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", + wg->dev->name, skb); + goto err; + } + atomic_inc(&wg->handshake_queue_len); + cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); + /* Queues up a call to packet_process_queued_handshake_packets(skb): */ + queue_work_on(cpu, wg->handshake_receive_wq, + &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); + break; + } + case cpu_to_le32(MESSAGE_DATA): + PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); + wg_packet_consume_data(wg, skb); + break; + default: + WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); + goto err; + } + return; + +err: + dev_kfree_skb(skb); +} diff --git a/net/wireguard/selftest/allowedips.c b/net/wireguard/selftest/allowedips.c new file mode 100644 index 000000000000..e173204ae7d7 --- /dev/null +++ b/net/wireguard/selftest/allowedips.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This contains some basic static unit tests for the allowedips data structure. + * It also has two additional modes that are disabled and meant to be used by + * folks directly playing with this file. If you define the macro + * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in + * memory, it will be printed out as KERN_DEBUG in a format that can be passed + * to graphviz (the dot command) to visualize it. 
If you define the macro + * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of + * randomized tests done against a trivial implementation, which may take + * upwards of a half-hour to complete. There's no set of users who should be + * enabling these, and the only developers that should go anywhere near these + * nobs are the ones who are reading this comment. + */ + +#ifdef DEBUG + +#include <linux/siphash.h> + +static __init void print_node(struct allowedips_node *node, u8 bits) +{ + char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; + char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; + u8 ip1[16], ip2[16], cidr1, cidr2; + char *style = "dotted"; + u32 color = 0; + + if (node == NULL) + return; + if (bits == 32) { + fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; + fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; + } else if (bits == 128) { + fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; + fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; + } + if (node->peer) { + hsiphash_key_t key = { { 0 } }; + + memcpy(&key, &node->peer, sizeof(node->peer)); + color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 | + hsiphash_1u32(0xbabecafe, &key) % 200 << 8 | + hsiphash_1u32(0xabad1dea, &key) % 200; + style = "bold"; + } + wg_allowedips_read_node(node, ip1, &cidr1); + printk(fmt_declaration, ip1, cidr1, style, color); + if (node->bit[0]) { + wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); + printk(fmt_connection, ip1, cidr1, ip2, cidr2); + } + if (node->bit[1]) { + wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); + printk(fmt_connection, ip1, cidr1, ip2, cidr2); + } + if (node->bit[0]) + print_node(rcu_dereference_raw(node->bit[0]), bits); + if (node->bit[1]) + print_node(rcu_dereference_raw(node->bit[1]), bits); +} + +static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) +{ + printk(KERN_DEBUG "digraph trie {\n"); + print_node(rcu_dereference_raw(top), bits); + printk(KERN_DEBUG "}\n"); +} + +enum { + NUM_PEERS = 2000, + NUM_RAND_ROUTES = 400, + NUM_MUTATED_ROUTES = 100, + NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30 +}; + +struct horrible_allowedips { + struct hlist_head head; +}; + +struct horrible_allowedips_node { + struct hlist_node table; + union nf_inet_addr ip; + union nf_inet_addr mask; + u8 ip_version; + void *value; +}; + +static __init void horrible_allowedips_init(struct horrible_allowedips *table) +{ + INIT_HLIST_HEAD(&table->head); +} + +static __init void horrible_allowedips_free(struct horrible_allowedips *table) +{ + struct horrible_allowedips_node *node; + struct hlist_node *h; + + hlist_for_each_entry_safe(node, h, &table->head, table) { + hlist_del(&node->table); + kfree(node); + } +} + +static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) +{ + union nf_inet_addr mask; + + memset(&mask, 0, sizeof(mask)); + memset(&mask.all, 0xff, cidr / 8); + if (cidr % 32) + mask.all[cidr / 32] = (__force u32)htonl( + (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); + return mask; +} + +static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet) +{ + return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) + + hweight32(subnet.all[2]) + hweight32(subnet.all[3]); +} + +static __init inline void +horrible_mask_self(struct horrible_allowedips_node *node) +{ + if (node->ip_version == 4) { + node->ip.ip &= node->mask.ip; + } else if 
(node->ip_version == 6) { + node->ip.ip6[0] &= node->mask.ip6[0]; + node->ip.ip6[1] &= node->mask.ip6[1]; + node->ip.ip6[2] &= node->mask.ip6[2]; + node->ip.ip6[3] &= node->mask.ip6[3]; + } +} + +static __init inline bool +horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) +{ + return (ip->s_addr & node->mask.ip) == node->ip.ip; +} + +static __init inline bool +horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) +{ + return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && + (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && + (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && + (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; +} + +static __init void +horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) +{ + struct horrible_allowedips_node *other = NULL, *where = NULL; + u8 my_cidr = horrible_mask_to_cidr(node->mask); + + hlist_for_each_entry(other, &table->head, table) { + if (other->ip_version == node->ip_version && + !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && + !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { + other->value = node->value; + kfree(node); + return; + } + } + hlist_for_each_entry(other, &table->head, table) { + where = other; + if (horrible_mask_to_cidr(other->mask) <= my_cidr) + break; + } + if (!other && !where) + hlist_add_head(&node->table, &table->head); + else if (!other) + hlist_add_behind(&node->table, &where->table); + else + hlist_add_before(&node->table, &where->table); +} + +static __init int +horrible_allowedips_insert_v4(struct horrible_allowedips *table, + struct in_addr *ip, u8 cidr, void *value) +{ + struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (unlikely(!node)) + return -ENOMEM; + node->ip.in = *ip; + node->mask = horrible_cidr_to_mask(cidr); + node->ip_version = 4; + node->value = value; + horrible_mask_self(node); + horrible_insert_ordered(table, node); + return 0; +} + +static __init int +horrible_allowedips_insert_v6(struct horrible_allowedips *table, + struct in6_addr *ip, u8 cidr, void *value) +{ + struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (unlikely(!node)) + return -ENOMEM; + node->ip.in6 = *ip; + node->mask = horrible_cidr_to_mask(cidr); + node->ip_version = 6; + node->value = value; + horrible_mask_self(node); + horrible_insert_ordered(table, node); + return 0; +} + +static __init void * +horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) +{ + struct horrible_allowedips_node *node; + + hlist_for_each_entry(node, &table->head, table) { + if (node->ip_version == 4 && horrible_match_v4(node, ip)) + return node->value; + } + return NULL; +} + +static __init void * +horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) +{ + struct horrible_allowedips_node *node; + + hlist_for_each_entry(node, &table->head, table) { + if (node->ip_version == 6 && horrible_match_v6(node, ip)) + return node->value; + } + return NULL; +} + + +static __init void +horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) +{ + struct horrible_allowedips_node *node; + struct hlist_node *h; + + hlist_for_each_entry_safe(node, h, &table->head, table) { + if (node->value != value) + continue; + hlist_del(&node->table); + kfree(node); + } + +} + +static __init bool randomized_test(void) +{ + unsigned int 
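The "horrible" reference implementation above deliberately checks membership the slow, obvious way, by expanding the prefix length into a mask and comparing masked addresses, which is exactly what makes it a trustworthy oracle for the trie. A self-contained IPv4 version of that check, working on host-order addresses rather than the byte-order-aware nf_inet_addr handling above; the example in the trailing comment echoes the 192.168.4.0/24 entries used in the static tests below.

#include <stdint.h>
#include <stdbool.h>

static uint32_t cidr_to_mask(unsigned int cidr)
{
        /* cidr == 0 handled separately to avoid an undefined 32-bit shift. */
        return cidr ? ~(uint32_t)0 << (32 - cidr) : 0;
}

static bool prefix_matches(uint32_t network, unsigned int cidr, uint32_t addr)
{
        uint32_t mask = cidr_to_mask(cidr);

        return (addr & mask) == (network & mask);
}

/* Example: 192.168.4.0/24 contains 192.168.4.20 but not 192.168.5.1. */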
i, j, k, mutate_amount, cidr; + u8 ip[16], mutate_mask[16], mutated[16]; + struct wg_peer **peers, *peer; + struct horrible_allowedips h; + DEFINE_MUTEX(mutex); + struct allowedips t; + bool ret = false; + + mutex_init(&mutex); + + wg_allowedips_init(&t); + horrible_allowedips_init(&h); + + peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL); + if (unlikely(!peers)) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free; + } + for (i = 0; i < NUM_PEERS; ++i) { + peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL); + if (unlikely(!peers[i])) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free; + } + kref_init(&peers[i]->refcount); + INIT_LIST_HEAD(&peers[i]->allowedips_list); + } + + mutex_lock(&mutex); + + for (i = 0; i < NUM_RAND_ROUTES; ++i) { + prandom_bytes(ip, 4); + cidr = prandom_u32_max(32) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, + peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip, + cidr, peer) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { + memcpy(mutated, ip, 4); + prandom_bytes(mutate_mask, 4); + mutate_amount = prandom_u32_max(32); + for (k = 0; k < mutate_amount / 8; ++k) + mutate_mask[k] = 0xff; + mutate_mask[k] = 0xff + << ((8 - (mutate_amount % 8)) % 8); + for (; k < 4; ++k) + mutate_mask[k] = 0; + for (k = 0; k < 4; ++k) + mutated[k] = (mutated[k] & mutate_mask[k]) | + (~mutate_mask[k] & + prandom_u32_max(256)); + cidr = prandom_u32_max(32) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v4(&t, + (struct in_addr *)mutated, + cidr, peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v4(&h, + (struct in_addr *)mutated, cidr, peer)) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + } + } + + for (i = 0; i < NUM_RAND_ROUTES; ++i) { + prandom_bytes(ip, 16); + cidr = prandom_u32_max(128) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, + peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip, + cidr, peer) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { + memcpy(mutated, ip, 16); + prandom_bytes(mutate_mask, 16); + mutate_amount = prandom_u32_max(128); + for (k = 0; k < mutate_amount / 8; ++k) + mutate_mask[k] = 0xff; + mutate_mask[k] = 0xff + << ((8 - (mutate_amount % 8)) % 8); + for (; k < 4; ++k) + mutate_mask[k] = 0; + for (k = 0; k < 4; ++k) + mutated[k] = (mutated[k] & mutate_mask[k]) | + (~mutate_mask[k] & + prandom_u32_max(256)); + cidr = prandom_u32_max(128) + 1; + peer = peers[prandom_u32_max(NUM_PEERS)]; + if (wg_allowedips_insert_v6(&t, + (struct in6_addr *)mutated, + cidr, peer, &mutex) < 0) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v6( + &h, (struct in6_addr *)mutated, cidr, + peer)) { + pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + } + } + + mutex_unlock(&mutex); + + if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { + print_tree(t.root4, 32); + print_tree(t.root6, 128); + } + 
+ for (j = 0;; ++j) { + for (i = 0; i < NUM_QUERIES; ++i) { + prandom_bytes(ip, 4); + if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { + horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); + pr_err("allowedips random v4 self-test: FAIL\n"); + goto free; + } + prandom_bytes(ip, 16); + if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { + pr_err("allowedips random v6 self-test: FAIL\n"); + goto free; + } + } + if (j >= NUM_PEERS) + break; + mutex_lock(&mutex); + wg_allowedips_remove_by_peer(&t, peers[j], &mutex); + mutex_unlock(&mutex); + horrible_allowedips_remove_by_value(&h, peers[j]); + } + + if (t.root4 || t.root6) { + pr_err("allowedips random self-test removal: FAIL\n"); + goto free; + } + + ret = true; + +free: + mutex_lock(&mutex); +free_locked: + wg_allowedips_free(&t, &mutex); + mutex_unlock(&mutex); + horrible_allowedips_free(&h); + if (peers) { + for (i = 0; i < NUM_PEERS; ++i) + kfree(peers[i]); + } + kfree(peers); + return ret; +} + +static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d) +{ + static struct in_addr ip; + u8 *split = (u8 *)&ip; + + split[0] = a; + split[1] = b; + split[2] = c; + split[3] = d; + return &ip; +} + +static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d) +{ + static struct in6_addr ip; + __be32 *split = (__be32 *)&ip; + + split[0] = cpu_to_be32(a); + split[1] = cpu_to_be32(b); + split[2] = cpu_to_be32(c); + split[3] = cpu_to_be32(d); + return &ip; +} + +static __init struct wg_peer *init_peer(void) +{ + struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL); + + if (!peer) + return NULL; + kref_init(&peer->refcount); + INIT_LIST_HEAD(&peer->allowedips_list); + return peer; +} + +#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \ + wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \ + cidr, mem, &mutex) + +#define maybe_fail() do { \ + ++i; \ + if (!_s) { \ + pr_info("allowedips self-test %zu: FAIL\n", i); \ + success = false; \ + } \ + } while (0) + +#define test(version, mem, ipa, ipb, ipc, ipd) do { \ + bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \ + ip##version(ipa, ipb, ipc, ipd)) == (mem); \ + maybe_fail(); \ + } while (0) + +#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \ + bool _s = lookup(t.root##version, (version) == 4 ? 
32 : 128, \ + ip##version(ipa, ipb, ipc, ipd)) != (mem); \ + maybe_fail(); \ + } while (0) + +#define test_boolean(cond) do { \ + bool _s = (cond); \ + maybe_fail(); \ + } while (0) + +bool __init wg_allowedips_selftest(void) +{ + bool found_a = false, found_b = false, found_c = false, found_d = false, + found_e = false, found_other = false; + struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(), + *d = init_peer(), *e = init_peer(), *f = init_peer(), + *g = init_peer(), *h = init_peer(); + struct allowedips_node *iter_node; + bool success = false; + struct allowedips t; + DEFINE_MUTEX(mutex); + struct in6_addr ip; + size_t i = 0, count = 0; + __be64 part; + + mutex_init(&mutex); + mutex_lock(&mutex); + wg_allowedips_init(&t); + + if (!a || !b || !c || !d || !e || !f || !g || !h) { + pr_err("allowedips self-test malloc: FAIL\n"); + goto free; + } + + insert(4, a, 192, 168, 4, 0, 24); + insert(4, b, 192, 168, 4, 4, 32); + insert(4, c, 192, 168, 0, 0, 16); + insert(4, d, 192, 95, 5, 64, 27); + /* replaces previous entry, and maskself is required */ + insert(4, c, 192, 95, 5, 65, 27); + insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); + insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64); + insert(4, e, 0, 0, 0, 0, 0); + insert(6, e, 0, 0, 0, 0, 0); + /* replaces previous entry */ + insert(6, f, 0, 0, 0, 0, 0); + insert(6, g, 0x24046800, 0, 0, 0, 32); + /* maskself is required */ + insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64); + insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128); + insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128); + insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98); + insert(4, g, 64, 15, 112, 0, 20); + /* maskself is required */ + insert(4, h, 64, 15, 123, 211, 25); + insert(4, a, 10, 0, 0, 0, 25); + insert(4, b, 10, 0, 0, 128, 25); + insert(4, a, 10, 1, 0, 0, 30); + insert(4, b, 10, 1, 0, 4, 30); + insert(4, c, 10, 1, 0, 8, 29); + insert(4, d, 10, 1, 0, 16, 29); + + if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { + print_tree(t.root4, 32); + print_tree(t.root6, 128); + } + + success = true; + + test(4, a, 192, 168, 4, 20); + test(4, a, 192, 168, 4, 0); + test(4, b, 192, 168, 4, 4); + test(4, c, 192, 168, 200, 182); + test(4, c, 192, 95, 5, 68); + test(4, e, 192, 95, 5, 96); + test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543); + test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee); + test(6, f, 0x26075300, 0x60006b01, 0, 0); + test(6, g, 0x24046800, 0x40040806, 0, 0x1006); + test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678); + test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678); + test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678); + test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678); + test(6, h, 0x24046800, 0x40040800, 0, 0); + test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010); + test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef); + test(4, g, 64, 15, 116, 26); + test(4, g, 64, 15, 127, 3); + test(4, g, 64, 15, 123, 1); + test(4, h, 64, 15, 123, 128); + test(4, h, 64, 15, 123, 129); + test(4, a, 10, 0, 0, 52); + test(4, b, 10, 0, 0, 220); + test(4, a, 10, 1, 0, 2); + test(4, b, 10, 1, 0, 6); + test(4, c, 10, 1, 0, 10); + test(4, d, 10, 1, 0, 20); + + insert(4, a, 1, 0, 0, 0, 32); + insert(4, a, 64, 0, 0, 0, 32); + insert(4, a, 128, 0, 0, 0, 32); + insert(4, a, 192, 0, 0, 0, 32); + insert(4, a, 255, 0, 0, 0, 32); + wg_allowedips_remove_by_peer(&t, a, &mutex); + test_negative(4, a, 1, 0, 0, 0); + test_negative(4, a, 64, 0, 0, 0); + test_negative(4, a, 128, 0, 0, 0); + test_negative(4, a, 
192, 0, 0, 0); + test_negative(4, a, 255, 0, 0, 0); + + wg_allowedips_free(&t, &mutex); + wg_allowedips_init(&t); + insert(4, a, 192, 168, 0, 0, 16); + insert(4, a, 192, 168, 0, 0, 24); + wg_allowedips_remove_by_peer(&t, a, &mutex); + test_negative(4, a, 192, 168, 0, 1); + + /* These will hit the WARN_ON(len >= 128) in free_node if something + * goes wrong. + */ + for (i = 0; i < 128; ++i) { + part = cpu_to_be64(~(1LLU << (i % 64))); + memset(&ip, 0xff, 16); + memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); + } + + wg_allowedips_free(&t, &mutex); + + wg_allowedips_init(&t); + insert(4, a, 192, 95, 5, 93, 27); + insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); + insert(4, a, 10, 1, 0, 20, 29); + insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83); + insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21); + list_for_each_entry(iter_node, &a->allowedips_list, peer_list) { + u8 cidr, ip[16] __aligned(__alignof(u64)); + int family = wg_allowedips_read_node(iter_node, ip, &cidr); + + count++; + + if (cidr == 27 && family == AF_INET && + !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr))) + found_a = true; + else if (cidr == 128 && family == AF_INET6 && + !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543), + sizeof(struct in6_addr))) + found_b = true; + else if (cidr == 29 && family == AF_INET && + !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr))) + found_c = true; + else if (cidr == 83 && family == AF_INET6 && + !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0), + sizeof(struct in6_addr))) + found_d = true; + else if (cidr == 21 && family == AF_INET6 && + !memcmp(ip, ip6(0x26075000, 0, 0, 0), + sizeof(struct in6_addr))) + found_e = true; + else + found_other = true; + } + test_boolean(count == 5); + test_boolean(found_a); + test_boolean(found_b); + test_boolean(found_c); + test_boolean(found_d); + test_boolean(found_e); + test_boolean(!found_other); + + if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success) + success = randomized_test(); + + if (success) + pr_info("allowedips self-tests: pass\n"); + +free: + wg_allowedips_free(&t, &mutex); + kfree(a); + kfree(b); + kfree(c); + kfree(d); + kfree(e); + kfree(f); + kfree(g); + kfree(h); + mutex_unlock(&mutex); + + return success; +} + +#undef test_negative +#undef test +#undef remove +#undef insert +#undef init_peer + +#endif diff --git a/net/wireguard/selftest/counter.c b/net/wireguard/selftest/counter.c new file mode 100644 index 000000000000..ec3c156bf91b --- /dev/null +++ b/net/wireguard/selftest/counter.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifdef DEBUG +bool __init wg_packet_counter_selftest(void) +{ + struct noise_replay_counter *counter; + unsigned int test_num = 0, i; + bool success = true; + + counter = kmalloc(sizeof(*counter), GFP_KERNEL); + if (unlikely(!counter)) { + pr_err("nonce counter self-test malloc: FAIL\n"); + return false; + } + +#define T_INIT do { \ + memset(counter, 0, sizeof(*counter)); \ + spin_lock_init(&counter->lock); \ + } while (0) +#define T_LIM (COUNTER_WINDOW_SIZE + 1) +#define T(n, v) do { \ + ++test_num; \ + if (counter_validate(counter, n) != (v)) { \ + pr_err("nonce counter self-test %u: FAIL\n", \ + test_num); \ + success = false; \ + } \ + } while (0) + + T_INIT; + /* 1 */ T(0, true); + /* 2 */ T(1, true); + /* 3 */ T(1, false); + /* 4 */ T(9, true); + /* 5 */ T(8, true); + /* 6 */ T(7, true); + /* 7 */ T(7, false); + /* 8 */ T(T_LIM, true); + /* 9 */ T(T_LIM - 1, true); + /* 10 */ T(T_LIM - 1, false); + /* 11 */ T(T_LIM - 2, true); + /* 12 */ T(2, true); + /* 13 */ T(2, false); + /* 14 */ T(T_LIM + 16, true); + /* 15 */ T(3, false); + /* 16 */ T(T_LIM + 16, false); + /* 17 */ T(T_LIM * 4, true); + /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true); + /* 19 */ T(10, false); + /* 20 */ T(T_LIM * 4 - T_LIM, false); + /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false); + /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true); + /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false); + /* 24 */ T(0, false); + /* 25 */ T(REJECT_AFTER_MESSAGES, false); + /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true); + /* 27 */ T(REJECT_AFTER_MESSAGES, false); + /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false); + /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true); + /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false); + /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false); + /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false); + /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true); + /* 34 */ T(0, false); + + T_INIT; + for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i) + T(i, true); + T(0, true); + T(0, false); + + T_INIT; + for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i) + T(i, true); + T(1, true); + T(0, false); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;) + T(i, true); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;) + T(i, true); + T(0, false); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) + T(i, true); + T(COUNTER_WINDOW_SIZE + 1, true); + T(0, false); + + T_INIT; + for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) + T(i, true); + T(0, true); + T(COUNTER_WINDOW_SIZE + 1, true); + +#undef T +#undef T_LIM +#undef T_INIT + + if (success) + pr_info("nonce counter self-tests: pass\n"); + kfree(counter); + return success; +} +#endif diff --git a/net/wireguard/selftest/ratelimiter.c b/net/wireguard/selftest/ratelimiter.c new file mode 100644 index 000000000000..007cd4457c5f --- /dev/null +++ b/net/wireguard/selftest/ratelimiter.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifdef DEBUG + +#include <linux/jiffies.h> + +static const struct { + bool result; + unsigned int msec_to_sleep_before; +} expected_results[] __initconst = { + [0 ... 
PACKETS_BURSTABLE - 1] = { true, 0 }, + [PACKETS_BURSTABLE] = { false, 0 }, + [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, + [PACKETS_BURSTABLE + 2] = { false, 0 }, + [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, + [PACKETS_BURSTABLE + 4] = { true, 0 }, + [PACKETS_BURSTABLE + 5] = { false, 0 } +}; + +static __init unsigned int maximum_jiffies_at_index(int index) +{ + unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; + int i; + + for (i = 0; i <= index; ++i) + total_msecs += expected_results[i].msec_to_sleep_before; + return msecs_to_jiffies(total_msecs); +} + +static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, + struct sk_buff *skb6, struct ipv6hdr *hdr6, + int *test) +{ + unsigned long loop_start_time; + int i; + + wg_ratelimiter_gc_entries(NULL); + rcu_barrier(); + loop_start_time = jiffies; + + for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { + if (expected_results[i].msec_to_sleep_before) + msleep(expected_results[i].msec_to_sleep_before); + + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (wg_ratelimiter_allow(skb4, &init_net) != + expected_results[i].result) + return -EXFULL; + ++(*test); + + hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1); + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (!wg_ratelimiter_allow(skb4, &init_net)) + return -EXFULL; + ++(*test); + + hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1); + +#if IS_ENABLED(CONFIG_IPV6) + hdr6->saddr.in6_u.u6_addr32[2] = htonl(i); + hdr6->saddr.in6_u.u6_addr32[3] = htonl(i); + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (wg_ratelimiter_allow(skb6, &init_net) != + expected_results[i].result) + return -EXFULL; + ++(*test); + + hdr6->saddr.in6_u.u6_addr32[0] = + htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1); + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; + if (!wg_ratelimiter_allow(skb6, &init_net)) + return -EXFULL; + ++(*test); + + hdr6->saddr.in6_u.u6_addr32[0] = + htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1); + + if (time_is_before_jiffies(loop_start_time + + maximum_jiffies_at_index(i))) + return -ETIMEDOUT; +#endif + } + return 0; +} + +static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4, + int *test) +{ + int i; + + wg_ratelimiter_gc_entries(NULL); + rcu_barrier(); + + if (atomic_read(&total_entries)) + return -EXFULL; + ++(*test); + + for (i = 0; i <= max_entries; ++i) { + hdr4->saddr = htonl(i); + if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries)) + return -EXFULL; + ++(*test); + } + return 0; +} + +bool __init wg_ratelimiter_selftest(void) +{ + enum { TRIALS_BEFORE_GIVING_UP = 5000 }; + bool success = false; + int test = 0, trials; + struct sk_buff *skb4, *skb6 = NULL; + struct iphdr *hdr4; + struct ipv6hdr *hdr6 = NULL; + + if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) + return true; + + BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); + + if (wg_ratelimiter_init()) + goto out; + ++test; + if (wg_ratelimiter_init()) { + wg_ratelimiter_uninit(); + goto out; + } + ++test; + if (wg_ratelimiter_init()) { + wg_ratelimiter_uninit(); + wg_ratelimiter_uninit(); + goto out; + } + ++test; + + skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL); + if (unlikely(!skb4)) + goto err_nofree; + skb4->protocol = htons(ETH_P_IP); + hdr4 = (struct iphdr 
*)skb_put(skb4, sizeof(*hdr4)); + hdr4->saddr = htonl(8182); + skb_reset_network_header(skb4); + ++test; + +#if IS_ENABLED(CONFIG_IPV6) + skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL); + if (unlikely(!skb6)) { + kfree_skb(skb4); + goto err_nofree; + } + skb6->protocol = htons(ETH_P_IPV6); + hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6)); + hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212); + hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188); + skb_reset_network_header(skb6); + ++test; +#endif + + for (trials = TRIALS_BEFORE_GIVING_UP;;) { + int test_count = 0, ret; + + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); + if (ret == -ETIMEDOUT) { + if (!trials--) { + test += test_count; + goto err; + } + msleep(500); + continue; + } else if (ret < 0) { + test += test_count; + goto err; + } else { + test += test_count; + break; + } + } + + for (trials = TRIALS_BEFORE_GIVING_UP;;) { + int test_count = 0; + + if (capacity_test(skb4, hdr4, &test_count) < 0) { + if (!trials--) { + test += test_count; + goto err; + } + msleep(50); + continue; + } + test += test_count; + break; + } + + success = true; + +err: + kfree_skb(skb4); +#if IS_ENABLED(CONFIG_IPV6) + kfree_skb(skb6); +#endif +err_nofree: + wg_ratelimiter_uninit(); + wg_ratelimiter_uninit(); + wg_ratelimiter_uninit(); + /* Uninit one extra time to check underflow detection. */ + wg_ratelimiter_uninit(); +out: + if (success) + pr_info("ratelimiter self-tests: pass\n"); + else + pr_err("ratelimiter self-test %d: FAIL\n", test); + + return success; +} +#endif diff --git a/net/wireguard/send.c b/net/wireguard/send.c new file mode 100644 index 000000000000..55bb0c9313d7 --- /dev/null +++ b/net/wireguard/send.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "queueing.h" +#include "timers.h" +#include "device.h" +#include "peer.h" +#include "socket.h" +#include "messages.h" +#include "cookie.h" + +#include <linux/simd.h> +#include <linux/uio.h> +#include <linux/inetdevice.h> +#include <linux/socket.h> +#include <net/ip_tunnels.h> +#include <net/udp.h> +#include <net/sock.h> + +static void wg_packet_send_handshake_initiation(struct wg_peer *peer) +{ + struct message_handshake_initiation packet; + + if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), + REKEY_TIMEOUT)) + return; /* This function is rate limited. 
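A new initiation is sent at most once per REKEY_TIMEOUT seconds, as enforced by the expiry check above.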
*/ + + atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); + net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + + if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) { + wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); + wg_timers_any_authenticated_packet_traversal(peer); + wg_timers_any_authenticated_packet_sent(peer); + atomic64_set(&peer->last_sent_handshake, + ktime_get_coarse_boottime_ns()); + wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet), + HANDSHAKE_DSCP); + wg_timers_handshake_initiated(peer); + } +} + +void wg_packet_handshake_send_worker(struct work_struct *work) +{ + struct wg_peer *peer = container_of(work, struct wg_peer, + transmit_handshake_work); + + wg_packet_send_handshake_initiation(peer); + wg_peer_put(peer); +} + +void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, + bool is_retry) +{ + if (!is_retry) + peer->timer_handshake_attempts = 0; + + rcu_read_lock_bh(); + /* We check last_sent_handshake here in addition to the actual function + * we're queueing up, so that we don't queue things if not strictly + * necessary: + */ + if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), + REKEY_TIMEOUT) || + unlikely(READ_ONCE(peer->is_dead))) + goto out; + + wg_peer_get(peer); + /* Queues up calling packet_send_queued_handshakes(peer), where we do a + * peer_put(peer) after: + */ + if (!queue_work(peer->device->handshake_send_wq, + &peer->transmit_handshake_work)) + /* If the work was already queued, we want to drop the + * extra reference: + */ + wg_peer_put(peer); +out: + rcu_read_unlock_bh(); +} + +void wg_packet_send_handshake_response(struct wg_peer *peer) +{ + struct message_handshake_response packet; + + atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); + net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + + if (wg_noise_handshake_create_response(&packet, &peer->handshake)) { + wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); + if (wg_noise_handshake_begin_session(&peer->handshake, + &peer->keypairs)) { + wg_timers_session_derived(peer); + wg_timers_any_authenticated_packet_traversal(peer); + wg_timers_any_authenticated_packet_sent(peer); + atomic64_set(&peer->last_sent_handshake, + ktime_get_coarse_boottime_ns()); + wg_socket_send_buffer_to_peer(peer, &packet, + sizeof(packet), + HANDSHAKE_DSCP); + } + } +} + +void wg_packet_send_handshake_cookie(struct wg_device *wg, + struct sk_buff *initiating_skb, + __le32 sender_index) +{ + struct message_handshake_cookie packet; + + net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n", + wg->dev->name, initiating_skb); + wg_cookie_message_create(&packet, initiating_skb, sender_index, + &wg->cookie_checker); + wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet, + sizeof(packet)); +} + +static void keep_key_fresh(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + bool send; + + rcu_read_lock_bh(); + keypair = rcu_dereference_bh(peer->keypairs.current_keypair); + send = keypair && READ_ONCE(keypair->sending.is_valid) && + (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || + (keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); + rcu_read_unlock_bh(); + + if 
(unlikely(send)) + wg_packet_send_queued_handshake_initiation(peer, false); +} + +static unsigned int calculate_skb_padding(struct sk_buff *skb) +{ + unsigned int padded_size, last_unit = skb->len; + + if (unlikely(!PACKET_CB(skb)->mtu)) + return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit; + + /* We do this modulo business with the MTU, just in case the networking + * layer gives us a packet that's bigger than the MTU. In that case, we + * wouldn't want the final subtraction to overflow in the case of the + * padded_size being clamped. Fortunately, that's very rarely the case, + * so we optimize for that not happening. + */ + if (unlikely(last_unit > PACKET_CB(skb)->mtu)) + last_unit %= PACKET_CB(skb)->mtu; + + padded_size = min(PACKET_CB(skb)->mtu, + ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE)); + return padded_size - last_unit; +} + +static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair, + simd_context_t *simd_context) +{ + unsigned int padding_len, plaintext_len, trailer_len; + struct scatterlist sg[MAX_SKB_FRAGS + 8]; + struct message_data *header; + struct sk_buff *trailer; + int num_frags; + + /* Force hash calculation before encryption so that flow analysis is + * consistent over the inner packet. + */ + skb_get_hash(skb); + + /* Calculate lengths. */ + padding_len = calculate_skb_padding(skb); + trailer_len = padding_len + noise_encrypted_len(0); + plaintext_len = skb->len + padding_len; + + /* Expand data section to have room for padding and auth tag. */ + num_frags = skb_cow_data(skb, trailer_len, &trailer); + if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) + return false; + + /* Set the padding to zeros, and make sure it and the auth tag are part + * of the skb. + */ + memset(skb_tail_pointer(trailer), 0, padding_len); + + /* Expand head section to have room for our header and the network + * stack's headers. + */ + if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0)) + return false; + + /* Finalize checksum calculation for the inner packet, if required. */ + if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_help(skb))) + return false; + + /* Only after checksumming can we safely add on the padding at the end + * and the header. 
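+	 * The padding bytes and our data header are not part of the inner packet, so they must not be covered by its checksum.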
+ */ + skb_set_inner_network_header(skb, 0); + header = (struct message_data *)skb_push(skb, sizeof(*header)); + header->header.type = cpu_to_le32(MESSAGE_DATA); + header->key_idx = keypair->remote_index; + header->counter = cpu_to_le64(PACKET_CB(skb)->nonce); + pskb_put(skb, trailer, trailer_len); + + /* Now we can encrypt the scattergather segments */ + sg_init_table(sg, num_frags); + if (skb_to_sgvec(skb, sg, sizeof(struct message_data), + noise_encrypted_len(plaintext_len)) <= 0) + return false; + return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0, + PACKET_CB(skb)->nonce, + keypair->sending.key, + simd_context); +} + +void wg_packet_send_keepalive(struct wg_peer *peer) +{ + struct sk_buff *skb; + + if (skb_queue_empty(&peer->staged_packet_queue)) { + skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, + GFP_ATOMIC); + if (unlikely(!skb)) + return; + skb_reserve(skb, DATA_PACKET_HEAD_ROOM); + skb->dev = peer->device->dev; + PACKET_CB(skb)->mtu = skb->dev->mtu; + skb_queue_tail(&peer->staged_packet_queue, skb); + net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr); + } + + wg_packet_send_staged_packets(peer); +} + +static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) +{ + struct sk_buff *skb, *next; + bool is_keepalive, data_sent = false; + + wg_timers_any_authenticated_packet_traversal(peer); + wg_timers_any_authenticated_packet_sent(peer); + skb_list_walk_safe(first, skb, next) { + is_keepalive = skb->len == message_data_len(0); + if (likely(!wg_socket_send_skb_to_peer(peer, skb, + PACKET_CB(skb)->ds) && !is_keepalive)) + data_sent = true; + } + + if (likely(data_sent)) + wg_timers_data_sent(peer); + + keep_key_fresh(peer); +} + +void wg_packet_tx_worker(struct work_struct *work) +{ + struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); + struct noise_keypair *keypair; + enum packet_state state; + struct sk_buff *first; + + while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && + (state = atomic_read_acquire(&PACKET_CB(first)->state)) != + PACKET_STATE_UNCRYPTED) { + wg_prev_queue_drop_peeked(&peer->tx_queue); + keypair = PACKET_CB(first)->keypair; + + if (likely(state == PACKET_STATE_CRYPTED)) + wg_packet_create_data_done(peer, first); + else + kfree_skb_list(first); + + wg_noise_keypair_put(keypair, false); + wg_peer_put(peer); + if (need_resched()) + cond_resched(); + } +} + +void wg_packet_encrypt_worker(struct work_struct *work) +{ + struct crypt_queue *queue = container_of(work, struct multicore_worker, + work)->ptr; + struct sk_buff *first, *skb, *next; + simd_context_t simd_context; + + simd_get(&simd_context); + while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) { + enum packet_state state = PACKET_STATE_CRYPTED; + + skb_list_walk_safe(first, skb, next) { + if (likely(encrypt_packet(skb, + PACKET_CB(first)->keypair, + &simd_context))) { + wg_reset_packet(skb, true); + } else { + state = PACKET_STATE_DEAD; + break; + } + } + wg_queue_enqueue_per_peer_tx(first, state); + + simd_relax(&simd_context); + } + simd_put(&simd_context); +} + +static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) +{ + struct wg_device *wg = peer->device; + int ret = -EINVAL; + + rcu_read_lock_bh(); + if (unlikely(READ_ONCE(peer->is_dead))) + goto err; + + ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, + wg->packet_crypt_wq, 
&wg->encrypt_queue.last_cpu); + if (unlikely(ret == -EPIPE)) + wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); +err: + rcu_read_unlock_bh(); + if (likely(!ret || ret == -EPIPE)) + return; + wg_noise_keypair_put(PACKET_CB(first)->keypair, false); + wg_peer_put(peer); + kfree_skb_list(first); +} + +void wg_packet_purge_staged_packets(struct wg_peer *peer) +{ + spin_lock_bh(&peer->staged_packet_queue.lock); + peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; + __skb_queue_purge(&peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); +} + +void wg_packet_send_staged_packets(struct wg_peer *peer) +{ + struct noise_keypair *keypair; + struct sk_buff_head packets; + struct sk_buff *skb; + + /* Steal the current queue into our local one. */ + __skb_queue_head_init(&packets); + spin_lock_bh(&peer->staged_packet_queue.lock); + skb_queue_splice_init(&peer->staged_packet_queue, &packets); + spin_unlock_bh(&peer->staged_packet_queue.lock); + if (unlikely(skb_queue_empty(&packets))) + return; + + /* First we make sure we have a valid reference to a valid key. */ + rcu_read_lock_bh(); + keypair = wg_noise_keypair_get( + rcu_dereference_bh(peer->keypairs.current_keypair)); + rcu_read_unlock_bh(); + if (unlikely(!keypair)) + goto out_nokey; + if (unlikely(!READ_ONCE(keypair->sending.is_valid))) + goto out_nokey; + if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, + REJECT_AFTER_TIME))) + goto out_invalid; + + /* After we know we have a somewhat valid key, we now try to assign + * nonces to all of the packets in the queue. If we can't assign nonces + * for all of them, we just consider it a failure and wait for the next + * handshake. + */ + skb_queue_walk(&packets, skb) { + /* 0 for no outer TOS: no leak. TODO: at some later point, we + * might consider using flowi->tos as outer instead. + */ + PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); + PACKET_CB(skb)->nonce = + atomic64_inc_return(&keypair->sending_counter) - 1; + if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) + goto out_invalid; + } + + packets.prev->next = NULL; + wg_peer_get(keypair->entry.peer); + PACKET_CB(packets.next)->keypair = keypair; + wg_packet_create_data(peer, packets.next); + return; + +out_invalid: + WRITE_ONCE(keypair->sending.is_valid, false); +out_nokey: + wg_noise_keypair_put(keypair, false); + + /* We orphan the packets if we're waiting on a handshake, so that they + * don't block a socket's pool. + */ + skb_queue_walk(&packets, skb) + skb_orphan(skb); + /* Then we put them back on the top of the queue. We're not too + * concerned about accidentally getting things a little out of order if + * packets are being added really fast, because this queue is for before + * packets can even be sent and it's small anyway. + */ + spin_lock_bh(&peer->staged_packet_queue.lock); + skb_queue_splice(&packets, &peer->staged_packet_queue); + spin_unlock_bh(&peer->staged_packet_queue.lock); + + /* If we're exiting because there's something wrong with the key, it + * means we should initiate a new handshake. + */ + wg_packet_send_queued_handshake_initiation(peer, false); +} diff --git a/net/wireguard/socket.c b/net/wireguard/socket.c new file mode 100644 index 000000000000..9e0af9320c6b --- /dev/null +++ b/net/wireguard/socket.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#include "device.h" +#include "peer.h" +#include "socket.h" +#include "queueing.h" +#include "messages.h" + +#include <linux/ctype.h> +#include <linux/net.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> +#include <linux/inetdevice.h> +#include <net/udp_tunnel.h> +#include <net/ipv6.h> + +static int send4(struct wg_device *wg, struct sk_buff *skb, + struct endpoint *endpoint, u8 ds, struct dst_cache *cache) +{ + struct flowi4 fl = { + .saddr = endpoint->src4.s_addr, + .daddr = endpoint->addr4.sin_addr.s_addr, + .fl4_dport = endpoint->addr4.sin_port, + .flowi4_mark = wg->fwmark, + .flowi4_proto = IPPROTO_UDP + }; + struct rtable *rt = NULL; + struct sock *sock; + int ret = 0; + + skb_mark_not_on_list(skb); + skb->dev = wg->dev; + skb->mark = wg->fwmark; + + rcu_read_lock_bh(); + sock = rcu_dereference_bh(wg->sock4); + + if (unlikely(!sock)) { + ret = -ENONET; + goto err; + } + + fl.fl4_sport = inet_sk(sock)->inet_sport; + + if (cache) + rt = dst_cache_get_ip4(cache, &fl.saddr); + + if (!rt) { + security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); + if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, + fl.saddr, RT_SCOPE_HOST))) { + endpoint->src4.s_addr = 0; + endpoint->src_if4 = 0; + fl.saddr = 0; + if (cache) + dst_cache_reset(cache); + } + rt = ip_route_output_flow(sock_net(sock), &fl, sock); + if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) && + PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && + rt->dst.dev->ifindex != endpoint->src_if4)))) { + endpoint->src4.s_addr = 0; + endpoint->src_if4 = 0; + fl.saddr = 0; + if (cache) + dst_cache_reset(cache); + if (!IS_ERR(rt)) + ip_rt_put(rt); + rt = ip_route_output_flow(sock_net(sock), &fl, sock); + } + if (IS_ERR(rt)) { + ret = PTR_ERR(rt); + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; + } + if (cache) + dst_cache_set_ip4(cache, &rt->dst, fl.saddr); + } + + skb->ignore_df = 1; + udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds, + ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, + fl.fl4_dport, false, false); + goto out; + +err: + kfree_skb(skb); +out: + rcu_read_unlock_bh(); + return ret; +} + +static int send6(struct wg_device *wg, struct sk_buff *skb, + struct endpoint *endpoint, u8 ds, struct dst_cache *cache) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct flowi6 fl = { + .saddr = endpoint->src6, + .daddr = endpoint->addr6.sin6_addr, + .fl6_dport = endpoint->addr6.sin6_port, + .flowi6_mark = wg->fwmark, + .flowi6_oif = endpoint->addr6.sin6_scope_id, + .flowi6_proto = IPPROTO_UDP + /* TODO: addr->sin6_flowinfo */ + }; + struct dst_entry *dst = NULL; + struct sock *sock; + int ret = 0; + + skb_mark_not_on_list(skb); + skb->dev = wg->dev; + skb->mark = wg->fwmark; + + rcu_read_lock_bh(); + sock = rcu_dereference_bh(wg->sock6); + + if (unlikely(!sock)) { + ret = -ENONET; + goto err; + } + + fl.fl6_sport = inet_sk(sock)->inet_sport; + + if (cache) + dst = dst_cache_get_ip6(cache, &fl.saddr); + + if (!dst) { + security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); + if (unlikely(!ipv6_addr_any(&fl.saddr) && + !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { + endpoint->src6 = fl.saddr = in6addr_any; + if (cache) + dst_cache_reset(cache); + } + dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, + NULL); + if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; + } + if (cache) + dst_cache_set_ip6(cache, dst, &fl.saddr); + } + + skb->ignore_df = 
1; + udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds, + ip6_dst_hoplimit(dst), 0, fl.fl6_sport, + fl.fl6_dport, false); + goto out; + +err: + kfree_skb(skb); +out: + rcu_read_unlock_bh(); + return ret; +#else + kfree_skb(skb); + return -EAFNOSUPPORT; +#endif +} + +int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds) +{ + size_t skb_len = skb->len; + int ret = -EAFNOSUPPORT; + + read_lock_bh(&peer->endpoint_lock); + if (peer->endpoint.addr.sa_family == AF_INET) + ret = send4(peer->device, skb, &peer->endpoint, ds, + &peer->endpoint_cache); + else if (peer->endpoint.addr.sa_family == AF_INET6) + ret = send6(peer->device, skb, &peer->endpoint, ds, + &peer->endpoint_cache); + else + dev_kfree_skb(skb); + if (likely(!ret)) + peer->tx_bytes += skb_len; + read_unlock_bh(&peer->endpoint_lock); + + return ret; +} + +int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer, + size_t len, u8 ds) +{ + struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); + + if (unlikely(!skb)) + return -ENOMEM; + + skb_reserve(skb, SKB_HEADER_LEN); + skb_set_inner_network_header(skb, 0); + skb_put_data(skb, buffer, len); + return wg_socket_send_skb_to_peer(peer, skb, ds); +} + +int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, + struct sk_buff *in_skb, void *buffer, + size_t len) +{ + int ret = 0; + struct sk_buff *skb; + struct endpoint endpoint; + + if (unlikely(!in_skb)) + return -EINVAL; + ret = wg_socket_endpoint_from_skb(&endpoint, in_skb); + if (unlikely(ret < 0)) + return ret; + + skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + skb_reserve(skb, SKB_HEADER_LEN); + skb_set_inner_network_header(skb, 0); + skb_put_data(skb, buffer, len); + + if (endpoint.addr.sa_family == AF_INET) + ret = send4(wg, skb, &endpoint, 0, NULL); + else if (endpoint.addr.sa_family == AF_INET6) + ret = send6(wg, skb, &endpoint, 0, NULL); + /* No other possibilities if the endpoint is valid, which it is, + * as we checked above. 
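+	 * (wg_socket_endpoint_from_skb() only yields AF_INET or AF_INET6 endpoints.)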
+ */ + + return ret; +} + +int wg_socket_endpoint_from_skb(struct endpoint *endpoint, + const struct sk_buff *skb) +{ + memset(endpoint, 0, sizeof(*endpoint)); + if (skb->protocol == htons(ETH_P_IP)) { + endpoint->addr4.sin_family = AF_INET; + endpoint->addr4.sin_port = udp_hdr(skb)->source; + endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; + endpoint->src4.s_addr = ip_hdr(skb)->daddr; + endpoint->src_if4 = skb->skb_iif; + } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { + endpoint->addr6.sin6_family = AF_INET6; + endpoint->addr6.sin6_port = udp_hdr(skb)->source; + endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; + endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id( + &ipv6_hdr(skb)->saddr, skb->skb_iif); + endpoint->src6 = ipv6_hdr(skb)->daddr; + } else { + return -EINVAL; + } + return 0; +} + +static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b) +{ + return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET && + a->addr4.sin_port == b->addr4.sin_port && + a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr && + a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) || + (a->addr.sa_family == AF_INET6 && + b->addr.sa_family == AF_INET6 && + a->addr6.sin6_port == b->addr6.sin6_port && + ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) && + a->addr6.sin6_scope_id == b->addr6.sin6_scope_id && + ipv6_addr_equal(&a->src6, &b->src6)) || + unlikely(!a->addr.sa_family && !b->addr.sa_family); +} + +void wg_socket_set_peer_endpoint(struct wg_peer *peer, + const struct endpoint *endpoint) +{ + /* First we check unlocked, in order to optimize, since it's pretty rare + * that an endpoint will change. If we happen to be mid-write, and two + * CPUs wind up writing the same thing or something slightly different, + * it doesn't really matter much either. 
+ */ + if (endpoint_eq(endpoint, &peer->endpoint)) + return; + write_lock_bh(&peer->endpoint_lock); + if (endpoint->addr.sa_family == AF_INET) { + peer->endpoint.addr4 = endpoint->addr4; + peer->endpoint.src4 = endpoint->src4; + peer->endpoint.src_if4 = endpoint->src_if4; + } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) { + peer->endpoint.addr6 = endpoint->addr6; + peer->endpoint.src6 = endpoint->src6; + } else { + goto out; + } + dst_cache_reset(&peer->endpoint_cache); +out: + write_unlock_bh(&peer->endpoint_lock); +} + +void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, + const struct sk_buff *skb) +{ + struct endpoint endpoint; + + if (!wg_socket_endpoint_from_skb(&endpoint, skb)) + wg_socket_set_peer_endpoint(peer, &endpoint); +} + +void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) +{ + write_lock_bh(&peer->endpoint_lock); + memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); + dst_cache_reset_now(&peer->endpoint_cache); + write_unlock_bh(&peer->endpoint_lock); +} + +static int wg_receive(struct sock *sk, struct sk_buff *skb) +{ + struct wg_device *wg; + + if (unlikely(!sk)) + goto err; + wg = sk->sk_user_data; + if (unlikely(!wg)) + goto err; + skb_mark_not_on_list(skb); + wg_packet_receive(wg, skb); + return 0; + +err: + kfree_skb(skb); + return 0; +} + +static void sock_free(struct sock *sock) +{ + if (unlikely(!sock)) + return; + sk_clear_memalloc(sock); + udp_tunnel_sock_release(sock->sk_socket); +} + +static void set_sock_opts(struct socket *sock) +{ + sock->sk->sk_allocation = GFP_ATOMIC; + sock->sk->sk_sndbuf = INT_MAX; + sk_set_memalloc(sock->sk); +} + +int wg_socket_init(struct wg_device *wg, u16 port) +{ + struct net *net; + int ret; + struct udp_tunnel_sock_cfg cfg = { + .sk_user_data = wg, + .encap_type = 1, + .encap_rcv = wg_receive + }; + struct socket *new4 = NULL, *new6 = NULL; + struct udp_port_cfg port4 = { + .family = AF_INET, + .local_ip.s_addr = htonl(INADDR_ANY), + .local_udp_port = htons(port), + .use_udp_checksums = true + }; +#if IS_ENABLED(CONFIG_IPV6) + int retries = 0; + struct udp_port_cfg port6 = { + .family = AF_INET6, + .local_ip6 = IN6ADDR_ANY_INIT, + .use_udp6_tx_checksums = true, + .use_udp6_rx_checksums = true, + .ipv6_v6only = true + }; +#endif + + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + net = net ? maybe_get_net(net) : NULL; + rcu_read_unlock(); + if (unlikely(!net)) + return -ENONET; + +#if IS_ENABLED(CONFIG_IPV6) +retry: +#endif + + ret = udp_sock_create(net, &port4, &new4); + if (ret < 0) { + pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); + goto out; + } + set_sock_opts(new4); + setup_udp_tunnel_sock(net, new4, &cfg); + +#if IS_ENABLED(CONFIG_IPV6) + if (ipv6_mod_enabled()) { + port6.local_udp_port = inet_sk(new4->sk)->inet_sport; + ret = udp_sock_create(net, &port6, &new6); + if (ret < 0) { + udp_tunnel_sock_release(new4); + if (ret == -EADDRINUSE && !port && retries++ < 100) + goto retry; + pr_err("%s: Could not create IPv6 socket\n", + wg->dev->name); + goto out; + } + set_sock_opts(new6); + setup_udp_tunnel_sock(net, new6, &cfg); + } +#endif + + wg_socket_reinit(wg, new4->sk, new6 ? 
new6->sk : NULL); + ret = 0; +out: + put_net(net); + return ret; +} + +void wg_socket_reinit(struct wg_device *wg, struct sock *new4, + struct sock *new6) +{ + struct sock *old4, *old6; + + mutex_lock(&wg->socket_update_lock); + old4 = rcu_dereference_protected(wg->sock4, + lockdep_is_held(&wg->socket_update_lock)); + old6 = rcu_dereference_protected(wg->sock6, + lockdep_is_held(&wg->socket_update_lock)); + rcu_assign_pointer(wg->sock4, new4); + rcu_assign_pointer(wg->sock6, new6); + if (new4) + wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); + mutex_unlock(&wg->socket_update_lock); + synchronize_net(); + sock_free(old4); + sock_free(old6); +} diff --git a/net/wireguard/socket.h b/net/wireguard/socket.h new file mode 100644 index 000000000000..bab5848efbcd --- /dev/null +++ b/net/wireguard/socket.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_SOCKET_H +#define _WG_SOCKET_H + +#include <linux/netdevice.h> +#include <linux/udp.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> + +int wg_socket_init(struct wg_device *wg, u16 port); +void wg_socket_reinit(struct wg_device *wg, struct sock *new4, + struct sock *new6); +int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data, + size_t len, u8 ds); +int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, + u8 ds); +int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, + struct sk_buff *in_skb, + void *out_buffer, size_t len); + +int wg_socket_endpoint_from_skb(struct endpoint *endpoint, + const struct sk_buff *skb); +void wg_socket_set_peer_endpoint(struct wg_peer *peer, + const struct endpoint *endpoint); +void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, + const struct sk_buff *skb); +void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer); + +#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) +#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \ + struct endpoint __endpoint; \ + wg_socket_endpoint_from_skb(&__endpoint, skb); \ + net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \ + ##__VA_ARGS__); \ + } while (0) +#else +#define net_dbg_skb_ratelimited(fmt, skb, ...) +#endif + +#endif /* _WG_SOCKET_H */ diff --git a/net/wireguard/timers.c b/net/wireguard/timers.c new file mode 100644 index 000000000000..d54d32ac9bc4 --- /dev/null +++ b/net/wireguard/timers.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include "timers.h" +#include "device.h" +#include "peer.h" +#include "queueing.h" +#include "socket.h" + +/* + * - Timer for retransmitting the handshake if we don't hear back after + * `REKEY_TIMEOUT + jitter` ms. + * + * - Timer for sending empty packet if we have received a packet but after have + * not sent one for `KEEPALIVE_TIMEOUT` ms. + * + * - Timer for initiating new handshake if we have sent a packet but after have + * not received one (even empty) for `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + + * jitter` ms. + * + * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms + * if no new keys have been received. + * + * - Timer for, if enabled, sending an empty authenticated packet every user- + * specified seconds. 
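+ *
+ * All of the above are armed via mod_peer_timer(), which only arms a timer
+ * while the device is up and the peer has not been removed.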
+ */ + +static inline void mod_peer_timer(struct wg_peer *peer, + struct timer_list *timer, + unsigned long expires) +{ + rcu_read_lock_bh(); + if (likely(netif_running(peer->device->dev) && + !READ_ONCE(peer->is_dead))) + mod_timer(timer, expires); + rcu_read_unlock_bh(); +} + +static void wg_expired_retransmit_handshake(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, + timer_retransmit_handshake); + + if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) { + pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2); + + del_timer(&peer->timer_send_keepalive); + /* We drop all packets without a keypair and don't try again, + * if we try unsuccessfully for too long to make a handshake. + */ + wg_packet_purge_staged_packets(peer); + + /* We set a timer for destroying any residue that might be left + * of a partial exchange. + */ + if (!timer_pending(&peer->timer_zero_key_material)) + mod_peer_timer(peer, &peer->timer_zero_key_material, + jiffies + REJECT_AFTER_TIME * 3 * HZ); + } else { + ++peer->timer_handshake_attempts; + pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, REKEY_TIMEOUT, + peer->timer_handshake_attempts + 1); + + /* We clear the endpoint address src address, in case this is + * the cause of trouble. + */ + wg_socket_clear_peer_endpoint_src(peer); + + wg_packet_send_queued_handshake_initiation(peer, true); + } +} + +static void wg_expired_send_keepalive(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive); + + wg_packet_send_keepalive(peer); + if (peer->timer_need_another_keepalive) { + peer->timer_need_another_keepalive = false; + mod_peer_timer(peer, &peer->timer_send_keepalive, + jiffies + KEEPALIVE_TIMEOUT * HZ); + } +} + +static void wg_expired_new_handshake(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake); + + pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT); + /* We clear the endpoint address src address, in case this is the cause + * of trouble. + */ + wg_socket_clear_peer_endpoint_src(peer); + wg_packet_send_queued_handshake_initiation(peer, false); +} + +static void wg_expired_zero_key_material(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material); + + rcu_read_lock_bh(); + if (!READ_ONCE(peer->is_dead)) { + wg_peer_get(peer); + if (!queue_work(peer->device->handshake_send_wq, + &peer->clear_peer_work)) + /* If the work was already on the queue, we want to drop + * the extra reference. 
+ */ + wg_peer_put(peer); + } + rcu_read_unlock_bh(); +} + +static void wg_queued_expired_zero_key_material(struct work_struct *work) +{ + struct wg_peer *peer = container_of(work, struct wg_peer, + clear_peer_work); + + pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n", + peer->device->dev->name, peer->internal_id, + &peer->endpoint.addr, REJECT_AFTER_TIME * 3); + wg_noise_handshake_clear(&peer->handshake); + wg_noise_keypairs_clear(&peer->keypairs); + wg_peer_put(peer); +} + +static void wg_expired_send_persistent_keepalive(struct timer_list *timer) +{ + struct wg_peer *peer = from_timer(peer, timer, + timer_persistent_keepalive); + + if (likely(peer->persistent_keepalive_interval)) + wg_packet_send_keepalive(peer); +} + +/* Should be called after an authenticated data packet is sent. */ +void wg_timers_data_sent(struct wg_peer *peer) +{ + if (!timer_pending(&peer->timer_new_handshake)) + mod_peer_timer(peer, &peer->timer_new_handshake, + jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ + + prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); +} + +/* Should be called after an authenticated data packet is received. */ +void wg_timers_data_received(struct wg_peer *peer) +{ + if (likely(netif_running(peer->device->dev))) { + if (!timer_pending(&peer->timer_send_keepalive)) + mod_peer_timer(peer, &peer->timer_send_keepalive, + jiffies + KEEPALIVE_TIMEOUT * HZ); + else + peer->timer_need_another_keepalive = true; + } +} + +/* Should be called after any type of authenticated packet is sent, whether + * keepalive, data, or handshake. + */ +void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) +{ + del_timer(&peer->timer_send_keepalive); +} + +/* Should be called after any type of authenticated packet is received, whether + * keepalive, data, or handshake. + */ +void wg_timers_any_authenticated_packet_received(struct wg_peer *peer) +{ + del_timer(&peer->timer_new_handshake); +} + +/* Should be called after a handshake initiation message is sent. */ +void wg_timers_handshake_initiated(struct wg_peer *peer) +{ + mod_peer_timer(peer, &peer->timer_retransmit_handshake, + jiffies + REKEY_TIMEOUT * HZ + + prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); +} + +/* Should be called after a handshake response message is received and processed + * or when getting key confirmation via the first data message. + */ +void wg_timers_handshake_complete(struct wg_peer *peer) +{ + del_timer(&peer->timer_retransmit_handshake); + peer->timer_handshake_attempts = 0; + peer->sent_lastminute_handshake = false; + ktime_get_real_ts64(&peer->walltime_last_handshake); +} + +/* Should be called after an ephemeral key is created, which is before sending a + * handshake response or after receiving a handshake response. + */ +void wg_timers_session_derived(struct wg_peer *peer) +{ + mod_peer_timer(peer, &peer->timer_zero_key_material, + jiffies + REJECT_AFTER_TIME * 3 * HZ); +} + +/* Should be called before a packet with authentication, whether + * keepalive, data, or handshakem is sent, or after one is received. 
+ */ +void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer) +{ + if (peer->persistent_keepalive_interval) + mod_peer_timer(peer, &peer->timer_persistent_keepalive, + jiffies + peer->persistent_keepalive_interval * HZ); +} + +void wg_timers_init(struct wg_peer *peer) +{ + timer_setup(&peer->timer_retransmit_handshake, + wg_expired_retransmit_handshake, 0); + timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0); + timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0); + timer_setup(&peer->timer_zero_key_material, + wg_expired_zero_key_material, 0); + timer_setup(&peer->timer_persistent_keepalive, + wg_expired_send_persistent_keepalive, 0); + INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material); + peer->timer_handshake_attempts = 0; + peer->sent_lastminute_handshake = false; + peer->timer_need_another_keepalive = false; +} + +void wg_timers_stop(struct wg_peer *peer) +{ + del_timer_sync(&peer->timer_retransmit_handshake); + del_timer_sync(&peer->timer_send_keepalive); + del_timer_sync(&peer->timer_new_handshake); + del_timer_sync(&peer->timer_zero_key_material); + del_timer_sync(&peer->timer_persistent_keepalive); + flush_work(&peer->clear_peer_work); +} diff --git a/net/wireguard/timers.h b/net/wireguard/timers.h new file mode 100644 index 000000000000..f0653dcb1326 --- /dev/null +++ b/net/wireguard/timers.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _WG_TIMERS_H +#define _WG_TIMERS_H + +#include <linux/ktime.h> + +struct wg_peer; + +void wg_timers_init(struct wg_peer *peer); +void wg_timers_stop(struct wg_peer *peer); +void wg_timers_data_sent(struct wg_peer *peer); +void wg_timers_data_received(struct wg_peer *peer); +void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer); +void wg_timers_any_authenticated_packet_received(struct wg_peer *peer); +void wg_timers_handshake_initiated(struct wg_peer *peer); +void wg_timers_handshake_complete(struct wg_peer *peer); +void wg_timers_session_derived(struct wg_peer *peer); +void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer); + +static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds, + u64 expiration_seconds) +{ + return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC) + <= (s64)ktime_get_coarse_boottime_ns(); +} + +#endif /* _WG_TIMERS_H */ diff --git a/net/wireguard/uapi/wireguard.h b/net/wireguard/uapi/wireguard.h new file mode 100644 index 000000000000..ae88be14c947 --- /dev/null +++ b/net/wireguard/uapi/wireguard.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * Documentation + * ============= + * + * The below enums and macros are for interfacing with WireGuard, using generic + * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two + * methods: get and set. Note that while they share many common attributes, + * these two functions actually accept a slightly different set of inputs and + * outputs. + * + * WG_CMD_GET_DEVICE + * ----------------- + * + * May only be called via NLM_F_REQUEST | NLM_F_DUMP. 
The command should contain + * one but not both of: + * + * WGDEVICE_A_IFINDEX: NLA_U32 + * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * + * The kernel will then return several messages (NLM_F_MULTI) containing the + * following tree of nested items: + * + * WGDEVICE_A_IFINDEX: NLA_U32 + * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGDEVICE_A_LISTEN_PORT: NLA_U16 + * WGDEVICE_A_FWMARK: NLA_U32 + * WGDEVICE_A_PEERS: NLA_NESTED + * 0: NLA_NESTED + * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6 + * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16 + * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec + * WGPEER_A_RX_BYTES: NLA_U64 + * WGPEER_A_TX_BYTES: NLA_U64 + * WGPEER_A_ALLOWEDIPS: NLA_NESTED + * 0: NLA_NESTED + * WGALLOWEDIP_A_FAMILY: NLA_U16 + * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr + * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 + * 0: NLA_NESTED + * ... + * 0: NLA_NESTED + * ... + * ... + * WGPEER_A_PROTOCOL_VERSION: NLA_U32 + * 0: NLA_NESTED + * ... + * ... + * + * It is possible that all of the allowed IPs of a single peer will not + * fit within a single netlink message. In that case, the same peer will + * be written in the following message, except it will only contain + * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several + * times in a row for the same peer. It is then up to the receiver to + * coalesce adjacent peers. Likewise, it is possible that all peers will + * not fit within a single message. So, subsequent peers will be sent + * in following messages, except those will only contain WGDEVICE_A_IFNAME + * and WGDEVICE_A_PEERS. It is then up to the receiver to coalesce these + * messages to form the complete list of peers. + * + * Since this is an NLA_F_DUMP command, the final message will always be + * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message + * contains an integer error code. It is either zero or a negative error + * code corresponding to the errno. + * + * WG_CMD_SET_DEVICE + * ----------------- + * + * May only be called via NLM_F_REQUEST. The command should contain the + * following tree of nested items, containing one but not both of + * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: + * + * WGDEVICE_A_IFINDEX: NLA_U32 + * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current + * peers should be removed prior to adding the list below. + * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove + * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly + * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable + * WGDEVICE_A_PEERS: NLA_NESTED + * 0: NLA_NESTED + * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN + * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the + * specified peer should not exist at the end of the + * operation, rather than added/updated and/or + * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed + * IPs of this peer should be removed prior to adding + * the list below and/or WGPEER_F_UPDATE_ONLY if the + * peer should only be set if it already exists. 
+ * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove + * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6 + * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable + * WGPEER_A_ALLOWEDIPS: NLA_NESTED + * 0: NLA_NESTED + * WGALLOWEDIP_A_FAMILY: NLA_U16 + * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr + * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 + * 0: NLA_NESTED + * ... + * 0: NLA_NESTED + * ... + * ... + * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at + * all by most users of this API, as the + * most recent protocol will be used when + * this is unset. Otherwise, must be set + * to 1. + * 0: NLA_NESTED + * ... + * ... + * + * It is possible that the amount of configuration data exceeds that of + * the maximum message length accepted by the kernel. In that case, several + * messages should be sent one after another, with each successive one + * filling in information not contained in the prior. Note that if + * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably + * should not be specified in fragments that come after, so that the list + * of peers is only cleared the first time but appended after. Likewise for + * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message + * of a peer, it likely should not be specified in subsequent fragments. + * + * If an error occurs, NLMSG_ERROR will reply containing an errno. + */ + +#ifndef _WG_UAPI_WIREGUARD_H +#define _WG_UAPI_WIREGUARD_H + +#define WG_GENL_NAME "wireguard" +#define WG_GENL_VERSION 1 + +#define WG_KEY_LEN 32 + +enum wg_cmd { + WG_CMD_GET_DEVICE, + WG_CMD_SET_DEVICE, + __WG_CMD_MAX +}; +#define WG_CMD_MAX (__WG_CMD_MAX - 1) + +enum wgdevice_flag { + WGDEVICE_F_REPLACE_PEERS = 1U << 0, + __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS +}; +enum wgdevice_attribute { + WGDEVICE_A_UNSPEC, + WGDEVICE_A_IFINDEX, + WGDEVICE_A_IFNAME, + WGDEVICE_A_PRIVATE_KEY, + WGDEVICE_A_PUBLIC_KEY, + WGDEVICE_A_FLAGS, + WGDEVICE_A_LISTEN_PORT, + WGDEVICE_A_FWMARK, + WGDEVICE_A_PEERS, + __WGDEVICE_A_LAST +}; +#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1) + +enum wgpeer_flag { + WGPEER_F_REMOVE_ME = 1U << 0, + WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1, + WGPEER_F_UPDATE_ONLY = 1U << 2, + __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS | + WGPEER_F_UPDATE_ONLY +}; +enum wgpeer_attribute { + WGPEER_A_UNSPEC, + WGPEER_A_PUBLIC_KEY, + WGPEER_A_PRESHARED_KEY, + WGPEER_A_FLAGS, + WGPEER_A_ENDPOINT, + WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, + WGPEER_A_LAST_HANDSHAKE_TIME, + WGPEER_A_RX_BYTES, + WGPEER_A_TX_BYTES, + WGPEER_A_ALLOWEDIPS, + WGPEER_A_PROTOCOL_VERSION, + __WGPEER_A_LAST +}; +#define WGPEER_A_MAX (__WGPEER_A_LAST - 1) + +enum wgallowedip_attribute { + WGALLOWEDIP_A_UNSPEC, + WGALLOWEDIP_A_FAMILY, + WGALLOWEDIP_A_IPADDR, + WGALLOWEDIP_A_CIDR_MASK, + __WGALLOWEDIP_A_LAST +}; +#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1) + +#endif /* _WG_UAPI_WIREGUARD_H */ diff --git a/net/wireguard/version.h b/net/wireguard/version.h new file mode 100644 index 000000000000..c7f9028f0177 --- /dev/null +++ b/net/wireguard/version.h @@ -0,0 +1,3 @@ +#ifndef WIREGUARD_VERSION +#define WIREGUARD_VERSION "1.0.20220627" +#endif From 9392fe7a168ecadc78fc148e35335856efd9d5cc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 23 Mar 2020 23:56:48 +0300 Subject: [PATCH 258/452] net: wireguard: ignore generated files Signed-off-by: Denis Efremov <efremov@linux.com> --- net/wireguard/crypto/zinc/chacha20/.gitignore | 1 + 
net/wireguard/crypto/zinc/poly1305/.gitignore | 1 + 2 files changed, 2 insertions(+) create mode 100644 net/wireguard/crypto/zinc/chacha20/.gitignore create mode 100644 net/wireguard/crypto/zinc/poly1305/.gitignore diff --git a/net/wireguard/crypto/zinc/chacha20/.gitignore b/net/wireguard/crypto/zinc/chacha20/.gitignore new file mode 100644 index 000000000000..50a214c24cdb --- /dev/null +++ b/net/wireguard/crypto/zinc/chacha20/.gitignore @@ -0,0 +1 @@ +/chacha20-arm64.S diff --git a/net/wireguard/crypto/zinc/poly1305/.gitignore b/net/wireguard/crypto/zinc/poly1305/.gitignore new file mode 100644 index 000000000000..0a7e308ae2ba --- /dev/null +++ b/net/wireguard/crypto/zinc/poly1305/.gitignore @@ -0,0 +1 @@ +/poly1305-arm64.S From 7d4a9744af2f8584f788e657728acb1ebaabdf16 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 20 Feb 2020 21:53:45 +0300 Subject: [PATCH 259/452] net: wireguard: switch wireguard to n by default Signed-off-by: Denis Efremov <efremov@linux.com> --- net/wireguard/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wireguard/Kconfig b/net/wireguard/Kconfig index 156e9dbfc051..e4b07763a6e5 100644 --- a/net/wireguard/Kconfig +++ b/net/wireguard/Kconfig @@ -10,7 +10,7 @@ config WIREGUARD select VFPv3 if CPU_V7 select NEON if CPU_V7 select KERNEL_MODE_NEON if CPU_V7 - default m + default n help WireGuard is a secure, fast, and easy to use replacement for IPsec that uses modern cryptography and clever networking tricks. It's @@ -24,6 +24,7 @@ config WIREGUARD config WIREGUARD_DEBUG bool "Debugging checks and verbose messages" depends on WIREGUARD + default n help This will write log messages for handshake and other events that occur for a WireGuard interface. It will also perform some From b5480f3406e47cfd6c687a18407b534f6fee7927 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 12 Jun 2020 01:41:58 +0300 Subject: [PATCH 260/452] scripts: add wireguard updater script Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/update_wireguard.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100755 scripts/update_wireguard.sh diff --git a/scripts/update_wireguard.sh b/scripts/update_wireguard.sh new file mode 100755 index 000000000000..fd56e21e595b --- /dev/null +++ b/scripts/update_wireguard.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +git clone git://git.zx2c4.com/wireguard-linux-compat +rm -fr net/wireguard/* + +perl -i -ne 'BEGIN{$print=1;} $print = 0 if m/cat/; print $_ if $print;' wireguard-linux-compat/kernel-tree-scripts/create-patch.sh + +wireguard-linux-compat/kernel-tree-scripts/create-patch.sh | patch -p1 + +rm -fr wireguard-linux-compat + +git checkout net/wireguard/crypto/zinc/chacha20/.gitignore \ + net/wireguard/crypto/zinc/poly1305/.gitignore \ + net/wireguard/Kconfig + From 557d6b9b8058758963af3d4c0488d4d0b4079ecc Mon Sep 17 00:00:00 2001 From: andip71 <andreasp@gmx.de> Date: Mon, 8 Jan 2018 00:50:49 +0100 Subject: [PATCH 261/452] power: Add generic wakelock blocker driver v1.1.0 Based on ideas of FranciscoFranco's non-generic driver. 
Sysfs node: /sys/class/misc/boeffla_wakelock_blocker/wakelock_blocker - list of wakelocks to be blocked, separated by semicolons /sys/class/misc/boeffla_wakelock_blocker/debug - write: 0/1 to switch off and on debug logging into dmesg - read: get current driver internals /sys/class/misc/boeffla_wakelock_blocker/version - show driver version Signed-off-by: andip71 <andreasp@gmx.de> --- drivers/base/power/Makefile | 1 + drivers/base/power/boeffla_wl_blocker.c | 236 ++++++++++++++++++++++++ drivers/base/power/boeffla_wl_blocker.h | 23 +++ drivers/base/power/main.c | 11 ++ drivers/base/power/wakeup.c | 87 ++++++++- kernel/power/Kconfig | 5 + 6 files changed, 356 insertions(+), 7 deletions(-) create mode 100644 drivers/base/power/boeffla_wl_blocker.c create mode 100644 drivers/base/power/boeffla_wl_blocker.h diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 29cd71d8b360..4f9c632738f6 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o +obj-$(CONFIG_BOEFFLA_WL_BLOCKER) += boeffla_wl_blocker.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c new file mode 100644 index 000000000000..e9c93ce97ce9 --- /dev/null +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -0,0 +1,236 @@ +/* + * Author: andip71, 01.09.2017 + * + * Version 1.1.0 + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* + * Change log: + * + * 1.1.0 (01.09.2017) + * - By default, the following wakelocks are blocked in an own list + * qcom_rx_wakelock, wlan, wlan_wow_wl, wlan_extscan_wl, NETLINK + * + * 1.0.1 (29.08.2017) + * - Add killing wakelock when currently active + * + * 1.0.0 (28.08.2017) + * - Initial version + * + */ + +#include <linux/module.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/printk.h> +#include "boeffla_wl_blocker.h" + + +/*****************************************/ +// Variables +/*****************************************/ + +char list_wl[LENGTH_LIST_WL] = {0}; +char list_wl_default[LENGTH_LIST_WL_DEFAULT] = {0}; + +extern char list_wl_search[LENGTH_LIST_WL_SEARCH]; +extern bool wl_blocker_active; +extern bool wl_blocker_debug; + + +/*****************************************/ +// internal functions +/*****************************************/ + +static void build_search_string(char *list1, char *list2) +{ + // store wakelock list and search string (with semicolons added at start and end) + sprintf(list_wl_search, ";%s;%s;", list1, list2); + + // set flag if wakelock blocker should be active (for performance reasons) + if (strlen(list_wl_search) > 5) + wl_blocker_active = true; + else + wl_blocker_active = false; +} + + +/*****************************************/ +// sysfs interface functions +/*****************************************/ + +// show list of user configured wakelocks +static ssize_t wakelock_blocker_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + // return list of wakelocks to be blocked + return sprintf(buf, "%s\n", list_wl); +} + + +// store list of user configured wakelocks +static ssize_t wakelock_blocker_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + int len = n; + + // check if string is too long to be stored + if (len > LENGTH_LIST_WL) + return -EINVAL; + + // store user configured wakelock list and rebuild search string + sscanf(buf, "%s", list_wl); + build_search_string(list_wl_default, list_wl); + + return n; +} + + +// show list of default, predefined wakelocks +static ssize_t wakelock_blocker_default_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + // return list of wakelocks to be blocked + return sprintf(buf, "%s\n", list_wl_default); +} + + +// store list of default, predefined wakelocks +static ssize_t wakelock_blocker_default_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + int len = n; + + // check if string is too long to be stored + if (len > LENGTH_LIST_WL_DEFAULT) + return -EINVAL; + + // store default, predefined wakelock list and rebuild search string + sscanf(buf, "%s", list_wl_default); + build_search_string(list_wl_default, list_wl); + + return n; +} + + +// show debug information of driver internals +static ssize_t debug_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + // return current debug status + return sprintf(buf, "Debug status: %d\n\nUser list: %s\nDefault list: %s\nSearch list: %s\nActive: %d\n", + wl_blocker_debug, list_wl, list_wl_default, list_wl_search, wl_blocker_active); +} + + +// store debug mode on/off (1/0) +static ssize_t debug_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int ret = -EINVAL; + unsigned int val; + + // check data and store if valid + ret = sscanf(buf, "%d", &val); + + if (ret != 1) + return 
-EINVAL; + + if (val == 1) + wl_blocker_debug = true; + else + wl_blocker_debug = false; + + return count; +} + + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + // return version information + return sprintf(buf, "%s\n", BOEFFLA_WL_BLOCKER_VERSION); +} + + + +/*****************************************/ +// Initialize sysfs objects +/*****************************************/ + +// define objects +static DEVICE_ATTR(wakelock_blocker, 0644, wakelock_blocker_show, wakelock_blocker_store); +static DEVICE_ATTR(wakelock_blocker_default, 0644, wakelock_blocker_default_show, wakelock_blocker_default_store); +static DEVICE_ATTR(debug, 0664, debug_show, debug_store); +static DEVICE_ATTR(version, 0664, version_show, NULL); + +// define attributes +static struct attribute *boeffla_wl_blocker_attributes[] = { + &dev_attr_wakelock_blocker.attr, + &dev_attr_wakelock_blocker_default.attr, + &dev_attr_debug.attr, + &dev_attr_version.attr, + NULL +}; + +// define attribute group +static struct attribute_group boeffla_wl_blocker_control_group = { + .attrs = boeffla_wl_blocker_attributes, +}; + +// define control device +static struct miscdevice boeffla_wl_blocker_control_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "boeffla_wakelock_blocker", +}; + + +/*****************************************/ +// Driver init and exit functions +/*****************************************/ + +static int boeffla_wl_blocker_init(void) +{ + // register boeffla wakelock blocker control device + misc_register(&boeffla_wl_blocker_control_device); + if (sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, + &boeffla_wl_blocker_control_group) < 0) { + printk("Boeffla WL blocker: failed to create sys fs object.\n"); + return 0; + } + + // initialize default list + sprintf(list_wl_default, "%s", LIST_WL_DEFAULT); + build_search_string(list_wl_default, list_wl); + + // Print debug info + printk("Boeffla WL blocker: driver version %s started\n", BOEFFLA_WL_BLOCKER_VERSION); + + return 0; +} + + +static void boeffla_wl_blocker_exit(void) +{ + // remove boeffla wakelock blocker control device + sysfs_remove_group(&boeffla_wl_blocker_control_device.this_device->kobj, + &boeffla_wl_blocker_control_group); + + // Print debug info + printk("Boeffla WL blocker: driver stopped\n"); +} + + +/* define driver entry points */ +module_init(boeffla_wl_blocker_init); +module_exit(boeffla_wl_blocker_exit); diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h new file mode 100644 index 000000000000..63603edc4b43 --- /dev/null +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -0,0 +1,23 @@ +/* + * Author: andip71, 01.09.2017 + * + * Version 1.1.0 + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define BOEFFLA_WL_BLOCKER_VERSION "1.1.0" + +#define LIST_WL_DEFAULT "" + +#define LENGTH_LIST_WL 255 +#define LENGTH_LIST_WL_DEFAULT 150 +#define LENGTH_LIST_WL_SEARCH LENGTH_LIST_WL + LENGTH_LIST_WL_DEFAULT + 5 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a3e4afff3fea..0d052b36c189 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -40,6 +40,14 @@ #include "../base.h" #include "power.h" +#ifdef CONFIG_SEC_DEBUG +#include <linux/sec_debug.h> +#endif + +#ifdef CONFIG_BOEFFLA_WL_BLOCKER +void pm_print_active_wakeup_sources(void); +#endif + typedef int (*pm_callback_t)(struct device *); /* @@ -761,6 +769,9 @@ void dpm_resume_early(pm_message_t state) trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); dbg_snapshot_suspend("dpm_resume_early", dpm_resume_early, NULL, state.event, DSS_FLAG_IN); +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + pm_print_active_wakeup_sources(); +#endif mutex_lock(&dpm_list_mtx); pm_transition = state; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index e9591af2e2ee..ff1396b40c02 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -26,6 +26,18 @@ #include "power.h" + +#ifdef CONFIG_BOEFFLA_WL_BLOCKER +#include "boeffla_wl_blocker.h" + +char list_wl_search[LENGTH_LIST_WL_SEARCH] = {0}; +bool wl_blocker_active = false; +bool wl_blocker_debug = false; + +static void wakeup_source_deactivate(struct wakeup_source *ws); +#endif + + /* * If set, the suspend/hibernate code will abort transitions to a sleep state * if wakeup events are registered during or immediately before the transition. @@ -562,6 +574,57 @@ static void wakeup_source_activate(struct wakeup_source *ws) trace_wakeup_source_activate(ws->name, cec); } +#ifdef CONFIG_BOEFFLA_WL_BLOCKER +// AP: Function to check if a wakelock is on the wakelock blocker list +static bool check_for_block(struct wakeup_source *ws) +{ + char wakelock_name[52] = {0}; + int length; + + // if debug mode on, print every wakelock requested + if (wl_blocker_debug) + printk("Boeffla WL blocker: %s requested\n", ws->name); + + // if there is no list of wakelocks to be blocked, exit without futher checking + if (!wl_blocker_active) + return false; + + // only if ws structure is valid + if (ws) + { + // wake lock names handled have maximum length=50 and minimum=1 + length = strlen(ws->name); + if ((length > 50) || (length < 1)) + return false; + + // check if wakelock is in wake lock list to be blocked + sprintf(wakelock_name, ";%s;", ws->name); + + if(strstr(list_wl_search, wakelock_name) == NULL) + return false; + + // wake lock is in list, print it if debug mode on + if (wl_blocker_debug) + printk("Boeffla WL blocker: %s blocked\n", ws->name); + + // if it is currently active, deactivate it immediately + log in debug mode + if (ws->active) + { + wakeup_source_deactivate(ws); + + if (wl_blocker_debug) + printk("Boeffla WL blocker: %s killed\n", ws->name); + } + + // finally block it + return true; + } + + // there was no valid ws structure, do not block by default + return false; +} +#endif + /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. @@ -569,16 +632,23 @@ static void wakeup_source_activate(struct wakeup_source *ws) */ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { - ws->event_count++; - /* This is racy, but the counter is approximate anyway. 
*/ - if (events_check_enabled) - ws->wakeup_count++; +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + if (!check_for_block(ws)) // AP: check if wakelock is on wakelock blocker list + { +#endif + ws->event_count++; + /* This is racy, but the counter is approximate anyway. */ + if (events_check_enabled) + ws->wakeup_count++; - if (!ws->active) - wakeup_source_activate(ws); + if (!ws->active) + wakeup_source_activate(ws); if (hard) pm_system_wakeup(); +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + } +#endif } /** @@ -940,7 +1010,10 @@ void pm_print_active_wakeup_sources(void) list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_info("active wakeup source: %s\n", ws->name); - active = 1; +#ifdef CONFIG_BOEFFLA_WL_BLOCKER + if (!check_for_block(ws)) // AP: check if wakelock is on wakelock blocker list +#endif + active = 1; } else if (!active && (!last_activity_ws || ktime_to_ns(ws->last_time) > diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index dd2b5a4d89a5..a69065e41913 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -312,3 +312,8 @@ config PM_GENERIC_DOMAINS_OF config CPU_PM bool + +config BOEFFLA_WL_BLOCKER + bool "Boeffla generic wakelock blocker driver" + depends on PM + default N From c6bfbb182e87e3c065074779307aa9e3439fa4a3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 13 Jun 2020 11:02:24 +0300 Subject: [PATCH 262/452] power: wl_blocker: add generic size Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index 63603edc4b43..f32b51651927 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -19,5 +19,5 @@ #define LIST_WL_DEFAULT "" #define LENGTH_LIST_WL 255 -#define LENGTH_LIST_WL_DEFAULT 150 +#define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) #define LENGTH_LIST_WL_SEARCH LENGTH_LIST_WL + LENGTH_LIST_WL_DEFAULT + 5 From 04c0ff4ecd82b652bcfa663ab8fb51c6bab2616e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 13 Jun 2020 11:09:19 +0300 Subject: [PATCH 263/452] power: wl_blocker: use scnprintf PAGE_SIZE Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index e9c93ce97ce9..a5a36cd528ac 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -76,7 +76,7 @@ static ssize_t wakelock_blocker_show(struct device *dev, struct device_attribute char *buf) { // return list of wakelocks to be blocked - return sprintf(buf, "%s\n", list_wl); + return scnprintf(buf, PAGE_SIZE, "%s\n", list_wl); } @@ -103,7 +103,7 @@ static ssize_t wakelock_blocker_default_show(struct device *dev, struct device_a char *buf) { // return list of wakelocks to be blocked - return sprintf(buf, "%s\n", list_wl_default); + return scnprintf(buf, PAGE_SIZE, "%s\n", list_wl_default); } @@ -129,8 +129,11 @@ static ssize_t wakelock_blocker_default_store(struct device * dev, struct device static ssize_t debug_show(struct device *dev, struct device_attribute *attr, char *buf) { // return current debug status - return sprintf(buf, "Debug status: %d\n\nUser list: %s\nDefault list: %s\nSearch list: %s\nActive: %d\n", - wl_blocker_debug, list_wl, list_wl_default, 
list_wl_search, wl_blocker_active); + return scnprintf(buf, PAGE_SIZE, + "Debug status: %d\n\nUser list: %s\nDefault list: %s\n" + "Search list: %s\nActive: %d\n", + wl_blocker_debug, list_wl, list_wl_default, + list_wl_search, wl_blocker_active); } @@ -159,7 +162,7 @@ static ssize_t debug_store(struct device *dev, struct device_attribute *attr, static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { // return version information - return sprintf(buf, "%s\n", BOEFFLA_WL_BLOCKER_VERSION); + return scnprintf(buf, PAGE_SIZE, "%s\n", BOEFFLA_WL_BLOCKER_VERSION); } From c46ae4153521d1d7aee8675d96a1e03f858eeba5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 13 Jun 2020 11:24:06 +0300 Subject: [PATCH 264/452] power: wl_blocker: fix permissions for version_show Use only read permissions for version_show. Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index a5a36cd528ac..b8814b16d3f1 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -172,10 +172,10 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c /*****************************************/ // define objects -static DEVICE_ATTR(wakelock_blocker, 0644, wakelock_blocker_show, wakelock_blocker_store); -static DEVICE_ATTR(wakelock_blocker_default, 0644, wakelock_blocker_default_show, wakelock_blocker_default_store); -static DEVICE_ATTR(debug, 0664, debug_show, debug_store); -static DEVICE_ATTR(version, 0664, version_show, NULL); +static DEVICE_ATTR_RW(wakelock_blocker); +static DEVICE_ATTR_RW(wakelock_blocker_default); +static DEVICE_ATTR_RW(debug); +static DEVICE_ATTR_RO(version); // define attributes static struct attribute *boeffla_wl_blocker_attributes[] = { From 9f2fe681394ef6e1f89b2769319febddcdda8426 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 13 Jun 2020 16:16:07 +0300 Subject: [PATCH 265/452] power: wl_blocker: remove excessive len variable It's not used. Type conversion size_t > int is not needed. 
Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index b8814b16d3f1..5d4af4e8217c 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -84,10 +84,8 @@ static ssize_t wakelock_blocker_show(struct device *dev, struct device_attribute static ssize_t wakelock_blocker_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { - int len = n; - // check if string is too long to be stored - if (len > LENGTH_LIST_WL) + if (n > LENGTH_LIST_WL) return -EINVAL; // store user configured wakelock list and rebuild search string @@ -111,10 +109,8 @@ static ssize_t wakelock_blocker_default_show(struct device *dev, struct device_a static ssize_t wakelock_blocker_default_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { - int len = n; - // check if string is too long to be stored - if (len > LENGTH_LIST_WL_DEFAULT) + if (n > LENGTH_LIST_WL_DEFAULT) return -EINVAL; // store default, predefined wakelock list and rebuild search string From 209d3249836d7d0b6106db0bc70b5ebec67354f2 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 13 Jun 2020 17:36:29 +0300 Subject: [PATCH 266/452] power: wl_blocker: use strcpy() in init Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 5d4af4e8217c..99d3637c5180 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -54,7 +54,7 @@ extern bool wl_blocker_debug; // internal functions /*****************************************/ -static void build_search_string(char *list1, char *list2) +static void build_search_string(const char *list1, const char *list2) { // store wakelock list and search string (with semicolons added at start and end) sprintf(list_wl_search, ";%s;%s;", list1, list2); @@ -209,7 +209,7 @@ static int boeffla_wl_blocker_init(void) } // initialize default list - sprintf(list_wl_default, "%s", LIST_WL_DEFAULT); + strcpy(list_wl_default, LIST_WL_DEFAULT); build_search_string(list_wl_default, list_wl); // Print debug info From 67a44a68f2965a2b3dda861d39db1b3936016b0c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:01:38 +0300 Subject: [PATCH 267/452] power: wl_blocker: mark functions with __init, __exit attrs Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 99d3637c5180..92f285a44f3f 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -198,7 +198,7 @@ static struct miscdevice boeffla_wl_blocker_control_device = { // Driver init and exit functions /*****************************************/ -static int boeffla_wl_blocker_init(void) +static int __init boeffla_wl_blocker_init(void) { // register boeffla wakelock blocker control device misc_register(&boeffla_wl_blocker_control_device); @@ -219,7 +219,7 @@ static int boeffla_wl_blocker_init(void) } -static void boeffla_wl_blocker_exit(void) +static void 
__exit boeffla_wl_blocker_exit(void) { // remove boeffla wakelock blocker control device sysfs_remove_group(&boeffla_wl_blocker_control_device.this_device->kobj, From 229b8f460b9c3e47cd61ed0a5f19d610e6279d0c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:14:04 +0300 Subject: [PATCH 268/452] power: wl_blocker: add pr_fmt Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 92f285a44f3f..b45d03e0aa21 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -29,6 +29,8 @@ * */ +#define pr_fmt(fmt) "Boeffla WL blocker: " fmt + #include <linux/module.h> #include <linux/kobject.h> #include <linux/sysfs.h> @@ -204,7 +206,7 @@ static int __init boeffla_wl_blocker_init(void) misc_register(&boeffla_wl_blocker_control_device); if (sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, &boeffla_wl_blocker_control_group) < 0) { - printk("Boeffla WL blocker: failed to create sys fs object.\n"); + pr_err("failed to create sys fs object.\n"); return 0; } @@ -213,7 +215,7 @@ static int __init boeffla_wl_blocker_init(void) build_search_string(list_wl_default, list_wl); // Print debug info - printk("Boeffla WL blocker: driver version %s started\n", BOEFFLA_WL_BLOCKER_VERSION); + pr_info("driver version %s started\n", BOEFFLA_WL_BLOCKER_VERSION); return 0; } @@ -226,7 +228,7 @@ static void __exit boeffla_wl_blocker_exit(void) &boeffla_wl_blocker_control_group); // Print debug info - printk("Boeffla WL blocker: driver stopped\n"); + pr_info("driver stopped\n"); } From 8a70b7eaed4779813a0602668f070810fb168ab1 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:15:14 +0300 Subject: [PATCH 269/452] power: wl_blocker: add misc_deregister in exit function Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index b45d03e0aa21..0da0d73bcf73 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -227,6 +227,8 @@ static void __exit boeffla_wl_blocker_exit(void) sysfs_remove_group(&boeffla_wl_blocker_control_device.this_device->kobj, &boeffla_wl_blocker_control_group); + misc_deregister(&boeffla_wl_blocker_control_device); + // Print debug info pr_info("driver stopped\n"); } From 6807c03eb799afaf811bd77aee072fa1dd12fc0e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:23:11 +0300 Subject: [PATCH 270/452] power: wl_blocker: add error handling to init function Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 0da0d73bcf73..fb74ca25f854 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -202,12 +202,21 @@ static struct miscdevice boeffla_wl_blocker_control_device = { static int __init boeffla_wl_blocker_init(void) { + int err = 0; + // register boeffla wakelock blocker control device - misc_register(&boeffla_wl_blocker_control_device); - if 
(sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, - &boeffla_wl_blocker_control_group) < 0) { + err = misc_register(&boeffla_wl_blocker_control_device); + if (err) { + pr_err("failed register the device.\n"); + return err; + } + + err = sysfs_create_group(&boeffla_wl_blocker_control_device.this_device->kobj, + &boeffla_wl_blocker_control_group); + if (err) { pr_err("failed to create sys fs object.\n"); - return 0; + misc_deregister(&boeffla_wl_blocker_control_device); + return err; } // initialize default list From d94a0fcf07029389966070dcbf0acb40fa92f89c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:29:07 +0300 Subject: [PATCH 271/452] power: wl_blocker: use device_store_bool() to store wl_blocker_debug Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 29 +++++-------------------- 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index fb74ca25f854..40d3c5647f3c 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -135,28 +135,6 @@ static ssize_t debug_show(struct device *dev, struct device_attribute *attr, cha } -// store debug mode on/off (1/0) -static ssize_t debug_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned int ret = -EINVAL; - unsigned int val; - - // check data and store if valid - ret = sscanf(buf, "%d", &val); - - if (ret != 1) - return -EINVAL; - - if (val == 1) - wl_blocker_debug = true; - else - wl_blocker_debug = false; - - return count; -} - - static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { // return version information @@ -172,14 +150,17 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c // define objects static DEVICE_ATTR_RW(wakelock_blocker); static DEVICE_ATTR_RW(wakelock_blocker_default); -static DEVICE_ATTR_RW(debug); static DEVICE_ATTR_RO(version); +static struct dev_ext_attribute dev_attr_debug = { + __ATTR(debug, 0644, debug_show, device_store_bool), + &wl_blocker_debug +}; // define attributes static struct attribute *boeffla_wl_blocker_attributes[] = { &dev_attr_wakelock_blocker.attr, &dev_attr_wakelock_blocker_default.attr, - &dev_attr_debug.attr, + &dev_attr_debug.attr.attr, &dev_attr_version.attr, NULL }; From 9a7019f8c5483d2569b415206e76d74a6e4504e3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 13 Jun 2020 16:49:57 +0300 Subject: [PATCH 272/452] power: wl_blocker: add default list Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index f32b51651927..42ee96769fc6 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -16,7 +16,7 @@ #define BOEFFLA_WL_BLOCKER_VERSION "1.1.0" -#define LIST_WL_DEFAULT "" +#define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect" #define LENGTH_LIST_WL 255 #define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) From 58eb738b4108f9ef2ba5bcaa63ca797033b7e7fa Mon Sep 17 00:00:00 2001 From: Aner Torre <anernaiz@gmail.com> Date: Wed, 17 Jun 2020 
11:33:50 +0200 Subject: [PATCH 273/452] power: wl_blocker: increase blacklist max length The previous 255 limit seemed to be not enough to hold all the Samsung stock defined wakelocks that look safe to block. --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index 42ee96769fc6..5ee4a755b8a1 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -18,6 +18,6 @@ #define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect" -#define LENGTH_LIST_WL 255 +#define LENGTH_LIST_WL 1024 #define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) #define LENGTH_LIST_WL_SEARCH LENGTH_LIST_WL + LENGTH_LIST_WL_DEFAULT + 5 From ca006735d9297e437bc1359744651731b824c480 Mon Sep 17 00:00:00 2001 From: Aner Torre <anernaiz@gmail.com> Date: Wed, 17 Jun 2020 11:57:15 +0200 Subject: [PATCH 274/452] power: wl_blocker: add some wakelocks to the blacklist Seems blocking them does not affect device stability. --- drivers/base/power/boeffla_wl_blocker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/boeffla_wl_blocker.h b/drivers/base/power/boeffla_wl_blocker.h index 5ee4a755b8a1..753b3afe4962 100644 --- a/drivers/base/power/boeffla_wl_blocker.h +++ b/drivers/base/power/boeffla_wl_blocker.h @@ -16,7 +16,7 @@ #define BOEFFLA_WL_BLOCKER_VERSION "1.1.0" -#define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect" +#define LIST_WL_DEFAULT "wlan_wake;wlan_rx_wake;wlan_ctrl_wake;wlan_txfl_wake;bluetooth_timer;BT_bt_wake;BT_host_wake;bbd_wake_lock;ssp_sensorhub_wake_lock;ssp_wake_lock;ssp_comm_wake_lock;mmc0_detect;grip_wake_lock;wlan_scan_wake;wlan_pm_wake;nfc_wake_lock" #define LENGTH_LIST_WL 1024 #define LENGTH_LIST_WL_DEFAULT (sizeof(LIST_WL_DEFAULT)) From 4e051042cecb27b0edfd4ccab016acc195e839e0 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 9 Sep 2020 01:52:11 +0300 Subject: [PATCH 275/452] power: wl_blocker: drop redundant global initializers Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/power/boeffla_wl_blocker.c | 4 ++-- drivers/base/power/wakeup.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/base/power/boeffla_wl_blocker.c b/drivers/base/power/boeffla_wl_blocker.c index 40d3c5647f3c..9e848ba9df5a 100644 --- a/drivers/base/power/boeffla_wl_blocker.c +++ b/drivers/base/power/boeffla_wl_blocker.c @@ -44,8 +44,8 @@ // Variables /*****************************************/ -char list_wl[LENGTH_LIST_WL] = {0}; -char list_wl_default[LENGTH_LIST_WL_DEFAULT] = {0}; +char list_wl[LENGTH_LIST_WL]; +char list_wl_default[LENGTH_LIST_WL_DEFAULT]; extern char list_wl_search[LENGTH_LIST_WL_SEARCH]; extern bool wl_blocker_active; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index ff1396b40c02..5dc98b862b5a 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -30,7 +30,7 @@ #ifdef CONFIG_BOEFFLA_WL_BLOCKER #include "boeffla_wl_blocker.h" -char list_wl_search[LENGTH_LIST_WL_SEARCH] = {0}; +char list_wl_search[LENGTH_LIST_WL_SEARCH]; bool wl_blocker_active = false; bool wl_blocker_debug = false; From 
141651451fa003d3d20fa3ea5b76ad933316a8df Mon Sep 17 00:00:00 2001 From: Nico Becker <nicistyler98@gmail.com> Date: Sun, 22 Mar 2020 04:31:51 +0100 Subject: [PATCH 276/452] sound: add moro sound module Added Moro Sound Module Signed-off-by: Nico Becker <nicistyler98@gmail.com> Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/base/regmap/regmap.c | 39 ++ sound/soc/codecs/Kconfig | 4 + sound/soc/codecs/Makefile | 1 + sound/soc/codecs/cs47l92.c | 10 + sound/soc/codecs/madera.c | 7 + sound/soc/codecs/moro_sound.c | 770 ++++++++++++++++++++++++++++++++++ sound/soc/codecs/moro_sound.h | 63 +++ 7 files changed, 894 insertions(+) create mode 100644 sound/soc/codecs/moro_sound.c create mode 100644 sound/soc/codecs/moro_sound.h diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 8fd08023c0f5..cb409bd2cc7a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -34,6 +34,10 @@ */ #undef LOG_DEVICE +#ifdef CONFIG_MORO_SOUND +int moro_sound_write_hook(unsigned int reg, unsigned int val); +#endif + static int _regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool force_write); @@ -1636,6 +1640,10 @@ int _regmap_write(struct regmap *map, unsigned int reg, if (!regmap_writeable(map, reg)) return -EIO; +#ifdef CONFIG_MORO_SOUND + val = moro_sound_write_hook(reg, val); +#endif + if (!map->cache_bypass && !map->defer_caching) { ret = regcache_write(map, reg, val); if (ret != 0) @@ -1656,6 +1664,37 @@ int _regmap_write(struct regmap *map, unsigned int reg, return map->reg_write(context, reg, val); } +#ifdef CONFIG_MORO_SOUND +int _regmap_write_nohook(struct regmap *map, unsigned int reg, + unsigned int val) +{ + int ret; + void *context = _regmap_map_get_context(map); + + if (!regmap_writeable(map, reg)) + return -EIO; + + if (!map->cache_bypass && !map->defer_caching) { + ret = regcache_write(map, reg, val); + if (ret != 0) + return ret; + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + +#ifdef LOG_DEVICE + if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) + dev_info(map->dev, "%x <= %x\n", reg, val); +#endif + + trace_regmap_reg_write(map, reg, val); + + return map->reg_write(context, reg, val); +} +#endif + /** * regmap_write() - Write a value to a single register * diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 9ec790676217..811e5eda4db0 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -1245,4 +1245,8 @@ config SND_SOC_TPA6130A2 tristate "Texas Instruments TPA6130A2 headphone amplifier" depends on I2C +config MORO_SOUND + bool "Sound control for S10 madera driver" + default n + endmenu diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 10b1aec4addf..930652f3b7c6 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -490,6 +490,7 @@ obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o obj-$(CONFIG_SND_SOC_ZX_AUD96P22) += snd-soc-zx-aud96p22.o +obj-$(CONFIG_MORO_SOUND) += moro_sound.o # Amp obj-$(CONFIG_SND_SOC_DIO2125) += snd-soc-dio2125.o diff --git a/sound/soc/codecs/cs47l92.c b/sound/soc/codecs/cs47l92.c index 253a844b5060..f03c291ea3e3 100644 --- a/sound/soc/codecs/cs47l92.c +++ b/sound/soc/codecs/cs47l92.c @@ -30,6 +30,10 @@ #include "madera.h" #include "wm_adsp.h" +#ifdef CONFIG_MORO_SOUND +#include "moro_sound.h" +#endif + #define CS47L92_NUM_ADSP 1 #define 
CS47L92_MONO_OUTPUTS 3 @@ -1974,6 +1978,12 @@ static int cs47l92_codec_probe(struct snd_soc_codec *codec) madera->dapm = snd_soc_codec_get_dapm(codec); +#ifdef CONFIG_MORO_SOUND + moro_sound_hook_madera_pcm_probe(madera->regmap); + + cs47l92->core.madera->dapm = snd_soc_codec_get_dapm(codec); +#endif + ret = madera_init_inputs(codec, cs47l92_dmic_inputs, ARRAY_SIZE(cs47l92_dmic_inputs), diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index da7037f0d300..93bb47d0c5bc 100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -16,6 +16,9 @@ #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> +#ifdef CONFIG_MORO_SOUND +#include "moro_sound.h" +#endif #include <linux/mfd/madera/core.h> #include <linux/mfd/madera/registers.h> @@ -1382,6 +1385,10 @@ int madera_init_aif(struct snd_soc_codec *codec) struct madera *madera = priv->madera; int ret; +#ifdef CONFIG_MORO_SOUND + moro_sound_hook_madera_pcm_probe(madera->regmap); +#endif + /* Update Sample Rate 1 to 48kHz for cases when no AIF1 hw_params */ ret = regmap_update_bits(madera->regmap, MADERA_SAMPLE_RATE_1, MADERA_SAMPLE_RATE_1_MASK, 0x03); diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c new file mode 100644 index 000000000000..8ca5c3d97387 --- /dev/null +++ b/sound/soc/codecs/moro_sound.c @@ -0,0 +1,770 @@ +/* + * moro_sound.c -- Sound mod for Madera, S8 sound driver + * + * Author : @morogoku https://github.com/morogoku + * Edited by : @noxxxious https://github.com/noxxxious + * + * Date : March 2019 - v1.0 + * + * + * Based on the Boeffla Sound 1.6 for Galaxy S3 + * + * Credits: andip71, author of Boeffla Sound + * Supercurio, Yank555 and Gokhanmoral. + * AndreiLux, for his Madera control sound mod + * Flar2, for his speaker gain mod + * + */ + + +#include "moro_sound.h" + + +/* Variables */ +static struct regmap *map; + +/* Internal moro sound variables */ +static int first = 1; +static int moro_sound = 0; + +static int headphone_gain_l, headphone_gain_r; +static int earpiece_gain; +static int out2l_mix_source, out2r_mix_source; +static int eq1_mix_source, eq2_mix_source; + +static int eq = 0; +static int eq_gains[5]; + +static unsigned int get_headphone_gain_l(void); +static unsigned int get_headphone_gain_r(void); +static void set_headphone_gain_l(int gain); +static void set_headphone_gain_r(int gain); + +static unsigned int get_earpiece_gain(void); +static void set_earpiece_gain(int gain); + +static void set_out2l_mix_source(int value); +static void set_out2r_mix_source(int value); + +static void set_eq1_mix_source(int value); +static void set_eq2_mix_source(int value); + +static void set_eq(void); +static void set_eq_gains(void); + +static void reset_moro_sound(void); +static void reset_audio_hub(void); +static void update_audio_hub(void); + +/* Internal helper functions */ + +#define madera_write(reg, val) _regmap_write_nohook(map, reg, val) + +#define madera_read(reg, val) regmap_read(map, reg, val) + +static unsigned int get_headphone_gain_l(void) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2L, &val); + val &= MADERA_OUT2L_VOL_MASK; + val >>= MADERA_OUT2L_VOL_SHIFT; + + return val; +} + +static unsigned int get_headphone_gain_r(void) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2R, &val); + val &= MADERA_OUT2R_VOL_MASK; + val >>= MADERA_OUT2R_VOL_SHIFT; + + return val; +} + +static void set_headphone_gain_l(int gain) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2L, &val); + val &= 
~MADERA_OUT2L_VOL_MASK; + val |= (gain << MADERA_OUT2L_VOL_SHIFT); + madera_write(MADERA_DAC_DIGITAL_VOLUME_2L, val); +} + +static void set_headphone_gain_r(int gain) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_2R, &val); + val &= ~MADERA_OUT2R_VOL_MASK; + val |= (gain << MADERA_OUT2R_VOL_SHIFT); + madera_write(MADERA_DAC_DIGITAL_VOLUME_2R, val); +} + +static unsigned int get_earpiece_gain(void) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_3L, &val); + val &= MADERA_OUT3L_VOL_MASK; + val >>= MADERA_OUT3L_VOL_SHIFT; + + return val; +} + +static void set_earpiece_gain(int gain) +{ + unsigned int val; + + madera_read(MADERA_DAC_DIGITAL_VOLUME_3L, &val); + val &= ~MADERA_OUT3L_VOL_MASK; + val |= (gain << MADERA_OUT3L_VOL_SHIFT); + madera_write(MADERA_DAC_DIGITAL_VOLUME_3L, val); +} + +static void set_out2l_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_OUT2LMIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_OUT2LMIX_INPUT_1_SOURCE, val); +} + +static void set_out2r_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_OUT2RMIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_OUT2RMIX_INPUT_1_SOURCE, val); +} + +static void set_eq1_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_EQ1MIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_EQ1MIX_INPUT_1_SOURCE, val); +} + + +static void set_eq2_mix_source(int value) +{ + unsigned int val; + + madera_read(MADERA_EQ2MIX_INPUT_1_SOURCE, &val); + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (value << MADERA_MIXER_SOURCE_SHIFT); + madera_write(MADERA_EQ2MIX_INPUT_1_SOURCE, val); +} + +static void set_eq(void) +{ + unsigned int val; + + if (eq && moro_sound) { + madera_read(MADERA_EQ1_1, &val); + val &= ~MADERA_EQ1_ENA_MASK; + val |= 1 << MADERA_EQ1_ENA_SHIFT; + madera_write(MADERA_EQ1_1, val); + madera_read(MADERA_EQ2_1, &val); + val &= ~MADERA_EQ2_ENA_MASK; + val |= 1 << MADERA_EQ2_ENA_SHIFT; + madera_write(MADERA_EQ2_1, val); + set_eq1_mix_source(32); + set_eq2_mix_source(33); + set_out2l_mix_source(80); + set_out2r_mix_source(81); + } else { + madera_read(MADERA_EQ1_1, &val); + val &= ~MADERA_EQ1_ENA_MASK; + val |= 0 << MADERA_EQ1_ENA_SHIFT; + madera_write(MADERA_EQ1_1, val); + madera_read(MADERA_EQ2_1, &val); + val &= ~MADERA_EQ2_ENA_MASK; + val |= 0 << MADERA_EQ2_ENA_SHIFT; + madera_write(MADERA_EQ2_1, val); + eq1_mix_source = EQ1_MIX_DEFAULT; + eq2_mix_source = EQ2_MIX_DEFAULT; + set_eq1_mix_source(eq1_mix_source); + set_eq2_mix_source(eq2_mix_source); + out2l_mix_source = OUT2L_MIX_DEFAULT; + out2r_mix_source = OUT2R_MIX_DEFAULT; + set_out2l_mix_source(out2l_mix_source); + set_out2r_mix_source(out2r_mix_source); + } + + set_eq_gains(); +} + +static void set_eq_gains(void) +{ + unsigned int val; + unsigned int gain1, gain2, gain3, gain4, gain5; + + gain1 = eq_gains[0]; + gain2 = eq_gains[1]; + gain3 = eq_gains[2]; + gain4 = eq_gains[3]; + gain5 = eq_gains[4]; + + madera_read(MADERA_EQ1_1, &val); + + val &= MADERA_EQ1_ENA_MASK; + val |= ((gain1 + EQ_GAIN_OFFSET) << MADERA_EQ1_B1_GAIN_SHIFT); + val |= ((gain2 + EQ_GAIN_OFFSET) << MADERA_EQ1_B2_GAIN_SHIFT); + val |= ((gain3 + EQ_GAIN_OFFSET) << MADERA_EQ1_B3_GAIN_SHIFT); + + madera_write(MADERA_EQ1_1, val); + madera_write(MADERA_EQ2_1, val); + + madera_read(MADERA_EQ1_2, 
&val); + + val &= MADERA_EQ1_B1_MODE_MASK; + val |= ((gain4 + EQ_GAIN_OFFSET) << MADERA_EQ1_B4_GAIN_SHIFT); + val |= ((gain5 + EQ_GAIN_OFFSET) << MADERA_EQ1_B5_GAIN_SHIFT); + + madera_write(MADERA_EQ1_2, val); + madera_write(MADERA_EQ2_2, val); +} + +/* Sound hook functions */ +void moro_sound_hook_madera_pcm_probe(struct regmap *pmap) +{ + map = pmap; + moro_sound = MORO_SOUND_DEFAULT; + eq = EQ_DEFAULT; + set_eq(); + + if (moro_sound) + reset_moro_sound(); +} + +unsigned int moro_sound_write_hook(unsigned int reg, unsigned int val) +{ + if (!moro_sound) + return val; + + switch (reg) { + case MADERA_DAC_DIGITAL_VOLUME_2L: + val &= ~MADERA_OUT2L_VOL_MASK; + val |= (headphone_gain_l << MADERA_OUT2L_VOL_SHIFT); + break; + case MADERA_DAC_DIGITAL_VOLUME_2R: + val &= ~MADERA_OUT2R_VOL_MASK; + val |= (headphone_gain_r << MADERA_OUT2R_VOL_SHIFT); + break; + case MADERA_DAC_DIGITAL_VOLUME_3L: + val &= ~MADERA_OUT3L_VOL_MASK; + val |= (earpiece_gain << MADERA_OUT3L_VOL_SHIFT); + break; + if (eq) { + case MADERA_OUT2LMIX_INPUT_1_SOURCE: + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (out2l_mix_source << MADERA_MIXER_SOURCE_SHIFT); + break; + case MADERA_OUT2RMIX_INPUT_1_SOURCE: + val &= ~MADERA_MIXER_SOURCE_MASK; + val |= (out2r_mix_source << MADERA_MIXER_SOURCE_SHIFT); + break; + } + default: + break; + } + + return val; +} + +/* Initialization functions */ + +static void reset_moro_sound(void) +{ + headphone_gain_l = HEADPHONE_DEFAULT; + headphone_gain_r = HEADPHONE_DEFAULT; + + earpiece_gain = EARPIECE_DEFAULT; + + out2l_mix_source = OUT2L_MIX_DEFAULT; + out2r_mix_source = OUT2R_MIX_DEFAULT; + + eq1_mix_source = EQ1_MIX_DEFAULT; + eq2_mix_source = EQ2_MIX_DEFAULT; +} + + +static void reset_audio_hub(void) +{ + set_headphone_gain_l(HEADPHONE_DEFAULT); + set_headphone_gain_r(HEADPHONE_DEFAULT); + + set_earpiece_gain(EARPIECE_DEFAULT); + + set_out2l_mix_source(OUT2L_MIX_DEFAULT); + set_out2r_mix_source(OUT2R_MIX_DEFAULT); + + set_eq1_mix_source(EQ1_MIX_DEFAULT); + set_eq2_mix_source(EQ2_MIX_DEFAULT); + + set_eq(); +} + +static void update_audio_hub(void) +{ + set_headphone_gain_l(headphone_gain_l); + set_headphone_gain_r(headphone_gain_r); + + set_earpiece_gain(earpiece_gain); + + set_out2l_mix_source(out2l_mix_source); + set_out2r_mix_source(out2r_mix_source); + + set_eq1_mix_source(eq1_mix_source); + set_eq2_mix_source(eq2_mix_source); + + set_eq(); +} + +/* sysfs interface functions */ +static ssize_t moro_sound_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", moro_sound); +} + + +static ssize_t moro_sound_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (((val == 0) || (val == 1))) { + if (moro_sound != val) { + moro_sound = val; + if (first) { + reset_moro_sound(); + first = 0; + } + + if (val == 1) + update_audio_hub(); + else if (val == 0) + reset_audio_hub(); + } + } + + return count; +} + + +/* Headphone volume */ +static ssize_t headphone_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d %d\n", headphone_gain_l, headphone_gain_r); +} + + +static ssize_t headphone_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val_l; + int val_r; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d %d", &val_l, &val_r) < 2) + return -EINVAL; + + if (val_l < HEADPHONE_MIN) + val_l = HEADPHONE_MIN; + + if (val_l > HEADPHONE_MAX) + 
val_l = HEADPHONE_MAX; + + if (val_r < HEADPHONE_MIN) + val_r = HEADPHONE_MIN; + + if (val_r > HEADPHONE_MAX) + val_r = HEADPHONE_MAX; + + headphone_gain_l = val_l; + headphone_gain_r = val_r; + + set_headphone_gain_l(headphone_gain_l); + set_headphone_gain_r(headphone_gain_r); + + return count; +} + +static ssize_t headphone_limits_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Min: %d Max: %d Def: %d\n", HEADPHONE_MIN, HEADPHONE_MAX, HEADPHONE_DEFAULT); +} + +/* Earpiece Volume */ + +static ssize_t earpiece_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", earpiece_gain); +} + +static ssize_t earpiece_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EARPIECE_MIN) + val = EARPIECE_MIN; + + if (val > EARPIECE_MAX) + val = EARPIECE_MAX; + + earpiece_gain = val; + set_earpiece_gain(earpiece_gain); + + return count; +} + +static ssize_t earpiece_limits_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + // return version information + return sprintf(buf, "Min: %d Max: %d Def:%d\n", EARPIECE_MIN, EARPIECE_MAX, EARPIECE_DEFAULT); +} + +/* EQ */ +static ssize_t eq_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq); +} + +static ssize_t eq_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (((val == 0) || (val == 1))) { + if (eq != val) { + eq = val; + set_eq(); + } + } + + return count; +} + + +/* EQ GAIN */ + +static ssize_t eq_gains_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d %d %d %d %d\n", eq_gains[0], eq_gains[1], eq_gains[2], eq_gains[3], eq_gains[4]); +} + +static ssize_t eq_gains_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int gains[5]; + int i; + + if (!moro_sound) + return count; + + if (sscanf(buf, "%d %d %d %d %d", &gains[0], &gains[1], &gains[2], &gains[3], &gains[4]) < 5) + return -EINVAL; + + for (i = 0; i <= 4; i++) { + if (gains[i] < EQ_GAIN_MIN) + gains[i] = EQ_GAIN_MIN; + if (gains[i] > EQ_GAIN_MAX) + gains[i] = EQ_GAIN_MAX; + eq_gains[i] = gains[i]; + } + + set_eq_gains(); + + return count; +} + +static ssize_t eq_b1_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[0]); +} + +static ssize_t eq_b1_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[0] = val; + set_eq_gains(); + + return count; +} + +static ssize_t eq_b2_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[1]); +} + +static ssize_t eq_b2_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[1] = val; + set_eq_gains(); + + return count; +} + +static ssize_t 
eq_b3_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[2]); +} + +static ssize_t eq_b3_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[2] = val; + set_eq_gains(); + + return count; +} + +static ssize_t eq_b4_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[3]); +} + +static ssize_t eq_b4_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[3] = val; + set_eq_gains(); + + return count; +} + +static ssize_t eq_b5_gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", eq_gains[4]); +} + +static ssize_t eq_b5_gain_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + + if (sscanf(buf, "%d", &val) < 1) + return -EINVAL; + + if (val < EQ_GAIN_MIN) + val = EQ_GAIN_MIN; + + if (val > EQ_GAIN_MAX) + val = EQ_GAIN_MAX; + + eq_gains[4] = val; + set_eq_gains(); + + return count; +} + +static ssize_t reg_dump_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + unsigned int out2_ena, out2l_mix, out2r_mix, eq1_ena, eq2_ena, eq1_mix, eq2_mix, eq_b1, + eq_b2, eq_b3, eq_b4, eq_b5; + + madera_read(MADERA_OUTPUT_ENABLES_1, &out2_ena); + out2_ena = (out2_ena & MADERA_OUT2L_ENA_MASK) >> MADERA_OUT2L_ENA_SHIFT; + + madera_read(MADERA_OUT2LMIX_INPUT_1_SOURCE, &out2l_mix); + madera_read(MADERA_OUT2RMIX_INPUT_1_SOURCE, &out2r_mix); + madera_read(MADERA_EQ1_1, &eq1_ena); + eq1_ena = (eq1_ena & MADERA_EQ1_ENA_MASK) >> MADERA_EQ1_ENA_SHIFT; + madera_read(MADERA_EQ2_1, &eq2_ena); + eq2_ena = (eq2_ena & MADERA_EQ2_ENA_MASK) >> MADERA_EQ2_ENA_SHIFT; + madera_read(MADERA_EQ1MIX_INPUT_1_SOURCE, &eq1_mix); + madera_read(MADERA_EQ2MIX_INPUT_1_SOURCE, &eq2_mix); + madera_read(MADERA_EQ1_1, &eq_b1); + eq_b1 = ((eq_b1 & MADERA_EQ1_B1_GAIN_MASK) >> MADERA_EQ1_B1_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_1, &eq_b2); + eq_b2 = ((eq_b2 & MADERA_EQ1_B2_GAIN_MASK) >> MADERA_EQ1_B2_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_1, &eq_b3); + eq_b3 = ((eq_b3 & MADERA_EQ1_B3_GAIN_MASK) >> MADERA_EQ1_B3_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_2, &eq_b4); + eq_b4 = ((eq_b4 & MADERA_EQ1_B4_GAIN_MASK) >> MADERA_EQ1_B4_GAIN_SHIFT) - EQ_GAIN_OFFSET; + madera_read(MADERA_EQ1_2, &eq_b5); + eq_b5 = ((eq_b5 & MADERA_EQ1_B5_GAIN_MASK) >> MADERA_EQ1_B5_GAIN_SHIFT) - EQ_GAIN_OFFSET; + + return sprintf(buf, "\ +headphone_gain_l: reg: %d, variable: %d \ +headphone_gain_r: reg: %d, variable: %d \ +earpiece_gain: %d \ +HPOUT Enabled: %d \ +HPOUT2L Source: %d \ +HPOUT2R Source: %d \ +EQ1 Enabled: %d \ +EQ2 Enabled: %d \ +EQ1MIX source: %d \ +EQ2MIX source: %d \ +EQ b1 gain: %d \ +EQ b2 gain: %d \ +EQ b3 gain: %d \ +EQ b4 gain: %d \ +EQ b5 gain: %d \ +", +get_headphone_gain_l(), +get_headphone_gain_r(), +headphone_gain_l, +headphone_gain_r, +first, +get_earpiece_gain(), +out2_ena, +out2l_mix, +out2r_mix, +eq1_ena, +eq2_ena, +eq1_mix, +eq2_mix, +eq_b1, +eq_b2, +eq_b3, +eq_b4, +eq_b5); +} + +static ssize_t version_show(struct device *dev, 
struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", MORO_SOUND_VERSION); +} + + +/* Sysfs permissions */ +static DEVICE_ATTR(moro_sound, 0664, moro_sound_show, moro_sound_store); +static DEVICE_ATTR(headphone_gain, 0664, headphone_gain_show, headphone_gain_store); +static DEVICE_ATTR(headphone_limits, 0664, headphone_limits_show, NULL); +static DEVICE_ATTR(earpiece_gain, 0664, earpiece_gain_show, earpiece_gain_store); +static DEVICE_ATTR(earpiece_limits, 0664, earpiece_limits_show, NULL); +static DEVICE_ATTR(eq, 0664, eq_show, eq_store); +static DEVICE_ATTR(eq_gains, 0664, eq_gains_show, eq_gains_store); +static DEVICE_ATTR(eq_b1_gain, 0664, eq_b1_gain_show, eq_b1_gain_store); +static DEVICE_ATTR(eq_b2_gain, 0664, eq_b2_gain_show, eq_b2_gain_store); +static DEVICE_ATTR(eq_b3_gain, 0664, eq_b3_gain_show, eq_b3_gain_store); +static DEVICE_ATTR(eq_b4_gain, 0664, eq_b4_gain_show, eq_b4_gain_store); +static DEVICE_ATTR(eq_b5_gain, 0664, eq_b5_gain_show, eq_b5_gain_store); +static DEVICE_ATTR(version, 0664, version_show, NULL); +static DEVICE_ATTR(reg_dump, 0664, reg_dump_show, NULL); + +static struct attribute *moro_sound_attributes[] = { + &dev_attr_moro_sound.attr, + &dev_attr_headphone_gain.attr, + &dev_attr_headphone_limits.attr, + &dev_attr_earpiece_gain.attr, + &dev_attr_earpiece_limits.attr, + &dev_attr_eq.attr, + &dev_attr_eq_gains.attr, + &dev_attr_eq_b1_gain.attr, + &dev_attr_eq_b2_gain.attr, + &dev_attr_eq_b3_gain.attr, + &dev_attr_eq_b4_gain.attr, + &dev_attr_eq_b5_gain.attr, + &dev_attr_version.attr, + &dev_attr_reg_dump.attr, + NULL +}; + +static struct attribute_group moro_sound_control_group = { + .attrs = moro_sound_attributes, +}; + +static struct miscdevice moro_sound_control_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "moro_sound", +}; + +static int moro_sound_init(void) +{ + misc_register(&moro_sound_control_device); + + if (sysfs_create_group(&moro_sound_control_device.this_device->kobj, + &moro_sound_control_group) < 0) { + return 0; + } + + reset_moro_sound(); + + return 0; +} + +static void moro_sound_exit(void) +{ + sysfs_remove_group(&moro_sound_control_device.this_device->kobj, + &moro_sound_control_group); +} + +/* Driver init and exit functions */ +module_init(moro_sound_init); +module_exit(moro_sound_exit); diff --git a/sound/soc/codecs/moro_sound.h b/sound/soc/codecs/moro_sound.h new file mode 100644 index 000000000000..3c3594523ed4 --- /dev/null +++ b/sound/soc/codecs/moro_sound.h @@ -0,0 +1,63 @@ +/* + * moro_sound.h -- Sound mod for Madera, S10 sound driver + * + * Author : @morogoku https://github.com/morogoku + * + * + */ + + +#include <linux/module.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <sound/soc.h> + +#include <linux/mfd/madera/registers.h> + +/* External function declarations */ +void moro_sound_hook_madera_pcm_probe(struct regmap *pmap); +int _regmap_write_nohook(struct regmap *map, unsigned int reg, unsigned int val); +int set_speaker_gain(int gain); +int get_speaker_gain(void); + +/* Definitions */ + +/* Moro sound general */ +#define MORO_SOUND_DEFAULT 0 +#define MORO_SOUND_VERSION "2.1.1" + +/* Headphone levels */ +#define HEADPHONE_DEFAULT 113 +#define HEADPHONE_MIN 60 +#define HEADPHONE_MAX 170 + +/* Earpiece levels */ +#define EARPIECE_DEFAULT 128 +#define EARPIECE_MIN 60 +#define EARPIECE_MAX 190 + +/* Speaker levels */ +#define SPEAKER_DEFAULT 20 +#define SPEAKER_MIN 0 +#define SPEAKER_MAX 63 + +/* Mixers sources */ +#define 
OUT2L_MIX_DEFAULT 32 +#define OUT2R_MIX_DEFAULT 33 +#define EQ1_MIX_DEFAULT 0 +#define EQ2_MIX_DEFAULT 0 + +/* EQ gain */ +#define EQ_DEFAULT 0 +#define EQ_GAIN_DEFAULT 0 +#define EQ_GAIN_OFFSET 12 +#define EQ_GAIN_MIN -12 +#define EQ_GAIN_MAX 12 + +/* Mixers */ +#define MADERA_MIXER_SOURCE_MASK 0xff +#define MADERA_MIXER_SOURCE_SHIFT 0 +#define MADERA_MIXER_VOLUME_MASK 0xfe +#define MADERA_MIXER_VOLUME_SHIFT 1 From 6da89768d5dd764698feb30f8ac321d42b894392 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 18:54:53 +0300 Subject: [PATCH 277/452] sound: moro: fix reg_dump_show() output Remove first variable, because it's not used in fmt string. Swap get_headphone_gain_l(), headphone_gain_l, get_headphone_gain_r(), headphone_gain_r according to the fmt string. Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/codecs/moro_sound.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 8ca5c3d97387..03561a05d80b 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -676,11 +676,8 @@ EQ b3 gain: %d \ EQ b4 gain: %d \ EQ b5 gain: %d \ ", -get_headphone_gain_l(), -get_headphone_gain_r(), -headphone_gain_l, -headphone_gain_r, -first, +get_headphone_gain_l(), headphone_gain_l, +get_headphone_gain_r(), headphone_gain_r, get_earpiece_gain(), out2_ena, out2l_mix, From 7101c79d2e1d9e346fe110021417ecc056eaa111 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:02:22 +0300 Subject: [PATCH 278/452] sound: moro: mark functions with __init, __exit attrs Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/codecs/moro_sound.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 03561a05d80b..45f7e6d62046 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -742,7 +742,7 @@ static struct miscdevice moro_sound_control_device = { .name = "moro_sound", }; -static int moro_sound_init(void) +static int __init moro_sound_init(void) { misc_register(&moro_sound_control_device); @@ -756,7 +756,7 @@ static int moro_sound_init(void) return 0; } -static void moro_sound_exit(void) +static void __exit moro_sound_exit(void) { sysfs_remove_group(&moro_sound_control_device.this_device->kobj, &moro_sound_control_group); From 33a0e8f2c7dee3777d0026e4152ca028dfc1c96f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:35:51 +0300 Subject: [PATCH 279/452] sound: moro: add pr_fmt macro Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/codecs/moro_sound.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 45f7e6d62046..b8d6fbd504a5 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -16,6 +16,7 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "moro_sound.h" From 941aff0f4d2d6879e0149a01696686da77918d8d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:37:54 +0300 Subject: [PATCH 280/452] sound: moro: add misc_deregister in exit function Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/codecs/moro_sound.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index b8d6fbd504a5..a298fe04f662 100644 --- a/sound/soc/codecs/moro_sound.c +++ 
b/sound/soc/codecs/moro_sound.c @@ -761,6 +761,8 @@ static void __exit moro_sound_exit(void) { sysfs_remove_group(&moro_sound_control_device.this_device->kobj, &moro_sound_control_group); + + misc_deregister(&moro_sound_control_device); } /* Driver init and exit functions */ From 6f18d443809e7896e22536ef0b65296214e22c91 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 22:40:25 +0300 Subject: [PATCH 281/452] sound: moro: add error handling to init function Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/codecs/moro_sound.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index a298fe04f662..0e66750e0972 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -745,11 +745,20 @@ static struct miscdevice moro_sound_control_device = { static int __init moro_sound_init(void) { - misc_register(&moro_sound_control_device); + int err = 0; - if (sysfs_create_group(&moro_sound_control_device.this_device->kobj, - &moro_sound_control_group) < 0) { - return 0; + err = misc_register(&moro_sound_control_device); + if (err) { + pr_err("failed register the device.\n"); + return err; + } + + err = sysfs_create_group(&moro_sound_control_device.this_device->kobj, + &moro_sound_control_group); + if (err) { + pr_err("failed to create sys fs object.\n"); + misc_deregister(&moro_sound_control_device); + return err; } reset_moro_sound(); From 4697b247156e67776a2917756988a38b48670994 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 21 Jun 2020 18:05:30 +0300 Subject: [PATCH 282/452] sound: moro: fix device attr permissions Signed-off-by: Denis Efremov <efremov@linux.com> --- sound/soc/codecs/moro_sound.c | 217 +++++++++------------------------- 1 file changed, 54 insertions(+), 163 deletions(-) diff --git a/sound/soc/codecs/moro_sound.c b/sound/soc/codecs/moro_sound.c index 0e66750e0972..37ab93ebd1b8 100644 --- a/sound/soc/codecs/moro_sound.c +++ b/sound/soc/codecs/moro_sound.c @@ -333,13 +333,6 @@ static void update_audio_hub(void) set_eq(); } -/* sysfs interface functions */ -static ssize_t moro_sound_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", moro_sound); -} - - static ssize_t moro_sound_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -414,11 +407,6 @@ static ssize_t headphone_limits_show(struct device *dev, struct device_attribute /* Earpiece Volume */ -static ssize_t earpiece_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", earpiece_gain); -} - static ssize_t earpiece_gain_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -449,10 +437,6 @@ static ssize_t earpiece_limits_show(struct device *dev, struct device_attribute } /* EQ */ -static ssize_t eq_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq); -} static ssize_t eq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -508,131 +492,6 @@ static ssize_t eq_gains_store(struct device *dev, struct device_attribute *attr, return count; } -static ssize_t eq_b1_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[0]); -} - -static ssize_t eq_b1_gain_store(struct device *dev, struct device_attribute *attr, - const char 
*buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[0] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b2_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[1]); -} - -static ssize_t eq_b2_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[1] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b3_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[2]); -} - -static ssize_t eq_b3_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[2] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b4_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[3]); -} - -static ssize_t eq_b4_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[3] = val; - set_eq_gains(); - - return count; -} - -static ssize_t eq_b5_gain_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", eq_gains[4]); -} - -static ssize_t eq_b5_gain_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - int val; - - if (sscanf(buf, "%d", &val) < 1) - return -EINVAL; - - if (val < EQ_GAIN_MIN) - val = EQ_GAIN_MIN; - - if (val > EQ_GAIN_MAX) - val = EQ_GAIN_MAX; - - eq_gains[4] = val; - set_eq_gains(); - - return count; -} - static ssize_t reg_dump_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int out2_ena, out2l_mix, out2r_mix, eq1_ena, eq2_ena, eq1_mix, eq2_mix, eq_b1, @@ -701,34 +560,66 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, c /* Sysfs permissions */ -static DEVICE_ATTR(moro_sound, 0664, moro_sound_show, moro_sound_store); -static DEVICE_ATTR(headphone_gain, 0664, headphone_gain_show, headphone_gain_store); -static DEVICE_ATTR(headphone_limits, 0664, headphone_limits_show, NULL); -static DEVICE_ATTR(earpiece_gain, 0664, earpiece_gain_show, earpiece_gain_store); -static DEVICE_ATTR(earpiece_limits, 0664, earpiece_limits_show, NULL); -static DEVICE_ATTR(eq, 0664, eq_show, eq_store); -static DEVICE_ATTR(eq_gains, 0664, eq_gains_show, eq_gains_store); -static DEVICE_ATTR(eq_b1_gain, 0664, eq_b1_gain_show, eq_b1_gain_store); -static DEVICE_ATTR(eq_b2_gain, 0664, eq_b2_gain_show, eq_b2_gain_store); -static DEVICE_ATTR(eq_b3_gain, 0664, eq_b3_gain_show, eq_b3_gain_store); -static DEVICE_ATTR(eq_b4_gain, 0664, eq_b4_gain_show, eq_b4_gain_store); -static DEVICE_ATTR(eq_b5_gain, 0664, eq_b5_gain_show, eq_b5_gain_store); -static DEVICE_ATTR(version, 0664, version_show, NULL); -static DEVICE_ATTR(reg_dump, 0664, reg_dump_show, NULL); +static 
DEVICE_ATTR_RW(headphone_gain); +static DEVICE_ATTR_RW(eq_gains); +static DEVICE_ATTR_RO(earpiece_limits); +static DEVICE_ATTR_RO(headphone_limits); +static DEVICE_ATTR_RO(version); +static DEVICE_ATTR_RO(reg_dump); + +static struct dev_ext_attribute dev_attr_moro_sound = { + __ATTR(moro_sound, 0644, device_show_bool, moro_sound_store), + &moro_sound +}; +static struct dev_ext_attribute dev_attr_earpiece_gain = { + __ATTR(earpiece_gain, 0644, device_show_int, earpiece_gain_store), + &earpiece_gain +}; +static struct dev_ext_attribute dev_attr_eq = { + __ATTR(eq, 0644, device_show_bool, eq_store), + &eq +}; + + +#define MORO_DEVICE_ATTR_EQ_B_GAIN(num) \ +static ssize_t eq_b##num##_gain_store(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + int val; \ + if (sscanf(buf, "%d", &val) < 1) \ + return -EINVAL; \ + if (val < EQ_GAIN_MIN) \ + val = EQ_GAIN_MIN; \ + if (val > EQ_GAIN_MAX) \ + val = EQ_GAIN_MAX; \ + eq_gains[num - 1] = val; \ + set_eq_gains(); \ + return count; \ +} \ +struct dev_ext_attribute dev_attr_eq_b##num##_gain = { \ + __ATTR(eq_b##num##_gain, 0644, device_show_int, eq_b##num##_gain_store), \ + &eq_gains[num - 1] \ +}; + +MORO_DEVICE_ATTR_EQ_B_GAIN(1); +MORO_DEVICE_ATTR_EQ_B_GAIN(2); +MORO_DEVICE_ATTR_EQ_B_GAIN(3); +MORO_DEVICE_ATTR_EQ_B_GAIN(4); +MORO_DEVICE_ATTR_EQ_B_GAIN(5); static struct attribute *moro_sound_attributes[] = { - &dev_attr_moro_sound.attr, + &dev_attr_moro_sound.attr.attr, &dev_attr_headphone_gain.attr, &dev_attr_headphone_limits.attr, - &dev_attr_earpiece_gain.attr, + &dev_attr_earpiece_gain.attr.attr, &dev_attr_earpiece_limits.attr, - &dev_attr_eq.attr, + &dev_attr_eq.attr.attr, &dev_attr_eq_gains.attr, - &dev_attr_eq_b1_gain.attr, - &dev_attr_eq_b2_gain.attr, - &dev_attr_eq_b3_gain.attr, - &dev_attr_eq_b4_gain.attr, - &dev_attr_eq_b5_gain.attr, + &dev_attr_eq_b1_gain.attr.attr, + &dev_attr_eq_b2_gain.attr.attr, + &dev_attr_eq_b3_gain.attr.attr, + &dev_attr_eq_b4_gain.attr.attr, + &dev_attr_eq_b5_gain.attr.attr, &dev_attr_version.attr, &dev_attr_reg_dump.attr, NULL From 34ae03e0523623fc0ffb9418a7c7f2abdd7c8a92 Mon Sep 17 00:00:00 2001 From: Joe Maples <joe@frap129.org> Date: Thu, 11 May 2017 22:36:29 -0500 Subject: [PATCH 283/452] cpufreq: Introduce fingerprint boost driver This driver, based on Sultanxda's input boost driver, boosts all available cpus to max freq after recieving an input notification from the fingerprint sensor to reduce lag. Signed-off-by: Joe Maples <joe@frap129.org> --- drivers/cpufreq/Kconfig | 6 + drivers/cpufreq/Makefile | 2 + drivers/cpufreq/fp-boost.c | 393 +++++++++++++++++++++++++++++++++++++ 3 files changed, 401 insertions(+) create mode 100644 drivers/cpufreq/fp-boost.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index bf084b3a6715..a8a1e16dbe7d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -242,6 +242,12 @@ config CPUFREQ_DT_PLATDEV If in doubt, say N. +config FINGERPRINT_BOOST + bool "Fingerprint Boost" + default n + help + Boosts available CPUs to max frequency on fingerprint sensor input. 
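The driver added below exposes a single runtime switch: sysfs_fp_init() creates a "fp_boost" kobject under /sys/kernel with one "enabled" attribute. A minimal userspace toggle could look like the sketch that follows; the sysfs path is an assumption derived from kobject_create_and_add("fp_boost", kernel_kobj) in the driver code further down, not something stated explicitly by the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative helper, not part of the patch: flip the fp-boost switch.
 * Path assumed from kobject_create_and_add("fp_boost", kernel_kobj) plus
 * the "enabled" attribute registered in sysfs_fp_init(). */
static int fp_boost_set_enabled(int enable)
{
	int fd = open("/sys/kernel/fp_boost/enabled", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/kernel/fp_boost/enabled");
		return -1;
	}
	if (write(fd, enable ? "1" : "0", 1) != 1) {
		perror("write");
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	return fp_boost_set_enabled(1) ? 1 : 0;
}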
+ if X86 source "drivers/cpufreq/Kconfig.x86" endif diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 39cc6c6e76cd..dc75e7ce3e83 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o +obj-$(CONFIG_FINGERPRINT_BOOST) += fp-boost.o + ################################################################################## # x86 drivers. # Link order matters. K8 is preferred to ACPI because of firmware bugs in early diff --git a/drivers/cpufreq/fp-boost.c b/drivers/cpufreq/fp-boost.c new file mode 100644 index 000000000000..9ff17a0f8525 --- /dev/null +++ b/drivers/cpufreq/fp-boost.c @@ -0,0 +1,393 @@ +/* + * Copyright (C) 2014-2017, Sultanxda <sultanxda@gmail.com> + * (C) 2017, Joe Maples <joe@frap129.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* drivers/cpufreq/fp-boost.c: Fingerprint boost driver + * + * fp-boost is a simple cpufreq driver that boosts all CPU frequencies + * to max when an input notification is recieved from the devices + * fingerprint sensor. This aims to wake speed-up device unlock, + * especially from the deep sleep state. + * + * fp-boost is based on cpu_input_boost by Sultanxda, and all copyright + * information has been retained. Huge thanks to him for writing the + * initial driver this was based on. + */ + +#define pr_fmt(fmt) "fp-boost: " fmt + +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/input.h> +#include <linux/slab.h> + +/* Available bits for boost_policy state */ +#define DRIVER_ENABLED (1U << 0) +#define FINGERPRINT_BOOST (1U << 1) + +/* Fingerprint sensor input key */ +#define FINGERPRINT_KEY 0x2ee + +/* The duration in milliseconds for the fingerprint boost */ +#define FP_BOOST_MS (2000) + +/* + * "fp_config" = "fingerprint boost configuration". This contains the data and + * workers used for a single input-boost event. + */ +struct fp_config { + struct delayed_work boost_work; + struct delayed_work unboost_work; + uint32_t adj_duration_ms; + uint32_t cpus_to_boost; + uint32_t duration_ms; + uint32_t freq[2]; +}; + +/* + * This is the struct that contains all of the data for the entire driver. It + * encapsulates all of the other structs, so all data can be accessed through + * this struct. 
+ */ +struct boost_policy { + spinlock_t lock; + struct fp_config fp; + struct workqueue_struct *wq; + uint32_t state; +}; + +/* Global pointer to all of the data for the driver */ +static struct boost_policy *boost_policy_g; + +static uint32_t get_boost_state(struct boost_policy *b); +static void set_boost_bit(struct boost_policy *b, uint32_t state); +static void clear_boost_bit(struct boost_policy *b, uint32_t state); +static void unboost_all_cpus(struct boost_policy *b); +static void update_online_cpu_policy(void); + +/* Boolean to let us know if input is already recieved */ +static bool touched; + +static void fp_boost_main(struct work_struct *work) +{ + struct boost_policy *b = boost_policy_g; + struct fp_config *fp = &b->fp; + + /* All CPUs will be boosted to policy->max */ + set_boost_bit(b, FINGERPRINT_BOOST); + + /* Immediately boost the online CPUs */ + update_online_cpu_policy(); + + queue_delayed_work(b->wq, &fp->unboost_work, + msecs_to_jiffies(FP_BOOST_MS)); + +} + +static void fp_unboost_main(struct work_struct *work) +{ + struct boost_policy *b = boost_policy_g; + pr_info("Unboosting\n"); + touched = false; + /* This clears the wake-boost bit and unboosts everything */ + unboost_all_cpus(b); +} + +static int do_cpu_boost(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct cpufreq_policy *policy = data; + struct boost_policy *b = boost_policy_g; + uint32_t state; + + if (action != CPUFREQ_ADJUST) + return NOTIFY_OK; + + state = get_boost_state(b); + + /* + * Don't do anything when the driver is disabled, unless there are + * still CPUs that need to be unboosted. + */ + if (!(state & DRIVER_ENABLED) && + policy->min == policy->cpuinfo.min_freq) + return NOTIFY_OK; + + /* Boost CPU to max frequency for fingerprint boost */ + if (state & FINGERPRINT_BOOST) { + pr_info("Boosting\n"); + policy->cur = policy->max; + policy->min = policy->max; + return NOTIFY_OK; + } + + return NOTIFY_OK; +} + +static struct notifier_block do_cpu_boost_nb = { + .notifier_call = do_cpu_boost, + .priority = INT_MAX, +}; + +static void cpu_fp_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + struct boost_policy *b = boost_policy_g; + struct fp_config *fp = &b->fp; + uint32_t state; + + state = get_boost_state(b); + + if (!(state & DRIVER_ENABLED) || touched) + return; + + pr_info("Recieved input event\n"); + touched = true; + set_boost_bit(b, FINGERPRINT_BOOST); + + /* Delaying work to ensure all CPUs are online */ + queue_delayed_work(b->wq, &fp->boost_work, + msecs_to_jiffies(20)); +} + +static int cpu_fp_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int ret; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "cpu_fp_handle"; + + ret = input_register_handle(handle); + if (ret) + goto err2; + + ret = input_open_device(handle); + if (ret) + goto err1; + + return 0; + +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return ret; +} + +static void cpu_fp_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id cpu_fp_ids[] = { + /* fingerprint sensor */ + { + .flags = INPUT_DEVICE_ID_MATCH_KEYBIT, + .keybit = { [BIT_WORD(FINGERPRINT_KEY)] = BIT_MASK(FINGERPRINT_KEY) }, + }, + { }, +}; + +static struct 
input_handler cpu_fp_input_handler = { + .event = cpu_fp_input_event, + .connect = cpu_fp_input_connect, + .disconnect = cpu_fp_input_disconnect, + .name = "cpu_fp_handler", + .id_table = cpu_fp_ids, +}; + +static uint32_t get_boost_state(struct boost_policy *b) +{ + uint32_t state; + + spin_lock(&b->lock); + state = b->state; + spin_unlock(&b->lock); + + return state; +} + +static void set_boost_bit(struct boost_policy *b, uint32_t state) +{ + spin_lock(&b->lock); + b->state |= state; + spin_unlock(&b->lock); +} + +static void clear_boost_bit(struct boost_policy *b, uint32_t state) +{ + spin_lock(&b->lock); + b->state &= ~state; + spin_unlock(&b->lock); +} + +static void unboost_all_cpus(struct boost_policy *b) +{ + struct fp_config *fp = &b->fp; + + /* Clear boost bit */ + clear_boost_bit(b, FINGERPRINT_BOOST); + + /* Clear cpus_to_boost bits for all CPUs */ + fp->cpus_to_boost = 0; + + /* Immediately unboost the online CPUs */ + update_online_cpu_policy(); +} + +static void update_online_cpu_policy(void) +{ + uint32_t cpu; + + /* Trigger cpufreq notifier for online CPUs */ + get_online_cpus(); + for_each_online_cpu(cpu) + cpufreq_update_policy(cpu); + put_online_cpus(); +} + +static ssize_t enabled_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct boost_policy *b = boost_policy_g; + uint32_t data; + int ret; + + ret = kstrtou32(buf, 10, &data); + if (ret) + return -EINVAL; + + if (data) { + set_boost_bit(b, DRIVER_ENABLED); + } else { + clear_boost_bit(b, DRIVER_ENABLED); + /* Stop everything */ + unboost_all_cpus(b); + } + + return size; +} + +static ssize_t enabled_read(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct boost_policy *b = boost_policy_g; + + return snprintf(buf, PAGE_SIZE, "%u\n", + get_boost_state(b) & DRIVER_ENABLED); +} + +static DEVICE_ATTR(enabled, 0644, + enabled_read, enabled_write); + +static struct attribute *cpu_fp_attr[] = { + &dev_attr_enabled.attr, + NULL +}; + +static struct attribute_group cpu_fp_attr_group = { + .attrs = cpu_fp_attr, +}; + +static int sysfs_fp_init(void) +{ + struct kobject *kobj; + int ret; + + kobj = kobject_create_and_add("fp_boost", kernel_kobj); + if (!kobj) { + pr_err("Failed to create kobject\n"); + return -ENOMEM; + } + + ret = sysfs_create_group(kobj, &cpu_fp_attr_group); + if (ret) { + pr_err("Failed to create sysfs interface\n"); + kobject_put(kobj); + } + + return ret; +} + +static struct boost_policy *alloc_boost_policy(void) +{ + struct boost_policy *b; + + b = kzalloc(sizeof(*b), GFP_KERNEL); + if (!b) + return NULL; + + b->wq = alloc_workqueue("cpu_fp_wq", WQ_HIGHPRI, 0); + if (!b->wq) { + pr_err("Failed to allocate workqueue\n"); + goto free_b; + } + + return b; + +free_b: + kfree(b); + return NULL; +} + +static int __init cpu_fp_init(void) +{ + struct boost_policy *b; + int ret; + touched = false; + + b = alloc_boost_policy(); + if (!b) { + pr_err("Failed to allocate boost policy\n"); + return -ENOMEM; + } + + spin_lock_init(&b->lock); + + INIT_DELAYED_WORK(&b->fp.boost_work, fp_boost_main); + INIT_DELAYED_WORK(&b->fp.unboost_work, fp_unboost_main); + + /* Allow global boost config access */ + boost_policy_g = b; + + ret = input_register_handler(&cpu_fp_input_handler); + if (ret) { + pr_err("Failed to register input handler, err: %d\n", ret); + goto free_mem; + } + + ret = sysfs_fp_init(); + if (ret) + goto input_unregister; + + cpufreq_register_notifier(&do_cpu_boost_nb, CPUFREQ_POLICY_NOTIFIER); + + return 0; + +input_unregister: + 
input_unregister_handler(&cpu_fp_input_handler); +free_mem: + kfree(b); + return ret; +} +late_initcall(cpu_fp_init); + From 00d4e386ed2618508427b3a805ac210ef75993e4 Mon Sep 17 00:00:00 2001 From: Angheloaia Victor <extremegrief@pop-os.localdomain> Date: Thu, 26 Mar 2020 21:30:28 +0200 Subject: [PATCH 284/452] block: Add Maple I/O scheduler --- block/Kconfig.iosched | 8 + block/Makefile | 1 + block/maple-iosched.c | 459 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 468 insertions(+) create mode 100644 block/maple-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index a4a8914bf7a4..9ee2932fd174 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -22,6 +22,10 @@ config IOSCHED_DEADLINE a new point in the service tree and doing a batch of IO from there in case of expiry. +config IOSCHED_MAPLE + tristate "Maple I/O scheduler" + default n + config IOSCHED_CFQ tristate "CFQ I/O scheduler" default y @@ -54,6 +58,9 @@ choice config DEFAULT_CFQ bool "CFQ" if IOSCHED_CFQ=y + config DEFAULT_MAPLE + bool "MAPLE" if IOSCHED_MAPLE=y + config DEFAULT_NOOP bool "No-op" @@ -63,6 +70,7 @@ config DEFAULT_IOSCHED string default "deadline" if DEFAULT_DEADLINE default "cfq" if DEFAULT_CFQ + default "maple" if DEFAULT_MAPLE default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index b0249cd24633..dc1c0dfdbed1 100644 --- a/block/Makefile +++ b/block/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o +obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/maple-iosched.c b/block/maple-iosched.c new file mode 100644 index 000000000000..beb474e83ec9 --- /dev/null +++ b/block/maple-iosched.c @@ -0,0 +1,459 @@ +/* + * Maple I/O Scheduler + * Based on Zen and SIO. + * + * Copyright (C) 2016 Joe Maples <joe@frap129.org> + * (C) 2012 Brandon Berhent <bbedward@gmail.com + * (C) 2012 Miguel Boton <mboton@gmail.com> + * + * Maple uses a first come first serve style algorithm with seperated read/write + * handling to allow for read biases. By prioritizing reads, simple tasks should improve + * in performance. Maple also uses hooks for the powersuspend driver to increase + * expirations when power is suspended to decrease workload. + */ +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/fb.h> + +#define MAPLE_IOSCHED_PATCHLEVEL (8) + +enum { ASYNC, SYNC }; + +/* Tunables */ +static const int sync_read_expire = 350; /* max time before a read sync is submitted. */ +static const int sync_write_expire = 550; /* max time before a write sync is submitted. */ +static const int async_read_expire = 250; /* ditto for read async, these limits are SOFT! */ +static const int async_write_expire = 450; /* ditto for write async, these limits are SOFT! */ +static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. 
*/ +static const int writes_starved = 4; /* max times reads can starve a write */ +static const int sleep_latency_multiple = 10; /* multple for expire time when device is asleep */ + +/* Elevator data */ +struct maple_data { + /* Request queues */ + struct list_head fifo_list[2][2]; + + /* Attributes */ + unsigned int batched; + unsigned int starved; + + /* Settings */ + int fifo_expire[2][2]; + int fifo_batch; + int writes_starved; + int sleep_latency_multiple; + + /* Display state */ + struct notifier_block fb_notifier; + bool display_on; +}; + +static inline struct maple_data * +maple_get_data(struct request_queue *q) { + return q->elevator->elevator_data; +} + +static void +maple_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * If next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo. + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, (unsigned long)rq->fifo_time)) { + list_move(&rq->queuelist, &next->queuelist); + rq->fifo_time = next->fifo_time; + } + } + + /* Delete next request */ + rq_fifo_clear(next); +} + +static void +maple_add_request(struct request_queue *q, struct request *rq) +{ + struct maple_data *mdata = maple_get_data(q); + const int sync = rq_is_sync(rq); + const int dir = rq_data_dir(rq); + + /* + * Add request to the proper fifo list and set its + * expire time. + */ + + /* inrease expiration when device is asleep */ + unsigned int fifo_expire_suspended = mdata->fifo_expire[sync][dir] * sleep_latency_multiple; + if (mdata->display_on && mdata->fifo_expire[sync][dir]) { + rq->fifo_time = jiffies + mdata->fifo_expire[sync][dir]; + list_add_tail(&rq->queuelist, &mdata->fifo_list[sync][dir]); + } else if (!mdata->display_on && fifo_expire_suspended) { + rq->fifo_time = jiffies + fifo_expire_suspended; + list_add_tail(&rq->queuelist, &mdata->fifo_list[sync][dir]); + } +} + +static struct request * +maple_expired_request(struct maple_data *mdata, int sync, int data_dir) +{ + struct list_head *list = &mdata->fifo_list[sync][data_dir]; + struct request *rq; + + if (list_empty(list)) + return NULL; + + /* Retrieve request */ + rq = rq_entry_fifo(list->next); + + /* Request has expired */ + if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) + return rq; + + return NULL; +} + +static struct request * +maple_choose_expired_request(struct maple_data *mdata) +{ + struct request *rq_sync_read = maple_expired_request(mdata, SYNC, READ); + struct request *rq_sync_write = maple_expired_request(mdata, SYNC, WRITE); + struct request *rq_async_read = maple_expired_request(mdata, ASYNC, READ); + struct request *rq_async_write = maple_expired_request(mdata, ASYNC, WRITE); + + /* Reset (non-expired-)batch-counter */ + mdata->batched = 0; + + /* + * Check expired requests. + * Asynchronous requests have priority over synchronous. + * Read requests have priority over write. 
+ */ + + if (rq_async_read && rq_sync_read) { + if (time_after((unsigned long)rq_sync_read->fifo_time, (unsigned long)rq_async_read->fifo_time)) + return rq_async_read; + } else if (rq_async_read) { + return rq_async_read; + } else if (rq_sync_read) { + return rq_sync_read; + } + + if (rq_async_write && rq_sync_write) { +if (time_after((unsigned long)rq_sync_write->fifo_time, (unsigned long)rq_async_write->fifo_time)) + return rq_async_write; + } else if (rq_async_write) { + return rq_async_write; + } else if (rq_sync_write) { + return rq_sync_write; + } + + return NULL; +} + +static struct request * +maple_choose_request(struct maple_data *mdata, int data_dir) +{ + struct list_head *sync = mdata->fifo_list[SYNC]; + struct list_head *async = mdata->fifo_list[ASYNC]; + + /* Increase (non-expired-)batch-counter */ + mdata->batched++; + + + /* + * Retrieve request from available fifo list. + * Asynchronous requests have priority over synchronous. + * Read requests have priority over write. + */ + if (!list_empty(&async[data_dir])) + return rq_entry_fifo(async[data_dir].next); + if (!list_empty(&sync[data_dir])) + return rq_entry_fifo(sync[data_dir].next); + + if (!list_empty(&async[!data_dir])) + return rq_entry_fifo(async[!data_dir].next); + if (!list_empty(&sync[!data_dir])) + return rq_entry_fifo(sync[!data_dir].next); + + return NULL; +} + +static inline void +maple_dispatch_request(struct maple_data *mdata, struct request *rq) +{ + /* + * Remove the request from the fifo list + * and dispatch it. + */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + if (rq_data_dir(rq)) { + mdata->starved = 0; + } else { + if (!list_empty(&mdata->fifo_list[SYNC][WRITE]) || + !list_empty(&mdata->fifo_list[ASYNC][WRITE])) + mdata->starved++; + } +} + +static int +maple_dispatch_requests(struct request_queue *q, int force) +{ + struct maple_data *mdata = maple_get_data(q); + struct request *rq = NULL; + int data_dir = READ; + + /* + * Retrieve any expired request after a batch of + * sequential requests. 
+ */ + if (mdata->batched >= mdata->fifo_batch) + rq = maple_choose_expired_request(mdata); + + /* Retrieve request */ + if (!rq) { + /* Treat writes fairly while suspended, otherwise allow them to be starved */ + if (mdata->display_on && mdata->starved >= mdata->writes_starved) + data_dir = WRITE; + else if (!mdata->display_on && mdata->starved >= 1) + data_dir = WRITE; + + rq = maple_choose_request(mdata, data_dir); + if (!rq) + return 0; + } + + /* Dispatch request */ + maple_dispatch_request(mdata, rq); + + return 1; +} + +static struct request * +maple_former_request(struct request_queue *q, struct request *rq) +{ + struct maple_data *mdata = maple_get_data(q); + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.prev == &mdata->fifo_list[sync][data_dir]) + return NULL; + + /* Return former request */ + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +maple_latter_request(struct request_queue *q, struct request *rq) +{ + struct maple_data *mdata = maple_get_data(q); + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.next == &mdata->fifo_list[sync][data_dir]) + return NULL; + + /* Return latter request */ + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static int fb_notifier_callback(struct notifier_block *self, + unsigned long event, void *data) +{ + struct maple_data *mdata = container_of(self, + struct maple_data, fb_notifier); + struct fb_event *evdata = data; + int *blank; + + if (evdata && evdata->data && event == FB_EVENT_BLANK) { + blank = evdata->data; + switch (*blank) { + case FB_BLANK_UNBLANK: + mdata->display_on = true; + break; + case FB_BLANK_POWERDOWN: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_NORMAL: + mdata->display_on = false; + break; + } + } + + return 0; +} + +static int maple_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct maple_data *mdata; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + /* Allocate structure */ + mdata = kmalloc_node(sizeof(*mdata), GFP_KERNEL, q->node); + if (!mdata) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = mdata; + + mdata->fb_notifier.notifier_call = fb_notifier_callback; + fb_register_client(&mdata->fb_notifier); + + /* Initialize fifo lists */ + INIT_LIST_HEAD(&mdata->fifo_list[SYNC][READ]); + INIT_LIST_HEAD(&mdata->fifo_list[SYNC][WRITE]); + INIT_LIST_HEAD(&mdata->fifo_list[ASYNC][READ]); + INIT_LIST_HEAD(&mdata->fifo_list[ASYNC][WRITE]); + + /* Initialize data */ + mdata->batched = 0; + mdata->fifo_expire[SYNC][READ] = sync_read_expire; + mdata->fifo_expire[SYNC][WRITE] = sync_write_expire; + mdata->fifo_expire[ASYNC][READ] = async_read_expire; + mdata->fifo_expire[ASYNC][WRITE] = async_write_expire; + mdata->fifo_batch = fifo_batch; + mdata->writes_starved = writes_starved; + mdata->sleep_latency_multiple = sleep_latency_multiple; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + return 0; +} + +static void +maple_exit_queue(struct elevator_queue *e) +{ + struct maple_data *mdata = e->elevator_data; + + fb_unregister_client(&mdata->fb_notifier); + + /* Free structure */ + kfree(mdata); +} + +/* + * sysfs code + */ + +static ssize_t +maple_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +maple_var_store(int *var, const char *page, size_t count) +{ + char *p = 
(char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct maple_data *mdata = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return maple_var_show(__data, (page)); \ +} +SHOW_FUNCTION(maple_sync_read_expire_show, mdata->fifo_expire[SYNC][READ], 1); +SHOW_FUNCTION(maple_sync_write_expire_show, mdata->fifo_expire[SYNC][WRITE], 1); +SHOW_FUNCTION(maple_async_read_expire_show, mdata->fifo_expire[ASYNC][READ], 1); +SHOW_FUNCTION(maple_async_write_expire_show, mdata->fifo_expire[ASYNC][WRITE], 1); +SHOW_FUNCTION(maple_fifo_batch_show, mdata->fifo_batch, 0); +SHOW_FUNCTION(maple_writes_starved_show, mdata->writes_starved, 0); +SHOW_FUNCTION(maple_sleep_latency_multiple_show, mdata->sleep_latency_multiple, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct maple_data *mdata = e->elevator_data; \ + int __data; \ + int ret = maple_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(maple_sync_read_expire_store, &mdata->fifo_expire[SYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(maple_sync_write_expire_store, &mdata->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(maple_async_read_expire_store, &mdata->fifo_expire[ASYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(maple_async_write_expire_store, &mdata->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(maple_fifo_batch_store, &mdata->fifo_batch, 1, INT_MAX, 0); +STORE_FUNCTION(maple_writes_starved_store, &mdata->writes_starved, 1, INT_MAX, 0); +STORE_FUNCTION(maple_sleep_latency_multiple_store, &mdata->sleep_latency_multiple, 1, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, maple_##name##_show, \ + maple_##name##_store) + +static struct elv_fs_entry maple_attrs[] = { + DD_ATTR(sync_read_expire), + DD_ATTR(sync_write_expire), + DD_ATTR(async_read_expire), + DD_ATTR(async_write_expire), + DD_ATTR(fifo_batch), + DD_ATTR(writes_starved), + DD_ATTR(sleep_latency_multiple), + __ATTR_NULL +}; + +static struct elevator_type iosched_maple = { + .ops.sq = { + .elevator_merge_req_fn = maple_merged_requests, + .elevator_dispatch_fn = maple_dispatch_requests, + .elevator_add_req_fn = maple_add_request, + .elevator_former_req_fn = maple_former_request, + .elevator_latter_req_fn = maple_latter_request, + .elevator_init_fn = maple_init_queue, + .elevator_exit_fn = maple_exit_queue, + }, + + .elevator_attrs = maple_attrs, + .elevator_name = "maple", + .elevator_owner = THIS_MODULE, +}; + +static int __init maple_init(void) +{ + /* Register elevator */ + elv_register(&iosched_maple); + + return 0; +} + +static void __exit maple_exit(void) +{ + /* Unregister elevator */ + elv_unregister(&iosched_maple); +} + +module_init(maple_init); +module_exit(maple_exit); + +MODULE_AUTHOR("Joe Maples"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Maple I/O Scheduler"); +MODULE_VERSION("1.0"); From 629de1f396ddbf38dbe63e87d21b6a25be03438f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 5 Apr 2020 16:54:43 +0300 Subject: [PATCH 285/452] block: Add FIOPS I/O scheduler 
Signed-off-by: Denis Efremov <efremov@linux.com> --- block/Kconfig.iosched | 12 + block/Makefile | 1 + block/fiops-iosched.c | 770 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 783 insertions(+) create mode 100644 block/fiops-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 9ee2932fd174..855fe54e5470 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -44,6 +44,14 @@ config CFQ_GROUP_IOSCHED ---help--- Enable group IO scheduling in CFQ. +config IOSCHED_FIOPS + tristate "IOPS based I/O scheduler" + default n + ---help--- + This is an IOPS based I/O scheduler. It will try to distribute + IOPS equally among all processes in the system. It's mainly for + Flash based storage. + choice prompt "Default I/O scheduler" @@ -61,6 +69,9 @@ choice config DEFAULT_MAPLE bool "MAPLE" if IOSCHED_MAPLE=y + config DEFAULT_FIOPS + bool "FIOPS" if IOSCHED_FIOPS=y + config DEFAULT_NOOP bool "No-op" @@ -71,6 +82,7 @@ config DEFAULT_IOSCHED default "deadline" if DEFAULT_DEADLINE default "cfq" if DEFAULT_CFQ default "maple" if DEFAULT_MAPLE + default "fiops" if DEFAULT_FIOPS default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index dc1c0dfdbed1..2023d8e298b1 100644 --- a/block/Makefile +++ b/block/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o +obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/fiops-iosched.c b/block/fiops-iosched.c new file mode 100644 index 000000000000..72d22b5062e9 --- /dev/null +++ b/block/fiops-iosched.c @@ -0,0 +1,770 @@ +/* + * IOPS based IO scheduler. Based on CFQ. 
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> + * Shaohua Li <shli@kernel.org> + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/jiffies.h> +#include <linux/rbtree.h> +#include <linux/ioprio.h> +#include <linux/blktrace_api.h> +#include "blk.h" + +#define VIOS_SCALE_SHIFT 10 +#define VIOS_SCALE (1 << VIOS_SCALE_SHIFT) + +#define VIOS_READ_SCALE (1) +#define VIOS_WRITE_SCALE (1) +#define VIOS_SYNC_SCALE (2) +#define VIOS_ASYNC_SCALE (5) + +#define VIOS_PRIO_SCALE (5) + +struct fiops_rb_root { + struct rb_root rb; + struct rb_node *left; + unsigned count; + + u64 min_vios; +}; +#define FIOPS_RB_ROOT (struct fiops_rb_root) { .rb = RB_ROOT} + +enum wl_prio_t { + IDLE_WORKLOAD = 0, + BE_WORKLOAD = 1, + RT_WORKLOAD = 2, + FIOPS_PRIO_NR, +}; + +struct fiops_data { + struct request_queue *queue; + + struct fiops_rb_root service_tree[FIOPS_PRIO_NR]; + + unsigned int busy_queues; + unsigned int in_flight[2]; + + struct work_struct unplug_work; + + unsigned int read_scale; + unsigned int write_scale; + unsigned int sync_scale; + unsigned int async_scale; +}; + +struct fiops_ioc { + struct io_cq icq; + + unsigned int flags; + struct fiops_data *fiopsd; + struct rb_node rb_node; + u64 vios; /* key in service_tree */ + struct fiops_rb_root *service_tree; + + unsigned int in_flight; + + struct rb_root sort_list; + struct list_head fifo; + + pid_t pid; + unsigned short ioprio; + enum wl_prio_t wl_type; +}; + +#define ioc_service_tree(ioc) (&((ioc)->fiopsd->service_tree[(ioc)->wl_type])) +#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) + +enum ioc_state_flags { + FIOPS_IOC_FLAG_on_rr = 0, /* on round-robin busy list */ + FIOPS_IOC_FLAG_prio_changed, /* task priority has changed */ +}; + +#define FIOPS_IOC_FNS(name) \ +static inline void fiops_mark_ioc_##name(struct fiops_ioc *ioc) \ +{ \ + ioc->flags |= (1 << FIOPS_IOC_FLAG_##name); \ +} \ +static inline void fiops_clear_ioc_##name(struct fiops_ioc *ioc) \ +{ \ + ioc->flags &= ~(1 << FIOPS_IOC_FLAG_##name); \ +} \ +static inline int fiops_ioc_##name(const struct fiops_ioc *ioc) \ +{ \ + return ((ioc)->flags & (1 << FIOPS_IOC_FLAG_##name)) != 0; \ +} + +FIOPS_IOC_FNS(on_rr); +FIOPS_IOC_FNS(prio_changed); +#undef FIOPS_IOC_FNS + +#define fiops_log_ioc(fiopsd, ioc, fmt, args...) \ + blk_add_trace_msg((fiopsd)->queue, "ioc%d " fmt, (ioc)->pid, ##args) +#define fiops_log(fiopsd, fmt, args...) 
\ + blk_add_trace_msg((fiopsd)->queue, "fiops " fmt, ##args) + +enum wl_prio_t fiops_wl_type(short prio_class) +{ + if (prio_class == IOPRIO_CLASS_RT) + return RT_WORKLOAD; + if (prio_class == IOPRIO_CLASS_BE) + return BE_WORKLOAD; + return IDLE_WORKLOAD; +} + +static inline struct fiops_ioc *icq_to_cic(struct io_cq *icq) +{ + /* cic->icq is the first member, %NULL will convert to %NULL */ + return container_of(icq, struct fiops_ioc, icq); +} + +static inline struct fiops_ioc *fiops_cic_lookup(struct fiops_data *fiopsd, + struct io_context *ioc) +{ + if (ioc) + return icq_to_cic(ioc_lookup_icq(ioc, fiopsd->queue)); + return NULL; +} + +/* + * The below is leftmost cache rbtree addon + */ +static struct fiops_ioc *fiops_rb_first(struct fiops_rb_root *root) +{ + /* Service tree is empty */ + if (!root->count) + return NULL; + + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry(root->left, struct fiops_ioc, rb_node); + + return NULL; +} + +static void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} + +static void fiops_rb_erase(struct rb_node *n, struct fiops_rb_root *root) +{ + if (root->left == n) + root->left = NULL; + rb_erase_init(n, &root->rb); + --root->count; +} + +static inline u64 max_vios(u64 min_vios, u64 vios) +{ + s64 delta = (s64)(vios - min_vios); + if (delta > 0) + min_vios = vios; + + return min_vios; +} + +static void fiops_update_min_vios(struct fiops_rb_root *service_tree) +{ + struct fiops_ioc *ioc; + + ioc = fiops_rb_first(service_tree); + if (!ioc) + return; + service_tree->min_vios = max_vios(service_tree->min_vios, ioc->vios); +} + +/* + * The fiopsd->service_trees holds all pending fiops_ioc's that have + * requests waiting to be processed. It is sorted in the order that + * we will service the queues. + */ +static void fiops_service_tree_add(struct fiops_data *fiopsd, + struct fiops_ioc *ioc) +{ + struct rb_node **p, *parent; + struct fiops_ioc *__ioc; + struct fiops_rb_root *service_tree = ioc_service_tree(ioc); + u64 vios; + int left; + + /* New added IOC */ + if (RB_EMPTY_NODE(&ioc->rb_node)) { + if (ioc->in_flight > 0) + vios = ioc->vios; + else + vios = max_vios(service_tree->min_vios, ioc->vios); + } else { + vios = ioc->vios; + /* ioc->service_tree might not equal to service_tree */ + fiops_rb_erase(&ioc->rb_node, ioc->service_tree); + ioc->service_tree = NULL; + } + + fiops_log_ioc(fiopsd, ioc, "service tree add, vios %lld", vios); + + left = 1; + parent = NULL; + ioc->service_tree = service_tree; + p = &service_tree->rb.rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + __ioc = rb_entry(parent, struct fiops_ioc, rb_node); + + /* + * sort by key, that represents service time. + */ + if (vios < __ioc->vios) + n = &(*p)->rb_left; + else { + n = &(*p)->rb_right; + left = 0; + } + + p = n; + } + + if (left) + service_tree->left = &ioc->rb_node; + + ioc->vios = vios; + rb_link_node(&ioc->rb_node, parent, p); + rb_insert_color(&ioc->rb_node, &service_tree->rb); + service_tree->count++; + + fiops_update_min_vios(service_tree); +} + +/* + * Update ioc's position in the service tree. + */ +static void fiops_resort_rr_list(struct fiops_data *fiopsd, + struct fiops_ioc *ioc) +{ + /* + * Resorting requires the ioc to be on the RR list already. 
+ */ + if (fiops_ioc_on_rr(ioc)) + fiops_service_tree_add(fiopsd, ioc); +} + +/* + * add to busy list of queues for service, trying to be fair in ordering + * the pending list according to last request service + */ +static void fiops_add_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc) +{ + BUG_ON(fiops_ioc_on_rr(ioc)); + fiops_mark_ioc_on_rr(ioc); + + fiopsd->busy_queues++; + + fiops_resort_rr_list(fiopsd, ioc); +} + +/* + * Called when the ioc no longer has requests pending, remove it from + * the service tree. + */ +static void fiops_del_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc) +{ + BUG_ON(!fiops_ioc_on_rr(ioc)); + fiops_clear_ioc_on_rr(ioc); + + if (!RB_EMPTY_NODE(&ioc->rb_node)) { + fiops_rb_erase(&ioc->rb_node, ioc->service_tree); + ioc->service_tree = NULL; + } + + BUG_ON(!fiopsd->busy_queues); + fiopsd->busy_queues--; +} + +/* + * rb tree support functions + */ +static void fiops_del_rq_rb(struct request *rq) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + + elv_rb_del(&ioc->sort_list, rq); +} + +static void fiops_add_rq_rb(struct request *rq) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + struct fiops_data *fiopsd = ioc->fiopsd; + + elv_rb_add(&ioc->sort_list, rq); + + if (!fiops_ioc_on_rr(ioc)) + fiops_add_ioc_rr(fiopsd, ioc); +} + +static void fiops_reposition_rq_rb(struct fiops_ioc *ioc, struct request *rq) +{ + elv_rb_del(&ioc->sort_list, rq); + fiops_add_rq_rb(rq); +} + +static void fiops_remove_request(struct request *rq) +{ + list_del_init(&rq->queuelist); + fiops_del_rq_rb(rq); +} + +static u64 fiops_scaled_vios(struct fiops_data *fiopsd, + struct fiops_ioc *ioc, struct request *rq) +{ + int vios = VIOS_SCALE; + + if (rq_data_dir(rq) == WRITE) + vios = vios * fiopsd->write_scale / fiopsd->read_scale; + + if (!rq_is_sync(rq)) + vios = vios * fiopsd->async_scale / fiopsd->sync_scale; + + vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE; + + return vios; +} + +/* return vios dispatched */ +static u64 fiops_dispatch_request(struct fiops_data *fiopsd, + struct fiops_ioc *ioc) +{ + struct request *rq; + struct request_queue *q = fiopsd->queue; + + rq = rq_entry_fifo(ioc->fifo.next); + + fiops_remove_request(rq); + elv_dispatch_add_tail(q, rq); + + fiopsd->in_flight[rq_is_sync(rq)]++; + ioc->in_flight++; + + return fiops_scaled_vios(fiopsd, ioc, rq); +} + +static int fiops_forced_dispatch(struct fiops_data *fiopsd) +{ + struct fiops_ioc *ioc; + int dispatched = 0; + int i; + + for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) { + while (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) { + ioc = fiops_rb_first(&fiopsd->service_tree[i]); + + while (!list_empty(&ioc->fifo)) { + fiops_dispatch_request(fiopsd, ioc); + dispatched++; + } + if (fiops_ioc_on_rr(ioc)) + fiops_del_ioc_rr(fiopsd, ioc); + } + } + return dispatched; +} + +static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd) +{ + struct fiops_ioc *ioc; + struct fiops_rb_root *service_tree = NULL; + int i; + struct request *rq; + + for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) { + if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) { + service_tree = &fiopsd->service_tree[i]; + break; + } + } + + if (!service_tree) + return NULL; + + ioc = fiops_rb_first(service_tree); + + rq = rq_entry_fifo(ioc->fifo.next); + /* + * we are the only async task and sync requests are in flight, delay a + * moment. 
If there are other tasks coming, sync tasks have no chance + * to be starved, don't delay + */ + if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 && + service_tree->count == 1) { + fiops_log_ioc(fiopsd, ioc, + "postpone async, in_flight async %d sync %d", + fiopsd->in_flight[0], fiopsd->in_flight[1]); + return NULL; + } + + return ioc; +} + +static void fiops_charge_vios(struct fiops_data *fiopsd, + struct fiops_ioc *ioc, u64 vios) +{ + struct fiops_rb_root *service_tree = ioc->service_tree; + ioc->vios += vios; + + fiops_log_ioc(fiopsd, ioc, "charge vios %lld, new vios %lld", vios, ioc->vios); + + if (RB_EMPTY_ROOT(&ioc->sort_list)) + fiops_del_ioc_rr(fiopsd, ioc); + else + fiops_resort_rr_list(fiopsd, ioc); + + fiops_update_min_vios(service_tree); +} + +static int fiops_dispatch_requests(struct request_queue *q, int force) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct fiops_ioc *ioc; + u64 vios; + + if (unlikely(force)) + return fiops_forced_dispatch(fiopsd); + + ioc = fiops_select_ioc(fiopsd); + if (!ioc) + return 0; + + vios = fiops_dispatch_request(fiopsd, ioc); + + fiops_charge_vios(fiopsd, ioc, vios); + return 1; +} + +static void fiops_init_prio_data(struct fiops_ioc *cic) +{ + struct task_struct *tsk = current; + struct io_context *ioc = cic->icq.ioc; + int ioprio_class; + + if (!fiops_ioc_prio_changed(cic)) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "fiops: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, inherit CPU scheduling settings + */ + cic->ioprio = task_nice_ioprio(tsk); + cic->wl_type = fiops_wl_type(task_nice_ioclass(tsk)); + break; + case IOPRIO_CLASS_RT: + cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio); + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_RT); + break; + case IOPRIO_CLASS_BE: + cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio); + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_BE); + break; + case IOPRIO_CLASS_IDLE: + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_IDLE); + cic->ioprio = 7; + break; + } + + fiops_clear_ioc_prio_changed(cic); +} + +static void fiops_insert_request(struct request_queue *q, struct request *rq) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + + fiops_init_prio_data(ioc); + + list_add_tail(&rq->queuelist, &ioc->fifo); + + fiops_add_rq_rb(rq); +} + +/* + * scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing + */ +static inline void fiops_schedule_dispatch(struct fiops_data *fiopsd) +{ + if (fiopsd->busy_queues) + kblockd_schedule_work(&fiopsd->unplug_work); +} + +static void fiops_completed_request(struct request_queue *q, struct request *rq) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct fiops_ioc *ioc = RQ_CIC(rq); + + fiopsd->in_flight[rq_is_sync(rq)]--; + ioc->in_flight--; + + fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d", + ioc->in_flight, fiopsd->busy_queues); + + if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0) + fiops_schedule_dispatch(fiopsd); +} + +static struct request * +fiops_find_rq_fmerge(struct fiops_data *fiopsd, struct bio *bio) +{ + struct task_struct *tsk = current; + struct fiops_ioc *cic; + + cic = fiops_cic_lookup(fiopsd, tsk->io_context); + + if (cic) { + return elv_rb_find(&cic->sort_list, bio_end_sector(bio)); + } + + return NULL; +} + +static enum elv_merge +fiops_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct request 
*__rq; + + __rq = fiops_find_rq_fmerge(fiopsd, bio); + if (__rq && elv_bio_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void fiops_merged_request(struct request_queue *q, struct request *req, + enum elv_merge type) +{ + if (type == ELEVATOR_FRONT_MERGE) { + struct fiops_ioc *ioc = RQ_CIC(req); + + fiops_reposition_rq_rb(ioc, req); + } +} + +static void +fiops_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct fiops_ioc *ioc = RQ_CIC(rq); + struct fiops_data *fiopsd = q->elevator->elevator_data; + + fiops_remove_request(next); + + ioc = RQ_CIC(next); + /* + * all requests of this task are merged to other tasks, delete it + * from the service tree. + */ + if (fiops_ioc_on_rr(ioc) && RB_EMPTY_ROOT(&ioc->sort_list)) + fiops_del_ioc_rr(fiopsd, ioc); +} + +static int fiops_allow_bio_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct fiops_data *fiopsd = q->elevator->elevator_data; + struct fiops_ioc *cic; + + /* + * Lookup the ioc that this bio will be queued with. Allow + * merge only if rq is queued there. + */ + cic = fiops_cic_lookup(fiopsd, current->io_context); + + return cic == RQ_CIC(rq); +} + +static int fiops_allow_rq_merge(struct request_queue *q, struct request *rq, + struct request *next) +{ + return RQ_CIC(rq) == RQ_CIC(next); +} + +static void fiops_exit_queue(struct elevator_queue *e) +{ + struct fiops_data *fiopsd = e->elevator_data; + + cancel_work_sync(&fiopsd->unplug_work); + + kfree(fiopsd); +} + +static void fiops_kick_queue(struct work_struct *work) +{ + struct fiops_data *fiopsd = + container_of(work, struct fiops_data, unplug_work); + struct request_queue *q = fiopsd->queue; + + spin_lock_irq(q->queue_lock); + __blk_run_queue(q); + spin_unlock_irq(q->queue_lock); +} + +static int fiops_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct fiops_data *fiopsd; + int i; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + fiopsd = kzalloc_node(sizeof(*fiopsd), GFP_KERNEL, q->node); + if (!fiopsd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = fiopsd; + + fiopsd->queue = q; + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + for (i = IDLE_WORKLOAD; i <= RT_WORKLOAD; i++) + fiopsd->service_tree[i] = FIOPS_RB_ROOT; + + INIT_WORK(&fiopsd->unplug_work, fiops_kick_queue); + + fiopsd->read_scale = VIOS_READ_SCALE; + fiopsd->write_scale = VIOS_WRITE_SCALE; + fiopsd->sync_scale = VIOS_SYNC_SCALE; + fiopsd->async_scale = VIOS_ASYNC_SCALE; + + return 0; +} + +static void fiops_init_icq(struct io_cq *icq) +{ + struct fiops_data *fiopsd = icq->q->elevator->elevator_data; + struct fiops_ioc *ioc = icq_to_cic(icq); + + RB_CLEAR_NODE(&ioc->rb_node); + INIT_LIST_HEAD(&ioc->fifo); + ioc->sort_list = RB_ROOT; + + ioc->fiopsd = fiopsd; + + ioc->pid = current->pid; + fiops_mark_ioc_prio_changed(ioc); +} + +/* + * sysfs parts below --> + */ +static ssize_t +fiops_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +fiops_var_store(unsigned int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtoul(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct fiops_data *fiopsd = e->elevator_data; \ + return fiops_var_show(__VAR, (page)); \ +} 
+SHOW_FUNCTION(fiops_read_scale_show, fiopsd->read_scale); +SHOW_FUNCTION(fiops_write_scale_show, fiopsd->write_scale); +SHOW_FUNCTION(fiops_sync_scale_show, fiopsd->sync_scale); +SHOW_FUNCTION(fiops_async_scale_show, fiopsd->async_scale); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct fiops_data *fiopsd = e->elevator_data; \ + unsigned int __data; \ + int ret = fiops_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(fiops_read_scale_store, &fiopsd->read_scale, 1, 100); +STORE_FUNCTION(fiops_write_scale_store, &fiopsd->write_scale, 1, 100); +STORE_FUNCTION(fiops_sync_scale_store, &fiopsd->sync_scale, 1, 100); +STORE_FUNCTION(fiops_async_scale_store, &fiopsd->async_scale, 1, 100); +#undef STORE_FUNCTION + +#define FIOPS_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, fiops_##name##_show, fiops_##name##_store) + +static struct elv_fs_entry fiops_attrs[] = { + FIOPS_ATTR(read_scale), + FIOPS_ATTR(write_scale), + FIOPS_ATTR(sync_scale), + FIOPS_ATTR(async_scale), + __ATTR_NULL +}; + +static struct elevator_type iosched_fiops = { + .ops.sq = { + .elevator_merge_fn = fiops_merge, + .elevator_merged_fn = fiops_merged_request, + .elevator_merge_req_fn = fiops_merged_requests, + .elevator_allow_bio_merge_fn = fiops_allow_bio_merge, + .elevator_allow_rq_merge_fn = fiops_allow_rq_merge, + .elevator_dispatch_fn = fiops_dispatch_requests, + .elevator_add_req_fn = fiops_insert_request, + .elevator_completed_req_fn = fiops_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_icq_fn = fiops_init_icq, + .elevator_init_fn = fiops_init_queue, + .elevator_exit_fn = fiops_exit_queue, + }, + .icq_size = sizeof(struct fiops_ioc), + .icq_align = __alignof__(struct fiops_ioc), + .elevator_attrs = fiops_attrs, + .elevator_name = "fiops", + .elevator_owner = THIS_MODULE, +}; + +static int __init fiops_init(void) +{ + return elv_register(&iosched_fiops); +} + +static void __exit fiops_exit(void) +{ + elv_unregister(&iosched_fiops); +} + +module_init(fiops_init); +module_exit(fiops_exit); + +MODULE_AUTHOR("Jens Axboe, Shaohua Li <shli@kernel.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IOPS based IO scheduler"); From dab9d63b1c08bdca1e5b161aa77f527441de0fce Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 6 Apr 2020 10:25:27 +0300 Subject: [PATCH 286/452] block: Add SIO I/O scheduler Signed-off-by: Denis Efremov <efremov@linux.com> --- block/Kconfig.iosched | 14 ++ block/Makefile | 1 + block/sio-iosched.c | 412 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 427 insertions(+) create mode 100644 block/sio-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 855fe54e5470..02ace3ceb2b0 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -52,6 +52,16 @@ config IOSCHED_FIOPS IOPS equally among all processes in the system. It's mainly for Flash based storage. +config IOSCHED_SIO + tristate "Simple I/O scheduler" + default n + ---help--- + The Simple I/O scheduler is an extremely simple scheduler, + based on noop and deadline, that relies on deadlines to + ensure fairness. The algorithm does not do any sorting but + basic merging, trying to keep a minimum overhead. 
It is aimed + mainly for aleatory access devices (eg: flash devices). + choice prompt "Default I/O scheduler" @@ -72,6 +82,9 @@ choice config DEFAULT_FIOPS bool "FIOPS" if IOSCHED_FIOPS=y + config DEFAULT_SIO + bool "SIO" if IOSCHED_SIO=y + config DEFAULT_NOOP bool "No-op" @@ -83,6 +96,7 @@ config DEFAULT_IOSCHED default "cfq" if DEFAULT_CFQ default "maple" if DEFAULT_MAPLE default "fiops" if DEFAULT_FIOPS + default "sio" if DEFAULT_SIO default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index 2023d8e298b1..25320b87d941 100644 --- a/block/Makefile +++ b/block/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o +obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/sio-iosched.c b/block/sio-iosched.c new file mode 100644 index 000000000000..45b97b8db612 --- /dev/null +++ b/block/sio-iosched.c @@ -0,0 +1,412 @@ +/* + * Simple IO scheduler + * Based on Noop, Deadline and V(R) IO schedulers. + * + * Copyright (C) 2012 Miguel Boton <mboton@gmail.com> + * + * + * This algorithm does not do any kind of sorting, as it is aimed for + * aleatory access devices, but it does some basic merging. We try to + * keep minimum overhead to achieve low latency. + * + * Asynchronous and synchronous requests are not treated separately, but + * we relay on deadlines to ensure fairness. + * + */ +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/version.h> +#include <linux/slab.h> + +enum { ASYNC, SYNC }; + +/* Tunables */ +static const int sync_read_expire = HZ / 2; /* max time before a sync read is submitted. */ +static const int sync_write_expire = 2 * HZ; /* max time before a sync write is submitted. */ + +static const int async_read_expire = 4 * HZ; /* ditto for async, these limits are SOFT! */ +static const int async_write_expire = 16 * HZ; /* ditto for async, these limits are SOFT! */ + +static const int writes_starved = 2; /* max times reads can starve a write */ +static const int fifo_batch = 8; /* # of sequential requests treated as one + by the above parameters. For throughput. */ + +/* Elevator data */ +struct sio_data { + /* Request queues */ + struct list_head fifo_list[2][2]; + + /* Attributes */ + unsigned int batched; + unsigned int starved; + + /* Settings */ + int fifo_expire[2][2]; + int fifo_batch; + int writes_starved; +}; + +static void +sio_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * If next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo. + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, (unsigned long)rq->fifo_time)) { + list_move(&rq->queuelist, &next->queuelist); + rq->fifo_time = next->fifo_time; + } + } + + /* Delete next request */ + rq_fifo_clear(next); +} + +static void +sio_add_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + /* + * Add request to the proper fifo list and set its + * expire time. 
+ */ + rq->fifo_time = jiffies + sd->fifo_expire[sync][data_dir]; + list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) +static int +sio_queue_empty(struct request_queue *q) +{ + struct sio_data *sd = q->elevator->elevator_data; + + /* Check if fifo lists are empty */ + return list_empty(&sd->fifo_list[SYNC][READ]) && list_empty(&sd->fifo_list[SYNC][WRITE]) && + list_empty(&sd->fifo_list[ASYNC][READ]) && list_empty(&sd->fifo_list[ASYNC][WRITE]); +} +#endif + +static struct request * +sio_expired_request(struct sio_data *sd, int sync, int data_dir) +{ + struct list_head *list = &sd->fifo_list[sync][data_dir]; + struct request *rq; + + if (list_empty(list)) + return NULL; + + /* Retrieve request */ + rq = rq_entry_fifo(list->next); + + /* Request has expired */ + if (time_after(jiffies, (unsigned long)rq->fifo_time)) + return rq; + + return NULL; +} + +static struct request * +sio_choose_expired_request(struct sio_data *sd) +{ + struct request *rq; + + /* + * Check expired requests. + * Asynchronous requests have priority over synchronous. + * Write requests have priority over read. + */ + rq = sio_expired_request(sd, ASYNC, WRITE); + if (rq) + return rq; + rq = sio_expired_request(sd, ASYNC, READ); + if (rq) + return rq; + + rq = sio_expired_request(sd, SYNC, WRITE); + if (rq) + return rq; + rq = sio_expired_request(sd, SYNC, READ); + if (rq) + return rq; + + return NULL; +} + +static struct request * +sio_choose_request(struct sio_data *sd, int data_dir) +{ + struct list_head *sync = sd->fifo_list[SYNC]; + struct list_head *async = sd->fifo_list[ASYNC]; + + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. + * Read requests have priority over write. + */ + if (!list_empty(&sync[data_dir])) + return rq_entry_fifo(sync[data_dir].next); + if (!list_empty(&async[data_dir])) + return rq_entry_fifo(async[data_dir].next); + + if (!list_empty(&sync[!data_dir])) + return rq_entry_fifo(sync[!data_dir].next); + if (!list_empty(&async[!data_dir])) + return rq_entry_fifo(async[!data_dir].next); + + return NULL; +} + +static inline void +sio_dispatch_request(struct sio_data *sd, struct request *rq) +{ + /* + * Remove the request from the fifo list + * and dispatch it. + */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + sd->batched++; + + if (rq_data_dir(rq)) + sd->starved = 0; + else + sd->starved++; +} + +static int +sio_dispatch_requests(struct request_queue *q, int force) +{ + struct sio_data *sd = q->elevator->elevator_data; + struct request *rq = NULL; + int data_dir = READ; + + /* + * Retrieve any expired request after a batch of + * sequential requests. 
+ */ + if (sd->batched > sd->fifo_batch) { + sd->batched = 0; + rq = sio_choose_expired_request(sd); + } + + /* Retrieve request */ + if (!rq) { + if (sd->starved > sd->writes_starved) + data_dir = WRITE; + + rq = sio_choose_request(sd, data_dir); + if (!rq) + return 0; + } + + /* Dispatch request */ + sio_dispatch_request(sd, rq); + + return 1; +} + +static struct request * +sio_former_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir]) + return NULL; + + /* Return former request */ + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +sio_latter_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + const int data_dir = rq_data_dir(rq); + + if (rq->queuelist.next == &sd->fifo_list[sync][data_dir]) + return NULL; + + /* Return latter request */ + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static int sio_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct sio_data *sd; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + /* Allocate structure */ + sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node); + if (!sd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = sd; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + /* Initialize fifo lists */ + INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]); + INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]); + + /* Initialize data */ + sd->batched = 0; + sd->fifo_expire[SYNC][READ] = sync_read_expire; + sd->fifo_expire[SYNC][WRITE] = sync_write_expire; + sd->fifo_expire[ASYNC][READ] = async_read_expire; + sd->fifo_expire[ASYNC][WRITE] = async_write_expire; + sd->fifo_batch = fifo_batch; + + return 0; +} + +static void +sio_exit_queue(struct elevator_queue *e) +{ + struct sio_data *sd = e->elevator_data; + + BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ])); + BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE])); + + /* Free structure */ + kfree(sd); +} + +/* + * sysfs code + */ + +static ssize_t +sio_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +sio_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return sio_var_show(__data, (page)); \ +} +SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1); +SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1); +SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1); +SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1); +SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0); +SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, 
__CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data; \ + int ret = sio_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0); +STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \ + sio_##name##_store) + +static struct elv_fs_entry sio_attrs[] = { + DD_ATTR(sync_read_expire), + DD_ATTR(sync_write_expire), + DD_ATTR(async_read_expire), + DD_ATTR(async_write_expire), + DD_ATTR(fifo_batch), + DD_ATTR(writes_starved), + __ATTR_NULL +}; + +static struct elevator_type iosched_sio = { + .ops.sq = { + .elevator_merge_req_fn = sio_merged_requests, + .elevator_dispatch_fn = sio_dispatch_requests, + .elevator_add_req_fn = sio_add_request, +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) + .elevator_queue_empty_fn = sio_queue_empty, +#endif + .elevator_former_req_fn = sio_former_request, + .elevator_latter_req_fn = sio_latter_request, + .elevator_init_fn = sio_init_queue, + .elevator_exit_fn = sio_exit_queue, + }, + + .elevator_attrs = sio_attrs, + .elevator_name = "sio", + .elevator_owner = THIS_MODULE, +}; + +static int __init sio_init(void) +{ + /* Register elevator */ + elv_register(&iosched_sio); + + return 0; +} + +static void __exit sio_exit(void) +{ + /* Unregister elevator */ + elv_unregister(&iosched_sio); +} + +module_init(sio_init); +module_exit(sio_exit); + +MODULE_AUTHOR("Miguel Boton"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple IO scheduler"); +MODULE_VERSION("0.2"); + From 50dfc1f70563108955c77c2e182761407042dd54 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 6 Apr 2020 10:35:16 +0300 Subject: [PATCH 287/452] block: Add ZEN I/O Scheduler Signed-off-by: Denis Efremov <efremov@linux.com> --- block/Kconfig.iosched | 11 ++ block/Makefile | 1 + block/zen-iosched.c | 288 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 300 insertions(+) create mode 100644 block/zen-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 02ace3ceb2b0..ff662fa22fe1 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -62,6 +62,13 @@ config IOSCHED_SIO basic merging, trying to keep a minimum overhead. It is aimed mainly for aleatory access devices (eg: flash devices). +config IOSCHED_ZEN + tristate "Zen I/O scheduler" + default n + ---help--- + FCFS, dispatches are back-inserted, deadlines ensure fairness. + Should work best with devices where there is no travel delay. 
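The two-sentence help text above is dense, so a quick illustration may help. The standalone C fragment below is only a sketch of the policy it describes (two FIFOs, plain FCFS dispatch, deadlines checked after each batch); the names and the flattened request model are invented here, and the authoritative implementation is the block/zen-iosched.c file added later in this patch.

/* Illustrative sketch only: a simplified model of the ZEN dispatch policy
 * described above. The names, the flat FIFO model and the "now" counter are
 * hypothetical and do not appear in block/zen-iosched.c. */
#include <stddef.h>

enum { ASYNC, SYNC };

struct req {
	unsigned long deadline;		/* absolute expiry time of the request */
	struct req *next;
};

struct fifo {
	struct req *head;		/* oldest request first; NULL when empty */
};

static struct req *expired(const struct fifo *f, unsigned long now)
{
	/* the oldest request sits at the head; it has expired once "now" passes its deadline */
	if (f->head && now > f->head->deadline)
		return f->head;
	return NULL;
}

/*
 * One dispatch decision: after a full batch, an expired request wins (the one
 * with the older deadline); otherwise plain FCFS with sync preferred over async.
 */
static struct req *zen_pick(struct fifo *q, unsigned long now,
			    unsigned int *batching, unsigned int fifo_batch)
{
	struct req *rq = NULL;

	if (*batching > fifo_batch) {
		struct req *s = expired(&q[SYNC], now);
		struct req *a = expired(&q[ASYNC], now);

		*batching = 0;
		if (s && a)
			rq = s->deadline <= a->deadline ? s : a;
		else
			rq = s ? s : a;
	}
	if (!rq)
		rq = q[SYNC].head ? q[SYNC].head : q[ASYNC].head;
	if (rq)
		(*batching)++;
	return rq;
}

In the real driver the chosen request is additionally unlinked from its FIFO and back-inserted on the device dispatch list via elv_dispatch_add_tail(), which is where the "dispatches are back-inserted" wording comes from.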
+ choice prompt "Default I/O scheduler" @@ -85,6 +92,9 @@ choice config DEFAULT_SIO bool "SIO" if IOSCHED_SIO=y + config DEFAULT_ZEN + bool "ZEN" if IOSCHED_ZEN=y + config DEFAULT_NOOP bool "No-op" @@ -97,6 +107,7 @@ config DEFAULT_IOSCHED default "maple" if DEFAULT_MAPLE default "fiops" if DEFAULT_FIOPS default "sio" if DEFAULT_SIO + default "zen" if DEFAULT_ZEN default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index 25320b87d941..96179eddd688 100644 --- a/block/Makefile +++ b/block/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o +obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/zen-iosched.c b/block/zen-iosched.c new file mode 100644 index 000000000000..80af3ef83243 --- /dev/null +++ b/block/zen-iosched.c @@ -0,0 +1,288 @@ +/* + * Zen IO scheduler + * Primarily based on Noop, deadline, and SIO IO schedulers. + * + * Copyright (C) 2012 Brandon Berhent <bbedward@gmail.com> + * + * FCFS, dispatches are back-inserted, deadlines ensure fairness. + * Should work best with devices where there is no travel delay. + */ +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +enum zen_data_dir { ASYNC, SYNC }; + +static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */ +static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */ +static const int fifo_batch = 1; + +struct zen_data { + /* Runtime Data */ + /* Requests are only present on fifo_list */ + struct list_head fifo_list[2]; + + unsigned int batching; /* number of sequential requests made */ + + /* tunables */ + int fifo_expire[2]; + int fifo_batch; +}; + +static inline struct zen_data * +zen_get_data(struct request_queue *q) { + return q->elevator->elevator_data; +} + +static void zen_dispatch(struct zen_data *, struct request *); + +static void +zen_merged_requests(struct request_queue *q, struct request *req, + struct request *next) +{ + /* + * if next expires before rq, assign its expire time to arq + * and move into next position (next will be deleted) in fifo + */ + if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { + if (time_before((unsigned long)next->fifo_time, (unsigned long)req->fifo_time)) { + list_move(&req->queuelist, &next->queuelist); + req->fifo_time = next->fifo_time; + } + } + + /* next request is gone */ + rq_fifo_clear(next); +} + +static void zen_add_request(struct request_queue *q, struct request *rq) +{ + struct zen_data *zdata = zen_get_data(q); + const int sync = rq_is_sync(rq); + + if (zdata->fifo_expire[sync]) { + rq->fifo_time = jiffies + zdata->fifo_expire[sync]; + list_add_tail(&rq->queuelist, &zdata->fifo_list[sync]); + } +} + +static void zen_dispatch(struct zen_data *zdata, struct request *rq) +{ + /* Remove request from list and dispatch it */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + /* Increment # of sequential requests */ + zdata->batching++; +} + +/* + * get the first expired request in direction ddir + */ +static struct request * +zen_expired_request(struct zen_data *zdata, int ddir) +{ + struct request *rq; + + if 
(list_empty(&zdata->fifo_list[ddir])) + return NULL; + + rq = rq_entry_fifo(zdata->fifo_list[ddir].next); + if (time_after(jiffies, (unsigned long)rq->fifo_time)) + return rq; + + return NULL; +} + +/* + * zen_check_fifo returns 0 if there are no expired requests on the fifo, + * otherwise it returns the next expired request + */ +static struct request * +zen_check_fifo(struct zen_data *zdata) +{ + struct request *rq_sync = zen_expired_request(zdata, SYNC); + struct request *rq_async = zen_expired_request(zdata, ASYNC); + + if (rq_async && rq_sync) { + if (time_after((unsigned long)rq_async->fifo_time, (unsigned long)rq_sync->fifo_time)) + return rq_sync; + } else if (rq_sync) { + return rq_sync; + } else if (rq_async) { + return rq_async; + } + + return 0; +} + +static struct request * +zen_choose_request(struct zen_data *zdata) +{ + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. + */ + if (!list_empty(&zdata->fifo_list[SYNC])) + return rq_entry_fifo(zdata->fifo_list[SYNC].next); + if (!list_empty(&zdata->fifo_list[ASYNC])) + return rq_entry_fifo(zdata->fifo_list[ASYNC].next); + + return NULL; +} + +static int zen_dispatch_requests(struct request_queue *q, int force) +{ + struct zen_data *zdata = zen_get_data(q); + struct request *rq = NULL; + + /* Check for and issue expired requests */ + if (zdata->batching > zdata->fifo_batch) { + zdata->batching = 0; + rq = zen_check_fifo(zdata); + } + + if (!rq) { + rq = zen_choose_request(zdata); + if (!rq) + return 0; + } + + zen_dispatch(zdata, rq); + + return 1; +} + +static int zen_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct zen_data *zdata; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + zdata = kmalloc_node(sizeof(*zdata), GFP_KERNEL, q->node); + if (!zdata) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = zdata; + + INIT_LIST_HEAD(&zdata->fifo_list[SYNC]); + INIT_LIST_HEAD(&zdata->fifo_list[ASYNC]); + zdata->fifo_expire[SYNC] = sync_expire; + zdata->fifo_expire[ASYNC] = async_expire; + zdata->fifo_batch = fifo_batch; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + return 0; +} + +static void zen_exit_queue(struct elevator_queue *e) +{ + struct zen_data *zdata = e->elevator_data; + + BUG_ON(!list_empty(&zdata->fifo_list[SYNC])); + BUG_ON(!list_empty(&zdata->fifo_list[ASYNC])); + kfree(zdata); +} + +/* Sysfs */ +static ssize_t +zen_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +zen_var_store(int *var, const char *page, size_t count) +{ + *var = simple_strtol(page, NULL, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct zen_data *zdata = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return zen_var_show(__data, (page)); \ +} +SHOW_FUNCTION(zen_sync_expire_show, zdata->fifo_expire[SYNC], 1); +SHOW_FUNCTION(zen_async_expire_show, zdata->fifo_expire[ASYNC], 1); +SHOW_FUNCTION(zen_fifo_batch_show, zdata->fifo_batch, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct zen_data *zdata = e->elevator_data; \ + int __data; \ + int ret = zen_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if 
(__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(zen_sync_expire_store, &zdata->fifo_expire[SYNC], 0, INT_MAX, 1); +STORE_FUNCTION(zen_async_expire_store, &zdata->fifo_expire[ASYNC], 0, INT_MAX, 1); +STORE_FUNCTION(zen_fifo_batch_store, &zdata->fifo_batch, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, zen_##name##_show, \ + zen_##name##_store) + +static struct elv_fs_entry zen_attrs[] = { + DD_ATTR(sync_expire), + DD_ATTR(async_expire), + DD_ATTR(fifo_batch), + __ATTR_NULL +}; + +static struct elevator_type iosched_zen = { + .ops.sq = { + .elevator_merge_req_fn = zen_merged_requests, + .elevator_dispatch_fn = zen_dispatch_requests, + .elevator_add_req_fn = zen_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = zen_init_queue, + .elevator_exit_fn = zen_exit_queue, + }, + .elevator_attrs = zen_attrs, + .elevator_name = "zen", + .elevator_owner = THIS_MODULE, +}; + +static int __init zen_init(void) +{ + return elv_register(&iosched_zen); +} + +static void __exit zen_exit(void) +{ + elv_unregister(&iosched_zen); +} + +module_init(zen_init); +module_exit(zen_exit); + + +MODULE_AUTHOR("Brandon Berhent"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Zen IO scheduler"); +MODULE_VERSION("1.0"); From 694d7c7123be12613cccd5ddf22e1c7e4f0fe577 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 9 Apr 2020 00:59:33 +0300 Subject: [PATCH 288/452] block: Add Anxiety I/O scheduler Signed-off-by: Denis Efremov <efremov@linux.com> --- block/Kconfig.iosched | 12 ++ block/Makefile | 1 + block/anxiety-iosched.c | 255 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 268 insertions(+) create mode 100644 block/anxiety-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index ff662fa22fe1..57b77ecca2a5 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -69,6 +69,14 @@ config IOSCHED_ZEN FCFS, dispatches are back-inserted, deadlines ensure fairness. Should work best with devices where there is no travel delay. +config IOSCHED_ANXIETY + tristate "Anxiety I/O scheduler" + default n + ---help--- + The Anxiety I/O scheduler prioritizes latency over everything + else. When a request comes in, it will use a lighweight + selection algorithm to swiftly process the current pending task. 
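For readers skimming the Kconfig text, the batching behaviour hinted at here can be sketched in a few lines of plain C. This is only an approximation with made-up names and a trivial queue model; the authoritative version is anxiety_dispatch_batch() in block/anxiety-iosched.c, added below.

/* Illustrative sketch only: the batching policy described above, with
 * hypothetical names; the real logic lives in anxiety_dispatch_batch(). */
struct pending {
	int count;			/* stand-in for a request list */
};

static int dispatch_one(struct pending *q)
{
	if (q->count <= 0)
		return 0;
	q->count--;			/* "send" one request to the device */
	return 1;
}

/*
 * Each pass issues up to sync_ratio synchronous requests, then at most one
 * asynchronous request so async writes are never starved; the pass is
 * repeated batch_count times or until nothing is left to dispatch.
 */
static int dispatch_batch(struct pending *sync, struct pending *async,
			  int sync_ratio, int batch_count)
{
	int dispatched = 0;

	for (int i = 0; i < batch_count; i++) {
		int before = dispatched;

		for (int j = 0; j < sync_ratio && sync->count > 0; j++)
			dispatched += dispatch_one(sync);

		dispatched += dispatch_one(async);

		if (dispatched == before)	/* both queues drained */
			break;
	}
	return dispatched;
}

The single async dispatch per pass is the starvation guard: synchronous requests get the bulk of each batch, but an asynchronous request still goes out at least once per pass.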
+ choice prompt "Default I/O scheduler" @@ -95,6 +103,9 @@ choice config DEFAULT_ZEN bool "ZEN" if IOSCHED_ZEN=y + config DEFAULT_ANXIETY + bool "Anxiety" if IOSCHED_ANXIETY=y + config DEFAULT_NOOP bool "No-op" @@ -108,6 +119,7 @@ config DEFAULT_IOSCHED default "fiops" if DEFAULT_FIOPS default "sio" if DEFAULT_SIO default "zen" if DEFAULT_ZEN + default "anxiety" if DEFAULT_ANXIETY default "noop" if DEFAULT_NOOP config MQ_IOSCHED_DEADLINE diff --git a/block/Makefile b/block/Makefile index 96179eddd688..f9009a1aed6e 100644 --- a/block/Makefile +++ b/block/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_IOSCHED_MAPLE) += maple-iosched.o obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o obj-$(CONFIG_IOSCHED_ZEN) += zen-iosched.o +obj-$(CONFIG_IOSCHED_ANXIETY) += anxiety-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o diff --git a/block/anxiety-iosched.c b/block/anxiety-iosched.c new file mode 100644 index 000000000000..c7d818eff167 --- /dev/null +++ b/block/anxiety-iosched.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Anxiety I/O Scheduler + * + * Copyright (c) 2020, Tyler Nijmeh <tylernij@gmail.com> + */ + +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +/* Batch this many synchronous requests at a time */ +#define DEFAULT_SYNC_RATIO (8) + +/* Run each batch this many times*/ +#define DEFAULT_BATCH_COUNT (4) + +struct anxiety_data { + struct list_head sync_queue; + struct list_head async_queue; + + /* Tunables */ + uint8_t sync_ratio; + uint8_t batch_count; +}; + +static inline struct request *anxiety_next_entry(struct list_head *queue) +{ + return list_first_entry(queue, struct request, + queuelist); +} + +static void anxiety_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + list_del_init(&next->queuelist); +} + +static inline int __anxiety_dispatch(struct request_queue *q, + struct request *rq) +{ + if (unlikely(!rq)) + return -EINVAL; + + list_del_init(&rq->queuelist); + elv_dispatch_add_tail(q, rq); + + return 0; +} + +static uint16_t anxiety_dispatch_batch(struct request_queue *q) +{ + struct anxiety_data *adata = q->elevator->elevator_data; + uint8_t i, j; + uint16_t dispatched = 0; + int ret; + + /* Perform each batch adata->batch_count many times */ + for (i = 0; i < adata->batch_count; i++) { + /* Batch sync requests according to tunables */ + for (j = 0; j < adata->sync_ratio; j++) { + if (list_empty(&adata->sync_queue)) + break; + + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->sync_queue)); + + if (!ret) + dispatched++; + } + + /* Submit one async request after the sync batch to avoid starvation */ + if (!list_empty(&adata->async_queue)) { + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->async_queue)); + + if (!ret) + dispatched++; + } + + /* If we didn't have anything to dispatch; don't batch again */ + if (!dispatched) + break; + } + + return dispatched; +} + +static uint16_t anxiety_dispatch_drain(struct request_queue *q) +{ + struct anxiety_data *adata = q->elevator->elevator_data; + uint16_t dispatched = 0; + int ret; + + /* + * Drain out all of the synchronous requests first, + * then drain the asynchronous requests. 
+ */ + while (!list_empty(&adata->sync_queue)) { + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->sync_queue)); + + if (!ret) + dispatched++; + } + + while (!list_empty(&adata->async_queue)) { + ret = __anxiety_dispatch(q, + anxiety_next_entry(&adata->async_queue)); + + if (!ret) + dispatched++; + } + + return dispatched; +} + +static int anxiety_dispatch(struct request_queue *q, int force) +{ + /* + * When requested by the elevator, a full queue drain can be + * performed in one scheduler dispatch. + */ + if (unlikely(force)) + return anxiety_dispatch_drain(q); + + return anxiety_dispatch_batch(q); +} + +static void anxiety_add_request(struct request_queue *q, struct request *rq) +{ + struct anxiety_data *adata = q->elevator->elevator_data; + + list_add_tail(&rq->queuelist, + rq_is_sync(rq) ? &adata->sync_queue : &adata->async_queue); +} + +static int anxiety_init_queue(struct request_queue *q, + struct elevator_type *elv) +{ + struct anxiety_data *adata; + struct elevator_queue *eq = elevator_alloc(q, elv); + + if (!eq) + return -ENOMEM; + + /* Allocate the data */ + adata = kmalloc_node(sizeof(*adata), GFP_KERNEL, q->node); + if (!adata) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + + /* Set the elevator data */ + eq->elevator_data = adata; + + /* Initialize */ + INIT_LIST_HEAD(&adata->sync_queue); + INIT_LIST_HEAD(&adata->async_queue); + adata->sync_ratio = DEFAULT_SYNC_RATIO; + adata->batch_count = DEFAULT_BATCH_COUNT; + + /* Set elevator to Anxiety */ + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + return 0; +} + +/* Sysfs access */ +static ssize_t anxiety_sync_ratio_show(struct elevator_queue *e, char *page) +{ + struct anxiety_data *adata = e->elevator_data; + + return snprintf(page, PAGE_SIZE, "%u\n", adata->sync_ratio); +} + +static ssize_t anxiety_sync_ratio_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct anxiety_data *adata = e->elevator_data; + int ret; + + ret = kstrtou8(page, 0, &adata->sync_ratio); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t anxiety_batch_count_show(struct elevator_queue *e, char *page) +{ + struct anxiety_data *adata = e->elevator_data; + + return snprintf(page, PAGE_SIZE, "%u\n", adata->batch_count); +} + +static ssize_t anxiety_batch_count_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct anxiety_data *adata = e->elevator_data; + int ret; + + ret = kstrtou8(page, 0, &adata->batch_count); + if (ret < 0) + return ret; + + if (adata->batch_count < 1) + adata->batch_count = 1; + + return count; +} + +static struct elv_fs_entry anxiety_attrs[] = { + __ATTR(sync_ratio, 0644, anxiety_sync_ratio_show, + anxiety_sync_ratio_store), + __ATTR(batch_count, 0644, anxiety_batch_count_show, + anxiety_batch_count_store), + __ATTR_NULL +}; + +static struct elevator_type elevator_anxiety = { + .ops.sq = { + .elevator_merge_req_fn = anxiety_merged_requests, + .elevator_dispatch_fn = anxiety_dispatch, + .elevator_add_req_fn = anxiety_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = anxiety_init_queue, + }, + .elevator_name = "anxiety", + .elevator_attrs = anxiety_attrs, + .elevator_owner = THIS_MODULE, +}; + +static int __init anxiety_init(void) +{ + return elv_register(&elevator_anxiety); +} + +static void __exit anxiety_exit(void) +{ + elv_unregister(&elevator_anxiety); +} + +module_init(anxiety_init); +module_exit(anxiety_exit); + 
+MODULE_AUTHOR("Tyler Nijmeh"); +MODULE_LICENSE("GPLv3"); +MODULE_DESCRIPTION("Anxiety I/O scheduler"); From 3e2fc013f59104d176695ce0bb17330f48abcbe8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 17:42:44 +0300 Subject: [PATCH 289/452] fs: add reiser4 filesystem Signed-off-by: Denis Efremov <efremov@linux.com> --- Documentation/filesystems/reiser4.txt | 75 + Documentation/process/changes.rst | 12 + fs/Kconfig | 1 + fs/Makefile | 1 + fs/fs-writeback.c | 105 +- fs/read_write.c | 18 +- fs/reiser4/Kconfig | 36 + fs/reiser4/Makefile | 105 + fs/reiser4/README | 128 + fs/reiser4/as_ops.c | 348 ++ fs/reiser4/block_alloc.c | 1176 +++++ fs/reiser4/block_alloc.h | 177 + fs/reiser4/blocknrlist.c | 336 ++ fs/reiser4/blocknrset.c | 399 ++ fs/reiser4/carry.c | 1408 ++++++ fs/reiser4/carry.h | 445 ++ fs/reiser4/carry_ops.c | 2136 ++++++++++ fs/reiser4/carry_ops.h | 43 + fs/reiser4/checksum.c | 33 + fs/reiser4/checksum.h | 39 + fs/reiser4/context.c | 288 ++ fs/reiser4/context.h | 233 + fs/reiser4/coord.c | 928 ++++ fs/reiser4/coord.h | 399 ++ fs/reiser4/debug.c | 309 ++ fs/reiser4/debug.h | 353 ++ fs/reiser4/dformat.h | 73 + fs/reiser4/discard.c | 179 + fs/reiser4/discard.h | 42 + fs/reiser4/dscale.c | 192 + fs/reiser4/dscale.h | 28 + fs/reiser4/entd.c | 361 ++ fs/reiser4/entd.h | 90 + fs/reiser4/eottl.c | 510 +++ fs/reiser4/estimate.c | 129 + fs/reiser4/export_ops.c | 325 ++ fs/reiser4/flush.c | 3522 +++++++++++++++ fs/reiser4/flush.h | 290 ++ fs/reiser4/flush_queue.c | 677 +++ fs/reiser4/forward.h | 259 ++ fs/reiser4/fsdata.c | 801 ++++ fs/reiser4/fsdata.h | 203 + fs/reiser4/init_super.c | 806 ++++ fs/reiser4/inode.c | 711 +++ fs/reiser4/inode.h | 506 +++ fs/reiser4/ioctl.h | 41 + fs/reiser4/jnode.c | 1905 +++++++++ fs/reiser4/jnode.h | 704 +++ fs/reiser4/kassign.c | 677 +++ fs/reiser4/kassign.h | 111 + fs/reiser4/key.c | 138 + fs/reiser4/key.h | 392 ++ fs/reiser4/ktxnmgrd.c | 215 + fs/reiser4/ktxnmgrd.h | 52 + fs/reiser4/lock.c | 1237 ++++++ fs/reiser4/lock.h | 250 ++ fs/reiser4/oid.c | 141 + fs/reiser4/page_cache.c | 691 +++ fs/reiser4/page_cache.h | 64 + fs/reiser4/plugin/Makefile | 26 + fs/reiser4/plugin/cluster.c | 72 + fs/reiser4/plugin/cluster.h | 410 ++ fs/reiser4/plugin/compress/Makefile | 5 + fs/reiser4/plugin/compress/compress.c | 521 +++ fs/reiser4/plugin/compress/compress.h | 44 + fs/reiser4/plugin/compress/compress_mode.c | 162 + fs/reiser4/plugin/compress/lzoconf.h | 216 + fs/reiser4/plugin/compress/minilzo.c | 1967 +++++++++ fs/reiser4/plugin/compress/minilzo.h | 70 + fs/reiser4/plugin/crypto/cipher.c | 37 + fs/reiser4/plugin/crypto/cipher.h | 55 + fs/reiser4/plugin/crypto/digest.c | 58 + fs/reiser4/plugin/dir/Makefile | 5 + fs/reiser4/plugin/dir/dir.h | 36 + fs/reiser4/plugin/dir/hashed_dir.c | 81 + fs/reiser4/plugin/dir/seekable_dir.c | 46 + fs/reiser4/plugin/dir_plugin_common.c | 865 ++++ fs/reiser4/plugin/disk_format/Makefile | 5 + fs/reiser4/plugin/disk_format/disk_format.c | 38 + fs/reiser4/plugin/disk_format/disk_format.h | 27 + fs/reiser4/plugin/disk_format/disk_format40.c | 664 +++ fs/reiser4/plugin/disk_format/disk_format40.h | 111 + fs/reiser4/plugin/fibration.c | 175 + fs/reiser4/plugin/fibration.h | 37 + fs/reiser4/plugin/file/Makefile | 7 + fs/reiser4/plugin/file/cryptcompress.c | 3797 +++++++++++++++++ fs/reiser4/plugin/file/cryptcompress.h | 619 +++ fs/reiser4/plugin/file/file.c | 2796 ++++++++++++ fs/reiser4/plugin/file/file.h | 322 ++ fs/reiser4/plugin/file/file_conversion.c | 755 ++++ fs/reiser4/plugin/file/invert.c | 493 +++ 
fs/reiser4/plugin/file/symfile.c | 87 + fs/reiser4/plugin/file/symlink.c | 95 + fs/reiser4/plugin/file/tail_conversion.c | 763 ++++ fs/reiser4/plugin/file_ops.c | 119 + fs/reiser4/plugin/file_ops_readdir.c | 658 +++ fs/reiser4/plugin/file_plugin_common.c | 1004 +++++ fs/reiser4/plugin/hash.c | 352 ++ fs/reiser4/plugin/inode_ops.c | 891 ++++ fs/reiser4/plugin/inode_ops_rename.c | 958 +++++ fs/reiser4/plugin/item/Makefile | 18 + fs/reiser4/plugin/item/acl.h | 66 + fs/reiser4/plugin/item/blackbox.c | 142 + fs/reiser4/plugin/item/blackbox.h | 33 + fs/reiser4/plugin/item/cde.c | 1004 +++++ fs/reiser4/plugin/item/cde.h | 87 + fs/reiser4/plugin/item/ctail.c | 1769 ++++++++ fs/reiser4/plugin/item/ctail.h | 102 + fs/reiser4/plugin/item/extent.c | 197 + fs/reiser4/plugin/item/extent.h | 231 + fs/reiser4/plugin/item/extent_file_ops.c | 1434 +++++++ fs/reiser4/plugin/item/extent_flush_ops.c | 686 +++ fs/reiser4/plugin/item/extent_item_ops.c | 887 ++++ fs/reiser4/plugin/item/internal.c | 404 ++ fs/reiser4/plugin/item/internal.h | 57 + fs/reiser4/plugin/item/item.c | 719 ++++ fs/reiser4/plugin/item/item.h | 398 ++ fs/reiser4/plugin/item/sde.c | 186 + fs/reiser4/plugin/item/sde.h | 66 + fs/reiser4/plugin/item/static_stat.c | 1114 +++++ fs/reiser4/plugin/item/static_stat.h | 224 + fs/reiser4/plugin/item/tail.c | 810 ++++ fs/reiser4/plugin/item/tail.h | 59 + fs/reiser4/plugin/node/Makefile | 6 + fs/reiser4/plugin/node/node.c | 170 + fs/reiser4/plugin/node/node.h | 275 ++ fs/reiser4/plugin/node/node40.c | 3073 +++++++++++++ fs/reiser4/plugin/node/node40.h | 130 + fs/reiser4/plugin/node/node41.c | 137 + fs/reiser4/plugin/node/node41.h | 50 + fs/reiser4/plugin/object.c | 553 +++ fs/reiser4/plugin/object.h | 117 + fs/reiser4/plugin/plugin.c | 569 +++ fs/reiser4/plugin/plugin.h | 999 +++++ fs/reiser4/plugin/plugin_header.h | 150 + fs/reiser4/plugin/plugin_set.c | 387 ++ fs/reiser4/plugin/plugin_set.h | 78 + fs/reiser4/plugin/regular.c | 44 + fs/reiser4/plugin/security/Makefile | 4 + fs/reiser4/plugin/security/perm.c | 33 + fs/reiser4/plugin/security/perm.h | 38 + fs/reiser4/plugin/space/Makefile | 4 + fs/reiser4/plugin/space/bitmap.c | 1609 +++++++ fs/reiser4/plugin/space/bitmap.h | 47 + fs/reiser4/plugin/space/space_allocator.h | 80 + fs/reiser4/plugin/tail_policy.c | 113 + fs/reiser4/plugin/txmod.c | 1238 ++++++ fs/reiser4/pool.c | 231 + fs/reiser4/pool.h | 57 + fs/reiser4/readahead.c | 140 + fs/reiser4/readahead.h | 42 + fs/reiser4/reiser4.h | 260 ++ fs/reiser4/safe_link.c | 354 ++ fs/reiser4/safe_link.h | 29 + fs/reiser4/seal.c | 219 + fs/reiser4/seal.h | 49 + fs/reiser4/search.c | 1612 +++++++ fs/reiser4/status_flags.c | 180 + fs/reiser4/status_flags.h | 47 + fs/reiser4/super.c | 306 ++ fs/reiser4/super.h | 472 ++ fs/reiser4/super_ops.c | 783 ++++ fs/reiser4/tap.c | 376 ++ fs/reiser4/tap.h | 70 + fs/reiser4/tree.c | 1884 ++++++++ fs/reiser4/tree.h | 577 +++ fs/reiser4/tree_mod.c | 387 ++ fs/reiser4/tree_mod.h | 29 + fs/reiser4/tree_walk.c | 927 ++++ fs/reiser4/tree_walk.h | 125 + fs/reiser4/txnmgr.c | 3163 ++++++++++++++ fs/reiser4/txnmgr.h | 755 ++++ fs/reiser4/type_safe_hash.h | 320 ++ fs/reiser4/vfs_ops.c | 260 ++ fs/reiser4/vfs_ops.h | 60 + fs/reiser4/wander.c | 1757 ++++++++ fs/reiser4/wander.h | 135 + fs/reiser4/writeout.h | 21 + fs/reiser4/znode.c | 1027 +++++ fs/reiser4/znode.h | 435 ++ include/linux/fs.h | 21 + include/linux/mm.h | 1 + include/linux/sched.h | 1 + include/linux/writeback.h | 26 + mm/filemap.c | 1 + mm/page-writeback.c | 29 + mm/vmscan.c | 6 + 187 files changed, 83026 insertions(+), 
47 deletions(-) create mode 100644 Documentation/filesystems/reiser4.txt create mode 100644 fs/reiser4/Kconfig create mode 100644 fs/reiser4/Makefile create mode 100644 fs/reiser4/README create mode 100644 fs/reiser4/as_ops.c create mode 100644 fs/reiser4/block_alloc.c create mode 100644 fs/reiser4/block_alloc.h create mode 100644 fs/reiser4/blocknrlist.c create mode 100644 fs/reiser4/blocknrset.c create mode 100644 fs/reiser4/carry.c create mode 100644 fs/reiser4/carry.h create mode 100644 fs/reiser4/carry_ops.c create mode 100644 fs/reiser4/carry_ops.h create mode 100644 fs/reiser4/checksum.c create mode 100644 fs/reiser4/checksum.h create mode 100644 fs/reiser4/context.c create mode 100644 fs/reiser4/context.h create mode 100644 fs/reiser4/coord.c create mode 100644 fs/reiser4/coord.h create mode 100644 fs/reiser4/debug.c create mode 100644 fs/reiser4/debug.h create mode 100644 fs/reiser4/dformat.h create mode 100644 fs/reiser4/discard.c create mode 100644 fs/reiser4/discard.h create mode 100644 fs/reiser4/dscale.c create mode 100644 fs/reiser4/dscale.h create mode 100644 fs/reiser4/entd.c create mode 100644 fs/reiser4/entd.h create mode 100644 fs/reiser4/eottl.c create mode 100644 fs/reiser4/estimate.c create mode 100644 fs/reiser4/export_ops.c create mode 100644 fs/reiser4/flush.c create mode 100644 fs/reiser4/flush.h create mode 100644 fs/reiser4/flush_queue.c create mode 100644 fs/reiser4/forward.h create mode 100644 fs/reiser4/fsdata.c create mode 100644 fs/reiser4/fsdata.h create mode 100644 fs/reiser4/init_super.c create mode 100644 fs/reiser4/inode.c create mode 100644 fs/reiser4/inode.h create mode 100644 fs/reiser4/ioctl.h create mode 100644 fs/reiser4/jnode.c create mode 100644 fs/reiser4/jnode.h create mode 100644 fs/reiser4/kassign.c create mode 100644 fs/reiser4/kassign.h create mode 100644 fs/reiser4/key.c create mode 100644 fs/reiser4/key.h create mode 100644 fs/reiser4/ktxnmgrd.c create mode 100644 fs/reiser4/ktxnmgrd.h create mode 100644 fs/reiser4/lock.c create mode 100644 fs/reiser4/lock.h create mode 100644 fs/reiser4/oid.c create mode 100644 fs/reiser4/page_cache.c create mode 100644 fs/reiser4/page_cache.h create mode 100644 fs/reiser4/plugin/Makefile create mode 100644 fs/reiser4/plugin/cluster.c create mode 100644 fs/reiser4/plugin/cluster.h create mode 100644 fs/reiser4/plugin/compress/Makefile create mode 100644 fs/reiser4/plugin/compress/compress.c create mode 100644 fs/reiser4/plugin/compress/compress.h create mode 100644 fs/reiser4/plugin/compress/compress_mode.c create mode 100644 fs/reiser4/plugin/compress/lzoconf.h create mode 100644 fs/reiser4/plugin/compress/minilzo.c create mode 100644 fs/reiser4/plugin/compress/minilzo.h create mode 100644 fs/reiser4/plugin/crypto/cipher.c create mode 100644 fs/reiser4/plugin/crypto/cipher.h create mode 100644 fs/reiser4/plugin/crypto/digest.c create mode 100644 fs/reiser4/plugin/dir/Makefile create mode 100644 fs/reiser4/plugin/dir/dir.h create mode 100644 fs/reiser4/plugin/dir/hashed_dir.c create mode 100644 fs/reiser4/plugin/dir/seekable_dir.c create mode 100644 fs/reiser4/plugin/dir_plugin_common.c create mode 100644 fs/reiser4/plugin/disk_format/Makefile create mode 100644 fs/reiser4/plugin/disk_format/disk_format.c create mode 100644 fs/reiser4/plugin/disk_format/disk_format.h create mode 100644 fs/reiser4/plugin/disk_format/disk_format40.c create mode 100644 fs/reiser4/plugin/disk_format/disk_format40.h create mode 100644 fs/reiser4/plugin/fibration.c create mode 100644 fs/reiser4/plugin/fibration.h create 
mode 100644 fs/reiser4/plugin/file/Makefile create mode 100644 fs/reiser4/plugin/file/cryptcompress.c create mode 100644 fs/reiser4/plugin/file/cryptcompress.h create mode 100644 fs/reiser4/plugin/file/file.c create mode 100644 fs/reiser4/plugin/file/file.h create mode 100644 fs/reiser4/plugin/file/file_conversion.c create mode 100644 fs/reiser4/plugin/file/invert.c create mode 100644 fs/reiser4/plugin/file/symfile.c create mode 100644 fs/reiser4/plugin/file/symlink.c create mode 100644 fs/reiser4/plugin/file/tail_conversion.c create mode 100644 fs/reiser4/plugin/file_ops.c create mode 100644 fs/reiser4/plugin/file_ops_readdir.c create mode 100644 fs/reiser4/plugin/file_plugin_common.c create mode 100644 fs/reiser4/plugin/hash.c create mode 100644 fs/reiser4/plugin/inode_ops.c create mode 100644 fs/reiser4/plugin/inode_ops_rename.c create mode 100644 fs/reiser4/plugin/item/Makefile create mode 100644 fs/reiser4/plugin/item/acl.h create mode 100644 fs/reiser4/plugin/item/blackbox.c create mode 100644 fs/reiser4/plugin/item/blackbox.h create mode 100644 fs/reiser4/plugin/item/cde.c create mode 100644 fs/reiser4/plugin/item/cde.h create mode 100644 fs/reiser4/plugin/item/ctail.c create mode 100644 fs/reiser4/plugin/item/ctail.h create mode 100644 fs/reiser4/plugin/item/extent.c create mode 100644 fs/reiser4/plugin/item/extent.h create mode 100644 fs/reiser4/plugin/item/extent_file_ops.c create mode 100644 fs/reiser4/plugin/item/extent_flush_ops.c create mode 100644 fs/reiser4/plugin/item/extent_item_ops.c create mode 100644 fs/reiser4/plugin/item/internal.c create mode 100644 fs/reiser4/plugin/item/internal.h create mode 100644 fs/reiser4/plugin/item/item.c create mode 100644 fs/reiser4/plugin/item/item.h create mode 100644 fs/reiser4/plugin/item/sde.c create mode 100644 fs/reiser4/plugin/item/sde.h create mode 100644 fs/reiser4/plugin/item/static_stat.c create mode 100644 fs/reiser4/plugin/item/static_stat.h create mode 100644 fs/reiser4/plugin/item/tail.c create mode 100644 fs/reiser4/plugin/item/tail.h create mode 100644 fs/reiser4/plugin/node/Makefile create mode 100644 fs/reiser4/plugin/node/node.c create mode 100644 fs/reiser4/plugin/node/node.h create mode 100644 fs/reiser4/plugin/node/node40.c create mode 100644 fs/reiser4/plugin/node/node40.h create mode 100644 fs/reiser4/plugin/node/node41.c create mode 100644 fs/reiser4/plugin/node/node41.h create mode 100644 fs/reiser4/plugin/object.c create mode 100644 fs/reiser4/plugin/object.h create mode 100644 fs/reiser4/plugin/plugin.c create mode 100644 fs/reiser4/plugin/plugin.h create mode 100644 fs/reiser4/plugin/plugin_header.h create mode 100644 fs/reiser4/plugin/plugin_set.c create mode 100644 fs/reiser4/plugin/plugin_set.h create mode 100644 fs/reiser4/plugin/regular.c create mode 100644 fs/reiser4/plugin/security/Makefile create mode 100644 fs/reiser4/plugin/security/perm.c create mode 100644 fs/reiser4/plugin/security/perm.h create mode 100644 fs/reiser4/plugin/space/Makefile create mode 100644 fs/reiser4/plugin/space/bitmap.c create mode 100644 fs/reiser4/plugin/space/bitmap.h create mode 100644 fs/reiser4/plugin/space/space_allocator.h create mode 100644 fs/reiser4/plugin/tail_policy.c create mode 100644 fs/reiser4/plugin/txmod.c create mode 100644 fs/reiser4/pool.c create mode 100644 fs/reiser4/pool.h create mode 100644 fs/reiser4/readahead.c create mode 100644 fs/reiser4/readahead.h create mode 100644 fs/reiser4/reiser4.h create mode 100644 fs/reiser4/safe_link.c create mode 100644 fs/reiser4/safe_link.h create mode 100644 
fs/reiser4/seal.c create mode 100644 fs/reiser4/seal.h create mode 100644 fs/reiser4/search.c create mode 100644 fs/reiser4/status_flags.c create mode 100644 fs/reiser4/status_flags.h create mode 100644 fs/reiser4/super.c create mode 100644 fs/reiser4/super.h create mode 100644 fs/reiser4/super_ops.c create mode 100644 fs/reiser4/tap.c create mode 100644 fs/reiser4/tap.h create mode 100644 fs/reiser4/tree.c create mode 100644 fs/reiser4/tree.h create mode 100644 fs/reiser4/tree_mod.c create mode 100644 fs/reiser4/tree_mod.h create mode 100644 fs/reiser4/tree_walk.c create mode 100644 fs/reiser4/tree_walk.h create mode 100644 fs/reiser4/txnmgr.c create mode 100644 fs/reiser4/txnmgr.h create mode 100644 fs/reiser4/type_safe_hash.h create mode 100644 fs/reiser4/vfs_ops.c create mode 100644 fs/reiser4/vfs_ops.h create mode 100644 fs/reiser4/wander.c create mode 100644 fs/reiser4/wander.h create mode 100644 fs/reiser4/writeout.h create mode 100644 fs/reiser4/znode.c create mode 100644 fs/reiser4/znode.h diff --git a/Documentation/filesystems/reiser4.txt b/Documentation/filesystems/reiser4.txt new file mode 100644 index 000000000000..8e07c9e24aa0 --- /dev/null +++ b/Documentation/filesystems/reiser4.txt @@ -0,0 +1,75 @@ +Reiser4 filesystem +================== +Reiser4 is a file system based on dancing tree algorithms, and is +described at http://www.namesys.com + + +References +========== +web page http://namesys.com/v4/v4.html +source code ftp://ftp.namesys.com/pub/reiser4-for-2.6/ +userland tools ftp://ftp.namesys.com/pub/reiser4progs/ +install page http://www.namesys.com/install_v4.html + +Compile options +=============== +Enable reiser4 debug mode + This checks everything imaginable while reiser4 + runs + +Mount options +============= +tmgr.atom_max_size=N + Atoms containing more than N blocks will be forced to commit. + N is decimal. + Default is nr_free_pagecache_pages() / 2 at mount time. + +tmgr.atom_max_age=N + Atoms older than N seconds will be forced to commit. N is decimal. + Default is 600. + +tmgr.atom_max_flushers=N + Limit of concurrent flushers for one atom. 0 means no limit. + Default is 0. + +tree.cbk_cache.nr_slots=N + Number of slots in the cbk cache. + +flush.relocate_threshold=N + If flush finds more than N adjacent dirty leaf-level blocks it + will force them to be relocated. + Default is 64. + +flush.relocate_distance=N + If flush finds can find a block allocation closer than at most + N from the preceder it will relocate to that position. + Default is 64. + +flush.scan_maxnodes=N + The maximum number of nodes to scan left on a level during + flush. + Default is 10000. + +optimal_io_size=N + Preferred IO size. This value is used to set st_blksize of + struct stat. + Default is 65536. + +bsdgroups + Turn on BSD-style gid assignment. + +32bittimes + By default file in reiser4 have 64 bit timestamps. Files + created when filesystem is mounted with 32bittimes mount + option will get 32 bit timestamps. + +mtflush + Turn off concurrent flushing. + +nopseudo + Disable pseudo files support. See + http://namesys.com/v4/pseudo.html for more about pseudo files. + +dont_load_bitmap + Don't load all bitmap blocks at mount time, it is useful for + machines with tiny RAM and large disks. 
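To make the option syntax above concrete, here is one way the listed options could be passed from user space through mount(2). This is an illustrative sketch only: the device node, mount point and option values are placeholders and are not part of this patch.

/* Illustrative only: device, mount point and option values are placeholders. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Options are comma-separated, exactly as with "mount -o ..." */
	const char *opts =
		"tmgr.atom_max_age=300,flush.relocate_threshold=64,dont_load_bitmap";

	if (mount("/dev/sdb1", "/mnt/r4", "reiser4", 0, opts) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}

The same option string works as the argument to -o on the mount command line.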
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 73fcdcd52b87..2591b7571b85 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -174,6 +174,13 @@ The reiserfsprogs package should be used for reiserfs-3.6.x versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and ``reiserfsck``. These utils work on both i386 and alpha platforms. +Reiser4progs +------------ + +The reiser4progs package contains utilities for the reiser4 file system. +Detailed instructions are provided in the README file located at: +<https://github.com/edward6/reiser4progs>. + Xfsprogs -------- @@ -371,6 +378,11 @@ Reiserfsprogs - <http://www.kernel.org/pub/linux/utils/fs/reiserfs/> +Reiser4progs +------------ + +- <http://sourceforge.net/projects/reiser4/> + Xfsprogs -------- diff --git a/fs/Kconfig b/fs/Kconfig index b42d356344ab..b7b75d07be2c 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -42,6 +42,7 @@ config FS_MBCACHE default y if EXT4_FS=y default m if EXT2_FS_XATTR || EXT4_FS +source "fs/reiser4/Kconfig" source "fs/reiserfs/Kconfig" source "fs/jfs/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 08fe716e57c7..12c076b80834 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_DLM) += dlm/ # Do not add any filesystems before this line obj-$(CONFIG_FSCACHE) += fscache/ obj-$(CONFIG_REISERFS_FS) += reiserfs/ +obj-$(CONFIG_REISER4_FS) += reiser4/ obj-$(CONFIG_EXT4_FS) += ext4/ # We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the # ext2 driver, which doesn't know about journalling! Explicitly request ext2 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d3f57eb0ebe..be633c836b79 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -39,25 +39,6 @@ struct wb_completion { atomic_t cnt; }; -/* - * Passed into wb_writeback(), essentially a subset of writeback_control - */ -struct wb_writeback_work { - long nr_pages; - struct super_block *sb; - enum writeback_sync_modes sync_mode; - unsigned int tagged_writepages:1; - unsigned int for_kupdate:1; - unsigned int range_cyclic:1; - unsigned int for_background:1; - unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ - unsigned int auto_free:1; /* free on completion */ - enum wb_reason reason; /* why was writeback initiated? */ - - struct list_head list; /* pending work list */ - struct wb_completion *done; /* set if the caller waits */ -}; - /* * If one wants to wait for one or more wb_writeback_works, each work's * ->done should be set to a wb_completion defined using the following @@ -270,6 +251,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page) if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) wb_put(wb); } +EXPORT_SYMBOL_GPL(__inode_attach_wb); /** * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it @@ -1531,20 +1513,12 @@ static long writeback_chunk_size(struct bdi_writeback *wb, * unlock and relock that for each inode it ends up doing * IO for. 
*/ -static long writeback_sb_inodes(struct super_block *sb, - struct bdi_writeback *wb, - struct wb_writeback_work *work) +long generic_writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all) { - struct writeback_control wbc = { - .sync_mode = work->sync_mode, - .tagged_writepages = work->tagged_writepages, - .for_kupdate = work->for_kupdate, - .for_background = work->for_background, - .for_sync = work->for_sync, - .range_cyclic = work->range_cyclic, - .range_start = 0, - .range_end = LLONG_MAX, - }; unsigned long start_time = jiffies; long write_chunk; long wrote = 0; /* count both pages and inodes */ @@ -1583,7 +1557,7 @@ static long writeback_sb_inodes(struct super_block *sb, spin_unlock(&inode->i_lock); continue; } - if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { + if ((inode->i_state & I_SYNC) && wbc->sync_mode != WB_SYNC_ALL) { /* * If this inode is locked for writeback and we are not * doing writeback-for-data-integrity, move it to @@ -1613,21 +1587,21 @@ static long writeback_sb_inodes(struct super_block *sb, continue; } inode->i_state |= I_SYNC; - wbc_attach_and_unlock_inode(&wbc, inode); + wbc_attach_and_unlock_inode(wbc, inode); write_chunk = writeback_chunk_size(wb, work); - wbc.nr_to_write = write_chunk; - wbc.pages_skipped = 0; + wbc->nr_to_write = write_chunk; + wbc->pages_skipped = 0; /* * We use I_SYNC to pin the inode in memory. While it is set * evict_inode() will wait so the inode cannot be freed. */ - __writeback_single_inode(inode, &wbc); + __writeback_single_inode(inode, wbc); - wbc_detach_inode(&wbc); - work->nr_pages -= write_chunk - wbc.nr_to_write; - wrote += write_chunk - wbc.nr_to_write; + wbc_detach_inode(wbc); + work->nr_pages -= write_chunk - wbc->nr_to_write; + wrote += write_chunk - wbc->nr_to_write; if (need_resched()) { /* @@ -1650,7 +1624,7 @@ static long writeback_sb_inodes(struct super_block *sb, spin_lock(&inode->i_lock); if (!(inode->i_state & I_DIRTY_ALL)) wrote++; - requeue_inode(inode, tmp_wb, &wbc); + requeue_inode(inode, tmp_wb, wbc); inode_sync_complete(inode); spin_unlock(&inode->i_lock); @@ -1664,7 +1638,7 @@ static long writeback_sb_inodes(struct super_block *sb, * background threshold and other termination conditions. */ if (wrote) { - if (time_is_before_jiffies(start_time + HZ / 10UL)) + if (!flush_all && time_is_before_jiffies(start_time + HZ / 10UL)) break; if (work->nr_pages <= 0) break; @@ -1672,6 +1646,26 @@ static long writeback_sb_inodes(struct super_block *sb, } return wrote; } +EXPORT_SYMBOL(generic_writeback_sb_inodes); + +long writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct wb_writeback_work *work) +{ + struct writeback_control wbc = { + .sync_mode = work->sync_mode, + .tagged_writepages = work->tagged_writepages, + .for_kupdate = work->for_kupdate, + .for_background = work->for_background, + .range_cyclic = work->range_cyclic, + .range_start = 0, + .range_end = LLONG_MAX, + }; + if (sb->s_op->writeback_inodes) + return sb->s_op->writeback_inodes(sb, wb, &wbc, work, false); + else + return generic_writeback_sb_inodes(sb, wb, &wbc, work, false); +} static long __writeback_inodes_wb(struct bdi_writeback *wb, struct wb_writeback_work *work) @@ -1942,6 +1936,31 @@ static long wb_do_writeback(struct bdi_writeback *wb) return wrote; } +/* + * This function is for file systems which have their + * own means of periodical write-out of old data. + * NOTE: inode_lock should be hold. 
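The refactoring above is the hook that lets reiser4 drive writeback itself: writeback_sb_inodes() now calls the super block's ->writeback_inodes() method when one is provided, and otherwise falls back to the newly exported generic_writeback_sb_inodes(). A minimal sketch of the filesystem side is shown below; the my_fs_* names are hypothetical, and only the callback signature and the generic helper are taken from this patch (the ->writeback_inodes field itself is added to struct super_operations elsewhere in this patch).

static long my_fs_writeback_inodes(struct super_block *sb,
                                   struct bdi_writeback *wb,
                                   struct writeback_control *wbc,
                                   struct wb_writeback_work *work,
                                   bool flush_all)
{
        /* a filesystem with its own flushing policy would do its work here */

        /* fall back to plain per-inode writeback for anything left over */
        return generic_writeback_sb_inodes(sb, wb, wbc, work, flush_all);
}

static const struct super_operations my_fs_super_ops = {
        /* ... other operations ... */
        .writeback_inodes = my_fs_writeback_inodes,
};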
+ * + * Skip a portion of b_io inodes which belong to @sb + * and go sequentially in reverse order. + */ +void writeback_skip_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb) +{ + while (1) { + struct inode *inode; + + if (list_empty(&wb->b_io)) + break; + inode = wb_inode(wb->b_io.prev); + if (sb != inode->i_sb) + break; + redirty_tail(inode, wb); + } +} +EXPORT_SYMBOL(writeback_skip_sb_inodes); + + /* * Handle writeback of dirty data for the device backed by this bdi. Also * reschedules periodically and does kupdated style flushing. @@ -1953,7 +1972,7 @@ void wb_workfn(struct work_struct *work) long pages_written; set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); - current->flags |= PF_SWAPWRITE; + current->flags |= PF_FLUSHER | PF_SWAPWRITE; if (likely(!current_is_workqueue_rescuer() || !test_bit(WB_registered, &wb->state))) { diff --git a/fs/read_write.c b/fs/read_write.c index ee66fa47b0c1..032fdb0040f2 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -241,12 +241,11 @@ loff_t no_llseek(struct file *file, loff_t offset, int whence) } EXPORT_SYMBOL(no_llseek); -loff_t default_llseek(struct file *file, loff_t offset, int whence) +loff_t default_llseek_unlocked(struct file *file, loff_t offset, int whence) { struct inode *inode = file_inode(file); loff_t retval; - inode_lock(inode); switch (whence) { case SEEK_END: offset += i_size_read(inode); @@ -291,9 +290,19 @@ loff_t default_llseek(struct file *file, loff_t offset, int whence) retval = offset; } out: - inode_unlock(inode); return retval; } +EXPORT_SYMBOL(default_llseek_unlocked); + +loff_t default_llseek(struct file *file, loff_t offset, int origin) +{ + loff_t retval; + + inode_lock(file_inode(file)); + retval = default_llseek_unlocked(file, offset, origin); + inode_unlock(file_inode(file)); + return retval; +} EXPORT_SYMBOL(default_llseek); loff_t vfs_llseek(struct file *file, loff_t offset, int whence) @@ -395,7 +404,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t read_write == READ ? MAY_READ : MAY_WRITE); } -static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) +ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { struct iovec iov = { .iov_base = buf, .iov_len = len }; struct kiocb kiocb; @@ -411,6 +420,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo *ppos = kiocb.ki_pos; return ret; } +EXPORT_SYMBOL(new_sync_read); ssize_t __vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) diff --git a/fs/reiser4/Kconfig b/fs/reiser4/Kconfig new file mode 100644 index 000000000000..e55f8bc51caf --- /dev/null +++ b/fs/reiser4/Kconfig @@ -0,0 +1,36 @@ +config REISER4_FS + tristate "Reiser4 (EXPERIMENTAL)" + select ZLIB_INFLATE + select ZLIB_DEFLATE + select LZO_COMPRESS + select LZO_DECOMPRESS + select ZSTD_COMPRESS + select ZSTD_DECOMPRESS + select CRYPTO + select CRYPTO_CRC32C + help + Reiser4 is a filesystem that performs all filesystem operations + as atomic transactions, which means that it either performs a + write, or it does not, and in the event of a crash it does not + partially perform it or corrupt it. + + It stores files in dancing trees, which are like balanced trees but + faster. It packs small files together so that they share blocks + without wasting space. This means you can use it to store really + small files. It also means that it saves you disk space. 
It avoids + hassling you with anachronisms like having a maximum number of + inodes, and wasting space if you use less than that number. + + Reiser4 is a distinct filesystem type from reiserfs (V3). + It's therefore not possible to use reiserfs file systems + with reiser4. + + To learn more about reiser4, go to http://www.namesys.com + +config REISER4_DEBUG + bool "Enable reiser4 debug mode" + depends on REISER4_FS + help + Don't use this unless you are debugging reiser4. + + If unsure, say N. diff --git a/fs/reiser4/Makefile b/fs/reiser4/Makefile new file mode 100644 index 000000000000..8917789dc106 --- /dev/null +++ b/fs/reiser4/Makefile @@ -0,0 +1,105 @@ +# +# reiser4/Makefile +# + +obj-$(CONFIG_REISER4_FS) += reiser4.o + +ccflags-$(CONFIG_REISER4_FS) += -Wno-incompatible-pointer-types + +reiser4-y := \ + debug.o \ + jnode.o \ + znode.o \ + key.o \ + pool.o \ + tree_mod.o \ + estimate.o \ + carry.o \ + carry_ops.o \ + lock.o \ + tree.o \ + context.o \ + tap.o \ + coord.o \ + block_alloc.o \ + txnmgr.o \ + kassign.o \ + flush.o \ + wander.o \ + eottl.o \ + search.o \ + page_cache.o \ + seal.o \ + dscale.o \ + flush_queue.o \ + ktxnmgrd.o \ + blocknrset.o \ + super.o \ + super_ops.o \ + fsdata.o \ + export_ops.o \ + oid.o \ + tree_walk.o \ + inode.o \ + vfs_ops.o \ + as_ops.o \ + entd.o\ + readahead.o \ + status_flags.o \ + init_super.o \ + safe_link.o \ + blocknrlist.o \ + discard.o \ + checksum.o \ + \ + plugin/plugin.o \ + plugin/plugin_set.o \ + plugin/node/node.o \ + plugin/object.o \ + plugin/cluster.o \ + plugin/txmod.o \ + plugin/inode_ops.o \ + plugin/inode_ops_rename.o \ + plugin/file_ops.o \ + plugin/file_ops_readdir.o \ + plugin/file_plugin_common.o \ + plugin/file/file.o \ + plugin/file/tail_conversion.o \ + plugin/file/file_conversion.o \ + plugin/file/symlink.o \ + plugin/file/cryptcompress.o \ + plugin/dir_plugin_common.o \ + plugin/dir/hashed_dir.o \ + plugin/dir/seekable_dir.o \ + plugin/node/node40.o \ + plugin/node/node41.o \ + \ + plugin/crypto/cipher.o \ + plugin/crypto/digest.o \ + \ + plugin/compress/compress.o \ + plugin/compress/compress_mode.o \ + \ + plugin/item/static_stat.o \ + plugin/item/sde.o \ + plugin/item/cde.o \ + plugin/item/blackbox.o \ + plugin/item/internal.o \ + plugin/item/tail.o \ + plugin/item/ctail.o \ + plugin/item/extent.o \ + plugin/item/extent_item_ops.o \ + plugin/item/extent_file_ops.o \ + plugin/item/extent_flush_ops.o \ + \ + plugin/hash.o \ + plugin/fibration.o \ + plugin/tail_policy.o \ + plugin/item/item.o \ + \ + plugin/security/perm.o \ + plugin/space/bitmap.o \ + \ + plugin/disk_format/disk_format40.o \ + plugin/disk_format/disk_format.o + diff --git a/fs/reiser4/README b/fs/reiser4/README new file mode 100644 index 000000000000..80c5efe15c2d --- /dev/null +++ b/fs/reiser4/README @@ -0,0 +1,128 @@ +[LICENSING] + +Reiser4 is hereby licensed under the GNU General +Public License version 2. + +Source code files that contain the phrase "licensing governed by +reiser4/README" are "governed files" throughout this file. Governed +files are licensed under the GPL. The portions of them owned by Hans +Reiser, or authorized to be licensed by him, have been in the past, +and likely will be in the future, licensed to other parties under +other licenses. If you add your code to governed files, and don't +want it to be owned by Hans Reiser, put your copyright label on that +code so the poor blight and his customers can keep things straight. 
+All portions of governed files not labeled otherwise are owned by Hans +Reiser, and by adding your code to it, widely distributing it to +others or sending us a patch, and leaving the sentence in stating that +licensing is governed by the statement in this file, you accept this. +It will be a kindness if you identify whether Hans Reiser is allowed +to license code labeled as owned by you on your behalf other than +under the GPL, because he wants to know if it is okay to do so and put +a check in the mail to you (for non-trivial improvements) when he +makes his next sale. He makes no guarantees as to the amount if any, +though he feels motivated to motivate contributors, and you can surely +discuss this with him before or after contributing. You have the +right to decline to allow him to license your code contribution other +than under the GPL. + +Further licensing options are available for commercial and/or other +interests directly from Hans Reiser: reiser@namesys.com. If you interpret +the GPL as not allowing those additional licensing options, you read +it wrongly, and Richard Stallman agrees with me, when carefully read +you can see that those restrictions on additional terms do not apply +to the owner of the copyright, and my interpretation of this shall +govern for this license. + +[END LICENSING] + +Reiser4 is a file system based on dancing tree algorithms, and is +described at http://www.namesys.com + +mkfs.reiser4 and other utilities are on our webpage or wherever your +Linux provider put them. You really want to be running the latest +version off the website if you use fsck. + +Yes, if you update your reiser4 kernel module you do have to +recompile your kernel, most of the time. The errors you get will be +quite cryptic if your forget to do so. + +Hideous Commercial Pitch: Spread your development costs across other OS +vendors. Select from the best in the world, not the best in your +building, by buying from third party OS component suppliers. Leverage +the software component development power of the internet. Be the most +aggressive in taking advantage of the commercial possibilities of +decentralized internet development, and add value through your branded +integration that you sell as an operating system. Let your competitors +be the ones to compete against the entire internet by themselves. Be +hip, get with the new economic trend, before your competitors do. Send +email to reiser@namesys.com + +Hans Reiser was the primary architect of Reiser4, but a whole team +chipped their ideas in. He invested everything he had into Namesys +for 5.5 dark years of no money before Reiser3 finally started to work well +enough to bring in money. He owns the copyright. + +DARPA was the primary sponsor of Reiser4. DARPA does not endorse +Reiser4, it merely sponsors it. DARPA is, in solely Hans's personal +opinion, unique in its willingness to invest into things more +theoretical than the VC community can readily understand, and more +longterm than allows them to be sure that they will be the ones to +extract the economic benefits from. DARPA also integrated us into a +security community that transformed our security worldview. + +Vladimir Saveliev is our lead programmer, with us from the beginning, +and he worked long hours writing the cleanest code. This is why he is +now the lead programmer after years of commitment to our work. He +always made the effort to be the best he could be, and to make his +code the best that it could be. What resulted was quite remarkable. 
I +don't think that money can ever motivate someone to work the way he +did, he is one of the most selfless men I know. + +Alexander Lyamin was our sysadmin, and helped to educate us in +security issues. Moscow State University and IMT were very generous +in the internet access they provided us, and in lots of other little +ways that a generous institution can be. + +Alexander Zarochentcev (sometimes known as zam, or sasha), wrote the +locking code, the block allocator, and finished the flushing code. +His code is always crystal clean and well structured. + +Nikita Danilov wrote the core of the balancing code, the core of the +plugins code, and the directory code. He worked a steady pace of long +hours that produced a whole lot of well abstracted code. He is our +senior computer scientist. + +Vladimir Demidov wrote the parser. Writing an in kernel parser is +something very few persons have the skills for, and it is thanks to +him that we can say that the parser is really not so big compared to +various bits of our other code, and making a parser work in the kernel +was not so complicated as everyone would imagine mainly because it was +him doing it... + +Joshua McDonald wrote the transaction manager, and the flush code. +The flush code unexpectedly turned out be extremely hairy for reasons +you can read about on our web page, and he did a great job on an +extremely difficult task. + +Nina Reiser handled our accounting, government relations, and much +more. + +Ramon Reiser developed our website. + +Beverly Palmer drew our graphics. + +Vitaly Fertman developed librepair, userspace plugins repair code, fsck +and worked with Umka on developing libreiser4 and userspace plugins. + +Yury Umanets (aka Umka) developed libreiser4, userspace plugins and +userspace tools (reiser4progs). + +Oleg Drokin (aka Green) is the release manager who fixes everything. +It is so nice to have someone like that on the team. He (plus Chris +and Jeff) make it possible for the entire rest of the Namesys team to +focus on Reiser4, and he fixed a whole lot of Reiser4 bugs also. It +is just amazing to watch his talent for spotting bugs in action. + +Edward Shishkin wrote cryptcompress file plugin (which manages files +built of encrypted and(or) compressed bodies) and other plugins related +to transparent encryption and compression support. diff --git a/fs/reiser4/as_ops.c b/fs/reiser4/as_ops.c new file mode 100644 index 000000000000..393e9d123c88 --- /dev/null +++ b/fs/reiser4/as_ops.c @@ -0,0 +1,348 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Interface to VFS. Reiser4 address_space_operations are defined here. 
*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/file/file.h" +#include "plugin/security/perm.h" +#include "plugin/disk_format/disk_format.h" +#include "plugin/plugin.h" +#include "plugin/plugin_set.h" +#include "plugin/object.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "page_cache.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "reiser4.h" +#include "entd.h" + +#include <linux/profile.h> +#include <linux/types.h> +#include <linux/mount.h> +#include <linux/vfs.h> +#include <linux/mm.h> +#include <linux/buffer_head.h> +#include <linux/dcache.h> +#include <linux/list.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/writeback.h> +#include <linux/backing-dev.h> +#include <linux/security.h> +#include <linux/migrate.h> + +/* address space operations */ + +/** + * reiser4_set_page_dirty - set dirty bit, tag in page tree, dirty accounting + * @page: page to be dirtied + * + * Operation of struct address_space_operations. This implementation is used by + * unix and cryptcompress file plugins. + * + * This is called when reiser4 page gets dirtied outside of reiser4, for + * example, when dirty bit is moved from pte to physical page. + * + * Tags page in the mapping's page tree with special tag so that it is possible + * to do all the reiser4 specific work wrt dirty pages (jnode creation, + * capturing by an atom) later because it can not be done in the contexts where + * set_page_dirty is called. + */ +int reiser4_set_page_dirty(struct page *page) +{ + /* this page can be unformatted only */ + assert("vs-1734", (page->mapping && + page->mapping->host && + reiser4_get_super_fake(page->mapping->host->i_sb) != + page->mapping->host && + reiser4_get_cc_fake(page->mapping->host->i_sb) != + page->mapping->host && + reiser4_get_bitmap_fake(page->mapping->host->i_sb) != + page->mapping->host)); + return __set_page_dirty_nobuffers(page); +} + +/* ->invalidatepage method for reiser4 */ + +/* + * this is called for each truncated page from + * truncate_inode_pages()->truncate_{complete,partial}_page(). + * + * At the moment of call, page is under lock, and outstanding io (if any) has + * completed. + */ + +/** + * reiser4_invalidatepage + * @page: page to invalidate + * @offset: starting offset for partial invalidation + * + */ +void reiser4_invalidatepage(struct page *page, unsigned int offset, unsigned int length) +{ + int ret = 0; + int partial_page = (offset || length < PAGE_SIZE); + reiser4_context *ctx; + struct inode *inode; + jnode *node; + + /* + * This is called to truncate file's page. + * + * Originally, reiser4 implemented truncate in a standard way + * (vmtruncate() calls ->invalidatepage() on all truncated pages + * first, then file system ->truncate() call-back is invoked). + * + * This lead to the problem when ->invalidatepage() was called on a + * page with jnode that was captured into atom in ASTAGE_PRE_COMMIT + * process. That is, truncate was bypassing transactions. To avoid + * this, try_capture_page_to_invalidate() call was added here. + * + * After many troubles with vmtruncate() based truncate (including + * races with flush, tail conversion, etc.) 
it was re-written in the + * top-to-bottom style: items are killed in reiser4_cut_tree_object() + * and pages belonging to extent are invalidated in kill_hook_extent(). + * So probably now additional call to capture is not needed here. + */ + + assert("nikita-3137", PageLocked(page)); + assert("nikita-3138", !PageWriteback(page)); + inode = page->mapping->host; + + /* + * ->invalidatepage() should only be called for the unformatted + * jnodes. Destruction of all other types of jnodes is performed + * separately. But, during some corner cases (like handling errors + * during mount) it is simpler to let ->invalidatepage to be called on + * them. Check for this, and do nothing. + */ + if (reiser4_get_super_fake(inode->i_sb) == inode) + return; + if (reiser4_get_cc_fake(inode->i_sb) == inode) + return; + if (reiser4_get_bitmap_fake(inode->i_sb) == inode) + return; + assert("vs-1426", PagePrivate(page)); + assert("vs-1427", + page->mapping == jnode_get_mapping(jnode_by_page(page))); + assert("", jprivate(page) != NULL); + assert("", ergo(inode_file_plugin(inode) != + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID), + offset == 0)); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return; + + node = jprivate(page); + spin_lock_jnode(node); + if (!(node->state & ((1 << JNODE_DIRTY) | (1 << JNODE_FLUSH_QUEUED) | + (1 << JNODE_WRITEBACK) | (1 << JNODE_OVRWR)))) { + /* there is not need to capture */ + jref(node); + JF_SET(node, JNODE_HEARD_BANSHEE); + page_clear_jnode(page, node); + reiser4_uncapture_jnode(node); + unhash_unformatted_jnode(node); + jput(node); + reiser4_exit_context(ctx); + return; + } + spin_unlock_jnode(node); + + /* capture page being truncated. */ + ret = try_capture_page_to_invalidate(page); + if (ret != 0) + warning("nikita-3141", "Cannot capture: %i", ret); + + if (!partial_page) { + /* remove jnode from transaction and detach it from page. */ + jref(node); + JF_SET(node, JNODE_HEARD_BANSHEE); + /* page cannot be detached from jnode concurrently, because it + * is locked */ + reiser4_uncapture_page(page); + + /* this detaches page from jnode, so that jdelete will not try + * to lock page which is already locked */ + spin_lock_jnode(node); + page_clear_jnode(page, node); + spin_unlock_jnode(node); + unhash_unformatted_jnode(node); + + jput(node); + } + + reiser4_exit_context(ctx); +} + +/* help function called from reiser4_releasepage(). It returns true if jnode + * can be detached from its page and page released. */ +int jnode_is_releasable(jnode * node/* node to check */) +{ + assert("nikita-2781", node != NULL); + assert_spin_locked(&(node->guard)); + assert_spin_locked(&(node->load)); + + /* is some thread is currently using jnode page, later cannot be + * detached */ + if (atomic_read(&node->d_count) != 0) + return 0; + + assert("vs-1214", !jnode_is_loaded(node)); + + /* + * can only release page if real block number is assigned to it. Simple + * check for ->atom wouldn't do, because it is possible for node to be + * clean, not it atom yet, and still having fake block number. For + * example, node just created in jinit_new(). + */ + if (reiser4_blocknr_is_fake(jnode_get_block(node))) + return 0; + + /* + * pages prepared for write can not be released anyway, so avoid + * detaching jnode from the page + */ + if (JF_ISSET(node, JNODE_WRITE_PREPARED)) + return 0; + + /* + * dirty jnode cannot be released. It can however be submitted to disk + * as part of early flushing, but only after getting flush-prepped. 
+ */ + if (JF_ISSET(node, JNODE_DIRTY)) + return 0; + + /* overwrite set is only written by log writer. */ + if (JF_ISSET(node, JNODE_OVRWR)) + return 0; + + /* jnode is already under writeback */ + if (JF_ISSET(node, JNODE_WRITEBACK)) + return 0; + + /* don't flush bitmaps or journal records */ + if (!jnode_is_znode(node) && !jnode_is_unformatted(node)) + return 0; + + return 1; +} + +/* + * ->releasepage method for reiser4 + * + * This is called by VM scanner when it comes across clean page. What we have + * to do here is to check whether page can really be released (freed that is) + * and if so, detach jnode from it and remove page from the page cache. + * + * Check for releasability is done by releasable() function. + */ +int reiser4_releasepage(struct page *page, gfp_t gfp UNUSED_ARG) +{ + jnode *node; + + assert("nikita-2257", PagePrivate(page)); + assert("nikita-2259", PageLocked(page)); + assert("nikita-2892", !PageWriteback(page)); + assert("nikita-3019", reiser4_schedulable()); + + /* NOTE-NIKITA: this can be called in the context of reiser4 call. It + is not clear what to do in this case. A lot of deadlocks seems be + possible. */ + + node = jnode_by_page(page); + assert("nikita-2258", node != NULL); + assert("reiser4-4", page->mapping != NULL); + assert("reiser4-5", page->mapping->host != NULL); + + if (PageDirty(page)) + return 0; + + /* extra page reference is used by reiser4 to protect + * jnode<->page link from this ->releasepage(). */ + if (page_count(page) > 3) + return 0; + + /* releasable() needs jnode lock, because it looks at the jnode fields + * and we need jload_lock here to avoid races with jload(). */ + spin_lock_jnode(node); + spin_lock(&(node->load)); + if (jnode_is_releasable(node)) { + struct address_space *mapping; + + mapping = page->mapping; + jref(node); + /* there is no need to synchronize against + * jnode_extent_write() here, because pages seen by + * jnode_extent_write() are !releasable(). */ + page_clear_jnode(page, node); + spin_unlock(&(node->load)); + spin_unlock_jnode(node); + + /* we are under memory pressure so release jnode also. */ + jput(node); + + return 1; + } else { + spin_unlock(&(node->load)); + spin_unlock_jnode(node); + assert("nikita-3020", reiser4_schedulable()); + return 0; + } +} + +#ifdef CONFIG_MIGRATION +int reiser4_migratepage(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode) +{ + /* TODO: implement movable mapping + */ + return -EIO; +} +#endif /* CONFIG_MIGRATION */ + +int reiser4_readpage_dispatch(struct file *file, struct page *page) +{ + assert("edward-1533", PageLocked(page)); + assert("edward-1534", !PageUptodate(page)); + assert("edward-1535", page->mapping && page->mapping->host); + + return inode_file_plugin(page->mapping->host)->readpage(file, page); +} + +int reiser4_readpages_dispatch(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + return inode_file_plugin(mapping->host)->readpages(file, mapping, + pages, nr_pages); +} + +int reiser4_writepages_dispatch(struct address_space *mapping, + struct writeback_control *wbc) +{ + return inode_file_plugin(mapping->host)->writepages(mapping, wbc); +} + +/* Make Linus happy. 
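For orientation, the helpers defined in this file are the entries a reiser4 address_space_operations table would point at. The actual table is defined elsewhere in the reiser4 sources, so the following is only a sketch of how the pieces plug together, using the 4.x-era field names:

static const struct address_space_operations reiser4_aops_sketch = {
        .readpage       = reiser4_readpage_dispatch,
        .readpages      = reiser4_readpages_dispatch,
        .writepages     = reiser4_writepages_dispatch,
        .set_page_dirty = reiser4_set_page_dirty,
        .invalidatepage = reiser4_invalidatepage,
        .releasepage    = reiser4_releasepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = reiser4_migratepage,
#endif
};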
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/block_alloc.c b/fs/reiser4/block_alloc.c new file mode 100644 index 000000000000..fee1c185e0b2 --- /dev/null +++ b/fs/reiser4/block_alloc.c @@ -0,0 +1,1176 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by +reiser4/README */ + +#include "debug.h" +#include "dformat.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "super.h" +#include "discard.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block */ +#include <linux/spinlock.h> + +/* THE REISER4 DISK SPACE RESERVATION SCHEME. */ + +/* We need to be able to reserve enough disk space to ensure that an atomic + operation will have enough disk space to flush (see flush.c and + http://namesys.com/v4/v4.html) and commit it once it is started. + + In our design a call for reserving disk space may fail but not an actual + block allocation. + + All free blocks, already allocated blocks, and all kinds of reserved blocks + are counted in different per-fs block counters. + + A reiser4 super block's set of block counters currently is: + + free -- free blocks, + used -- already allocated blocks, + + grabbed -- initially reserved for performing an fs operation, those blocks + are taken from free blocks, then grabbed disk space leaks from grabbed + blocks counter to other counters like "fake allocated", "flush + reserved", "used", the rest of not used grabbed space is returned to + free space at the end of fs operation; + + fake allocated -- counts all nodes without real disk block numbers assigned, + we have separate accounting for formatted and unformatted + nodes (for easier debugging); + + flush reserved -- disk space needed for flushing and committing an atom. + Each dirty already allocated block could be written as a + part of atom's overwrite set or as a part of atom's + relocate set. In both case one additional block is needed, + it is used as a wandered block if we do overwrite or as a + new location for a relocated block. + + In addition, blocks in some states are counted on per-thread and per-atom + basis. A reiser4 context has a counter of blocks grabbed by this transaction + and the sb's grabbed blocks counter is a sum of grabbed blocks counter values + of each reiser4 context. Each reiser4 atom has a counter of "flush reserved" + blocks, which are reserved for flush processing and atom commit. */ + +/* AN EXAMPLE: suppose we insert new item to the reiser4 tree. We estimate + number of blocks to grab for most expensive case of balancing when the leaf + node we insert new item to gets split and new leaf node is allocated. + + So, we need to grab blocks for + + 1) one block for possible dirtying the node we insert an item to. That block + would be used for node relocation at flush time or for allocating of a + wandered one, it depends what will be a result (what set, relocate or + overwrite the node gets assigned to) of the node processing by the flush + algorithm. + + 2) one block for either allocating a new node, or dirtying of right or left + clean neighbor, only one case may happen. + + VS-FIXME-HANS: why can only one case happen? I would expect to see dirtying + of left neighbor, right neighbor, current node, and creation of new node. + Have I forgotten something? email me. 
+ + These grabbed blocks are counted in both reiser4 context "grabbed blocks" + counter and in the fs-wide one (both ctx->grabbed_blocks and + sbinfo->blocks_grabbed get incremented by 2), sb's free blocks counter is + decremented by 2. + + Suppose both two blocks were spent for dirtying of an already allocated clean + node (one block went from "grabbed" to "flush reserved") and for new block + allocating (one block went from "grabbed" to "fake allocated formatted"). + + Inserting of a child pointer to the parent node caused parent node to be + split, the balancing code takes care about this grabbing necessary space + immediately by calling reiser4_grab with BA_RESERVED flag set which means + "can use the 5% reserved disk space". + + At this moment insertion completes and grabbed blocks (if they were not used) + should be returned to the free space counter. + + However the atom life-cycle is not completed. The atom had one "flush + reserved" block added by our insertion and the new fake allocated node is + counted as a "fake allocated formatted" one. The atom has to be fully + processed by flush before commit. Suppose that the flush moved the first, + already allocated node to the atom's overwrite list, the new fake allocated + node, obviously, went into the atom relocate set. The reiser4 flush + allocates the new node using one unit from "fake allocated formatted" + counter, the log writer uses one from "flush reserved" for wandered block + allocation. + + And, it is not the end. When the wandered block is deallocated after the + atom gets fully played (see wander.c for term description), the disk space + occupied for it is returned to free blocks. */ + +/* BLOCK NUMBERS */ + +/* Any reiser4 node has a block number assigned to it. We use these numbers for + indexing in hash tables, so if a block has not yet been assigned a location + on disk we need to give it a temporary fake block number. + + Current implementation of reiser4 uses 64-bit integers for block numbers. We + use highest bit in 64-bit block number to distinguish fake and real block + numbers. So, only 63 bits may be used to addressing of real device + blocks. That "fake" block numbers space is divided into subspaces of fake + block numbers for data blocks and for shadow (working) bitmap blocks. + + Fake block numbers for data blocks are generated by a cyclic counter, which + gets incremented after each real block allocation. We assume that it is + impossible to overload this counter during one transaction life. */ + +/* Initialize a blocknr hint. */ +void reiser4_blocknr_hint_init(reiser4_blocknr_hint * hint) +{ + memset(hint, 0, sizeof(reiser4_blocknr_hint)); +} + +/* Release any resources of a blocknr hint. */ +void reiser4_blocknr_hint_done(reiser4_blocknr_hint * hint UNUSED_ARG) +{ +/* No resources should be freed in current blocknr_hint implementation. */ +} + +/* see above for explanation of fake block number. */ +/* Audited by: green(2002.06.11) */ +int reiser4_blocknr_is_fake(const reiser4_block_nr * da) +{ + /* The reason for not simply returning result of '&' operation is that + while return value is (possibly 32bit) int, the reiser4_block_nr is + at least 64 bits long, and high bit (which is the only possible + non zero bit after the masking) would be stripped off */ + return (*da & REISER4_FAKE_BLOCKNR_BIT_MASK) ? 1 : 0; +} + +/* Static functions for <reiser4 super block>/<reiser4 context> block counters + arithmetic. Mostly, they are isolated to not to code same assertions in + several places. 
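The counter movements described in the insertion example can be condensed into a small stand-alone model. None of the code below is reiser4 code; it only illustrates, with made-up numbers, the invariant that reiser4_check_block_counters() enforces further down in this file: every block of the device is accounted for by exactly one counter at any time.

#include <assert.h>
#include <stdio.h>

struct counters {
        unsigned long long free, used, grabbed, fake_allocated, flush_reserved;
};

static unsigned long long total(const struct counters *c)
{
        return c->free + c->used + c->grabbed +
               c->fake_allocated + c->flush_reserved;
}

int main(void)
{
        struct counters c = { .free = 1000, .used = 24 };
        const unsigned long long device_blocks = total(&c);

        /* reiser4_grab_space(2): free -> grabbed */
        c.free -= 2;
        c.grabbed += 2;

        /* dirty an already allocated node: grabbed -> flush reserved */
        c.grabbed -= 1;
        c.flush_reserved += 1;

        /* allocate a new node with a fake block number: grabbed -> fake allocated */
        c.grabbed -= 1;
        c.fake_allocated += 1;

        /* the invariant reiser4_check_block_counters() checks */
        assert(total(&c) == device_blocks);

        printf("free=%llu grabbed=%llu fake=%llu flush_reserved=%llu used=%llu\n",
               c.free, c.grabbed, c.fake_allocated, c.flush_reserved, c.used);
        return 0;
}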
*/ +static void sub_from_ctx_grabbed(reiser4_context * ctx, __u64 count) +{ + BUG_ON(ctx->grabbed_blocks < count); + assert("zam-527", ctx->grabbed_blocks >= count); + ctx->grabbed_blocks -= count; +} + +static void add_to_ctx_grabbed(reiser4_context * ctx, __u64 count) +{ + ctx->grabbed_blocks += count; +} + +static void sub_from_sb_grabbed(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("zam-525", sbinfo->blocks_grabbed >= count); + sbinfo->blocks_grabbed -= count; +} + +/* Decrease the counter of block reserved for flush in super block. */ +static void +sub_from_sb_flush_reserved(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("vpf-291", sbinfo->blocks_flush_reserved >= count); + sbinfo->blocks_flush_reserved -= count; +} + +static void +sub_from_sb_fake_allocated(reiser4_super_info_data * sbinfo, __u64 count, + reiser4_ba_flags_t flags) +{ + if (flags & BA_FORMATTED) { + assert("zam-806", sbinfo->blocks_fake_allocated >= count); + sbinfo->blocks_fake_allocated -= count; + } else { + assert("zam-528", + sbinfo->blocks_fake_allocated_unformatted >= count); + sbinfo->blocks_fake_allocated_unformatted -= count; + } +} + +static void sub_from_sb_used(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("zam-530", + sbinfo->blocks_used >= count + sbinfo->min_blocks_used); + sbinfo->blocks_used -= count; +} + +static void +sub_from_cluster_reserved(reiser4_super_info_data * sbinfo, __u64 count) +{ + assert("edward-501", sbinfo->blocks_clustered >= count); + sbinfo->blocks_clustered -= count; +} + +/* Increase the counter of block reserved for flush in atom. */ +static void add_to_atom_flush_reserved_nolock(txn_atom * atom, __u32 count) +{ + assert("zam-772", atom != NULL); + assert_spin_locked(&(atom->alock)); + atom->flush_reserved += count; +} + +/* Decrease the counter of block reserved for flush in atom. */ +static void sub_from_atom_flush_reserved_nolock(txn_atom * atom, __u32 count) +{ + assert("zam-774", atom != NULL); + assert_spin_locked(&(atom->alock)); + assert("nikita-2790", atom->flush_reserved >= count); + atom->flush_reserved -= count; +} + +/* super block has 6 counters: free, used, grabbed, fake allocated + (formatted and unformatted) and flush reserved. Their sum must be + number of blocks on a device. This function checks this */ +int reiser4_check_block_counters(const struct super_block *super) +{ + __u64 sum; + + sum = reiser4_grabbed_blocks(super) + reiser4_free_blocks(super) + + reiser4_data_blocks(super) + reiser4_fake_allocated(super) + + reiser4_fake_allocated_unformatted(super) + reiser4_flush_reserved(super) + + reiser4_clustered_blocks(super); + if (reiser4_block_count(super) != sum) { + printk("super block counters: " + "used %llu, free %llu, " + "grabbed %llu, fake allocated (formatetd %llu, unformatted %llu), " + "reserved %llu, clustered %llu, sum %llu, must be (block count) %llu\n", + (unsigned long long)reiser4_data_blocks(super), + (unsigned long long)reiser4_free_blocks(super), + (unsigned long long)reiser4_grabbed_blocks(super), + (unsigned long long)reiser4_fake_allocated(super), + (unsigned long long) + reiser4_fake_allocated_unformatted(super), + (unsigned long long)reiser4_flush_reserved(super), + (unsigned long long)reiser4_clustered_blocks(super), + (unsigned long long)sum, + (unsigned long long)reiser4_block_count(super)); + return 0; + } + return 1; +} + +/* Adjust "working" free blocks counter for number of blocks we are going to + allocate. Record number of grabbed blocks in fs-wide and per-thread + counters. 
This function should be called before bitmap scanning or + allocating fake block numbers + + @super -- pointer to reiser4 super block; + @count -- number of blocks we reserve; + + @return -- 0 if success, -ENOSPC, if all + free blocks are preserved or already allocated. +*/ + +static int +reiser4_grab(reiser4_context * ctx, __u64 count, reiser4_ba_flags_t flags) +{ + __u64 free_blocks; + int ret = 0, use_reserved = flags & BA_RESERVED; + reiser4_super_info_data *sbinfo; + + assert("vs-1276", ctx == get_current_context()); + + /* Do not grab anything on ro-mounted fs. */ + if (rofs_super(ctx->super)) { + ctx->grab_enabled = 0; + return 0; + } + + sbinfo = get_super_private(ctx->super); + + spin_lock_reiser4_super(sbinfo); + + free_blocks = sbinfo->blocks_free; + + if ((use_reserved && free_blocks < count) || + (!use_reserved && free_blocks < count + sbinfo->blocks_reserved)) { + ret = RETERR(-ENOSPC); + goto unlock_and_ret; + } + + add_to_ctx_grabbed(ctx, count); + + sbinfo->blocks_grabbed += count; + sbinfo->blocks_free -= count; + +#if REISER4_DEBUG + if (ctx->grabbed_initially == 0) + ctx->grabbed_initially = count; +#endif + + assert("nikita-2986", reiser4_check_block_counters(ctx->super)); + + /* disable grab space in current context */ + ctx->grab_enabled = 0; + +unlock_and_ret: + spin_unlock_reiser4_super(sbinfo); + + return ret; +} + +int reiser4_grab_space(__u64 count, reiser4_ba_flags_t flags) +{ + int ret; + reiser4_context *ctx; + + assert("nikita-2964", ergo(flags & BA_CAN_COMMIT, + lock_stack_isclean(get_current_lock_stack + ()))); + ctx = get_current_context(); + if (!(flags & BA_FORCE) && !is_grab_enabled(ctx)) + return 0; + + ret = reiser4_grab(ctx, count, flags); + if (ret == -ENOSPC) { + + /* Trying to commit the all transactions if BA_CAN_COMMIT flag + present */ + if (flags & BA_CAN_COMMIT) { + txnmgr_force_commit_all(ctx->super, 0); + ctx->grab_enabled = 1; + ret = reiser4_grab(ctx, count, flags); + } + } + /* + * allocation from reserved pool cannot fail. This is severe error. + */ + assert("nikita-3005", ergo(flags & BA_RESERVED, ret == 0)); + return ret; +} + +/* + * SPACE RESERVED FOR UNLINK/TRUNCATE + * + * Unlink and truncate require space in transaction (to update stat data, at + * least). But we don't want rm(1) to fail with "No space on device" error. + * + * Solution is to reserve 5% of disk space for truncates and + * unlinks. Specifically, normal space grabbing requests don't grab space from + * reserved area. Only requests with BA_RESERVED bit in flags are allowed to + * drain it. Per super block delete mutex is used to allow only one + * thread at a time to grab from reserved area. + * + * Grabbing from reserved area should always be performed with BA_CAN_COMMIT + * flag. + * + */ + +int reiser4_grab_reserved(struct super_block *super, + __u64 count, reiser4_ba_flags_t flags) +{ + reiser4_super_info_data *sbinfo = get_super_private(super); + + assert("nikita-3175", flags & BA_CAN_COMMIT); + + /* Check the delete mutex already taken by us, we assume that + * reading of machine word is atomic. 
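Taken together, the intended calling pattern is: estimate the worst case, grab that much space up front (optionally letting the transaction manager commit atoms to make room), do the operation, then hand back whatever was not consumed. The sketch below is hypothetical caller code; only reiser4_grab_space(), BA_CAN_COMMIT and all_grabbed2free() come from this file, and the estimate of 8 blocks is made up.

static int example_reserved_operation(void)
{
        int ret;

        /* worst-case estimate; BA_CAN_COMMIT allows forcing a commit on -ENOSPC */
        ret = reiser4_grab_space(8, BA_CAN_COMMIT);
        if (ret != 0)
                return ret;

        /* ... perform the tree modification the reservation was made for ... */

        /* return any blocks that were grabbed but not actually used */
        all_grabbed2free();
        return 0;
}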
*/ + if (sbinfo->delete_mutex_owner == current) { + if (reiser4_grab_space + (count, (flags | BA_RESERVED) & ~BA_CAN_COMMIT)) { + warning("zam-1003", + "nested call of grab_reserved fails count=(%llu)", + (unsigned long long)count); + reiser4_release_reserved(super); + return RETERR(-ENOSPC); + } + return 0; + } + + if (reiser4_grab_space(count, flags)) { + mutex_lock(&sbinfo->delete_mutex); + assert("nikita-2929", sbinfo->delete_mutex_owner == NULL); + sbinfo->delete_mutex_owner = current; + + if (reiser4_grab_space(count, flags | BA_RESERVED)) { + warning("zam-833", + "reserved space is not enough (%llu)", + (unsigned long long)count); + reiser4_release_reserved(super); + return RETERR(-ENOSPC); + } + } + return 0; +} + +void reiser4_release_reserved(struct super_block *super) +{ + reiser4_super_info_data *info; + + info = get_super_private(super); + if (info->delete_mutex_owner == current) { + info->delete_mutex_owner = NULL; + mutex_unlock(&info->delete_mutex); + } +} + +static reiser4_super_info_data *grabbed2fake_allocated_head(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sub_from_ctx_grabbed(ctx, count); + + sbinfo = get_super_private(ctx->super); + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + /* return sbinfo locked */ + return sbinfo; +} + +/* is called after @count fake block numbers are allocated and pointer to + those blocks are inserted into tree. */ +static void grabbed2fake_allocated_formatted(void) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = grabbed2fake_allocated_head(1); + sbinfo->blocks_fake_allocated++; + + assert("vs-922", reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/** + * grabbed2fake_allocated_unformatted + * @count: + * + */ +static void grabbed2fake_allocated_unformatted(int count) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = grabbed2fake_allocated_head(count); + sbinfo->blocks_fake_allocated_unformatted += count; + + assert("vs-9221", reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +void grabbed2cluster_reserved(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sub_from_ctx_grabbed(ctx, count); + + sbinfo = get_super_private(ctx->super); + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + sbinfo->blocks_clustered += count; + + assert("edward-504", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void cluster_reserved2grabbed(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + + sbinfo = get_super_private(ctx->super); + spin_lock_reiser4_super(sbinfo); + + sub_from_cluster_reserved(sbinfo, count); + sbinfo->blocks_grabbed += count; + + assert("edward-505", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); + add_to_ctx_grabbed(ctx, count); +} + +void cluster_reserved2free(int count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + cluster_reserved2grabbed(count); + grabbed2free(ctx, sbinfo, count); +} + +static DEFINE_SPINLOCK(fake_lock); +static reiser4_block_nr fake_gen = 0; + +/** + * assign_fake_blocknr + * @blocknr: + * @count: + * + * Obtain a fake block number for new node which will be used to refer to + * this newly allocated node until real 
allocation is done. + */ +static void assign_fake_blocknr(reiser4_block_nr *blocknr, int count) +{ + spin_lock(&fake_lock); + *blocknr = fake_gen; + fake_gen += count; + spin_unlock(&fake_lock); + + BUG_ON(*blocknr & REISER4_BLOCKNR_STATUS_BIT_MASK); + /**blocknr &= ~REISER4_BLOCKNR_STATUS_BIT_MASK;*/ + *blocknr |= REISER4_UNALLOCATED_STATUS_VALUE; + assert("zam-394", zlook(current_tree, blocknr) == NULL); +} + +int assign_fake_blocknr_formatted(reiser4_block_nr * blocknr) +{ + assign_fake_blocknr(blocknr, 1); + grabbed2fake_allocated_formatted(); + return 0; +} + +/** + * fake_blocknrs_unformatted + * @count: number of fake numbers to get + * + * Allocates @count fake block numbers which will be assigned to jnodes + */ +reiser4_block_nr fake_blocknr_unformatted(int count) +{ + reiser4_block_nr blocknr; + + assign_fake_blocknr(&blocknr, count); + grabbed2fake_allocated_unformatted(count); + + return blocknr; +} + +/* adjust sb block counters, if real (on-disk) block allocation immediately + follows grabbing of free disk space. */ +static void grabbed2used(reiser4_context *ctx, reiser4_super_info_data *sbinfo, + __u64 count) +{ + sub_from_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + sbinfo->blocks_used += count; + + assert("nikita-2679", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +/* adjust sb block counters when @count unallocated blocks get mapped to disk */ +static void fake_allocated2used(reiser4_super_info_data *sbinfo, __u64 count, + reiser4_ba_flags_t flags) +{ + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_fake_allocated(sbinfo, count, flags); + sbinfo->blocks_used += count; + + assert("nikita-2680", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +static void flush_reserved2used(txn_atom * atom, __u64 count) +{ + reiser4_super_info_data *sbinfo; + + assert("zam-787", atom != NULL); + assert_spin_locked(&(atom->alock)); + + sub_from_atom_flush_reserved_nolock(atom, (__u32) count); + + sbinfo = get_current_super_private(); + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_flush_reserved(sbinfo, count); + sbinfo->blocks_used += count; + + assert("zam-789", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/* update the per fs blocknr hint default value. */ +void +update_blocknr_hint_default(const struct super_block *s, + const reiser4_block_nr * block) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + + assert("nikita-3342", !reiser4_blocknr_is_fake(block)); + + spin_lock_reiser4_super(sbinfo); + if (*block < sbinfo->block_count) { + sbinfo->blocknr_hint_default = *block; + } else { + warning("zam-676", + "block number %llu is too large to be used in a blocknr hint\n", + (unsigned long long)*block); + dump_stack(); + DEBUGON(1); + } + spin_unlock_reiser4_super(sbinfo); +} + +/* get current value of the default blocknr hint. */ +void get_blocknr_hint_default(reiser4_block_nr * result) +{ + reiser4_super_info_data *sbinfo = get_current_super_private(); + + spin_lock_reiser4_super(sbinfo); + *result = sbinfo->blocknr_hint_default; + assert("zam-677", *result < sbinfo->block_count); + spin_unlock_reiser4_super(sbinfo); +} + +/* Allocate "real" disk blocks by calling a proper space allocation plugin + * method. Blocks are allocated in one contiguous disk region. 
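As a side note on the fake block numbers handed out by assign_fake_blocknr() above, the convention from the BLOCK NUMBERS comment (highest bit set means "fake", leaving 63 bits for real device blocks) can be modelled in a few lines of stand-alone C. The mask name and value below are chosen for the illustration and are not the kernel macros:

#include <assert.h>
#include <stdint.h>

#define FAKE_BLOCKNR_BIT (1ULL << 63)   /* illustrative stand-in for the real mask */

static int blocknr_is_fake(uint64_t blk)
{
        return (blk & FAKE_BLOCKNR_BIT) ? 1 : 0;
}

int main(void)
{
        static uint64_t fake_gen;       /* cyclic counter, as in assign_fake_blocknr() */
        uint64_t fake = fake_gen++ | FAKE_BLOCKNR_BIT;

        assert(blocknr_is_fake(fake));
        assert(!blocknr_is_fake(4096)); /* an ordinary on-disk block number */
        return 0;
}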
The plugin + * independent part accounts blocks by subtracting allocated amount from grabbed + * or fake block counter and add the same amount to the counter of allocated + * blocks. + * + * @hint -- a reiser4 blocknr hint object which contains further block + * allocation hints and parameters (search start, a stage of block + * which will be mapped to disk, etc.), + * @blk -- an out parameter for the beginning of the allocated region, + * @len -- in/out parameter, it should contain the maximum number of allocated + * blocks, after block allocation completes, it contains the length of + * allocated disk region. + * @flags -- see reiser4_ba_flags_t description. + * + * @return -- 0 if success, error code otherwise. + */ +int +reiser4_alloc_blocks(reiser4_blocknr_hint * hint, reiser4_block_nr * blk, + reiser4_block_nr * len, reiser4_ba_flags_t flags) +{ + __u64 needed = *len; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + int ret; + + assert("zam-986", hint != NULL); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + /* For write-optimized data we use default search start value, which is + * close to last write location. */ + if (flags & BA_USE_DEFAULT_SEARCH_START) + get_blocknr_hint_default(&hint->blk); + + /* VITALY: allocator should grab this for internal/tx-lists/similar + only. */ +/* VS-FIXME-HANS: why is this comment above addressed to vitaly (from vitaly)?*/ + if (hint->block_stage == BLOCK_NOT_COUNTED) { + ret = reiser4_grab_space_force(*len, flags); + if (ret != 0) + return ret; + } + + ret = + sa_alloc_blocks(reiser4_get_space_allocator(ctx->super), + hint, (int)needed, blk, len); + + if (!ret) { + assert("zam-680", *blk < reiser4_block_count(ctx->super)); + assert("zam-681", + *blk + *len <= reiser4_block_count(ctx->super)); + + if (flags & BA_PERMANENT) { + /* we assume that current atom exists at this moment */ + txn_atom *atom = get_current_atom_locked(); + atom->nr_blocks_allocated += *len; + spin_unlock_atom(atom); + } + + switch (hint->block_stage) { + case BLOCK_NOT_COUNTED: + case BLOCK_GRABBED: + grabbed2used(ctx, sbinfo, *len); + break; + case BLOCK_UNALLOCATED: + fake_allocated2used(sbinfo, *len, flags); + break; + case BLOCK_FLUSH_RESERVED: + { + txn_atom *atom = get_current_atom_locked(); + flush_reserved2used(atom, *len); + spin_unlock_atom(atom); + } + break; + default: + impossible("zam-531", "wrong block stage"); + } + } else { + assert("zam-821", + ergo(hint->max_dist == 0 + && !hint->backward, ret != -ENOSPC)); + if (hint->block_stage == BLOCK_NOT_COUNTED) + grabbed2free(ctx, sbinfo, needed); + } + + return ret; +} + +/** + * ask block allocator for some unformatted blocks + */ +void allocate_blocks_unformatted(reiser4_blocknr_hint *preceder, + reiser4_block_nr wanted_count, + reiser4_block_nr *first_allocated, + reiser4_block_nr *allocated, + block_stage_t block_stage) +{ + *allocated = wanted_count; + preceder->max_dist = 0; /* scan whole disk, if needed */ + + /* that number of blocks (wanted_count) is either in UNALLOCATED or in GRABBED */ + preceder->block_stage = block_stage; + + /* FIXME: we do not handle errors here now */ + check_me("vs-420", + reiser4_alloc_blocks(preceder, first_allocated, allocated, + BA_PERMANENT) == 0); + /* update flush_pos's preceder to last allocated block number */ + preceder->blk = *first_allocated + *allocated - 1; +} + +/* used -> fake_allocated -> grabbed -> free */ + +/* adjust sb block counters when @count unallocated blocks get unmapped from + disk */ +static void 
+used2fake_allocated(reiser4_super_info_data * sbinfo, __u64 count, + int formatted) +{ + spin_lock_reiser4_super(sbinfo); + + if (formatted) + sbinfo->blocks_fake_allocated += count; + else + sbinfo->blocks_fake_allocated_unformatted += count; + + sub_from_sb_used(sbinfo, count); + + assert("nikita-2681", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +static void +used2flush_reserved(reiser4_super_info_data * sbinfo, txn_atom * atom, + __u64 count, reiser4_ba_flags_t flags UNUSED_ARG) +{ + assert("nikita-2791", atom != NULL); + assert_spin_locked(&(atom->alock)); + + add_to_atom_flush_reserved_nolock(atom, (__u32) count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_flush_reserved += count; + /*add_to_sb_flush_reserved(sbinfo, count); */ + sub_from_sb_used(sbinfo, count); + + assert("nikita-2681", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/* disk space, virtually used by fake block numbers is counted as "grabbed" + again. */ +static void +fake_allocated2grabbed(reiser4_context * ctx, reiser4_super_info_data * sbinfo, + __u64 count, reiser4_ba_flags_t flags) +{ + add_to_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + assert("nikita-2682", reiser4_check_block_counters(ctx->super)); + + sbinfo->blocks_grabbed += count; + sub_from_sb_fake_allocated(sbinfo, count, flags & BA_FORMATTED); + + assert("nikita-2683", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + fake_allocated2grabbed(ctx, sbinfo, count, flags); + grabbed2free(ctx, sbinfo, count); +} + +void grabbed2free_mark(__u64 mark) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + assert("nikita-3007", (__s64) mark >= 0); + assert("nikita-3006", ctx->grabbed_blocks >= mark); + grabbed2free(ctx, sbinfo, ctx->grabbed_blocks - mark); +} + +/** + * grabbed2free - adjust grabbed and free block counters + * @ctx: context to update grabbed block counter of + * @sbinfo: super block to update grabbed and free block counters of + * @count: number of blocks to adjust counters by + * + * Decreases context's and per filesystem's counters of grabbed + * blocks. Increases per filesystem's counter of free blocks. 
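One consequence worth spelling out: because the context keeps a running count of grabbed blocks, grabbed2free_mark() supports a simple save/restore pattern for speculative reservations. The sketch below is hypothetical caller code; the helpers it calls all appear earlier in this file, while the function name and the count of 16 are made up.

static void example_speculative_grab(void)
{
        /* remember the current reservation level */
        __u64 mark = get_current_context()->grabbed_blocks;

        if (reiser4_grab_space(16, 0) != 0)
                return;

        /* ... try an operation that may consume only part of the 16 blocks ... */

        /* drop everything grabbed above the remembered level */
        grabbed2free_mark(mark);
}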
+ */ +void grabbed2free(reiser4_context *ctx, reiser4_super_info_data *sbinfo, + __u64 count) +{ + sub_from_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + sub_from_sb_grabbed(sbinfo, count); + sbinfo->blocks_free += count; + assert("nikita-2684", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + assert("vs-1095", atom); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + sub_from_ctx_grabbed(ctx, count); + + add_to_atom_flush_reserved_nolock(atom, count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_flush_reserved += count; + sub_from_sb_grabbed(sbinfo, count); + + assert("vpf-292", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +void grabbed2flush_reserved(__u64 count) +{ + txn_atom *atom = get_current_atom_locked(); + + grabbed2flush_reserved_nolock(atom, count); + + spin_unlock_atom(atom); +} + +void flush_reserved2grabbed(txn_atom * atom, __u64 count) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + assert("nikita-2788", atom != NULL); + assert_spin_locked(&(atom->alock)); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + add_to_ctx_grabbed(ctx, count); + + sub_from_atom_flush_reserved_nolock(atom, (__u32) count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_grabbed += count; + sub_from_sb_flush_reserved(sbinfo, count); + + assert("vpf-292", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +/** + * all_grabbed2free - releases all blocks grabbed in context + * + * Decreases context's and super block's grabbed block counters by number of + * blocks grabbed by current context and increases super block's free block + * counter correspondingly. + */ +void all_grabbed2free(void) +{ + reiser4_context *ctx = get_current_context(); + + grabbed2free(ctx, get_super_private(ctx->super), ctx->grabbed_blocks); +} + +/* adjust sb block counters if real (on-disk) blocks do not become unallocated + after freeing, @count blocks become "grabbed". */ +static void +used2grabbed(reiser4_context * ctx, reiser4_super_info_data * sbinfo, + __u64 count) +{ + add_to_ctx_grabbed(ctx, count); + + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_grabbed += count; + sub_from_sb_used(sbinfo, count); + + assert("nikita-2685", reiser4_check_block_counters(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); +} + +/* this used to be done through used2grabbed and grabbed2free*/ +static void used2free(reiser4_super_info_data * sbinfo, __u64 count) +{ + spin_lock_reiser4_super(sbinfo); + + sbinfo->blocks_free += count; + sub_from_sb_used(sbinfo, count); + + assert("nikita-2685", + reiser4_check_block_counters(reiser4_get_current_sb())); + + spin_unlock_reiser4_super(sbinfo); +} + +/* check "allocated" state of given block range */ +int +reiser4_check_blocks(const reiser4_block_nr * start, + const reiser4_block_nr * len, int desired) +{ + return sa_check_blocks(start, len, desired); +} + +/* Blocks deallocation function may do an actual deallocation through space + plugin allocation or store deleted block numbers in atom's delete_set data + structure depend on @defer parameter. */ + +/* if BA_DEFER bit is not turned on, @target_stage means the stage of blocks + which will be deleted from WORKING bitmap. 
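A concrete, made-up use of the deferred path: record an extent in the atom's delete set now and let the working bitmap be updated after commit, when apply_dset() runs. The target stage is not consulted on the deferred path (see the function body below), so BLOCK_NOT_COUNTED is passed only as a placeholder; the wrapper name and block numbers are hypothetical.

static void example_defer_free(reiser4_block_nr start, reiser4_block_nr len)
{
        /* only records the extent in the atom's delete set; the actual
         * bitmap update happens after commit via apply_dset() */
        reiser4_dealloc_blocks(&start, &len, BLOCK_NOT_COUNTED, BA_DEFER);
}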
They might be just unmapped from + disk, or freed but disk space is still grabbed by current thread, or these + blocks must not be counted in any reiser4 sb block counters, + see block_stage_t comment */ + +/* BA_FORMATTED bit is only used when BA_DEFER in not present: it is used to + distinguish blocks allocated for unformatted and formatted nodes */ + +int +reiser4_dealloc_blocks(const reiser4_block_nr * start, + const reiser4_block_nr * len, + block_stage_t target_stage, reiser4_ba_flags_t flags) +{ + txn_atom *atom = NULL; + int ret; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + void *new_entry = NULL; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + if (REISER4_DEBUG) { + assert("zam-431", *len != 0); + assert("zam-432", *start != 0); + assert("zam-558", !reiser4_blocknr_is_fake(start)); + + spin_lock_reiser4_super(sbinfo); + assert("zam-562", *start < sbinfo->block_count); + spin_unlock_reiser4_super(sbinfo); + } + + if (flags & BA_DEFER) { + /* + * These blocks will be later deallocated by apply_dset(). + * It is equivalent to a non-deferred deallocation with target + * stage BLOCK_NOT_COUNTED. + */ + + /* store deleted block numbers in the atom's deferred delete set + for further actual deletion */ + do { + atom = get_current_atom_locked(); + assert("zam-430", atom != NULL); + + ret = atom_dset_deferred_add_extent(atom, &new_entry, start, len); + + if (ret == -ENOMEM) + return ret; + + /* This loop might spin at most two times */ + } while (ret == -E_REPEAT); + + assert("zam-477", ret == 0); + assert("zam-433", atom != NULL); + + spin_unlock_atom(atom); + + } else { + assert("zam-425", get_current_super_private() != NULL); + sa_dealloc_blocks(reiser4_get_space_allocator(ctx->super), + *start, *len); + + if (flags & BA_PERMANENT) { + /* These blocks were counted as allocated, we have to + * revert it back if allocation is discarded. 
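+			 * The counter reverted is atom->nr_blocks_allocated;
+			 * it is decremented by *len just below, under the
+			 * atom lock.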
*/ + txn_atom *atom = get_current_atom_locked(); + atom->nr_blocks_allocated -= *len; + spin_unlock_atom(atom); + } + + switch (target_stage) { + case BLOCK_NOT_COUNTED: + assert("vs-960", flags & BA_FORMATTED); + /* VITALY: This is what was grabbed for + internal/tx-lists/similar only */ + used2free(sbinfo, *len); + break; + + case BLOCK_GRABBED: + used2grabbed(ctx, sbinfo, *len); + break; + + case BLOCK_UNALLOCATED: + used2fake_allocated(sbinfo, *len, flags & BA_FORMATTED); + break; + + case BLOCK_FLUSH_RESERVED:{ + txn_atom *atom; + + atom = get_current_atom_locked(); + used2flush_reserved(sbinfo, atom, *len, + flags & BA_FORMATTED); + spin_unlock_atom(atom); + break; + } + default: + impossible("zam-532", "wrong block stage"); + } + } + + return 0; +} + +/* wrappers for block allocator plugin methods */ +int reiser4_pre_commit_hook(void) +{ + assert("zam-502", get_current_super_private() != NULL); + sa_pre_commit_hook(); + return 0; +} + +/* an actor which applies delete set to block allocator data */ +static int +apply_dset(txn_atom * atom UNUSED_ARG, const reiser4_block_nr * a, + const reiser4_block_nr * b, void *data UNUSED_ARG) +{ + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + __u64 len = 1; + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + assert("zam-877", atom->stage >= ASTAGE_PRE_COMMIT); + assert("zam-552", sbinfo != NULL); + + if (b != NULL) + len = *b; + + if (REISER4_DEBUG) { + spin_lock_reiser4_super(sbinfo); + + assert("zam-554", *a < reiser4_block_count(ctx->super)); + assert("zam-555", *a + len <= reiser4_block_count(ctx->super)); + + spin_unlock_reiser4_super(sbinfo); + } + + sa_dealloc_blocks(&sbinfo->space_allocator, *a, len); + /* adjust sb block counters */ + used2free(sbinfo, len); + return 0; +} + +void reiser4_post_commit_hook(void) +{ +#ifdef REISER4_DEBUG + txn_atom *atom; + + atom = get_current_atom_locked(); + assert("zam-452", atom->stage == ASTAGE_POST_COMMIT); + spin_unlock_atom(atom); +#endif + + assert("zam-504", get_current_super_private() != NULL); + sa_post_commit_hook(); +} + +void reiser4_post_write_back_hook(void) +{ + struct list_head discarded_set; + txn_atom *atom; + int ret; + + /* process and issue discard requests */ + blocknr_list_init (&discarded_set); + do { + atom = get_current_atom_locked(); + ret = discard_atom(atom, &discarded_set); + } while (ret == -E_REPEAT); + + if (ret) { + warning("intelfx-8", "discard atom failed (%d)", ret); + } + + atom = get_current_atom_locked(); + discard_atom_post(atom, &discarded_set); + + /* do the block deallocation which was deferred + until commit is done */ + atom_dset_deferred_apply(atom, apply_dset, NULL, 1); + + assert("zam-504", get_current_super_private() != NULL); + sa_post_write_back_hook(); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/block_alloc.h b/fs/reiser4/block_alloc.h new file mode 100644 index 000000000000..a4e98af51903 --- /dev/null +++ b/fs/reiser4/block_alloc.h @@ -0,0 +1,177 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined(__FS_REISER4_BLOCK_ALLOC_H__) +#define __FS_REISER4_BLOCK_ALLOC_H__ + +#include "dformat.h" +#include "forward.h" + +#include <linux/types.h> /* for __u?? 
*/ +#include <linux/fs.h> + +/* Mask when is applied to given block number shows is that block number is a + fake one */ +#define REISER4_FAKE_BLOCKNR_BIT_MASK 0x8000000000000000ULL +/* Mask which isolates a type of object this fake block number was assigned + to */ +#define REISER4_BLOCKNR_STATUS_BIT_MASK 0xC000000000000000ULL + +/*result after applying the REISER4_BLOCKNR_STATUS_BIT_MASK should be compared + against these two values to understand is the object unallocated or bitmap + shadow object (WORKING BITMAP block, look at the plugin/space/bitmap.c) */ +#define REISER4_UNALLOCATED_STATUS_VALUE 0xC000000000000000ULL +#define REISER4_BITMAP_BLOCKS_STATUS_VALUE 0x8000000000000000ULL + +/* specification how block allocation was counted in sb block counters */ +typedef enum { + BLOCK_NOT_COUNTED = 0, /* reiser4 has no info about this block yet */ + BLOCK_GRABBED = 1, /* free space grabbed for further allocation + of this block */ + BLOCK_FLUSH_RESERVED = 2, /* block is reserved for flush needs. */ + BLOCK_UNALLOCATED = 3, /* block is used for existing in-memory object + ( unallocated formatted or unformatted + node) */ + BLOCK_ALLOCATED = 4 /* block is mapped to disk, real on-disk block + number assigned */ +} block_stage_t; + +/* a hint for block allocator */ +struct reiser4_blocknr_hint { + /* FIXME: I think we want to add a longterm lock on the bitmap block + here. This is to prevent jnode_flush() calls from interleaving + allocations on the same bitmap, once a hint is established. */ + + /* search start hint */ + reiser4_block_nr blk; + /* if not zero, it is a region size we search for free blocks in */ + reiser4_block_nr max_dist; + /* level for allocation, may be useful have branch-level and higher + write-optimized. */ + tree_level level; + /* block allocator assumes that blocks, which will be mapped to disk, + are in this specified block_stage */ + block_stage_t block_stage; + /* If direction = 1 allocate blocks in backward direction from the end + * of disk to the beginning of disk. */ + unsigned int backward:1; + +}; + +/* These flags control block allocation/deallocation behavior */ +enum reiser4_ba_flags { + /* do allocatations from reserved (5%) area */ + BA_RESERVED = (1 << 0), + + /* block allocator can do commit trying to recover free space */ + BA_CAN_COMMIT = (1 << 1), + + /* if operation will be applied to formatted block */ + BA_FORMATTED = (1 << 2), + + /* defer actual block freeing until transaction commit */ + BA_DEFER = (1 << 3), + + /* allocate blocks for permanent fs objects (formatted or unformatted), + not wandered of log blocks */ + BA_PERMANENT = (1 << 4), + + /* grab space even it was disabled */ + BA_FORCE = (1 << 5), + + /* use default start value for free blocks search. 
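+	   See update_blocknr_hint_default() and get_blocknr_hint_default()
+	   declared below.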
*/ + BA_USE_DEFAULT_SEARCH_START = (1 << 6) +}; + +typedef enum reiser4_ba_flags reiser4_ba_flags_t; + +extern void reiser4_blocknr_hint_init(reiser4_blocknr_hint * hint); +extern void reiser4_blocknr_hint_done(reiser4_blocknr_hint * hint); +extern void update_blocknr_hint_default(const struct super_block *, + const reiser4_block_nr *); +extern void get_blocknr_hint_default(reiser4_block_nr *); + +extern reiser4_block_nr reiser4_fs_reserved_space(struct super_block *super); + +int assign_fake_blocknr_formatted(reiser4_block_nr *); +reiser4_block_nr fake_blocknr_unformatted(int); + +/* free -> grabbed -> fake_allocated -> used */ + +int reiser4_grab_space(__u64 count, reiser4_ba_flags_t flags); +void all_grabbed2free(void); +void grabbed2free(reiser4_context * , reiser4_super_info_data * , __u64 count); +void fake_allocated2free(__u64 count, reiser4_ba_flags_t flags); +void grabbed2flush_reserved_nolock(txn_atom * atom, __u64 count); +void grabbed2flush_reserved(__u64 count); +int reiser4_alloc_blocks(reiser4_blocknr_hint * hint, + reiser4_block_nr * start, + reiser4_block_nr * len, reiser4_ba_flags_t flags); +int reiser4_dealloc_blocks(const reiser4_block_nr *, + const reiser4_block_nr *, + block_stage_t, reiser4_ba_flags_t flags); + +static inline int reiser4_alloc_block(reiser4_blocknr_hint * hint, + reiser4_block_nr * start, + reiser4_ba_flags_t flags) +{ + reiser4_block_nr one = 1; + return reiser4_alloc_blocks(hint, start, &one, flags); +} + +static inline int reiser4_dealloc_block(const reiser4_block_nr * block, + block_stage_t stage, + reiser4_ba_flags_t flags) +{ + const reiser4_block_nr one = 1; + return reiser4_dealloc_blocks(block, &one, stage, flags); +} + +#define reiser4_grab_space_force(count, flags) \ + reiser4_grab_space(count, flags | BA_FORCE) + +extern void grabbed2free_mark(__u64 mark); +extern int reiser4_grab_reserved(struct super_block *, + __u64, reiser4_ba_flags_t); +extern void reiser4_release_reserved(struct super_block *super); + +/* grabbed -> fake_allocated */ + +/* fake_allocated -> used */ + +/* used -> fake_allocated -> grabbed -> free */ + +extern void flush_reserved2grabbed(txn_atom * atom, __u64 count); + +extern int reiser4_blocknr_is_fake(const reiser4_block_nr * da); + +extern void grabbed2cluster_reserved(int count); +extern void cluster_reserved2grabbed(int count); +extern void cluster_reserved2free(int count); + +extern int reiser4_check_block_counters(const struct super_block *); + + +extern int reiser4_check_blocks(const reiser4_block_nr *start, + const reiser4_block_nr *len, int desired); + +static inline int reiser4_check_block(const reiser4_block_nr *start, + int desired) +{ + return reiser4_check_blocks(start, NULL, desired); +} + +extern int reiser4_pre_commit_hook(void); +extern void reiser4_post_commit_hook(void); +extern void reiser4_post_write_back_hook(void); + +#endif /* __FS_REISER4_BLOCK_ALLOC_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/blocknrlist.c b/fs/reiser4/blocknrlist.c new file mode 100644 index 000000000000..39a4a9bd78ec --- /dev/null +++ b/fs/reiser4/blocknrlist.c @@ -0,0 +1,336 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* This is a block list implementation, used to create ordered block sets + (at the cost of being less memory efficient than blocknr_set). + It is used by discard code. 
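+
+   A typical life cycle, sketched from the functions defined below (the
+   actor and its data argument are placeholders):
+
+	struct list_head blist;
+
+	blocknr_list_init(&blist);
+	... populate via blocknr_list_add_extent() under the atom lock ...
+	blocknr_list_sort_and_join(&blist);
+	blocknr_list_iterator(atom, &blist, actor, data, 0);
+	blocknr_list_destroy(&blist);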
*/ + +#include "debug.h" +#include "dformat.h" +#include "txnmgr.h" +#include "context.h" +#include "super.h" + +#include <linux/slab.h> +#include <linux/list_sort.h> + +static struct kmem_cache *blocknr_list_slab = NULL; + +/** + * Represents an extent range [@start; @end). + */ +struct blocknr_list_entry { + reiser4_block_nr start, len; + struct list_head link; +}; + +#define blocknr_list_entry(ptr) list_entry(ptr, blocknr_list_entry, link) + +static void blocknr_list_entry_init(blocknr_list_entry *entry) +{ + assert("intelfx-11", entry != NULL); + + entry->start = 0; + entry->len = 0; + INIT_LIST_HEAD(&entry->link); +} + +static blocknr_list_entry *blocknr_list_entry_alloc(void) +{ + blocknr_list_entry *entry; + + entry = (blocknr_list_entry *)kmem_cache_alloc(blocknr_list_slab, + reiser4_ctx_gfp_mask_get()); + if (entry == NULL) { + return NULL; + } + + blocknr_list_entry_init(entry); + + return entry; +} + +static void blocknr_list_entry_free(blocknr_list_entry *entry) +{ + assert("intelfx-12", entry != NULL); + + kmem_cache_free(blocknr_list_slab, entry); +} + +/** + * Given ranges @to and [@start; @end), if they overlap, their union + * is calculated and saved in @to. + */ +static int blocknr_list_entry_merge(blocknr_list_entry *to, + reiser4_block_nr start, + reiser4_block_nr len) +{ + reiser4_block_nr end, to_end; + + assert("intelfx-13", to != NULL); + + assert("intelfx-16", to->len > 0); + assert("intelfx-17", len > 0); + + end = start + len; + to_end = to->start + to->len; + + if ((to->start <= end) && (start <= to_end)) { + if (start < to->start) { + to->start = start; + } + + if (end > to_end) { + to_end = end; + } + + to->len = to_end - to->start; + + return 0; + } + + return -1; +} + +static int blocknr_list_entry_merge_entry(blocknr_list_entry *to, + blocknr_list_entry *from) +{ + assert("intelfx-18", from != NULL); + + return blocknr_list_entry_merge(to, from->start, from->len); +} + +/** + * A comparison function for list_sort(). + * + * "The comparison function @cmp must return a negative value if @a + * should sort before @b, and a positive value if @a should sort after + * @b. If @a and @b are equivalent, and their original relative + * ordering is to be preserved, @cmp must return 0." + */ +static int blocknr_list_entry_compare(void* priv UNUSED_ARG, + struct list_head *a, struct list_head *b) +{ + blocknr_list_entry *entry_a, *entry_b; + reiser4_block_nr entry_a_end, entry_b_end; + + assert("intelfx-19", a != NULL); + assert("intelfx-20", b != NULL); + + entry_a = blocknr_list_entry(a); + entry_b = blocknr_list_entry(b); + + entry_a_end = entry_a->start + entry_a->len; + entry_b_end = entry_b->start + entry_b->len; + + /* First sort by starting block numbers... */ + if (entry_a->start < entry_b->start) { + return -1; + } + + if (entry_a->start > entry_b->start) { + return 1; + } + + /** Then by ending block numbers. + * If @a contains @b, it will be sorted before. 
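+	 * That way the join pass in blocknr_list_sort_and_join() can absorb
+	 * a contained extent into its container in a single forward sweep.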
*/ + if (entry_a_end > entry_b_end) { + return -1; + } + + if (entry_a_end < entry_b_end) { + return 1; + } + + return 0; +} + +int blocknr_list_init_static(void) +{ + assert("intelfx-54", blocknr_list_slab == NULL); + + blocknr_list_slab = kmem_cache_create("blocknr_list_entry", + sizeof(blocknr_list_entry), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, + NULL); + if (blocknr_list_slab == NULL) { + return RETERR(-ENOMEM); + } + + return 0; +} + +void blocknr_list_done_static(void) +{ + destroy_reiser4_cache(&blocknr_list_slab); +} + +void blocknr_list_init(struct list_head* blist) +{ + assert("intelfx-24", blist != NULL); + + INIT_LIST_HEAD(blist); +} + +void blocknr_list_destroy(struct list_head* blist) +{ + struct list_head *pos, *tmp; + blocknr_list_entry *entry; + + assert("intelfx-25", blist != NULL); + + list_for_each_safe(pos, tmp, blist) { + entry = blocknr_list_entry(pos); + list_del_init(pos); + blocknr_list_entry_free(entry); + } + + assert("intelfx-48", list_empty(blist)); +} + +void blocknr_list_merge(struct list_head *from, struct list_head *to) +{ + assert("intelfx-26", from != NULL); + assert("intelfx-27", to != NULL); + + list_splice_tail_init(from, to); + + assert("intelfx-49", list_empty(from)); +} + +void blocknr_list_sort_and_join(struct list_head *blist) +{ + struct list_head *pos, *next; + struct blocknr_list_entry *entry, *next_entry; + + assert("intelfx-50", blist != NULL); + + /* Step 1. Sort the extent list. */ + list_sort(NULL, blist, blocknr_list_entry_compare); + + /* Step 2. Join adjacent extents in the list. */ + pos = blist->next; + next = pos->next; + entry = blocknr_list_entry(pos); + + for (; next != blist; next = pos->next) { + /** @next is a valid node at this point */ + next_entry = blocknr_list_entry(next); + + /** try to merge @next into @pos */ + if (!blocknr_list_entry_merge_entry(entry, next_entry)) { + /** successful; delete the @next node. + * next merge will be attempted into the same node. */ + list_del_init(next); + blocknr_list_entry_free(next_entry); + } else { + /** otherwise advance @pos. */ + pos = next; + entry = next_entry; + } + } +} + +int blocknr_list_add_extent(txn_atom *atom, + struct list_head *blist, + blocknr_list_entry **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len) +{ + assert("intelfx-29", atom != NULL); + assert("intelfx-42", atom_is_protected(atom)); + assert("intelfx-43", blist != NULL); + assert("intelfx-30", new_entry != NULL); + assert("intelfx-31", start != NULL); + assert("intelfx-32", len != NULL && *len > 0); + + if (*new_entry == NULL) { + /* + * Optimization: try to merge new extent into the last one. + */ + if (!list_empty(blist)) { + blocknr_list_entry *last_entry; + last_entry = blocknr_list_entry(blist->prev); + if (!blocknr_list_entry_merge(last_entry, *start, *len)) { + return 0; + } + } + + /* + * Otherwise, allocate a new entry and tell -E_REPEAT. + * Next time we'll take the branch below. + */ + spin_unlock_atom(atom); + *new_entry = blocknr_list_entry_alloc(); + return (*new_entry != NULL) ? -E_REPEAT : RETERR(-ENOMEM); + } + + /* + * The entry has been allocated beforehand, fill it and link to the list. 
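+	 * (We get here on the -E_REPEAT retry, after the caller has
+	 * re-taken the atom lock.)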
+ */ + (*new_entry)->start = *start; + (*new_entry)->len = *len; + list_add_tail(&(*new_entry)->link, blist); + + return 0; +} + +int blocknr_list_iterator(txn_atom *atom, + struct list_head *blist, + blocknr_set_actor_f actor, + void *data, + int delete) +{ + struct list_head *pos; + blocknr_list_entry *entry; + int ret = 0; + + assert("intelfx-46", blist != NULL); + assert("intelfx-47", actor != NULL); + + if (delete) { + struct list_head *tmp; + + list_for_each_safe(pos, tmp, blist) { + entry = blocknr_list_entry(pos); + + /* + * Do not exit, delete flag is set. Instead, on the first error we + * downgrade from iterating to just deleting. + */ + if (ret == 0) { + ret = actor(atom, &entry->start, &entry->len, data); + } + + list_del_init(pos); + blocknr_list_entry_free(entry); + } + + assert("intelfx-44", list_empty(blist)); + } else { + list_for_each(pos, blist) { + entry = blocknr_list_entry(pos); + + ret = actor(atom, &entry->start, &entry->len, data); + + if (ret != 0) { + return ret; + } + } + } + + return ret; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/blocknrset.c b/fs/reiser4/blocknrset.c new file mode 100644 index 000000000000..2f18cbc10da3 --- /dev/null +++ b/fs/reiser4/blocknrset.c @@ -0,0 +1,399 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by +reiser4/README */ + +/* This file contains code for various block number sets used by the atom to + track the deleted set and wandered block mappings. */ + +#include "debug.h" +#include "dformat.h" +#include "txnmgr.h" +#include "context.h" +#include "super.h" + +#include <linux/slab.h> + +/* The proposed data structure for storing unordered block number sets is a + list of elements, each of which contains an array of block number or/and + array of block number pairs. That element called blocknr_set_entry is used + to store block numbers from the beginning and for extents from the end of + the data field (char data[...]). The ->nr_blocks and ->nr_pairs fields + count numbers of blocks and extents. + + +------------------- blocknr_set_entry->data ------------------+ + |block1|block2| ... <free space> ... |pair3|pair2|pair1| + +------------------------------------------------------------+ + + When current blocknr_set_entry is full, allocate a new one. */ + +/* Usage examples: blocknr sets are used in reiser4 for storing atom's delete + * set (single blocks and block extents), in that case blocknr pair represent an + * extent; atom's wandered map is also stored as a blocknr set, blocknr pairs + * there represent a (real block) -> (wandered block) mapping. */ + +/* Protection: blocknr sets belong to reiser4 atom, and + * their modifications are performed with the atom lock held */ + +/* The total size of a blocknr_set_entry. */ +#define BLOCKNR_SET_ENTRY_SIZE 128 + +/* The number of blocks that can fit the blocknr data area. */ +#define BLOCKNR_SET_ENTRIES_NUMBER \ + ((BLOCKNR_SET_ENTRY_SIZE - \ + 2 * sizeof(unsigned) - \ + sizeof(struct list_head)) / \ + sizeof(reiser4_block_nr)) + +static struct kmem_cache *blocknr_set_slab = NULL; + +/* An entry of the blocknr_set */ +struct blocknr_set_entry { + unsigned nr_singles; + unsigned nr_pairs; + struct list_head link; + reiser4_block_nr entries[BLOCKNR_SET_ENTRIES_NUMBER]; +}; + +/* A pair of blocks as recorded in the blocknr_set_entry data. 
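+   Pairs are packed from the end of the entries[] array while single blocks
+   grow from its beginning, see bse_get_pair() and bse_put_single() below.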
*/ +struct blocknr_pair { + reiser4_block_nr a; + reiser4_block_nr b; +}; + +/* Return the number of blocknr slots available in a blocknr_set_entry. */ +/* Audited by: green(2002.06.11) */ +static unsigned bse_avail(blocknr_set_entry * bse) +{ + unsigned used = bse->nr_singles + 2 * bse->nr_pairs; + + assert("jmacd-5088", BLOCKNR_SET_ENTRIES_NUMBER >= used); + cassert(sizeof(blocknr_set_entry) == BLOCKNR_SET_ENTRY_SIZE); + + return BLOCKNR_SET_ENTRIES_NUMBER - used; +} + +/* Initialize a blocknr_set_entry. */ +static void bse_init(blocknr_set_entry *bse) +{ + bse->nr_singles = 0; + bse->nr_pairs = 0; + INIT_LIST_HEAD(&bse->link); +} + +/* Allocate and initialize a blocknr_set_entry. */ +/* Audited by: green(2002.06.11) */ +static blocknr_set_entry *bse_alloc(void) +{ + blocknr_set_entry *e; + + if ((e = (blocknr_set_entry *) kmem_cache_alloc(blocknr_set_slab, + reiser4_ctx_gfp_mask_get())) == NULL) + return NULL; + + bse_init(e); + + return e; +} + +/* Free a blocknr_set_entry. */ +/* Audited by: green(2002.06.11) */ +static void bse_free(blocknr_set_entry * bse) +{ + kmem_cache_free(blocknr_set_slab, bse); +} + +/* Add a block number to a blocknr_set_entry */ +/* Audited by: green(2002.06.11) */ +static void +bse_put_single(blocknr_set_entry * bse, const reiser4_block_nr * block) +{ + assert("jmacd-5099", bse_avail(bse) >= 1); + + bse->entries[bse->nr_singles++] = *block; +} + +/* Get a pair of block numbers */ +/* Audited by: green(2002.06.11) */ +static inline struct blocknr_pair *bse_get_pair(blocknr_set_entry * bse, + unsigned pno) +{ + assert("green-1", BLOCKNR_SET_ENTRIES_NUMBER >= 2 * (pno + 1)); + + return (struct blocknr_pair *) (bse->entries + + BLOCKNR_SET_ENTRIES_NUMBER - + 2 * (pno + 1)); +} + +/* Add a pair of block numbers to a blocknr_set_entry */ +/* Audited by: green(2002.06.11) */ +static void +bse_put_pair(blocknr_set_entry * bse, const reiser4_block_nr * a, + const reiser4_block_nr * b) +{ + struct blocknr_pair *pair; + + assert("jmacd-5100", bse_avail(bse) >= 2 && a != NULL && b != NULL); + + pair = bse_get_pair(bse, bse->nr_pairs++); + + pair->a = *a; + pair->b = *b; +} + +/* Add either a block or pair of blocks to the block number set. The first + blocknr (@a) must be non-NULL. If @b is NULL a single blocknr is added, if + @b is non-NULL a pair is added. The block number set belongs to atom, and + the call is made with the atom lock held. There may not be enough space in + the current blocknr_set_entry. If new_bsep points to a non-NULL + blocknr_set_entry then it will be added to the blocknr_set and new_bsep + will be set to NULL. If new_bsep contains NULL then the atom lock will be + released and a new bse will be allocated in new_bsep. E_REPEAT will be + returned with the atom unlocked for the operation to be tried again. If + the operation succeeds, 0 is returned. If new_bsep is non-NULL and not + used during the call, it will be freed automatically. */ +static int blocknr_set_add(txn_atom *atom, struct list_head *bset, + blocknr_set_entry **new_bsep, const reiser4_block_nr *a, + const reiser4_block_nr *b) +{ + blocknr_set_entry *bse; + unsigned entries_needed; + + assert("jmacd-5101", a != NULL); + + entries_needed = (b == NULL) ? 1 : 2; + if (list_empty(bset) || + bse_avail(list_entry(bset->next, blocknr_set_entry, link)) < entries_needed) { + /* See if a bse was previously allocated. */ + if (*new_bsep == NULL) { + spin_unlock_atom(atom); + *new_bsep = bse_alloc(); + return (*new_bsep != NULL) ? 
-E_REPEAT : + RETERR(-ENOMEM); + } + + /* Put it on the head of the list. */ + list_add(&((*new_bsep)->link), bset); + + *new_bsep = NULL; + } + + /* Add the single or pair. */ + bse = list_entry(bset->next, blocknr_set_entry, link); + if (b == NULL) { + bse_put_single(bse, a); + } else { + bse_put_pair(bse, a, b); + } + + /* If new_bsep is non-NULL then there was an allocation race, free this + copy. */ + if (*new_bsep != NULL) { + bse_free(*new_bsep); + *new_bsep = NULL; + } + + return 0; +} + +/* Add an extent to the block set. If the length is 1, it is treated as a + single block (e.g., reiser4_set_add_block). */ +/* Audited by: green(2002.06.11) */ +/* Auditor note: Entire call chain cannot hold any spinlocks, because + kmalloc might schedule. The only exception is atom spinlock, which is + properly freed. */ +int +blocknr_set_add_extent(txn_atom * atom, + struct list_head *bset, + blocknr_set_entry ** new_bsep, + const reiser4_block_nr * start, + const reiser4_block_nr * len) +{ + assert("jmacd-5102", start != NULL && len != NULL && *len > 0); + return blocknr_set_add(atom, bset, new_bsep, start, + *len == 1 ? NULL : len); +} + +/* Add a block pair to the block set. It adds exactly a pair, which is checked + * by an assertion that both arguments are not null.*/ +/* Audited by: green(2002.06.11) */ +/* Auditor note: Entire call chain cannot hold any spinlocks, because + kmalloc might schedule. The only exception is atom spinlock, which is + properly freed. */ +int +blocknr_set_add_pair(txn_atom * atom, + struct list_head *bset, + blocknr_set_entry ** new_bsep, const reiser4_block_nr * a, + const reiser4_block_nr * b) +{ + assert("jmacd-5103", a != NULL && b != NULL); + return blocknr_set_add(atom, bset, new_bsep, a, b); +} + +/* Initialize slab cache of blocknr_set_entry objects. */ +int blocknr_set_init_static(void) +{ + assert("intelfx-55", blocknr_set_slab == NULL); + + blocknr_set_slab = kmem_cache_create("blocknr_set_entry", + sizeof(blocknr_set_entry), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, + NULL); + + if (blocknr_set_slab == NULL) { + return RETERR(-ENOMEM); + } + + return 0; +} + +/* Destroy slab cache of blocknr_set_entry objects. */ +void blocknr_set_done_static(void) +{ + destroy_reiser4_cache(&blocknr_set_slab); +} + +/* Initialize a blocknr_set. */ +void blocknr_set_init(struct list_head *bset) +{ + INIT_LIST_HEAD(bset); +} + +/* Release the entries of a blocknr_set. */ +void blocknr_set_destroy(struct list_head *bset) +{ + blocknr_set_entry *bse; + + while (!list_empty(bset)) { + bse = list_entry(bset->next, blocknr_set_entry, link); + list_del_init(&bse->link); + bse_free(bse); + } +} + +/* Merge blocknr_set entries out of @from into @into. */ +/* Audited by: green(2002.06.11) */ +/* Auditor comments: This merge does not know if merged sets contain + blocks pairs (As for wandered sets) or extents, so it cannot really merge + overlapping ranges if there is some. So I believe it may lead to + some blocks being presented several times in one blocknr_set. To help + debugging such problems it might help to check for duplicate entries on + actual processing of this set. Testing this kind of stuff right here is + also complicated by the fact that these sets are not sorted and going + through whole set on each element addition is going to be CPU-heavy task */ +void blocknr_set_merge(struct list_head *from, struct list_head *into) +{ + blocknr_set_entry *bse_into = NULL; + + /* If @from is empty, no work to perform. 
*/ + if (list_empty(from)) + return; + /* If @into is not empty, try merging partial-entries. */ + if (!list_empty(into)) { + + /* Neither set is empty, pop the front to members and try to + combine them. */ + blocknr_set_entry *bse_from; + unsigned into_avail; + + bse_into = list_entry(into->next, blocknr_set_entry, link); + list_del_init(&bse_into->link); + bse_from = list_entry(from->next, blocknr_set_entry, link); + list_del_init(&bse_from->link); + + /* Combine singles. */ + for (into_avail = bse_avail(bse_into); + into_avail != 0 && bse_from->nr_singles != 0; + into_avail -= 1) { + bse_put_single(bse_into, + &bse_from->entries[--bse_from-> + nr_singles]); + } + + /* Combine pairs. */ + for (; into_avail > 1 && bse_from->nr_pairs != 0; + into_avail -= 2) { + struct blocknr_pair *pair = + bse_get_pair(bse_from, --bse_from->nr_pairs); + bse_put_pair(bse_into, &pair->a, &pair->b); + } + + /* If bse_from is empty, delete it now. */ + if (bse_avail(bse_from) == BLOCKNR_SET_ENTRIES_NUMBER) { + bse_free(bse_from); + } else { + /* Otherwise, bse_into is full or nearly full (e.g., + it could have one slot avail and bse_from has one + pair left). Push it back onto the list. bse_from + becomes bse_into, which will be the new partial. */ + list_add(&bse_into->link, into); + bse_into = bse_from; + } + } + + /* Splice lists together. */ + list_splice_init(from, into->prev); + + /* Add the partial entry back to the head of the list. */ + if (bse_into != NULL) + list_add(&bse_into->link, into); +} + +/* Iterate over all blocknr set elements. */ +int blocknr_set_iterator(txn_atom *atom, struct list_head *bset, + blocknr_set_actor_f actor, void *data, int delete) +{ + + blocknr_set_entry *entry; + + assert("zam-429", atom != NULL); + assert("zam-430", atom_is_protected(atom)); + assert("zam-431", bset != 0); + assert("zam-432", actor != NULL); + + entry = list_entry(bset->next, blocknr_set_entry, link); + while (bset != &entry->link) { + blocknr_set_entry *tmp = list_entry(entry->link.next, blocknr_set_entry, link); + unsigned int i; + int ret; + + for (i = 0; i < entry->nr_singles; i++) { + ret = actor(atom, &entry->entries[i], NULL, data); + + /* We can't break a loop if delete flag is set. */ + if (ret != 0 && !delete) + return ret; + } + + for (i = 0; i < entry->nr_pairs; i++) { + struct blocknr_pair *ab; + + ab = bse_get_pair(entry, i); + + ret = actor(atom, &ab->a, &ab->b, data); + + if (ret != 0 && !delete) + return ret; + } + + if (delete) { + list_del(&entry->link); + bse_free(entry); + } + + entry = tmp; + } + + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/carry.c b/fs/reiser4/carry.c new file mode 100644 index 000000000000..536ab6213642 --- /dev/null +++ b/fs/reiser4/carry.c @@ -0,0 +1,1408 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ +/* Functions to "carry" tree modification(s) upward. */ +/* Tree is modified one level at a time. As we modify a level we accumulate a + set of changes that need to be propagated to the next level. We manage + node locking such that any searches that collide with carrying are + restarted, from the root if necessary. + + Insertion of a new item may result in items being moved among nodes and + this requires the delimiting key to be updated at the least common parent + of the nodes modified to preserve search tree invariants. 
Also, insertion + may require allocation of a new node. A pointer to the new node has to be + inserted into some node on the parent level, etc. + + Tree carrying is meant to be analogous to arithmetic carrying. + + A carry operation is always associated with some node (&carry_node). + + Carry process starts with some initial set of operations to be performed + and an initial set of already locked nodes. Operations are performed one + by one. Performing each single operation has following possible effects: + + - content of carry node associated with operation is modified + - new carry nodes are locked and involved into carry process on this level + - new carry operations are posted to the next level + + After all carry operations on this level are done, process is repeated for + the accumulated sequence on carry operations for the next level. This + starts by trying to lock (in left to right order) all carry nodes + associated with carry operations on the parent level. After this, we decide + whether more nodes are required on the left of already locked set. If so, + all locks taken on the parent level are released, new carry nodes are + added, and locking process repeats. + + It may happen that balancing process fails owing to unrecoverable error on + some of upper levels of a tree (possible causes are io error, failure to + allocate new node, etc.). In this case we should unmount the filesystem, + rebooting if it is the root, and possibly advise the use of fsck. + + USAGE: + + int some_tree_operation( znode *node, ... ) + { + // Allocate on a stack pool of carry objects: operations and nodes. + // Most carry processes will only take objects from here, without + // dynamic allocation. + +I feel uneasy about this pool. It adds to code complexity, I understand why it +exists, but.... -Hans + + carry_pool pool; + carry_level lowest_level; + carry_op *op; + + init_carry_pool( &pool ); + init_carry_level( &lowest_level, &pool ); + + // operation may be one of: + // COP_INSERT --- insert new item into node + // COP_CUT --- remove part of or whole node + // COP_PASTE --- increase size of item + // COP_DELETE --- delete pointer from parent node + // COP_UPDATE --- update delimiting key in least + // common ancestor of two + + op = reiser4_post_carry( &lowest_level, operation, node, 0 ); + if( IS_ERR( op ) || ( op == NULL ) ) { + handle error + } else { + // fill in remaining fields in @op, according to carry.h:carry_op + result = carry(&lowest_level, NULL); + } + done_carry_pool(&pool); + } + + When you are implementing node plugin method that participates in carry + (shifting, insertion, deletion, etc.), do the following: + + int foo_node_method(znode * node, ..., carry_level * todo) + { + carry_op *op; + + .... + + // note, that last argument to reiser4_post_carry() is non-null + // here, because @op is to be applied to the parent of @node, rather + // than to the @node itself as in the previous case. + + op = node_post_carry(todo, operation, node, 1); + // fill in remaining fields in @op, according to carry.h:carry_op + + .... + + } + + BATCHING: + + One of the main advantages of level-by-level balancing implemented here is + ability to batch updates on a parent level and to peform them more + efficiently as a result. + + Description To Be Done (TBD). + + DIFFICULTIES AND SUBTLE POINTS: + + 1. complex plumbing is required, because: + + a. effective allocation through pools is needed + + b. target of operation is not exactly known when operation is + posted. 
This is worked around through bitfields in &carry_node and + logic in lock_carry_node() + + c. of interaction with locking code: node should be added into sibling + list when pointer to it is inserted into its parent, which is some time + after node was created. Between these moments, node is somewhat in + suspended state and is only registered in the carry lists + + 2. whole balancing logic is implemented here, in particular, insertion + logic is coded in make_space(). + + 3. special cases like insertion (reiser4_add_tree_root()) or deletion + (reiser4_kill_tree_root()) of tree root and morphing of paste into insert + (insert_paste()) have to be handled. + + 4. there is non-trivial interdependency between allocation of new nodes + and almost everything else. This is mainly due to the (1.c) above. I shall + write about this later. + +*/ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/item/extent.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "znode.h" +#include "tree_mod.h" +#include "tree_walk.h" +#include "block_alloc.h" +#include "pool.h" +#include "tree.h" +#include "carry.h" +#include "carry_ops.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/types.h> + +/* level locking/unlocking */ +static int lock_carry_level(carry_level * level); +static void unlock_carry_level(carry_level * level, int failure); +static void done_carry_level(carry_level * level); +static void unlock_carry_node(carry_level * level, carry_node * node, int fail); + +int lock_carry_node(carry_level * level, carry_node * node); +int lock_carry_node_tail(carry_node * node); + +/* carry processing proper */ +static int carry_on_level(carry_level * doing, carry_level * todo); + +static carry_op *add_op(carry_level * level, pool_ordering order, + carry_op * reference); + +/* handlers for carry operations. */ + +static void fatal_carry_error(carry_level * doing, int ecode); +static int add_new_root(carry_level * level, carry_node * node, znode * fake); + +static void print_level(const char *prefix, carry_level * level); + +#if REISER4_DEBUG +typedef enum { + CARRY_TODO, + CARRY_DOING +} carry_queue_state; +static int carry_level_invariant(carry_level * level, carry_queue_state state); +#endif + +/* main entry point for tree balancing. + + Tree carry performs operations from @doing and while doing so accumulates + information about operations to be performed on the next level ("carried" + to the parent level). Carried operations are performed, causing possibly + more operations to be carried upward etc. carry() takes care about + locking and pinning znodes while operating on them. + + For usage, see comment at the top of fs/reiser4/carry.c + +*/ +int reiser4_carry(carry_level * doing /* set of carry operations to be + * performed */ , + carry_level * done /* set of nodes, already performed + * at the previous level. + * NULL in most cases */) +{ + int result = 0; + gfp_t old_mask; + /* queue of new requests */ + carry_level *todo; + ON_DEBUG(STORE_COUNTERS); + + assert("nikita-888", doing != NULL); + BUG_ON(done != NULL); + + todo = doing + 1; + init_carry_level(todo, doing->pool); + + /* queue of requests preformed on the previous level */ + done = todo + 1; + init_carry_level(done, doing->pool); + /* + * NOTE: We are not allowed to fail in the loop below. + * Incomplete carry (even if carry_on_level is complete) + * can leave the tree in an inconsistent state (broken + * order of keys in a node, etc). 
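+	 * That is why __GFP_NOFAIL is or-ed into the context gfp mask for
+	 * the duration of the loop below and restored afterwards.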
+ */ + old_mask = get_current_context()->gfp_mask; + get_current_context()->gfp_mask |= __GFP_NOFAIL; + + /* iterate until there is nothing more to do */ + while (result == 0 && doing->ops_num > 0) { + carry_level *tmp; + + /* at this point @done is locked. */ + /* repeat lock/do/unlock while + + (1) lock_carry_level() fails due to deadlock avoidance, or + + (2) carry_on_level() decides that more nodes have to + be involved. + + (3) some unexpected error occurred while balancing on the + upper levels. In this case all changes are rolled back. + + */ + while (1) { + result = lock_carry_level(doing); + if (result == 0) { + /* perform operations from @doing and + accumulate new requests in @todo */ + result = carry_on_level(doing, todo); + if (result == 0) + break; + else if (result != -E_REPEAT || + !doing->restartable) { + warning("nikita-1043", + "Fatal error during carry: %i", + result); + print_level("done", done); + print_level("doing", doing); + print_level("todo", todo); + /* do some rough stuff like aborting + all pending transcrashes and thus + pushing tree back to the consistent + state. Alternatvely, just panic. + */ + fatal_carry_error(doing, result); + return result; + } + } else if (result != -E_REPEAT) { + fatal_carry_error(doing, result); + return result; + } + unlock_carry_level(doing, 1); + } + /* at this point @done can be safely unlocked */ + done_carry_level(done); + + /* cyclically shift queues */ + tmp = done; + done = doing; + doing = todo; + todo = tmp; + init_carry_level(todo, doing->pool); + + /* give other threads chance to run */ + reiser4_preempt_point(); + } + get_current_context()->gfp_mask = old_mask; + done_carry_level(done); + + /* all counters, but x_refs should remain the same. x_refs can change + owing to transaction manager */ + ON_DEBUG(CHECK_COUNTERS); + return result; +} + +/* perform carry operations on given level. + + Optimizations proposed by pooh: + + (1) don't lock all nodes from queue at the same time. Lock nodes lazily as + required; + + (2) unlock node if there are no more operations to be performed upon it and + node didn't add any operation to @todo. This can be implemented by + attaching to each node two counters: counter of operaions working on this + node and counter and operations carried upward from this node. + +*/ +static int carry_on_level(carry_level * doing /* queue of carry operations to + * do on this level */ , + carry_level * todo /* queue where new carry + * operations to be performed on + * the * parent level are + * accumulated during @doing + * processing. */ ) +{ + int result; + int (*f) (carry_op *, carry_level *, carry_level *); + carry_op *op; + carry_op *tmp_op; + + assert("nikita-1034", doing != NULL); + assert("nikita-1035", todo != NULL); + + /* @doing->nodes are locked. */ + + /* This function can be split into two phases: analysis and modification + + Analysis calculates precisely what items should be moved between + nodes. This information is gathered in some structures attached to + each carry_node in a @doing queue. Analysis also determines whether + new nodes are to be allocated etc. + + After analysis is completed, actual modification is performed. Here + we can take advantage of "batch modification": if there are several + operations acting on the same node, modifications can be performed + more efficiently when batched together. + + Above is an optimization left for the future. 
+ */ + /* Important, but delayed optimization: it's possible to batch + operations together and perform them more efficiently as a + result. For example, deletion of several neighboring items from a + node can be converted to a single ->cut() operation. + + Before processing queue, it should be scanned and "mergeable" + operations merged. + */ + result = 0; + for_all_ops(doing, op, tmp_op) { + carry_opcode opcode; + + assert("nikita-1041", op != NULL); + opcode = op->op; + assert("nikita-1042", op->op < COP_LAST_OP); + f = op_dispatch_table[op->op].handler; + result = f(op, doing, todo); + /* locking can fail with -E_REPEAT. Any different error is fatal + and will be handled by fatal_carry_error() sledgehammer. + */ + if (result != 0) + break; + } + if (result == 0) { + carry_plugin_info info; + carry_node *scan; + carry_node *tmp_scan; + + info.doing = doing; + info.todo = todo; + + assert("nikita-3002", + carry_level_invariant(doing, CARRY_DOING)); + for_all_nodes(doing, scan, tmp_scan) { + znode *node; + + node = reiser4_carry_real(scan); + assert("nikita-2547", node != NULL); + if (node_is_empty(node)) { + result = + node_plugin_by_node(node)-> + prepare_removal(node, &info); + if (result != 0) + break; + } + } + } + return result; +} + +/* post carry operation + + This is main function used by external carry clients: node layout plugins + and tree operations to create new carry operation to be performed on some + level. + + New operation will be included in the @level queue. To actually perform it, + call carry( level, ... ). This function takes write lock on @node. Carry + manages all its locks by itself, don't worry about this. + + This function adds operation and node at the end of the queue. It is up to + caller to guarantee proper ordering of node queue. + +*/ +carry_op * reiser4_post_carry(carry_level * level /* queue where new operation + * is to be posted at */ , + carry_opcode op /* opcode of operation */ , + znode * node /* node on which this operation + * will operate */ , + int apply_to_parent_p /* whether operation will + * operate directly on @node + * or on it parent. 
*/) +{ + carry_op *result; + carry_node *child; + + assert("nikita-1046", level != NULL); + assert("nikita-1788", znode_is_write_locked(node)); + + result = add_op(level, POOLO_LAST, NULL); + if (IS_ERR(result)) + return result; + child = reiser4_add_carry(level, POOLO_LAST, NULL); + if (IS_ERR(child)) { + reiser4_pool_free(&level->pool->op_pool, &result->header); + return (carry_op *) child; + } + result->node = child; + result->op = op; + child->parent = apply_to_parent_p; + if (ZF_ISSET(node, JNODE_ORPHAN)) + child->left_before = 1; + child->node = node; + return result; +} + +/* initialize carry queue */ +void init_carry_level(carry_level * level /* level to initialize */ , + carry_pool * pool /* pool @level will allocate objects + * from */ ) +{ + assert("nikita-1045", level != NULL); + assert("nikita-967", pool != NULL); + + memset(level, 0, sizeof *level); + level->pool = pool; + + INIT_LIST_HEAD(&level->nodes); + INIT_LIST_HEAD(&level->ops); +} + +/* allocate carry pool and initialize pools within queue */ +carry_pool *init_carry_pool(int size) +{ + carry_pool *pool; + + assert("", size >= sizeof(carry_pool) + 3 * sizeof(carry_level)); + pool = kmalloc(size, reiser4_ctx_gfp_mask_get()); + if (pool == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + reiser4_init_pool(&pool->op_pool, sizeof(carry_op), CARRIES_POOL_SIZE, + (char *)pool->op); + reiser4_init_pool(&pool->node_pool, sizeof(carry_node), + NODES_LOCKED_POOL_SIZE, (char *)pool->node); + return pool; +} + +/* finish with queue pools */ +void done_carry_pool(carry_pool * pool/* pool to destroy */) +{ + reiser4_done_pool(&pool->op_pool); + reiser4_done_pool(&pool->node_pool); + kfree(pool); +} + +/* add new carry node to the @level. + + Returns pointer to the new carry node allocated from pool. It's up to + callers to maintain proper order in the @level. Assumption is that if carry + nodes on one level are already sorted and modifications are peroformed from + left to right, carry nodes added on the parent level will be ordered + automatically. To control ordering use @order and @reference parameters. 
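+
+   A typical call site (illustrative, not copied from real code):
+
+	carry_node *node;
+
+	node = reiser4_add_carry_skip(level, POOLO_AFTER, reference);
+	if (IS_ERR(node))
+		return PTR_ERR(node);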
+ +*/ +carry_node *reiser4_add_carry_skip(carry_level * level /* &carry_level to add + * node to */ , + pool_ordering order /* where to insert: + * at the beginning of + * @level, + * before @reference, + * after @reference, + * at the end of @level + */ , + carry_node * reference/* reference node for + * insertion */) +{ + ON_DEBUG(carry_node * orig_ref = reference); + + if (order == POOLO_BEFORE) { + reference = find_left_carry(reference, level); + if (reference == NULL) + reference = list_entry(level->nodes.next, carry_node, + header.level_linkage); + else + reference = list_entry(reference->header.level_linkage.next, + carry_node, header.level_linkage); + } else if (order == POOLO_AFTER) { + reference = find_right_carry(reference, level); + if (reference == NULL) + reference = list_entry(level->nodes.prev, carry_node, + header.level_linkage); + else + reference = list_entry(reference->header.level_linkage.prev, + carry_node, header.level_linkage); + } + assert("nikita-2209", + ergo(orig_ref != NULL, + reiser4_carry_real(reference) == + reiser4_carry_real(orig_ref))); + return reiser4_add_carry(level, order, reference); +} + +carry_node *reiser4_add_carry(carry_level * level, /* carry_level to add + node to */ + pool_ordering order, /* where to insert: + * at the beginning of + * @level; + * before @reference; + * after @reference; + * at the end of @level + */ + carry_node * reference /* reference node for + * insertion */) +{ + carry_node *result; + + result = + (carry_node *) reiser4_add_obj(&level->pool->node_pool, + &level->nodes, + order, &reference->header); + if (!IS_ERR(result) && (result != NULL)) + ++level->nodes_num; + return result; +} + +/** + * add new carry operation to the @level. + * + * Returns pointer to the new carry operations allocated from pool. It's up to + * callers to maintain proper order in the @level. To control ordering use + * @order and @reference parameters. + */ +static carry_op *add_op(carry_level * level, /* &carry_level to add node to */ + pool_ordering order, /* where to insert: + * at the beginning of @level; + * before @reference; + * after @reference; + * at the end of @level */ + carry_op * reference /* reference node for insertion */) +{ + carry_op *result; + + result = + (carry_op *) reiser4_add_obj(&level->pool->op_pool, &level->ops, + order, &reference->header); + if (!IS_ERR(result) && (result != NULL)) + ++level->ops_num; + return result; +} + +/** + * Return node on the right of which @node was created. + * + * Each node is created on the right of some existing node (or it is new root, + * which is special case not handled here). + * + * @node is new node created on some level, but not yet inserted into its + * parent, it has corresponding bit (JNODE_ORPHAN) set in zstate. 
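+ *
+ * The loop below walks the carry level to the left of @node and stops at
+ * the first entry that refers to a different znode and is not an orphan.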
+ */ +static carry_node *find_begetting_brother(carry_node * node,/* node to start + search from */ + carry_level * kin UNUSED_ARG + /* level to scan */) +{ + carry_node *scan; + + assert("nikita-1614", node != NULL); + assert("nikita-1615", kin != NULL); + assert("nikita-1616", LOCK_CNT_GTZ(rw_locked_tree)); + assert("nikita-1619", ergo(reiser4_carry_real(node) != NULL, + ZF_ISSET(reiser4_carry_real(node), + JNODE_ORPHAN))); + for (scan = node;; + scan = list_entry(scan->header.level_linkage.prev, carry_node, + header.level_linkage)) { + assert("nikita-1617", &kin->nodes != &scan->header.level_linkage); + if ((scan->node != node->node) && + !ZF_ISSET(scan->node, JNODE_ORPHAN)) { + assert("nikita-1618", reiser4_carry_real(scan) != NULL); + break; + } + } + return scan; +} + +static cmp_t +carry_node_cmp(carry_level * level, carry_node * n1, carry_node * n2) +{ + assert("nikita-2199", n1 != NULL); + assert("nikita-2200", n2 != NULL); + + if (n1 == n2) + return EQUAL_TO; + while (1) { + n1 = carry_node_next(n1); + if (carry_node_end(level, n1)) + return GREATER_THAN; + if (n1 == n2) + return LESS_THAN; + } + impossible("nikita-2201", "End of level reached"); +} + +carry_node *find_carry_node(carry_level * level, const znode * node) +{ + carry_node *scan; + carry_node *tmp_scan; + + assert("nikita-2202", level != NULL); + assert("nikita-2203", node != NULL); + + for_all_nodes(level, scan, tmp_scan) { + if (reiser4_carry_real(scan) == node) + return scan; + } + return NULL; +} + +znode *reiser4_carry_real(const carry_node * node) +{ + assert("nikita-3061", node != NULL); + + return node->lock_handle.node; +} + +carry_node *insert_carry_node(carry_level * doing, carry_level * todo, + const znode * node) +{ + carry_node *base; + carry_node *scan; + carry_node *tmp_scan; + carry_node *proj; + + base = find_carry_node(doing, node); + assert("nikita-2204", base != NULL); + + for_all_nodes(todo, scan, tmp_scan) { + proj = find_carry_node(doing, scan->node); + assert("nikita-2205", proj != NULL); + if (carry_node_cmp(doing, proj, base) != LESS_THAN) + break; + } + return scan; +} + +static carry_node *add_carry_atplace(carry_level * doing, carry_level * todo, + znode * node) +{ + carry_node *reference; + + assert("nikita-2994", doing != NULL); + assert("nikita-2995", todo != NULL); + assert("nikita-2996", node != NULL); + + reference = insert_carry_node(doing, todo, node); + assert("nikita-2997", reference != NULL); + + return reiser4_add_carry(todo, POOLO_BEFORE, reference); +} + +/* like reiser4_post_carry(), but designed to be called from node plugin + methods. This function is different from reiser4_post_carry() in that it + finds proper place to insert node in the queue. */ +carry_op *node_post_carry(carry_plugin_info * info /* carry parameters + * passed down to node + * plugin */ , + carry_opcode op /* opcode of operation */ , + znode * node /* node on which this + * operation will operate */ , + int apply_to_parent_p /* whether operation will + * operate directly on @node + * or on it parent. 
*/ ) +{ + carry_op *result; + carry_node *child; + + assert("nikita-2207", info != NULL); + assert("nikita-2208", info->todo != NULL); + + if (info->doing == NULL) + return reiser4_post_carry(info->todo, op, node, + apply_to_parent_p); + + result = add_op(info->todo, POOLO_LAST, NULL); + if (IS_ERR(result)) + return result; + child = add_carry_atplace(info->doing, info->todo, node); + if (IS_ERR(child)) { + reiser4_pool_free(&info->todo->pool->op_pool, &result->header); + return (carry_op *) child; + } + result->node = child; + result->op = op; + child->parent = apply_to_parent_p; + if (ZF_ISSET(node, JNODE_ORPHAN)) + child->left_before = 1; + child->node = node; + return result; +} + +/* lock all carry nodes in @level */ +static int lock_carry_level(carry_level * level/* level to lock */) +{ + int result; + carry_node *node; + carry_node *tmp_node; + + assert("nikita-881", level != NULL); + assert("nikita-2229", carry_level_invariant(level, CARRY_TODO)); + + /* lock nodes from left to right */ + result = 0; + for_all_nodes(level, node, tmp_node) { + result = lock_carry_node(level, node); + if (result != 0) + break; + } + return result; +} + +/* Synchronize delimiting keys between @node and its left neighbor. + + To reduce contention on dk key and simplify carry code, we synchronize + delimiting keys only when carry ultimately leaves tree level (carrying + changes upward) and unlocks nodes at this level. + + This function first finds left neighbor of @node and then updates left + neighbor's right delimiting key to conincide with least key in @node. + +*/ + +ON_DEBUG(extern atomic_t delim_key_version; + ) + +static void sync_dkeys(znode * spot/* node to update */) +{ + reiser4_key pivot; + reiser4_tree *tree; + + assert("nikita-1610", spot != NULL); + assert("nikita-1612", LOCK_CNT_NIL(rw_locked_dk)); + + tree = znode_get_tree(spot); + read_lock_tree(tree); + write_lock_dk(tree); + + assert("nikita-2192", znode_is_loaded(spot)); + + /* sync left delimiting key of @spot with key in its leftmost item */ + if (node_is_empty(spot)) + pivot = *znode_get_rd_key(spot); + else + leftmost_key_in_node(spot, &pivot); + + znode_set_ld_key(spot, &pivot); + + /* there can be sequence of empty nodes pending removal on the left of + @spot. Scan them and update their left and right delimiting keys to + match left delimiting key of @spot. Also, update right delimiting + key of first non-empty left neighbor. + */ + while (1) { + if (!ZF_ISSET(spot, JNODE_LEFT_CONNECTED)) + break; + + spot = spot->left; + if (spot == NULL) + break; + + znode_set_rd_key(spot, &pivot); + /* don't sink into the domain of another balancing */ + if (!znode_is_write_locked(spot)) + break; + if (ZF_ISSET(spot, JNODE_HEARD_BANSHEE)) + znode_set_ld_key(spot, &pivot); + else + break; + } + + write_unlock_dk(tree); + read_unlock_tree(tree); +} + +/* unlock all carry nodes in @level */ +static void unlock_carry_level(carry_level * level /* level to unlock */ , + int failure /* true if unlocking owing to + * failure */ ) +{ + carry_node *node; + carry_node *tmp_node; + + assert("nikita-889", level != NULL); + + if (!failure) { + znode *spot; + + spot = NULL; + /* update delimiting keys */ + for_all_nodes(level, node, tmp_node) { + if (reiser4_carry_real(node) != spot) { + spot = reiser4_carry_real(node); + sync_dkeys(spot); + } + } + } + + /* nodes can be unlocked in arbitrary order. In preemptible + environment it's better to unlock in reverse order of locking, + though. 
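+	   Hence for_all_nodes_back() is used below.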
+ */ + for_all_nodes_back(level, node, tmp_node) { + /* all allocated nodes should be already linked to their + parents at this moment. */ + assert("nikita-1631", + ergo(!failure, !ZF_ISSET(reiser4_carry_real(node), + JNODE_ORPHAN))); + ON_DEBUG(check_dkeys(reiser4_carry_real(node))); + unlock_carry_node(level, node, failure); + } + level->new_root = NULL; +} + +/* finish with @level + + Unlock nodes and release all allocated resources */ +static void done_carry_level(carry_level * level/* level to finish */) +{ + carry_node *node; + carry_node *tmp_node; + carry_op *op; + carry_op *tmp_op; + + assert("nikita-1076", level != NULL); + + unlock_carry_level(level, 0); + for_all_nodes(level, node, tmp_node) { + assert("nikita-2113", list_empty_careful(&node->lock_handle.locks_link)); + assert("nikita-2114", list_empty_careful(&node->lock_handle.owners_link)); + reiser4_pool_free(&level->pool->node_pool, &node->header); + } + for_all_ops(level, op, tmp_op) + reiser4_pool_free(&level->pool->op_pool, &op->header); +} + +/* helper function to complete locking of carry node + + Finish locking of carry node. There are several ways in which new carry + node can be added into carry level and locked. Normal is through + lock_carry_node(), but also from find_{left|right}_neighbor(). This + function factors out common final part of all locking scenarios. It + supposes that @node -> lock_handle is lock handle for lock just taken and + fills ->real_node from this lock handle. + +*/ +int lock_carry_node_tail(carry_node * node/* node to complete locking of */) +{ + assert("nikita-1052", node != NULL); + assert("nikita-1187", reiser4_carry_real(node) != NULL); + assert("nikita-1188", !node->unlock); + + node->unlock = 1; + /* Load node content into memory and install node plugin by + looking at the node header. + + Most of the time this call is cheap because the node is + already in memory. + + Corresponding zrelse() is in unlock_carry_node() + */ + return zload(reiser4_carry_real(node)); +} + +/* lock carry node + + "Resolve" node to real znode, lock it and mark as locked. + This requires recursive locking of znodes. + + When operation is posted to the parent level, node it will be applied to is + not yet known. For example, when shifting data between two nodes, + delimiting has to be updated in parent or parents of nodes involved. But + their parents is not yet locked and, moreover said nodes can be reparented + by concurrent balancing. + + To work around this, carry operation is applied to special "carry node" + rather than to the znode itself. Carry node consists of some "base" or + "reference" znode and flags indicating how to get to the target of carry + operation (->real_node field of carry_node) from base. + +*/ +int lock_carry_node(carry_level * level /* level @node is in */ , + carry_node * node/* node to lock */) +{ + int result; + znode *reference_point; + lock_handle lh; + lock_handle tmp_lh; + reiser4_tree *tree; + + assert("nikita-887", level != NULL); + assert("nikita-882", node != NULL); + + result = 0; + reference_point = node->node; + init_lh(&lh); + init_lh(&tmp_lh); + if (node->left_before) { + /* handling of new nodes, allocated on the previous level: + + some carry ops were propably posted from the new node, but + this node neither has parent pointer set, nor is + connected. This will be done in ->create_hook() for + internal item. + + No then less, parent of new node has to be locked. To do + this, first go to the "left" in the carry order. 
This + depends on the decision to always allocate new node on the + right of existing one. + + Loop handles case when multiple nodes, all orphans, were + inserted. + + Strictly speaking, taking tree lock is not necessary here, + because all nodes scanned by loop in + find_begetting_brother() are write-locked by this thread, + and thus, their sibling linkage cannot change. + + */ + tree = znode_get_tree(reference_point); + read_lock_tree(tree); + reference_point = find_begetting_brother(node, level)->node; + read_unlock_tree(tree); + assert("nikita-1186", reference_point != NULL); + } + if (node->parent && (result == 0)) { + result = + reiser4_get_parent(&tmp_lh, reference_point, + ZNODE_WRITE_LOCK); + if (result != 0) { + ; /* nothing */ + } else if (znode_get_level(tmp_lh.node) == 0) { + assert("nikita-1347", znode_above_root(tmp_lh.node)); + result = add_new_root(level, node, tmp_lh.node); + if (result == 0) { + reference_point = level->new_root; + move_lh(&lh, &node->lock_handle); + } + } else if ((level->new_root != NULL) + && (level->new_root != + znode_parent_nolock(reference_point))) { + /* parent of node exists, but this level aready + created different new root, so */ + warning("nikita-1109", + /* it should be "radicis", but tradition is + tradition. do banshees read latin? */ + "hodie natus est radici frater"); + result = -EIO; + } else { + move_lh(&lh, &tmp_lh); + reference_point = lh.node; + } + } + if (node->left && (result == 0)) { + assert("nikita-1183", node->parent); + assert("nikita-883", reference_point != NULL); + result = + reiser4_get_left_neighbor(&tmp_lh, reference_point, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result == 0) { + done_lh(&lh); + move_lh(&lh, &tmp_lh); + reference_point = lh.node; + } + } + if (!node->parent && !node->left && !node->left_before) { + result = + longterm_lock_znode(&lh, reference_point, ZNODE_WRITE_LOCK, + ZNODE_LOCK_HIPRI); + } + if (result == 0) { + move_lh(&node->lock_handle, &lh); + result = lock_carry_node_tail(node); + } + done_lh(&tmp_lh); + done_lh(&lh); + return result; +} + +/* release a lock on &carry_node. + + Release if necessary lock on @node. This opearion is pair of + lock_carry_node() and is idempotent: you can call it more than once on the + same node. + +*/ +static void +unlock_carry_node(carry_level * level, + carry_node * node /* node to be released */ , + int failure /* 0 if node is unlocked due + * to some error */ ) +{ + znode *real_node; + + assert("nikita-884", node != NULL); + + real_node = reiser4_carry_real(node); + /* pair to zload() in lock_carry_node_tail() */ + zrelse(real_node); + if (node->unlock && (real_node != NULL)) { + assert("nikita-899", real_node == node->lock_handle.node); + longterm_unlock_znode(&node->lock_handle); + } + if (failure) { + if (node->deallocate && (real_node != NULL)) { + /* free node in bitmap + + Prepare node for removal. Last zput() will finish + with it. + */ + ZF_SET(real_node, JNODE_HEARD_BANSHEE); + } + if (node->free) { + assert("nikita-2177", + list_empty_careful(&node->lock_handle.locks_link)); + assert("nikita-2112", + list_empty_careful(&node->lock_handle.owners_link)); + reiser4_pool_free(&level->pool->node_pool, + &node->header); + } + } +} + +/* fatal_carry_error() - all-catching error handling function + + It is possible that carry faces unrecoverable error, like unability to + insert pointer at the internal level. Our simple solution is just panic in + this situation. 
More sophisticated things like attempt to remount + file-system as read-only can be implemented without much difficlties. + + It is believed, that: + + 1. in stead of panicking, all current transactions can be aborted rolling + system back to the consistent state. + +Umm, if you simply panic without doing anything more at all, then all current +transactions are aborted and the system is rolled back to a consistent state, +by virtue of the design of the transactional mechanism. Well, wait, let's be +precise. If an internal node is corrupted on disk due to hardware failure, +then there may be no consistent state that can be rolled back to, so instead +we should say that it will rollback the transactions, which barring other +factors means rolling back to a consistent state. + +# Nikita: there is a subtle difference between panic and aborting +# transactions: machine doesn't reboot. Processes aren't killed. Processes +# don't using reiser4 (not that we care about such processes), or using other +# reiser4 mounts (about them we do care) will simply continue to run. With +# some luck, even application using aborted file system can survive: it will +# get some error, like EBADF, from each file descriptor on failed file system, +# but applications that do care about tolerance will cope with this (squid +# will). + +It would be a nice feature though to support rollback without rebooting +followed by remount, but this can wait for later versions. + + 2. once isolated transactions will be implemented it will be possible to + roll back offending transaction. + +2. is additional code complexity of inconsistent value (it implies that a +broken tree should be kept in operation), so we must think about it more +before deciding if it should be done. -Hans + +*/ +static void fatal_carry_error(carry_level * doing UNUSED_ARG /* carry level + * where + * unrecoverable + * error + * occurred */ , + int ecode/* error code */) +{ + assert("nikita-1230", doing != NULL); + assert("nikita-1231", ecode < 0); + + reiser4_panic("nikita-1232", "Carry failed: %i", ecode); +} + +/** + * Add new root to the tree + * + * This function itself only manages changes in carry structures and delegates + * all hard work (allocation of znode for new root, changes of parent and + * sibling pointers) to the reiser4_add_tree_root(). + * + * Locking: old tree root is locked by carry at this point. Fake znode is also + * locked. + */ +static int add_new_root(carry_level * level,/* carry level in context of which + * operation is performed */ + carry_node * node, /* carry node for existing root */ + znode * fake /* "fake" znode already locked by + * us */) +{ + int result; + + assert("nikita-1104", level != NULL); + assert("nikita-1105", node != NULL); + + assert("nikita-1403", znode_is_write_locked(node->node)); + assert("nikita-1404", znode_is_write_locked(fake)); + + /* trying to create new root. */ + /* @node is root and it's already locked by us. This + means that nobody else can be trying to add/remove + tree root right now. 
+ */ + if (level->new_root == NULL) + level->new_root = reiser4_add_tree_root(node->node, fake); + if (!IS_ERR(level->new_root)) { + assert("nikita-1210", znode_is_root(level->new_root)); + node->deallocate = 1; + result = + longterm_lock_znode(&node->lock_handle, level->new_root, + ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI); + if (result == 0) + zput(level->new_root); + } else { + result = PTR_ERR(level->new_root); + level->new_root = NULL; + } + return result; +} + +/* allocate new znode and add the operation that inserts the + pointer to it into the parent node into the todo level + + Allocate new znode, add it into carry queue and post into @todo queue + request to add pointer to new node into its parent. + + This is carry related routing that calls reiser4_new_node() to allocate new + node. +*/ +carry_node *add_new_znode(znode * brother /* existing left neighbor of new + * node */ , + carry_node * ref /* carry node after which new + * carry node is to be inserted + * into queue. This affects + * locking. */ , + carry_level * doing /* carry queue where new node is + * to be added */ , + carry_level * todo /* carry queue where COP_INSERT + * operation to add pointer to + * new node will ne added */ ) +{ + carry_node *fresh; + znode *new_znode; + carry_op *add_pointer; + carry_plugin_info info; + + assert("nikita-1048", brother != NULL); + assert("nikita-1049", todo != NULL); + + /* There is a lot of possible variations here: to what parent + new node will be attached and where. For simplicity, always + do the following: + + (1) new node and @brother will have the same parent. + + (2) new node is added on the right of @brother + + */ + + fresh = reiser4_add_carry_skip(doing, + ref ? POOLO_AFTER : POOLO_LAST, ref); + if (IS_ERR(fresh)) + return fresh; + + fresh->deallocate = 1; + fresh->free = 1; + + new_znode = reiser4_new_node(brother, znode_get_level(brother)); + if (IS_ERR(new_znode)) + /* @fresh will be deallocated automatically by error + handling code in the caller. */ + return (carry_node *) new_znode; + + /* new_znode returned znode with x_count 1. Caller has to decrease + it. make_space() does. */ + + ZF_SET(new_znode, JNODE_ORPHAN); + fresh->node = new_znode; + + while (ZF_ISSET(reiser4_carry_real(ref), JNODE_ORPHAN)) { + ref = carry_node_prev(ref); + assert("nikita-1606", !carry_node_end(doing, ref)); + } + + info.todo = todo; + info.doing = doing; + add_pointer = node_post_carry(&info, COP_INSERT, + reiser4_carry_real(ref), 1); + if (IS_ERR(add_pointer)) { + /* no need to deallocate @new_znode here: it will be + deallocated during carry error handling. */ + return (carry_node *) add_pointer; + } + + add_pointer->u.insert.type = COPT_CHILD; + add_pointer->u.insert.child = fresh; + add_pointer->u.insert.brother = brother; + /* initially new node spawns empty key range */ + write_lock_dk(znode_get_tree(brother)); + znode_set_ld_key(new_znode, + znode_set_rd_key(new_znode, + znode_get_rd_key(brother))); + write_unlock_dk(znode_get_tree(brother)); + return fresh; +} + +/* DEBUGGING FUNCTIONS. + + Probably we also should leave them on even when + debugging is turned off to print dumps at errors. 
+*/ +#if REISER4_DEBUG +static int carry_level_invariant(carry_level * level, carry_queue_state state) +{ + carry_node *node; + carry_node *tmp_node; + + if (level == NULL) + return 0; + + if (level->track_type != 0 && + level->track_type != CARRY_TRACK_NODE && + level->track_type != CARRY_TRACK_CHANGE) + return 0; + + /* check that nodes are in ascending order */ + for_all_nodes(level, node, tmp_node) { + znode *left; + znode *right; + + reiser4_key lkey; + reiser4_key rkey; + + if (node != carry_node_front(level)) { + if (state == CARRY_TODO) { + right = node->node; + left = carry_node_prev(node)->node; + } else { + right = reiser4_carry_real(node); + left = reiser4_carry_real(carry_node_prev(node)); + } + if (right == NULL || left == NULL) + continue; + if (node_is_empty(right) || node_is_empty(left)) + continue; + if (!keyle(leftmost_key_in_node(left, &lkey), + leftmost_key_in_node(right, &rkey))) { + warning("", "wrong key order"); + return 0; + } + } + } + return 1; +} +#endif + +/* get symbolic name for boolean */ +static const char *tf(int boolean/* truth value */) +{ + return boolean ? "t" : "f"; +} + +/* symbolic name for carry operation */ +static const char *carry_op_name(carry_opcode op/* carry opcode */) +{ + switch (op) { + case COP_INSERT: + return "COP_INSERT"; + case COP_DELETE: + return "COP_DELETE"; + case COP_CUT: + return "COP_CUT"; + case COP_PASTE: + return "COP_PASTE"; + case COP_UPDATE: + return "COP_UPDATE"; + case COP_EXTENT: + return "COP_EXTENT"; + case COP_INSERT_FLOW: + return "COP_INSERT_FLOW"; + default:{ + /* not mt safe, but who cares? */ + static char buf[20]; + + sprintf(buf, "unknown op: %x", op); + return buf; + } + } +} + +/* dump information about carry node */ +static void print_carry(const char *prefix /* prefix to print */ , + carry_node * node/* node to print */) +{ + if (node == NULL) { + printk("%s: null\n", prefix); + return; + } + printk + ("%s: %p parent: %s, left: %s, unlock: %s, free: %s, dealloc: %s\n", + prefix, node, tf(node->parent), tf(node->left), tf(node->unlock), + tf(node->free), tf(node->deallocate)); +} + +/* dump information about carry operation */ +static void print_op(const char *prefix /* prefix to print */ , + carry_op * op/* operation to print */) +{ + if (op == NULL) { + printk("%s: null\n", prefix); + return; + } + printk("%s: %p carry_opcode: %s\n", prefix, op, carry_op_name(op->op)); + print_carry("\tnode", op->node); + switch (op->op) { + case COP_INSERT: + case COP_PASTE: + print_coord("\tcoord", + op->u.insert.d ? op->u.insert.d->coord : NULL, 0); + reiser4_print_key("\tkey", + op->u.insert.d ? 
op->u.insert.d->key : NULL); + print_carry("\tchild", op->u.insert.child); + break; + case COP_DELETE: + print_carry("\tchild", op->u.delete.child); + break; + case COP_CUT: + if (op->u.cut_or_kill.is_cut) { + print_coord("\tfrom", + op->u.cut_or_kill.u.kill->params.from, 0); + print_coord("\tto", op->u.cut_or_kill.u.kill->params.to, + 0); + } else { + print_coord("\tfrom", + op->u.cut_or_kill.u.cut->params.from, 0); + print_coord("\tto", op->u.cut_or_kill.u.cut->params.to, + 0); + } + break; + case COP_UPDATE: + print_carry("\tleft", op->u.update.left); + break; + default: + /* do nothing */ + break; + } +} + +/* dump information about all nodes and operations in a @level */ +static void print_level(const char *prefix /* prefix to print */ , + carry_level * level/* level to print */) +{ + carry_node *node; + carry_node *tmp_node; + carry_op *op; + carry_op *tmp_op; + + if (level == NULL) { + printk("%s: null\n", prefix); + return; + } + printk("%s: %p, restartable: %s\n", + prefix, level, tf(level->restartable)); + + for_all_nodes(level, node, tmp_node) + print_carry("\tcarry node", node); + for_all_ops(level, op, tmp_op) + print_op("\tcarry op", op); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/carry.h b/fs/reiser4/carry.h new file mode 100644 index 000000000000..d1f5b608442b --- /dev/null +++ b/fs/reiser4/carry.h @@ -0,0 +1,445 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Functions and data types to "carry" tree modification(s) upward. + See fs/reiser4/carry.c for details. */ + +#if !defined(__FS_REISER4_CARRY_H__) +#define __FS_REISER4_CARRY_H__ + +#include "forward.h" +#include "debug.h" +#include "pool.h" +#include "znode.h" + +#include <linux/types.h> + +/* &carry_node - "location" of carry node. + + "location" of node that is involved or going to be involved into + carry process. Node where operation will be carried to on the + parent level cannot be recorded explicitly. Operation will be carried + usually to the parent of some node (where changes are performed at + the current level) or, to the left neighbor of its parent. But while + modifications are performed at the current level, parent may + change. So, we have to allow some indirection (or, positevly, + flexibility) in locating carry nodes. + +*/ +typedef struct carry_node { + /* pool linkage */ + struct reiser4_pool_header header; + + /* base node from which real_node is calculated. See + fs/reiser4/carry.c:lock_carry_node(). */ + znode *node; + + /* how to get ->real_node */ + /* to get ->real_node obtain parent of ->node */ + __u32 parent:1; + /* to get ->real_node obtain left neighbor of parent of + ->node */ + __u32 left:1; + __u32 left_before:1; + + /* locking */ + + /* this node was locked by carry process and should be + unlocked when carry leaves a level */ + __u32 unlock:1; + + /* disk block for this node was allocated by carry process and + should be deallocated when carry leaves a level */ + __u32 deallocate:1; + /* this carry node was allocated by carry process and should be + freed when carry leaves a level */ + __u32 free:1; + + /* type of lock we want to take on this node */ + lock_handle lock_handle; +} carry_node; + +/* &carry_opcode - elementary operations that can be carried upward + + Operations that carry() can handle. This list is supposed to be + expanded. 
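Returning briefly to the carry_node structure defined just above: the point of recording a base znode plus "how to reach the target" bits, instead of the target itself, is that the base can be reparented by concurrent balancing between posting an operation and resolving it in lock_carry_node(). The user-space toy below uses invented names only and simplifies the rules (in the real structure the left bit means "left neighbor of the parent of ->node"); it merely shows the indirection paying off when the parent changes.

	/* toy model of carry_node's location indirection; invented types */
	#include <stdio.h>

	struct toy_znode {
		const char *name;
		struct toy_znode *parent;
		struct toy_znode *left;          /* left sibling */
	};

	struct toy_carry_node {
		struct toy_znode *base;          /* analogue of carry_node->node */
		int parent;                      /* analogue of the parent:1 bit */
		int left;                        /* analogue of the left:1 bit */
	};

	static struct toy_znode *toy_resolve(const struct toy_carry_node *cn)
	{
		struct toy_znode *target = cn->base;

		if (cn->parent)
			target = target->parent;     /* operate on the parent */
		if (cn->left)
			target = target->left;       /* ... or on its left neighbor */
		return target;
	}

	int main(void)
	{
		struct toy_znode old_parent = { "old parent", NULL, NULL };
		struct toy_znode new_parent = { "new parent", NULL, &old_parent };
		struct toy_znode leaf = { "leaf", &old_parent, NULL };
		struct toy_carry_node cn = { &leaf, 1, 0 };  /* "apply to parent of leaf" */

		printf("resolved now:   %s\n", toy_resolve(&cn)->name);  /* old parent */
		leaf.parent = &new_parent;   /* concurrent balancing reparents the leaf */
		printf("resolved later: %s\n", toy_resolve(&cn)->name);  /* new parent */
		return 0;
	}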
+ + Each carry operation (cop) is handled by appropriate function defined + in fs/reiser4/carry.c. For example COP_INSERT is handled by + fs/reiser4/carry.c:carry_insert() etc. These functions in turn + call plugins of nodes affected by operation to modify nodes' content + and to gather operations to be performed on the next level. + +*/ +typedef enum { + /* insert new item into node. */ + COP_INSERT, + /* delete pointer from parent node */ + COP_DELETE, + /* remove part of or whole node. */ + COP_CUT, + /* increase size of item. */ + COP_PASTE, + /* insert extent (that is sequence of unformatted nodes). */ + COP_EXTENT, + /* update delimiting key in least common ancestor of two + nodes. This is performed when items are moved between two + nodes. + */ + COP_UPDATE, + /* insert flow */ + COP_INSERT_FLOW, + COP_LAST_OP, +} carry_opcode; + +#define CARRY_FLOW_NEW_NODES_LIMIT 20 + +/* mode (or subtype) of COP_{INSERT|PASTE} operation. Specifies how target + item is determined. */ +typedef enum { + /* target item is one containing pointer to the ->child node */ + COPT_CHILD, + /* target item is given explicitly by @coord */ + COPT_ITEM_DATA, + /* target item is given by key */ + COPT_KEY, + /* see insert_paste_common() for more comments on this. */ + COPT_PASTE_RESTARTED, +} cop_insert_pos_type; + +/* flags to cut and delete */ +typedef enum { + /* don't kill node even if it became completely empty as results of + * cut. This is needed for eottl handling. See carry_extent() for + * details. */ + DELETE_RETAIN_EMPTY = (1 << 0) +} cop_delete_flag; + +/* + * carry() implements "lock handle tracking" feature. + * + * Callers supply carry with node where to perform initial operation and lock + * handle on this node. Trying to optimize node utilization carry may actually + * move insertion point to different node. Callers expect that lock handle + * will rebe transferred to the new node also. + * + */ +typedef enum { + /* transfer lock handle along with insertion point */ + CARRY_TRACK_CHANGE = 1, + /* acquire new lock handle to the node where insertion point is. This + * is used when carry() client doesn't initially possess lock handle + * on the insertion point node, for example, by extent insertion + * code. See carry_extent(). */ + CARRY_TRACK_NODE = 2 +} carry_track_type; + +/* data supplied to COP_{INSERT|PASTE} by callers */ +typedef struct carry_insert_data { + /* position where new item is to be inserted */ + coord_t *coord; + /* new item description */ + reiser4_item_data * data; + /* key of new item */ + const reiser4_key * key; +} carry_insert_data; + +/* cut and kill are similar, so carry_cut_data and carry_kill_data share the + below structure of parameters */ +struct cut_kill_params { + /* coord where cut starts (inclusive) */ + coord_t *from; + /* coord where cut stops (inclusive, this item/unit will also be + * cut) */ + coord_t *to; + /* starting key. This is necessary when item and unit pos don't + * uniquely identify what portion or tree to remove. For example, this + * indicates what portion of extent unit will be affected. */ + const reiser4_key * from_key; + /* exclusive stop key */ + const reiser4_key * to_key; + /* if this is not NULL, smallest actually removed key is stored + * here. 
*/ + reiser4_key *smallest_removed; + /* kill_node_content() is called for file truncate */ + int truncate; +}; + +struct carry_cut_data { + struct cut_kill_params params; +}; + +struct carry_kill_data { + struct cut_kill_params params; + /* parameter to be passed to the ->kill_hook() method of item + * plugin */ + /*void *iplug_params; *//* FIXME: unused currently */ + /* if not NULL---inode whose items are being removed. This is needed + * for ->kill_hook() of extent item to update VM structures when + * removing pages. */ + struct inode *inode; + /* sibling list maintenance is complicated by existence of eottl. When + * eottl whose left and right neighbors are formatted leaves is + * removed, one has to connect said leaves in the sibling list. This + * cannot be done when extent removal is just started as locking rules + * require sibling list update to happen atomically with removal of + * extent item. Therefore: 1. pointers to left and right neighbors + * have to be passed down to the ->kill_hook() of extent item, and + * 2. said neighbors have to be locked. */ + lock_handle *left; + lock_handle *right; + /* flags modifying behavior of kill. Currently, it may have + DELETE_RETAIN_EMPTY set. */ + unsigned flags; + char *buf; +}; + +/* &carry_tree_op - operation to "carry" upward. + + Description of an operation we want to "carry" to the upper level of + a tree: e.g, when we insert something and there is not enough space + we allocate a new node and "carry" the operation of inserting a + pointer to the new node to the upper level, on removal of empty node, + we carry up operation of removing appropriate entry from parent. + + There are two types of carry ops: when adding or deleting node we + node at the parent level where appropriate modification has to be + performed is known in advance. When shifting items between nodes + (split, merge), delimiting key should be changed in the least common + parent of the nodes involved that is not known in advance. + + For the operations of the first type we store in &carry_op pointer to + the &carry_node at the parent level. For the operation of the second + type we store &carry_node or parents of the left and right nodes + modified and keep track of them upward until they coincide. + +*/ +typedef struct carry_op { + /* pool linkage */ + struct reiser4_pool_header header; + carry_opcode op; + /* node on which operation is to be performed: + + for insert, paste: node where new item is to be inserted + + for delete: node where pointer is to be deleted + + for cut: node to cut from + + for update: node where delimiting key is to be modified + + for modify: parent of modified node + + */ + carry_node *node; + union { + struct { + /* (sub-)type of insertion/paste. Taken from + cop_insert_pos_type. */ + __u8 type; + /* various operation flags. Taken from + cop_insert_flag. */ + __u8 flags; + carry_insert_data *d; + carry_node *child; + znode *brother; + } insert, paste, extent; + + struct { + int is_cut; + union { + carry_kill_data *kill; + carry_cut_data *cut; + } u; + } cut_or_kill; + + struct { + carry_node *left; + } update; + struct { + /* changed child */ + carry_node *child; + /* bitmask of changes. See &cop_modify_flag */ + __u32 flag; + } modify; + struct { + /* flags to deletion operation. Are taken from + cop_delete_flag */ + __u32 flags; + /* child to delete from parent. If this is + NULL, delete op->node. */ + carry_node *child; + } delete; + struct { + /* various operation flags. Taken from + cop_insert_flag. 
*/ + __u32 flags; + flow_t *flow; + coord_t *insert_point; + reiser4_item_data *data; + /* flow insertion is limited by number of new blocks + added in that operation which do not get any data + but part of flow. This limit is set by macro + CARRY_FLOW_NEW_NODES_LIMIT. This field stores number + of nodes added already during one carry_flow */ + int new_nodes; + } insert_flow; + } u; +} carry_op; + +/* &carry_op_pool - preallocated pool of carry operations, and nodes */ +typedef struct carry_pool { + carry_op op[CARRIES_POOL_SIZE]; + struct reiser4_pool op_pool; + carry_node node[NODES_LOCKED_POOL_SIZE]; + struct reiser4_pool node_pool; +} carry_pool; + +/* &carry_tree_level - carry process on given level + + Description of balancing process on the given level. + + No need for locking here, as carry_tree_level is essentially per + thread thing (for now). + +*/ +struct carry_level { + /* this level may be restarted */ + __u32 restartable:1; + /* list of carry nodes on this level, ordered by key order */ + struct list_head nodes; + struct list_head ops; + /* pool where new objects are allocated from */ + carry_pool *pool; + int ops_num; + int nodes_num; + /* new root created on this level, if any */ + znode *new_root; + /* This is set by caller (insert_by_key(), rreiser4_esize_item(), etc.) + when they want ->tracked to automagically wander to the node where + insertion point moved after insert or paste. + */ + carry_track_type track_type; + /* lock handle supplied by user that we are tracking. See + above. */ + lock_handle *tracked; +}; + +/* information carry passes to plugin methods that may add new operations to + the @todo queue */ +struct carry_plugin_info { + carry_level *doing; + carry_level *todo; +}; + +int reiser4_carry(carry_level * doing, carry_level * done); + +carry_node *reiser4_add_carry(carry_level * level, pool_ordering order, + carry_node * reference); +carry_node *reiser4_add_carry_skip(carry_level * level, pool_ordering order, + carry_node * reference); + +extern carry_node *insert_carry_node(carry_level * doing, + carry_level * todo, const znode * node); + +extern carry_pool *init_carry_pool(int); +extern void done_carry_pool(carry_pool * pool); + +extern void init_carry_level(carry_level * level, carry_pool * pool); + +extern carry_op *reiser4_post_carry(carry_level * level, carry_opcode op, + znode * node, int apply_to_parent); +extern carry_op *node_post_carry(carry_plugin_info * info, carry_opcode op, + znode * node, int apply_to_parent_p); + +carry_node *add_new_znode(znode * brother, carry_node * reference, + carry_level * doing, carry_level * todo); + +carry_node *find_carry_node(carry_level * level, const znode * node); + +extern znode *reiser4_carry_real(const carry_node * node); + +/* helper macros to iterate over carry queues */ + +#define carry_node_next(node) \ + list_entry((node)->header.level_linkage.next, carry_node, \ + header.level_linkage) + +#define carry_node_prev(node) \ + list_entry((node)->header.level_linkage.prev, carry_node, \ + header.level_linkage) + +#define carry_node_front(level) \ + list_entry((level)->nodes.next, carry_node, header.level_linkage) + +#define carry_node_back(level) \ + list_entry((level)->nodes.prev, carry_node, header.level_linkage) + +#define carry_node_end(level, node) \ + (&(level)->nodes == &(node)->header.level_linkage) + +/* macro to iterate over all operations in a @level */ +#define for_all_ops(level /* carry level (of type carry_level *) */, \ + op /* pointer to carry operation, modified by loop (of \ + * type 
carry_op *) */, \ + tmp /* pointer to carry operation (of type carry_op *), \ + * used to make iterator stable in the face of \ + * deletions from the level */ ) \ +for (op = list_entry(level->ops.next, carry_op, header.level_linkage), \ + tmp = list_entry(op->header.level_linkage.next, carry_op, header.level_linkage); \ + &op->header.level_linkage != &level->ops; \ + op = tmp, \ + tmp = list_entry(op->header.level_linkage.next, carry_op, header.level_linkage)) + +#if 0 +for (op = (carry_op *) pool_level_list_front(&level->ops), \ + tmp = (carry_op *) pool_level_list_next(&op->header) ; \ + !pool_level_list_end(&level->ops, &op->header) ; \ + op = tmp, tmp = (carry_op *) pool_level_list_next(&op->header)) +#endif + +/* macro to iterate over all nodes in a @level */ \ +#define for_all_nodes(level /* carry level (of type carry_level *) */, \ + node /* pointer to carry node, modified by loop (of \ + * type carry_node *) */, \ + tmp /* pointer to carry node (of type carry_node *), \ + * used to make iterator stable in the face of * \ + * deletions from the level */ ) \ +for (node = list_entry(level->nodes.next, carry_node, header.level_linkage), \ + tmp = list_entry(node->header.level_linkage.next, carry_node, header.level_linkage); \ + &node->header.level_linkage != &level->nodes; \ + node = tmp, \ + tmp = list_entry(node->header.level_linkage.next, carry_node, header.level_linkage)) + +#if 0 +for (node = carry_node_front(level), \ + tmp = carry_node_next(node) ; !carry_node_end(level, node) ; \ + node = tmp, tmp = carry_node_next(node)) +#endif + +/* macro to iterate over all nodes in a @level in reverse order + + This is used, because nodes are unlocked in reversed order of locking */ +#define for_all_nodes_back(level /* carry level (of type carry_level *) */, \ + node /* pointer to carry node, modified by loop \ + * (of type carry_node *) */, \ + tmp /* pointer to carry node (of type carry_node \ + * *), used to make iterator stable in the \ + * face of deletions from the level */ ) \ +for (node = carry_node_back(level), \ + tmp = carry_node_prev(node) ; !carry_node_end(level, node) ; \ + node = tmp, tmp = carry_node_prev(node)) + +/* __FS_REISER4_CARRY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/carry_ops.c b/fs/reiser4/carry_ops.c new file mode 100644 index 000000000000..9871da4464f1 --- /dev/null +++ b/fs/reiser4/carry_ops.c @@ -0,0 +1,2136 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* implementation of carry operations */ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "pool.h" +#include "tree_mod.h" +#include "carry.h" +#include "carry_ops.h" +#include "tree.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/types.h> +#include <linux/err.h> + +static int carry_shift_data(sideof side, coord_t *insert_coord, znode * node, + carry_level * doing, carry_level * todo, + unsigned int including_insert_coord_p); + +extern int lock_carry_node(carry_level * level, carry_node * node); +extern int lock_carry_node_tail(carry_node * node); + +/* find left neighbor of a carry node + + Look for left neighbor of @node and add it to the @doing queue. See + comments in the body. 
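For orientation, a hedged sketch of how callers elsewhere in reiser4 typically drive the entry points declared in carry.h above (init_carry_pool, init_carry_level, reiser4_post_carry, reiser4_carry, done_carry_pool). The convention of placing the carry_level right behind the pool allocation and the trimmed error handling are this editor's reconstruction rather than a verbatim excerpt; see insert_by_coord() and related callers in fs/reiser4/tree.c for the real code.

	/* Sketch only: drives the carry machinery for a single COP_INSERT.
	 * Compiles only inside the reiser4 tree; payload setup is elided. */
	static int example_post_insert(znode *node)
	{
		carry_pool *pool;
		carry_level *lowest_level;
		carry_op *op;
		int result;

		/* one pool holds the op/node slots plus room for the level */
		pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level));
		if (IS_ERR(pool))
			return PTR_ERR(pool);
		lowest_level = (carry_level *)(pool + 1);
		init_carry_level(lowest_level, pool);

		/* queue a COP_INSERT against @node; apply_to_parent == 0 because
		 * the item goes into @node itself, not into its parent */
		op = reiser4_post_carry(lowest_level, COP_INSERT, node, 0);
		if (IS_ERR(op)) {
			done_carry_pool(pool);
			return PTR_ERR(op);
		}
		/* ... fill op->u.insert (type, d, flags) here ... */

		/* carry propagates the queued operation up the tree as needed */
		result = reiser4_carry(lowest_level, NULL);
		done_carry_pool(pool);
		return result;
	}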
+ +*/ +static carry_node *find_left_neighbor(carry_op * op /* node to find left + * neighbor of */ , + carry_level * doing/* level to scan */) +{ + int result; + carry_node *node; + carry_node *left; + int flags; + reiser4_tree *tree; + + node = op->node; + + tree = current_tree; + read_lock_tree(tree); + /* first, check whether left neighbor is already in a @doing queue */ + if (reiser4_carry_real(node)->left != NULL) { + /* NOTE: there is locking subtlety here. Look into + * find_right_neighbor() for more info */ + if (find_carry_node(doing, + reiser4_carry_real(node)->left) != NULL) { + read_unlock_tree(tree); + left = node; + do { + left = list_entry(left->header.level_linkage.prev, + carry_node, header.level_linkage); + assert("nikita-3408", !carry_node_end(doing, + left)); + } while (reiser4_carry_real(left) == + reiser4_carry_real(node)); + return left; + } + } + read_unlock_tree(tree); + + left = reiser4_add_carry_skip(doing, POOLO_BEFORE, node); + if (IS_ERR(left)) + return left; + + left->node = node->node; + left->free = 1; + + flags = GN_TRY_LOCK; + if (!(op->u.insert.flags & COPI_LOAD_LEFT)) + flags |= GN_NO_ALLOC; + + /* then, feeling lucky, peek left neighbor in the cache. */ + result = reiser4_get_left_neighbor(&left->lock_handle, + reiser4_carry_real(node), + ZNODE_WRITE_LOCK, flags); + if (result == 0) { + /* ok, node found and locked. */ + result = lock_carry_node_tail(left); + if (result != 0) + left = ERR_PTR(result); + } else if (result == -E_NO_NEIGHBOR || result == -ENOENT) { + /* node is leftmost node in a tree, or neighbor wasn't in + cache, or there is an extent on the left. */ + reiser4_pool_free(&doing->pool->node_pool, &left->header); + left = NULL; + } else if (doing->restartable) { + /* if left neighbor is locked, and level is restartable, add + new node to @doing and restart. */ + assert("nikita-913", node->parent != 0); + assert("nikita-914", node->node != NULL); + left->left = 1; + left->free = 0; + left = ERR_PTR(-E_REPEAT); + } else { + /* left neighbor is locked, level cannot be restarted. Just + ignore left neighbor. */ + reiser4_pool_free(&doing->pool->node_pool, &left->header); + left = NULL; + } + return left; +} + +/* find right neighbor of a carry node + + Look for right neighbor of @node and add it to the @doing queue. See + comments in the body. + +*/ +static carry_node *find_right_neighbor(carry_op * op /* node to find right + * neighbor of */ , + carry_level * doing/* level to scan */) +{ + int result; + carry_node *node; + carry_node *right; + lock_handle lh; + int flags; + reiser4_tree *tree; + + init_lh(&lh); + + node = op->node; + + tree = current_tree; + read_lock_tree(tree); + /* first, check whether right neighbor is already in a @doing queue */ + if (reiser4_carry_real(node)->right != NULL) { + /* + * Tree lock is taken here anyway, because, even if _outcome_ + * of (find_carry_node() != NULL) doesn't depends on + * concurrent updates to ->right, find_carry_node() cannot + * work with second argument NULL. Hence, following comment is + * of historic importance only. + * + * Subtle: + * + * Q: why don't we need tree lock here, looking for the right + * neighbor? 
+ * + * A: even if value of node->real_node->right were changed + * during find_carry_node() execution, outcome of execution + * wouldn't change, because (in short) other thread cannot add + * elements to the @doing, and if node->real_node->right + * already was in @doing, value of node->real_node->right + * couldn't change, because node cannot be inserted between + * locked neighbors. + */ + if (find_carry_node(doing, + reiser4_carry_real(node)->right) != NULL) { + read_unlock_tree(tree); + /* + * What we are doing here (this is also applicable to + * the find_left_neighbor()). + * + * tree_walk.c code requires that insertion of a + * pointer to a child, modification of parent pointer + * in the child, and insertion of the child into + * sibling list are atomic (see + * plugin/item/internal.c:create_hook_internal()). + * + * carry allocates new node long before pointer to it + * is inserted into parent and, actually, long before + * parent is even known. Such allocated-but-orphaned + * nodes are only trackable through carry level lists. + * + * Situation that is handled here is following: @node + * has valid ->right pointer, but there is + * allocated-but-orphaned node in the carry queue that + * is logically between @node and @node->right. Here + * we are searching for it. Critical point is that + * this is only possible if @node->right is also in + * the carry queue (this is checked above), because + * this is the only way new orphaned node could be + * inserted between them (before inserting new node, + * make_space() first tries to shift to the right, so, + * right neighbor will be locked and queued). + * + */ + right = node; + do { + right = list_entry(right->header.level_linkage.next, + carry_node, header.level_linkage); + assert("nikita-3408", !carry_node_end(doing, + right)); + } while (reiser4_carry_real(right) == + reiser4_carry_real(node)); + return right; + } + } + read_unlock_tree(tree); + + flags = GN_CAN_USE_UPPER_LEVELS; + if (!(op->u.insert.flags & COPI_LOAD_RIGHT)) + flags = GN_NO_ALLOC; + + /* then, try to lock right neighbor */ + init_lh(&lh); + result = reiser4_get_right_neighbor(&lh, + reiser4_carry_real(node), + ZNODE_WRITE_LOCK, flags); + if (result == 0) { + /* ok, node found and locked. */ + right = reiser4_add_carry_skip(doing, POOLO_AFTER, node); + if (!IS_ERR(right)) { + right->node = lh.node; + move_lh(&right->lock_handle, &lh); + right->free = 1; + result = lock_carry_node_tail(right); + if (result != 0) + right = ERR_PTR(result); + } + } else if ((result == -E_NO_NEIGHBOR) || (result == -ENOENT)) { + /* node is rightmost node in a tree, or neighbor wasn't in + cache, or there is an extent on the right. */ + right = NULL; + } else + right = ERR_PTR(result); + done_lh(&lh); + return right; +} + +/* how much free space in a @node is needed for @op + + How much space in @node is required for completion of @op, where @op is + insert or paste operation. +*/ +static unsigned int space_needed_for_op(znode * node /* znode data are + * inserted or + * pasted in */ , + carry_op * op /* carry + operation */ ) +{ + assert("nikita-919", op != NULL); + + switch (op->op) { + default: + impossible("nikita-1701", "Wrong opcode"); + case COP_INSERT: + return space_needed(node, NULL, op->u.insert.d->data, 1); + case COP_PASTE: + return space_needed(node, op->u.insert.d->coord, + op->u.insert.d->data, 0); + } +} + +/* how much space in @node is required to insert or paste @data at + @coord. 
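A worked example, with invented numbers, of the byte accounting performed by space_needed() and free_space_shortage() just below: a paste only needs room for the data (plus whatever the item plugin's estimate() adds), while an insert additionally pays the node plugin's per-item header overhead, which is why the same payload can fit as a paste but come up short as an insert. The 16-byte overhead here is made up; the real figure comes from the node plugin.

	/* stand-alone illustration of the insert/paste space arithmetic */
	#include <stdio.h>

	int main(void)
	{
		unsigned int free_space    = 50; /* znode_free_space() analogue */
		unsigned int data_len      = 40; /* reiser4_item_data.length analogue */
		unsigned int item_overhead = 16; /* node plugin item_overhead() analogue */

		/* paste extends an existing item: no new item header is needed */
		int paste_shortage  = (int)data_len - (int)free_space;
		/* insert creates a new item: header overhead is added on top */
		int insert_shortage = (int)(data_len + item_overhead) - (int)free_space;

		/* prints "paste: -10, insert: 6": the paste fits as is, while the
		 * insert leaves make_space() 6 bytes short, so it must shift data
		 * to a neighbor or allocate a new node */
		printf("paste: %d, insert: %d\n", paste_shortage, insert_shortage);
		return 0;
	}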
*/ +unsigned int space_needed(const znode * node /* node data are inserted or + * pasted in */ , + const coord_t *coord /* coord where data are + * inserted or pasted + * at */ , + const reiser4_item_data * data /* data to insert or + * paste */ , + int insertion/* non-0 is inserting, 0---paste */) +{ + int result; + item_plugin *iplug; + + assert("nikita-917", node != NULL); + assert("nikita-918", node_plugin_by_node(node) != NULL); + assert("vs-230", !insertion || (coord == NULL)); + + result = 0; + iplug = data->iplug; + if (iplug->b.estimate != NULL) { + /* ask item plugin how much space is needed to insert this + item */ + result += iplug->b.estimate(insertion ? NULL : coord, data); + } else { + /* reasonable default */ + result += data->length; + } + if (insertion) { + node_plugin *nplug; + + nplug = node->nplug; + /* and add node overhead */ + if (nplug->item_overhead != NULL) + result += nplug->item_overhead(node, NULL); + } + return result; +} + +/* find &coord in parent where pointer to new child is to be stored. */ +static int find_new_child_coord(carry_op * op /* COP_INSERT carry operation to + * insert pointer to new + * child */ ) +{ + int result; + znode *node; + znode *child; + + assert("nikita-941", op != NULL); + assert("nikita-942", op->op == COP_INSERT); + + node = reiser4_carry_real(op->node); + assert("nikita-943", node != NULL); + assert("nikita-944", node_plugin_by_node(node) != NULL); + + child = reiser4_carry_real(op->u.insert.child); + result = + find_new_child_ptr(node, child, op->u.insert.brother, + op->u.insert.d->coord); + + build_child_ptr_data(child, op->u.insert.d->data); + return result; +} + +/* additional amount of free space in @node required to complete @op */ +static int free_space_shortage(znode * node /* node to check */ , + carry_op * op/* operation being performed */) +{ + assert("nikita-1061", node != NULL); + assert("nikita-1062", op != NULL); + + switch (op->op) { + default: + impossible("nikita-1702", "Wrong opcode"); + case COP_INSERT: + case COP_PASTE: + return space_needed_for_op(node, op) - znode_free_space(node); + case COP_EXTENT: + /* when inserting extent shift data around until insertion + point is utmost in the node. */ + if (coord_wrt(op->u.insert.d->coord) == COORD_INSIDE) + return +1; + else + return -1; + } +} + +/* helper function: update node pointer in operation after insertion + point was probably shifted into @target. */ +static znode *sync_op(carry_op * op, carry_node * target) +{ + znode *insertion_node; + + /* reget node from coord: shift might move insertion coord to + the neighbor */ + insertion_node = op->u.insert.d->coord->node; + /* if insertion point was actually moved into new node, + update carry node pointer in operation. */ + if (insertion_node != reiser4_carry_real(op->node)) { + op->node = target; + assert("nikita-2540", + reiser4_carry_real(target) == insertion_node); + } + assert("nikita-2541", + reiser4_carry_real(op->node) == op->u.insert.d->coord->node); + return insertion_node; +} + +/* + * complete make_space() call: update tracked lock handle if necessary. See + * comments for fs/reiser4/carry.h:carry_track_type + */ +static int +make_space_tail(carry_op * op, carry_level * doing, znode * orig_node) +{ + int result; + carry_track_type tracking; + znode *node; + + tracking = doing->track_type; + node = op->u.insert.d->coord->node; + + if (tracking == CARRY_TRACK_NODE || + (tracking == CARRY_TRACK_CHANGE && node != orig_node)) { + /* inserting or pasting into node different from + original. 
Update lock handle supplied by caller. */ + assert("nikita-1417", doing->tracked != NULL); + done_lh(doing->tracked); + init_lh(doing->tracked); + result = longterm_lock_znode(doing->tracked, node, + ZNODE_WRITE_LOCK, + ZNODE_LOCK_HIPRI); + } else + result = 0; + return result; +} + +/* This is insertion policy function. It shifts data to the left and right + neighbors of insertion coord and allocates new nodes until there is enough + free space to complete @op. + + See comments in the body. + + Assumes that the node format favors insertions at the right end of the node + as node40 does. + + See carry_flow() on detail about flow insertion +*/ +static int make_space(carry_op * op /* carry operation, insert or paste */ , + carry_level * doing /* current carry queue */ , + carry_level * todo/* carry queue on the parent level */) +{ + znode *node; + int result; + int not_enough_space; + int blk_alloc; + znode *orig_node; + __u32 flags; + + coord_t *coord; + + assert("nikita-890", op != NULL); + assert("nikita-891", todo != NULL); + assert("nikita-892", + op->op == COP_INSERT || + op->op == COP_PASTE || op->op == COP_EXTENT); + assert("nikita-1607", + reiser4_carry_real(op->node) == op->u.insert.d->coord->node); + + flags = op->u.insert.flags; + + /* NOTE check that new node can only be allocated after checking left + * and right neighbors. This is necessary for proper work of + * find_{left,right}_neighbor(). */ + assert("nikita-3410", ergo(flags & COPI_DONT_ALLOCATE, + flags & COPI_DONT_SHIFT_LEFT)); + assert("nikita-3411", ergo(flags & COPI_DONT_ALLOCATE, + flags & COPI_DONT_SHIFT_RIGHT)); + + coord = op->u.insert.d->coord; + orig_node = node = coord->node; + + assert("nikita-908", node != NULL); + assert("nikita-909", node_plugin_by_node(node) != NULL); + + result = 0; + /* If there is not enough space in a node, try to shift something to + the left neighbor. This is a bit tricky, as locking to the left is + low priority. This is handled by restart logic in carry(). + */ + not_enough_space = free_space_shortage(node, op); + if (not_enough_space <= 0) + /* it is possible that carry was called when there actually + was enough space in the node. For example, when inserting + leftmost item so that delimiting keys have to be updated. + */ + return make_space_tail(op, doing, orig_node); + if (!(flags & COPI_DONT_SHIFT_LEFT)) { + carry_node *left; + /* make note in statistics of an attempt to move + something into the left neighbor */ + left = find_left_neighbor(op, doing); + if (unlikely(IS_ERR(left))) { + if (PTR_ERR(left) == -E_REPEAT) + return -E_REPEAT; + else { + /* some error other than restart request + occurred. This shouldn't happen. Issue a + warning and continue as if left neighbor + weren't existing. + */ + warning("nikita-924", + "Error accessing left neighbor: %li", + PTR_ERR(left)); + } + } else if (left != NULL) { + + /* shift everything possible on the left of and + including insertion coord into the left neighbor */ + result = carry_shift_data(LEFT_SIDE, coord, + reiser4_carry_real(left), + doing, todo, + flags & COPI_GO_LEFT); + + /* reget node from coord: shift_left() might move + insertion coord to the left neighbor */ + node = sync_op(op, left); + + not_enough_space = free_space_shortage(node, op); + /* There is not enough free space in @node, but + may be, there is enough free space in + @left. Various balancing decisions are valid here. + The same for the shifiting to the right. 
+ */ + } + } + /* If there still is not enough space, shift to the right */ + if (not_enough_space > 0 && !(flags & COPI_DONT_SHIFT_RIGHT)) { + carry_node *right; + + right = find_right_neighbor(op, doing); + if (IS_ERR(right)) { + warning("nikita-1065", + "Error accessing right neighbor: %li", + PTR_ERR(right)); + } else if (right != NULL) { + /* node containing insertion point, and its right + neighbor node are write locked by now. + + shift everything possible on the right of but + excluding insertion coord into the right neighbor + */ + result = carry_shift_data(RIGHT_SIDE, coord, + reiser4_carry_real(right), + doing, todo, + flags & COPI_GO_RIGHT); + /* reget node from coord: shift_right() might move + insertion coord to the right neighbor */ + node = sync_op(op, right); + not_enough_space = free_space_shortage(node, op); + } + } + /* If there is still not enough space, allocate new node(s). + + We try to allocate new blocks if COPI_DONT_ALLOCATE is not set in + the carry operation flags (currently this is needed during flush + only). + */ + for (blk_alloc = 0; + not_enough_space > 0 && result == 0 && blk_alloc < 2 && + !(flags & COPI_DONT_ALLOCATE); ++blk_alloc) { + carry_node *fresh; /* new node we are allocating */ + coord_t coord_shadow; /* remembered insertion point before + * shifting data into new node */ + carry_node *node_shadow; /* remembered insertion node + * before shifting */ + unsigned int gointo; /* whether insertion point should move + * into newly allocated node */ + + /* allocate new node on the right of @node. Znode and disk + fake block number for new node are allocated. + + add_new_znode() posts carry operation COP_INSERT with + COPT_CHILD option to the parent level to add + pointer to newly created node to its parent. + + Subtle point: if several new nodes are required to complete + insertion operation at this level, they will be inserted + into their parents in the order of creation, which means + that @node will be valid "cookie" at the time of insertion. + + */ + fresh = add_new_znode(node, op->node, doing, todo); + if (IS_ERR(fresh)) + return PTR_ERR(fresh); + + /* Try to shift into new node. */ + result = lock_carry_node(doing, fresh); + zput(reiser4_carry_real(fresh)); + if (result != 0) { + warning("nikita-947", + "Cannot lock new node: %i", result); + return result; + } + + /* both nodes are write locked by now. + + shift everything possible on the right of and + including insertion coord into the right neighbor. + */ + coord_dup(&coord_shadow, op->u.insert.d->coord); + node_shadow = op->node; + /* move insertion point into newly created node if: + + . insertion point is rightmost in the source node, or + . this is not the first node we are allocating in a row. 
+ */ + gointo = + (blk_alloc > 0) || + coord_is_after_rightmost(op->u.insert.d->coord); + + if (gointo && + op->op == COP_PASTE && + coord_is_existing_item(op->u.insert.d->coord) && + is_solid_item((item_plugin_by_coord(op->u.insert.d->coord)))) { + /* paste into solid (atomic) item, which can contain + only one unit, so we need to shift it right, where + insertion point supposed to be */ + + assert("edward-1444", op->u.insert.d->data->iplug == + item_plugin_by_id(STATIC_STAT_DATA_ID)); + assert("edward-1445", + op->u.insert.d->data->length > + node_plugin_by_node(coord->node)->free_space + (coord->node)); + + op->u.insert.d->coord->between = BEFORE_UNIT; + } + + result = carry_shift_data(RIGHT_SIDE, coord, + reiser4_carry_real(fresh), + doing, todo, gointo); + /* if insertion point was actually moved into new node, + update carry node pointer in operation. */ + node = sync_op(op, fresh); + not_enough_space = free_space_shortage(node, op); + if ((not_enough_space > 0) && (node != coord_shadow.node)) { + /* there is not enough free in new node. Shift + insertion point back to the @shadow_node so that + next new node would be inserted between + @shadow_node and @fresh. + */ + coord_normalize(&coord_shadow); + coord_dup(coord, &coord_shadow); + node = coord->node; + op->node = node_shadow; + if (1 || (flags & COPI_STEP_BACK)) { + /* still not enough space?! Maybe there is + enough space in the source node (i.e., node + data are moved from) now. + */ + not_enough_space = + free_space_shortage(node, op); + } + } + } + if (not_enough_space > 0) { + if (!(flags & COPI_DONT_ALLOCATE)) + warning("nikita-948", "Cannot insert new item"); + result = -E_NODE_FULL; + } + assert("nikita-1622", ergo(result == 0, + reiser4_carry_real(op->node) == coord->node)); + assert("nikita-2616", coord == op->u.insert.d->coord); + if (result == 0) + result = make_space_tail(op, doing, orig_node); + return result; +} + +/* insert_paste_common() - common part of insert and paste operations + + This function performs common part of COP_INSERT and COP_PASTE. + + There are two ways in which insertion/paste can be requested: + + . by directly supplying reiser4_item_data. In this case, op -> + u.insert.type is set to COPT_ITEM_DATA. + + . by supplying child pointer to which is to inserted into parent. In this + case op -> u.insert.type == COPT_CHILD. + + . by supplying key of new item/unit. This is currently only used during + extent insertion + + This is required, because when new node is allocated we don't know at what + position pointer to it is to be stored in the parent. Actually, we don't + even know what its parent will be, because parent can be re-balanced + concurrently and new node re-parented, and because parent can be full and + pointer to the new node will go into some other node. + + insert_paste_common() resolves pointer to child node into position in the + parent by calling find_new_child_coord(), that fills + reiser4_item_data. After this, insertion/paste proceeds uniformly. + + Another complication is with finding free space during pasting. It may + happen that while shifting items to the neighbors and newly allocated + nodes, insertion coord can no longer be in the item we wanted to paste + into. At this point, paste becomes (morphs) into insert. Moreover free + space analysis has to be repeated, because amount of space required for + insertion is different from that of paste (item header overhead, etc). 
+ + This function "unifies" different insertion modes (by resolving child + pointer or key into insertion coord), and then calls make_space() to free + enough space in the node by shifting data to the left and right and by + allocating new nodes if necessary. Carry operation knows amount of space + required for its completion. After enough free space is obtained, caller of + this function (carry_{insert,paste,etc.}) performs actual insertion/paste + by calling item plugin method. + +*/ +static int insert_paste_common(carry_op * op /* carry operation being + * performed */ , + carry_level * doing /* current carry level */ , + carry_level * todo /* next carry level */ , + carry_insert_data * cdata /* pointer to + * cdata */ , + coord_t *coord /* insertion/paste coord */ , + reiser4_item_data * data /* data to be + * inserted/pasted */ ) +{ + assert("nikita-981", op != NULL); + assert("nikita-980", todo != NULL); + assert("nikita-979", (op->op == COP_INSERT) || (op->op == COP_PASTE) + || (op->op == COP_EXTENT)); + + if (op->u.insert.type == COPT_PASTE_RESTARTED) { + /* nothing to do. Fall through to make_space(). */ + ; + } else if (op->u.insert.type == COPT_KEY) { + node_search_result intra_node; + znode *node; + /* Problem with doing batching at the lowest level, is that + operations here are given by coords where modification is + to be performed, and one modification can invalidate coords + of all following operations. + + So, we are implementing yet another type for operation that + will use (the only) "locator" stable across shifting of + data between nodes, etc.: key (COPT_KEY). + + This clause resolves key to the coord in the node. + + But node can change also. Probably some pieces have to be + added to the lock_carry_node(), to lock node by its key. + + */ + /* NOTE-NIKITA Lookup bias is fixed to FIND_EXACT. Complain + if you need something else. */ + op->u.insert.d->coord = coord; + node = reiser4_carry_real(op->node); + intra_node = node_plugin_by_node(node)->lookup + (node, op->u.insert.d->key, FIND_EXACT, + op->u.insert.d->coord); + if ((intra_node != NS_FOUND) && (intra_node != NS_NOT_FOUND)) { + warning("nikita-1715", "Intra node lookup failure: %i", + intra_node); + return intra_node; + } + } else if (op->u.insert.type == COPT_CHILD) { + /* if we are asked to insert pointer to the child into + internal node, first convert pointer to the child into + coord within parent node. + */ + znode *child; + int result; + + op->u.insert.d = cdata; + op->u.insert.d->coord = coord; + op->u.insert.d->data = data; + op->u.insert.d->coord->node = reiser4_carry_real(op->node); + result = find_new_child_coord(op); + child = reiser4_carry_real(op->u.insert.child); + if (result != NS_NOT_FOUND) { + warning("nikita-993", + "Cannot find a place for child pointer: %i", + result); + return result; + } + /* This only happens when we did multiple insertions at + the previous level, trying to insert single item and + it so happened, that insertion of pointers to all new + nodes before this one already caused parent node to + split (may be several times). + + I am going to come up with better solution. + + You are not expected to understand this. + -- v6root/usr/sys/ken/slp.c + + Basically, what happens here is the following: carry came + to the parent level and is about to insert internal item + pointing to the child node that it just inserted in the + level below. 
Position where internal item is to be inserted + was found by find_new_child_coord() above, but node of the + current carry operation (that is, parent node of child + inserted on the previous level), was determined earlier in + the lock_carry_level/lock_carry_node. It could so happen + that other carry operations already performed on the parent + level already split parent node, so that insertion point + moved into another node. Handle this by creating new carry + node for insertion point if necessary. + */ + if (reiser4_carry_real(op->node) != + op->u.insert.d->coord->node) { + pool_ordering direction; + znode *z1; + znode *z2; + reiser4_key k1; + reiser4_key k2; + + /* + * determine in what direction insertion point + * moved. Do this by comparing delimiting keys. + */ + z1 = op->u.insert.d->coord->node; + z2 = reiser4_carry_real(op->node); + if (keyle(leftmost_key_in_node(z1, &k1), + leftmost_key_in_node(z2, &k2))) + /* insertion point moved to the left */ + direction = POOLO_BEFORE; + else + /* insertion point moved to the right */ + direction = POOLO_AFTER; + + op->node = reiser4_add_carry_skip(doing, + direction, op->node); + if (IS_ERR(op->node)) + return PTR_ERR(op->node); + op->node->node = op->u.insert.d->coord->node; + op->node->free = 1; + result = lock_carry_node(doing, op->node); + if (result != 0) + return result; + } + + /* + * set up key of an item being inserted: we are inserting + * internal item and its key is (by the very definition of + * search tree) is leftmost key in the child node. + */ + write_lock_dk(znode_get_tree(child)); + op->u.insert.d->key = leftmost_key_in_node(child, + znode_get_ld_key(child)); + write_unlock_dk(znode_get_tree(child)); + op->u.insert.d->data->arg = op->u.insert.brother; + } else { + assert("vs-243", op->u.insert.d->coord != NULL); + op->u.insert.d->coord->node = reiser4_carry_real(op->node); + } + + /* find free space. */ + return make_space(op, doing, todo); +} + +/* handle carry COP_INSERT operation. + + Insert new item into node. New item can be given in one of two ways: + + - by passing &tree_coord and &reiser4_item_data as part of @op. This is + only applicable at the leaf/twig level. + + - by passing a child node pointer to which is to be inserted by this + operation. + +*/ +static int carry_insert(carry_op * op /* operation to perform */ , + carry_level * doing /* queue of operations @op + * is part of */ , + carry_level * todo /* queue where new operations + * are accumulated */ ) +{ + znode *node; + carry_insert_data cdata; + coord_t coord; + reiser4_item_data data; + carry_plugin_info info; + int result; + + assert("nikita-1036", op != NULL); + assert("nikita-1037", todo != NULL); + assert("nikita-1038", op->op == COP_INSERT); + + coord_init_zero(&coord); + + /* perform common functionality of insert and paste. */ + result = insert_paste_common(op, doing, todo, &cdata, &coord, &data); + if (result != 0) + return result; + + node = op->u.insert.d->coord->node; + assert("nikita-1039", node != NULL); + assert("nikita-1040", node_plugin_by_node(node) != NULL); + + assert("nikita-949", + space_needed_for_op(node, op) <= znode_free_space(node)); + + /* ask node layout to create new item. */ + info.doing = doing; + info.todo = todo; + result = node_plugin_by_node(node)->create_item + (op->u.insert.d->coord, op->u.insert.d->key, op->u.insert.d->data, + &info); + doing->restartable = 0; + znode_make_dirty(node); + + return result; +} + +/* + * Flow insertion code. 
COP_INSERT_FLOW is special tree operation that is + * supplied with a "flow" (that is, a stream of data) and inserts it into tree + * by slicing into multiple items. + */ + +#define flow_insert_point(op) ((op)->u.insert_flow.insert_point) +#define flow_insert_flow(op) ((op)->u.insert_flow.flow) +#define flow_insert_data(op) ((op)->u.insert_flow.data) + +static size_t item_data_overhead(carry_op * op) +{ + if (flow_insert_data(op)->iplug->b.estimate == NULL) + return 0; + return (flow_insert_data(op)->iplug->b. + estimate(NULL /* estimate insertion */ , flow_insert_data(op)) - + flow_insert_data(op)->length); +} + +/* FIXME-VS: this is called several times during one make_flow_for_insertion + and it will always return the same result. Some optimization could be made + by calculating this value once at the beginning and passing it around. That + would reduce some flexibility in future changes +*/ +static int can_paste(coord_t *, const reiser4_key *, const reiser4_item_data *); +static size_t flow_insertion_overhead(carry_op * op) +{ + znode *node; + size_t insertion_overhead; + + node = flow_insert_point(op)->node; + insertion_overhead = 0; + if (node->nplug->item_overhead && + !can_paste(flow_insert_point(op), &flow_insert_flow(op)->key, + flow_insert_data(op))) + insertion_overhead = + node->nplug->item_overhead(node, NULL) + + item_data_overhead(op); + return insertion_overhead; +} + +/* how many bytes of flow does fit to the node */ +static int what_can_fit_into_node(carry_op * op) +{ + size_t free, overhead; + + overhead = flow_insertion_overhead(op); + free = znode_free_space(flow_insert_point(op)->node); + if (free <= overhead) + return 0; + free -= overhead; + /* FIXME: flow->length is loff_t only to not get overflowed in case of + expandign truncate */ + if (free < op->u.insert_flow.flow->length) + return free; + return (int)op->u.insert_flow.flow->length; +} + +/* in make_space_for_flow_insertion we need to check either whether whole flow + fits into a node or whether minimal fraction of flow fits into a node */ +static int enough_space_for_whole_flow(carry_op * op) +{ + return (unsigned)what_can_fit_into_node(op) == + op->u.insert_flow.flow->length; +} + +#define MIN_FLOW_FRACTION 1 +static int enough_space_for_min_flow_fraction(carry_op * op) +{ + //assert("vs-902", coord_is_after_rightmost(flow_insert_point(op))); + + return what_can_fit_into_node(op) >= MIN_FLOW_FRACTION; +} + +/* this returns 0 if left neighbor was obtained successfully and everything + upto insertion point including it were shifted and left neighbor still has + some free space to put minimal fraction of flow into it */ +static int +make_space_by_shift_left(carry_op * op, carry_level * doing, carry_level * todo) +{ + carry_node *left; + znode *orig; + + left = find_left_neighbor(op, doing); + if (unlikely(IS_ERR(left))) { + warning("vs-899", + "make_space_by_shift_left: " + "error accessing left neighbor: %li", PTR_ERR(left)); + return 1; + } + if (left == NULL) + /* left neighbor either does not exist or is unformatted + node */ + return 1; + + orig = flow_insert_point(op)->node; + /* try to shift content of node @orig from its head upto insert point + including insertion point into the left neighbor */ + carry_shift_data(LEFT_SIDE, flow_insert_point(op), + reiser4_carry_real(left), doing, todo, + 1/* including insert point */); + if (reiser4_carry_real(left) != flow_insert_point(op)->node) { + /* insertion point did not move */ + return 1; + } + + /* insertion point is set after last item in the node */ + 
assert("vs-900", coord_is_after_rightmost(flow_insert_point(op))); + + if (!enough_space_for_min_flow_fraction(op)) { + /* insertion point node does not have enough free space to put + even minimal portion of flow into it, therefore, move + insertion point back to orig node (before first item) */ + coord_init_before_first_item(flow_insert_point(op), orig); + return 1; + } + + /* part of flow is to be written to the end of node */ + op->node = left; + return 0; +} + +/* this returns 0 if right neighbor was obtained successfully and everything to + the right of insertion point was shifted to it and node got enough free + space to put minimal fraction of flow into it */ +static int +make_space_by_shift_right(carry_op * op, carry_level * doing, + carry_level * todo) +{ + carry_node *right; + + right = find_right_neighbor(op, doing); + if (unlikely(IS_ERR(right))) { + warning("nikita-1065", "shift_right_excluding_insert_point: " + "error accessing right neighbor: %li", PTR_ERR(right)); + return 1; + } + if (right) { + /* shift everything possible on the right of but excluding + insertion coord into the right neighbor */ + carry_shift_data(RIGHT_SIDE, flow_insert_point(op), + reiser4_carry_real(right), doing, todo, + 0/* not including insert point */); + } else { + /* right neighbor either does not exist or is unformatted + node */ + ; + } + if (coord_is_after_rightmost(flow_insert_point(op))) { + if (enough_space_for_min_flow_fraction(op)) { + /* part of flow is to be written to the end of node */ + return 0; + } + } + + /* new node is to be added if insert point node did not get enough + space for whole flow */ + return 1; +} + +/* this returns 0 when insert coord is set at the node end and fraction of flow + fits into that node */ +static int +make_space_by_new_nodes(carry_op * op, carry_level * doing, carry_level * todo) +{ + int result; + znode *node; + carry_node *new; + + node = flow_insert_point(op)->node; + + if (op->u.insert_flow.new_nodes == CARRY_FLOW_NEW_NODES_LIMIT) + return RETERR(-E_NODE_FULL); + /* add new node after insert point node */ + new = add_new_znode(node, op->node, doing, todo); + if (unlikely(IS_ERR(new))) + return PTR_ERR(new); + result = lock_carry_node(doing, new); + zput(reiser4_carry_real(new)); + if (unlikely(result)) + return result; + op->u.insert_flow.new_nodes++; + if (!coord_is_after_rightmost(flow_insert_point(op))) { + carry_shift_data(RIGHT_SIDE, flow_insert_point(op), + reiser4_carry_real(new), doing, todo, + 0/* not including insert point */); + assert("vs-901", + coord_is_after_rightmost(flow_insert_point(op))); + + if (enough_space_for_min_flow_fraction(op)) + return 0; + if (op->u.insert_flow.new_nodes == CARRY_FLOW_NEW_NODES_LIMIT) + return RETERR(-E_NODE_FULL); + + /* add one more new node */ + new = add_new_znode(node, op->node, doing, todo); + if (unlikely(IS_ERR(new))) + return PTR_ERR(new); + result = lock_carry_node(doing, new); + zput(reiser4_carry_real(new)); + if (unlikely(result)) + return result; + op->u.insert_flow.new_nodes++; + } + + /* move insertion point to new node */ + coord_init_before_first_item(flow_insert_point(op), + reiser4_carry_real(new)); + op->node = new; + return 0; +} + +static int +make_space_for_flow_insertion(carry_op * op, carry_level * doing, + carry_level * todo) +{ + __u32 flags = op->u.insert_flow.flags; + + if (enough_space_for_whole_flow(op)) { + /* whole flow fits into insert point node */ + return 0; + } + if ((flags & COPI_SWEEP) && + enough_space_for_min_flow_fraction(op)) + /* use the rest of space in 
the current node */ + return 0; + + if (!(flags & COPI_DONT_SHIFT_LEFT) + && (make_space_by_shift_left(op, doing, todo) == 0)) { + /* insert point is shifted to left neighbor of original insert + point node and is set after last unit in that node. It has + enough space to fit at least minimal fraction of flow. */ + return 0; + } + + if (enough_space_for_whole_flow(op)) { + /* whole flow fits into insert point node */ + return 0; + } + + if (!(flags & COPI_DONT_SHIFT_RIGHT) + && (make_space_by_shift_right(op, doing, todo) == 0)) { + /* insert point is still set to the same node, but there is + nothing to the right of insert point. */ + return 0; + } + + if (enough_space_for_whole_flow(op)) { + /* whole flow fits into insert point node */ + return 0; + } + + return make_space_by_new_nodes(op, doing, todo); +} + +/* implements COP_INSERT_FLOW operation */ +static int +carry_insert_flow(carry_op * op, carry_level * doing, carry_level * todo) +{ + int result; + flow_t *f; + coord_t *insert_point; + node_plugin *nplug; + carry_plugin_info info; + znode *orig_node; + lock_handle *orig_lh; + + f = op->u.insert_flow.flow; + result = 0; + + /* carry system needs this to work */ + info.doing = doing; + info.todo = todo; + + orig_node = flow_insert_point(op)->node; + orig_lh = doing->tracked; + + while (f->length) { + result = make_space_for_flow_insertion(op, doing, todo); + if (result) + break; + + insert_point = flow_insert_point(op); + nplug = node_plugin_by_node(insert_point->node); + + /* compose item data for insertion/pasting */ + flow_insert_data(op)->data = f->data; + flow_insert_data(op)->length = what_can_fit_into_node(op); + + if (can_paste(insert_point, &f->key, flow_insert_data(op))) { + /* insert point is set to item of file we are writing to + and we have to append to it */ + assert("vs-903", insert_point->between == AFTER_UNIT); + nplug->change_item_size(insert_point, + flow_insert_data(op)->length); + flow_insert_data(op)->iplug->b.paste(insert_point, + flow_insert_data + (op), &info); + } else { + /* new item must be inserted */ + pos_in_node_t new_pos; + flow_insert_data(op)->length += item_data_overhead(op); + + /* FIXME-VS: this is because node40_create_item changes + insert_point for obscure reasons */ + switch (insert_point->between) { + case AFTER_ITEM: + new_pos = insert_point->item_pos + 1; + break; + case EMPTY_NODE: + new_pos = 0; + break; + case BEFORE_ITEM: + assert("vs-905", insert_point->item_pos == 0); + new_pos = 0; + break; + default: + impossible("vs-906", + "carry_insert_flow: invalid coord"); + new_pos = 0; + break; + } + + nplug->create_item(insert_point, &f->key, + flow_insert_data(op), &info); + coord_set_item_pos(insert_point, new_pos); + } + coord_init_after_item_end(insert_point); + doing->restartable = 0; + znode_make_dirty(insert_point->node); + + move_flow_forward(f, (unsigned)flow_insert_data(op)->length); + } + + if (orig_node != flow_insert_point(op)->node) { + /* move lock to new insert point */ + done_lh(orig_lh); + init_lh(orig_lh); + result = + longterm_lock_znode(orig_lh, flow_insert_point(op)->node, + ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI); + } + + return result; +} + +/* implements COP_DELETE operation + + Remove pointer to @op -> u.delete.child from it's parent. + + This function also handles killing of a tree root is last pointer from it + was removed. This is complicated by our handling of "twig" level: root on + twig level is never killed. 
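+
+   (For illustration: when the root sits at the twig level and holds a
+   single internal item, that item and the child it points to are kept
+   even if the child has become empty; see the REISER4_MIN_TREE_HEIGHT
+   check below.)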
+ +*/ +static int carry_delete(carry_op * op /* operation to be performed */ , + carry_level * doing UNUSED_ARG /* current carry + * level */ , + carry_level * todo/* next carry level */) +{ + int result; + coord_t coord; + coord_t coord2; + znode *parent; + znode *child; + carry_plugin_info info; + reiser4_tree *tree; + + /* + * This operation is called to delete internal item pointing to the + * child node that was removed by carry from the tree on the previous + * tree level. + */ + + assert("nikita-893", op != NULL); + assert("nikita-894", todo != NULL); + assert("nikita-895", op->op == COP_DELETE); + + coord_init_zero(&coord); + coord_init_zero(&coord2); + + parent = reiser4_carry_real(op->node); + child = op->u.delete.child ? + reiser4_carry_real(op->u.delete.child) : op->node->node; + tree = znode_get_tree(child); + read_lock_tree(tree); + + /* + * @parent was determined when carry entered parent level + * (lock_carry_level/lock_carry_node). Since then, actual parent of + * @child node could change due to other carry operations performed on + * the parent level. Check for this. + */ + + if (znode_parent(child) != parent) { + /* NOTE-NIKITA add stat counter for this. */ + parent = znode_parent(child); + assert("nikita-2581", find_carry_node(doing, parent)); + } + read_unlock_tree(tree); + + assert("nikita-1213", znode_get_level(parent) > LEAF_LEVEL); + + /* Twig level horrors: tree should be of height at least 2. So, last + pointer from the root at twig level is preserved even if child is + empty. This is ugly, but so it was architectured. + */ + + if (znode_is_root(parent) && + znode_get_level(parent) <= REISER4_MIN_TREE_HEIGHT && + node_num_items(parent) == 1) { + /* Delimiting key manipulations. */ + write_lock_dk(tree); + znode_set_ld_key(child, znode_set_ld_key(parent, reiser4_min_key())); + znode_set_rd_key(child, znode_set_rd_key(parent, reiser4_max_key())); + ZF_SET(child, JNODE_DKSET); + write_unlock_dk(tree); + + /* @child escaped imminent death! */ + ZF_CLR(child, JNODE_HEARD_BANSHEE); + return 0; + } + + /* convert child pointer to the coord_t */ + result = find_child_ptr(parent, child, &coord); + if (result != NS_FOUND) { + warning("nikita-994", "Cannot find child pointer: %i", result); + print_coord_content("coord", &coord); + return result; + } + + coord_dup(&coord2, &coord); + info.doing = doing; + info.todo = todo; + { + /* + * Actually kill internal item: prepare structure with + * arguments for ->cut_and_kill() method... + */ + + struct carry_kill_data kdata; + kdata.params.from = &coord; + kdata.params.to = &coord2; + kdata.params.from_key = NULL; + kdata.params.to_key = NULL; + kdata.params.smallest_removed = NULL; + kdata.params.truncate = 1; + kdata.flags = op->u.delete.flags; + kdata.inode = NULL; + kdata.left = NULL; + kdata.right = NULL; + kdata.buf = NULL; + /* ... and call it. */ + result = node_plugin_by_node(parent)->cut_and_kill(&kdata, + &info); + } + doing->restartable = 0; + + /* check whether root should be killed violently */ + if (znode_is_root(parent) && + /* don't kill roots at and lower than twig level */ + znode_get_level(parent) > REISER4_MIN_TREE_HEIGHT && + node_num_items(parent) == 1) + result = reiser4_kill_tree_root(coord.node); + + return result < 0 ? result : 0; +} + +/* implements COP_CUT opration + + Cuts part or whole content of node. 
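+
+   Depending on op->u.cut_or_kill.is_cut this delegates either to the node
+   plugin ->cut() method or to its ->cut_and_kill() method; see the
+   dispatch below.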
+ +*/ +static int carry_cut(carry_op * op /* operation to be performed */ , + carry_level * doing /* current carry level */ , + carry_level * todo/* next carry level */) +{ + int result; + carry_plugin_info info; + node_plugin *nplug; + + assert("nikita-896", op != NULL); + assert("nikita-897", todo != NULL); + assert("nikita-898", op->op == COP_CUT); + + info.doing = doing; + info.todo = todo; + + nplug = node_plugin_by_node(reiser4_carry_real(op->node)); + if (op->u.cut_or_kill.is_cut) + result = nplug->cut(op->u.cut_or_kill.u.cut, &info); + else + result = nplug->cut_and_kill(op->u.cut_or_kill.u.kill, &info); + + doing->restartable = 0; + return result < 0 ? result : 0; +} + +/* helper function for carry_paste(): returns true if @op can be continued as + paste */ +static int +can_paste(coord_t *icoord, const reiser4_key * key, + const reiser4_item_data * data) +{ + coord_t circa; + item_plugin *new_iplug; + item_plugin *old_iplug; + int result = 0; /* to keep gcc shut */ + + assert("", icoord->between != AT_UNIT); + + /* obviously, one cannot paste when node is empty---there is nothing + to paste into. */ + if (node_is_empty(icoord->node)) + return 0; + /* if insertion point is at the middle of the item, then paste */ + if (!coord_is_between_items(icoord)) + return 1; + coord_dup(&circa, icoord); + circa.between = AT_UNIT; + + old_iplug = item_plugin_by_coord(&circa); + new_iplug = data->iplug; + + /* check whether we can paste to the item @icoord is "at" when we + ignore ->between field */ + if (old_iplug == new_iplug && item_can_contain_key(&circa, key, data)) + result = 1; + else if (icoord->between == BEFORE_UNIT + || icoord->between == BEFORE_ITEM) { + /* otherwise, try to glue to the item at the left, if any */ + coord_dup(&circa, icoord); + if (coord_set_to_left(&circa)) { + result = 0; + coord_init_before_item(icoord); + } else { + old_iplug = item_plugin_by_coord(&circa); + result = (old_iplug == new_iplug) + && item_can_contain_key(icoord, key, data); + if (result) { + coord_dup(icoord, &circa); + icoord->between = AFTER_UNIT; + } + } + } else if (icoord->between == AFTER_UNIT + || icoord->between == AFTER_ITEM) { + coord_dup(&circa, icoord); + /* otherwise, try to glue to the item at the right, if any */ + if (coord_set_to_right(&circa)) { + result = 0; + coord_init_after_item(icoord); + } else { + int (*cck) (const coord_t *, const reiser4_key *, + const reiser4_item_data *); + + old_iplug = item_plugin_by_coord(&circa); + + cck = old_iplug->b.can_contain_key; + if (cck == NULL) + /* item doesn't define ->can_contain_key + method? So it is not expandable. */ + result = 0; + else { + result = (old_iplug == new_iplug) + && cck(&circa /*icoord */ , key, data); + if (result) { + coord_dup(icoord, &circa); + icoord->between = BEFORE_UNIT; + } + } + } + } else + impossible("nikita-2513", "Nothing works"); + if (result) { + if (icoord->between == BEFORE_ITEM) { + assert("vs-912", icoord->unit_pos == 0); + icoord->between = BEFORE_UNIT; + } else if (icoord->between == AFTER_ITEM) { + coord_init_after_item_end(icoord); + } + } + return result; +} + +/* implements COP_PASTE operation + + Paste data into existing item. This is complicated by the fact that after + we shifted something to the left or right neighbors trying to free some + space, item we were supposed to paste into can be in different node than + insertion coord. If so, we are no longer doing paste, but insert. See + comments in insert_paste_common(). 
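+
+   In that case the operation is restarted below as COP_INSERT (with type
+   COPT_PASTE_RESTARTED) through op_dispatch_table[].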
+ +*/ +static int carry_paste(carry_op * op /* operation to be performed */ , + carry_level * doing UNUSED_ARG /* current carry + * level */ , + carry_level * todo/* next carry level */) +{ + znode *node; + carry_insert_data cdata; + coord_t dcoord; + reiser4_item_data data; + int result; + int real_size; + item_plugin *iplug; + carry_plugin_info info; + coord_t *coord; + + assert("nikita-982", op != NULL); + assert("nikita-983", todo != NULL); + assert("nikita-984", op->op == COP_PASTE); + + coord_init_zero(&dcoord); + + result = insert_paste_common(op, doing, todo, &cdata, &dcoord, &data); + if (result != 0) + return result; + + coord = op->u.insert.d->coord; + + /* handle case when op -> u.insert.coord doesn't point to the item + of required type. restart as insert. */ + if (!can_paste(coord, op->u.insert.d->key, op->u.insert.d->data)) { + op->op = COP_INSERT; + op->u.insert.type = COPT_PASTE_RESTARTED; + result = op_dispatch_table[COP_INSERT].handler(op, doing, todo); + + return result; + } + + node = coord->node; + iplug = item_plugin_by_coord(coord); + assert("nikita-992", iplug != NULL); + + assert("nikita-985", node != NULL); + assert("nikita-986", node_plugin_by_node(node) != NULL); + + assert("nikita-987", + space_needed_for_op(node, op) <= znode_free_space(node)); + + assert("nikita-1286", coord_is_existing_item(coord)); + + /* + * if item is expanded as a result of this operation, we should first + * change item size, than call ->b.paste item method. If item is + * shrunk, it should be done other way around: first call ->b.paste + * method, then reduce item size. + */ + + real_size = space_needed_for_op(node, op); + if (real_size > 0) + node->nplug->change_item_size(coord, real_size); + + doing->restartable = 0; + info.doing = doing; + info.todo = todo; + + result = iplug->b.paste(coord, op->u.insert.d->data, &info); + + if (real_size < 0) + node->nplug->change_item_size(coord, real_size); + + /* if we pasted at the beginning of the item, update item's key. */ + if (coord->unit_pos == 0 && coord->between != AFTER_UNIT) + node->nplug->update_item_key(coord, op->u.insert.d->key, &info); + + znode_make_dirty(node); + return result; +} + +/* handle carry COP_EXTENT operation. */ +static int carry_extent(carry_op * op /* operation to perform */ , + carry_level * doing /* queue of operations @op + * is part of */ , + carry_level * todo /* queue where new operations + * are accumulated */ ) +{ + znode *node; + carry_insert_data cdata; + coord_t coord; + reiser4_item_data data; + carry_op *delete_dummy; + carry_op *insert_extent; + int result; + carry_plugin_info info; + + assert("nikita-1751", op != NULL); + assert("nikita-1752", todo != NULL); + assert("nikita-1753", op->op == COP_EXTENT); + + /* extent insertion overview: + + extents live on the TWIG LEVEL, which is level one above the leaf + one. This complicates extent insertion logic somewhat: it may + happen (and going to happen all the time) that in logical key + ordering extent has to be placed between items I1 and I2, located + at the leaf level, but I1 and I2 are in the same formatted leaf + node N1. To insert extent one has to + + (1) reach node N1 and shift data between N1, its neighbors and + possibly newly allocated nodes until I1 and I2 fall into different + nodes. Since I1 and I2 are still neighboring items in logical key + order, they will be necessary utmost items in their respective + nodes. + + (2) After this new extent item is inserted into node on the twig + level. 
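+
+	   For illustration (hypothetical layout): if N1 = [ I1 I2 ] and the
+	   extent key falls between I1 and I2, step (1) leaves two leaves
+	   [ I1 ] and [ I2 ], and step (2) inserts the new extent item into
+	   their parent on the twig level, between the pointers to those two
+	   leaves.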
+ + Fortunately this process can reuse almost all code from standard + insertion procedure (viz. make_space() and insert_paste_common()), + due to the following observation: make_space() only shifts data up + to and excluding or including insertion point. It never + "over-moves" through insertion point. Thus, one can use + make_space() to perform step (1). All required for this is just to + instruct free_space_shortage() to keep make_space() shifting data + until insertion point is at the node border. + + */ + + /* perform common functionality of insert and paste. */ + result = insert_paste_common(op, doing, todo, &cdata, &coord, &data); + if (result != 0) + return result; + + node = op->u.extent.d->coord->node; + assert("nikita-1754", node != NULL); + assert("nikita-1755", node_plugin_by_node(node) != NULL); + assert("nikita-1700", coord_wrt(op->u.extent.d->coord) != COORD_INSIDE); + + /* NOTE-NIKITA add some checks here. Not assertions, -EIO. Check that + extent fits between items. */ + + info.doing = doing; + info.todo = todo; + + /* there is another complication due to placement of extents on the + twig level: extents are "rigid" in the sense that key-range + occupied by extent cannot grow indefinitely to the right as it is + for the formatted leaf nodes. Because of this when search finds two + adjacent extents on the twig level, it has to "drill" to the leaf + level, creating new node. Here we are removing this node. + */ + if (node_is_empty(node)) { + delete_dummy = node_post_carry(&info, COP_DELETE, node, 1); + if (IS_ERR(delete_dummy)) + return PTR_ERR(delete_dummy); + delete_dummy->u.delete.child = NULL; + delete_dummy->u.delete.flags = DELETE_RETAIN_EMPTY; + ZF_SET(node, JNODE_HEARD_BANSHEE); + } + + /* proceed with inserting extent item into parent. We are definitely + inserting rather than pasting if we get that far. */ + insert_extent = node_post_carry(&info, COP_INSERT, node, 1); + if (IS_ERR(insert_extent)) + /* @delete_dummy will be automatically destroyed on the level + exiting */ + return PTR_ERR(insert_extent); + /* NOTE-NIKITA insertion by key is simplest option here. Another + possibility is to insert on the left or right of already existing + item. + */ + insert_extent->u.insert.type = COPT_KEY; + insert_extent->u.insert.d = op->u.extent.d; + assert("nikita-1719", op->u.extent.d->key != NULL); + insert_extent->u.insert.d->data->arg = op->u.extent.d->coord; + insert_extent->u.insert.flags = + znode_get_tree(node)->carry.new_extent_flags; + + /* + * if carry was asked to track lock handle we should actually track + * lock handle on the twig node rather than on the leaf where + * operation was started from. Transfer tracked lock handle. + */ + if (doing->track_type) { + assert("nikita-3242", doing->tracked != NULL); + assert("nikita-3244", todo->tracked == NULL); + todo->tracked = doing->tracked; + todo->track_type = CARRY_TRACK_NODE; + doing->tracked = NULL; + doing->track_type = 0; + } + + return 0; +} + +/* update key in @parent between pointers to @left and @right. + + Find coords of @left and @right and update delimiting key between them. + This is helper function called by carry_update(). Finds position of + internal item involved. Updates item key. Updates delimiting keys of child + nodes involved. 
+*/ +static int update_delimiting_key(znode * parent /* node key is updated + * in */ , + znode * left /* child of @parent */ , + znode * right /* child of @parent */ , + carry_level * doing /* current carry + * level */ , + carry_level * todo /* parent carry + * level */ , + const char **error_msg /* place to + * store error + * message */ ) +{ + coord_t left_pos; + coord_t right_pos; + int result; + reiser4_key ldkey; + carry_plugin_info info; + + assert("nikita-1177", right != NULL); + /* find position of right left child in a parent */ + result = find_child_ptr(parent, right, &right_pos); + if (result != NS_FOUND) { + *error_msg = "Cannot find position of right child"; + return result; + } + + if ((left != NULL) && !coord_is_leftmost_unit(&right_pos)) { + /* find position of the left child in a parent */ + result = find_child_ptr(parent, left, &left_pos); + if (result != NS_FOUND) { + *error_msg = "Cannot find position of left child"; + return result; + } + assert("nikita-1355", left_pos.node != NULL); + } else + left_pos.node = NULL; + + /* check that they are separated by exactly one key and are basically + sane */ + if (REISER4_DEBUG) { + if ((left_pos.node != NULL) + && !coord_is_existing_unit(&left_pos)) { + *error_msg = "Left child is bastard"; + return RETERR(-EIO); + } + if (!coord_is_existing_unit(&right_pos)) { + *error_msg = "Right child is bastard"; + return RETERR(-EIO); + } + if (left_pos.node != NULL && + !coord_are_neighbors(&left_pos, &right_pos)) { + *error_msg = "Children are not direct siblings"; + return RETERR(-EIO); + } + } + *error_msg = NULL; + + info.doing = doing; + info.todo = todo; + + /* + * If child node is not empty, new key of internal item is a key of + * leftmost item in the child node. If the child is empty, take its + * right delimiting key as a new key of the internal item. Precise key + * in the latter case is not important per se, because the child (and + * the internal item) are going to be killed shortly anyway, but we + * have to preserve correct order of keys in the parent node. + */ + + if (!ZF_ISSET(right, JNODE_HEARD_BANSHEE)) + leftmost_key_in_node(right, &ldkey); + else { + read_lock_dk(znode_get_tree(parent)); + ldkey = *znode_get_rd_key(right); + read_unlock_dk(znode_get_tree(parent)); + } + node_plugin_by_node(parent)->update_item_key(&right_pos, &ldkey, &info); + doing->restartable = 0; + znode_make_dirty(parent); + return 0; +} + +/* implements COP_UPDATE opration + + Update delimiting keys. + +*/ +static int carry_update(carry_op * op /* operation to be performed */ , + carry_level * doing /* current carry level */ , + carry_level * todo/* next carry level */) +{ + int result; + carry_node *missing UNUSED_ARG; + znode *left; + znode *right; + carry_node *lchild; + carry_node *rchild; + const char *error_msg; + reiser4_tree *tree; + + /* + * This operation is called to update key of internal item. This is + * necessary when carry shifted of cut data on the child + * level. Arguments of this operation are: + * + * @right --- child node. Operation should update key of internal + * item pointing to @right. + * + * @left --- left neighbor of @right. This parameter is optional. 
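+	 *
+	 * When @left is absent only the internal item pointing to @right is
+	 * located and its key updated; @left is used for additional sanity
+	 * checks (see update_delimiting_key() above).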
+ */ + + assert("nikita-902", op != NULL); + assert("nikita-903", todo != NULL); + assert("nikita-904", op->op == COP_UPDATE); + + lchild = op->u.update.left; + rchild = op->node; + + if (lchild != NULL) { + assert("nikita-1001", lchild->parent); + assert("nikita-1003", !lchild->left); + left = reiser4_carry_real(lchild); + } else + left = NULL; + + tree = znode_get_tree(rchild->node); + read_lock_tree(tree); + right = znode_parent(rchild->node); + read_unlock_tree(tree); + + if (right != NULL) { + result = update_delimiting_key(right, + lchild ? lchild->node : NULL, + rchild->node, + doing, todo, &error_msg); + } else { + error_msg = "Cannot find node to update key in"; + result = RETERR(-EIO); + } + /* operation will be reposted to the next level by the + ->update_item_key() method of node plugin, if necessary. */ + + if (result != 0) { + warning("nikita-999", "Error updating delimiting key: %s (%i)", + error_msg ? : "", result); + } + return result; +} + +/* move items from @node during carry */ +static int carry_shift_data(sideof side /* in what direction to move data */ , + coord_t *insert_coord /* coord where new item + * is to be inserted */, + znode * node /* node which data are moved from */ , + carry_level * doing /* active carry queue */ , + carry_level * todo /* carry queue where new + * operations are to be put + * in */ , + unsigned int including_insert_coord_p + /* true if @insertion_coord can be moved */ ) +{ + int result; + znode *source; + carry_plugin_info info; + node_plugin *nplug; + + source = insert_coord->node; + + info.doing = doing; + info.todo = todo; + + nplug = node_plugin_by_node(node); + result = nplug->shift(insert_coord, node, + (side == LEFT_SIDE) ? SHIFT_LEFT : SHIFT_RIGHT, 0, + (int)including_insert_coord_p, &info); + /* the only error ->shift() method of node plugin can return is + -ENOMEM due to carry node/operation allocation. */ + assert("nikita-915", result >= 0 || result == -ENOMEM); + if (result > 0) { + /* + * if some number of bytes was actually shifted, mark nodes + * dirty, and carry level as non-restartable. + */ + doing->restartable = 0; + znode_make_dirty(source); + znode_make_dirty(node); + } + + assert("nikita-2077", coord_check(insert_coord)); + return 0; +} + +typedef carry_node *(*carry_iterator) (carry_node * node); +static carry_node *find_dir_carry(carry_node * node, carry_level * level, + carry_iterator iterator); + +static carry_node *pool_level_list_prev(carry_node *node) +{ + return list_entry(node->header.level_linkage.prev, carry_node, header.level_linkage); +} + +/* look for the left neighbor of given carry node in a carry queue. + + This is used by find_left_neighbor(), but I am not sure that this + really gives any advantage. More statistics required. + +*/ +carry_node *find_left_carry(carry_node * node /* node to find left neighbor + * of */ , + carry_level * level/* level to scan */) +{ + return find_dir_carry(node, level, + (carry_iterator) pool_level_list_prev); +} + +static carry_node *pool_level_list_next(carry_node *node) +{ + return list_entry(node->header.level_linkage.next, carry_node, header.level_linkage); +} + +/* look for the right neighbor of given carry node in a + carry queue. + + This is used by find_right_neighbor(), but I am not sure that this + really gives any advantage. More statistics required. 
+ +*/ +carry_node *find_right_carry(carry_node * node /* node to find right neighbor + * of */ , + carry_level * level/* level to scan */) +{ + return find_dir_carry(node, level, + (carry_iterator) pool_level_list_next); +} + +/* look for the left or right neighbor of given carry node in a carry + queue. + + Helper function used by find_{left|right}_carry(). +*/ +static carry_node *find_dir_carry(carry_node * node /* node to start + * scanning from */ , + carry_level * level /* level to scan */ , + carry_iterator iterator /* operation to + * move to the + * next node */) +{ + carry_node *neighbor; + + assert("nikita-1059", node != NULL); + assert("nikita-1060", level != NULL); + + /* scan list of carry nodes on this list dir-ward, skipping all + carry nodes referencing the same znode. */ + neighbor = node; + while (1) { + neighbor = iterator(neighbor); + if (carry_node_end(level, neighbor)) + /* list head is reached */ + return NULL; + if (reiser4_carry_real(neighbor) != reiser4_carry_real(node)) + return neighbor; + } +} + +/* + * Memory reservation estimation. + * + * Carry process proceeds through tree levels upwards. Carry assumes that it + * takes tree in consistent state (e.g., that search tree invariants hold), + * and leaves tree consistent after it finishes. This means that when some + * error occurs carry cannot simply return if there are pending carry + * operations. Generic solution for this problem is carry-undo either as + * transaction manager feature (requiring checkpoints and isolation), or + * through some carry specific mechanism. + * + * Our current approach is to panic if carry hits an error while tree is + * inconsistent. Unfortunately -ENOMEM can easily be triggered. To work around + * this "memory reservation" mechanism was added. + * + * Memory reservation is implemented by perthread-pages.diff patch from + * core-patches. Its API is defined in <linux/gfp.h> + * + * int perthread_pages_reserve(int nrpages, gfp_t gfp); + * void perthread_pages_release(int nrpages); + * int perthread_pages_count(void); + * + * carry estimates its worst case memory requirements at the entry, reserved + * enough memory, and released unused pages before returning. + * + * Code below estimates worst case memory requirements for a given carry + * queue. This is dome by summing worst case memory requirements for each + * operation in the queue. + * + */ + +/* + * Memory memory requirements of many operations depends on the tree + * height. For example, item insertion requires new node to be inserted at + * each tree level in the worst case. What tree height should be used for + * estimation? Current tree height is wrong, because tree height can change + * between the time when estimation was done and the time when operation is + * actually performed. Maximal possible tree height (REISER4_MAX_ZTREE_HEIGHT) + * is also not desirable, because it would lead to the huge over-estimation + * all the time. Plausible solution is "capped tree height": if current tree + * height is less than some TREE_HEIGHT_CAP constant, capped tree height is + * TREE_HEIGHT_CAP, otherwise it's current tree height. Idea behind this is + * that if tree height is TREE_HEIGHT_CAP or larger, it's extremely unlikely + * to be increased even more during short interval of time. + */ +#define TREE_HEIGHT_CAP (5) + +/* return capped tree height for the @tree. See comment above. 
*/ +static int cap_tree_height(reiser4_tree * tree) +{ + return max_t(int, tree->height, TREE_HEIGHT_CAP); +} + +/* return capped tree height for the current tree. */ +static int capped_height(void) +{ + return cap_tree_height(current_tree); +} + +/* return number of pages required to store given number of bytes */ +static int bytes_to_pages(int bytes) +{ + return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; +} + +/* how many pages are required to allocate znodes during item insertion. */ +static int carry_estimate_znodes(void) +{ + /* + * Note, that there we have some problem here: there is no way to + * reserve pages specifically for the given slab. This means that + * these pages can be hijacked for some other end. + */ + + /* in the worst case we need 3 new znode on each tree level */ + return bytes_to_pages(capped_height() * sizeof(znode) * 3); +} + +/* + * how many pages are required to load bitmaps. One bitmap per level. + */ +static int carry_estimate_bitmaps(void) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DONT_LOAD_BITMAP)) { + int bytes; + + bytes = capped_height() * (0 + /* bnode should be added, but + * it is private to bitmap.c, + * skip for now. */ + 2 * sizeof(jnode)); + /* working and commit jnodes */ + return bytes_to_pages(bytes) + 2; /* and their contents */ + } else + /* bitmaps were pre-loaded during mount */ + return 0; +} + +/* worst case item insertion memory requirements */ +static int carry_estimate_insert(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + capped_height() + /* new block on each level */ + 1 + /* and possibly extra new block at the leaf level */ + 3; /* loading of leaves into memory */ +} + +/* worst case item deletion memory requirements */ +static int carry_estimate_delete(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + 3; /* loading of leaves into memory */ +} + +/* worst case tree cut memory requirements */ +static int carry_estimate_cut(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + 3; /* loading of leaves into memory */ +} + +/* worst case memory requirements of pasting into item */ +static int carry_estimate_paste(carry_op * op, carry_level * level) +{ + return carry_estimate_bitmaps() + carry_estimate_znodes() + 1 + + /* new atom */ + capped_height() + /* new block on each level */ + 1 + /* and possibly extra new block at the leaf level */ + 3; /* loading of leaves into memory */ +} + +/* worst case memory requirements of extent insertion */ +static int carry_estimate_extent(carry_op * op, carry_level * level) +{ + return carry_estimate_insert(op, level) + /* insert extent */ + carry_estimate_delete(op, level); /* kill leaf */ +} + +/* worst case memory requirements of key update */ +static int carry_estimate_update(carry_op * op, carry_level * level) +{ + return 0; +} + +/* worst case memory requirements of flow insertion */ +static int carry_estimate_insert_flow(carry_op * op, carry_level * level) +{ + int newnodes; + + newnodes = min(bytes_to_pages(op->u.insert_flow.flow->length), + CARRY_FLOW_NEW_NODES_LIMIT); + /* + * roughly estimate insert_flow as a sequence of insertions. + */ + return newnodes * carry_estimate_insert(op, level); +} + +/* This is dispatch table for carry operations. It can be trivially + abstracted into useful plugin: tunable balancing policy is a good + thing. 
*/ +carry_op_handler op_dispatch_table[COP_LAST_OP] = { + [COP_INSERT] = { + .handler = carry_insert, + .estimate = carry_estimate_insert} + , + [COP_DELETE] = { + .handler = carry_delete, + .estimate = carry_estimate_delete} + , + [COP_CUT] = { + .handler = carry_cut, + .estimate = carry_estimate_cut} + , + [COP_PASTE] = { + .handler = carry_paste, + .estimate = carry_estimate_paste} + , + [COP_EXTENT] = { + .handler = carry_extent, + .estimate = carry_estimate_extent} + , + [COP_UPDATE] = { + .handler = carry_update, + .estimate = carry_estimate_update} + , + [COP_INSERT_FLOW] = { + .handler = carry_insert_flow, + .estimate = carry_estimate_insert_flow} +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/carry_ops.h b/fs/reiser4/carry_ops.h new file mode 100644 index 000000000000..bda0e5c90eec --- /dev/null +++ b/fs/reiser4/carry_ops.h @@ -0,0 +1,43 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* implementation of carry operations. See carry_ops.c for details. */ + +#if !defined(__CARRY_OPS_H__) +#define __CARRY_OPS_H__ + +#include "forward.h" +#include "znode.h" +#include "carry.h" + +/* carry operation handlers */ +typedef struct carry_op_handler { + /* perform operation */ + int (*handler) (carry_op * op, carry_level * doing, carry_level * todo); + /* estimate memory requirements for @op */ + int (*estimate) (carry_op * op, carry_level * level); +} carry_op_handler; + +/* This is dispatch table for carry operations. It can be trivially + abstracted into useful plugin: tunable balancing policy is a good + thing. */ +extern carry_op_handler op_dispatch_table[COP_LAST_OP]; + +unsigned int space_needed(const znode * node, const coord_t *coord, + const reiser4_item_data * data, int inserting); +extern carry_node *find_left_carry(carry_node * node, carry_level * level); +extern carry_node *find_right_carry(carry_node * node, carry_level * level); + +/* __CARRY_OPS_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/checksum.c b/fs/reiser4/checksum.c new file mode 100644 index 000000000000..2a35f4260db7 --- /dev/null +++ b/fs/reiser4/checksum.c @@ -0,0 +1,33 @@ +#include <linux/err.h> +#include "debug.h" +#include "checksum.h" + +int reiser4_init_csum_tfm(struct crypto_shash **tfm) +{ + struct crypto_shash *new_tfm; + + new_tfm = crypto_alloc_shash("crc32c", 0, 0); + if (IS_ERR(new_tfm)) { + warning("intelfx-81", "Could not load crc32c driver"); + return PTR_ERR(new_tfm); + } + + *tfm = new_tfm; + return 0; +} + +void reiser4_done_csum_tfm(struct crypto_shash *tfm) +{ + crypto_free_shash(tfm); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/checksum.h b/fs/reiser4/checksum.h new file mode 100644 index 000000000000..ff6812f759ad --- /dev/null +++ b/fs/reiser4/checksum.h @@ -0,0 +1,39 @@ +#ifndef __CHECKSUM__ +#define __CHECKSUM__ + +#include <crypto/hash.h> + +int reiser4_init_csum_tfm(struct crypto_shash **tfm); +void reiser4_done_csum_tfm(struct crypto_shash *tfm); +u32 static inline reiser4_crc32c(struct crypto_shash *tfm, + u32 crc, const void *address, + unsigned int length) +{ + struct { + struct shash_desc shash; + char ctx[4]; + } desc; + int err; + + desc.shash.tfm = tfm; + desc.shash.flags = 0; + *(u32 *)desc.ctx = crc; + + err = crypto_shash_update(&desc.shash, address, length); + BUG_ON(err); + return *(u32 *)desc.ctx; +} + +#endif /* __CHECKSUM__ */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ + diff --git a/fs/reiser4/context.c b/fs/reiser4/context.c new file mode 100644 index 000000000000..40fa203e81c9 --- /dev/null +++ b/fs/reiser4/context.c @@ -0,0 +1,288 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Manipulation of reiser4_context */ + +/* + * global context used during system call. Variable of this type is allocated + * on the stack at the beginning of the reiser4 part of the system call and + * pointer to it is stored in the current->fs_context. This allows us to avoid + * passing pointer to current transaction and current lockstack (both in + * one-to-one mapping with threads) all over the call chain. + * + * It's kind of like those global variables the prof used to tell you not to + * use in CS1, except thread specific.;-) Nikita, this was a good idea. + * + * In some situations it is desirable to have ability to enter reiser4_context + * more than once for the same thread (nested contexts). For example, there + * are some functions that can be called either directly from VFS/VM or from + * already active reiser4 context (->writepage, for example). + * + * In such situations "child" context acts like dummy: all activity is + * actually performed in the top level context, and get_current_context() + * always returns top level context. + * Of course, reiser4_init_context()/reiser4_done_context() have to be properly + * nested any way. + * + * Note that there is an important difference between reiser4 uses + * ->fs_context and the way other file systems use it. Other file systems + * (ext3 and reiserfs) use ->fs_context only for the duration of _transaction_ + * (this is why ->fs_context was initially called ->journal_info). 
This means, + * that when ext3 or reiserfs finds that ->fs_context is not NULL on the entry + * to the file system, they assume that some transaction is already underway, + * and usually bail out, because starting nested transaction would most likely + * lead to the deadlock. This gives false positives with reiser4, because we + * set ->fs_context before starting transaction. + */ + +#include "debug.h" +#include "super.h" +#include "context.h" +#include "vfs_ops.h" /* for reiser4_throttle_write() */ + +#include <linux/writeback.h> /* for current_is_pdflush() */ +#include <linux/hardirq.h> + +static void _reiser4_init_context(reiser4_context * context, + struct super_block *super) +{ + memset(context, 0, sizeof(*context)); + + context->super = super; + context->magic = context_magic; + context->outer = current->journal_info; + current->journal_info = (void *)context; + context->nr_children = 0; + context->gfp_mask = GFP_KERNEL; + + init_lock_stack(&context->stack); + + reiser4_txn_begin(context); + + /* initialize head of tap list */ + INIT_LIST_HEAD(&context->taps); +#if REISER4_DEBUG + context->task = current; +#endif + grab_space_enable(); +} + +/* initialize context and bind it to the current thread + + This function should be called at the beginning of reiser4 part of + syscall. +*/ +reiser4_context * reiser4_init_context(struct super_block *super) +{ + reiser4_context *context; + + assert("nikita-2662", !in_interrupt() && !in_irq()); + assert("nikita-3357", super != NULL); + assert("nikita-3358", super->s_op == NULL || is_reiser4_super(super)); + + context = get_current_context_check(); + if (context && context->super == super) { + context = (reiser4_context *) current->journal_info; + context->nr_children++; + return context; + } + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (context == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + _reiser4_init_context(context, super); + return context; +} + +/* this is used in scan_mgr which is called with spinlock held and in + reiser4_fill_super magic */ +void init_stack_context(reiser4_context *context, struct super_block *super) +{ + assert("nikita-2662", !in_interrupt() && !in_irq()); + assert("nikita-3357", super != NULL); + assert("nikita-3358", super->s_op == NULL || is_reiser4_super(super)); + assert("vs-12", !is_in_reiser4_context()); + + _reiser4_init_context(context, super); + context->on_stack = 1; + return; +} + +/* cast lock stack embedded into reiser4 context up to its container */ +reiser4_context *get_context_by_lock_stack(lock_stack * owner) +{ + return container_of(owner, reiser4_context, stack); +} + +/* true if there is already _any_ reiser4 context for the current thread */ +int is_in_reiser4_context(void) +{ + reiser4_context *ctx; + + ctx = current->journal_info; + return ctx != NULL && ((unsigned long)ctx->magic) == context_magic; +} + +/* + * call balance dirty pages for the current context. + * + * File system is expected to call balance_dirty_pages_ratelimited() whenever + * it dirties a page. reiser4 does this for unformatted nodes (that is, during + * write---this covers vast majority of all dirty traffic), but we cannot do + * this immediately when formatted node is dirtied, because long term lock is + * usually held at that time. To work around this, dirtying of formatted node + * simply increases ->nr_marked_dirty counter in the current reiser4 + * context. When we are about to leave this context, + * balance_dirty_pages_ratelimited() is called, if necessary. 
+ * + * This introduces another problem: sometimes we do not want to run + * balance_dirty_pages_ratelimited() when leaving a context, for example + * because some important lock (like ->i_mutex on the parent directory) is + * held. To achieve this, ->nobalance flag can be set in the current context. + */ +static void reiser4_throttle_write_at(reiser4_context *context) +{ + reiser4_super_info_data *sbinfo = get_super_private(context->super); + + /* + * call balance_dirty_pages_ratelimited() to process formatted nodes + * dirtied during this system call. Do that only if we are not in mount + * and there were nodes dirtied in this context and we are not in + * writepage (to avoid deadlock) and not in pdflush + */ + if (sbinfo != NULL && sbinfo->fake != NULL && + context->nr_marked_dirty != 0 && + !(current->flags & PF_MEMALLOC) && + !current_is_flush_bd_task()) + reiser4_throttle_write(sbinfo->fake); +} + +/* release resources associated with context. + + This function should be called at the end of "session" with reiser4, + typically just before leaving reiser4 driver back to VFS. + + This is good place to put some degugging consistency checks, like that + thread released all locks and closed transcrash etc. + +*/ +static void reiser4_done_context(reiser4_context * context) + /* context being released */ +{ + assert("nikita-860", context != NULL); + assert("nikita-859", context->magic == context_magic); + assert("vs-646", (reiser4_context *) current->journal_info == context); + assert("zam-686", !in_interrupt() && !in_irq()); + + /* only do anything when leaving top-level reiser4 context. All nested + * contexts are just dummies. */ + if (context->nr_children == 0) { + assert("jmacd-673", context->trans == NULL); + assert("jmacd-1002", lock_stack_isclean(&context->stack)); + assert("nikita-1936", reiser4_no_counters_are_held()); + assert("nikita-2626", list_empty_careful(reiser4_taps_list())); + assert("zam-1004", ergo(get_super_private(context->super), + get_super_private(context->super)->delete_mutex_owner != + current)); + + /* release all grabbed but as yet unused blocks */ + if (context->grabbed_blocks != 0) + all_grabbed2free(); + + /* + * synchronize against longterm_unlock_znode(): + * wake_up_requestor() wakes up requestors without holding + * zlock (otherwise they will immediately bump into that lock + * after wake up on another CPU). To work around (rare) + * situation where requestor has been woken up asynchronously + * and managed to run until completion (and destroy its + * context and lock stack) before wake_up_requestor() called + * wake_up() on it, wake_up_requestor() synchronize on lock + * stack spin lock. It has actually been observed that spin + * lock _was_ locked at this point, because + * wake_up_requestor() took interrupt. + */ + spin_lock_stack(&context->stack); + spin_unlock_stack(&context->stack); + + assert("zam-684", context->nr_children == 0); + /* restore original ->fs_context value */ + current->journal_info = context->outer; + if (context->on_stack == 0) + kfree(context); + } else { + context->nr_children--; +#if REISER4_DEBUG + assert("zam-685", context->nr_children >= 0); +#endif + } +} + +/* + * exit reiser4 context. Call balance_dirty_pages_at() if necessary. Close + * transaction. Call done_context() to do context related book-keeping. 
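+ *
+ * A typical call pattern at a reiser4 entry point (illustrative sketch)
+ * looks like:
+ *
+ *	ctx = reiser4_init_context(super);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ *	... reiser4 work ...
+ *	reiser4_exit_context(ctx);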
+ */ +void reiser4_exit_context(reiser4_context * context) +{ + assert("nikita-3021", reiser4_schedulable()); + + if (context->nr_children == 0) { + if (!context->nobalance) + reiser4_throttle_write_at(context); + + /* if filesystem is mounted with -o sync or -o dirsync - commit + transaction. FIXME: TXNH_DONT_COMMIT is used to avoid + commiting on exit_context when inode semaphore is held and + to have ktxnmgrd to do commit instead to get better + concurrent filesystem accesses. But, when one mounts with -o + sync, he cares more about reliability than about + performance. So, for now we have this simple mount -o sync + support. */ + if (context->super->s_flags & (MS_SYNCHRONOUS | MS_DIRSYNC)) { + txn_atom *atom; + + atom = get_current_atom_locked_nocheck(); + if (atom) { + atom->flags |= ATOM_FORCE_COMMIT; + context->trans->flags &= ~TXNH_DONT_COMMIT; + spin_unlock_atom(atom); + } + } + reiser4_txn_end(context); + } + reiser4_done_context(context); +} + +void reiser4_ctx_gfp_mask_set(void) +{ + reiser4_context *ctx; + + ctx = get_current_context(); + if (ctx->entd == 0 && + list_empty(&ctx->stack.locks) && + ctx->trans->atom == NULL) + ctx->gfp_mask = GFP_KERNEL; + else + ctx->gfp_mask = GFP_NOFS; +} + +void reiser4_ctx_gfp_mask_force(gfp_t mask) +{ + reiser4_context *ctx; + ctx = get_current_context(); + + assert("edward-1454", ctx != NULL); + + ctx->gfp_mask = mask; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/context.h b/fs/reiser4/context.h new file mode 100644 index 000000000000..57b6817157a3 --- /dev/null +++ b/fs/reiser4/context.h @@ -0,0 +1,233 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Reiser4 context. See context.c for details. */ + +#if !defined( __REISER4_CONTEXT_H__ ) +#define __REISER4_CONTEXT_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "tap.h" +#include "lock.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block */ +#include <linux/spinlock.h> +#include <linux/sched.h> /* for struct task_struct */ + +/* reiser4 per-thread context */ +struct reiser4_context { + /* magic constant. For identification of reiser4 contexts. */ + __u32 magic; + + /* current lock stack. See lock.[ch]. This is where list of all + locks taken by current thread is kept. This is also used in + deadlock detection. */ + lock_stack stack; + + /* current transcrash. */ + txn_handle *trans; + /* transaction handle embedded into reiser4_context. ->trans points + * here by default. */ + txn_handle trans_in_ctx; + + /* super block we are working with. To get the current tree + use &get_super_private (reiser4_get_current_sb ())->tree. */ + struct super_block *super; + + /* parent fs activation */ + struct fs_activation *outer; + + /* per-thread grabbed (for further allocation) blocks counter */ + reiser4_block_nr grabbed_blocks; + + /* list of taps currently monitored. See tap.c */ + struct list_head taps; + + /* grabbing space is enabled */ + unsigned int grab_enabled:1; + /* should be set when we are write dirty nodes to disk in jnode_flush or + * reiser4_write_logs() */ + unsigned int writeout_mode:1; + /* true, if current thread is an ent thread */ + unsigned int entd:1; + /* true, if balance_dirty_pages() should not be run when leaving this + * context. 
This is used to avoid lengthly balance_dirty_pages() + * operation when holding some important resource, like directory + * ->i_mutex */ + unsigned int nobalance:1; + + /* this bit is used on reiser4_done_context to decide whether context is + kmalloc-ed and has to be kfree-ed */ + unsigned int on_stack:1; + + /* count non-trivial jnode_set_dirty() calls */ + unsigned long nr_marked_dirty; + /* + * reiser4_writeback_inodes calls (via generic_writeback_sb_inodes) + * reiser4_writepages_dispatch for each of dirty inodes. + * Reiser4_writepages_dispatch captures pages. When number of pages + * captured in one reiser4_writeback_inodes reaches some threshold - + * some atoms get flushed + */ + int nr_captured; + int nr_children; /* number of child contexts */ + struct page *locked_page; /* page that should be unlocked in + * reiser4_dirty_inode() before taking + * a longterm lock (to not violate + * reiser4 lock ordering) */ +#if REISER4_DEBUG + /* debugging information about reiser4 locks held by the current + * thread */ + reiser4_lock_cnt_info locks; + struct task_struct *task; /* so we can easily find owner of the stack */ + + /* + * disk space grabbing debugging support + */ + /* how many disk blocks were grabbed by the first call to + * reiser4_grab_space() in this context */ + reiser4_block_nr grabbed_initially; + + /* list of all threads doing flush currently */ + struct list_head flushers_link; + /* information about last error encountered by reiser4 */ + err_site err; +#endif + void *vp; + gfp_t gfp_mask; +}; + +extern reiser4_context *get_context_by_lock_stack(lock_stack *); + +/* Debugging helps. */ +#if REISER4_DEBUG +extern void print_contexts(void); +#endif + +#define current_tree (&(get_super_private(reiser4_get_current_sb())->tree)) +#define current_blocksize reiser4_get_current_sb()->s_blocksize +#define current_blocksize_bits reiser4_get_current_sb()->s_blocksize_bits + +extern reiser4_context *reiser4_init_context(struct super_block *); +extern void init_stack_context(reiser4_context *, struct super_block *); +extern void reiser4_exit_context(reiser4_context *); + +/* magic constant we store in reiser4_context allocated at the stack. Used to + catch accesses to staled or uninitialized contexts. */ +#define context_magic ((__u32) 0x4b1b5d0b) + +extern int is_in_reiser4_context(void); + +/* + * return reiser4_context for the thread @tsk + */ +static inline reiser4_context *get_context(const struct task_struct *tsk) +{ + assert("vs-1682", + ((reiser4_context *) tsk->journal_info)->magic == context_magic); + return (reiser4_context *) tsk->journal_info; +} + +/* + * return reiser4 context of the current thread, or NULL if there is none. + */ +static inline reiser4_context *get_current_context_check(void) +{ + if (is_in_reiser4_context()) + return get_context(current); + else + return NULL; +} + +static inline reiser4_context *get_current_context(void); /* __attribute__((const)); */ + +/* return context associated with current thread */ +static inline reiser4_context *get_current_context(void) +{ + return get_context(current); +} + +static inline gfp_t reiser4_ctx_gfp_mask_get(void) +{ + reiser4_context *ctx; + + ctx = get_current_context_check(); + return (ctx == NULL) ? GFP_KERNEL : ctx->gfp_mask; +} + +void reiser4_ctx_gfp_mask_set(void); +void reiser4_ctx_gfp_mask_force (gfp_t mask); + +/* + * true if current thread is in the write-out mode. Thread enters write-out + * mode during jnode_flush and reiser4_write_logs(). 
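+ *
+ * Illustrative pairing, enforced by the assertions below:
+ *
+ *	writeout_mode_enable();
+ *	... write dirty nodes to disk ...
+ *	writeout_mode_disable();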
+ */ +static inline int is_writeout_mode(void) +{ + return get_current_context()->writeout_mode; +} + +/* + * enter write-out mode + */ +static inline void writeout_mode_enable(void) +{ + assert("zam-941", !get_current_context()->writeout_mode); + get_current_context()->writeout_mode = 1; +} + +/* + * leave write-out mode + */ +static inline void writeout_mode_disable(void) +{ + assert("zam-942", get_current_context()->writeout_mode); + get_current_context()->writeout_mode = 0; +} + +static inline void grab_space_enable(void) +{ + get_current_context()->grab_enabled = 1; +} + +static inline void grab_space_disable(void) +{ + get_current_context()->grab_enabled = 0; +} + +static inline void grab_space_set_enabled(int enabled) +{ + get_current_context()->grab_enabled = enabled; +} + +static inline int is_grab_enabled(reiser4_context * ctx) +{ + return ctx->grab_enabled; +} + +/* mark transaction handle in @ctx as TXNH_DONT_COMMIT, so that no commit or + * flush would be performed when it is closed. This is necessary when handle + * has to be closed under some coarse semaphore, like i_mutex of + * directory. Commit will be performed by ktxnmgrd. */ +static inline void context_set_commit_async(reiser4_context * context) +{ + context->nobalance = 1; + context->trans->flags |= TXNH_DONT_COMMIT; +} + +/* __REISER4_CONTEXT_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/coord.c b/fs/reiser4/coord.c new file mode 100644 index 000000000000..5c34e0aba68d --- /dev/null +++ b/fs/reiser4/coord.c @@ -0,0 +1,928 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "tree.h" +#include "plugin/item/item.h" +#include "znode.h" +#include "coord.h" + +/* Internal constructor. */ +static inline void +coord_init_values(coord_t *coord, const znode * node, pos_in_node_t item_pos, + pos_in_node_t unit_pos, between_enum between) +{ + coord->node = (znode *) node; + coord_set_item_pos(coord, item_pos); + coord->unit_pos = unit_pos; + coord->between = between; + ON_DEBUG(coord->plug_v = 0); + ON_DEBUG(coord->body_v = 0); + + /*ON_TRACE (TRACE_COORDS, "init coord %p node %p: %u %u %s\n", coord, + node, item_pos, unit_pos, coord_tween_tostring (between)); */ +} + +/* after shifting of node content, coord previously set properly may become + invalid, try to "normalize" it. */ +void coord_normalize(coord_t *coord) +{ + znode *node; + + node = coord->node; + assert("vs-683", node); + + coord_clear_iplug(coord); + + if (node_is_empty(node)) { + coord_init_first_unit(coord, node); + } else if ((coord->between == AFTER_ITEM) + || (coord->between == AFTER_UNIT)) { + return; + } else if (coord->item_pos == coord_num_items(coord) + && coord->between == BEFORE_ITEM) { + coord_dec_item_pos(coord); + coord->between = AFTER_ITEM; + } else if (coord->unit_pos == coord_num_units(coord) + && coord->between == BEFORE_UNIT) { + coord->unit_pos--; + coord->between = AFTER_UNIT; + } else if (coord->item_pos == coord_num_items(coord) + && coord->unit_pos == 0 && coord->between == BEFORE_UNIT) { + coord_dec_item_pos(coord); + coord->unit_pos = 0; + coord->between = AFTER_ITEM; + } +} + +/* Copy a coordinate. 
*/ +void coord_dup(coord_t *coord, const coord_t *old_coord) +{ + assert("jmacd-9800", coord_check(old_coord)); + coord_dup_nocheck(coord, old_coord); +} + +/* Copy a coordinate without check. Useful when old_coord->node is not + loaded. As in cbk_tree_lookup -> connect_znode -> connect_one_side */ +void coord_dup_nocheck(coord_t *coord, const coord_t *old_coord) +{ + coord->node = old_coord->node; + coord_set_item_pos(coord, old_coord->item_pos); + coord->unit_pos = old_coord->unit_pos; + coord->between = old_coord->between; + coord->iplugid = old_coord->iplugid; + ON_DEBUG(coord->plug_v = old_coord->plug_v); + ON_DEBUG(coord->body_v = old_coord->body_v); +} + +/* Initialize an invalid coordinate. */ +void coord_init_invalid(coord_t *coord, const znode * node) +{ + coord_init_values(coord, node, 0, 0, INVALID_COORD); +} + +void coord_init_first_unit_nocheck(coord_t *coord, const znode * node) +{ + coord_init_values(coord, node, 0, 0, AT_UNIT); +} + +/* Initialize a coordinate to point at the first unit of the first item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +void coord_init_first_unit(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, 0, 0, (is_empty ? EMPTY_NODE : AT_UNIT)); + + assert("jmacd-9801", coord_check(coord)); +} + +/* Initialize a coordinate to point at the last unit of the last item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +void coord_init_last_unit(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, + (is_empty ? 0 : node_num_items(node) - 1), 0, + (is_empty ? EMPTY_NODE : AT_UNIT)); + if (!is_empty) + coord->unit_pos = coord_last_unit_pos(coord); + assert("jmacd-9802", coord_check(coord)); +} + +/* Initialize a coordinate to before the first item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +void coord_init_before_first_item(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, 0, 0, + (is_empty ? EMPTY_NODE : BEFORE_UNIT)); + + assert("jmacd-9803", coord_check(coord)); +} + +/* Initialize a coordinate to after the last item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +void coord_init_after_last_item(coord_t *coord, const znode * node) +{ + int is_empty = node_is_empty(node); + + coord_init_values(coord, node, + (is_empty ? 0 : node_num_items(node) - 1), 0, + (is_empty ? EMPTY_NODE : AFTER_ITEM)); + + assert("jmacd-9804", coord_check(coord)); +} + +/* Initialize a coordinate to after last unit in the item. Coord must be set + already to existing item */ +void coord_init_after_item_end(coord_t *coord) +{ + coord->between = AFTER_UNIT; + coord->unit_pos = coord_last_unit_pos(coord); +} + +/* Initialize a coordinate to before the item. Coord must be set already to + existing item */ +void coord_init_before_item(coord_t *coord) +{ + coord->unit_pos = 0; + coord->between = BEFORE_ITEM; +} + +/* Initialize a coordinate to after the item. Coord must be set already to + existing item */ +void coord_init_after_item(coord_t *coord) +{ + coord->unit_pos = 0; + coord->between = AFTER_ITEM; +} + +/* Initialize a coordinate by 0s. Used in places where init_coord was used and + it was not clear how actually */ +void coord_init_zero(coord_t *coord) +{ + memset(coord, 0, sizeof(*coord)); +} + +/* Return the number of units at the present item. + Asserts coord_is_existing_item(). 
*/ +unsigned coord_num_units(const coord_t *coord) +{ + assert("jmacd-9806", coord_is_existing_item(coord)); + + return item_plugin_by_coord(coord)->b.nr_units(coord); +} + +/* Returns true if the coord was initializewd by coord_init_invalid (). */ +/* Audited by: green(2002.06.15) */ +int coord_is_invalid(const coord_t *coord) +{ + return coord->between == INVALID_COORD; +} + +/* Returns true if the coordinate is positioned at an existing item, not before + or after an item. It may be placed at, before, or after any unit within the + item, whether existing or not. */ +int coord_is_existing_item(const coord_t *coord) +{ + switch (coord->between) { + case EMPTY_NODE: + case BEFORE_ITEM: + case AFTER_ITEM: + case INVALID_COORD: + return 0; + + case BEFORE_UNIT: + case AT_UNIT: + case AFTER_UNIT: + return coord->item_pos < coord_num_items(coord); + } + + impossible("jmacd-9900", "unreachable coord: %p", coord); + return 0; +} + +/* Returns true if the coordinate is positioned at an existing unit, not before + or after a unit. */ +/* Audited by: green(2002.06.15) */ +int coord_is_existing_unit(const coord_t *coord) +{ + switch (coord->between) { + case EMPTY_NODE: + case BEFORE_UNIT: + case AFTER_UNIT: + case BEFORE_ITEM: + case AFTER_ITEM: + case INVALID_COORD: + return 0; + + case AT_UNIT: + return (coord->item_pos < coord_num_items(coord) + && coord->unit_pos < coord_num_units(coord)); + } + + impossible("jmacd-9902", "unreachable"); + return 0; +} + +/* Returns true if the coordinate is positioned at the first unit of the first + item. Not true for empty nodes nor coordinates positioned before the first + item. */ +/* Audited by: green(2002.06.15) */ +int coord_is_leftmost_unit(const coord_t *coord) +{ + return (coord->between == AT_UNIT && coord->item_pos == 0 + && coord->unit_pos == 0); +} + +#if REISER4_DEBUG +/* For assertions only, checks for a valid coordinate. */ +int coord_check(const coord_t *coord) +{ + if (coord->node == NULL) + return 0; + if (znode_above_root(coord->node)) + return 1; + + switch (coord->between) { + default: + case INVALID_COORD: + return 0; + case EMPTY_NODE: + if (!node_is_empty(coord->node)) + return 0; + return coord->item_pos == 0 && coord->unit_pos == 0; + + case BEFORE_UNIT: + case AFTER_UNIT: + if (node_is_empty(coord->node) && (coord->item_pos == 0) + && (coord->unit_pos == 0)) + return 1; + case AT_UNIT: + break; + case AFTER_ITEM: + case BEFORE_ITEM: + /* before/after item should not set unit_pos. */ + if (coord->unit_pos != 0) + return 0; + break; + } + + if (coord->item_pos >= node_num_items(coord->node)) + return 0; + + /* FIXME-VS: we are going to check unit_pos. This makes no sense when + between is set either AFTER_ITEM or BEFORE_ITEM */ + if (coord->between == AFTER_ITEM || coord->between == BEFORE_ITEM) + return 1; + + if (coord_is_iplug_set(coord) && + coord->unit_pos > + item_plugin_by_coord(coord)->b.nr_units(coord) - 1) + return 0; + return 1; +} +#endif + +/* Adjust coordinate boundaries based on the number of items prior to + coord_next/prev. Returns 1 if the new position is does not exist. */ +static int coord_adjust_items(coord_t *coord, unsigned items, int is_next) +{ + /* If the node is invalid, leave it. */ + if (coord->between == INVALID_COORD) + return 1; + + /* If the node is empty, set it appropriately. */ + if (items == 0) { + coord->between = EMPTY_NODE; + coord_set_item_pos(coord, 0); + coord->unit_pos = 0; + return 1; + } + + /* If it was empty and it no longer is, set to BEFORE/AFTER_ITEM. 
*/ + if (coord->between == EMPTY_NODE) { + coord->between = (is_next ? BEFORE_ITEM : AFTER_ITEM); + coord_set_item_pos(coord, 0); + coord->unit_pos = 0; + return 0; + } + + /* If the item_pos is out-of-range, set it appropriatly. */ + if (coord->item_pos >= items) { + coord->between = AFTER_ITEM; + coord_set_item_pos(coord, items - 1); + coord->unit_pos = 0; + /* If is_next, return 1 (can't go any further). */ + return is_next; + } + + return 0; +} + +/* Advances the coordinate by one unit to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing unit. */ +int coord_next_unit(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 1) == 1) + return 1; + + switch (coord->between) { + case BEFORE_UNIT: + /* Now it is positioned at the same unit. */ + coord->between = AT_UNIT; + return 0; + + case AFTER_UNIT: + case AT_UNIT: + /* If it was at or after a unit and there are more units in this + item, advance to the next one. */ + if (coord->unit_pos < coord_last_unit_pos(coord)) { + coord->unit_pos += 1; + coord->between = AT_UNIT; + return 0; + } + + /* Otherwise, it is crossing an item boundary and treated as if + it was after the current item. */ + coord->between = AFTER_ITEM; + coord->unit_pos = 0; + /* FALLTHROUGH */ + + case AFTER_ITEM: + /* Check for end-of-node. */ + if (coord->item_pos == items - 1) + return 1; + + coord_inc_item_pos(coord); + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case BEFORE_ITEM: + /* The adjust_items checks ensure that we are valid here. */ + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case INVALID_COORD: + case EMPTY_NODE: + /* Handled in coord_adjust_items(). */ + break; + } + + impossible("jmacd-9902", "unreachable"); + return 0; +} + +/* Advances the coordinate by one item to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing item. */ +int coord_next_item(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 1) == 1) + return 1; + + switch (coord->between) { + case AFTER_UNIT: + case AT_UNIT: + case BEFORE_UNIT: + case AFTER_ITEM: + /* Check for end-of-node. */ + if (coord->item_pos == items - 1) { + coord->between = AFTER_ITEM; + coord->unit_pos = 0; + coord_clear_iplug(coord); + return 1; + } + + /* Anywhere in an item, go to the next one. */ + coord->between = AT_UNIT; + coord_inc_item_pos(coord); + coord->unit_pos = 0; + return 0; + + case BEFORE_ITEM: + /* The out-of-range check ensures that we are valid here. */ + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + case INVALID_COORD: + case EMPTY_NODE: + /* Handled in coord_adjust_items(). */ + break; + } + + impossible("jmacd-9903", "unreachable"); + return 0; +} + +/* Advances the coordinate by one unit to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing unit. 
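/*
 * A minimal iteration sketch using the functions defined above, assuming
 * "node" is a loaded node and "process_unit" is a hypothetical per-unit
 * handler: start before the first item and keep advancing while
 * coord_next_unit() reports that the new position is an existing unit
 * (return value 0). This is the same pattern the for_all_units() macro
 * in coord.h expands to.
 *
 *	coord_t coord;
 *
 *	coord_init_before_first_item(&coord, node);
 *	while (coord_next_unit(&coord) == 0)
 *		process_unit(&coord);
 */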
*/ +int coord_prev_unit(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 0) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + case BEFORE_UNIT: + if (coord->unit_pos > 0) { + coord->unit_pos -= 1; + coord->between = AT_UNIT; + return 0; + } + + if (coord->item_pos == 0) { + coord->between = BEFORE_ITEM; + return 1; + } + + coord_dec_item_pos(coord); + coord->unit_pos = coord_last_unit_pos(coord); + coord->between = AT_UNIT; + return 0; + + case AFTER_UNIT: + /* What if unit_pos is out-of-range? */ + assert("jmacd-5442", + coord->unit_pos <= coord_last_unit_pos(coord)); + coord->between = AT_UNIT; + return 0; + + case BEFORE_ITEM: + if (coord->item_pos == 0) + return 1; + + coord_dec_item_pos(coord); + /* FALLTHROUGH */ + + case AFTER_ITEM: + coord->between = AT_UNIT; + coord->unit_pos = coord_last_unit_pos(coord); + return 0; + + case INVALID_COORD: + case EMPTY_NODE: + break; + } + + impossible("jmacd-9904", "unreachable"); + return 0; +} + +/* Advances the coordinate by one item to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing item. */ +int coord_prev_item(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 0) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + case AFTER_UNIT: + case BEFORE_UNIT: + case BEFORE_ITEM: + + if (coord->item_pos == 0) { + coord->between = BEFORE_ITEM; + coord->unit_pos = 0; + return 1; + } + + coord_dec_item_pos(coord); + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case AFTER_ITEM: + coord->between = AT_UNIT; + coord->unit_pos = 0; + return 0; + + case INVALID_COORD: + case EMPTY_NODE: + break; + } + + impossible("jmacd-9905", "unreachable"); + return 0; +} + +/* Calls either coord_init_first_unit or coord_init_last_unit depending on + sideof argument. */ +void coord_init_sideof_unit(coord_t *coord, const znode * node, sideof dir) +{ + assert("jmacd-9821", dir == LEFT_SIDE || dir == RIGHT_SIDE); + if (dir == LEFT_SIDE) { + coord_init_first_unit(coord, node); + } else { + coord_init_last_unit(coord, node); + } +} + +/* Calls either coord_is_before_leftmost or coord_is_after_rightmost depending + on sideof argument. */ +/* Audited by: green(2002.06.15) */ +int coord_is_after_sideof_unit(coord_t *coord, sideof dir) +{ + assert("jmacd-9822", dir == LEFT_SIDE || dir == RIGHT_SIDE); + if (dir == LEFT_SIDE) { + return coord_is_before_leftmost(coord); + } else { + return coord_is_after_rightmost(coord); + } +} + +/* Calls either coord_next_unit or coord_prev_unit depending on sideof argument. + */ +/* Audited by: green(2002.06.15) */ +int coord_sideof_unit(coord_t *coord, sideof dir) +{ + assert("jmacd-9823", dir == LEFT_SIDE || dir == RIGHT_SIDE); + if (dir == LEFT_SIDE) { + return coord_prev_unit(coord); + } else { + return coord_next_unit(coord); + } +} + +#if REISER4_DEBUG +int coords_equal(const coord_t *c1, const coord_t *c2) +{ + assert("nikita-2840", c1 != NULL); + assert("nikita-2841", c2 != NULL); + + return + c1->node == c2->node && + c1->item_pos == c2->item_pos && + c1->unit_pos == c2->unit_pos && c1->between == c2->between; +} +#endif /* REISER4_DEBUG */ + +/* If coord_is_after_rightmost return NCOORD_ON_THE_RIGHT, if + coord_is_after_leftmost return NCOORD_ON_THE_LEFT, otherwise return + NCOORD_INSIDE. 
*/ +/* Audited by: green(2002.06.15) */ +coord_wrt_node coord_wrt(const coord_t *coord) +{ + if (coord_is_before_leftmost(coord)) + return COORD_ON_THE_LEFT; + + if (coord_is_after_rightmost(coord)) + return COORD_ON_THE_RIGHT; + + return COORD_INSIDE; +} + +/* Returns true if the coordinate is positioned after the last item or after the + last unit of the last item or it is an empty node. */ +/* Audited by: green(2002.06.15) */ +int coord_is_after_rightmost(const coord_t *coord) +{ + assert("jmacd-7313", coord_check(coord)); + + switch (coord->between) { + case INVALID_COORD: + case AT_UNIT: + case BEFORE_UNIT: + case BEFORE_ITEM: + return 0; + + case EMPTY_NODE: + return 1; + + case AFTER_ITEM: + return (coord->item_pos == node_num_items(coord->node) - 1); + + case AFTER_UNIT: + return ((coord->item_pos == node_num_items(coord->node) - 1) && + coord->unit_pos == coord_last_unit_pos(coord)); + } + + impossible("jmacd-9908", "unreachable"); + return 0; +} + +/* Returns true if the coordinate is positioned before the first item or it is + an empty node. */ +int coord_is_before_leftmost(const coord_t *coord) +{ + /* FIXME-VS: coord_check requires node to be loaded whereas it is not + necessary to check if coord is set before leftmost + assert ("jmacd-7313", coord_check (coord)); */ + switch (coord->between) { + case INVALID_COORD: + case AT_UNIT: + case AFTER_ITEM: + case AFTER_UNIT: + return 0; + + case EMPTY_NODE: + return 1; + + case BEFORE_ITEM: + case BEFORE_UNIT: + return (coord->item_pos == 0) && (coord->unit_pos == 0); + } + + impossible("jmacd-9908", "unreachable"); + return 0; +} + +/* Returns true if the coordinate is positioned after a item, before a item, + after the last unit of an item, before the first unit of an item, or at an + empty node. */ +/* Audited by: green(2002.06.15) */ +int coord_is_between_items(const coord_t *coord) +{ + assert("jmacd-7313", coord_check(coord)); + + switch (coord->between) { + case INVALID_COORD: + case AT_UNIT: + return 0; + + case AFTER_ITEM: + case BEFORE_ITEM: + case EMPTY_NODE: + return 1; + + case BEFORE_UNIT: + return coord->unit_pos == 0; + + case AFTER_UNIT: + return coord->unit_pos == coord_last_unit_pos(coord); + } + + impossible("jmacd-9908", "unreachable"); + return 0; +} + +#if REISER4_DEBUG +/* Returns true if the coordinates are positioned at adjacent units, regardless + of before-after or item boundaries. */ +int coord_are_neighbors(coord_t *c1, coord_t *c2) +{ + coord_t *left; + coord_t *right; + + assert("nikita-1241", c1 != NULL); + assert("nikita-1242", c2 != NULL); + assert("nikita-1243", c1->node == c2->node); + assert("nikita-1244", coord_is_existing_unit(c1)); + assert("nikita-1245", coord_is_existing_unit(c2)); + + left = right = NULL; + switch (coord_compare(c1, c2)) { + case COORD_CMP_ON_LEFT: + left = c1; + right = c2; + break; + case COORD_CMP_ON_RIGHT: + left = c2; + right = c1; + break; + case COORD_CMP_SAME: + return 0; + default: + wrong_return_value("nikita-1246", "compare_coords()"); + } + assert("vs-731", left && right); + if (left->item_pos == right->item_pos) { + return left->unit_pos + 1 == right->unit_pos; + } else if (left->item_pos + 1 == right->item_pos) { + return (left->unit_pos == coord_last_unit_pos(left)) + && (right->unit_pos == 0); + } else { + return 0; + } +} +#endif /* REISER4_DEBUG */ + +/* Assuming two coordinates are positioned in the same node, return + COORD_CMP_ON_RIGHT, COORD_CMP_ON_LEFT, or COORD_CMP_SAME depending on c1's + position relative to c2. 
*/ +/* Audited by: green(2002.06.15) */ +coord_cmp coord_compare(coord_t *c1, coord_t *c2) +{ + assert("vs-209", c1->node == c2->node); + assert("vs-194", coord_is_existing_unit(c1) + && coord_is_existing_unit(c2)); + + if (c1->item_pos > c2->item_pos) + return COORD_CMP_ON_RIGHT; + if (c1->item_pos < c2->item_pos) + return COORD_CMP_ON_LEFT; + if (c1->unit_pos > c2->unit_pos) + return COORD_CMP_ON_RIGHT; + if (c1->unit_pos < c2->unit_pos) + return COORD_CMP_ON_LEFT; + return COORD_CMP_SAME; +} + +/* If the coordinate is between items, shifts it to the right. Returns 0 on + success and non-zero if there is no position to the right. */ +int coord_set_to_right(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 1) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + return 0; + + case BEFORE_ITEM: + case BEFORE_UNIT: + coord->between = AT_UNIT; + return 0; + + case AFTER_UNIT: + if (coord->unit_pos < coord_last_unit_pos(coord)) { + coord->unit_pos += 1; + coord->between = AT_UNIT; + return 0; + } else { + + coord->unit_pos = 0; + + if (coord->item_pos == items - 1) { + coord->between = AFTER_ITEM; + return 1; + } + + coord_inc_item_pos(coord); + coord->between = AT_UNIT; + return 0; + } + + case AFTER_ITEM: + if (coord->item_pos == items - 1) + return 1; + + coord_inc_item_pos(coord); + coord->unit_pos = 0; + coord->between = AT_UNIT; + return 0; + + case EMPTY_NODE: + return 1; + + case INVALID_COORD: + break; + } + + impossible("jmacd-9920", "unreachable"); + return 0; +} + +/* If the coordinate is between items, shifts it to the left. Returns 0 on + success and non-zero if there is no position to the left. */ +int coord_set_to_left(coord_t *coord) +{ + unsigned items = coord_num_items(coord); + + if (coord_adjust_items(coord, items, 0) == 1) + return 1; + + switch (coord->between) { + case AT_UNIT: + return 0; + + case AFTER_UNIT: + coord->between = AT_UNIT; + return 0; + + case AFTER_ITEM: + coord->between = AT_UNIT; + coord->unit_pos = coord_last_unit_pos(coord); + return 0; + + case BEFORE_UNIT: + if (coord->unit_pos > 0) { + coord->unit_pos -= 1; + coord->between = AT_UNIT; + return 0; + } else { + + if (coord->item_pos == 0) { + coord->between = BEFORE_ITEM; + return 1; + } + + coord->unit_pos = coord_last_unit_pos(coord); + coord_dec_item_pos(coord); + coord->between = AT_UNIT; + return 0; + } + + case BEFORE_ITEM: + if (coord->item_pos == 0) + return 1; + + coord_dec_item_pos(coord); + coord->unit_pos = coord_last_unit_pos(coord); + coord->between = AT_UNIT; + return 0; + + case EMPTY_NODE: + return 1; + + case INVALID_COORD: + break; + } + + impossible("jmacd-9920", "unreachable"); + return 0; +} + +static const char *coord_tween_tostring(between_enum n) +{ + switch (n) { + case BEFORE_UNIT: + return "before unit"; + case BEFORE_ITEM: + return "before item"; + case AT_UNIT: + return "at unit"; + case AFTER_UNIT: + return "after unit"; + case AFTER_ITEM: + return "after item"; + case EMPTY_NODE: + return "empty node"; + case INVALID_COORD: + return "invalid"; + default: + { + static char buf[30]; + + sprintf(buf, "unknown: %i", n); + return buf; + } + } +} + +void print_coord(const char *mes, const coord_t *coord, int node) +{ + if (coord == NULL) { + printk("%s: null\n", mes); + return; + } + printk("%s: item_pos = %d, unit_pos %d, tween=%s, iplug=%d\n", + mes, coord->item_pos, coord->unit_pos, + coord_tween_tostring(coord->between), coord->iplugid); +} + +int +item_utmost_child_real_block(const coord_t *coord, sideof 
side, + reiser4_block_nr * blk) +{ + return item_plugin_by_coord(coord)->f.utmost_child_real_block(coord, + side, + blk); +} + +int item_utmost_child(const coord_t *coord, sideof side, jnode ** child) +{ + return item_plugin_by_coord(coord)->f.utmost_child(coord, side, child); +} + +/* @count bytes of flow @f got written, update correspondingly f->length, + f->data and f->key */ +void move_flow_forward(flow_t *f, unsigned count) +{ + if (f->data) + f->data += count; + f->length -= count; + set_key_offset(&f->key, get_key_offset(&f->key) + count); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/coord.h b/fs/reiser4/coord.h new file mode 100644 index 000000000000..a1dd724fc464 --- /dev/null +++ b/fs/reiser4/coord.h @@ -0,0 +1,399 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Coords */ + +#if !defined(__REISER4_COORD_H__) +#define __REISER4_COORD_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" + +/* insertions happen between coords in the tree, so we need some means + of specifying the sense of betweenness. */ +typedef enum { + BEFORE_UNIT, /* Note: we/init_coord depends on this value being zero. */ + AT_UNIT, + AFTER_UNIT, + BEFORE_ITEM, + AFTER_ITEM, + INVALID_COORD, + EMPTY_NODE, +} between_enum; + +/* location of coord w.r.t. its node */ +typedef enum { + COORD_ON_THE_LEFT = -1, + COORD_ON_THE_RIGHT = +1, + COORD_INSIDE = 0 +} coord_wrt_node; + +typedef enum { + COORD_CMP_SAME = 0, COORD_CMP_ON_LEFT = -1, COORD_CMP_ON_RIGHT = +1 +} coord_cmp; + +struct coord { + /* node in a tree */ + /* 0 */ znode *node; + + /* position of item within node */ + /* 4 */ pos_in_node_t item_pos; + /* position of unit within item */ + /* 6 */ pos_in_node_t unit_pos; + /* optimization: plugin of item is stored in coord_t. Until this was + implemented, item_plugin_by_coord() was major CPU consumer. ->iplugid + is invalidated (set to 0xff) on each modification of ->item_pos, + and all such modifications are funneled through coord_*_item_pos() + functions below. + */ + /* 8 */ char iplugid; + /* position of coord w.r.t. to neighboring items and/or units. + Values are taken from &between_enum above. + */ + /* 9 */ char between; + /* padding. It will be added by the compiler anyway to conform to the + * C language alignment requirements. We keep it here to be on the + * safe side and to have a clear picture of the memory layout of this + * structure. 
*/ + /* 10 */ __u16 pad; + /* 12 */ int offset; +#if REISER4_DEBUG + unsigned long plug_v; + unsigned long body_v; +#endif +}; + +#define INVALID_PLUGID ((char)((1 << 8) - 1)) +#define INVALID_OFFSET -1 + +static inline void coord_clear_iplug(coord_t *coord) +{ + assert("nikita-2835", coord != NULL); + coord->iplugid = INVALID_PLUGID; + coord->offset = INVALID_OFFSET; +} + +static inline int coord_is_iplug_set(const coord_t *coord) +{ + assert("nikita-2836", coord != NULL); + return coord->iplugid != INVALID_PLUGID; +} + +static inline void coord_set_item_pos(coord_t *coord, pos_in_node_t pos) +{ + assert("nikita-2478", coord != NULL); + coord->item_pos = pos; + coord_clear_iplug(coord); +} + +static inline void coord_dec_item_pos(coord_t *coord) +{ + assert("nikita-2480", coord != NULL); + --coord->item_pos; + coord_clear_iplug(coord); +} + +static inline void coord_inc_item_pos(coord_t *coord) +{ + assert("nikita-2481", coord != NULL); + ++coord->item_pos; + coord_clear_iplug(coord); +} + +static inline void coord_add_item_pos(coord_t *coord, int delta) +{ + assert("nikita-2482", coord != NULL); + coord->item_pos += delta; + coord_clear_iplug(coord); +} + +static inline void coord_invalid_item_pos(coord_t *coord) +{ + assert("nikita-2832", coord != NULL); + coord->item_pos = (unsigned short)~0; + coord_clear_iplug(coord); +} + +/* Reverse a direction. */ +static inline sideof sideof_reverse(sideof side) +{ + return side == LEFT_SIDE ? RIGHT_SIDE : LEFT_SIDE; +} + +/* NOTE: There is a somewhat odd mixture of the following opposed terms: + + "first" and "last" + "next" and "prev" + "before" and "after" + "leftmost" and "rightmost" + + But I think the chosen names are decent the way they are. +*/ + +/* COORD INITIALIZERS */ + +/* Initialize an invalid coordinate. */ +extern void coord_init_invalid(coord_t *coord, const znode * node); + +extern void coord_init_first_unit_nocheck(coord_t *coord, const znode * node); + +/* Initialize a coordinate to point at the first unit of the first item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +extern void coord_init_first_unit(coord_t *coord, const znode * node); + +/* Initialize a coordinate to point at the last unit of the last item. If the + node is empty, it is positioned at the EMPTY_NODE. */ +extern void coord_init_last_unit(coord_t *coord, const znode * node); + +/* Initialize a coordinate to before the first item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +extern void coord_init_before_first_item(coord_t *coord, const znode * node); + +/* Initialize a coordinate to after the last item. If the node is empty, it is + positioned at the EMPTY_NODE. */ +extern void coord_init_after_last_item(coord_t *coord, const znode * node); + +/* Initialize a coordinate to after last unit in the item. Coord must be set + already to existing item */ +void coord_init_after_item_end(coord_t *coord); + +/* Initialize a coordinate to before the item. Coord must be set already to + existing item */ +void coord_init_before_item(coord_t *); +/* Initialize a coordinate to after the item. Coord must be set already to + existing item */ +void coord_init_after_item(coord_t *); + +/* Calls either coord_init_first_unit or coord_init_last_unit depending on + sideof argument. */ +extern void coord_init_sideof_unit(coord_t *coord, const znode * node, + sideof dir); + +/* Initialize a coordinate by 0s. 
Used in places where init_coord was used and + it was not clear how actually + FIXME-VS: added by vs (2002, june, 8) */ +extern void coord_init_zero(coord_t *coord); + +/* COORD METHODS */ + +/* after shifting of node content, coord previously set properly may become + invalid, try to "normalize" it. */ +void coord_normalize(coord_t *coord); + +/* Copy a coordinate. */ +extern void coord_dup(coord_t *coord, const coord_t *old_coord); + +/* Copy a coordinate without check. */ +void coord_dup_nocheck(coord_t *coord, const coord_t *old_coord); + +unsigned coord_num_units(const coord_t *coord); + +/* Return the last valid unit number at the present item (i.e., + coord_num_units() - 1). */ +static inline unsigned coord_last_unit_pos(const coord_t *coord) +{ + return coord_num_units(coord) - 1; +} + +#if REISER4_DEBUG +/* For assertions only, checks for a valid coordinate. */ +extern int coord_check(const coord_t *coord); + +extern unsigned long znode_times_locked(const znode * z); + +static inline void coord_update_v(coord_t *coord) +{ + coord->plug_v = coord->body_v = znode_times_locked(coord->node); +} +#endif + +extern int coords_equal(const coord_t *c1, const coord_t *c2); + +extern void print_coord(const char *mes, const coord_t *coord, int print_node); + +/* If coord_is_after_rightmost return NCOORD_ON_THE_RIGHT, if + coord_is_after_leftmost return NCOORD_ON_THE_LEFT, otherwise return + NCOORD_INSIDE. */ +extern coord_wrt_node coord_wrt(const coord_t *coord); + +/* Returns true if the coordinates are positioned at adjacent units, regardless + of before-after or item boundaries. */ +extern int coord_are_neighbors(coord_t *c1, coord_t *c2); + +/* Assuming two coordinates are positioned in the same node, return + NCOORD_CMP_ON_RIGHT, NCOORD_CMP_ON_LEFT, or NCOORD_CMP_SAME depending on c1's + position relative to c2. */ +extern coord_cmp coord_compare(coord_t *c1, coord_t *c2); + +/* COORD PREDICATES */ + +/* Returns true if the coord was initializewd by coord_init_invalid (). */ +extern int coord_is_invalid(const coord_t *coord); + +/* Returns true if the coordinate is positioned at an existing item, not before + or after an item. It may be placed at, before, or after any unit within the + item, whether existing or not. If this is true you can call methods of the + item plugin. */ +extern int coord_is_existing_item(const coord_t *coord); + +/* Returns true if the coordinate is positioned after a item, before a item, + after the last unit of an item, before the first unit of an item, or at an + empty node. */ +extern int coord_is_between_items(const coord_t *coord); + +/* Returns true if the coordinate is positioned at an existing unit, not before + or after a unit. */ +extern int coord_is_existing_unit(const coord_t *coord); + +/* Returns true if the coordinate is positioned at an empty node. */ +extern int coord_is_empty(const coord_t *coord); + +/* Returns true if the coordinate is positioned at the first unit of the first + item. Not true for empty nodes nor coordinates positioned before the first + item. */ +extern int coord_is_leftmost_unit(const coord_t *coord); + +/* Returns true if the coordinate is positioned after the last item or after the + last unit of the last item or it is an empty node. */ +extern int coord_is_after_rightmost(const coord_t *coord); + +/* Returns true if the coordinate is positioned before the first item or it is + an empty node. 
*/ +extern int coord_is_before_leftmost(const coord_t *coord); + +/* Calls either coord_is_before_leftmost or coord_is_after_rightmost depending + on sideof argument. */ +extern int coord_is_after_sideof_unit(coord_t *coord, sideof dir); + +/* COORD MODIFIERS */ + +/* Advances the coordinate by one unit to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing unit. */ +extern int coord_next_unit(coord_t *coord); + +/* Advances the coordinate by one item to the right. If empty, no change. If + coord_is_rightmost_unit, advances to AFTER THE LAST ITEM. Returns 0 if new + position is an existing item. */ +extern int coord_next_item(coord_t *coord); + +/* Advances the coordinate by one unit to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing unit. */ +extern int coord_prev_unit(coord_t *coord); + +/* Advances the coordinate by one item to the left. If empty, no change. If + coord_is_leftmost_unit, advances to BEFORE THE FIRST ITEM. Returns 0 if new + position is an existing item. */ +extern int coord_prev_item(coord_t *coord); + +/* If the coordinate is between items, shifts it to the right. Returns 0 on + success and non-zero if there is no position to the right. */ +extern int coord_set_to_right(coord_t *coord); + +/* If the coordinate is between items, shifts it to the left. Returns 0 on + success and non-zero if there is no position to the left. */ +extern int coord_set_to_left(coord_t *coord); + +/* If the coordinate is at an existing unit, set to after that unit. Returns 0 + on success and non-zero if the unit did not exist. */ +extern int coord_set_after_unit(coord_t *coord); + +/* Calls either coord_next_unit or coord_prev_unit depending on sideof + argument. */ +extern int coord_sideof_unit(coord_t *coord, sideof dir); + +/* iterate over all units in @node */ +#define for_all_units(coord, node) \ + for (coord_init_before_first_item((coord), (node)) ; \ + coord_next_unit(coord) == 0 ;) + +/* iterate over all items in @node */ +#define for_all_items(coord, node) \ + for (coord_init_before_first_item((coord), (node)) ; \ + coord_next_item(coord) == 0 ;) + +/* COORD/ITEM METHODS */ + +extern int item_utmost_child_real_block(const coord_t *coord, sideof side, + reiser4_block_nr * blk); +extern int item_utmost_child(const coord_t *coord, sideof side, + jnode ** child); + +/* a flow is a sequence of bytes being written to or read from the tree. The + tree will slice the flow into items while storing it into nodes, but all of + that is hidden from anything outside the tree. */ + +struct flow { + reiser4_key key; /* key of start of flow's sequence of bytes */ + loff_t length; /* length of flow's sequence of bytes */ + char *data; /* start of flow's sequence of bytes */ + int user; /* if 1 data is user space, 0 - kernel space */ + rw_op op; /* NIKITA-FIXME-HANS: comment is where? */ +}; + +void move_flow_forward(flow_t *f, unsigned count); + +/* &reiser4_item_data - description of data to be inserted or pasted + + Q: articulate the reasons for the difference between this and flow. + + A: Becides flow we insert into tree other things: stat data, directory + entry, etc. To insert them into tree one has to provide this structure. If + one is going to insert flow - he can use insert_flow, where this structure + does not have to be created +*/ +struct reiser4_item_data { + /* actual data to be inserted. 
If NULL, ->create_item() will not + do xmemcpy itself, leaving this up to the caller. This can + save some amount of unnecessary memory copying, for example, + during insertion of stat data. + + */ + char *data; + /* 1 if 'char * data' contains pointer to user space and 0 if it is + kernel space */ + int user; + /* amount of data we are going to insert or paste */ + int length; + /* "Arg" is opaque data that is passed down to the + ->create_item() method of node layout, which in turn + hands it to the ->create_hook() of item being created. This + arg is currently used by: + + . ->create_hook() of internal item + (fs/reiser4/plugin/item/internal.c:internal_create_hook()), + . ->paste() method of directory item. + . ->create_hook() of extent item + + For internal item, this is left "brother" of new node being + inserted and it is used to add new node into sibling list + after parent to it was just inserted into parent. + + While ->arg does look somewhat of unnecessary compication, + it actually saves a lot of headache in many places, because + all data necessary to insert or paste new data into tree are + collected in one place, and this eliminates a lot of extra + argument passing and storing everywhere. + + */ + void *arg; + /* plugin of item we are inserting */ + item_plugin *iplug; +}; + +/* __REISER4_COORD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/debug.c b/fs/reiser4/debug.c new file mode 100644 index 000000000000..96c95085e81a --- /dev/null +++ b/fs/reiser4/debug.c @@ -0,0 +1,309 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Debugging facilities. */ + +/* + * This file contains generic debugging functions used by reiser4. Roughly + * following: + * + * panicking: reiser4_do_panic(), reiser4_print_prefix(). + * + * locking: + * reiser4_schedulable(), reiser4_lock_counters(), print_lock_counters(), + * reiser4_no_counters_are_held(), reiser4_commit_check_locks() + * + * error code monitoring (see comment before RETERR macro): + * reiser4_return_err(), reiser4_report_err(). + * + * stack back-tracing: fill_backtrace() + * + * miscellaneous: reiser4_preempt_point(), call_on_each_assert(), + * reiser4_debugtrap(). + * + */ + +#include "reiser4.h" +#include "context.h" +#include "super.h" +#include "txnmgr.h" +#include "znode.h" + +#include <linux/sysfs.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/spinlock.h> +#include <linux/kallsyms.h> +#include <linux/vmalloc.h> +#include <linux/ctype.h> +#include <linux/sysctl.h> +#include <linux/hardirq.h> +#include <linux/sched/signal.h> /* signal_pending() */ + +#if 0 +#if REISER4_DEBUG +static void reiser4_report_err(void); +#else +#define reiser4_report_err() noop +#endif +#endif /* 0 */ + +/* + * global buffer where message given to reiser4_panic is formatted. + */ +static char panic_buf[REISER4_PANIC_MSG_BUFFER_SIZE]; + +/* + * lock protecting consistency of panic_buf under concurrent panics + */ +static DEFINE_SPINLOCK(panic_guard); + +/* Your best friend. Call it on each occasion. This is called by + fs/reiser4/debug.h:reiser4_panic(). */ +void reiser4_do_panic(const char *format/* format string */ , ... /* rest */) +{ + static int in_panic = 0; + va_list args; + + /* + * check for recursive panic. 
+ */ + if (in_panic == 0) { + in_panic = 1; + + spin_lock(&panic_guard); + va_start(args, format); + vsnprintf(panic_buf, sizeof(panic_buf), format, args); + va_end(args); + printk(KERN_EMERG "reiser4 panicked cowardly: %s", panic_buf); + spin_unlock(&panic_guard); + + /* + * if kernel debugger is configured---drop in. Early dropping + * into kgdb is not always convenient, because panic message + * is not yet printed most of the times. But: + * + * (1) message can be extracted from printk_buf[] + * (declared static inside of printk()), and + * + * (2) sometimes serial/kgdb combo dies while printing + * long panic message, so it's more prudent to break into + * debugger earlier. + * + */ + DEBUGON(1); + } + /* to make gcc happy about noreturn attribute */ + panic("%s", panic_buf); +} + +#if 0 +void +reiser4_print_prefix(const char *level, int reperr, const char *mid, + const char *function, const char *file, int lineno) +{ + const char *comm; + int pid; + + if (unlikely(in_interrupt() || in_irq())) { + comm = "interrupt"; + pid = 0; + } else { + comm = current->comm; + pid = current->pid; + } + printk("%sreiser4[%.16s(%i)]: %s (%s:%i)[%s]:\n", + level, comm, pid, function, file, lineno, mid); + if (reperr) + reiser4_report_err(); +} +#endif /* 0 */ + +/* Preemption point: this should be called periodically during long running + operations (carry, allocate, and squeeze are best examples) */ +int reiser4_preempt_point(void) +{ + assert("nikita-3008", reiser4_schedulable()); + cond_resched(); + return signal_pending(current); +} + +#if REISER4_DEBUG +/* Debugging aid: return struct where information about locks taken by current + thread is accumulated. This can be used to formulate lock ordering + constraints and various assertions. + +*/ +reiser4_lock_cnt_info *reiser4_lock_counters(void) +{ + reiser4_context *ctx = get_current_context(); + assert("jmacd-1123", ctx != NULL); + return &ctx->locks; +} + +/* + * print human readable information about locks held by the reiser4 context. + */ +static void print_lock_counters(const char *prefix, + const reiser4_lock_cnt_info * info) +{ + printk("%s: jnode: %i, tree: %i (r:%i,w:%i), dk: %i (r:%i,w:%i)\n" + "jload: %i, " + "txnh: %i, atom: %i, stack: %i, txnmgr: %i, " + "ktxnmgrd: %i, fq: %i\n" + "inode: %i, " + "cbk_cache: %i (r:%i,w%i), " + "eflush: %i, " + "zlock: %i,\n" + "spin: %i, long: %i inode_sem: (r:%i,w:%i)\n" + "d: %i, x: %i, t: %i\n", prefix, + info->spin_locked_jnode, + info->rw_locked_tree, info->read_locked_tree, + info->write_locked_tree, + info->rw_locked_dk, info->read_locked_dk, info->write_locked_dk, + info->spin_locked_jload, + info->spin_locked_txnh, + info->spin_locked_atom, info->spin_locked_stack, + info->spin_locked_txnmgr, info->spin_locked_ktxnmgrd, + info->spin_locked_fq, + info->spin_locked_inode, + info->rw_locked_cbk_cache, + info->read_locked_cbk_cache, + info->write_locked_cbk_cache, + info->spin_locked_super_eflush, + info->spin_locked_zlock, + info->spin_locked, + info->long_term_locked_znode, + info->inode_sem_r, info->inode_sem_w, + info->d_refs, info->x_refs, info->t_refs); +} + +/* check that no spinlocks are held */ +int reiser4_schedulable(void) +{ + if (get_current_context_check() != NULL) { + if (!LOCK_CNT_NIL(spin_locked)) { + print_lock_counters("in atomic", reiser4_lock_counters()); + return 0; + } + } + might_sleep(); + return 1; +} +/* + * return true, iff no locks are held. 
+ */ +int reiser4_no_counters_are_held(void) +{ + reiser4_lock_cnt_info *counters; + + counters = reiser4_lock_counters(); + return + (counters->spin_locked_zlock == 0) && + (counters->spin_locked_jnode == 0) && + (counters->rw_locked_tree == 0) && + (counters->read_locked_tree == 0) && + (counters->write_locked_tree == 0) && + (counters->rw_locked_dk == 0) && + (counters->read_locked_dk == 0) && + (counters->write_locked_dk == 0) && + (counters->spin_locked_txnh == 0) && + (counters->spin_locked_atom == 0) && + (counters->spin_locked_stack == 0) && + (counters->spin_locked_txnmgr == 0) && + (counters->spin_locked_inode == 0) && + (counters->spin_locked == 0) && + (counters->long_term_locked_znode == 0) && + (counters->inode_sem_r == 0) && + (counters->inode_sem_w == 0) && (counters->d_refs == 0); +} + +/* + * return true, iff transaction commit can be done under locks held by the + * current thread. + */ +int reiser4_commit_check_locks(void) +{ + reiser4_lock_cnt_info *counters; + int inode_sem_r; + int inode_sem_w; + int result; + + /* + * inode's read/write semaphore is the only reiser4 lock that can be + * held during commit. + */ + + counters = reiser4_lock_counters(); + inode_sem_r = counters->inode_sem_r; + inode_sem_w = counters->inode_sem_w; + + counters->inode_sem_r = counters->inode_sem_w = 0; + result = reiser4_no_counters_are_held(); + counters->inode_sem_r = inode_sem_r; + counters->inode_sem_w = inode_sem_w; + return result; +} + +/* + * fill "error site" in the current reiser4 context. See comment before RETERR + * macro for more details. + */ +void reiser4_return_err(int code, const char *file, int line) +{ + if (code < 0 && is_in_reiser4_context()) { + reiser4_context *ctx = get_current_context(); + + if (ctx != NULL) { + ctx->err.code = code; + ctx->err.file = file; + ctx->err.line = line; + } + } +} + +#if 0 +/* + * report error information recorder by reiser4_return_err(). + */ +static void reiser4_report_err(void) +{ + reiser4_context *ctx = get_current_context_check(); + + if (ctx != NULL) { + if (ctx->err.code != 0) { + printk("code: %i at %s:%i\n", + ctx->err.code, ctx->err.file, ctx->err.line); + } + } +} +#endif /* 0 */ + +#endif /* REISER4_DEBUG */ + +#if KERNEL_DEBUGGER + +/* + * this functions just drops into kernel debugger. It is a convenient place to + * put breakpoint in. + */ +void reiser4_debugtrap(void) +{ + /* do nothing. Put break point here. */ +#if defined(CONFIG_KGDB) && !defined(CONFIG_REISER4_FS_MODULE) + extern void kgdb_breakpoint(void); + kgdb_breakpoint(); +#endif +} +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/debug.h b/fs/reiser4/debug.h new file mode 100644 index 000000000000..a2a6c6745ce2 --- /dev/null +++ b/fs/reiser4/debug.h @@ -0,0 +1,353 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Declarations of debug macros. */ + +#if !defined(__FS_REISER4_DEBUG_H__) +#define __FS_REISER4_DEBUG_H__ + +#include "forward.h" +#include "reiser4.h" + +/** + * generic function to produce formatted output, decorating it with + * whatever standard prefixes/postfixes we want. "Fun" is a function + * that will be actually called, can be printk, panic etc. + * This is for use by other debugging macros, not by users. + */ +#define DCALL(lev, fun, reperr, label, format, ...) 
\ +({ \ + fun(lev "reiser4[%.16s(%i)]: %s (%s:%i)[%s]:\n" format "\n" , \ + current->comm, current->pid, __FUNCTION__, \ + __FILE__, __LINE__, label, ## __VA_ARGS__); \ +}) + +/* + * cause kernel to crash + */ +#define reiser4_panic(mid, format, ...) \ + DCALL("", reiser4_do_panic, 1, mid, format , ## __VA_ARGS__) + +/* print message with indication of current process, file, line and + function */ +#define reiser4_log(label, format, ...) \ + DCALL(KERN_DEBUG, printk, 0, label, format , ## __VA_ARGS__) + +/* Assertion checked during compilation. + If "cond" is false (0) we get duplicate case label in switch. + Use this to check something like famous + cassert (sizeof(struct reiserfs_journal_commit) == 4096) ; + in 3.x journal.c. If cassertion fails you get compiler error, + so no "maintainer-id". +*/ +#define cassert(cond) ({ switch (-1) { case (cond): case 0: break; } }) + +#define noop do {; } while (0) + +#if REISER4_DEBUG +/* version of info that only actually prints anything when _d_ebugging + is on */ +#define dinfo(format, ...) printk(format , ## __VA_ARGS__) +/* macro to catch logical errors. Put it into `default' clause of + switch() statement. */ +#define impossible(label, format, ...) \ + reiser4_panic(label, "impossible: " format , ## __VA_ARGS__) +/* assert assures that @cond is true. If it is not, reiser4_panic() is + called. Use this for checking logical consistency and _never_ call + this to check correctness of external data: disk blocks and user-input . */ +#define assert(label, cond) \ +({ \ + /* call_on_each_assert(); */ \ + if (cond) { \ + /* put negated check to avoid using !(cond) that would lose \ + * warnings for things like assert(a = b); */ \ + ; \ + } else { \ + DEBUGON(1); \ + reiser4_panic(label, "assertion failed: %s", #cond); \ + } \ +}) + +/* like assertion, but @expr is evaluated even if REISER4_DEBUG is off. */ +#define check_me(label, expr) assert(label, (expr)) + +#define ON_DEBUG(exp) exp + +extern int reiser4_schedulable(void); +extern void call_on_each_assert(void); + +#else + +#define dinfo(format, args...) noop +#define impossible(label, format, args...) noop +#define assert(label, cond) noop +#define check_me(label, expr) ((void) (expr)) +#define ON_DEBUG(exp) +#define reiser4_schedulable() might_sleep() + +/* REISER4_DEBUG */ +#endif + +#if REISER4_DEBUG +/* per-thread information about lock acquired by this thread. Used by lock + * ordering checking in spin_macros.h */ +typedef struct reiser4_lock_cnt_info { + int rw_locked_tree; + int read_locked_tree; + int write_locked_tree; + + int rw_locked_dk; + int read_locked_dk; + int write_locked_dk; + + int rw_locked_cbk_cache; + int read_locked_cbk_cache; + int write_locked_cbk_cache; + + int spin_locked_zlock; + int spin_locked_jnode; + int spin_locked_jload; + int spin_locked_txnh; + int spin_locked_atom; + int spin_locked_stack; + int spin_locked_txnmgr; + int spin_locked_ktxnmgrd; + int spin_locked_fq; + int spin_locked_inode; + int spin_locked_super_eflush; + int spin_locked; + int long_term_locked_znode; + + int inode_sem_r; + int inode_sem_w; + + int d_refs; + int x_refs; + int t_refs; +} reiser4_lock_cnt_info; + +extern struct reiser4_lock_cnt_info *reiser4_lock_counters(void); +#define IN_CONTEXT(a, b) (is_in_reiser4_context() ? 
(a) : (b)) + +/* increment lock-counter @counter, if present */ +#define LOCK_CNT_INC(counter) \ + IN_CONTEXT(++(reiser4_lock_counters()->counter), 0) + +/* decrement lock-counter @counter, if present */ +#define LOCK_CNT_DEC(counter) \ + IN_CONTEXT(--(reiser4_lock_counters()->counter), 0) + +/* check that lock-counter is zero. This is for use in assertions */ +#define LOCK_CNT_NIL(counter) \ + IN_CONTEXT(reiser4_lock_counters()->counter == 0, 1) + +/* check that lock-counter is greater than zero. This is for use in + * assertions */ +#define LOCK_CNT_GTZ(counter) \ + IN_CONTEXT(reiser4_lock_counters()->counter > 0, 1) +#define LOCK_CNT_LT(counter,n) \ + IN_CONTEXT(reiser4_lock_counters()->counter < n, 1) + +#else /* REISER4_DEBUG */ + +/* no-op versions on the above */ + +typedef struct reiser4_lock_cnt_info { +} reiser4_lock_cnt_info; + +#define reiser4_lock_counters() ((reiser4_lock_cnt_info *)NULL) +#define LOCK_CNT_INC(counter) noop +#define LOCK_CNT_DEC(counter) noop +#define LOCK_CNT_NIL(counter) (1) +#define LOCK_CNT_GTZ(counter) (1) +#define LOCK_CNT_LT(counter, n) (1) + +#endif /* REISER4_DEBUG */ + +#define assert_spin_not_locked(lock) BUG_ON(0) +#define assert_rw_write_locked(lock) BUG_ON(0) +#define assert_rw_read_locked(lock) BUG_ON(0) +#define assert_rw_locked(lock) BUG_ON(0) +#define assert_rw_not_write_locked(lock) BUG_ON(0) +#define assert_rw_not_read_locked(lock) BUG_ON(0) +#define assert_rw_not_locked(lock) BUG_ON(0) + +/* flags controlling debugging behavior. Are set through debug_flags=N mount + option. */ +typedef enum { + /* print a lot of information during panic. When this is on all jnodes + * are listed. This can be *very* large output. Usually you don't want + * this. Especially over serial line. */ + REISER4_VERBOSE_PANIC = 0x00000001, + /* print a lot of information during umount */ + REISER4_VERBOSE_UMOUNT = 0x00000002, + /* print gathered statistics on umount */ + REISER4_STATS_ON_UMOUNT = 0x00000004, + /* check node consistency */ + REISER4_CHECK_NODE = 0x00000008 +} reiser4_debug_flags; + +extern int is_in_reiser4_context(void); + +/* + * evaluate expression @e only if with reiser4 context + */ +#define ON_CONTEXT(e) do { \ + if (is_in_reiser4_context()) { \ + e; \ + } } while (0) + +/* + * evaluate expression @e only when within reiser4_context and debugging is + * on. + */ +#define ON_DEBUG_CONTEXT(e) ON_DEBUG(ON_CONTEXT(e)) + +/* + * complain about unexpected function result and crash. Used in "default" + * branches of switch statements and alike to assert that invalid results are + * not silently ignored. + */ +#define wrong_return_value(label, function) \ + impossible(label, "wrong return value from " function) + +/* Issue different types of reiser4 messages to the console */ +#define warning(label, format, ...) \ + DCALL(KERN_WARNING, \ + printk, 1, label, "WARNING: " format , ## __VA_ARGS__) +#define notice(label, format, ...) \ + DCALL(KERN_NOTICE, \ + printk, 1, label, "NOTICE: " format , ## __VA_ARGS__) + +/* mark not yet implemented functionality */ +#define not_yet(label, format, ...) \ + reiser4_panic(label, "NOT YET IMPLEMENTED: " format , ## __VA_ARGS__) + +extern void reiser4_do_panic(const char *format, ...) 
+ __attribute__ ((noreturn, format(printf, 1, 2))); + +extern int reiser4_preempt_point(void); +extern void reiser4_print_stats(void); + +#if REISER4_DEBUG +extern int reiser4_no_counters_are_held(void); +extern int reiser4_commit_check_locks(void); +#else +#define reiser4_no_counters_are_held() (1) +#define reiser4_commit_check_locks() (1) +#endif + +/* true if @i is power-of-two. Useful for rate-limited warnings, etc. */ +#define IS_POW(i) \ +({ \ + typeof(i) __i; \ + \ + __i = (i); \ + !(__i & (__i - 1)); \ +}) + +#define KERNEL_DEBUGGER (1) + +#if KERNEL_DEBUGGER + +extern void reiser4_debugtrap(void); + +/* + * Check condition @cond and drop into kernel debugger (kgdb) if it's true. If + * kgdb is not compiled in, do nothing. + */ +#define DEBUGON(cond) \ +({ \ + if (unlikely(cond)) \ + reiser4_debugtrap(); \ +}) +#else +#define DEBUGON(cond) noop +#endif + +/* + * Error code tracing facility. (Idea is borrowed from XFS code.) + * + * Suppose some strange and/or unexpected code is returned from some function + * (for example, write(2) returns -EEXIST). It is possible to place a + * breakpoint in the reiser4_write(), but it is too late here. How to find out + * in what particular place -EEXIST was generated first? + * + * In reiser4 all places where actual error codes are produced (that is, + * statements of the form + * + * return -EFOO; // (1), or + * + * result = -EFOO; // (2) + * + * are replaced with + * + * return RETERR(-EFOO); // (1a), and + * + * result = RETERR(-EFOO); // (2a) respectively + * + * RETERR() macro fills a backtrace in reiser4_context. This back-trace is + * printed in error and warning messages. Moreover, it's possible to put a + * conditional breakpoint in reiser4_return_err (low-level function called + * by RETERR() to do the actual work) to break into debugger immediately + * when particular error happens. + * + */ + +#if REISER4_DEBUG + +/* + * data-type to store information about where error happened ("error site"). + */ +typedef struct err_site { + int code; /* error code */ + const char *file; /* source file, filled by __FILE__ */ + int line; /* source file line, filled by __LINE__ */ +} err_site; + +extern void reiser4_return_err(int code, const char *file, int line); + +/* + * fill &get_current_context()->err_site with error information. + */ +#define RETERR(code) \ +({ \ + typeof(code) __code; \ + \ + __code = (code); \ + reiser4_return_err(__code, __FILE__, __LINE__); \ + __code; \ +}) + +#else + +/* + * no-op versions of the above + */ + +typedef struct err_site { +} err_site; +#define RETERR(code) code +#endif + +#if REISER4_LARGE_KEY +/* + * conditionally compile arguments only if REISER4_LARGE_KEY is on. + */ +#define ON_LARGE_KEY(...) __VA_ARGS__ +#else +#define ON_LARGE_KEY(...) +#endif + +/* __FS_REISER4_DEBUG_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/dformat.h b/fs/reiser4/dformat.h new file mode 100644 index 000000000000..7316754daeaa --- /dev/null +++ b/fs/reiser4/dformat.h @@ -0,0 +1,73 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Formats of on-disk data and conversion functions. 
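/*
 * A small conversion sketch for the on-disk types declared below, assuming
 * "on_disk" holds a little-endian block number read from the device (the
 * variable names are hypothetical). Shown here with the generic kernel
 * byte-order helpers le64_to_cpu()/cpu_to_le64().
 *
 *	reiser4_dblock_nr on_disk;	value as stored on disk (__le64)
 *	reiser4_block_nr in_mem;	value in CPU byte order (__u64)
 *
 *	in_mem = le64_to_cpu(on_disk);	disk (little-endian) to CPU order
 *	on_disk = cpu_to_le64(in_mem);	CPU order back to disk format
 */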
*/ + +/* put all item formats in the files describing the particular items, + our model is, everything you need to do to add an item to reiser4, + (excepting the changes to the plugin that uses the item which go + into the file defining that plugin), you put into one file. */ +/* Data on disk are stored in little-endian format. + To declare fields of on-disk structures, use d8, d16, d32 and d64. + d??tocpu() and cputod??() to convert. */ + +#if !defined(__FS_REISER4_DFORMAT_H__) +#define __FS_REISER4_DFORMAT_H__ + +#include "debug.h" + +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include <linux/types.h> + +typedef __u8 d8; +typedef __le16 d16; +typedef __le32 d32; +typedef __le64 d64; + +#define PACKED __attribute__((packed)) + +/* data-type for block number */ +typedef __u64 reiser4_block_nr; + +/* data-type for block number on disk, disk format */ +typedef __le64 reiser4_dblock_nr; + +/** + * disk_addr_eq - compare disk addresses + * @b1: pointer to block number ot compare + * @b2: pointer to block number ot compare + * + * Returns true if if disk addresses are the same + */ +static inline int disk_addr_eq(const reiser4_block_nr * b1, + const reiser4_block_nr * b2) +{ + assert("nikita-1033", b1 != NULL); + assert("nikita-1266", b2 != NULL); + + return !memcmp(b1, b2, sizeof *b1); +} + +/* structure of master reiser4 super block */ +typedef struct reiser4_master_sb { + char magic[16]; /* "ReIsEr4" */ + __le16 disk_plugin_id; /* id of disk layout plugin */ + __le16 blocksize; + char uuid[16]; /* unique id */ + char label[16]; /* filesystem label */ + __le64 diskmap; /* location of the diskmap. 0 if not present */ +} reiser4_master_sb; + +/* __FS_REISER4_DFORMAT_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/discard.c b/fs/reiser4/discard.c new file mode 100644 index 000000000000..e1b1ea8d1f5b --- /dev/null +++ b/fs/reiser4/discard.c @@ -0,0 +1,179 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* TRIM/discard interoperation subsystem for reiser4. */ + +/* + * This subsystem is responsible for populating an atom's ->discard_set and + * (later) converting it into a series of discard calls to the kernel. + * + * The discard is an in-kernel interface for notifying the storage + * hardware about blocks that are being logically freed by the filesystem. + * This is done via calling the blkdev_issue_discard() function. There are + * restrictions on block ranges: they should constitute at least one erase unit + * in length and be correspondingly aligned. Otherwise a discard request will + * be ignored. + * + * The erase unit size is kept in struct queue_limits as discard_granularity. + * The offset from the partition start to the first erase unit is kept in + * struct queue_limits as discard_alignment. + * + * At atom level, we record numbers of all blocks that happen to be deallocated + * during the transaction. Then we read the generated set, filter out any blocks + * that have since been allocated again and issue discards for everything still + * valid. This is what discard.[ch] is here for. 
+ * + * However, simply iterating through the recorded extents is not enough: + * - if a single extent is smaller than the erase unit, then this particular + * extent won't be discarded even if it is surrounded by enough free blocks + * to constitute a whole erase unit; + * - we won't be able to merge small adjacent extents forming an extent long + * enough to be discarded. + * + * MECHANISM: + * + * During the transaction deallocated extents are recorded in atom's delete + * set. In reiser4, there are two methods to deallocate a block: + * 1. deferred deallocation, enabled by BA_DEFER flag to reiser4_dealloc_block(). + * In this mode, blocks are stored to delete set instead of being marked free + * immediately. After committing the transaction, the delete set is "applied" + * by the block allocator and all these blocks are marked free in memory + * (see reiser4_post_write_back_hook()). + * Space management plugins also read the delete set to update on-disk + * allocation records (see reiser4_pre_commit_hook()). + * 2. immediate deallocation (the opposite). + * In this mode, blocks are marked free immediately. This is used by the + * journal subsystem to manage space used by the journal records, so these + * allocations are not visible to the space management plugins and never hit + * the disk. + * + * When discard is enabled, all immediate deallocations become deferred. This + * is OK because journal's allocations happen after reiser4_pre_commit_hook() + * where the on-disk space allocation records are updated. So, in this mode + * the atom's delete set becomes "the discard set" -- list of blocks that have + * to be considered for discarding. + * + * Discarding is performed before completing deferred deallocations, hence all + * extents in the discard set are still marked as allocated and cannot contain + * any data. Thus we can avoid any checks for blocks directly present in the + * discard set. + * + * For now, we don't perform "padding" of extents to erase unit boundaries. + * This means if extents are not aligned with the device's erase unit lattice, + * the partial erase units at head and tail of extents are truncated by kernel + * (in blkdev_issue_discard()). + * + * So, at commit time the following actions take place: + * - delete sets are merged to form the discard set; + * - elements of the discard set are sorted; + * - the discard set is iterated, joining any adjacent extents; + * - for each extent, a single call to blkdev_issue_discard() is done. 
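+ *
+ * For illustration only (hypothetical numbers): with a 4 KiB filesystem
+ * block size, sec_per_blk in discard_extent() below is 8, so a joined
+ * extent {start = 1000 blocks, len = 16 blocks} is submitted as a single
+ * discard request of 128 sectors starting at sector 8000.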
+ */ + +#include "discard.h" +#include "context.h" +#include "debug.h" +#include "txnmgr.h" +#include "super.h" + +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/blkdev.h> + +static int __discard_extent(struct block_device *bdev, sector_t start, + sector_t len) +{ + assert("intelfx-21", bdev != NULL); + + return blkdev_issue_discard(bdev, start, len, reiser4_ctx_gfp_mask_get(), + 0); +} + +static int discard_extent(txn_atom *atom UNUSED_ARG, + const reiser4_block_nr* start, + const reiser4_block_nr* len, + void *data UNUSED_ARG) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct block_device *bdev = sb->s_bdev; + + sector_t extent_start_sec, extent_len_sec; + + const int sec_per_blk = sb->s_blocksize >> 9; + + /* we assume block = N * sector */ + assert("intelfx-7", sec_per_blk > 0); + + /* convert extent to sectors */ + extent_start_sec = *start * sec_per_blk; + extent_len_sec = *len * sec_per_blk; + + /* discard the extent, don't pad it to erase unit boundaries for now */ + return __discard_extent(bdev, extent_start_sec, extent_len_sec); +} + +int discard_atom(txn_atom *atom, struct list_head *processed_set) +{ + int ret; + struct list_head discard_set; + + if (!reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + spin_unlock_atom(atom); + return 0; + } + + assert("intelfx-28", atom != NULL); + assert("intelfx-59", processed_set != NULL); + + if (list_empty(&atom->discard.delete_set)) { + /* Nothing left to discard. */ + spin_unlock_atom(atom); + return 0; + } + + /* Take the delete sets from the atom in order to release atom spinlock. */ + blocknr_list_init(&discard_set); + blocknr_list_merge(&atom->discard.delete_set, &discard_set); + spin_unlock_atom(atom); + + /* Sort the discard list, joining adjacent and overlapping extents. */ + blocknr_list_sort_and_join(&discard_set); + + /* Perform actual dirty work. */ + ret = blocknr_list_iterator(NULL, &discard_set, &discard_extent, NULL, 0); + + /* Add processed extents to the temporary list. */ + blocknr_list_merge(&discard_set, processed_set); + + if (ret != 0) { + return ret; + } + + /* Let's do this again for any new extents in the atom's discard set. */ + return -E_REPEAT; +} + +void discard_atom_post(txn_atom *atom, struct list_head *processed_set) +{ + assert("intelfx-60", atom != NULL); + assert("intelfx-61", processed_set != NULL); + + if (!reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + spin_unlock_atom(atom); + return; + } + + blocknr_list_merge(processed_set, &atom->discard.delete_set); + spin_unlock_atom(atom); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/discard.h b/fs/reiser4/discard.h new file mode 100644 index 000000000000..5f0d0d8c12c3 --- /dev/null +++ b/fs/reiser4/discard.h @@ -0,0 +1,42 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* TRIM/discard interoperation subsystem for reiser4. */ + +#if !defined(__FS_REISER4_DISCARD_H__) +#define __FS_REISER4_DISCARD_H__ + +#include "forward.h" +#include "dformat.h" + +/** + * Issue discard requests for all block extents recorded in @atom's delete sets, + * if discard is enabled. The extents processed are removed from the @atom's + * delete sets and stored in @processed_set. + * + * @atom must be locked on entry and is unlocked on exit. + * @processed_set must be initialized with blocknr_list_init(). 
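+ *
+ * Returns -E_REPEAT while new extents may still show up in the atom's
+ * delete set, so the caller loops until 0 (or a real error) is returned.
+ * Illustrative caller sketch (not the actual call site, which lives in the
+ * transaction manager):
+ *
+ *	struct list_head processed;
+ *	int ret;
+ *
+ *	blocknr_list_init(&processed);
+ *	do {
+ *		spin_lock_atom(atom);
+ *		ret = discard_atom(atom, &processed);
+ *	} while (ret == -E_REPEAT);
+ *	spin_lock_atom(atom);
+ *	discard_atom_post(atom, &processed);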
+ */ +extern int discard_atom(txn_atom *atom, struct list_head *processed_set); + +/** + * Splices @processed_set back to @atom's delete set. + * Must be called after discard_atom() loop, using the same @processed_set. + * + * @atom must be locked on entry and is unlocked on exit. + * @processed_set must be the same as passed to discard_atom(). + */ +extern void discard_atom_post(txn_atom *atom, struct list_head *processed_set); + +/* __FS_REISER4_DISCARD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/dscale.c b/fs/reiser4/dscale.c new file mode 100644 index 000000000000..2f13c4ea6e7b --- /dev/null +++ b/fs/reiser4/dscale.c @@ -0,0 +1,192 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Scalable on-disk integers */ + +/* + * Various on-disk structures contain integer-like structures. Stat-data + * contain [yes, "data" is plural, check the dictionary] file size, link + * count; extent unit contains extent width etc. To accommodate for general + * case enough space is reserved to keep largest possible value. 64 bits in + * all cases above. But in overwhelming majority of cases numbers actually + * stored in these fields will be comparatively small and reserving 8 bytes is + * a waste of precious disk bandwidth. + * + * Scalable integers are one way to solve this problem. dscale_write() + * function stores __u64 value in the given area consuming from 1 to 9 bytes, + * depending on the magnitude of the value supplied. dscale_read() reads value + * previously stored by dscale_write(). + * + * dscale_write() produces format not completely unlike of UTF: two highest + * bits of the first byte are used to store "tag". One of 4 possible tag + * values is chosen depending on the number being encoded: + * + * 0 ... 0x3f => 0 [table 1] + * 0x40 ... 0x3fff => 1 + * 0x4000 ... 0x3fffffff => 2 + * 0x40000000 ... 0xffffffffffffffff => 3 + * + * (see dscale_range() function) + * + * Values in the range 0x40000000 ... 0xffffffffffffffff require 8 full bytes + * to be stored, so in this case there is no place in the first byte to store + * tag. For such values tag is stored in an extra 9th byte. + * + * As _highest_ bits are used for the test (which is natural) scaled integers + * are stored in BIG-ENDIAN format in contrast with the rest of reiser4 which + * uses LITTLE-ENDIAN. + * + */ + +#include "debug.h" +#include "dscale.h" + +/* return tag of scaled integer stored at @address */ +static int gettag(const unsigned char *address) +{ + /* tag is stored in two highest bits */ + return (*address) >> 6; +} + +/* clear tag from value. Clear tag embedded into @value. */ +static void cleartag(__u64 *value, int tag) +{ + /* + * W-w-what ?! + * + * Actually, this is rather simple: @value passed here was read by + * dscale_read(), converted from BIG-ENDIAN, and padded to __u64 by + * zeroes. Tag is still stored in the highest (arithmetically) + * non-zero bits of @value, but relative position of tag within __u64 + * depends on @tag. + * + * For example if @tag is 0, it's stored 2 highest bits of lowest + * byte, and its offset (counting from lowest bit) is 8 - 2 == 6 bits. + * + * If tag is 1, it's stored in two highest bits of 2nd lowest byte, + * and it's offset if (2 * 8) - 2 == 14 bits. + * + * See table 1 above for details. 
+ * + * All these cases are captured by the formula: + */ + *value &= ~(3 << (((1 << tag) << 3) - 2)); + /* + * That is, clear two (3 == 0t11) bits at the offset + * + * 8 * (2 ^ tag) - 2, + * + * that is, two highest bits of (2 ^ tag)-th byte of @value. + */ +} + +/* return tag for @value. See table 1 above for details. */ +static int dscale_range(__u64 value) +{ + if (value > 0x3fffffff) + return 3; + if (value > 0x3fff) + return 2; + if (value > 0x3f) + return 1; + return 0; +} + +/* restore value stored at @adderss by dscale_write() and return number of + * bytes consumed */ +int dscale_read(unsigned char *address, __u64 *value) +{ + int tag; + + /* read tag */ + tag = gettag(address); + switch (tag) { + case 3: + /* In this case tag is stored in an extra byte, skip this byte + * and decode value stored in the next 8 bytes.*/ + *value = __be64_to_cpu(get_unaligned((__be64 *)(address + 1))); + /* worst case: 8 bytes for value itself plus one byte for + * tag. */ + return 9; + case 0: + *value = get_unaligned(address); + break; + case 1: + *value = __be16_to_cpu(get_unaligned((__be16 *)address)); + break; + case 2: + *value = __be32_to_cpu(get_unaligned((__be32 *)address)); + break; + default: + return RETERR(-EIO); + } + /* clear tag embedded into @value */ + cleartag(value, tag); + /* number of bytes consumed is (2 ^ tag)---see table 1. */ + return 1 << tag; +} + +/* number of bytes consumed */ +int dscale_bytes_to_read(unsigned char *address) +{ + int tag; + + tag = gettag(address); + switch (tag) { + case 0: + case 1: + case 2: + return 1 << tag; + case 3: + return 9; + default: + return RETERR(-EIO); + } +} + +/* store @value at @address and return number of bytes consumed */ +int dscale_write(unsigned char *address, __u64 value) +{ + int tag; + int shift; + __be64 v; + unsigned char *valarr; + + tag = dscale_range(value); + v = __cpu_to_be64(value); + valarr = (unsigned char *)&v; + shift = (tag == 3) ? 1 : 0; + memcpy(address + shift, valarr + sizeof v - (1 << tag), 1 << tag); + *address |= (tag << 6); + return shift + (1 << tag); +} + +/* number of bytes required to store @value */ +int dscale_bytes_to_write(__u64 value) +{ + int bytes; + + bytes = 1 << dscale_range(value); + if (bytes == 8) + ++bytes; + return bytes; +} + +/* returns true if @value and @other require the same number of bytes to be + * stored. Used by detect when data structure (like stat-data) has to be + * expanded or contracted. */ +int dscale_fit(__u64 value, __u64 other) +{ + return dscale_range(value) == dscale_range(other); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/dscale.h b/fs/reiser4/dscale.h new file mode 100644 index 000000000000..9fbf7158c149 --- /dev/null +++ b/fs/reiser4/dscale.h @@ -0,0 +1,28 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Scalable on-disk integers. See dscale.h for details. */ + +#if !defined(__FS_REISER4_DSCALE_H__) +#define __FS_REISER4_DSCALE_H__ + +#include "dformat.h" + +extern int dscale_read(unsigned char *address, __u64 *value); +extern int dscale_write(unsigned char *address, __u64 value); +extern int dscale_bytes_to_read(unsigned char *address); +extern int dscale_bytes_to_write(__u64 value); +extern int dscale_fit(__u64 value, __u64 other); + +/* __FS_REISER4_DSCALE_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/entd.c b/fs/reiser4/entd.c new file mode 100644 index 000000000000..e6b56ae57dab --- /dev/null +++ b/fs/reiser4/entd.c @@ -0,0 +1,361 @@ +/* Copyright 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Ent daemon. */ + +#include "debug.h" +#include "txnmgr.h" +#include "tree.h" +#include "entd.h" +#include "super.h" +#include "context.h" +#include "reiser4.h" +#include "vfs_ops.h" +#include "page_cache.h" +#include "inode.h" + +#include <linux/sched.h> /* struct task_struct */ +#include <linux/suspend.h> +#include <linux/kernel.h> +#include <linux/writeback.h> +#include <linux/time.h> /* INITIAL_JIFFIES */ +#include <linux/backing-dev.h> /* bdi_write_congested */ +#include <linux/wait.h> +#include <linux/kthread.h> +#include <linux/freezer.h> + +#define DEF_PRIORITY 12 +#define MAX_ENTD_ITERS 10 + +static void entd_flush(struct super_block *, struct wbq *); +static int entd(void *arg); + +/* + * set ->comm field of end thread to make its state visible to the user level + */ +#define entd_set_comm(state) \ + snprintf(current->comm, sizeof(current->comm), \ + "ent:%s%s", super->s_id, (state)) + +/** + * reiser4_init_entd - initialize entd context and start kernel daemon + * @super: super block to start ent thread for + * + * Creates entd contexts, starts kernel thread and waits until it + * initializes. + */ +int reiser4_init_entd(struct super_block *super) +{ + entd_context *ctx; + + assert("nikita-3104", super != NULL); + + ctx = get_entd_context(super); + + memset(ctx, 0, sizeof *ctx); + spin_lock_init(&ctx->guard); + init_waitqueue_head(&ctx->wait); +#if REISER4_DEBUG + INIT_LIST_HEAD(&ctx->flushers_list); +#endif + /* lists of writepage requests */ + INIT_LIST_HEAD(&ctx->todo_list); + INIT_LIST_HEAD(&ctx->done_list); + /* start entd */ + ctx->tsk = kthread_run(entd, super, "ent:%s", super->s_id); + if (IS_ERR(ctx->tsk)) + return PTR_ERR(ctx->tsk); + return 0; +} + +static void put_wbq(struct wbq *rq) +{ + iput(rq->mapping->host); + complete(&rq->completion); +} + +/* ent should be locked */ +static struct wbq *__get_wbq(entd_context * ent) +{ + struct wbq *wbq; + + if (list_empty(&ent->todo_list)) + return NULL; + + ent->nr_todo_reqs--; + wbq = list_entry(ent->todo_list.next, struct wbq, link); + list_del_init(&wbq->link); + return wbq; +} + +/* ent thread function */ +static int entd(void *arg) +{ + struct super_block *super; + entd_context *ent; + int done = 0; + + super = arg; + /* do_fork() just copies task_struct into the new + thread. ->fs_context shouldn't be copied of course. This shouldn't + be a problem for the rest of the code though. 
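+	   (current->journal_info, which reiser4 re-uses to point at its
+	   per-thread reiser4_context, is therefore cleared explicitly below.)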
+ */ + current->journal_info = NULL; + + ent = get_entd_context(super); + + while (!done) { + try_to_freeze(); + + spin_lock(&ent->guard); + while (ent->nr_todo_reqs != 0) { + struct wbq *rq; + + assert("", list_empty(&ent->done_list)); + + /* take request from the queue head */ + rq = __get_wbq(ent); + assert("", rq != NULL); + ent->cur_request = rq; + spin_unlock(&ent->guard); + + entd_set_comm("!"); + entd_flush(super, rq); + + put_wbq(rq); + + /* + * wakeup all requestors and iput their inodes + */ + spin_lock(&ent->guard); + while (!list_empty(&ent->done_list)) { + rq = list_entry(ent->done_list.next, struct wbq, link); + list_del_init(&rq->link); + ent->nr_done_reqs--; + spin_unlock(&ent->guard); + assert("", rq->written == 1); + put_wbq(rq); + spin_lock(&ent->guard); + } + } + spin_unlock(&ent->guard); + + entd_set_comm("."); + + { + DEFINE_WAIT(__wait); + + do { + prepare_to_wait(&ent->wait, &__wait, TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + done = 1; + break; + } + if (ent->nr_todo_reqs != 0) + break; + schedule(); + } while (0); + finish_wait(&ent->wait, &__wait); + } + } + BUG_ON(ent->nr_todo_reqs != 0); + return 0; +} + +/** + * reiser4_done_entd - stop entd kernel thread + * @super: super block to stop ent thread for + * + * It is called on umount. Sends stop signal to entd and wait until it handles + * it. + */ +void reiser4_done_entd(struct super_block *super) +{ + entd_context *ent; + + assert("nikita-3103", super != NULL); + + ent = get_entd_context(super); + assert("zam-1055", ent->tsk != NULL); + kthread_stop(ent->tsk); +} + +/* called at the beginning of jnode_flush to register flusher thread with ent + * daemon */ +void reiser4_enter_flush(struct super_block *super) +{ + entd_context *ent; + + assert("zam-1029", super != NULL); + ent = get_entd_context(super); + + assert("zam-1030", ent != NULL); + + spin_lock(&ent->guard); + ent->flushers++; +#if REISER4_DEBUG + list_add(&get_current_context()->flushers_link, &ent->flushers_list); +#endif + spin_unlock(&ent->guard); +} + +/* called at the end of jnode_flush */ +void reiser4_leave_flush(struct super_block *super) +{ + entd_context *ent; + int wake_up_ent; + + assert("zam-1027", super != NULL); + ent = get_entd_context(super); + + assert("zam-1028", ent != NULL); + + spin_lock(&ent->guard); + ent->flushers--; + wake_up_ent = (ent->flushers == 0 && ent->nr_todo_reqs != 0); +#if REISER4_DEBUG + list_del_init(&get_current_context()->flushers_link); +#endif + spin_unlock(&ent->guard); + if (wake_up_ent) + wake_up_process(ent->tsk); +} + +#define ENTD_CAPTURE_APAGE_BURST SWAP_CLUSTER_MAX + +static void entd_flush(struct super_block *super, struct wbq *rq) +{ + reiser4_context ctx; + + init_stack_context(&ctx, super); + ctx.entd = 1; + ctx.gfp_mask = GFP_NOFS; + + rq->wbc->range_start = page_offset(rq->page); + rq->wbc->range_end = rq->wbc->range_start + + (ENTD_CAPTURE_APAGE_BURST << PAGE_SHIFT); + + + rq->mapping->a_ops->writepages(rq->mapping, rq->wbc); + + if (rq->wbc->nr_to_write > 0) { + long result; + struct bdi_writeback *wb; + struct wb_writeback_work work = { + .sb = super, + .sync_mode = WB_SYNC_NONE, + .nr_pages = LONG_MAX, + .range_cyclic = 0, + .reason = WB_REASON_VMSCAN, + }; + rq->wbc->sync_mode = work.sync_mode, + rq->wbc->range_cyclic = work.range_cyclic, + rq->wbc->range_start = 0; + rq->wbc->range_end = LLONG_MAX; + /* + * we don't need to pin superblock for writeback: + * this is implicitly pinned by write_page_by_ent + * (via igrab), so that shutdown_super() will wait + * (on 
reiser4_put_super) for entd completion. + */ + wb = &inode_to_bdi(rq->mapping->host)->wb; + + spin_lock(&wb->list_lock); + result = generic_writeback_sb_inodes(super, + wb, + rq->wbc, + &work, + true); + spin_unlock(&wb->list_lock); + } + rq->wbc->nr_to_write = ENTD_CAPTURE_APAGE_BURST; + + reiser4_writeout(super, rq->wbc); + context_set_commit_async(&ctx); + reiser4_exit_context(&ctx); +} + +/** + * write_page_by_ent - ask entd thread to flush this page as part of slum + * @page: page to be written + * @wbc: writeback control passed to reiser4_writepage + * + * Creates a request, puts it on entd list of requests, wakeups entd if + * necessary, waits until entd completes with the request. + */ +int write_page_by_ent(struct page *page, struct writeback_control *wbc) +{ + struct super_block *sb; + struct inode *inode; + entd_context *ent; + struct wbq rq; + + assert("", PageLocked(page)); + assert("", page->mapping != NULL); + + sb = page->mapping->host->i_sb; + ent = get_entd_context(sb); + assert("", ent && ent->done == 0); + + /* + * we are going to unlock page and ask ent thread to write the + * page. Re-dirty page before unlocking so that if ent thread fails to + * write it - it will remain dirty + */ + set_page_dirty_notag(page); + account_page_redirty(page); + + /* + * pin inode in memory, unlock page, entd_flush will iput. We can not + * iput here becasue we can not allow delete_inode to be called here + */ + inode = igrab(page->mapping->host); + unlock_page(page); + if (inode == NULL) + /* inode is getting freed */ + return 0; + + /* init wbq */ + INIT_LIST_HEAD(&rq.link); + rq.magic = WBQ_MAGIC; + rq.wbc = wbc; + rq.page = page; + rq.mapping = inode->i_mapping; + rq.node = NULL; + rq.written = 0; + init_completion(&rq.completion); + + /* add request to entd's list of writepage requests */ + spin_lock(&ent->guard); + ent->nr_todo_reqs++; + list_add_tail(&rq.link, &ent->todo_list); + if (ent->nr_todo_reqs == 1) + wake_up_process(ent->tsk); + + spin_unlock(&ent->guard); + + /* wait until entd finishes */ + wait_for_completion(&rq.completion); + + if (rq.written) + /* Eventually ENTD has written the page to disk. */ + return 0; + return 0; +} + +int wbq_available(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + entd_context *ent = get_entd_context(sb); + return ent->nr_todo_reqs; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/entd.h b/fs/reiser4/entd.h new file mode 100644 index 000000000000..4f79a578fba3 --- /dev/null +++ b/fs/reiser4/entd.h @@ -0,0 +1,90 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Ent daemon. */ + +#ifndef __ENTD_H__ +#define __ENTD_H__ + +#include "context.h" + +#include <linux/fs.h> +#include <linux/completion.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/sched.h> /* for struct task_struct */ + +#define WBQ_MAGIC 0x7876dc76 + +/* write-back request. */ +struct wbq { + int magic; + struct list_head link; /* list head of this list is in entd context */ + struct writeback_control *wbc; + struct page *page; + struct address_space *mapping; + struct completion completion; + jnode *node; /* set if ent thread captured requested page */ + int written; /* set if ent thread wrote requested page */ +}; + +/* ent-thread context. This is used to synchronize starting/stopping ent + * threads. 
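+ *
+ * Request flow, in short: write_page_by_ent() queues a struct wbq on
+ * ->todo_list and wakes the thread; entd() dequeues requests, calls
+ * entd_flush() for each and completes them, also draining ->done_list
+ * (requests whose pages got written as part of some flush).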
*/ +typedef struct entd_context { + /* wait queue that ent thread waits on for more work. It's + * signaled by write_page_by_ent(). */ + wait_queue_head_t wait; + /* spinlock protecting other fields */ + spinlock_t guard; + /* ent thread */ + struct task_struct *tsk; + /* set to indicate that ent thread should leave. */ + int done; + /* counter of active flushers */ + int flushers; + /* + * when reiser4_writepage asks entd to write a page - it adds struct + * wbq to this list + */ + struct list_head todo_list; + /* number of elements on the above list */ + int nr_todo_reqs; + + struct wbq *cur_request; + /* + * when entd writes a page it moves write-back request from todo_list + * to done_list. This list is used at the end of entd iteration to + * wakeup requestors and iput inodes. + */ + struct list_head done_list; + /* number of elements on the above list */ + int nr_done_reqs; + +#if REISER4_DEBUG + /* list of all active flushers */ + struct list_head flushers_list; +#endif +} entd_context; + +extern int reiser4_init_entd(struct super_block *); +extern void reiser4_done_entd(struct super_block *); + +extern void reiser4_enter_flush(struct super_block *); +extern void reiser4_leave_flush(struct super_block *); + +extern int write_page_by_ent(struct page *, struct writeback_control *); +extern int wbq_available(void); +extern void ent_writes_page(struct super_block *, struct page *); + +extern jnode *get_jnode_by_wbq(struct super_block *, struct wbq *); +/* __ENTD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/eottl.c b/fs/reiser4/eottl.c new file mode 100644 index 000000000000..169b8684a33a --- /dev/null +++ b/fs/reiser4/eottl.c @@ -0,0 +1,510 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "tree_mod.h" +#include "carry.h" +#include "tree.h" +#include "super.h" + +#include <linux/types.h> /* for __u?? */ + +/* + * Extents on the twig level (EOTTL) handling. + * + * EOTTL poses some problems to the tree traversal, that are better explained + * by example. + * + * Suppose we have block B1 on the twig level with the following items: + * + * 0. internal item I0 with key (0:0:0:0) (locality, key-type, object-id, + * offset) + * 1. extent item E1 with key (1:4:100:0), having 10 blocks of 4k each + * 2. internal item I2 with key (10:0:0:0) + * + * We are trying to insert item with key (5:0:0:0). Lookup finds node B1, and + * then intra-node lookup is done. This lookup finished on the E1, because the + * key we are looking for is larger than the key of E1 and is smaller than key + * the of I2. + * + * Here search is stuck. + * + * After some thought it is clear what is wrong here: extents on the twig level + * break some basic property of the *search* tree (on the pretext, that they + * restore property of balanced tree). + * + * Said property is the following: if in the internal node of the search tree + * we have [ ... Key1 Pointer Key2 ... ] then, all data that are or will be + * keyed in the tree with the Key such that Key1 <= Key < Key2 are accessible + * through the Pointer. 
+ * + * This is not true, when Pointer is Extent-Pointer, simply because extent + * cannot expand indefinitely to the right to include any item with + * + * Key1 <= Key <= Key2. + * + * For example, our E1 extent is only responsible for the data with keys + * + * (1:4:100:0) <= key <= (1:4:100:0xffffffffffffffff), and + * + * so, key range + * + * ( (1:4:100:0xffffffffffffffff), (10:0:0:0) ) + * + * is orphaned: there is no way to get there from the tree root. + * + * In other words, extent pointers are different than normal child pointers as + * far as search tree is concerned, and this creates such problems. + * + * Possible solution for this problem is to insert our item into node pointed + * to by I2. There are some problems through: + * + * (1) I2 can be in a different node. + * (2) E1 can be immediately followed by another extent E2. + * + * (1) is solved by calling reiser4_get_right_neighbor() and accounting + * for locks/coords as necessary. + * + * (2) is more complex. Solution here is to insert new empty leaf node and + * insert internal item between E1 and E2 pointing to said leaf node. This is + * further complicated by possibility that E2 is in a different node, etc. + * + * Problems: + * + * (1) if there was internal item I2 immediately on the right of an extent E1 + * we and we decided to insert new item S1 into node N2 pointed to by I2, then + * key of S1 will be less than smallest key in the N2. Normally, search key + * checks that key we are looking for is in the range of keys covered by the + * node key is being looked in. To work around of this situation, while + * preserving useful consistency check new flag CBK_TRUST_DK was added to the + * cbk falgs bitmask. This flag is automatically set on entrance to the + * coord_by_key() and is only cleared when we are about to enter situation + * described above. + * + * (2) If extent E1 is immediately followed by another extent E2 and we are + * searching for the key that is between E1 and E2 we only have to insert new + * empty leaf node when coord_by_key was called for insertion, rather than just + * for lookup. To distinguish these cases, new flag CBK_FOR_INSERT was added to + * the cbk falgs bitmask. This flag is automatically set by coord_by_key calls + * performed by insert_by_key() and friends. + * + * (3) Insertion of new empty leaf node (possibly) requires balancing. In any + * case it requires modification of node content which is only possible under + * write lock. It may well happen that we only have read lock on the node where + * new internal pointer is to be inserted (common case: lookup of non-existent + * stat-data that fells between two extents). If only read lock is held, tree + * traversal is restarted with lock_level modified so that next time we hit + * this problem, write lock will be held. Once we have write lock, balancing + * will be performed. + */ + +/** + * is_next_item_internal - check whether next item is internal + * @coord: coordinate of extent item in twig node + * @key: search key + * @lh: twig node lock handle + * + * Looks at the unit next to @coord. If it is an internal one - 1 is returned, + * @coord is set to that unit. If that unit is in right neighbor, @lh is moved + * to that node, @coord is set to its first unit. If next item is not internal + * or does not exist then 0 is returned, @coord and @lh are left unchanged. 2 + * is returned if search restart has to be done. 
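+ *
+ * Return values, in short: 1 - next unit is internal (@coord, and possibly
+ * @lh, are moved to it); 0 - next unit is an extent or does not exist;
+ * 2 - the search has to be restarted; negative - error.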
+ */ +static int +is_next_item_internal(coord_t *coord, const reiser4_key * key, + lock_handle * lh) +{ + coord_t next; + lock_handle rn; + int result; + + coord_dup(&next, coord); + if (coord_next_unit(&next) == 0) { + /* next unit is in this node */ + if (item_is_internal(&next)) { + coord_dup(coord, &next); + return 1; + } + assert("vs-3", item_is_extent(&next)); + return 0; + } + + /* + * next unit either does not exist or is in right neighbor. If it is in + * right neighbor we have to check right delimiting key because + * concurrent thread could get their first and insert item with a key + * smaller than @key + */ + read_lock_dk(current_tree); + result = keycmp(key, znode_get_rd_key(coord->node)); + read_unlock_dk(current_tree); + assert("vs-6", result != EQUAL_TO); + if (result == GREATER_THAN) + return 2; + + /* lock right neighbor */ + init_lh(&rn); + result = reiser4_get_right_neighbor(&rn, coord->node, + znode_is_wlocked(coord->node) ? + ZNODE_WRITE_LOCK : ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result == -E_NO_NEIGHBOR) { + /* we are on the rightmost edge of the tree */ + done_lh(&rn); + return 0; + } + + if (result) { + assert("vs-4", result < 0); + done_lh(&rn); + return result; + } + + /* + * check whether concurrent thread managed to insert item with a key + * smaller than @key + */ + read_lock_dk(current_tree); + result = keycmp(key, znode_get_ld_key(rn.node)); + read_unlock_dk(current_tree); + assert("vs-6", result != EQUAL_TO); + if (result == GREATER_THAN) { + done_lh(&rn); + return 2; + } + + result = zload(rn.node); + if (result) { + assert("vs-5", result < 0); + done_lh(&rn); + return result; + } + + coord_init_first_unit(&next, rn.node); + if (item_is_internal(&next)) { + /* + * next unit is in right neighbor and it is an unit of internal + * item. Unlock coord->node. Move @lh to right neighbor. @coord + * is set to the first unit of right neighbor. + */ + coord_dup(coord, &next); + zrelse(rn.node); + done_lh(lh); + move_lh(lh, &rn); + return 1; + } + + /* + * next unit is unit of extent item. Return without chaning @lh and + * @coord. + */ + assert("vs-6", item_is_extent(&next)); + zrelse(rn.node); + done_lh(&rn); + return 0; +} + +/** + * rd_key - calculate key of an item next to the given one + * @coord: position in a node + * @key: storage for result key + * + * @coord is set between items or after the last item in a node. Calculate key + * of item to the right of @coord. + */ +static reiser4_key *rd_key(const coord_t *coord, reiser4_key *key) +{ + coord_t dup; + + assert("nikita-2281", coord_is_between_items(coord)); + coord_dup(&dup, coord); + + if (coord_set_to_right(&dup) == 0) + /* next item is in this node. Return its key. */ + unit_key_by_coord(&dup, key); + else { + /* + * next item either does not exist or is in right + * neighbor. Return znode's right delimiting key. + */ + read_lock_dk(current_tree); + *key = *znode_get_rd_key(coord->node); + read_unlock_dk(current_tree); + } + return key; +} + +/** + * add_empty_leaf - insert empty leaf between two extents + * @insert_coord: position in twig node between two extents + * @lh: twig node lock handle + * @key: left delimiting key of new node + * @rdkey: right delimiting key of new node + * + * Inserts empty leaf node between two extent items. It is necessary when we + * have to insert an item on leaf level between two extents (items on the twig + * level). 
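+ *
+ * Schematically (illustration only):
+ *
+ *	before:  twig node [ ... | E1 | E2 | ... ]
+ *	after:   twig node [ ... | E1 | I -> new empty leaf | E2 | ... ]
+ *
+ * where I is the inserted internal item and the new leaf gets delimiting
+ * keys [@key, @rdkey).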
+ */ +static int +add_empty_leaf(coord_t *insert_coord, lock_handle *lh, + const reiser4_key *key, const reiser4_key *rdkey) +{ + int result; + carry_pool *pool; + carry_level *todo; + reiser4_item_data *item; + carry_insert_data *cdata; + carry_op *op; + znode *node; + reiser4_tree *tree; + + assert("vs-49827", znode_contains_key_lock(insert_coord->node, key)); + tree = znode_get_tree(insert_coord->node); + node = reiser4_new_node(insert_coord->node, LEAF_LEVEL); + if (IS_ERR(node)) + return PTR_ERR(node); + + /* setup delimiting keys for node being inserted */ + write_lock_dk(tree); + znode_set_ld_key(node, key); + znode_set_rd_key(node, rdkey); + ON_DEBUG(node->creator = current); + ON_DEBUG(node->first_key = *key); + write_unlock_dk(tree); + + ZF_SET(node, JNODE_ORPHAN); + + /* + * allocate carry_pool, 3 carry_level-s, reiser4_item_data and + * carry_insert_data + */ + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo) + + sizeof(*item) + sizeof(*cdata)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + todo = (carry_level *) (pool + 1); + init_carry_level(todo, pool); + + item = (reiser4_item_data *) (todo + 3); + cdata = (carry_insert_data *) (item + 1); + + op = reiser4_post_carry(todo, COP_INSERT, insert_coord->node, 0); + if (!IS_ERR(op)) { + cdata->coord = insert_coord; + cdata->key = key; + cdata->data = item; + op->u.insert.d = cdata; + op->u.insert.type = COPT_ITEM_DATA; + build_child_ptr_data(node, item); + item->arg = NULL; + /* have @insert_coord to be set at inserted item after + insertion is done */ + todo->track_type = CARRY_TRACK_CHANGE; + todo->tracked = lh; + + result = reiser4_carry(todo, NULL); + if (result == 0) { + /* + * pin node in memory. This is necessary for + * znode_make_dirty() below. + */ + result = zload(node); + if (result == 0) { + lock_handle local_lh; + + /* + * if we inserted new child into tree we have + * to mark it dirty so that flush will be able + * to process it. + */ + init_lh(&local_lh); + result = longterm_lock_znode(&local_lh, node, + ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (result == 0) { + znode_make_dirty(node); + + /* + * when internal item pointing to @node + * was inserted into twig node + * create_hook_internal did not connect + * it properly because its right + * neighbor was not known. Do it + * here + */ + write_lock_tree(tree); + assert("nikita-3312", + znode_is_right_connected(node)); + assert("nikita-2984", + node->right == NULL); + ZF_CLR(node, JNODE_RIGHT_CONNECTED); + write_unlock_tree(tree); + result = + connect_znode(insert_coord, node); + ON_DEBUG(if (result == 0) check_dkeys(node);); + + done_lh(lh); + move_lh(lh, &local_lh); + assert("vs-1676", node_is_empty(node)); + coord_init_first_unit(insert_coord, + node); + } else { + warning("nikita-3136", + "Cannot lock child"); + } + done_lh(&local_lh); + zrelse(node); + } + } + } else + result = PTR_ERR(op); + zput(node); + done_carry_pool(pool); + return result; +} + +/** + * handle_eottl - handle extent-on-the-twig-level cases in tree traversal + * @h: search handle + * @outcome: flag saying whether search has to restart or is done + * + * Handles search on twig level. If this function completes search itself then + * it returns 1. If search has to go one level down then 0 is returned. If + * error happens then LOOKUP_DONE is returned via @outcome and error code is + * saved in @h->result. 
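+ *
+ * In short (see the code below for the exact conditions):
+ *  - returns 0 to let coord_by_key() continue one level down;
+ *  - returns 1 with *outcome set to LOOKUP_DONE when the search finishes
+ *    here (h->result is CBK_COORD_FOUND, CBK_COORD_NOTFOUND or an error);
+ *  - returns 1 with *outcome set to LOOKUP_REST when the whole traversal
+ *    has to be restarted (e.g. to retake the twig level write-locked).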
+ */ +int handle_eottl(cbk_handle *h, int *outcome) +{ + int result; + reiser4_key key; + coord_t *coord; + + coord = h->coord; + + if (h->level != TWIG_LEVEL || + (coord_is_existing_item(coord) && item_is_internal(coord))) { + /* Continue to traverse tree downward. */ + return 0; + } + + /* + * make sure that @h->coord is set to twig node and that it is either + * set to extent item or after extent item + */ + assert("vs-356", h->level == TWIG_LEVEL); + assert("vs-357", ({ + coord_t lcoord; + coord_dup(&lcoord, coord); + check_me("vs-733", coord_set_to_left(&lcoord) == 0); + item_is_extent(&lcoord); + } + )); + + if (*outcome == NS_FOUND) { + /* we have found desired key on twig level in extent item */ + h->result = CBK_COORD_FOUND; + *outcome = LOOKUP_DONE; + return 1; + } + + if (!(h->flags & CBK_FOR_INSERT)) { + /* tree traversal is not for insertion. Just return + CBK_COORD_NOTFOUND. */ + h->result = CBK_COORD_NOTFOUND; + *outcome = LOOKUP_DONE; + return 1; + } + + /* take a look at the item to the right of h -> coord */ + result = is_next_item_internal(coord, h->key, h->active_lh); + if (unlikely(result < 0)) { + h->error = "get_right_neighbor failed"; + h->result = result; + *outcome = LOOKUP_DONE; + return 1; + } + if (result == 0) { + /* + * item to the right is also an extent one. Allocate a new node + * and insert pointer to it after item h -> coord. + * + * This is a result of extents being located at the twig + * level. For explanation, see comment just above + * is_next_item_internal(). + */ + znode *loaded; + + if (cbk_lock_mode(h->level, h) != ZNODE_WRITE_LOCK) { + /* + * we got node read locked, restart coord_by_key to + * have write lock on twig level + */ + h->lock_level = TWIG_LEVEL; + h->lock_mode = ZNODE_WRITE_LOCK; + *outcome = LOOKUP_REST; + return 1; + } + + loaded = coord->node; + result = + add_empty_leaf(coord, h->active_lh, h->key, + rd_key(coord, &key)); + if (result) { + h->error = "could not add empty leaf"; + h->result = result; + *outcome = LOOKUP_DONE; + return 1; + } + /* added empty leaf is locked (h->active_lh), its parent node + is unlocked, h->coord is set as EMPTY */ + assert("vs-13", coord->between == EMPTY_NODE); + assert("vs-14", znode_is_write_locked(coord->node)); + assert("vs-15", + WITH_DATA(coord->node, node_is_empty(coord->node))); + assert("vs-16", jnode_is_leaf(ZJNODE(coord->node))); + assert("vs-17", coord->node == h->active_lh->node); + *outcome = LOOKUP_DONE; + h->result = CBK_COORD_NOTFOUND; + return 1; + } else if (result == 1) { + /* + * this is special case mentioned in the comment on + * tree.h:cbk_flags. We have found internal item immediately on + * the right of extent, and we are going to insert new item + * there. Key of item we are going to insert is smaller than + * leftmost key in the node pointed to by said internal item + * (otherwise search wouldn't come to the extent in the first + * place). + * + * This is a result of extents being located at the twig + * level. For explanation, see comment just above + * is_next_item_internal(). 
+ */ + h->flags &= ~CBK_TRUST_DK; + } else { + assert("vs-8", result == 2); + *outcome = LOOKUP_REST; + return 1; + } + assert("vs-362", WITH_DATA(coord->node, item_is_internal(coord))); + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/estimate.c b/fs/reiser4/estimate.c new file mode 100644 index 000000000000..ca2652af1cfe --- /dev/null +++ b/fs/reiser4/estimate.c @@ -0,0 +1,129 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "debug.h" +#include "dformat.h" +#include "tree.h" +#include "carry.h" +#include "inode.h" +#include "plugin/cluster.h" +#include "plugin/item/ctail.h" + +/* This returns how many nodes might get dirty and added nodes if @children + nodes are dirtied + + Amount of internals which will get dirty or get allocated we estimate as 5% + of the childs + 1 balancing. 1 balancing is 2 neighbours, 2 new blocks and + the current block on the leaf level, 2 neighbour nodes + the current (or 1 + neighbour and 1 new and the current) on twig level, 2 neighbour nodes on + upper levels and 1 for a new root. So 5 for leaf level, 3 for twig level, + 2 on upper + 1 for root. + + Do not calculate the current node of the lowest level here - this is overhead + only. + + children is almost always 1 here. Exception is flow insertion +*/ +static reiser4_block_nr +max_balance_overhead(reiser4_block_nr childen, tree_level tree_height) +{ + reiser4_block_nr ten_percent; + + ten_percent = ((103 * childen) >> 10); + + /* If we have too many balancings at the time, tree height can raise on + more then 1. Assume that if tree_height is 5, it can raise on 1 only. + */ + return ((tree_height < 5 ? 5 : tree_height) * 2 + (4 + ten_percent)); +} + +/* this returns maximal possible number of nodes which can be modified plus + number of new nodes which can be required to perform insertion of one item + into the tree */ +/* it is only called when tree height changes, or gets initialized */ +reiser4_block_nr calc_estimate_one_insert(tree_level height) +{ + return 1 + max_balance_overhead(1, height); +} + +reiser4_block_nr estimate_one_insert_item(reiser4_tree * tree) +{ + return tree->estimate_one_insert; +} + +/* this returns maximal possible number of nodes which can be modified plus + number of new nodes which can be required to perform insertion of one unit + into an item in the tree */ +reiser4_block_nr estimate_one_insert_into_item(reiser4_tree * tree) +{ + /* estimate insert into item just like item insertion */ + return tree->estimate_one_insert; +} + +reiser4_block_nr estimate_one_item_removal(reiser4_tree * tree) +{ + /* on item removal reiser4 does not try to pack nodes more complact, so, + only one node may be dirtied on leaf level */ + return tree->estimate_one_insert; +} + +/* on leaf level insert_flow may add CARRY_FLOW_NEW_NODES_LIMIT new nodes and + dirty 3 existing nodes (insert point and both its neighbors). + Max_balance_overhead should estimate number of blocks which may change/get + added on internal levels */ +reiser4_block_nr estimate_insert_flow(tree_level height) +{ + return 3 + CARRY_FLOW_NEW_NODES_LIMIT + max_balance_overhead(3 + + CARRY_FLOW_NEW_NODES_LIMIT, + height); +} + +/* returnes max number of nodes can be occupied by disk cluster */ +static reiser4_block_nr estimate_cluster(struct inode *inode, int unprepped) +{ + int per_cluster; + per_cluster = (unprepped ? 
1 : cluster_nrpages(inode)); + return 3 + per_cluster + + max_balance_overhead(3 + per_cluster, + REISER4_MAX_ZTREE_HEIGHT); +} + +/* how many nodes might get dirty and added + during insertion of a disk cluster */ +reiser4_block_nr estimate_insert_cluster(struct inode *inode) +{ + return estimate_cluster(inode, 1); /* 24 */ +} + +/* how many nodes might get dirty and added + during update of a (prepped or unprepped) disk cluster */ +reiser4_block_nr estimate_update_cluster(struct inode *inode) +{ + return estimate_cluster(inode, 0); /* 44, for 64K-cluster */ +} + +/* How many nodes occupied by a disk cluster might get dirty. + Note that this estimation is not precise (i.e. disk cluster + can occupy more nodes). + Q: Why we don't use precise estimation? + A: 1.Because precise estimation is fairly bad: 65536 nodes + for 64K logical cluster, it means 256M of dead space on + a partition + 2.It is a very rare case when disk cluster occupies more + nodes then this estimation returns. +*/ +reiser4_block_nr estimate_dirty_cluster(struct inode *inode) +{ + return cluster_nrpages(inode) + 4; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/export_ops.c b/fs/reiser4/export_ops.c new file mode 100644 index 000000000000..a54957eec5ff --- /dev/null +++ b/fs/reiser4/export_ops.c @@ -0,0 +1,325 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "inode.h" +#include "plugin/plugin.h" + +/* + * Supported file-handle types + */ +typedef enum { + FH_WITH_PARENT = 0x10, /* file handle with parent */ + FH_WITHOUT_PARENT = 0x11 /* file handle without parent */ +} reiser4_fhtype; + +#define NFSERROR (255) + +/* initialize place-holder for object */ +static void object_on_wire_init(reiser4_object_on_wire *o) +{ + o->plugin = NULL; +} + +/* finish with @o */ +static void object_on_wire_done(reiser4_object_on_wire *o) +{ + if (o->plugin != NULL) + o->plugin->wire.done(o); +} + +/* + * read serialized object identity from @addr and store information about + * object in @obj. This is dual to encode_inode(). + */ +static char *decode_inode(struct super_block *s, char *addr, + reiser4_object_on_wire * obj) +{ + file_plugin *fplug; + + /* identifier of object plugin is stored in the first two bytes, + * followed by... */ + fplug = file_plugin_by_disk_id(reiser4_get_tree(s), (d16 *) addr); + if (fplug != NULL) { + addr += sizeof(d16); + obj->plugin = fplug; + assert("nikita-3520", fplug->wire.read != NULL); + /* plugin specific encoding of object identity. */ + addr = fplug->wire.read(addr, obj); + } else + addr = ERR_PTR(RETERR(-EINVAL)); + return addr; +} + +static struct dentry *reiser4_get_dentry(struct super_block *super, + void *data); +/** + * reiser4_decode_fh: decode on-wire object - helper function + * for fh_to_dentry, fh_to_parent export operations; + * @super: super block; + * @addr: onwire object to be decoded; + * + * Returns dentry referring to the object being decoded. 
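+ *
+ * On-wire layout consumed here (see decode_inode()/encode_inode()):
+ *
+ *	[ d16 file plugin id | plugin-specific object identity ]
+ *
+ * and, for FH_WITH_PARENT handles, the same pair repeated for the parent
+ * directory (handled by reiser4_fh_to_parent()).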
+ */ +static struct dentry *reiser4_decode_fh(struct super_block * super, + char * addr) +{ + reiser4_object_on_wire object; + + object_on_wire_init(&object); + + addr = decode_inode(super, addr, &object); + if (!IS_ERR(addr)) { + struct dentry *d; + d = reiser4_get_dentry(super, &object); + if (d != NULL && !IS_ERR(d)) + /* FIXME check for -ENOMEM */ + reiser4_get_dentry_fsdata(d)->stateless = 1; + addr = (char *)d; + } + object_on_wire_done(&object); + return (void *)addr; +} + +static struct dentry *reiser4_fh_to_dentry(struct super_block *sb, + struct fid *fid, + int fh_len, int fh_type) +{ + reiser4_context *ctx; + struct dentry *d; + + assert("edward-1536", + fh_type == FH_WITH_PARENT || fh_type == FH_WITHOUT_PARENT); + + ctx = reiser4_init_context(sb); + if (IS_ERR(ctx)) + return (struct dentry *)ctx; + + d = reiser4_decode_fh(sb, (char *)fid->raw); + + reiser4_exit_context(ctx); + return d; +} + +static struct dentry *reiser4_fh_to_parent(struct super_block *sb, + struct fid *fid, + int fh_len, int fh_type) +{ + char * addr; + struct dentry * d; + reiser4_context *ctx; + file_plugin *fplug; + + if (fh_type == FH_WITHOUT_PARENT) + return NULL; + assert("edward-1537", fh_type == FH_WITH_PARENT); + + ctx = reiser4_init_context(sb); + if (IS_ERR(ctx)) + return (struct dentry *)ctx; + addr = (char *)fid->raw; + /* extract 2-bytes file plugin id */ + fplug = file_plugin_by_disk_id(reiser4_get_tree(sb), (d16 *)addr); + if (fplug == NULL) { + d = ERR_PTR(RETERR(-EINVAL)); + goto exit; + } + addr += sizeof(d16); + /* skip previously encoded object */ + addr = fplug->wire.read(addr, NULL /* skip */); + if (IS_ERR(addr)) { + d = (struct dentry *)addr; + goto exit; + } + /* @extract and decode parent object */ + d = reiser4_decode_fh(sb, addr); + exit: + reiser4_exit_context(ctx); + return d; +} + +/* + * Object serialization support. + * + * To support knfsd file system provides export_operations that are used to + * construct and interpret NFS file handles. As a generalization of this, + * reiser4 object plugins have serialization support: it provides methods to + * create on-wire representation of identity of reiser4 object, and + * re-create/locate object given its on-wire identity. + * + */ + +/* + * return number of bytes that on-wire representation of @inode's identity + * consumes. + */ +static int encode_inode_size(struct inode *inode) +{ + assert("nikita-3514", inode != NULL); + assert("nikita-3515", inode_file_plugin(inode) != NULL); + assert("nikita-3516", inode_file_plugin(inode)->wire.size != NULL); + + return inode_file_plugin(inode)->wire.size(inode) + sizeof(d16); +} + +/* + * store on-wire representation of @inode's identity at the area beginning at + * @start. + */ +static char *encode_inode(struct inode *inode, char *start) +{ + assert("nikita-3517", inode != NULL); + assert("nikita-3518", inode_file_plugin(inode) != NULL); + assert("nikita-3519", inode_file_plugin(inode)->wire.write != NULL); + + /* + * first, store two-byte identifier of object plugin, then + */ + save_plugin_id(file_plugin_to_plugin(inode_file_plugin(inode)), + (d16 *) start); + start += sizeof(d16); + /* + * call plugin to serialize object's identity + */ + return inode_file_plugin(inode)->wire.write(inode, start); +} + +/* this returns number of 32 bit long numbers encoded in @lenp. 
255 is + * returned if file handle can not be stored */ +/** + * reiser4_encode_fh - encode_fh of export operations + * @dentry: + * @fh: + * @lenp: + * @need_parent: + * + */ +static int +reiser4_encode_fh(struct inode *inode, __u32 *fh, int *lenp, + struct inode *parent) +{ + char *addr; + int need; + int delta; + int result; + bool need_parent; + reiser4_context *ctx; + + /* + * knfsd asks as to serialize @inode, and, optionally its + * parent @parent (if it is non-NULL). + * + * encode_inode() and encode_inode_size() is used to build + * representation of object and its parent. All hard work is done by + * object plugins. + */ + need_parent = (parent != NULL); + addr = (char *)fh; + + need = encode_inode_size(inode); + if (need < 0) + return NFSERROR; + if (need_parent) { + delta = encode_inode_size(parent); + if (delta < 0) + return NFSERROR; + need += delta; + } + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + if (need <= sizeof(__u32) * (*lenp)) { + addr = encode_inode(inode, addr); + if (need_parent) + addr = encode_inode(parent, addr); + + /* store in lenp number of 32bit words required for file + * handle. */ + *lenp = (need + sizeof(__u32) - 1) >> 2; + result = need_parent ? FH_WITH_PARENT : FH_WITHOUT_PARENT; + } else + /* no enough space in file handle */ + result = NFSERROR; + reiser4_exit_context(ctx); + return result; +} + +/** + * reiser4_get_dentry_parent - get_parent of export operations + * @child: + * + */ +static struct dentry *reiser4_get_dentry_parent(struct dentry *child) +{ + struct inode *dir; + dir_plugin *dplug; + struct dentry *result; + reiser4_context *ctx; + + assert("nikita-3527", child != NULL); + + dir = child->d_inode; + assert("nikita-3529", dir != NULL); + + ctx = reiser4_init_context(dir->i_sb); + if (IS_ERR(ctx)) + return (void *)ctx; + + dplug = inode_dir_plugin(dir); + assert("nikita-3531", ergo(dplug != NULL, dplug->get_parent != NULL)); + + if (unlikely(dplug == NULL)) { + reiser4_exit_context(ctx); + return ERR_PTR(RETERR(-ENOTDIR)); + } + result = dplug->get_parent(dir); + reiser4_exit_context(ctx); + return result; +} + +/** + * reiser4_get_dentry - get_dentry of export operations + * @super: + * @data: + * + * + */ +static struct dentry *reiser4_get_dentry(struct super_block *super, void *data) +{ + reiser4_object_on_wire *o; + + assert("nikita-3522", super != NULL); + assert("nikita-3523", data != NULL); + /* + * this is only supposed to be called by + * + * reiser4_decode_fh->find_exported_dentry + * + * so, reiser4_context should be here already. + */ + assert("nikita-3526", is_in_reiser4_context()); + + o = (reiser4_object_on_wire *)data; + assert("nikita-3524", o->plugin != NULL); + assert("nikita-3525", o->plugin->wire.get != NULL); + + return o->plugin->wire.get(super, o); +} + +struct export_operations reiser4_export_operations = { + .encode_fh = reiser4_encode_fh, + .fh_to_dentry = reiser4_fh_to_dentry, + .fh_to_parent = reiser4_fh_to_parent, + .get_parent = reiser4_get_dentry_parent, +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/flush.c b/fs/reiser4/flush.c new file mode 100644 index 000000000000..b908dede8e8b --- /dev/null +++ b/fs/reiser4/flush.c @@ -0,0 +1,3522 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* The design document for this file is at http://www.namesys.com/v4/v4.html. 
*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/plugin.h" +#include "plugin/object.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "carry.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "page_cache.h" +#include "wander.h" +#include "super.h" +#include "entd.h" +#include "reiser4.h" +#include "flush.h" +#include "writeout.h" + +#include <asm/atomic.h> +#include <linux/fs.h> /* for struct super_block */ +#include <linux/mm.h> /* for struct page */ +#include <linux/bio.h> /* for struct bio */ +#include <linux/pagemap.h> +#include <linux/blkdev.h> + +/* IMPLEMENTATION NOTES */ + +/* PARENT-FIRST: Some terminology: A parent-first traversal is a way of + assigning a total order to the nodes of the tree in which the parent is + placed before its children, which are ordered (recursively) in left-to-right + order. When we speak of a "parent-first preceder", it describes the node that + "came before in forward parent-first order". When we speak of a "parent-first + follower", it describes the node that "comes next in parent-first order" + (alternatively the node that "came before in reverse parent-first order"). + + The following pseudo-code prints the nodes of a tree in forward parent-first + order: + + void parent_first (node) + { + print_node (node); + if (node->level > leaf) { + for (i = 0; i < num_children; i += 1) { + parent_first (node->child[i]); + } + } + } +*/ + +/* JUST WHAT ARE WE TRYING TO OPTIMIZE, HERE? The idea is to optimize block + allocation so that a left-to-right scan of the tree's data (i.e., the leaves + in left-to-right order) can be accomplished with sequential reads, which + results in reading nodes in their parent-first order. This is a + read-optimization aspect of the flush algorithm, and there is also a + write-optimization aspect, which is that we wish to make large sequential + writes to the disk by allocating or reallocating blocks so that they can be + written in sequence. Sometimes the read-optimization and write-optimization + goals conflict with each other, as we discuss in more detail below. +*/ + +/* STATE BITS: The flush code revolves around the state of the jnodes it covers. + Here are the relevant jnode->state bits and their relevence to flush: + + JNODE_DIRTY: If a node is dirty, it must be flushed. But in order to be + written it must be allocated first. In order to be considered allocated, + the jnode must have exactly one of { JNODE_OVRWR, JNODE_RELOC } set. These + two bits are exclusive, and all dirtied jnodes eventually have one of these + bits set during each transaction. + + JNODE_CREATED: The node was freshly created in its transaction and has no + previous block address, so it is unconditionally assigned to be relocated, + although this is mainly for code-convenience. It is not being 'relocated' + from anything, but in almost every regard it is treated as part of the + relocate set. The JNODE_CREATED bit remains set even after JNODE_RELOC is + set, so the actual relocate can be distinguished from the + created-and-allocated set easily: relocate-set members (belonging to the + preserve-set) have (JNODE_RELOC) set and created-set members which have no + previous location to preserve have (JNODE_RELOC | JNODE_CREATED) set. + + JNODE_OVRWR: The node belongs to atom's overwrite set. 
The flush algorithm + made the decision to maintain the pre-existing location for this node and + it will be written to the wandered-log. + + JNODE_RELOC: The flush algorithm made the decision to relocate this block + (if it was not created, see note above). A block with JNODE_RELOC set is + eligible for early-flushing and may be submitted during flush_empty_queues. + When the JNODE_RELOC bit is set on a znode, the parent node's internal item + is modified and the znode is rehashed. + + JNODE_SQUEEZABLE: Before shifting everything left, the flush algorithm + scans the node and calls plugin->f.squeeze() method for its items. By this + technology we update disk clusters of cryptcompress objects. Also if + leftmost point that was found by flush scan has this flag (races with + write(), rare case) the flush algorythm makes the decision to pass it to + squalloc() in spite of its flushprepped status for squeezing, not for + repeated allocation. + + JNODE_FLUSH_QUEUED: This bit is set when a call to flush enters the jnode + into its flush queue. This means the jnode is not on any clean or dirty + list, instead it is moved to one of the flush queue (see flush_queue.h) + object private list. This prevents multiple concurrent flushes from + attempting to start flushing from the same node. + + (DEAD STATE BIT) JNODE_FLUSH_BUSY: This bit was set during the bottom-up + squeeze-and-allocate on a node while its children are actively being + squeezed and allocated. This flag was created to avoid submitting a write + request for a node while its children are still being allocated and + squeezed. Then flush queue was re-implemented to allow unlimited number of + nodes be queued. This flag support was commented out in source code because + we decided that there was no reason to submit queued nodes before + jnode_flush() finishes. However, current code calls fq_write() during a + slum traversal and may submit "busy nodes" to disk. Probably we can + re-enable the JNODE_FLUSH_BUSY bit support in future. + + With these state bits, we describe a test used frequently in the code below, + jnode_is_flushprepped()(and the spin-lock-taking jnode_check_flushprepped()). + The test for "flushprepped" returns true if any of the following are true: + + - The node is not dirty + - The node has JNODE_RELOC set + - The node has JNODE_OVRWR set + + If either the node is not dirty or it has already been processed by flush + (and assigned JNODE_OVRWR or JNODE_RELOC), then it is prepped. If + jnode_is_flushprepped() returns true then flush has work to do on that node. +*/ + +/* FLUSH_PREP_ONCE_PER_TRANSACTION: Within a single transaction a node is never + flushprepped twice (unless an explicit call to flush_unprep is made as + described in detail below). For example a node is dirtied, allocated, and + then early-flushed to disk and set clean. Before the transaction commits, the + page is dirtied again and, due to memory pressure, the node is flushed again. + The flush algorithm will not relocate the node to a new disk location, it + will simply write it to the same, previously relocated position again. +*/ + +/* THE BOTTOM-UP VS. TOP-DOWN ISSUE: This code implements a bottom-up algorithm + where we start at a leaf node and allocate in parent-first order by iterating + to the right. At each step of the iteration, we check for the right neighbor. + Before advancing to the right neighbor, we check if the current position and + the right neighbor share the same parent. 
If they do not share the same + parent, the parent is allocated before the right neighbor. + + This process goes recursively up the tree and squeeze nodes level by level as + long as the right neighbor and the current position have different parents, + then it allocates the right-neighbors-with-different-parents on the way back + down. This process is described in more detail in + flush_squalloc_changed_ancestor and the recursive function + squalloc_one_changed_ancestor. But the purpose here is not to discuss the + specifics of the bottom-up approach as it is to contrast the bottom-up and + top-down approaches. + + The top-down algorithm was implemented earlier (April-May 2002). In the + top-down approach, we find a starting point by scanning left along each level + past dirty nodes, then going up and repeating the process until the left node + and the parent node are clean. We then perform a parent-first traversal from + the starting point, which makes allocating in parent-first order trivial. + After one subtree has been allocated in this manner, we move to the right, + try moving upward, then repeat the parent-first traversal. + + Both approaches have problems that need to be addressed. Both are + approximately the same amount of code, but the bottom-up approach has + advantages in the order it acquires locks which, at the very least, make it + the better approach. At first glance each one makes the other one look + simpler, so it is important to remember a few of the problems with each one. + + Main problem with the top-down approach: When you encounter a clean child + during the parent-first traversal, what do you do? You would like to avoid + searching through a large tree of nodes just to find a few dirty leaves at + the bottom, and there is not an obvious solution. One of the advantages of + the top-down approach is that during the parent-first traversal you check + every child of a parent to see if it is dirty. In this way, the top-down + approach easily handles the main problem of the bottom-up approach: + unallocated children. + + The unallocated children problem is that before writing a node to disk we + must make sure that all of its children are allocated. Otherwise, the writing + the node means extra I/O because the node will have to be written again when + the child is finally allocated. + + WE HAVE NOT YET ELIMINATED THE UNALLOCATED CHILDREN PROBLEM. Except for bugs, + this should not cause any file system corruption, it only degrades I/O + performance because a node may be written when it is sure to be written at + least one more time in the same transaction when the remaining children are + allocated. What follows is a description of how we will solve the problem. +*/ + +/* HANDLING UNALLOCATED CHILDREN: During flush we may allocate a parent node, + then proceeding in parent first order, allocate some of its left-children, + then encounter a clean child in the middle of the parent. We do not allocate + the clean child, but there may remain unallocated (dirty) children to the + right of the clean child. If we were to stop flushing at this moment and + write everything to disk, the parent might still contain unallocated + children. + + We could try to allocate all the descendents of every node that we allocate, + but this is not necessary. Doing so could result in allocating the entire + tree: if the root node is allocated then every unallocated node would have to + be allocated before flushing. Actually, we do not have to write a node just + because we allocate it. 
It is possible to allocate but not write a node + during flush, when it still has unallocated children. However, this approach + is probably not optimal for the following reason. + + The flush algorithm is designed to allocate nodes in parent-first order in an + attempt to optimize reads that occur in the same order. Thus we are + read-optimizing for a left-to-right scan through all the leaves in the + system, and we are hoping to write-optimize at the same time because those + nodes will be written together in batch. What happens, however, if we assign + a block number to a node in its read-optimized order but then avoid writing + it because it has unallocated children? In that situation, we lose out on the + write-optimization aspect because a node will have to be written again to the + its location on the device, later, which likely means seeking back to that + location. + + So there are tradeoffs. We can choose either: + + A. Allocate all unallocated children to preserve both write-optimization and + read-optimization, but this is not always desirable because it may mean + having to allocate and flush very many nodes at once. + + B. Defer writing nodes with unallocated children, keep their read-optimized + locations, but sacrifice write-optimization because those nodes will be + written again. + + C. Defer writing nodes with unallocated children, but do not keep their + read-optimized locations. Instead, choose to write-optimize them later, when + they are written. To facilitate this, we "undo" the read-optimized allocation + that was given to the node so that later it can be write-optimized, thus + "unpreparing" the flush decision. This is a case where we disturb the + FLUSH_PREP_ONCE_PER_TRANSACTION rule described above. By a call to + flush_unprep() we will: if the node was wandered, unset the JNODE_OVRWR bit; + if the node was relocated, unset the JNODE_RELOC bit, non-deferred-deallocate + its block location, and set the JNODE_CREATED bit, effectively setting the + node back to an unallocated state. + + We will take the following approach in v4.0: for twig nodes we will always + finish allocating unallocated children (A). For nodes with (level > TWIG) + we will defer writing and choose write-optimization (C). + + To summarize, there are several parts to a solution that avoids the problem + with unallocated children: + + FIXME-ZAM: Still no one approach is implemented to eliminate the + "UNALLOCATED CHILDREN" problem because there was an experiment which was done + showed that we have 1-2 nodes with unallocated children for thousands of + written nodes. The experiment was simple like coping/deletion of linux kernel + sources. However the problem can arise in more complex tests. I think we have + jnode_io_hook to insert a check for unallocated children and see what kind of + problem we have. + + 1. When flush reaches a stopping point (e.g. a clean node) it should continue + calling squeeze-and-allocate on any remaining unallocated children. + FIXME: Difficulty to implement: should be simple -- amounts to adding a while + loop to jnode_flush, see comments in that function. + + 2. When flush reaches flush_empty_queue(), some of the (level > TWIG) nodes + may still have unallocated children. If the twig level has unallocated + children it is an assertion failure. If a higher-level node has unallocated + children, then it should be explicitly de-allocated by a call to + flush_unprep(). + FIXME: Difficulty to implement: should be simple. + + 3. 
(CPU-Optimization) Checking whether a node has unallocated children may + consume more CPU cycles than we would like, and it is possible (but medium + complexity) to optimize this somewhat in the case where large sub-trees are + flushed. The following observation helps: if both the left- and + right-neighbor of a node are processed by the flush algorithm then the node + itself is guaranteed to have all of its children allocated. However, the cost + of this check may not be so expensive after all: it is not needed for leaves + and flush can guarantee this property for twigs. That leaves only (level > + TWIG) nodes that have to be checked, so this optimization only helps if at + least three (level > TWIG) nodes are flushed in one pass, and the savings + will be very small unless there are many more (level > TWIG) nodes. But if + there are many (level > TWIG) nodes then the number of blocks being written + will be very large, so the savings may be insignificant. That said, the idea + is to maintain both the left and right edges of nodes that are processed in + flush. When flush_empty_queue() is called, a relatively simple test will + tell whether the (level > TWIG) node is on the edge. If it is on the edge, + the slow check is necessary, but if it is in the interior then it can be + assumed to have all of its children allocated. FIXME: medium complexity to + implement, but simple to verify given that we must have a slow check anyway. + + 4. (Optional) This part is optional, not for v4.0--flush should work + independently of whether this option is used or not. Called RAPID_SCAN, the + idea is to amend the left-scan operation to take unallocated children into + account. Normally, the left-scan operation goes left as long as adjacent + nodes are dirty up until some large maximum value (FLUSH_SCAN_MAXNODES) at + which point it stops and begins flushing. But scan-left may stop at a + position where there are unallocated children to the left with the same + parent. When RAPID_SCAN is enabled, the ordinary scan-left operation stops + after FLUSH_RELOCATE_THRESHOLD, which is much smaller than + FLUSH_SCAN_MAXNODES, then procedes with a rapid scan. The rapid scan skips + all the interior children of a node--if the leftmost child of a twig is + dirty, check its left neighbor (the rightmost child of the twig to the left). + If the left neighbor of the leftmost child is also dirty, then continue the + scan at the left twig and repeat. This option will cause flush to allocate + more twigs in a single pass, but it also has the potential to write many more + nodes than would otherwise be written without the RAPID_SCAN option. + RAPID_SCAN was partially implemented, code removed August 12, 2002 by JMACD. +*/ + +/* FLUSH CALLED ON NON-LEAF LEVEL. Most of our design considerations assume that + the starting point for flush is a leaf node, but actually the flush code + cares very little about whether or not this is true. It is possible that all + the leaf nodes are flushed and dirty parent nodes still remain, in which case + jnode_flush() is called on a non-leaf argument. Flush doesn't care--it treats + the argument node as if it were a leaf, even when it is not. This is a simple + approach, and there may be a more optimal policy but until a problem with + this approach is discovered, simplest is probably best. + + NOTE: In this case, the ordering produced by flush is parent-first only if + you ignore the leaves. This is done as a matter of simplicity and there is + only one (shaky) justification. 
When an atom commits, it flushes all leaf + level nodes first, followed by twigs, and so on. With flushing done in this + order, if flush is eventually called on a non-leaf node it means that + (somehow) we reached a point where all leaves are clean and only internal + nodes need to be flushed. If that it the case, then it means there were no + leaves that were the parent-first preceder/follower of the parent. This is + expected to be a rare case, which is why we do nothing special about it. + However, memory pressure may pass an internal node to flush when there are + still dirty leaf nodes that need to be flushed, which could prove our + original assumptions "inoperative". If this needs to be fixed, then + scan_left/right should have special checks for the non-leaf levels. For + example, instead of passing from a node to the left neighbor, it should pass + from the node to the left neighbor's rightmost descendent (if dirty). + +*/ + +/* UNIMPLEMENTED AS YET: REPACKING AND RESIZING. We walk the tree in 4MB-16MB + chunks, dirtying everything and putting it into a transaction. We tell the + allocator to allocate the blocks as far as possible towards one end of the + logical device--the left (starting) end of the device if we are walking from + left to right, the right end of the device if we are walking from right to + left. We then make passes in alternating directions, and as we do this the + device becomes sorted such that tree order and block number order fully + correlate. + + Resizing is done by shifting everything either all the way to the left or all + the way to the right, and then reporting the last block. +*/ + +/* RELOCATE DECISIONS: The code makes a decision to relocate in several places. + This descibes the policy from the highest level: + + The FLUSH_RELOCATE_THRESHOLD parameter: If we count this many consecutive + nodes on the leaf level during flush-scan (right, left), then we + unconditionally decide to relocate leaf nodes. + + Otherwise, there are two contexts in which we make a decision to relocate: + + 1. The REVERSE PARENT-FIRST context: Implemented in reverse_allocate + During the initial stages of flush, after scan-right completes, we want to + ask the question: should we relocate this leaf node and thus dirty the parent + node. Then if the node is a leftmost child its parent is its own parent-first + preceder, thus we repeat the question at the next level up, and so on. In + these cases we are moving in the reverse-parent first direction. + + There is another case which is considered the reverse direction, which comes + at the end of a twig in reverse_relocate_end_of_twig(). As we finish + processing a twig we may reach a point where there is a clean twig to the + right with a dirty leftmost child. In this case, we may wish to relocate the + child by testing if it should be relocated relative to its parent. + + 2. The FORWARD PARENT-FIRST context: Testing for forward relocation is done + in allocate_znode. What distinguishes the forward parent-first case from the + reverse-parent first case is that the preceder has already been allocated in + the forward case, whereas in the reverse case we don't know what the preceder + is until we finish "going in reverse". That simplifies the forward case + considerably, and there we actually use the block allocator to determine + whether, e.g., a block closer to the preceder is available. +*/ + +/* SQUEEZE_LEFT_EDGE: Unimplemented idea for future consideration. 
The idea is, + once we finish scan-left and find a starting point, if the parent's left + neighbor is dirty then squeeze the parent's left neighbor and the parent. + This may change the flush-starting-node's parent. Repeat until the child's + parent is stable. If the child is a leftmost child, repeat this left-edge + squeezing operation at the next level up. Note that we cannot allocate + extents during this or they will be out of parent-first order. There is also + some difficult coordinate maintenence issues. We can't do a tree search to + find coordinates again (because we hold locks), we have to determine them + from the two nodes being squeezed. Looks difficult, but has potential to + increase space utilization. */ + +/* Flush-scan helper functions. */ +static void scan_init(flush_scan * scan); +static void scan_done(flush_scan * scan); + +/* Flush-scan algorithm. */ +static int scan_left(flush_scan * scan, flush_scan * right, jnode * node, + unsigned limit); +static int scan_right(flush_scan * scan, jnode * node, unsigned limit); +static int scan_common(flush_scan * scan, flush_scan * other); +static int scan_formatted(flush_scan * scan); +static int scan_unformatted(flush_scan * scan, flush_scan * other); +static int scan_by_coord(flush_scan * scan); + +/* Initial flush-point ancestor allocation. */ +static int alloc_pos_and_ancestors(flush_pos_t *pos); +static int alloc_one_ancestor(const coord_t *coord, flush_pos_t *pos); +static int set_preceder(const coord_t *coord_in, flush_pos_t *pos); + +/* Main flush algorithm. + Note on abbreviation: "squeeze and allocate" == "squalloc". */ +static int squalloc(flush_pos_t *pos); + +/* Flush squeeze implementation. */ +static int squeeze_right_non_twig(znode * left, znode * right); +static int shift_one_internal_unit(znode * left, znode * right); + +/* Flush reverse parent-first relocation routines. */ +static int reverse_allocate_parent(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos); + +/* Flush allocate write-queueing functions: */ +static int allocate_znode(znode * node, const coord_t *parent_coord, + flush_pos_t *pos); +static int lock_parent_and_allocate_znode(znode *, flush_pos_t *); + +/* Flush helper functions: */ +static int jnode_lock_parent_coord(jnode * node, + coord_t *coord, + lock_handle * parent_lh, + load_count * parent_zh, + znode_lock_mode mode, int try); +static int neighbor_in_slum(znode * node, lock_handle * right_lock, sideof side, + znode_lock_mode mode, int check_dirty, int expected); +static int znode_same_parents(znode * a, znode * b); + +static int znode_check_flushprepped(znode * node) +{ + return jnode_check_flushprepped(ZJNODE(node)); +} +static void update_znode_dkeys(znode * left, znode * right); + +/* Flush position functions */ +static void pos_init(flush_pos_t *pos); +static int pos_valid(flush_pos_t *pos); +static void pos_done(flush_pos_t *pos); +static int pos_stop(flush_pos_t *pos); + +/* check that @org is first jnode extent unit, if extent is unallocated, + * because all jnodes of unallocated extent are dirty and of the same atom. */ +#define checkchild(scan) \ +assert("nikita-3435", \ + ergo(scan->direction == LEFT_SIDE && \ + (scan->parent_coord.node->level == TWIG_LEVEL) && \ + jnode_is_unformatted(scan->node) && \ + extent_is_unallocated(&scan->parent_coord), \ + extent_unit_index(&scan->parent_coord) == index_jnode(scan->node))) + +/* This flush_cnt variable is used to track the number of concurrent flush + operations, useful for debugging. 
It is initialized in txnmgr.c out of + laziness (because flush has no static initializer function...) */ +ON_DEBUG(atomic_t flush_cnt;) + +/* check fs backing device for write congestion */ +static int check_write_congestion(void) +{ + struct super_block *sb; + struct backing_dev_info *bdi; + + sb = reiser4_get_current_sb(); + bdi = inode_to_bdi(reiser4_get_super_fake(sb)); + return bdi_write_congested(bdi); +} + +/* conditionally write flush queue */ +static int write_prepped_nodes(flush_pos_t *pos) +{ + int ret; + + assert("zam-831", pos); + assert("zam-832", pos->fq); + + if (!(pos->flags & JNODE_FLUSH_WRITE_BLOCKS)) + return 0; + + if (check_write_congestion()) + return 0; + + ret = reiser4_write_fq(pos->fq, pos->nr_written, + WRITEOUT_SINGLE_STREAM | WRITEOUT_FOR_PAGE_RECLAIM); + return ret; +} + +/* Proper release all flush pos. resources then move flush position to new + locked node */ +static void move_flush_pos(flush_pos_t *pos, lock_handle * new_lock, + load_count * new_load, const coord_t *new_coord) +{ + assert("zam-857", new_lock->node == new_load->node); + + if (new_coord) { + assert("zam-858", new_coord->node == new_lock->node); + coord_dup(&pos->coord, new_coord); + } else { + coord_init_first_unit(&pos->coord, new_lock->node); + } + + if (pos->child) { + jput(pos->child); + pos->child = NULL; + } + + move_load_count(&pos->load, new_load); + done_lh(&pos->lock); + move_lh(&pos->lock, new_lock); +} + +/* delete empty node which link from the parent still exists. */ +static int delete_empty_node(znode * node) +{ + reiser4_key smallest_removed; + + assert("zam-1019", node != NULL); + assert("zam-1020", node_is_empty(node)); + assert("zam-1023", znode_is_wlocked(node)); + + return reiser4_delete_node(node, &smallest_removed, NULL, 1); +} + +/* Prepare flush position for alloc_pos_and_ancestors() and squalloc() */ +static int prepare_flush_pos(flush_pos_t *pos, jnode * org) +{ + int ret; + load_count load; + lock_handle lock; + + init_lh(&lock); + init_load_count(&load); + + if (jnode_is_znode(org)) { + ret = longterm_lock_znode(&lock, JZNODE(org), + ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI); + if (ret) + return ret; + + ret = incr_load_count_znode(&load, JZNODE(org)); + if (ret) + return ret; + + pos->state = + (jnode_get_level(org) == + LEAF_LEVEL) ? POS_ON_LEAF : POS_ON_INTERNAL; + move_flush_pos(pos, &lock, &load, NULL); + } else { + coord_t parent_coord; + ret = jnode_lock_parent_coord(org, &parent_coord, &lock, + &load, ZNODE_WRITE_LOCK, 0); + if (ret) + goto done; + if (!item_is_extent(&parent_coord)) { + /* file was converted to tail, org became HB, we found + internal item */ + ret = -EAGAIN; + goto done; + } + + pos->state = POS_ON_EPOINT; + move_flush_pos(pos, &lock, &load, &parent_coord); + pos->child = jref(org); + if (extent_is_unallocated(&parent_coord) + && extent_unit_index(&parent_coord) != index_jnode(org)) { + /* @org is not first child of its parent unit. This may + happen because longerm lock of its parent node was + released between scan_left and scan_right. For now + work around this having flush to repeat */ + ret = -EAGAIN; + } + } + +done: + done_load_count(&load); + done_lh(&lock); + return ret; +} + +static txmod_plugin *get_txmod_plugin(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + return txmod_plugin_by_id(get_super_private(sb)->txmod); +} + +/* TODO LIST (no particular order): */ +/* I have labelled most of the legitimate FIXME comments in this file with + letters to indicate which issue they relate to. 
There are a few miscellaneous + FIXMEs with specific names mentioned instead that need to be + inspected/resolved. */ +/* B. There is an issue described in reverse_allocate having to do with an + imprecise is_preceder? check having to do with partially-dirty extents. The + code that sets preceder hints and computes the preceder is basically + untested. Careful testing needs to be done that preceder calculations are + done correctly, since if it doesn't affect correctness we will not catch this + stuff during regular testing. */ +/* C. EINVAL, E_DEADLOCK, E_NO_NEIGHBOR, ENOENT handling. It is unclear which of + these are considered expected but unlikely conditions. Flush currently + returns 0 (i.e., success but no progress, i.e., restart) whenever it receives + any of these in jnode_flush(). Many of the calls that may produce one of + these return values (i.e., longterm_lock_znode, reiser4_get_parent, + reiser4_get_neighbor, ...) check some of these values themselves and, for + instance, stop flushing instead of resulting in a restart. If any of these + results are true error conditions then flush will go into a busy-loop, as we + noticed during testing when a corrupt tree caused find_child_ptr to return + ENOENT. It needs careful thought and testing of corner conditions. +*/ +/* D. Atomicity of flush_prep against deletion and flush concurrency. Suppose a + created block is assigned a block number then early-flushed to disk. It is + dirtied again and flush is called again. Concurrently, that block is deleted, + and the de-allocation of its block number does not need to be deferred, since + it is not part of the preserve set (i.e., it didn't exist before the + transaction). I think there may be a race condition where flush writes the + dirty, created block after the non-deferred deallocated block number is + re-allocated, making it possible to write deleted data on top of non-deleted + data. Its just a theory, but it needs to be thought out. */ +/* F. bio_alloc() failure is not handled gracefully. */ +/* G. Unallocated children. */ +/* H. Add a WANDERED_LIST to the atom to clarify the placement of wandered + blocks. */ +/* I. Rename flush-scan to scan-point, (flush-pos to flush-point?) */ + +/* JNODE_FLUSH: MAIN ENTRY POINT */ +/* This is the main entry point for flushing a jnode and its dirty neighborhood + (dirty neighborhood is named "slum"). Jnode_flush() is called if reiser4 has + to write dirty blocks to disk, it happens when Linux VM decides to reduce + number of dirty pages or as a part of transaction commit. + + Our objective here is to prep and flush the slum the jnode belongs to. We + want to squish the slum together, and allocate the nodes in it as we squish + because allocation of children affects squishing of parents. + + The "argument" @node tells flush where to start. From there, flush finds the + left edge of the slum, and calls squalloc (in which nodes are squeezed and + allocated). To find a "better place" to start squalloc first we perform a + flush_scan. + + Flush-scanning may be performed in both left and right directions, but for + different purposes. When scanning to the left, we are searching for a node + that precedes a sequence of parent-first-ordered nodes which we will then + flush in parent-first order. During flush-scanning, we also take the + opportunity to count the number of consecutive leaf nodes. If this number is + past some threshold (FLUSH_RELOCATE_THRESHOLD), then we make a decision to + reallocate leaf nodes (thus favoring write-optimization). 
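+
+   Roughly, the policy decision made after the two scans amounts to the
+   assignment performed in jnode_flush() below, where the counts come from
+   the left and right flush_scan objects and FLUSH_RELOCATE_THRESHOLD stands
+   for the tunable sbinfo->flush.relocate_threshold used by the code:
+
+   leaf_relocate = JF_ISSET(node, JNODE_REPACK) ||
+                   (left_scan->count + right_scan->count >=
+                    FLUSH_RELOCATE_THRESHOLD);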
+ + Since the flush argument node can be anywhere in a sequence of dirty leaves, + there may also be dirty nodes to the right of the argument. If the scan-left + operation does not count at least FLUSH_RELOCATE_THRESHOLD nodes then we + follow it with a right-scan operation to see whether there is, in fact, + enough nodes to meet the relocate threshold. Each right- and left-scan + operation uses a single flush_scan object. + + After left-scan and possibly right-scan, we prepare a flush_position object + with the starting flush point or parent coordinate, which was determined + using scan-left. + + Next we call the main flush routine, squalloc, which iterates along the leaf + level, squeezing and allocating nodes (and placing them into the flush + queue). + + After squalloc returns we take extra steps to ensure that all the children + of the final twig node are allocated--this involves repeating squalloc + until we finish at a twig with no unallocated children. + + Finally, we call flush_empty_queue to submit write-requests to disk. If we + encounter any above-twig nodes during flush_empty_queue that still have + unallocated children, we flush_unprep them. + + Flush treats several "failure" cases as non-failures, essentially causing + them to start over. E_DEADLOCK is one example. + FIXME:(C) EINVAL, E_NO_NEIGHBOR, ENOENT: these should probably be handled + properly rather than restarting, but there are a bunch of cases to audit. +*/ + +static int +jnode_flush(jnode * node, long nr_to_write, long *nr_written, + flush_queue_t *fq, int flags) +{ + long ret = 0; + flush_scan *right_scan; + flush_scan *left_scan; + flush_pos_t *flush_pos; + int todo; + struct super_block *sb; + reiser4_super_info_data *sbinfo; + jnode *leftmost_in_slum = NULL; + + assert("jmacd-76619", lock_stack_isclean(get_current_lock_stack())); + assert("nikita-3022", reiser4_schedulable()); + + assert("nikita-3185", + get_current_super_private()->delete_mutex_owner != current); + + /* allocate right_scan, left_scan and flush_pos */ + right_scan = + kmalloc(2 * sizeof(*right_scan) + sizeof(*flush_pos), + reiser4_ctx_gfp_mask_get()); + if (right_scan == NULL) + return RETERR(-ENOMEM); + left_scan = right_scan + 1; + flush_pos = (flush_pos_t *) (left_scan + 1); + + sb = reiser4_get_current_sb(); + sbinfo = get_super_private(sb); + + /* Flush-concurrency debug code */ +#if REISER4_DEBUG + atomic_inc(&flush_cnt); +#endif + + reiser4_enter_flush(sb); + + /* Initialize a flush position. */ + pos_init(flush_pos); + + flush_pos->nr_written = nr_written; + flush_pos->fq = fq; + flush_pos->flags = flags; + flush_pos->nr_to_write = nr_to_write; + + scan_init(right_scan); + scan_init(left_scan); + + /* First scan left and remember the leftmost scan position. If the + leftmost position is unformatted we remember its parent_coord. We + scan until counting FLUSH_SCAN_MAXNODES. + + If starting @node is unformatted, at the beginning of left scan its + parent (twig level node, containing extent item) will be long term + locked and lock handle will be stored in the + @right_scan->parent_lock. This lock is used to start the rightward + scan without redoing the tree traversal (necessary to find parent) + and, hence, is kept during leftward scan. As a result, we have to + use try-lock when taking long term locks during the leftward scan. 
+ */ + ret = scan_left(left_scan, right_scan, + node, sbinfo->flush.scan_maxnodes); + if (ret != 0) + goto failed; + + leftmost_in_slum = jref(left_scan->node); + scan_done(left_scan); + + /* Then possibly go right to decide if we will use a policy of + relocating leaves. This is only done if we did not scan past (and + count) enough nodes during the leftward scan. If we do scan right, + we only care to go far enough to establish that at least + FLUSH_RELOCATE_THRESHOLD number of nodes are being flushed. The scan + limit is the difference between left_scan.count and the threshold. */ + + todo = sbinfo->flush.relocate_threshold - left_scan->count; + /* scan right is inherently deadlock prone, because we are + * (potentially) holding a lock on the twig node at this moment. + * FIXME: this is incorrect comment: lock is not held */ + if (todo > 0) { + ret = scan_right(right_scan, node, (unsigned)todo); + if (ret != 0) + goto failed; + } + + /* Only the right-scan count is needed, release any rightward locks + right away. */ + scan_done(right_scan); + + /* ... and the answer is: we should relocate leaf nodes if at least + FLUSH_RELOCATE_THRESHOLD nodes were found. */ + flush_pos->leaf_relocate = JF_ISSET(node, JNODE_REPACK) || + (left_scan->count + right_scan->count >= + sbinfo->flush.relocate_threshold); + + /* Funny business here. We set the 'point' in the flush_position at + prior to starting squalloc regardless of whether the first point is + formatted or unformatted. Without this there would be an invariant, + in the rest of the code, that if the flush_position is unformatted + then flush_position->point is NULL and + flush_position->parent_{lock,coord} is set, and if the flush_position + is formatted then flush_position->point is non-NULL and no parent + info is set. + + This seems lazy, but it makes the initial calls to + reverse_allocate (which ask "is it the pos->point the leftmost + child of its parent") much easier because we know the first child + already. Nothing is broken by this, but the reasoning is subtle. + Holding an extra reference on a jnode during flush can cause us to + see nodes with HEARD_BANSHEE during squalloc, because nodes are not + removed from sibling lists until they have zero reference count. + Flush would never observe a HEARD_BANSHEE node on the left-edge of + flush, nodes are only deleted to the right. So if nothing is broken, + why fix it? + + NOTE-NIKITA actually, flush can meet HEARD_BANSHEE node at any + point and in any moment, because of the concurrent file system + activity (for example, truncate). */ + + /* Check jnode state after flush_scan completed. Having a lock on this + node or its parent (in case of unformatted) helps us in case of + concurrent flushing. */ + if (jnode_check_flushprepped(leftmost_in_slum) + && !jnode_convertible(leftmost_in_slum)) { + ret = 0; + goto failed; + } + + /* Now setup flush_pos using scan_left's endpoint. 
*/ + ret = prepare_flush_pos(flush_pos, leftmost_in_slum); + if (ret) + goto failed; + + if (znode_get_level(flush_pos->coord.node) == LEAF_LEVEL + && node_is_empty(flush_pos->coord.node)) { + znode *empty = flush_pos->coord.node; + + assert("zam-1022", !ZF_ISSET(empty, JNODE_HEARD_BANSHEE)); + ret = delete_empty_node(empty); + goto failed; + } + + if (jnode_check_flushprepped(leftmost_in_slum) + && !jnode_convertible(leftmost_in_slum)) { + ret = 0; + goto failed; + } + + /* Set pos->preceder and (re)allocate pos and its ancestors if it is + needed */ + ret = alloc_pos_and_ancestors(flush_pos); + if (ret) + goto failed; + + /* Do the main rightward-bottom-up squeeze and allocate loop. */ + ret = squalloc(flush_pos); + pos_stop(flush_pos); + if (ret) + goto failed; + + /* FIXME_NFQUCMPD: Here, handle the twig-special case for unallocated + children. First, the pos_stop() and pos_valid() routines should be + modified so that pos_stop() sets a flush_position->stop flag to 1 + without releasing the current position immediately--instead release + it in pos_done(). This is a better implementation than the current + one anyway. + + It is not clear that all fields of the flush_position should not be + released, but at the very least the parent_lock, parent_coord, and + parent_load should remain held because they are hold the last twig + when pos_stop() is called. + + When we reach this point in the code, if the parent_coord is set to + after the last item then we know that flush reached the end of a twig + (and according to the new flush queueing design, we will return now). + If parent_coord is not past the last item, we should check if the + current twig has any unallocated children to the right (we are not + concerned with unallocated children to the left--in that case the + twig itself should not have been allocated). If the twig has + unallocated children to the right, set the parent_coord to that + position and then repeat the call to squalloc. + + Testing for unallocated children may be defined in two ways: if any + internal item has a fake block number, it is unallocated; if any + extent item is unallocated then all of its children are unallocated. + But there is a more aggressive approach: if there are any dirty + children of the twig to the right of the current position, we may + wish to relocate those nodes now. Checking for potential relocation + is more expensive as it requires knowing whether there are any dirty + children that are not unallocated. The extent_needs_allocation should + be used after setting the correct preceder. + + When we reach the end of a twig at this point in the code, if the + flush can continue (when the queue is ready) it will need some + information on the future starting point. That should be stored away + in the flush_handle using a seal, I believe. Holding a jref() on the + future starting point may break other code that deletes that node. + */ + + /* FIXME_NFQUCMPD: Also, we don't want to do any flushing when flush is + called above the twig level. If the VM calls flush above the twig + level, do nothing and return (but figure out why this happens). The + txnmgr should be modified to only flush its leaf-level dirty list. + This will do all the necessary squeeze and allocate steps but leave + unallocated branches and possibly unallocated twigs (when the twig's + leftmost child is not dirty). After flushing the leaf level, the + remaining unallocated nodes should be given write-optimized + locations. 
(Possibly, the remaining unallocated twigs should be + allocated just before their leftmost child.) + */ + + /* Any failure reaches this point. */ +failed: + + switch (ret) { + case -E_REPEAT: + case -EINVAL: + case -E_DEADLOCK: + case -E_NO_NEIGHBOR: + case -ENOENT: + /* FIXME(C): Except for E_DEADLOCK, these should probably be + handled properly in each case. They already are handled in + many cases. */ + /* Something bad happened, but difficult to avoid... Try again! + */ + ret = 0; + } + + if (leftmost_in_slum) + jput(leftmost_in_slum); + + pos_done(flush_pos); + scan_done(left_scan); + scan_done(right_scan); + kfree(right_scan); + + ON_DEBUG(atomic_dec(&flush_cnt)); + + reiser4_leave_flush(sb); + + return ret; +} + +/* The reiser4 flush subsystem can be turned into "rapid flush mode" means that + * flusher should submit all prepped nodes immediately without keeping them in + * flush queues for long time. The reason for rapid flush mode is to free + * memory as fast as possible. */ + +#if REISER4_USE_RAPID_FLUSH + +/** + * submit all prepped nodes if rapid flush mode is set, + * turn rapid flush mode off. + */ + +static int rapid_flush(flush_pos_t *pos) +{ + if (!wbq_available()) + return 0; + + return write_prepped_nodes(pos); +} + +#else + +#define rapid_flush(pos) (0) + +#endif /* REISER4_USE_RAPID_FLUSH */ + +static jnode *find_flush_start_jnode(jnode *start, txn_atom * atom, + flush_queue_t *fq, int *nr_queued, + int flags) +{ + jnode * node; + + if (start != NULL) { + spin_lock_jnode(start); + if (!jnode_is_flushprepped(start)) { + assert("zam-1056", start->atom == atom); + node = start; + goto enter; + } + spin_unlock_jnode(start); + } + /* + * In this loop we process all already prepped (RELOC or OVRWR) and + * dirtied again nodes. The atom spin lock is not released until all + * dirty nodes processed or not prepped node found in the atom dirty + * lists. + */ + while ((node = find_first_dirty_jnode(atom, flags))) { + spin_lock_jnode(node); +enter: + assert("zam-881", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-898", !JF_ISSET(node, JNODE_OVRWR)); + + if (JF_ISSET(node, JNODE_WRITEBACK)) { + /* move node to the end of atom's writeback list */ + list_move_tail(&node->capture_link, ATOM_WB_LIST(atom)); + + /* + * jnode is not necessarily on dirty list: if it was + * dirtied when it was on flush queue - it does not get + * moved to dirty list + */ + ON_DEBUG(count_jnode(atom, node, NODE_LIST(node), + WB_LIST, 1)); + + } else if (jnode_is_znode(node) + && znode_above_root(JZNODE(node))) { + /* + * A special case for znode-above-root. The above-root + * (fake) znode is captured and dirtied when the tree + * height changes or when the root node is relocated. + * This causes atoms to fuse so that changes at the root + * are serialized. However, this node is never flushed. + * This special case used to be in lock.c to prevent the + * above-root node from ever being captured, but now + * that it is captured we simply prevent it from + * flushing. The log-writer code relies on this to + * properly log superblock modifications of the tree + * height. + */ + jnode_make_wander_nolock(node); + } else if (JF_ISSET(node, JNODE_RELOC)) { + queue_jnode(fq, node); + ++(*nr_queued); + } else + break; + + spin_unlock_jnode(node); + } + return node; +} + +/* Flush some nodes of current atom, usually slum, return -E_REPEAT if there are + * more nodes to flush, return 0 if atom's dirty lists empty and keep current + * atom locked, return other errors as they are. 
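+ *
+ * One possible caller pattern (illustrative sketch only): the atom must be
+ * spin-locked on entry, and it is returned locked only when 0 is returned,
+ * so a caller that wants to drain the atom re-takes the lock before
+ * retrying on -E_REPEAT:
+ *
+ *	atom = get_current_atom_locked();
+ *	do {
+ *		ret = flush_current_atom(flags, LONG_MAX, &nr_submitted,
+ *					 &atom, NULL);
+ *		if (ret == -E_REPEAT)
+ *			atom = get_current_atom_locked();
+ *	} while (ret == -E_REPEAT);
+ *	if (ret == 0)
+ *		spin_unlock_atom(atom);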
*/ +int +flush_current_atom(int flags, long nr_to_write, long *nr_submitted, + txn_atom ** atom, jnode *start) +{ + reiser4_super_info_data *sinfo = get_current_super_private(); + flush_queue_t *fq = NULL; + jnode *node; + int nr_queued; + int ret; + + assert("zam-889", atom != NULL && *atom != NULL); + assert_spin_locked(&((*atom)->alock)); + assert("zam-892", get_current_context()->trans->atom == *atom); + + BUG_ON(rofs_super(get_current_context()->super)); + + nr_to_write = LONG_MAX; + while (1) { + ret = reiser4_fq_by_atom(*atom, &fq); + if (ret != -E_REPEAT) + break; + *atom = get_current_atom_locked(); + } + if (ret) + return ret; + + assert_spin_locked(&((*atom)->alock)); + + /* parallel flushers limit */ + if (sinfo->tmgr.atom_max_flushers != 0) { + while ((*atom)->nr_flushers >= sinfo->tmgr.atom_max_flushers) { + /* An reiser4_atom_send_event() call is inside + reiser4_fq_put_nolock() which is called when flush is + finished and nr_flushers is decremented. */ + reiser4_atom_wait_event(*atom); + *atom = get_current_atom_locked(); + } + } + + /* count ourself as a flusher */ + (*atom)->nr_flushers++; + + writeout_mode_enable(); + + nr_queued = 0; + node = find_flush_start_jnode(start, *atom, fq, &nr_queued, flags); + + if (node == NULL) { + if (nr_queued == 0) { + (*atom)->nr_flushers--; + reiser4_fq_put_nolock(fq); + reiser4_atom_send_event(*atom); + /* current atom remains locked */ + writeout_mode_disable(); + return 0; + } + spin_unlock_atom(*atom); + } else { + jref(node); + BUG_ON((*atom)->super != node->tree->super); + spin_unlock_atom(*atom); + spin_unlock_jnode(node); + BUG_ON(nr_to_write == 0); + ret = jnode_flush(node, nr_to_write, nr_submitted, fq, flags); + jput(node); + } + + ret = + reiser4_write_fq(fq, nr_submitted, + WRITEOUT_SINGLE_STREAM | WRITEOUT_FOR_PAGE_RECLAIM); + + *atom = get_current_atom_locked(); + (*atom)->nr_flushers--; + reiser4_fq_put_nolock(fq); + reiser4_atom_send_event(*atom); + spin_unlock_atom(*atom); + + writeout_mode_disable(); + + if (ret == 0) + ret = -E_REPEAT; + + return ret; +} + +/** + * This function calls txmod->reverse_alloc_formatted() to make a + * reverse-parent-first relocation decision and then, if yes, it marks + * the parent dirty. + */ +static int reverse_allocate_parent(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + + if (!JF_ISSET(ZJNODE(parent_coord->node), JNODE_DIRTY)) { + txmod_plugin *txmod_plug = get_txmod_plugin(); + + if (!txmod_plug->reverse_alloc_formatted) + return 0; + ret = txmod_plug->reverse_alloc_formatted(node, + parent_coord, pos); + if (ret < 0) + return ret; + /* + * FIXME-ZAM: if parent is already relocated - + * we do not want to grab space, right? + */ + if (ret == 1) { + int grabbed; + + grabbed = get_current_context()->grabbed_blocks; + if (reiser4_grab_space_force((__u64) 1, BA_RESERVED) != + 0) + reiser4_panic("umka-1250", + "No space left during flush."); + + assert("jmacd-18923", + znode_is_write_locked(parent_coord->node)); + znode_make_dirty(parent_coord->node); + grabbed2free_mark(grabbed); + } + } + return 0; +} + +/* INITIAL ALLOCATE ANCESTORS STEP (REVERSE PARENT-FIRST ALLOCATION BEFORE + FORWARD PARENT-FIRST LOOP BEGINS) */ + +/* Get the leftmost child for given coord. 
*/ +static int get_leftmost_child_of_unit(const coord_t *coord, jnode ** child) +{ + int ret; + + ret = item_utmost_child(coord, LEFT_SIDE, child); + + if (ret) + return ret; + + if (IS_ERR(*child)) + return PTR_ERR(*child); + + return 0; +} + +/* This step occurs after the left- and right-scans are completed, before + starting the forward parent-first traversal. Here we attempt to allocate + ancestors of the starting flush point, which means continuing in the reverse + parent-first direction to the parent, grandparent, and so on (as long as the + child is a leftmost child). This routine calls a recursive process, + alloc_one_ancestor, which does the real work, except there is special-case + handling here for the first ancestor, which may be a twig. At each level + (here and alloc_one_ancestor), we check for relocation and then, if the child + is a leftmost child, repeat at the next level. On the way back down (the + recursion), we allocate the ancestors in parent-first order. */ +static int alloc_pos_and_ancestors(flush_pos_t *pos) +{ + int ret = 0; + lock_handle plock; + load_count pload; + coord_t pcoord; + + if (znode_check_flushprepped(pos->lock.node)) + return 0; + + coord_init_invalid(&pcoord, NULL); + init_lh(&plock); + init_load_count(&pload); + + if (pos->state == POS_ON_EPOINT) { + /* a special case for pos on twig level, where we already have + a lock on parent node. */ + /* The parent may not be dirty, in which case we should decide + whether to relocate the child now. If decision is made to + relocate the child, the parent is marked dirty. */ + ret = reverse_allocate_parent(pos->child, &pos->coord, pos); + if (ret) + goto exit; + + /* FIXME_NFQUCMPD: We only need to allocate the twig (if child + is leftmost) and the leaf/child, so recursion is not needed. + Levels above the twig will be allocated for + write-optimization before the transaction commits. */ + + /* Do the recursive step, allocating zero or more of our + * ancestors. */ + ret = alloc_one_ancestor(&pos->coord, pos); + + } else { + if (!znode_is_root(pos->lock.node)) { + /* all formatted nodes except tree root */ + ret = + reiser4_get_parent(&plock, pos->lock.node, + ZNODE_WRITE_LOCK); + if (ret) + goto exit; + + ret = incr_load_count_znode(&pload, plock.node); + if (ret) + goto exit; + + ret = + find_child_ptr(plock.node, pos->lock.node, &pcoord); + if (ret) + goto exit; + + ret = reverse_allocate_parent(ZJNODE(pos->lock.node), + &pcoord, + pos); + if (ret) + goto exit; + + ret = alloc_one_ancestor(&pcoord, pos); + if (ret) + goto exit; + } + + ret = allocate_znode(pos->lock.node, &pcoord, pos); + } +exit: + done_load_count(&pload); + done_lh(&plock); + return ret; +} + +/* This is the recursive step described in alloc_pos_and_ancestors, above. + Ignoring the call to set_preceder, which is the next function described, this + checks if the child is a leftmost child and returns if it is not. If the + child is a leftmost child it checks for relocation, possibly dirtying the + parent. Then it performs the recursive step. */ +static int alloc_one_ancestor(const coord_t *coord, flush_pos_t *pos) +{ + int ret = 0; + lock_handle alock; + load_count aload; + coord_t acoord; + + /* As we ascend at the left-edge of the region to flush, take this + opportunity at the twig level to find our parent-first preceder + unless we have already set it. 
*/ + if (pos->preceder.blk == 0) { + ret = set_preceder(coord, pos); + if (ret != 0) + return ret; + } + + /* If the ancestor is clean or already allocated, or if the child is not + a leftmost child, stop going up, even leaving coord->node not + flushprepped. */ + if (znode_check_flushprepped(coord->node) + || !coord_is_leftmost_unit(coord)) + return 0; + + init_lh(&alock); + init_load_count(&aload); + coord_init_invalid(&acoord, NULL); + + /* Only ascend to the next level if it is a leftmost child, but + write-lock the parent in case we will relocate the child. */ + if (!znode_is_root(coord->node)) { + + ret = + jnode_lock_parent_coord(ZJNODE(coord->node), &acoord, + &alock, &aload, ZNODE_WRITE_LOCK, + 0); + if (ret != 0) { + /* FIXME(C): check EINVAL, E_DEADLOCK */ + goto exit; + } + + ret = reverse_allocate_parent(ZJNODE(coord->node), + &acoord, pos); + if (ret != 0) + goto exit; + + /* Recursive call. */ + if (!znode_check_flushprepped(acoord.node)) { + ret = alloc_one_ancestor(&acoord, pos); + if (ret) + goto exit; + } + } + + /* Note: we call allocate with the parent write-locked (except at the + root) in case we relocate the child, in which case it will modify the + parent during this call. */ + ret = allocate_znode(coord->node, &acoord, pos); + +exit: + done_load_count(&aload); + done_lh(&alock); + return ret; +} + +/* During the reverse parent-first alloc_pos_and_ancestors process described + above there is a call to this function at the twig level. During + alloc_pos_and_ancestors we may ask: should this node be relocated (in reverse + parent-first context)? We repeat this process as long as the child is the + leftmost child, eventually reaching an ancestor of the flush point that is + not a leftmost child. The preceder of that ancestors, which is not a leftmost + child, is actually on the leaf level. The preceder of that block is the + left-neighbor of the flush point. The preceder of that block is the rightmost + child of the twig on the left. So, when alloc_pos_and_ancestors passes upward + through the twig level, it stops momentarily to remember the block of the + rightmost child of the twig on the left and sets it to the flush_position's + preceder_hint. + + There is one other place where we may set the flush_position's preceder hint, + which is during scan-left. +*/ +static int set_preceder(const coord_t *coord_in, flush_pos_t *pos) +{ + int ret; + coord_t coord; + lock_handle left_lock; + load_count left_load; + + coord_dup(&coord, coord_in); + + init_lh(&left_lock); + init_load_count(&left_load); + + /* FIXME(B): Same FIXME as in "Find the preceder" in + reverse_allocate. coord_is_leftmost_unit is not the right test + if the unformatted child is in the middle of the first extent unit.*/ + if (!coord_is_leftmost_unit(&coord)) { + coord_prev_unit(&coord); + } else { + ret = + reiser4_get_left_neighbor(&left_lock, coord.node, + ZNODE_READ_LOCK, GN_SAME_ATOM); + if (ret) { + /* If we fail for any reason it doesn't matter because + the preceder is only a hint. We are low-priority at + this point, so this must be the case. 
*/ + if (ret == -E_REPEAT || ret == -E_NO_NEIGHBOR || + ret == -ENOENT || ret == -EINVAL + || ret == -E_DEADLOCK) + ret = 0; + goto exit; + } + + ret = incr_load_count_znode(&left_load, left_lock.node); + if (ret) + goto exit; + + coord_init_last_unit(&coord, left_lock.node); + } + + ret = + item_utmost_child_real_block(&coord, RIGHT_SIDE, + &pos->preceder.blk); +exit: + check_preceder(pos->preceder.blk); + done_load_count(&left_load); + done_lh(&left_lock); + return ret; +} + +/* MAIN SQUEEZE AND ALLOCATE LOOP (THREE BIG FUNCTIONS) */ + +/* This procedure implements the outer loop of the flush algorithm. To put this + in context, here is the general list of steps taken by the flush routine as a + whole: + + 1. Scan-left + 2. Scan-right (maybe) + 3. Allocate initial flush position and its ancestors + 4. <handle extents> + 5. <squeeze and next position and its ancestors to-the-right, + then update position to-the-right> + 6. <repeat from #4 until flush is stopped> + + This procedure implements the loop in steps 4 through 6 in the above listing. + + Step 4: if the current flush position is an extent item (position on the twig + level), it allocates the extent (allocate_extent_item_in_place) then shifts + to the next coordinate. If the next coordinate's leftmost child needs + flushprep, we will continue. If the next coordinate is an internal item, we + descend back to the leaf level, otherwise we repeat a step #4 (labeled + ALLOC_EXTENTS below). If the "next coordinate" brings us past the end of the + twig level, then we call reverse_relocate_end_of_twig to possibly dirty the + next (right) twig, prior to step #5 which moves to the right. + + Step 5: calls squalloc_changed_ancestors, which initiates a recursive call up + the tree to allocate any ancestors of the next-right flush position that are + not also ancestors of the current position. Those ancestors (in top-down + order) are the next in parent-first order. We squeeze adjacent nodes on the + way up until the right node and current node share the same parent, then + allocate on the way back down. Finally, this step sets the flush position to + the next-right node. Then repeat steps 4 and 5. +*/ + +/* SQUEEZE CODE */ + +/* squalloc_right_twig helper function, cut a range of extent items from + cut node to->node from the beginning up to coord @to. */ +static int squalloc_right_twig_cut(coord_t *to, reiser4_key * to_key, + znode * left) +{ + coord_t from; + reiser4_key from_key; + + coord_init_first_unit(&from, to->node); + item_key_by_coord(&from, &from_key); + + return cut_node_content(&from, to, &from_key, to_key, NULL); +} + +/* Copy as much of the leading extents from @right to @left, allocating + unallocated extents as they are copied. Returns SQUEEZE_TARGET_FULL or + SQUEEZE_SOURCE_EMPTY when no more can be shifted. If the next item is an + internal item it calls shift_one_internal_unit and may then return + SUBTREE_MOVED. 
*/ +static int squeeze_right_twig(znode * left, znode * right, flush_pos_t *pos) +{ + int ret = SUBTREE_MOVED; + coord_t coord; /* used to iterate over items */ + reiser4_key stop_key; + reiser4_tree *tree; + txmod_plugin *txmod_plug = get_txmod_plugin(); + + assert("jmacd-2008", !node_is_empty(right)); + coord_init_first_unit(&coord, right); + + /* FIXME: can be optimized to cut once */ + while (!node_is_empty(coord.node) && item_is_extent(&coord)) { + ON_DEBUG(void *vp); + + assert("vs-1468", coord_is_leftmost_unit(&coord)); + ON_DEBUG(vp = shift_check_prepare(left, coord.node)); + + /* stop_key is used to find what was copied and what to cut */ + stop_key = *reiser4_min_key(); + ret = txmod_plug->squeeze_alloc_unformatted(left, + &coord, pos, + &stop_key); + if (ret != SQUEEZE_CONTINUE) { + ON_DEBUG(kfree(vp)); + break; + } + assert("vs-1465", !keyeq(&stop_key, reiser4_min_key())); + + /* Helper function to do the cutting. */ + set_key_offset(&stop_key, get_key_offset(&stop_key) - 1); + check_me("vs-1466", + squalloc_right_twig_cut(&coord, &stop_key, left) == 0); + + ON_DEBUG(shift_check(vp, left, coord.node)); + } + /* + * @left and @right nodes participated in the + * implicit shift, determined by the pair of + * functions: + * . squalloc_extent() - append units to the @left + * . squalloc_right_twig_cut() - cut the units from @right + * so update their delimiting keys + */ + tree = znode_get_tree(left); + write_lock_dk(tree); + update_znode_dkeys(left, right); + write_unlock_dk(tree); + + if (node_is_empty(coord.node)) + ret = SQUEEZE_SOURCE_EMPTY; + + if (ret == SQUEEZE_TARGET_FULL) + goto out; + + if (node_is_empty(right)) { + /* The whole right node was copied into @left. */ + assert("vs-464", ret == SQUEEZE_SOURCE_EMPTY); + goto out; + } + + coord_init_first_unit(&coord, right); + + if (!item_is_internal(&coord)) { + /* we do not want to squeeze anything else to left neighbor + because "slum" is over */ + ret = SQUEEZE_TARGET_FULL; + goto out; + } + assert("jmacd-433", item_is_internal(&coord)); + + /* Shift an internal unit. The child must be allocated before shifting + any more extents, so we stop here. */ + ret = shift_one_internal_unit(left, right); + +out: + assert("jmacd-8612", ret < 0 || ret == SQUEEZE_TARGET_FULL + || ret == SUBTREE_MOVED || ret == SQUEEZE_SOURCE_EMPTY); + + if (ret == SQUEEZE_TARGET_FULL) { + /* We submit prepped nodes here and expect that this @left twig + * will not be modified again during this jnode_flush() call. */ + int ret1; + + /* NOTE: seems like io is done under long term locks. */ + ret1 = write_prepped_nodes(pos); + if (ret1 < 0) + return ret1; + } + + return ret; +} + +#if REISER4_DEBUG +static void item_convert_invariant(flush_pos_t *pos) +{ + assert("edward-1225", coord_is_existing_item(&pos->coord)); + if (convert_data_attached(pos)) { + item_plugin *iplug = item_convert_plug(pos); + + assert("edward-1000", + iplug == item_plugin_by_coord(&pos->coord)); + assert("edward-1001", iplug->f.convert != NULL); + } else + assert("edward-1226", pos->child == NULL); +} +#else + +#define item_convert_invariant(pos) noop + +#endif + +/* + * Scan all node's items and apply for each one + * its ->convert() method. This method may: + * . resize the item; + * . kill the item; + * . insert a group of items/nodes on the right, + * which possess the following properties: + * . all new nodes are dirty and not convertible; + * . for all new items ->convert() method is a noop. + * + * NOTE: this function makes the tree unbalanced! 
+ * This intended to be used by flush squalloc() in a + * combination with squeeze procedure. + * + * GLOSSARY + * + * Chained nodes and items. + * Two neighboring nodes @left and @right are chained, + * iff the last item of @left and the first item of @right + * belong to the same item cluster. In this case those + * items are called chained. + */ +static int convert_node(flush_pos_t *pos, znode * node) +{ + int ret = 0; + item_plugin *iplug; + assert("edward-304", pos != NULL); + assert("edward-305", pos->child == NULL); + assert("edward-475", znode_convertible(node)); + assert("edward-669", znode_is_wlocked(node)); + assert("edward-1210", !node_is_empty(node)); + + if (znode_get_level(node) != LEAF_LEVEL) + /* unsupported */ + goto exit; + + coord_init_first_unit(&pos->coord, node); + + while (1) { + ret = 0; + coord_set_to_left(&pos->coord); + item_convert_invariant(pos); + + iplug = item_plugin_by_coord(&pos->coord); + assert("edward-844", iplug != NULL); + + if (iplug->f.convert) { + ret = iplug->f.convert(pos); + if (ret) + goto exit; + } + assert("edward-307", pos->child == NULL); + + if (coord_next_item(&pos->coord)) { + /* + * node is over + */ + if (convert_data_attached(pos)) + /* + * the last item was convertible and + * there still is an unprocesssed flow + */ + if (next_node_is_chained(pos)) { + /* + * next node contains items of + * the same disk cluster, + * so finish with this node + */ + update_chaining_state(pos, 0/* move + to next + node */); + break; + } + else { + /* + * perform one more iteration + * for the same item and the + * rest of flow + */ + update_chaining_state(pos, 1/* this + node */); + } + else + /* + * the last item wasn't convertible, or + * convert date was detached in the last + * iteration, + * go to next node + */ + break; + } else { + /* + * Node is not over, item position got decremented. + */ + if (convert_data_attached(pos)) { + /* + * disk cluster should be increased, so roll + * one item position back and perform the + * iteration with the previous item and the + * rest of attached data + */ + if (iplug != item_plugin_by_coord(&pos->coord)) + set_item_convert_count(pos, 0); + + ret = coord_prev_item(&pos->coord); + assert("edward-1003", !ret); + + update_chaining_state(pos, 1/* this node */); + } + else + /* + * previous item was't convertible, or + * convert date was detached in the last + * iteration, go to next item + */ + ; + } + } + JF_CLR(ZJNODE(node), JNODE_CONVERTIBLE); + znode_make_dirty(node); +exit: + assert("edward-1004", !ret); + return ret; +} + +/* Squeeze and allocate the right neighbor. This is called after @left and + its current children have been squeezed and allocated already. This + procedure's job is to squeeze and items from @right to @left. + + If at the leaf level, use the shift_everything_left memcpy-optimized + version of shifting (squeeze_right_leaf). + + If at the twig level, extents are allocated as they are shifted from @right + to @left (squalloc_right_twig). + + At any other level, shift one internal item and return to the caller + (squalloc_parent_first) so that the shifted-subtree can be processed in + parent-first order. + + When unit of internal item is moved, squeezing stops and SUBTREE_MOVED is + returned. When all content of @right is squeezed, SQUEEZE_SOURCE_EMPTY is + returned. If nothing can be moved into @left anymore, SQUEEZE_TARGET_FULL + is returned. 
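+
+   A caller reacts to these results roughly as follows (sketch only; the
+   real handling lives in the squalloc() loop together with locking,
+   loading and write-queueing):
+
+   ret = squeeze_right_neighbor(pos, left, right);
+   if (ret < 0)
+           stop with an error;
+   else if (ret == SQUEEZE_SOURCE_EMPTY)
+           @right has been drained, advance to its right neighbor;
+   else if (ret == SQUEEZE_TARGET_FULL)
+           @left is full, allocate @right and continue with it as the new
+           flush position;
+   else    (SUBTREE_MOVED)
+           an internal unit moved into @left, so squeeze and allocate the
+           corresponding child subtree before squeezing any further;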
+*/ + +static int squeeze_right_neighbor(flush_pos_t *pos, znode * left, + znode * right) +{ + int ret; + + /* FIXME it is possible to see empty hasn't-heard-banshee node in a + * tree owing to error (for example, ENOSPC) in write */ + /* assert("jmacd-9321", !node_is_empty(left)); */ + assert("jmacd-9322", !node_is_empty(right)); + assert("jmacd-9323", znode_get_level(left) == znode_get_level(right)); + + switch (znode_get_level(left)) { + case TWIG_LEVEL: + /* Shift with extent allocating until either an internal item + is encountered or everything is shifted or no free space + left in @left */ + ret = squeeze_right_twig(left, right, pos); + break; + + default: + /* All other levels can use shift_everything until we implement + per-item flush plugins. */ + ret = squeeze_right_non_twig(left, right); + break; + } + + assert("jmacd-2011", (ret < 0 || + ret == SQUEEZE_SOURCE_EMPTY + || ret == SQUEEZE_TARGET_FULL + || ret == SUBTREE_MOVED)); + return ret; +} + +static int squeeze_right_twig_and_advance_coord(flush_pos_t *pos, + znode * right) +{ + int ret; + + ret = squeeze_right_twig(pos->lock.node, right, pos); + if (ret < 0) + return ret; + if (ret > 0) { + coord_init_after_last_item(&pos->coord, pos->lock.node); + return ret; + } + + coord_init_last_unit(&pos->coord, pos->lock.node); + return 0; +} + +/* forward declaration */ +static int squalloc_upper_levels(flush_pos_t *, znode *, znode *); + +/* do a fast check for "same parents" condition before calling + * squalloc_upper_levels() */ +static inline int check_parents_and_squalloc_upper_levels(flush_pos_t *pos, + znode * left, + znode * right) +{ + if (znode_same_parents(left, right)) + return 0; + + return squalloc_upper_levels(pos, left, right); +} + +/* Check whether the parent of given @right node needs to be processes + ((re)allocated) prior to processing of the child. If @left and @right do not + share at least the parent of the @right is after the @left but before the + @right in parent-first order, we have to (re)allocate it before the @right + gets (re)allocated. */ +static int squalloc_upper_levels(flush_pos_t *pos, znode * left, znode * right) +{ + int ret; + + lock_handle left_parent_lock; + lock_handle right_parent_lock; + + load_count left_parent_load; + load_count right_parent_load; + + init_lh(&left_parent_lock); + init_lh(&right_parent_lock); + + init_load_count(&left_parent_load); + init_load_count(&right_parent_load); + + ret = reiser4_get_parent(&left_parent_lock, left, ZNODE_WRITE_LOCK); + if (ret) + goto out; + + ret = reiser4_get_parent(&right_parent_lock, right, ZNODE_WRITE_LOCK); + if (ret) + goto out; + + /* Check for same parents */ + if (left_parent_lock.node == right_parent_lock.node) + goto out; + + if (znode_check_flushprepped(right_parent_lock.node)) { + /* Keep parent-first order. In the order, the right parent node + stands before the @right node. If it is already allocated, + we set the preceder (next block search start point) to its + block number, @right node should be allocated after it. + + However, preceder is set only if the right parent is on twig + level. The explanation is the following: new branch nodes are + allocated over already allocated children while the tree + grows, it is difficult to keep tree ordered, we assume that + only leaves and twings are correctly allocated. So, only + twigs are used as a preceder for allocating of the rest of + the slum. 
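[Editorial aside] A preceder is nothing more than a block-allocator search hint: start looking for a free block just past the block of the node that precedes the current one in parent-first order, so nodes flushed together land close together on disk. A self-contained toy version of that policy over a byte-array bitmap; the names and the plain first-fit strategy are illustrative assumptions, not the reiser4 allocator:

/* Toy bitmap allocator driven by a preceder hint. */
static long toy_alloc_near(unsigned char *bitmap, unsigned long nbits,
			   unsigned long preceder)
{
	unsigned long blk;

	for (blk = preceder + 1; blk < nbits; blk++) {
		if (!(bitmap[blk / 8] & (1u << (blk % 8)))) {
			bitmap[blk / 8] |= 1u << (blk % 8);
			return (long)blk;	/* new preceder for the next allocation */
		}
	}
	return -1;				/* nothing free to the right of the hint */
}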
*/ + if (znode_get_level(right_parent_lock.node) == TWIG_LEVEL) { + pos->preceder.blk = + *znode_get_block(right_parent_lock.node); + check_preceder(pos->preceder.blk); + } + goto out; + } + + ret = incr_load_count_znode(&left_parent_load, left_parent_lock.node); + if (ret) + goto out; + + ret = incr_load_count_znode(&right_parent_load, right_parent_lock.node); + if (ret) + goto out; + + ret = + squeeze_right_neighbor(pos, left_parent_lock.node, + right_parent_lock.node); + /* We stop if error. We stop if some items/units were shifted (ret == 0) + * and thus @right changed its parent. It means we have not process + * right_parent node prior to processing of @right. Positive return + * values say that shifting items was not happen because of "empty + * source" or "target full" conditions. */ + if (ret <= 0) + goto out; + + /* parent(@left) and parent(@right) may have different parents also. We + * do a recursive call for checking that. */ + ret = + check_parents_and_squalloc_upper_levels(pos, left_parent_lock.node, + right_parent_lock.node); + if (ret) + goto out; + + /* allocate znode when going down */ + ret = lock_parent_and_allocate_znode(right_parent_lock.node, pos); + +out: + done_load_count(&left_parent_load); + done_load_count(&right_parent_load); + + done_lh(&left_parent_lock); + done_lh(&right_parent_lock); + + return ret; +} + +/* Check the leftmost child "flushprepped" status, also returns true if child + * node was not found in cache. */ +static int leftmost_child_of_unit_check_flushprepped(const coord_t *coord) +{ + int ret; + int prepped; + + jnode *child; + + ret = get_leftmost_child_of_unit(coord, &child); + + if (ret) + return ret; + + if (child) { + prepped = jnode_check_flushprepped(child); + jput(child); + } else { + /* We consider not existing child as a node which slum + processing should not continue to. Not cached node is clean, + so it is flushprepped. */ + prepped = 1; + } + + return prepped; +} + +/* (re)allocate znode with automated getting parent node */ +static int lock_parent_and_allocate_znode(znode * node, flush_pos_t *pos) +{ + int ret; + lock_handle parent_lock; + load_count parent_load; + coord_t pcoord; + + assert("zam-851", znode_is_write_locked(node)); + + init_lh(&parent_lock); + init_load_count(&parent_load); + + ret = reiser4_get_parent(&parent_lock, node, ZNODE_WRITE_LOCK); + if (ret) + goto out; + + ret = incr_load_count_znode(&parent_load, parent_lock.node); + if (ret) + goto out; + + ret = find_child_ptr(parent_lock.node, node, &pcoord); + if (ret) + goto out; + + ret = allocate_znode(node, &pcoord, pos); + +out: + done_load_count(&parent_load); + done_lh(&parent_lock); + return ret; +} + +/* + * Process nodes on the leaf level until unformatted node or + * rightmost node in the slum reached. + * + * This function is a complicated beast, because it calls a + * static machine ->convert_node() for every node, which, in + * turn, scans node's items and does something for each of them. 
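[Editorial aside] As an orientation aid before the full function, here is a compressed outline of one level of that processing; every toy_* helper is a placeholder for the corresponding reiser4 machinery, and locking, load counts and most error paths are deliberately omitted:

/* Outline only: convert the current node, fetch and convert its right
 * neighbor, squeeze right into left, (re)allocate the neighbor and its
 * ancestors parent-first, then advance the position. */
struct toy_znode;
int toy_convert(struct toy_znode *n);
int toy_squeeze(struct toy_znode *left, struct toy_znode *right);
int toy_alloc_parent_first(struct toy_znode *n);
struct toy_znode *toy_right_neighbor(struct toy_znode *n);

static int toy_flush_level(struct toy_znode *pos)
{
	int ret = toy_convert(pos);

	while (!ret) {
		struct toy_znode *right = toy_right_neighbor(pos);

		if (!right)
			break;			/* edge of the slum: stop on this level */
		ret = toy_convert(right);
		if (!ret)
			ret = toy_squeeze(pos, right);
		if (!ret)
			ret = toy_alloc_parent_first(right);
		if (!ret)
			pos = right;		/* advance the flush position */
	}
	return ret;
}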
+ */ +static int handle_pos_on_formatted(flush_pos_t *pos) +{ + int ret; + lock_handle right_lock; + load_count right_load; + + init_lh(&right_lock); + init_load_count(&right_load); + + if (znode_convertible(pos->lock.node)) { + ret = convert_node(pos, pos->lock.node); + if (ret) + return ret; + } + while (1) { + assert("edward-1635", + ergo(node_is_empty(pos->lock.node), + ZF_ISSET(pos->lock.node, JNODE_HEARD_BANSHEE))); + /* + * First of all, grab a right neighbor + */ + if (convert_data(pos) && convert_data(pos)->right_locked) { + /* + * the right neighbor was locked by convert_node() + * transfer the lock from the "cache". + */ + move_lh(&right_lock, &convert_data(pos)->right_lock); + done_lh(&convert_data(pos)->right_lock); + convert_data(pos)->right_locked = 0; + } + else { + ret = neighbor_in_slum(pos->lock.node, &right_lock, + RIGHT_SIDE, ZNODE_WRITE_LOCK, + 1, 0); + if (ret) { + /* + * There is no right neighbor for some reasons, + * so finish with this level. + */ + assert("edward-1636", + !should_convert_right_neighbor(pos)); + break; + } + } + /* + * Check "flushprepped" status of the right neighbor. + * + * We don't prep(allocate) nodes for flushing twice. This can be + * suboptimal, or it can be optimal. For now we choose to live + * with the risk that it will be suboptimal because it would be + * quite complex to code it to be smarter. + */ + if (znode_check_flushprepped(right_lock.node) + && !znode_convertible(right_lock.node)) { + assert("edward-1005", + !should_convert_right_neighbor(pos)); + pos_stop(pos); + break; + } + ret = incr_load_count_znode(&right_load, right_lock.node); + if (ret) + break; + if (znode_convertible(right_lock.node)) { + assert("edward-1643", + ergo(convert_data(pos), + convert_data(pos)->right_locked == 0)); + + ret = convert_node(pos, right_lock.node); + if (ret) + break; + } + else + assert("edward-1637", + !should_convert_right_neighbor(pos)); + + if (node_is_empty(pos->lock.node)) { + /* + * Current node became empty after conversion + * and, hence, was removed from the tree; + * Advance the current position to the right neighbor. + */ + assert("edward-1638", + ZF_ISSET(pos->lock.node, JNODE_HEARD_BANSHEE)); + move_flush_pos(pos, &right_lock, &right_load, NULL); + continue; + } + if (node_is_empty(right_lock.node)) { + assert("edward-1639", + ZF_ISSET(right_lock.node, JNODE_HEARD_BANSHEE)); + /* + * The right neighbor became empty after + * convertion, and hence it was deleted + * from the tree - skip this. + * Since current node is not empty, + * we'll obtain a correct pointer to + * the next right neighbor + */ + done_load_count(&right_load); + done_lh(&right_lock); + continue; + } + /* + * At this point both, current node and its right + * neigbor are converted and not empty. + * Squeeze them _before_ going upward. + */ + ret = squeeze_right_neighbor(pos, pos->lock.node, + right_lock.node); + if (ret < 0) + break; + if (node_is_empty(right_lock.node)) { + assert("edward-1640", + ZF_ISSET(right_lock.node, JNODE_HEARD_BANSHEE)); + /* + * right neighbor was squeezed completely, + * and hence has been deleted from the tree. + * Skip this. 
+ */ + done_load_count(&right_load); + done_lh(&right_lock); + continue; + } + if (znode_check_flushprepped(right_lock.node)) { + if (should_convert_right_neighbor(pos)) { + /* + * in spite of flushprepped status of the node, + * its right slum neighbor should be converted + */ + assert("edward-953", convert_data(pos)); + assert("edward-954", item_convert_data(pos)); + + move_flush_pos(pos, &right_lock, &right_load, NULL); + continue; + } else { + pos_stop(pos); + break; + } + } + /* + * parent(right_lock.node) has to be processed before + * (right_lock.node) due to "parent-first" allocation + * order + */ + ret = check_parents_and_squalloc_upper_levels(pos, + pos->lock.node, + right_lock.node); + if (ret) + break; + /* + * (re)allocate _after_ going upward + */ + ret = lock_parent_and_allocate_znode(right_lock.node, pos); + if (ret) + break; + if (should_terminate_squalloc(pos)) { + set_item_convert_count(pos, 0); + break; + } + /* + * advance the flush position to the right neighbor + */ + move_flush_pos(pos, &right_lock, &right_load, NULL); + + ret = rapid_flush(pos); + if (ret) + break; + } + check_convert_info(pos); + done_load_count(&right_load); + done_lh(&right_lock); + /* + * This function indicates via pos whether to stop or go to twig or + * continue on current level + */ + return ret; + +} + +/* Process nodes on leaf level until unformatted node or rightmost node in the + * slum reached. */ +static int handle_pos_on_leaf(flush_pos_t *pos) +{ + int ret; + + assert("zam-845", pos->state == POS_ON_LEAF); + + ret = handle_pos_on_formatted(pos); + + if (ret == -E_NO_NEIGHBOR) { + /* cannot get right neighbor, go process extents. */ + pos->state = POS_TO_TWIG; + return 0; + } + + return ret; +} + +/* Process slum on level > 1 */ +static int handle_pos_on_internal(flush_pos_t *pos) +{ + assert("zam-850", pos->state == POS_ON_INTERNAL); + return handle_pos_on_formatted(pos); +} + +/* check whether squalloc should stop before processing given extent */ +static int squalloc_extent_should_stop(flush_pos_t *pos) +{ + assert("zam-869", item_is_extent(&pos->coord)); + + /* pos->child is a jnode handle_pos_on_extent() should start with in + * stead of the first child of the first extent unit. */ + if (pos->child) { + int prepped; + + assert("vs-1383", jnode_is_unformatted(pos->child)); + prepped = jnode_check_flushprepped(pos->child); + pos->pos_in_unit = + jnode_get_index(pos->child) - + extent_unit_index(&pos->coord); + assert("vs-1470", + pos->pos_in_unit < extent_unit_width(&pos->coord)); + assert("nikita-3434", + ergo(extent_is_unallocated(&pos->coord), + pos->pos_in_unit == 0)); + jput(pos->child); + pos->child = NULL; + + return prepped; + } + + pos->pos_in_unit = 0; + if (extent_is_unallocated(&pos->coord)) + return 0; + + return leftmost_child_of_unit_check_flushprepped(&pos->coord); +} + +/* Handle the case when regular reiser4 tree (znodes connected one to its + * neighbors by sibling pointers) is interrupted on leaf level by one or more + * unformatted nodes. By having a lock on twig level and use extent code + * routines to process unformatted nodes we swim around an irregular part of + * reiser4 tree. */ +static int handle_pos_on_twig(flush_pos_t *pos) +{ + int ret; + txmod_plugin *txmod_plug = get_txmod_plugin(); + + assert("zam-844", pos->state == POS_ON_EPOINT); + assert("zam-843", item_is_extent(&pos->coord)); + + /* We decide should we continue slum processing with current extent + unit: if leftmost child of current extent unit is flushprepped + (i.e. 
clean or already processed by flush) we stop squalloc(). There + is a fast check for unallocated extents which we assume contain all + not flushprepped nodes. */ + /* FIXME: Here we implement simple check, we are only looking on the + leftmost child. */ + ret = squalloc_extent_should_stop(pos); + if (ret != 0) { + pos_stop(pos); + return ret; + } + + while (pos_valid(pos) && coord_is_existing_unit(&pos->coord) + && item_is_extent(&pos->coord)) { + ret = txmod_plug->forward_alloc_unformatted(pos); + if (ret) + break; + coord_next_unit(&pos->coord); + } + + if (coord_is_after_rightmost(&pos->coord)) { + pos->state = POS_END_OF_TWIG; + return 0; + } + if (item_is_internal(&pos->coord)) { + pos->state = POS_TO_LEAF; + return 0; + } + + assert("zam-860", item_is_extent(&pos->coord)); + + /* "slum" is over */ + pos->state = POS_INVALID; + return 0; +} + +/* When we about to return flush position from twig to leaf level we can process + * the right twig node or move position to the leaf. This processes right twig + * if it is possible and jump to leaf level if not. */ +static int handle_pos_end_of_twig(flush_pos_t *pos) +{ + int ret; + lock_handle right_lock; + load_count right_load; + coord_t at_right; + jnode *child = NULL; + + assert("zam-848", pos->state == POS_END_OF_TWIG); + assert("zam-849", coord_is_after_rightmost(&pos->coord)); + + init_lh(&right_lock); + init_load_count(&right_load); + + /* We get a lock on the right twig node even it is not dirty because + * slum continues or discontinues on leaf level not on next twig. This + * lock on the right twig is needed for getting its leftmost child. */ + ret = + reiser4_get_right_neighbor(&right_lock, pos->lock.node, + ZNODE_WRITE_LOCK, GN_SAME_ATOM); + if (ret) + goto out; + + ret = incr_load_count_znode(&right_load, right_lock.node); + if (ret) + goto out; + + /* right twig could be not dirty */ + if (JF_ISSET(ZJNODE(right_lock.node), JNODE_DIRTY)) { + /* If right twig node is dirty we always attempt to squeeze it + * content to the left... */ +became_dirty: + ret = + squeeze_right_twig_and_advance_coord(pos, right_lock.node); + if (ret <= 0) { + /* pos->coord is on internal item, go to leaf level, or + * we have an error which will be caught in squalloc() + */ + pos->state = POS_TO_LEAF; + goto out; + } + + /* If right twig was squeezed completely we wave to re-lock + * right twig. now it is done through the top-level squalloc + * routine. */ + if (node_is_empty(right_lock.node)) + goto out; + + /* ... and prep it if it is not yet prepped */ + if (!znode_check_flushprepped(right_lock.node)) { + /* As usual, process parent before ... */ + ret = + check_parents_and_squalloc_upper_levels(pos, + pos->lock. + node, + right_lock. + node); + if (ret) + goto out; + + /* ... processing the child */ + ret = + lock_parent_and_allocate_znode(right_lock.node, + pos); + if (ret) + goto out; + } + } else { + coord_init_first_unit(&at_right, right_lock.node); + + /* check first child of next twig, should we continue there ? 
*/ + ret = get_leftmost_child_of_unit(&at_right, &child); + if (ret || child == NULL || jnode_check_flushprepped(child)) { + pos_stop(pos); + goto out; + } + + /* check clean twig for possible relocation */ + if (!znode_check_flushprepped(right_lock.node)) { + ret = reverse_allocate_parent(child, &at_right, pos); + if (ret) + goto out; + if (JF_ISSET(ZJNODE(right_lock.node), JNODE_DIRTY)) + goto became_dirty; + } + } + + assert("zam-875", znode_check_flushprepped(right_lock.node)); + + /* Update the preceder by a block number of just processed right twig + * node. The code above could miss the preceder updating because + * allocate_znode() could not be called for this node. */ + pos->preceder.blk = *znode_get_block(right_lock.node); + check_preceder(pos->preceder.blk); + + coord_init_first_unit(&at_right, right_lock.node); + assert("zam-868", coord_is_existing_unit(&at_right)); + + pos->state = item_is_extent(&at_right) ? POS_ON_EPOINT : POS_TO_LEAF; + move_flush_pos(pos, &right_lock, &right_load, &at_right); + +out: + done_load_count(&right_load); + done_lh(&right_lock); + + if (child) + jput(child); + + return ret; +} + +/* Move the pos->lock to leaf node pointed by pos->coord, check should we + * continue there. */ +static int handle_pos_to_leaf(flush_pos_t *pos) +{ + int ret; + lock_handle child_lock; + load_count child_load; + jnode *child; + + assert("zam-846", pos->state == POS_TO_LEAF); + assert("zam-847", item_is_internal(&pos->coord)); + + init_lh(&child_lock); + init_load_count(&child_load); + + ret = get_leftmost_child_of_unit(&pos->coord, &child); + if (ret) + return ret; + if (child == NULL) { + pos_stop(pos); + return 0; + } + + if (jnode_check_flushprepped(child)) { + pos->state = POS_INVALID; + goto out; + } + + ret = + longterm_lock_znode(&child_lock, JZNODE(child), ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (ret) + goto out; + + ret = incr_load_count_znode(&child_load, JZNODE(child)); + if (ret) + goto out; + + ret = allocate_znode(JZNODE(child), &pos->coord, pos); + if (ret) + goto out; + + /* move flush position to leaf level */ + pos->state = POS_ON_LEAF; + move_flush_pos(pos, &child_lock, &child_load, NULL); + + if (node_is_empty(JZNODE(child))) { + ret = delete_empty_node(JZNODE(child)); + pos->state = POS_INVALID; + } +out: + done_load_count(&child_load); + done_lh(&child_lock); + jput(child); + + return ret; +} + +/* move pos from leaf to twig, and move lock from leaf to twig. 
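[Editorial aside] The handle_pos_* routines in this file are wired into a per-state dispatch table that the squalloc() loop below indexes by pos->state. A minimal standalone model of that dispatch-table pattern, with toy states and handlers standing in for the real ones:

typedef enum { TOY_ON_LEAF, TOY_ON_TWIG, TOY_DONE, TOY_NR_STATES } toy_state;

struct toy_pos { toy_state state; };

static int toy_leaf(struct toy_pos *p) { p->state = TOY_ON_TWIG; return 0; }
static int toy_twig(struct toy_pos *p) { p->state = TOY_DONE;    return 0; }

/* one handler per state; unused slots stay NULL and are never called */
static int (*const toy_handlers[TOY_NR_STATES])(struct toy_pos *) = {
	[TOY_ON_LEAF] = toy_leaf,
	[TOY_ON_TWIG] = toy_twig,
};

static int toy_run(struct toy_pos *p)
{
	int ret = 0;

	while (ret == 0 && p->state != TOY_DONE)
		ret = toy_handlers[p->state](p);
	return ret;
}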
*/ +/* Move pos->lock to upper (twig) level */ +static int handle_pos_to_twig(flush_pos_t *pos) +{ + int ret; + + lock_handle parent_lock; + load_count parent_load; + coord_t pcoord; + + assert("zam-852", pos->state == POS_TO_TWIG); + + init_lh(&parent_lock); + init_load_count(&parent_load); + + ret = + reiser4_get_parent(&parent_lock, pos->lock.node, ZNODE_WRITE_LOCK); + if (ret) + goto out; + + ret = incr_load_count_znode(&parent_load, parent_lock.node); + if (ret) + goto out; + + ret = find_child_ptr(parent_lock.node, pos->lock.node, &pcoord); + if (ret) + goto out; + + assert("zam-870", item_is_internal(&pcoord)); + coord_next_item(&pcoord); + + if (coord_is_after_rightmost(&pcoord)) + pos->state = POS_END_OF_TWIG; + else if (item_is_extent(&pcoord)) + pos->state = POS_ON_EPOINT; + else { + /* Here we understand that getting -E_NO_NEIGHBOR in + * handle_pos_on_leaf() was because of just a reaching edge of + * slum */ + pos_stop(pos); + goto out; + } + + move_flush_pos(pos, &parent_lock, &parent_load, &pcoord); + +out: + done_load_count(&parent_load); + done_lh(&parent_lock); + + return ret; +} + +typedef int (*pos_state_handle_t) (flush_pos_t *); +static pos_state_handle_t flush_pos_handlers[] = { + /* process formatted nodes on leaf level, keep lock on a leaf node */ + [POS_ON_LEAF] = handle_pos_on_leaf, + /* process unformatted nodes, keep lock on twig node, pos->coord points + * to extent currently being processed */ + [POS_ON_EPOINT] = handle_pos_on_twig, + /* move a lock from leaf node to its parent for further processing of + unformatted nodes */ + [POS_TO_TWIG] = handle_pos_to_twig, + /* move a lock from twig to leaf level when a processing of unformatted + * nodes finishes, pos->coord points to the leaf node we jump to */ + [POS_TO_LEAF] = handle_pos_to_leaf, + /* after processing last extent in the twig node, attempting to shift + * items from the twigs right neighbor and process them while shifting*/ + [POS_END_OF_TWIG] = handle_pos_end_of_twig, + /* process formatted nodes on internal level, keep lock on an internal + node */ + [POS_ON_INTERNAL] = handle_pos_on_internal +}; + +/* Advance flush position horizontally, prepare for flushing ((re)allocate, + * squeeze, encrypt) nodes and their ancestors in "parent-first" order */ +static int squalloc(flush_pos_t *pos) +{ + int ret = 0; + + /* maybe needs to be made a case statement with handle_pos_on_leaf as + * first case, for greater CPU efficiency? Measure and see.... -Hans */ + while (pos_valid(pos)) { + ret = flush_pos_handlers[pos->state] (pos); + if (ret < 0) + break; + + ret = rapid_flush(pos); + if (ret) + break; + } + + /* any positive value or -E_NO_NEIGHBOR are legal return codes for + handle_pos* routines, -E_NO_NEIGHBOR means that slum edge was + reached */ + if (ret > 0 || ret == -E_NO_NEIGHBOR) + ret = 0; + + return ret; +} + +static void update_ldkey(znode * node) +{ + reiser4_key ldkey; + + assert_rw_write_locked(&(znode_get_tree(node)->dk_lock)); + if (node_is_empty(node)) + return; + + znode_set_ld_key(node, leftmost_key_in_node(node, &ldkey)); +} + +/* this is to be called after calling of shift node's method to shift data from + @right to @left. 
It sets left delimiting keys of @left and @right to keys of + first items of @left and @right correspondingly and sets right delimiting key + of @left to first key of @right */ +static void update_znode_dkeys(znode * left, znode * right) +{ + assert_rw_write_locked(&(znode_get_tree(right)->dk_lock)); + assert("vs-1629", (znode_is_write_locked(left) && + znode_is_write_locked(right))); + + /* we need to update left delimiting of left if it was empty before + shift */ + update_ldkey(left); + update_ldkey(right); + if (node_is_empty(right)) + znode_set_rd_key(left, znode_get_rd_key(right)); + else + znode_set_rd_key(left, znode_get_ld_key(right)); +} + +/* try to shift everything from @right to @left. If everything was shifted - + @right is removed from the tree. Result is the number of bytes shifted. */ +static int +shift_everything_left(znode * right, znode * left, carry_level * todo) +{ + coord_t from; + node_plugin *nplug; + carry_plugin_info info; + + coord_init_after_last_item(&from, right); + + nplug = node_plugin_by_node(right); + info.doing = NULL; + info.todo = todo; + return nplug->shift(&from, left, SHIFT_LEFT, + 1 /* delete @right if it becomes empty */ , + 1 + /* move coord @from to node @left if everything will + be shifted */ + , + &info); +} + +/* Shift as much as possible from @right to @left using the memcpy-optimized + shift_everything_left. @left and @right are formatted neighboring nodes on + leaf level. */ +static int squeeze_right_non_twig(znode * left, znode * right) +{ + int ret; + carry_pool *pool; + carry_level *todo; + + assert("nikita-2246", znode_get_level(left) == znode_get_level(right)); + + if (!JF_ISSET(ZJNODE(left), JNODE_DIRTY) || + !JF_ISSET(ZJNODE(right), JNODE_DIRTY)) + return SQUEEZE_TARGET_FULL; + + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + todo = (carry_level *) (pool + 1); + init_carry_level(todo, pool); + + ret = shift_everything_left(right, left, todo); + if (ret > 0) { + /* something was shifted */ + reiser4_tree *tree; + __u64 grabbed; + + znode_make_dirty(left); + znode_make_dirty(right); + + /* update delimiting keys of nodes which participated in + shift. FIXME: it would be better to have this in shift + node's operation. But it can not be done there. Nobody + remembers why, though + */ + tree = znode_get_tree(left); + write_lock_dk(tree); + update_znode_dkeys(left, right); + write_unlock_dk(tree); + + /* Carry is called to update delimiting key and, maybe, to + remove empty node. */ + grabbed = get_current_context()->grabbed_blocks; + ret = reiser4_grab_space_force(tree->height, BA_RESERVED); + assert("nikita-3003", ret == 0); /* reserved space is + exhausted. Ask Hans. */ + ret = reiser4_carry(todo, NULL/* previous level */); + grabbed2free_mark(grabbed); + } else { + /* Shifting impossible, we return appropriate result code */ + ret = + node_is_empty(right) ? SQUEEZE_SOURCE_EMPTY : + SQUEEZE_TARGET_FULL; + } + + done_carry_pool(pool); + + return ret; +} + +#if REISER4_DEBUG +static int sibling_link_is_ok(const znode *left, const znode *right) +{ + int result; + + read_lock_tree(znode_get_tree(left)); + result = (left->right == right && left == right->left); + read_unlock_tree(znode_get_tree(left)); + return result; +} +#endif + +/* Shift first unit of first item if it is an internal one. Return + SQUEEZE_TARGET_FULL if it fails to shift an item, otherwise return + SUBTREE_MOVED. 
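[Editorial aside] Both squeeze_right_non_twig() above and shift_one_internal_unit() below finish a successful shift with the same update_znode_dkeys() call. The right-boundary half of the invariant it maintains is simple enough to restate as a toy, using plain integers in place of reiser4_key (illustrative only; the real code also refreshes both nodes' left delimiting keys):

struct toy_node {
	int nr_items;
	int first_key;	/* key of the first item, i.e. the left delimiting key */
	int rd_key;	/* right delimiting key */
};

static void toy_update_dkeys(struct toy_node *left, struct toy_node *right)
{
	if (right->nr_items == 0)
		left->rd_key = right->rd_key;	 /* @right dies: inherit its right bound */
	else
		left->rd_key = right->first_key; /* boundary moves to @right's new first key */
}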
*/ +static int shift_one_internal_unit(znode * left, znode * right) +{ + int ret; + carry_pool *pool; + carry_level *todo; + coord_t *coord; + carry_plugin_info *info; + int size, moved; + + assert("nikita-2247", znode_get_level(left) == znode_get_level(right)); + assert("nikita-2435", znode_is_write_locked(left)); + assert("nikita-2436", znode_is_write_locked(right)); + assert("nikita-2434", sibling_link_is_ok(left, right)); + + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*todo) + + sizeof(*coord) + sizeof(*info) +#if REISER4_DEBUG + + sizeof(*coord) + 2 * sizeof(reiser4_key) +#endif + ); + if (IS_ERR(pool)) + return PTR_ERR(pool); + todo = (carry_level *) (pool + 1); + init_carry_level(todo, pool); + + coord = (coord_t *) (todo + 3); + coord_init_first_unit(coord, right); + info = (carry_plugin_info *) (coord + 1); + +#if REISER4_DEBUG + if (!node_is_empty(left)) { + coord_t *last; + reiser4_key *right_key; + reiser4_key *left_key; + + last = (coord_t *) (info + 1); + right_key = (reiser4_key *) (last + 1); + left_key = right_key + 1; + coord_init_last_unit(last, left); + + assert("nikita-2463", + keyle(item_key_by_coord(last, left_key), + item_key_by_coord(coord, right_key))); + } +#endif + + assert("jmacd-2007", item_is_internal(coord)); + + size = item_length_by_coord(coord); + info->todo = todo; + info->doing = NULL; + + ret = node_plugin_by_node(left)->shift(coord, left, SHIFT_LEFT, + 1 + /* delete @right if it becomes + empty */ + , + 0 + /* do not move coord @coord to + node @left */ + , + info); + + /* If shift returns positive, then we shifted the item. */ + assert("vs-423", ret <= 0 || size == ret); + moved = (ret > 0); + + if (moved) { + /* something was moved */ + reiser4_tree *tree; + int grabbed; + + znode_make_dirty(left); + znode_make_dirty(right); + tree = znode_get_tree(left); + write_lock_dk(tree); + update_znode_dkeys(left, right); + write_unlock_dk(tree); + + /* reserve space for delimiting keys after shifting */ + grabbed = get_current_context()->grabbed_blocks; + ret = reiser4_grab_space_force(tree->height, BA_RESERVED); + assert("nikita-3003", ret == 0); /* reserved space is + exhausted. Ask Hans. */ + + ret = reiser4_carry(todo, NULL/* previous level */); + grabbed2free_mark(grabbed); + } + + done_carry_pool(pool); + + if (ret != 0) { + /* Shift or carry operation failed. */ + assert("jmacd-7325", ret < 0); + return ret; + } + + return moved ? SUBTREE_MOVED : SQUEEZE_TARGET_FULL; +} + +static int allocate_znode(znode * node, + const coord_t *parent_coord, flush_pos_t *pos) +{ + txmod_plugin *plug = get_txmod_plugin(); + /* + * perform znode allocation with znode pinned in memory to avoid races + * with asynchronous emergency flush (which plays with + * JNODE_FLUSH_RESERVED bit). + */ + return WITH_DATA(node, plug->forward_alloc_formatted(node, + parent_coord, + pos)); +} + + +/* JNODE INTERFACE */ + +/* Lock a node (if formatted) and then get its parent locked, set the child's + coordinate in the parent. If the child is the root node, the above_root + znode is returned but the coord is not set. This function may cause atom + fusion, but it is only used for read locks (at this point) and therefore + fusion only occurs when the parent is already dirty. */ +/* Hans adds this note: remember to ask how expensive this operation is vs. + storing parent pointer in jnodes. 
*/ +static int +jnode_lock_parent_coord(jnode * node, + coord_t *coord, + lock_handle * parent_lh, + load_count * parent_zh, + znode_lock_mode parent_mode, int try) +{ + int ret; + + assert("edward-53", jnode_is_unformatted(node) || jnode_is_znode(node)); + assert("edward-54", jnode_is_unformatted(node) + || znode_is_any_locked(JZNODE(node))); + + if (!jnode_is_znode(node)) { + reiser4_key key; + tree_level stop_level = TWIG_LEVEL; + lookup_bias bias = FIND_EXACT; + + assert("edward-168", !(jnode_get_type(node) == JNODE_BITMAP)); + + /* The case when node is not znode, but can have parent coord + (unformatted node, node which represents cluster page, + etc..). Generate a key for the appropriate entry, search + in the tree using coord_by_key, which handles locking for + us. */ + + /* + * nothing is locked at this moment, so, nothing prevents + * concurrent truncate from removing jnode from inode. To + * prevent this spin-lock jnode. jnode can be truncated just + * after call to the jnode_build_key(), but this is ok, + * because coord_by_key() will just fail to find appropriate + * extent. + */ + spin_lock_jnode(node); + if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) { + jnode_build_key(node, &key); + ret = 0; + } else + ret = RETERR(-ENOENT); + spin_unlock_jnode(node); + + if (ret != 0) + return ret; + + if (jnode_is_cluster_page(node)) + stop_level = LEAF_LEVEL; + + assert("jmacd-1812", coord != NULL); + + ret = coord_by_key(jnode_get_tree(node), &key, coord, parent_lh, + parent_mode, bias, stop_level, stop_level, + CBK_UNIQUE, NULL/*ra_info */); + switch (ret) { + case CBK_COORD_NOTFOUND: + assert("edward-1038", + ergo(jnode_is_cluster_page(node), + JF_ISSET(node, JNODE_HEARD_BANSHEE))); + if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) + warning("nikita-3177", "Parent not found"); + return ret; + case CBK_COORD_FOUND: + if (coord->between != AT_UNIT) { + /* FIXME: comment needed */ + done_lh(parent_lh); + if (!JF_ISSET(node, JNODE_HEARD_BANSHEE)) { + warning("nikita-3178", + "Found but not happy: %i", + coord->between); + } + return RETERR(-ENOENT); + } + ret = incr_load_count_znode(parent_zh, parent_lh->node); + if (ret != 0) + return ret; + /* if (jnode_is_cluster_page(node)) { + races with write() are possible + check_child_cluster (parent_lh->node); + } + */ + break; + default: + return ret; + } + + } else { + int flags; + znode *z; + + z = JZNODE(node); + /* Formatted node case: */ + assert("jmacd-2061", !znode_is_root(z)); + + flags = GN_ALLOW_NOT_CONNECTED; + if (try) + flags |= GN_TRY_LOCK; + + ret = + reiser4_get_parent_flags(parent_lh, z, parent_mode, flags); + if (ret != 0) + /* -E_REPEAT is ok here, it is handled by the caller. */ + return ret; + + /* Make the child's position "hint" up-to-date. (Unless above + root, which caller must check.) */ + if (coord != NULL) { + + ret = incr_load_count_znode(parent_zh, parent_lh->node); + if (ret != 0) { + warning("jmacd-976812386", + "incr_load_count_znode failed: %d", + ret); + return ret; + } + + ret = find_child_ptr(parent_lh->node, z, coord); + if (ret != 0) { + warning("jmacd-976812", + "find_child_ptr failed: %d", ret); + return ret; + } + } + } + + return 0; +} + +/* Get the (locked) next neighbor of a znode which is dirty and a member of the + same atom. If there is no next neighbor or the neighbor is not in memory or + if there is a neighbor but it is not dirty or not in the same atom, + -E_NO_NEIGHBOR is returned. 
In some cases the slum may include nodes which + are not dirty, if so @check_dirty should be 0 */ +static int neighbor_in_slum(znode * node, /* starting point */ + lock_handle * lock, /* lock on starting point */ + sideof side, /* left or right direction we + seek the next node in */ + znode_lock_mode mode, /* kind of lock we want */ + int check_dirty, /* true if the neighbor should + be dirty */ + int use_upper_levels /* get neighbor by going though + upper levels */) +{ + int ret; + int flags; + + assert("jmacd-6334", znode_is_connected(node)); + + flags = GN_SAME_ATOM | (side == LEFT_SIDE ? GN_GO_LEFT : 0); + if (use_upper_levels) + flags |= GN_CAN_USE_UPPER_LEVELS; + + ret = reiser4_get_neighbor(lock, node, mode, flags); + if (ret) { + /* May return -ENOENT or -E_NO_NEIGHBOR. */ + /* FIXME(C): check EINVAL, E_DEADLOCK */ + if (ret == -ENOENT) + ret = RETERR(-E_NO_NEIGHBOR); + return ret; + } + if (!check_dirty) + return 0; + /* Check dirty bit of locked znode, no races here */ + if (JF_ISSET(ZJNODE(lock->node), JNODE_DIRTY)) + return 0; + + done_lh(lock); + return RETERR(-E_NO_NEIGHBOR); +} + +/* Return true if two znodes have the same parent. This is called with both + nodes write-locked (for squeezing) so no tree lock is needed. */ +static int znode_same_parents(znode * a, znode * b) +{ + int result; + + assert("jmacd-7011", znode_is_write_locked(a)); + assert("jmacd-7012", znode_is_write_locked(b)); + + /* We lock the whole tree for this check.... I really don't like whole + * tree locks... -Hans */ + read_lock_tree(znode_get_tree(a)); + result = (znode_parent(a) == znode_parent(b)); + read_unlock_tree(znode_get_tree(a)); + return result; +} + +/* FLUSH SCAN */ + +/* Initialize the flush_scan data structure. */ +static void scan_init(flush_scan * scan) +{ + memset(scan, 0, sizeof(*scan)); + init_lh(&scan->node_lock); + init_lh(&scan->parent_lock); + init_load_count(&scan->parent_load); + init_load_count(&scan->node_load); + coord_init_invalid(&scan->parent_coord, NULL); +} + +/* Release any resources held by the flush scan, e.g. release locks, + free memory, etc. */ +static void scan_done(flush_scan * scan) +{ + done_load_count(&scan->node_load); + if (scan->node != NULL) { + jput(scan->node); + scan->node = NULL; + } + done_load_count(&scan->parent_load); + done_lh(&scan->parent_lock); + done_lh(&scan->node_lock); +} + +/* Returns true if flush scanning is finished. */ +int reiser4_scan_finished(flush_scan * scan) +{ + return scan->stop || (scan->direction == RIGHT_SIDE && + scan->count >= scan->max_count); +} + +/* Return true if the scan should continue to the @tonode. True if the node + meets the same_slum_check condition. If not, deref the "left" node and stop + the scan. */ +int reiser4_scan_goto(flush_scan * scan, jnode * tonode) +{ + int go = same_slum_check(scan->node, tonode, 1, 0); + + if (!go) { + scan->stop = 1; + jput(tonode); + } + + return go; +} + +/* Set the current scan->node, refcount it, increment count by the @add_count + (number to count, e.g., skipped unallocated nodes), deref previous current, + and copy the current parent coordinate. */ +int +scan_set_current(flush_scan * scan, jnode * node, unsigned add_count, + const coord_t *parent) +{ + /* Release the old references, take the new reference. */ + done_load_count(&scan->node_load); + + if (scan->node != NULL) + jput(scan->node); + scan->node = node; + scan->count += add_count; + + /* This next stmt is somewhat inefficient. 
The reiser4_scan_extent() + code could delay this update step until it finishes and update the + parent_coord only once. It did that before, but there was a bug and + this was the easiest way to make it correct. */ + if (parent != NULL) + coord_dup(&scan->parent_coord, parent); + + /* Failure may happen at the incr_load_count call, but the caller can + assume the reference is safely taken. */ + return incr_load_count_jnode(&scan->node_load, node); +} + +/* Return true if scanning in the leftward direction. */ +int reiser4_scanning_left(flush_scan * scan) +{ + return scan->direction == LEFT_SIDE; +} + +/* Performs leftward scanning starting from either kind of node. Counts the + starting node. The right-scan object is passed in for the left-scan in order + to copy the parent of an unformatted starting position. This way we avoid + searching for the unformatted node's parent when scanning in each direction. + If we search for the parent once it is set in both scan objects. The limit + parameter tells flush-scan when to stop. + + Rapid scanning is used only during scan_left, where we are interested in + finding the 'leftpoint' where we begin flushing. We are interested in + stopping at the left child of a twig that does not have a dirty left + neighbour. THIS IS A SPECIAL CASE. The problem is finding a way to flush only + those nodes without unallocated children, and it is difficult to solve in the + bottom-up flushing algorithm we are currently using. The problem can be + solved by scanning left at every level as we go upward, but this would + basically bring us back to using a top-down allocation strategy, which we + already tried (see BK history from May 2002), and has a different set of + problems. The top-down strategy makes avoiding unallocated children easier, + but makes it difficult to propertly flush dirty children with clean parents + that would otherwise stop the top-down flush, only later to dirty the parent + once the children are flushed. So we solve the problem in the bottom-up + algorithm with a special case for twigs and leaves only. + + The first step in solving the problem is this rapid leftward scan. After we + determine that there are at least enough nodes counted to qualify for + FLUSH_RELOCATE_THRESHOLD we are no longer interested in the exact count, we + are only interested in finding the best place to start the flush. + + We could choose one of two possibilities: + + 1. Stop at the leftmost child (of a twig) that does not have a dirty left + neighbor. This requires checking one leaf per rapid-scan twig + + 2. Stop at the leftmost child (of a twig) where there are no dirty children + of the twig to the left. This requires checking possibly all of the in-memory + children of each twig during the rapid scan. + + For now we implement the first policy. +*/ +static int +scan_left(flush_scan * scan, flush_scan * right, jnode * node, unsigned limit) +{ + int ret = 0; + + scan->max_count = limit; + scan->direction = LEFT_SIDE; + + ret = scan_set_current(scan, jref(node), 1, NULL); + if (ret != 0) + return ret; + + ret = scan_common(scan, right); + if (ret != 0) + return ret; + + /* Before rapid scanning, we need a lock on scan->node so that we can + get its parent, only if formatted. */ + if (jnode_is_znode(scan->node)) { + ret = longterm_lock_znode(&scan->node_lock, JZNODE(scan->node), + ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI); + } + + /* Rapid_scan would go here (with limit set to FLUSH_RELOCATE_THRESHOLD) + */ + return ret; +} + +/* Performs rightward scanning... 
Does not count the starting node. The limit + parameter is described in scan_left. If the starting node is unformatted then + the parent_coord was already set during scan_left. The rapid_after parameter + is not used during right-scanning. + + scan_right is only called if the scan_left operation does not count at least + FLUSH_RELOCATE_THRESHOLD nodes for flushing. Otherwise, the limit parameter + is set to the difference between scan-left's count and + FLUSH_RELOCATE_THRESHOLD, meaning scan-right counts as high as + FLUSH_RELOCATE_THRESHOLD and then stops. */ +static int scan_right(flush_scan * scan, jnode * node, unsigned limit) +{ + int ret; + + scan->max_count = limit; + scan->direction = RIGHT_SIDE; + + ret = scan_set_current(scan, jref(node), 0, NULL); + if (ret != 0) + return ret; + + return scan_common(scan, NULL); +} + +/* Common code to perform left or right scanning. */ +static int scan_common(flush_scan * scan, flush_scan * other) +{ + int ret; + + assert("nikita-2376", scan->node != NULL); + assert("edward-54", jnode_is_unformatted(scan->node) + || jnode_is_znode(scan->node)); + + /* Special case for starting at an unformatted node. Optimization: we + only want to search for the parent (which requires a tree traversal) + once. Obviously, we shouldn't have to call it once for the left scan + and once for the right scan. For this reason, if we search for the + parent during scan-left we then duplicate the coord/lock/load into + the scan-right object. */ + if (jnode_is_unformatted(scan->node)) { + ret = scan_unformatted(scan, other); + if (ret != 0) + return ret; + } + /* This loop expects to start at a formatted position and performs + chaining of formatted regions */ + while (!reiser4_scan_finished(scan)) { + + ret = scan_formatted(scan); + if (ret != 0) + return ret; + } + + return 0; +} + +static int scan_unformatted(flush_scan * scan, flush_scan * other) +{ + int ret = 0; + int try = 0; + + if (!coord_is_invalid(&scan->parent_coord)) + goto scan; + + /* set parent coord from */ + if (!jnode_is_unformatted(scan->node)) { + /* formatted position */ + + lock_handle lock; + assert("edward-301", jnode_is_znode(scan->node)); + init_lh(&lock); + + /* + * when flush starts from unformatted node, first thing it + * does is tree traversal to find formatted parent of starting + * node. This parent is then kept lock across scans to the + * left and to the right. This means that during scan to the + * left we cannot take left-ward lock, because this is + * dead-lock prone. So, if we are scanning to the left and + * there is already lock held by this thread, + * jnode_lock_parent_coord() should use try-lock. + */ + try = reiser4_scanning_left(scan) + && !lock_stack_isclean(get_current_lock_stack()); + /* Need the node locked to get the parent lock, We have to + take write lock since there is at least one call path + where this znode is already write-locked by us. */ + ret = + longterm_lock_znode(&lock, JZNODE(scan->node), + ZNODE_WRITE_LOCK, + reiser4_scanning_left(scan) ? + ZNODE_LOCK_LOPRI : + ZNODE_LOCK_HIPRI); + if (ret != 0) + /* EINVAL or E_DEADLOCK here mean... try again! At this + point we've scanned too far and can't back out, just + start over. 
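[Editorial aside] The try-lock rule applied here, taking the lock with GN_TRY_LOCK / low priority when scanning leftward with locks already held, and restarting on failure, is the standard way to break a potential lock-ordering cycle. A generic sketch of that rule with plain pthread mutexes; reiser4's long-term znode locks follow the same idea, and the helper name here is made up for illustration:

#include <errno.h>
#include <pthread.h>

/* A thread that already holds @held and now needs @wanted out of the usual
 * order may only try-lock; on contention it backs off so the caller can
 * restart from scratch instead of deadlocking. */
static int lock_out_of_order(pthread_mutex_t *held, pthread_mutex_t *wanted)
{
	if (pthread_mutex_trylock(wanted) == 0)
		return 0;		/* acquired without risking a deadlock */
	pthread_mutex_unlock(held);	/* back off; caller must restart */
	return -EAGAIN;
}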
*/ + return ret; + + ret = jnode_lock_parent_coord(scan->node, + &scan->parent_coord, + &scan->parent_lock, + &scan->parent_load, + ZNODE_WRITE_LOCK, try); + + /* FIXME(C): check EINVAL, E_DEADLOCK */ + done_lh(&lock); + if (ret == -E_REPEAT) { + scan->stop = 1; + return 0; + } + if (ret) + return ret; + + } else { + /* unformatted position */ + + ret = + jnode_lock_parent_coord(scan->node, &scan->parent_coord, + &scan->parent_lock, + &scan->parent_load, + ZNODE_WRITE_LOCK, try); + + if (IS_CBKERR(ret)) + return ret; + + if (ret == CBK_COORD_NOTFOUND) + /* FIXME(C): check EINVAL, E_DEADLOCK */ + return ret; + + /* parent was found */ + assert("jmacd-8661", other != NULL); + /* Duplicate the reference into the other flush_scan. */ + coord_dup(&other->parent_coord, &scan->parent_coord); + copy_lh(&other->parent_lock, &scan->parent_lock); + copy_load_count(&other->parent_load, &scan->parent_load); + } +scan: + return scan_by_coord(scan); +} + +/* Performs left- or rightward scanning starting from a formatted node. Follow + left pointers under tree lock as long as: + + - node->left/right is non-NULL + - node->left/right is connected, dirty + - node->left/right belongs to the same atom + - scan has not reached maximum count +*/ +static int scan_formatted(flush_scan * scan) +{ + int ret; + znode *neighbor = NULL; + + assert("jmacd-1401", !reiser4_scan_finished(scan)); + + do { + znode *node = JZNODE(scan->node); + + /* Node should be connected, but if not stop the scan. */ + if (!znode_is_connected(node)) { + scan->stop = 1; + break; + } + + /* Lock the tree, check-for and reference the next sibling. */ + read_lock_tree(znode_get_tree(node)); + + /* It may be that a node is inserted or removed between a node + and its left sibling while the tree lock is released, but the + flush-scan count does not need to be precise. Thus, we + release the tree lock as soon as we get the neighboring node. + */ + neighbor = + reiser4_scanning_left(scan) ? node->left : node->right; + if (neighbor != NULL) + zref(neighbor); + + read_unlock_tree(znode_get_tree(node)); + + /* If neighbor is NULL at the leaf level, need to check for an + unformatted sibling using the parent--break in any case. */ + if (neighbor == NULL) + break; + + /* Check the condition for going left, break if it is not met. + This also releases (jputs) the neighbor if false. */ + if (!reiser4_scan_goto(scan, ZJNODE(neighbor))) + break; + + /* Advance the flush_scan state to the left, repeat. */ + ret = scan_set_current(scan, ZJNODE(neighbor), 1, NULL); + if (ret != 0) + return ret; + + } while (!reiser4_scan_finished(scan)); + + /* If neighbor is NULL then we reached the end of a formatted region, or + else the sibling is out of memory, now check for an extent to the + left (as long as LEAF_LEVEL). */ + if (neighbor != NULL || jnode_get_level(scan->node) != LEAF_LEVEL + || reiser4_scan_finished(scan)) { + scan->stop = 1; + return 0; + } + /* Otherwise, calls scan_by_coord for the right(left)most item of the + left(right) neighbor on the parent level, then possibly continue. */ + + coord_init_invalid(&scan->parent_coord, NULL); + return scan_unformatted(scan, NULL); +} + +/* NOTE-EDWARD: + This scans adjacent items of the same type and calls scan flush plugin for + each one. Performs left(right)ward scanning starting from a (possibly) + unformatted node. If we start from unformatted node, then we continue only if + the next neighbor is also unformatted. 
When called from scan_formatted, we + skip first iteration (to make sure that right(left)most item of the + left(right) neighbor on the parent level is of the same type and set + appropriate coord). */ +static int scan_by_coord(flush_scan * scan) +{ + int ret = 0; + int scan_this_coord; + lock_handle next_lock; + load_count next_load; + coord_t next_coord; + jnode *child; + item_plugin *iplug; + + init_lh(&next_lock); + init_load_count(&next_load); + scan_this_coord = (jnode_is_unformatted(scan->node) ? 1 : 0); + + /* set initial item id */ + iplug = item_plugin_by_coord(&scan->parent_coord); + + for (; !reiser4_scan_finished(scan); scan_this_coord = 1) { + if (scan_this_coord) { + /* Here we expect that unit is scannable. it would not + * be so due to race with extent->tail conversion. */ + if (iplug->f.scan == NULL) { + scan->stop = 1; + ret = -E_REPEAT; + /* skip the check at the end. */ + goto race; + } + + ret = iplug->f.scan(scan); + if (ret != 0) + goto exit; + + if (reiser4_scan_finished(scan)) { + checkchild(scan); + break; + } + } else { + /* the same race against truncate as above is possible + * here, it seems */ + + /* NOTE-JMACD: In this case, apply the same end-of-node + logic but don't scan the first coordinate. */ + assert("jmacd-1231", + item_is_internal(&scan->parent_coord)); + } + + if (iplug->f.utmost_child == NULL + || znode_get_level(scan->parent_coord.node) != TWIG_LEVEL) { + /* stop this coord and continue on parrent level */ + ret = + scan_set_current(scan, + ZJNODE(zref + (scan->parent_coord.node)), + 1, NULL); + if (ret != 0) + goto exit; + break; + } + + /* Either way, the invariant is that scan->parent_coord is set + to the parent of scan->node. Now get the next unit. */ + coord_dup(&next_coord, &scan->parent_coord); + coord_sideof_unit(&next_coord, scan->direction); + + /* If off-the-end of the twig, try the next twig. */ + if (coord_is_after_sideof_unit(&next_coord, scan->direction)) { + /* We take the write lock because we may start flushing + * from this coordinate. */ + ret = neighbor_in_slum(next_coord.node, + &next_lock, + scan->direction, + ZNODE_WRITE_LOCK, + 1 /* check dirty */, + 0 /* don't go though upper + levels */); + if (ret == -E_NO_NEIGHBOR) { + scan->stop = 1; + ret = 0; + break; + } + + if (ret != 0) + goto exit; + + ret = incr_load_count_znode(&next_load, next_lock.node); + if (ret != 0) + goto exit; + + coord_init_sideof_unit(&next_coord, next_lock.node, + sideof_reverse(scan->direction)); + } + + iplug = item_plugin_by_coord(&next_coord); + + /* Get the next child. */ + ret = + iplug->f.utmost_child(&next_coord, + sideof_reverse(scan->direction), + &child); + if (ret != 0) + goto exit; + /* If the next child is not in memory, or, item_utmost_child + failed (due to race with unlink, most probably), stop + here. */ + if (child == NULL || IS_ERR(child)) { + scan->stop = 1; + checkchild(scan); + break; + } + + assert("nikita-2374", jnode_is_unformatted(child) + || jnode_is_znode(child)); + + /* See if it is dirty, part of the same atom. */ + if (!reiser4_scan_goto(scan, child)) { + checkchild(scan); + break; + } + + /* If so, make this child current. */ + ret = scan_set_current(scan, child, 1, &next_coord); + if (ret != 0) + goto exit; + + /* Now continue. If formatted we release the parent lock and + return, then proceed. */ + if (jnode_is_znode(child)) + break; + + /* Otherwise, repeat the above loop with next_coord. 
*/ + if (next_load.node != NULL) { + done_lh(&scan->parent_lock); + move_lh(&scan->parent_lock, &next_lock); + move_load_count(&scan->parent_load, &next_load); + } + } + + assert("jmacd-6233", + reiser4_scan_finished(scan) || jnode_is_znode(scan->node)); +exit: + checkchild(scan); +race: /* skip the above check */ + if (jnode_is_znode(scan->node)) { + done_lh(&scan->parent_lock); + done_load_count(&scan->parent_load); + } + + done_load_count(&next_load); + done_lh(&next_lock); + return ret; +} + +/* FLUSH POS HELPERS */ + +/* Initialize the fields of a flush_position. */ +static void pos_init(flush_pos_t *pos) +{ + memset(pos, 0, sizeof *pos); + + pos->state = POS_INVALID; + coord_init_invalid(&pos->coord, NULL); + init_lh(&pos->lock); + init_load_count(&pos->load); + + reiser4_blocknr_hint_init(&pos->preceder); +} + +/* The flush loop inside squalloc periodically checks pos_valid to determine + when "enough flushing" has been performed. This will return true until one + of the following conditions is met: + + 1. the number of flush-queued nodes has reached the kernel-supplied + "int *nr_to_flush" parameter, meaning we have flushed as many blocks as the + kernel requested. When flushing to commit, this parameter is NULL. + + 2. pos_stop() is called because squalloc discovers that the "next" node in + the flush order is either non-existant, not dirty, or not in the same atom. +*/ + +static int pos_valid(flush_pos_t *pos) +{ + return pos->state != POS_INVALID; +} + +/* Release any resources of a flush_position. Called when jnode_flush + finishes. */ +static void pos_done(flush_pos_t *pos) +{ + pos_stop(pos); + reiser4_blocknr_hint_done(&pos->preceder); + if (convert_data(pos)) + free_convert_data(pos); +} + +/* Reset the point and parent. Called during flush subroutines to terminate the + squalloc loop. */ +static int pos_stop(flush_pos_t *pos) +{ + pos->state = POS_INVALID; + done_lh(&pos->lock); + done_load_count(&pos->load); + coord_init_invalid(&pos->coord, NULL); + + if (pos->child) { + jput(pos->child); + pos->child = NULL; + } + + return 0; +} + +/* Return the flush_position's block allocator hint. */ +reiser4_blocknr_hint *reiser4_pos_hint(flush_pos_t *pos) +{ + return &pos->preceder; +} + +flush_queue_t *reiser4_pos_fq(flush_pos_t *pos) +{ + return pos->fq; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 90 + LocalWords: preceder + End: +*/ diff --git a/fs/reiser4/flush.h b/fs/reiser4/flush.h new file mode 100644 index 000000000000..270ea01e5c32 --- /dev/null +++ b/fs/reiser4/flush.h @@ -0,0 +1,290 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* DECLARATIONS: */ + +#if !defined(__REISER4_FLUSH_H__) +#define __REISER4_FLUSH_H__ + +#include "plugin/cluster.h" + +/* The flush_scan data structure maintains the state of an in-progress + flush-scan on a single level of the tree. A flush-scan is used for counting + the number of adjacent nodes to flush, which is used to determine whether we + should relocate, and it is also used to find a starting point for flush. A + flush-scan object can scan in both right and left directions via the + scan_left() and scan_right() interfaces. The right- and left-variations are + similar but perform different functions. When scanning left we (optionally + perform rapid scanning and then) longterm-lock the endpoint node. When + scanning right we are simply counting the number of adjacent, dirty nodes. 
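[Editorial aside] The interplay between the two scan directions, described at scan_left() and scan_right() above, boils down to a simple budget: scan left first, and only if that did not already reach the relocate threshold spend the remaining budget scanning right. A toy restatement of that policy; the threshold value and helper names are made up for illustration:

#define TOY_RELOCATE_THRESHOLD 64
#define TOY_SCAN_MAXNODES      10000

unsigned toy_scan(int leftward, unsigned limit);	/* counts adjacent dirty nodes */

static int toy_should_relocate(void)
{
	unsigned counted = toy_scan(1 /* left */, TOY_SCAN_MAXNODES);

	if (counted < TOY_RELOCATE_THRESHOLD)
		counted += toy_scan(0 /* right */,
				    TOY_RELOCATE_THRESHOLD - counted);
	return counted >= TOY_RELOCATE_THRESHOLD;
}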
*/ +struct flush_scan { + + /* The current number of nodes scanned on this level. */ + unsigned count; + + /* There may be a maximum number of nodes for a scan on any single + level. When going leftward, max_count is determined by + FLUSH_SCAN_MAXNODES (see reiser4.h) */ + unsigned max_count; + + /* Direction: Set to one of the sideof enumeration: + { LEFT_SIDE, RIGHT_SIDE }. */ + sideof direction; + + /* Initially @stop is set to false then set true once some condition + stops the search (e.g., we found a clean node before reaching + max_count or we found a node belonging to another atom). */ + int stop; + + /* The current scan position. If @node is non-NULL then its reference + count has been incremented to reflect this reference. */ + jnode *node; + + /* A handle for zload/zrelse of current scan position node. */ + load_count node_load; + + /* During left-scan, if the final position (a.k.a. endpoint node) is + formatted the node is locked using this lock handle. The endpoint + needs to be locked for transfer to the flush_position object after + scanning finishes. */ + lock_handle node_lock; + + /* When the position is unformatted, its parent, coordinate, and parent + zload/zrelse handle. */ + lock_handle parent_lock; + coord_t parent_coord; + load_count parent_load; + + /* The block allocator preceder hint. Sometimes flush_scan determines + what the preceder is and if so it sets it here, after which it is + copied into the flush_position. Otherwise, the preceder is computed + later. */ + reiser4_block_nr preceder_blk; +}; + +struct convert_item_info { + dc_item_stat d_cur; /* per-cluster status of the current item */ + dc_item_stat d_next; /* per-cluster status of the first item on + the right neighbor */ + int cluster_shift; /* disk cluster shift */ + flow_t flow; /* disk cluster data */ +}; + +struct convert_info { + int count; /* for squalloc terminating */ + item_plugin *iplug; /* current item plugin */ + struct convert_item_info *itm; /* current item info */ + struct cluster_handle clust; /* transform cluster */ + lock_handle right_lock; /* lock handle of the right neighbor */ + int right_locked; +}; + +typedef enum flush_position_state { + POS_INVALID, /* Invalid or stopped pos, do not continue slum + * processing */ + POS_ON_LEAF, /* pos points to already prepped, locked + * formatted node at leaf level */ + POS_ON_EPOINT, /* pos keeps a lock on twig level, "coord" field + * is used to traverse unformatted nodes */ + POS_TO_LEAF, /* pos is being moved to leaf level */ + POS_TO_TWIG, /* pos is being moved to twig level */ + POS_END_OF_TWIG, /* special case of POS_ON_TWIG, when coord is + * after rightmost unit of the current twig */ + POS_ON_INTERNAL /* same as POS_ON_LEAF, but points to internal + * node */ +} flushpos_state_t; + +/* An encapsulation of the current flush point and all the parameters that are + passed through the entire squeeze-and-allocate stage of the flush routine. + A single flush_position object is constructed after left- and right-scanning + finishes. */ +struct flush_position { + flushpos_state_t state; + + coord_t coord; /* coord to traverse unformatted nodes */ + lock_handle lock; /* current lock we hold */ + load_count load; /* load status for current locked formatted node + */ + jnode *child; /* for passing a reference to unformatted child + * across pos state changes */ + + reiser4_blocknr_hint preceder; /* The flush 'hint' state. */ + int leaf_relocate; /* True if enough leaf-level nodes were + * found to suggest a relocate policy. 
*/ + int alloc_cnt; /* The number of nodes allocated during squeeze + and allococate. */ + int prep_or_free_cnt; /* The number of nodes prepared for write + (allocate) or squeezed and freed. */ + flush_queue_t *fq; + long *nr_written; /* number of nodes submitted to disk */ + int flags; /* a copy of jnode_flush flags argument */ + + znode *prev_twig; /* previous parent pointer value, used to catch + * processing of new twig node */ + struct convert_info *sq; /* convert info */ + + unsigned long pos_in_unit; /* for extents only. Position + within an extent unit of first + jnode of slum */ + long nr_to_write; /* number of unformatted nodes to handle on + flush */ +}; + +static inline int item_convert_count(flush_pos_t *pos) +{ + return pos->sq->count; +} +static inline void inc_item_convert_count(flush_pos_t *pos) +{ + pos->sq->count++; +} +static inline void set_item_convert_count(flush_pos_t *pos, int count) +{ + pos->sq->count = count; +} +static inline item_plugin *item_convert_plug(flush_pos_t *pos) +{ + return pos->sq->iplug; +} + +static inline struct convert_info *convert_data(flush_pos_t *pos) +{ + return pos->sq; +} + +static inline struct convert_item_info *item_convert_data(flush_pos_t *pos) +{ + assert("edward-955", convert_data(pos)); + return pos->sq->itm; +} + +static inline struct tfm_cluster *tfm_cluster_sq(flush_pos_t *pos) +{ + return &pos->sq->clust.tc; +} + +static inline struct tfm_stream *tfm_stream_sq(flush_pos_t *pos, + tfm_stream_id id) +{ + assert("edward-854", pos->sq != NULL); + return get_tfm_stream(tfm_cluster_sq(pos), id); +} + +static inline int convert_data_attached(flush_pos_t *pos) +{ + return convert_data(pos) != NULL && item_convert_data(pos) != NULL; +} + +#define should_convert_right_neighbor(pos) convert_data_attached(pos) + +/* Returns true if next node contains next item of the disk cluster + so item convert data should be moved to the right slum neighbor. 
+*/ +static inline int next_node_is_chained(flush_pos_t *pos) +{ + return convert_data_attached(pos) && + item_convert_data(pos)->d_next == DC_CHAINED_ITEM; +} + +/* + * Update "twin state" (d_cur, d_next) to assign a proper + * conversion mode in the next iteration of convert_node() + */ +static inline void update_chaining_state(flush_pos_t *pos, + int this_node /* where to proceed */) +{ + + assert("edward-1010", convert_data_attached(pos)); + + if (this_node) { + /* + * we want to perform one more iteration with the same item + */ + assert("edward-1013", + item_convert_data(pos)->d_cur == DC_FIRST_ITEM || + item_convert_data(pos)->d_cur == DC_CHAINED_ITEM); + assert("edward-1227", + item_convert_data(pos)->d_next == DC_AFTER_CLUSTER || + item_convert_data(pos)->d_next == DC_INVALID_STATE); + + item_convert_data(pos)->d_cur = DC_AFTER_CLUSTER; + item_convert_data(pos)->d_next = DC_INVALID_STATE; + } + else { + /* + * we want to proceed on right neighbor, which is chained + */ + assert("edward-1011", + item_convert_data(pos)->d_cur == DC_FIRST_ITEM || + item_convert_data(pos)->d_cur == DC_CHAINED_ITEM); + assert("edward-1012", + item_convert_data(pos)->d_next == DC_CHAINED_ITEM); + + item_convert_data(pos)->d_cur = DC_CHAINED_ITEM; + item_convert_data(pos)->d_next = DC_INVALID_STATE; + } +} + +#define SQUALLOC_THRESHOLD 256 + +static inline int should_terminate_squalloc(flush_pos_t *pos) +{ + return convert_data(pos) && + !item_convert_data(pos) && + item_convert_count(pos) >= SQUALLOC_THRESHOLD; +} + +#if REISER4_DEBUG +#define check_convert_info(pos) \ +do { \ + if (unlikely(should_convert_right_neighbor(pos))) { \ + warning("edward-1006", "unprocessed chained data"); \ + printk("d_cur = %d, d_next = %d, flow.len = %llu\n", \ + item_convert_data(pos)->d_cur, \ + item_convert_data(pos)->d_next, \ + item_convert_data(pos)->flow.length); \ + } \ +} while (0) +#else +#define check_convert_info(pos) +#endif /* REISER4_DEBUG */ + +void free_convert_data(flush_pos_t *pos); +/* used in extent.c */ +int scan_set_current(flush_scan * scan, jnode * node, unsigned add_size, + const coord_t *parent); +int reiser4_scan_finished(flush_scan * scan); +int reiser4_scanning_left(flush_scan * scan); +int reiser4_scan_goto(flush_scan * scan, jnode * tonode); +txn_atom *atom_locked_by_fq(flush_queue_t *fq); +int reiser4_alloc_extent(flush_pos_t *flush_pos); +squeeze_result squalloc_extent(znode *left, const coord_t *, flush_pos_t *, + reiser4_key *stop_key); +extern int reiser4_init_fqs(void); +extern void reiser4_done_fqs(void); + +#if REISER4_DEBUG + +extern void reiser4_check_fq(const txn_atom *atom); +extern atomic_t flush_cnt; + +#define check_preceder(blk) \ +assert("nikita-2588", blk < reiser4_block_count(reiser4_get_current_sb())); +extern void check_pos(flush_pos_t *pos); +#else +#define check_preceder(b) noop +#define check_pos(pos) noop +#endif + +/* __REISER4_FLUSH_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 90 + LocalWords: preceder + End: +*/ diff --git a/fs/reiser4/flush_queue.c b/fs/reiser4/flush_queue.c new file mode 100644 index 000000000000..8aa9c3625fef --- /dev/null +++ b/fs/reiser4/flush_queue.c @@ -0,0 +1,677 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +#include "debug.h" +#include "super.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "page_cache.h" +#include "wander.h" +#include "vfs_ops.h" +#include "writeout.h" +#include "flush.h" + +#include <linux/bio.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/blkdev.h> +#include <linux/writeback.h> + +/* A flush queue object is an accumulator for keeping jnodes prepared + by the jnode_flush() function for writing to disk. Those "queued" jnodes are + kept on the flush queue until memory pressure or atom commit asks + flush queues to write some or all from their jnodes. */ + +/* + LOCKING: + + fq->guard spin lock protects fq->atom pointer and nothing else. fq->prepped + list protected by atom spin lock. fq->prepped list uses the following + locking: + + two ways to protect fq->prepped list for read-only list traversal: + + 1. atom spin-lock atom. + 2. fq is IN_USE, atom->nr_running_queues increased. + + and one for list modification: + + 1. atom is spin-locked and one condition is true: fq is IN_USE or + atom->nr_running_queues == 0. + + The deadlock-safe order for flush queues and atoms is: first lock atom, then + lock flush queue, then lock jnode. +*/ + +#define fq_in_use(fq) ((fq)->state & FQ_IN_USE) +#define fq_ready(fq) (!fq_in_use(fq)) + +#define mark_fq_in_use(fq) do { (fq)->state |= FQ_IN_USE; } while (0) +#define mark_fq_ready(fq) do { (fq)->state &= ~FQ_IN_USE; } while (0) + +/* get lock on atom from locked flush queue object */ +static txn_atom *atom_locked_by_fq_nolock(flush_queue_t *fq) +{ + /* This code is similar to jnode_get_atom(), look at it for the + * explanation. */ + txn_atom *atom; + + assert_spin_locked(&(fq->guard)); + + while (1) { + atom = fq->atom; + if (atom == NULL) + break; + + if (spin_trylock_atom(atom)) + break; + + atomic_inc(&atom->refcount); + spin_unlock(&(fq->guard)); + spin_lock_atom(atom); + spin_lock(&(fq->guard)); + + if (fq->atom == atom) { + atomic_dec(&atom->refcount); + break; + } + + spin_unlock(&(fq->guard)); + atom_dec_and_unlock(atom); + spin_lock(&(fq->guard)); + } + + return atom; +} + +txn_atom *atom_locked_by_fq(flush_queue_t *fq) +{ + txn_atom *atom; + + spin_lock(&(fq->guard)); + atom = atom_locked_by_fq_nolock(fq); + spin_unlock(&(fq->guard)); + return atom; +} + +static void init_fq(flush_queue_t *fq) +{ + memset(fq, 0, sizeof *fq); + + atomic_set(&fq->nr_submitted, 0); + + INIT_LIST_HEAD(ATOM_FQ_LIST(fq)); + + init_waitqueue_head(&fq->wait); + spin_lock_init(&fq->guard); +} + +/* slab for flush queues */ +static struct kmem_cache *fq_slab; + +/** + * reiser4_init_fqs - create flush queue cache + * + * Initializes slab cache of flush queues. It is part of reiser4 module + * initialization. + */ +int reiser4_init_fqs(void) +{ + fq_slab = kmem_cache_create("fq", + sizeof(flush_queue_t), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (fq_slab == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * reiser4_done_fqs - delete flush queue cache + * + * This is called on reiser4 module unloading or system shutdown. 
+ */ +void reiser4_done_fqs(void) +{ + destroy_reiser4_cache(&fq_slab); +} + +/* create new flush queue object */ +static flush_queue_t *create_fq(gfp_t gfp) +{ + flush_queue_t *fq; + + fq = kmem_cache_alloc(fq_slab, gfp); + if (fq) + init_fq(fq); + + return fq; +} + +/* adjust atom's and flush queue's counters of queued nodes */ +static void count_enqueued_node(flush_queue_t *fq) +{ + ON_DEBUG(fq->atom->num_queued++); +} + +static void count_dequeued_node(flush_queue_t *fq) +{ + assert("zam-993", fq->atom->num_queued > 0); + ON_DEBUG(fq->atom->num_queued--); +} + +/* attach flush queue object to the atom */ +static void attach_fq(txn_atom *atom, flush_queue_t *fq) +{ + assert_spin_locked(&(atom->alock)); + list_add(&fq->alink, &atom->flush_queues); + fq->atom = atom; + ON_DEBUG(atom->nr_flush_queues++); +} + +static void detach_fq(flush_queue_t *fq) +{ + assert_spin_locked(&(fq->atom->alock)); + + spin_lock(&(fq->guard)); + list_del_init(&fq->alink); + assert("vs-1456", fq->atom->nr_flush_queues > 0); + ON_DEBUG(fq->atom->nr_flush_queues--); + fq->atom = NULL; + spin_unlock(&(fq->guard)); +} + +/* destroy flush queue object */ +static void done_fq(flush_queue_t *fq) +{ + assert("zam-763", list_empty_careful(ATOM_FQ_LIST(fq))); + assert("zam-766", atomic_read(&fq->nr_submitted) == 0); + + kmem_cache_free(fq_slab, fq); +} + +/* */ +static void mark_jnode_queued(flush_queue_t *fq, jnode * node) +{ + JF_SET(node, JNODE_FLUSH_QUEUED); + count_enqueued_node(fq); +} + +/* Putting jnode into the flush queue. Both atom and jnode should be + spin-locked. */ +void queue_jnode(flush_queue_t *fq, jnode * node) +{ + assert_spin_locked(&(node->guard)); + assert("zam-713", node->atom != NULL); + assert_spin_locked(&(node->atom->alock)); + assert("zam-716", fq->atom != NULL); + assert("zam-717", fq->atom == node->atom); + assert("zam-907", fq_in_use(fq)); + + assert("zam-714", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-826", JF_ISSET(node, JNODE_RELOC)); + assert("vs-1481", !JF_ISSET(node, JNODE_FLUSH_QUEUED)); + assert("vs-1481", NODE_LIST(node) != FQ_LIST); + + mark_jnode_queued(fq, node); + list_move_tail(&node->capture_link, ATOM_FQ_LIST(fq)); + + ON_DEBUG(count_jnode(node->atom, node, NODE_LIST(node), + FQ_LIST, 1)); +} + +/* repeatable process for waiting io completion on a flush queue object */ +static int wait_io(flush_queue_t *fq, int *nr_io_errors) +{ + assert("zam-738", fq->atom != NULL); + assert_spin_locked(&(fq->atom->alock)); + assert("zam-736", fq_in_use(fq)); + assert("zam-911", list_empty_careful(ATOM_FQ_LIST(fq))); + + if (atomic_read(&fq->nr_submitted) != 0) { + struct super_block *super; + + spin_unlock_atom(fq->atom); + + assert("nikita-3013", reiser4_schedulable()); + + super = reiser4_get_current_sb(); + + /* FIXME: this is instead of blk_run_queues() */ + //blk_flush_plug(current); + + if (!(super->s_flags & MS_RDONLY)) + wait_event(fq->wait, + atomic_read(&fq->nr_submitted) == 0); + + /* Ask the caller to re-acquire the locks and call this + function again. Note: this technique is commonly used in + the txnmgr code. 
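+
+		   current_atom_finish_all_fq() below is one such caller: it
+		   re-acquires the atom lock and calls finish_all_fq() again
+		   for as long as -E_REPEAT keeps being returned.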
*/ + return -E_REPEAT; + } + + *nr_io_errors += atomic_read(&fq->nr_errors); + return 0; +} + +/* wait on I/O completion, re-submit dirty nodes to write */ +static int finish_fq(flush_queue_t *fq, int *nr_io_errors) +{ + int ret; + txn_atom *atom = fq->atom; + + assert("zam-801", atom != NULL); + assert_spin_locked(&(atom->alock)); + assert("zam-762", fq_in_use(fq)); + + ret = wait_io(fq, nr_io_errors); + if (ret) + return ret; + + detach_fq(fq); + done_fq(fq); + + reiser4_atom_send_event(atom); + + return 0; +} + +/* wait for all i/o for given atom to be completed, actually do one iteration + on that and return -E_REPEAT if there more iterations needed */ +static int finish_all_fq(txn_atom * atom, int *nr_io_errors) +{ + flush_queue_t *fq; + + assert_spin_locked(&(atom->alock)); + + if (list_empty_careful(&atom->flush_queues)) + return 0; + + list_for_each_entry(fq, &atom->flush_queues, alink) { + if (fq_ready(fq)) { + int ret; + + mark_fq_in_use(fq); + assert("vs-1247", fq->owner == NULL); + ON_DEBUG(fq->owner = current); + ret = finish_fq(fq, nr_io_errors); + + if (*nr_io_errors) + reiser4_handle_error(); + + if (ret) { + reiser4_fq_put(fq); + return ret; + } + + spin_unlock_atom(atom); + + return -E_REPEAT; + } + } + + /* All flush queues are in use; atom remains locked */ + return -EBUSY; +} + +/* wait all i/o for current atom */ +int current_atom_finish_all_fq(void) +{ + txn_atom *atom; + int nr_io_errors = 0; + int ret = 0; + + do { + while (1) { + atom = get_current_atom_locked(); + ret = finish_all_fq(atom, &nr_io_errors); + if (ret != -EBUSY) + break; + reiser4_atom_wait_event(atom); + } + } while (ret == -E_REPEAT); + + /* we do not need locked atom after this function finishes, SUCCESS or + -EBUSY are two return codes when atom remains locked after + finish_all_fq */ + if (!ret) + spin_unlock_atom(atom); + + assert_spin_not_locked(&(atom->alock)); + + if (ret) + return ret; + + if (nr_io_errors) + return RETERR(-EIO); + + return 0; +} + +/* change node->atom field for all jnode from given list */ +static void +scan_fq_and_update_atom_ref(struct list_head *list, txn_atom *atom) +{ + jnode *cur; + + list_for_each_entry(cur, list, capture_link) { + spin_lock_jnode(cur); + cur->atom = atom; + spin_unlock_jnode(cur); + } +} + +/* support for atom fusion operation */ +void reiser4_fuse_fq(txn_atom *to, txn_atom *from) +{ + flush_queue_t *fq; + + assert_spin_locked(&(to->alock)); + assert_spin_locked(&(from->alock)); + + list_for_each_entry(fq, &from->flush_queues, alink) { + scan_fq_and_update_atom_ref(ATOM_FQ_LIST(fq), to); + spin_lock(&(fq->guard)); + fq->atom = to; + spin_unlock(&(fq->guard)); + } + + list_splice_init(&from->flush_queues, to->flush_queues.prev); + +#if REISER4_DEBUG + to->num_queued += from->num_queued; + to->nr_flush_queues += from->nr_flush_queues; + from->nr_flush_queues = 0; +#endif +} + +#if REISER4_DEBUG +int atom_fq_parts_are_clean(txn_atom * atom) +{ + assert("zam-915", atom != NULL); + return list_empty_careful(&atom->flush_queues); +} +#endif + +/* + * Bio i/o completion routine for reiser4 write operations + */ +static void end_io_handler(struct bio *bio) +{ + int i; + int nr_errors = 0; + flush_queue_t *fq; + + assert("zam-958", bio_op(bio) == WRITE); + + /* we expect that bio->private is set to NULL or fq object which is used + * for synchronization and error counting. */ + fq = bio->bi_private; + /* Check all elements of io_vec for correct write completion. 
*/ + for (i = 0; i < bio->bi_vcnt; i += 1) { + struct page *pg = bio->bi_io_vec[i].bv_page; + + if (bio->bi_status) { + SetPageError(pg); + nr_errors++; + } + + { + /* jnode WRITEBACK ("write is in progress bit") is + * atomically cleared here. */ + jnode *node; + + assert("zam-736", pg != NULL); + assert("zam-736", PagePrivate(pg)); + node = jprivate(pg); + + JF_CLR(node, JNODE_WRITEBACK); + } + + end_page_writeback(pg); + put_page(pg); + } + + if (fq) { + /* count i/o error in fq object */ + atomic_add(nr_errors, &fq->nr_errors); + + /* If all write requests registered in this "fq" are done we up + * the waiter. */ + if (atomic_sub_and_test(bio->bi_vcnt, &fq->nr_submitted)) + wake_up(&fq->wait); + } + + bio_put(bio); +} + +/* Count I/O requests which will be submitted by @bio in given flush queues + @fq */ +void add_fq_to_bio(flush_queue_t *fq, struct bio *bio) +{ + bio->bi_private = fq; + bio->bi_end_io = end_io_handler; + + if (fq) + atomic_add(bio->bi_vcnt, &fq->nr_submitted); +} + +/* Move all queued nodes out from @fq->prepped list. */ +static void release_prepped_list(flush_queue_t *fq) +{ + txn_atom *atom; + + assert("zam-904", fq_in_use(fq)); + atom = atom_locked_by_fq(fq); + + while (!list_empty(ATOM_FQ_LIST(fq))) { + jnode *cur; + + cur = list_entry(ATOM_FQ_LIST(fq)->next, jnode, capture_link); + list_del_init(&cur->capture_link); + + count_dequeued_node(fq); + spin_lock_jnode(cur); + assert("nikita-3154", !JF_ISSET(cur, JNODE_OVRWR)); + assert("nikita-3154", JF_ISSET(cur, JNODE_RELOC)); + assert("nikita-3154", JF_ISSET(cur, JNODE_FLUSH_QUEUED)); + JF_CLR(cur, JNODE_FLUSH_QUEUED); + + if (JF_ISSET(cur, JNODE_DIRTY)) { + list_add_tail(&cur->capture_link, + ATOM_DIRTY_LIST(atom, + jnode_get_level(cur))); + ON_DEBUG(count_jnode(atom, cur, FQ_LIST, + DIRTY_LIST, 1)); + } else { + list_add_tail(&cur->capture_link, + ATOM_CLEAN_LIST(atom)); + ON_DEBUG(count_jnode(atom, cur, FQ_LIST, + CLEAN_LIST, 1)); + } + + spin_unlock_jnode(cur); + } + + if (--atom->nr_running_queues == 0) + reiser4_atom_send_event(atom); + + spin_unlock_atom(atom); +} + +/* Submit write requests for nodes on the already filled flush queue @fq. + + @fq: flush queue object which contains jnodes we can (and will) write. + @return: number of submitted blocks (>=0) if success, otherwise -- an error + code (<0). */ +int reiser4_write_fq(flush_queue_t *fq, long *nr_submitted, int flags) +{ + int ret; + txn_atom *atom; + + while (1) { + atom = atom_locked_by_fq(fq); + assert("zam-924", atom); + /* do not write fq in parallel. */ + if (atom->nr_running_queues == 0 + || !(flags & WRITEOUT_SINGLE_STREAM)) + break; + reiser4_atom_wait_event(atom); + } + + atom->nr_running_queues++; + spin_unlock_atom(atom); + + ret = write_jnode_list(ATOM_FQ_LIST(fq), fq, nr_submitted, flags); + release_prepped_list(fq); + + return ret; +} + +/* Getting flush queue object for exclusive use by one thread. May require + several iterations which is indicated by -E_REPEAT return code. + + This function does not contain code for obtaining an atom lock because an + atom lock is obtained by different ways in different parts of reiser4, + usually it is current atom, but we need a possibility for getting fq for the + atom of given jnode. 
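+
+   A typical caller therefore loops while -E_REPEAT is returned, re-taking
+   the atom lock on each pass. This is exactly what get_fq_for_current_atom()
+   below does; a simplified sketch of that loop:
+
+	fq = NULL;
+	do {
+		atom = get_current_atom_locked();
+		ret = reiser4_fq_by_atom(atom, &fq);
+	} while (ret == -E_REPEAT);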
*/ +static int fq_by_atom_gfp(txn_atom *atom, flush_queue_t **new_fq, gfp_t gfp) +{ + flush_queue_t *fq; + + assert_spin_locked(&(atom->alock)); + + fq = list_entry(atom->flush_queues.next, flush_queue_t, alink); + while (&atom->flush_queues != &fq->alink) { + spin_lock(&(fq->guard)); + + if (fq_ready(fq)) { + mark_fq_in_use(fq); + assert("vs-1246", fq->owner == NULL); + ON_DEBUG(fq->owner = current); + spin_unlock(&(fq->guard)); + + if (*new_fq) + done_fq(*new_fq); + + *new_fq = fq; + + return 0; + } + + spin_unlock(&(fq->guard)); + + fq = list_entry(fq->alink.next, flush_queue_t, alink); + } + + /* Use previously allocated fq object */ + if (*new_fq) { + mark_fq_in_use(*new_fq); + assert("vs-1248", (*new_fq)->owner == 0); + ON_DEBUG((*new_fq)->owner = current); + attach_fq(atom, *new_fq); + + return 0; + } + + spin_unlock_atom(atom); + + *new_fq = create_fq(gfp); + + if (*new_fq == NULL) + return RETERR(-ENOMEM); + + return RETERR(-E_REPEAT); +} + +int reiser4_fq_by_atom(txn_atom * atom, flush_queue_t **new_fq) +{ + return fq_by_atom_gfp(atom, new_fq, reiser4_ctx_gfp_mask_get()); +} + +/* A wrapper around reiser4_fq_by_atom for getting a flush queue + object for current atom, if success fq->atom remains locked. */ +flush_queue_t *get_fq_for_current_atom(void) +{ + flush_queue_t *fq = NULL; + txn_atom *atom; + int ret; + + do { + atom = get_current_atom_locked(); + ret = reiser4_fq_by_atom(atom, &fq); + } while (ret == -E_REPEAT); + + if (ret) + return ERR_PTR(ret); + return fq; +} + +/* Releasing flush queue object after exclusive use */ +void reiser4_fq_put_nolock(flush_queue_t *fq) +{ + assert("zam-747", fq->atom != NULL); + assert("zam-902", list_empty_careful(ATOM_FQ_LIST(fq))); + mark_fq_ready(fq); + assert("vs-1245", fq->owner == current); + ON_DEBUG(fq->owner = NULL); +} + +void reiser4_fq_put(flush_queue_t *fq) +{ + txn_atom *atom; + + spin_lock(&(fq->guard)); + atom = atom_locked_by_fq_nolock(fq); + + assert("zam-746", atom != NULL); + + reiser4_fq_put_nolock(fq); + reiser4_atom_send_event(atom); + + spin_unlock(&(fq->guard)); + spin_unlock_atom(atom); +} + +/* A part of atom object initialization related to the embedded flush queue + list head */ + +void init_atom_fq_parts(txn_atom *atom) +{ + INIT_LIST_HEAD(&atom->flush_queues); +} + +#if REISER4_DEBUG + +void reiser4_check_fq(const txn_atom *atom) +{ + /* check number of nodes on all atom's flush queues */ + flush_queue_t *fq; + int count; + struct list_head *pos; + + count = 0; + list_for_each_entry(fq, &atom->flush_queues, alink) { + spin_lock(&(fq->guard)); + /* calculate number of jnodes on fq' list of prepped jnodes */ + list_for_each(pos, ATOM_FQ_LIST(fq)) + count++; + spin_unlock(&(fq->guard)); + } + if (count != atom->fq) + warning("", "fq counter %d, real %d\n", atom->fq, count); + +} + +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/forward.h b/fs/reiser4/forward.h new file mode 100644 index 000000000000..9170c2bbab38 --- /dev/null +++ b/fs/reiser4/forward.h @@ -0,0 +1,259 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Forward declarations. Thank you Kernighan. 
*/ + +#if !defined(__REISER4_FORWARD_H__) +#define __REISER4_FORWARD_H__ + +#include <asm/errno.h> +#include <linux/types.h> + +typedef struct zlock zlock; +typedef struct lock_stack lock_stack; +typedef struct lock_handle lock_handle; +typedef struct znode znode; +typedef struct flow flow_t; +typedef struct coord coord_t; +typedef struct tree_access_pointer tap_t; +typedef struct reiser4_object_create_data reiser4_object_create_data; +typedef union reiser4_plugin reiser4_plugin; +typedef __u16 reiser4_plugin_id; +typedef __u64 reiser4_plugin_groups; +typedef struct item_plugin item_plugin; +typedef struct jnode_plugin jnode_plugin; +typedef struct reiser4_item_data reiser4_item_data; +typedef union reiser4_key reiser4_key; +typedef struct reiser4_tree reiser4_tree; +typedef struct carry_cut_data carry_cut_data; +typedef struct carry_kill_data carry_kill_data; +typedef struct carry_tree_op carry_tree_op; +typedef struct carry_tree_node carry_tree_node; +typedef struct carry_plugin_info carry_plugin_info; +typedef struct reiser4_journal reiser4_journal; +typedef struct txn_atom txn_atom; +typedef struct txn_handle txn_handle; +typedef struct txn_mgr txn_mgr; +typedef struct reiser4_dir_entry_desc reiser4_dir_entry_desc; +typedef struct reiser4_context reiser4_context; +typedef struct carry_level carry_level; +typedef struct blocknr_set_entry blocknr_set_entry; +typedef struct blocknr_list_entry blocknr_list_entry; +/* super_block->s_fs_info points to this */ +typedef struct reiser4_super_info_data reiser4_super_info_data; +/* next two objects are fields of reiser4_super_info_data */ +typedef struct reiser4_oid_allocator reiser4_oid_allocator; +typedef struct reiser4_space_allocator reiser4_space_allocator; + +typedef struct flush_scan flush_scan; +typedef struct flush_position flush_pos_t; + +typedef unsigned short pos_in_node_t; +#define MAX_POS_IN_NODE 65535 + +typedef struct jnode jnode; +typedef struct reiser4_blocknr_hint reiser4_blocknr_hint; + +typedef struct uf_coord uf_coord_t; +typedef struct hint hint_t; + +typedef struct ktxnmgrd_context ktxnmgrd_context; + +struct inode; +struct page; +struct file; +struct dentry; +struct super_block; + +/* return values of coord_by_key(). cbk == coord_by_key */ +typedef enum { + CBK_COORD_FOUND = 0, + CBK_COORD_NOTFOUND = -ENOENT, +} lookup_result; + +/* results of lookup with directory file */ +typedef enum { + FILE_NAME_FOUND = 0, + FILE_NAME_NOTFOUND = -ENOENT, + FILE_IO_ERROR = -EIO, /* FIXME: it seems silly to have special OOM, + IO_ERROR return codes for each search. */ + FILE_OOM = -ENOMEM /* FIXME: it seems silly to have special OOM, + IO_ERROR return codes for each search. */ +} file_lookup_result; + +/* behaviors of lookup. If coord we are looking for is actually in a tree, + both coincide. */ +typedef enum { + /* search exactly for the coord with key given */ + FIND_EXACT, + /* search for coord with the maximal key not greater than one + given */ + FIND_MAX_NOT_MORE_THAN /*LEFT_SLANT_BIAS */ +} lookup_bias; + +typedef enum { + /* number of leaf level of the tree + The fake root has (tree_level=0). */ + LEAF_LEVEL = 1, + + /* number of level one above leaf level of the tree. + + It is supposed that internal tree used by reiser4 to store file + system data and meta data will have height 2 initially (when + created by mkfs). + */ + TWIG_LEVEL = 2, +} tree_level; + +/* The "real" maximum ztree height is the 0-origin size of any per-level + array, since the zero'th level is not used. 
*/ +#define REAL_MAX_ZTREE_HEIGHT (REISER4_MAX_ZTREE_HEIGHT-LEAF_LEVEL) + +/* enumeration of possible mutual position of item and coord. This enum is + return type of ->is_in_item() item plugin method which see. */ +typedef enum { + /* coord is on the left of an item */ + IP_ON_THE_LEFT, + /* coord is inside item */ + IP_INSIDE, + /* coord is inside item, but to the right of the rightmost unit of + this item */ + IP_RIGHT_EDGE, + /* coord is on the right of an item */ + IP_ON_THE_RIGHT +} interposition; + +/* type of lock to acquire on znode before returning it to caller */ +typedef enum { + ZNODE_NO_LOCK = 0, + ZNODE_READ_LOCK = 1, + ZNODE_WRITE_LOCK = 2, +} znode_lock_mode; + +/* type of lock request */ +typedef enum { + ZNODE_LOCK_LOPRI = 0, + ZNODE_LOCK_HIPRI = (1 << 0), + + /* By setting the ZNODE_LOCK_NONBLOCK flag in a lock request the call to + longterm_lock_znode will not sleep waiting for the lock to become + available. If the lock is unavailable, reiser4_znode_lock will + immediately return the value -E_REPEAT. */ + ZNODE_LOCK_NONBLOCK = (1 << 1), + /* An option for longterm_lock_znode which prevents atom fusion */ + ZNODE_LOCK_DONT_FUSE = (1 << 2) +} znode_lock_request; + +typedef enum { READ_OP = 0, WRITE_OP = 1 } rw_op; + +/* used to specify direction of shift. These must be -1 and 1 */ +typedef enum { + SHIFT_LEFT = 1, + SHIFT_RIGHT = -1 +} shift_direction; + +typedef enum { + LEFT_SIDE, + RIGHT_SIDE +} sideof; + +#define reiser4_round_up(value, order) \ + ((typeof(value))(((long) (value) + (order) - 1U) & \ + ~((order) - 1))) + +/* values returned by squalloc_right_neighbor and its auxiliary functions */ +typedef enum { + /* unit of internal item is moved */ + SUBTREE_MOVED = 0, + /* nothing else can be squeezed into left neighbor */ + SQUEEZE_TARGET_FULL = 1, + /* all content of node is squeezed into its left neighbor */ + SQUEEZE_SOURCE_EMPTY = 2, + /* one more item is copied (this is only returned by + allocate_and_copy_extent to squalloc_twig)) */ + SQUEEZE_CONTINUE = 3 +} squeeze_result; + +/* Do not change items ids. If you do - there will be format change */ +typedef enum { + STATIC_STAT_DATA_ID = 0x0, + SIMPLE_DIR_ENTRY_ID = 0x1, + COMPOUND_DIR_ID = 0x2, + NODE_POINTER_ID = 0x3, + EXTENT_POINTER_ID = 0x5, + FORMATTING_ID = 0x6, + CTAIL_ID = 0x7, + BLACK_BOX_ID = 0x8, + LAST_ITEM_ID = 0x9 +} item_id; + +/* Flags passed to jnode_flush() to allow it to distinguish default settings + based on whether commit() was called or VM memory pressure was applied. */ +typedef enum { + /* submit flush queue to disk at jnode_flush completion */ + JNODE_FLUSH_WRITE_BLOCKS = 1, + + /* flush is called for commit */ + JNODE_FLUSH_COMMIT = 2, + /* not implemented */ + JNODE_FLUSH_MEMORY_FORMATTED = 4, + + /* not implemented */ + JNODE_FLUSH_MEMORY_UNFORMATTED = 8, +} jnode_flush_flags; + +/* Flags to insert/paste carry operations. Currently they only used in + flushing code, but in future, they can be used to optimize for repetitive + accesses. 
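+
+   The flags below are distinct bits and can be OR-ed together; for instance,
+   (COPI_DONT_SHIFT_LEFT | COPI_DONT_ALLOCATE) forbids both shifting data to
+   the left neighbor and allocating new nodes while looking for free space.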
*/ +typedef enum { + /* carry is not allowed to shift data to the left when trying to find + free space */ + COPI_DONT_SHIFT_LEFT = (1 << 0), + /* carry is not allowed to shift data to the right when trying to find + free space */ + COPI_DONT_SHIFT_RIGHT = (1 << 1), + /* carry is not allowed to allocate new node(s) when trying to find + free space */ + COPI_DONT_ALLOCATE = (1 << 2), + /* try to load left neighbor if its not in a cache */ + COPI_LOAD_LEFT = (1 << 3), + /* try to load right neighbor if its not in a cache */ + COPI_LOAD_RIGHT = (1 << 4), + /* shift insertion point to the left neighbor */ + COPI_GO_LEFT = (1 << 5), + /* shift insertion point to the right neighbor */ + COPI_GO_RIGHT = (1 << 6), + /* try to step back into original node if insertion into new node + fails after shifting data there. */ + COPI_STEP_BACK = (1 << 7), + /* use all possible space in the node */ + COPI_SWEEP = (1 << 8) +} cop_insert_flag; + +typedef enum { + SAFE_UNLINK, /* safe-link for unlink */ + SAFE_TRUNCATE /* safe-link for truncate */ +} reiser4_safe_link_t; + +/* this is to show on which list of atom jnode is */ +typedef enum { + NOT_CAPTURED, + DIRTY_LIST, + CLEAN_LIST, + FQ_LIST, + WB_LIST, + OVRWR_LIST +} atom_list; + +/* __REISER4_FORWARD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/fsdata.c b/fs/reiser4/fsdata.c new file mode 100644 index 000000000000..ad3f4101b7ca --- /dev/null +++ b/fs/reiser4/fsdata.c @@ -0,0 +1,801 @@ +/* Copyright 2001, 2002, 2003, 2004, 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "fsdata.h" +#include "inode.h" + +#include <linux/shrinker.h> + +/* cache or dir_cursors */ +static struct kmem_cache *d_cursor_cache; + +/* list of unused cursors */ +static LIST_HEAD(cursor_cache); + +/* number of cursors in list of ununsed cursors */ +static unsigned long d_cursor_unused = 0; + +/* spinlock protecting manipulations with dir_cursor's hash table and lists */ +DEFINE_SPINLOCK(d_c_lock); + +static reiser4_file_fsdata *create_fsdata(struct file *file); +static int file_is_stateless(struct file *file); +static void free_fsdata(reiser4_file_fsdata *fsdata); +static void kill_cursor(dir_cursor *); + +static unsigned long d_cursor_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + dir_cursor *scan; + unsigned long freed = 0; + + spin_lock(&d_c_lock); + while (!list_empty(&cursor_cache) && sc->nr_to_scan) { + scan = list_entry(cursor_cache.next, dir_cursor, alist); + assert("nikita-3567", scan->ref == 0); + kill_cursor(scan); + freed++; + sc->nr_to_scan--; + } + spin_unlock(&d_c_lock); + return freed; +} + +static unsigned long d_cursor_shrink_count (struct shrinker *shrink, + struct shrink_control *sc) +{ + return d_cursor_unused; +} + +/* + * actually, d_cursors are "priceless", because there is no way to + * recover information stored in them. On the other hand, we don't + * want to consume all kernel memory by them. As a compromise, just + * assign higher "seeks" value to d_cursor cache, so that it will be + * shrunk only if system is really tight on memory. + */ +static struct shrinker d_cursor_shrinker = { + .count_objects = d_cursor_shrink_count, + .scan_objects = d_cursor_shrink_scan, + .seeks = DEFAULT_SEEKS << 3 +}; + +/** + * reiser4_init_d_cursor - create d_cursor cache + * + * Initializes slab cache of d_cursors. It is part of reiser4 module + * initialization. 
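+ *
+ * Besides creating the slab, this also registers d_cursor_shrinker, so that
+ * unused cursors can be reclaimed under memory pressure;
+ * reiser4_done_d_cursor() unregisters the shrinker before destroying the
+ * cache.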
+ */ +int reiser4_init_d_cursor(void) +{ + d_cursor_cache = kmem_cache_create("d_cursor", sizeof(dir_cursor), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (d_cursor_cache == NULL) + return RETERR(-ENOMEM); + + register_shrinker(&d_cursor_shrinker); + return 0; +} + +/** + * reiser4_done_d_cursor - delete d_cursor cache and d_cursor shrinker + * + * This is called on reiser4 module unloading or system shutdown. + */ +void reiser4_done_d_cursor(void) +{ + unregister_shrinker(&d_cursor_shrinker); + + destroy_reiser4_cache(&d_cursor_cache); +} + +#define D_CURSOR_TABLE_SIZE (256) + +static inline unsigned long +d_cursor_hash(d_cursor_hash_table * table, const struct d_cursor_key *key) +{ + assert("nikita-3555", IS_POW(D_CURSOR_TABLE_SIZE)); + return (key->oid + key->cid) & (D_CURSOR_TABLE_SIZE - 1); +} + +static inline int d_cursor_eq(const struct d_cursor_key *k1, + const struct d_cursor_key *k2) +{ + return k1->cid == k2->cid && k1->oid == k2->oid; +} + +/* + * define functions to manipulate reiser4 super block's hash table of + * dir_cursors + */ +#define KMALLOC(size) kmalloc((size), reiser4_ctx_gfp_mask_get()) +#define KFREE(ptr, size) kfree(ptr) +TYPE_SAFE_HASH_DEFINE(d_cursor, + dir_cursor, + struct d_cursor_key, + key, hash, d_cursor_hash, d_cursor_eq); +#undef KFREE +#undef KMALLOC + +/** + * reiser4_init_super_d_info - initialize per-super-block d_cursor resources + * @super: super block to initialize + * + * Initializes per-super-block d_cursor's hash table and radix tree. It is part + * of mount. + */ +int reiser4_init_super_d_info(struct super_block *super) +{ + struct d_cursor_info *p; + + p = &get_super_private(super)->d_info; + + INIT_RADIX_TREE(&p->tree, reiser4_ctx_gfp_mask_get()); + return d_cursor_hash_init(&p->table, D_CURSOR_TABLE_SIZE); +} + +/** + * reiser4_done_super_d_info - release per-super-block d_cursor resources + * @super: super block being umounted + * + * It is called on umount. Kills all directory cursors attached to suoer block. + */ +void reiser4_done_super_d_info(struct super_block *super) +{ + struct d_cursor_info *d_info; + dir_cursor *cursor, *next; + + d_info = &get_super_private(super)->d_info; + for_all_in_htable(&d_info->table, d_cursor, cursor, next) + kill_cursor(cursor); + + BUG_ON(d_info->tree.rnode != NULL); + d_cursor_hash_done(&d_info->table); +} + +/** + * kill_cursor - free dir_cursor and reiser4_file_fsdata attached to it + * @cursor: cursor to free + * + * Removes reiser4_file_fsdata attached to @cursor from readdir list of + * reiser4_inode, frees that reiser4_file_fsdata. Removes @cursor from from + * indices, hash table, list of unused cursors and frees it. + */ +static void kill_cursor(dir_cursor *cursor) +{ + unsigned long index; + + assert("nikita-3566", cursor->ref == 0); + assert("nikita-3572", cursor->fsdata != NULL); + + index = (unsigned long)cursor->key.oid; + list_del_init(&cursor->fsdata->dir.linkage); + free_fsdata(cursor->fsdata); + cursor->fsdata = NULL; + + if (list_empty_careful(&cursor->list)) + /* this is last cursor for a file. Kill radix-tree entry */ + radix_tree_delete(&cursor->info->tree, index); + else { + void **slot; + + /* + * there are other cursors for the same oid. + */ + + /* + * if radix tree point to the cursor being removed, re-target + * radix tree slot to the next cursor in the (non-empty as was + * checked above) element of the circular list of all cursors + * for this oid. 
+ */ + slot = radix_tree_lookup_slot(&cursor->info->tree, index); + assert("nikita-3571", *slot != NULL); + if (*slot == cursor) + *slot = list_entry(cursor->list.next, dir_cursor, list); + /* remove cursor from circular list */ + list_del_init(&cursor->list); + } + /* remove cursor from the list of unused cursors */ + list_del_init(&cursor->alist); + /* remove cursor from the hash table */ + d_cursor_hash_remove(&cursor->info->table, cursor); + /* and free it */ + kmem_cache_free(d_cursor_cache, cursor); + --d_cursor_unused; +} + +/* possible actions that can be performed on all cursors for the given file */ +enum cursor_action { + /* + * load all detached state: this is called when stat-data is loaded + * from the disk to recover information about all pending readdirs + */ + CURSOR_LOAD, + /* + * detach all state from inode, leaving it in the cache. This is called + * when inode is removed form the memory by memory pressure + */ + CURSOR_DISPOSE, + /* + * detach cursors from the inode, and free them. This is called when + * inode is destroyed + */ + CURSOR_KILL +}; + +/* + * return d_cursor data for the file system @inode is in. + */ +static inline struct d_cursor_info *d_info(struct inode *inode) +{ + return &get_super_private(inode->i_sb)->d_info; +} + +/* + * lookup d_cursor in the per-super-block radix tree. + */ +static inline dir_cursor *lookup(struct d_cursor_info *info, + unsigned long index) +{ + return (dir_cursor *) radix_tree_lookup(&info->tree, index); +} + +/* + * attach @cursor to the radix tree. There may be multiple cursors for the + * same oid, they are chained into circular list. + */ +static void bind_cursor(dir_cursor * cursor, unsigned long index) +{ + dir_cursor *head; + + head = lookup(cursor->info, index); + if (head == NULL) { + /* this is the first cursor for this index */ + INIT_LIST_HEAD(&cursor->list); + radix_tree_insert(&cursor->info->tree, index, cursor); + } else { + /* some cursor already exists. Chain ours */ + list_add(&cursor->list, &head->list); + } +} + +/* + * detach fsdata (if detachable) from file descriptor, and put cursor on the + * "unused" list. Called when file descriptor is not longer in active use. + */ +static void clean_fsdata(struct file *file) +{ + dir_cursor *cursor; + reiser4_file_fsdata *fsdata; + + assert("nikita-3570", file_is_stateless(file)); + + fsdata = (reiser4_file_fsdata *) file->private_data; + if (fsdata != NULL) { + cursor = fsdata->cursor; + if (cursor != NULL) { + spin_lock(&d_c_lock); + --cursor->ref; + if (cursor->ref == 0) { + list_add_tail(&cursor->alist, &cursor_cache); + ++d_cursor_unused; + } + spin_unlock(&d_c_lock); + file->private_data = NULL; + } + } +} + +/* + * global counter used to generate "client ids". These ids are encoded into + * high bits of fpos. + */ +static __u32 cid_counter = 0; +#define CID_SHIFT (20) +#define CID_MASK (0xfffffull) + +static void free_file_fsdata_nolock(struct file *); + +/** + * insert_cursor - allocate file_fsdata, insert cursor to tree and hash table + * @cursor: + * @file: + * @inode: + * + * Allocates reiser4_file_fsdata, attaches it to @cursor, inserts cursor to + * reiser4 super block's hash table and radix tree. + add detachable readdir + * state to the @f + */ +static int insert_cursor(dir_cursor *cursor, struct file *file, loff_t *fpos, + struct inode *inode) +{ + int result; + reiser4_file_fsdata *fsdata; + + memset(cursor, 0, sizeof *cursor); + + /* this is either first call to readdir, or rewind. Anyway, create new + * cursor. 
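+	 *
+	 * On success the new cursor id ends up in the high bits of *fpos
+	 * (*fpos = cid << CID_SHIFT below), leaving the low CID_MASK bits for
+	 * the position within the directory; reiser4_get_dir_fpos() masks the
+	 * cid off again for stateless files.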
*/ + fsdata = create_fsdata(NULL); + if (fsdata != NULL) { + result = radix_tree_preload(reiser4_ctx_gfp_mask_get()); + if (result == 0) { + struct d_cursor_info *info; + oid_t oid; + + info = d_info(inode); + oid = get_inode_oid(inode); + /* cid occupies higher 12 bits of f->f_pos. Don't + * allow it to become negative: this confuses + * nfsd_readdir() */ + cursor->key.cid = (++cid_counter) & 0x7ff; + cursor->key.oid = oid; + cursor->fsdata = fsdata; + cursor->info = info; + cursor->ref = 1; + + spin_lock_inode(inode); + /* install cursor as @f's private_data, discarding old + * one if necessary */ +#if REISER4_DEBUG + if (file->private_data) + warning("", "file has fsdata already"); +#endif + clean_fsdata(file); + free_file_fsdata_nolock(file); + file->private_data = fsdata; + fsdata->cursor = cursor; + spin_unlock_inode(inode); + spin_lock(&d_c_lock); + /* insert cursor into hash table */ + d_cursor_hash_insert(&info->table, cursor); + /* and chain it into radix-tree */ + bind_cursor(cursor, (unsigned long)oid); + spin_unlock(&d_c_lock); + radix_tree_preload_end(); + *fpos = ((__u64) cursor->key.cid) << CID_SHIFT; + } + } else + result = RETERR(-ENOMEM); + return result; +} + +/** + * process_cursors - do action on each cursor attached to inode + * @inode: + * @act: action to do + * + * Finds all cursors of @inode in reiser4's super block radix tree of cursors + * and performs action specified by @act on each of cursors. + */ +static void process_cursors(struct inode *inode, enum cursor_action act) +{ + oid_t oid; + dir_cursor *start; + struct list_head *head; + reiser4_context *ctx; + struct d_cursor_info *info; + + /* this can be called by + * + * kswapd->...->prune_icache->..reiser4_destroy_inode + * + * without reiser4_context + */ + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + warning("vs-23", "failed to init context"); + return; + } + + assert("nikita-3558", inode != NULL); + + info = d_info(inode); + oid = get_inode_oid(inode); + spin_lock_inode(inode); + head = get_readdir_list(inode); + spin_lock(&d_c_lock); + /* find any cursor for this oid: reference to it is hanging of radix + * tree */ + start = lookup(info, (unsigned long)oid); + if (start != NULL) { + dir_cursor *scan; + reiser4_file_fsdata *fsdata; + + /* process circular list of cursors for this oid */ + scan = start; + do { + dir_cursor *next; + + next = list_entry(scan->list.next, dir_cursor, list); + fsdata = scan->fsdata; + assert("nikita-3557", fsdata != NULL); + if (scan->key.oid == oid) { + switch (act) { + case CURSOR_DISPOSE: + list_del_init(&fsdata->dir.linkage); + break; + case CURSOR_LOAD: + list_add(&fsdata->dir.linkage, head); + break; + case CURSOR_KILL: + kill_cursor(scan); + break; + } + } + if (scan == next) + /* last cursor was just killed */ + break; + scan = next; + } while (scan != start); + } + spin_unlock(&d_c_lock); + /* check that we killed 'em all */ + assert("nikita-3568", + ergo(act == CURSOR_KILL, + list_empty_careful(get_readdir_list(inode)))); + assert("nikita-3569", + ergo(act == CURSOR_KILL, lookup(info, oid) == NULL)); + spin_unlock_inode(inode); + reiser4_exit_context(ctx); +} + +/** + * reiser4_dispose_cursors - removes cursors from inode's list + * @inode: inode to dispose cursors of + * + * For each of cursors corresponding to @inode - removes reiser4_file_fsdata + * attached to cursor from inode's readdir list. This is called when inode is + * removed from the memory by memory pressure. 
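+ *
+ * Like reiser4_load_cursors() and reiser4_kill_cursors() below, this is a
+ * thin wrapper around process_cursors(), differing only in the cursor_action
+ * that is passed.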
+ */ +void reiser4_dispose_cursors(struct inode *inode) +{ + process_cursors(inode, CURSOR_DISPOSE); +} + +/** + * reiser4_load_cursors - attach cursors to inode + * @inode: inode to load cursors to + * + * For each of cursors corresponding to @inode - attaches reiser4_file_fsdata + * attached to cursor to inode's readdir list. This is done when inode is + * loaded into memory. + */ +void reiser4_load_cursors(struct inode *inode) +{ + process_cursors(inode, CURSOR_LOAD); +} + +/** + * reiser4_kill_cursors - kill all inode cursors + * @inode: inode to kill cursors of + * + * Frees all cursors for this inode. This is called when inode is destroyed. + */ +void reiser4_kill_cursors(struct inode *inode) +{ + process_cursors(inode, CURSOR_KILL); +} + +/** + * file_is_stateless - + * @file: + * + * true, if file descriptor @f is created by NFS server by "demand" to serve + * one file system operation. This means that there may be "detached state" + * for underlying inode. + */ +static int file_is_stateless(struct file *file) +{ + return reiser4_get_dentry_fsdata(file->f_path.dentry)->stateless; +} + +/** + * reiser4_get_dir_fpos - + * @dir: + * @fpos: effective value of dir->f_pos + * + * Calculates ->fpos from user-supplied cookie. Normally it is dir->f_pos, but + * in the case of stateless directory operation (readdir-over-nfs), client id + * was encoded in the high bits of cookie and should me masked off. + */ +loff_t reiser4_get_dir_fpos(struct file *dir, loff_t fpos) +{ + if (file_is_stateless(dir)) + return fpos & CID_MASK; + else + return fpos; +} + +/** + * reiser4_attach_fsdata - try to attach fsdata + * @file: + * @fpos: effective value of @file->f_pos + * @inode: + * + * Finds or creates cursor for readdir-over-nfs. + */ +int reiser4_attach_fsdata(struct file *file, loff_t *fpos, struct inode *inode) +{ + loff_t pos; + int result; + dir_cursor *cursor; + + /* + * we are serialized by inode->i_mutex + */ + if (!file_is_stateless(file)) + return 0; + + pos = *fpos; + result = 0; + if (pos == 0) { + /* + * first call to readdir (or rewind to the beginning of + * directory) + */ + cursor = kmem_cache_alloc(d_cursor_cache, + reiser4_ctx_gfp_mask_get()); + if (cursor != NULL) + result = insert_cursor(cursor, file, fpos, inode); + else + result = RETERR(-ENOMEM); + } else { + /* try to find existing cursor */ + struct d_cursor_key key; + + key.cid = pos >> CID_SHIFT; + key.oid = get_inode_oid(inode); + spin_lock(&d_c_lock); + cursor = d_cursor_hash_find(&d_info(inode)->table, &key); + if (cursor != NULL) { + /* cursor was found */ + if (cursor->ref == 0) { + /* move it from unused list */ + list_del_init(&cursor->alist); + --d_cursor_unused; + } + ++cursor->ref; + } + spin_unlock(&d_c_lock); + if (cursor != NULL) { + spin_lock_inode(inode); + assert("nikita-3556", cursor->fsdata->back == NULL); + clean_fsdata(file); + free_file_fsdata_nolock(file); + file->private_data = cursor->fsdata; + spin_unlock_inode(inode); + } + } + return result; +} + +/** + * reiser4_detach_fsdata - ??? + * @file: + * + * detach fsdata, if necessary + */ +void reiser4_detach_fsdata(struct file *file) +{ + struct inode *inode; + + if (!file_is_stateless(file)) + return; + + inode = file_inode(file); + spin_lock_inode(inode); + clean_fsdata(file); + spin_unlock_inode(inode); +} + +/* slab for reiser4_dentry_fsdata */ +static struct kmem_cache *dentry_fsdata_cache; + +/** + * reiser4_init_dentry_fsdata - create cache of dentry_fsdata + * + * Initializes slab cache of structures attached to denty->d_fsdata. 
It is + * part of reiser4 module initialization. + */ +int reiser4_init_dentry_fsdata(void) +{ + dentry_fsdata_cache = kmem_cache_create("dentry_fsdata", + sizeof(struct reiser4_dentry_fsdata), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, + NULL); + if (dentry_fsdata_cache == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * reiser4_done_dentry_fsdata - delete cache of dentry_fsdata + * + * This is called on reiser4 module unloading or system shutdown. + */ +void reiser4_done_dentry_fsdata(void) +{ + destroy_reiser4_cache(&dentry_fsdata_cache); +} + +/** + * reiser4_get_dentry_fsdata - get fs-specific dentry data + * @dentry: queried dentry + * + * Allocates if necessary and returns per-dentry data that we attach to each + * dentry. + */ +struct reiser4_dentry_fsdata *reiser4_get_dentry_fsdata(struct dentry *dentry) +{ + assert("nikita-1365", dentry != NULL); + + if (dentry->d_fsdata == NULL) { + dentry->d_fsdata = kmem_cache_alloc(dentry_fsdata_cache, + reiser4_ctx_gfp_mask_get()); + if (dentry->d_fsdata == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + memset(dentry->d_fsdata, 0, + sizeof(struct reiser4_dentry_fsdata)); + } + return dentry->d_fsdata; +} + +/** + * reiser4_free_dentry_fsdata - detach and free dentry_fsdata + * @dentry: dentry to free fsdata of + * + * Detaches and frees fs-specific dentry data + */ +void reiser4_free_dentry_fsdata(struct dentry *dentry) +{ + if (dentry->d_fsdata != NULL) { + kmem_cache_free(dentry_fsdata_cache, dentry->d_fsdata); + dentry->d_fsdata = NULL; + } +} + +/* slab for reiser4_file_fsdata */ +static struct kmem_cache *file_fsdata_cache; + +/** + * reiser4_init_file_fsdata - create cache of reiser4_file_fsdata + * + * Initializes slab cache of structures attached to file->private_data. It is + * part of reiser4 module initialization. + */ +int reiser4_init_file_fsdata(void) +{ + file_fsdata_cache = kmem_cache_create("file_fsdata", + sizeof(reiser4_file_fsdata), + 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (file_fsdata_cache == NULL) + return RETERR(-ENOMEM); + return 0; +} + +/** + * reiser4_done_file_fsdata - delete cache of reiser4_file_fsdata + * + * This is called on reiser4 module unloading or system shutdown. + */ +void reiser4_done_file_fsdata(void) +{ + destroy_reiser4_cache(&file_fsdata_cache); +} + +/** + * create_fsdata - allocate and initialize reiser4_file_fsdata + * @file: what to create file_fsdata for, may be NULL + * + * Allocates and initializes reiser4_file_fsdata structure. + */ +static reiser4_file_fsdata *create_fsdata(struct file *file) +{ + reiser4_file_fsdata *fsdata; + + fsdata = kmem_cache_alloc(file_fsdata_cache, + reiser4_ctx_gfp_mask_get()); + if (fsdata != NULL) { + memset(fsdata, 0, sizeof *fsdata); + fsdata->back = file; + INIT_LIST_HEAD(&fsdata->dir.linkage); + } + return fsdata; +} + +/** + * free_fsdata - free reiser4_file_fsdata + * @fsdata: object to free + * + * Dual to create_fsdata(). Free reiser4_file_fsdata. + */ +static void free_fsdata(reiser4_file_fsdata *fsdata) +{ + BUG_ON(fsdata == NULL); + kmem_cache_free(file_fsdata_cache, fsdata); +} + +/** + * reiser4_get_file_fsdata - get fs-specific file data + * @file: queried file + * + * Returns fs-specific data of @file. If it is NULL, allocates it and attaches + * to @file. 
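+ *
+ * The attach is done under spin_lock_inode() with a re-check of
+ * ->private_data, so if another thread attached fsdata concurrently, the
+ * freshly allocated copy is simply freed again.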
+ */ +reiser4_file_fsdata *reiser4_get_file_fsdata(struct file *file) +{ + assert("nikita-1603", file != NULL); + + if (file->private_data == NULL) { + reiser4_file_fsdata *fsdata; + struct inode *inode; + + fsdata = create_fsdata(file); + if (fsdata == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + inode = file_inode(file); + spin_lock_inode(inode); + if (file->private_data == NULL) { + file->private_data = fsdata; + fsdata = NULL; + } + spin_unlock_inode(inode); + if (fsdata != NULL) + /* other thread initialized ->fsdata */ + kmem_cache_free(file_fsdata_cache, fsdata); + } + assert("nikita-2665", file->private_data != NULL); + return file->private_data; +} + +/** + * free_file_fsdata_nolock - detach and free reiser4_file_fsdata + * @file: + * + * Detaches reiser4_file_fsdata from @file, removes reiser4_file_fsdata from + * readdir list, frees if it is not linked to d_cursor object. + */ +static void free_file_fsdata_nolock(struct file *file) +{ + reiser4_file_fsdata *fsdata; + + assert("", spin_inode_is_locked(file_inode(file))); + fsdata = file->private_data; + if (fsdata != NULL) { + list_del_init(&fsdata->dir.linkage); + if (fsdata->cursor == NULL) + free_fsdata(fsdata); + } + file->private_data = NULL; +} + +/** + * reiser4_free_file_fsdata - detach from struct file and free reiser4_file_fsdata + * @file: + * + * Spinlocks inode and calls free_file_fsdata_nolock to do the work. + */ +void reiser4_free_file_fsdata(struct file *file) +{ + spin_lock_inode(file_inode(file)); + free_file_fsdata_nolock(file); + spin_unlock_inode(file_inode(file)); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/fsdata.h b/fs/reiser4/fsdata.h new file mode 100644 index 000000000000..fa6634e87997 --- /dev/null +++ b/fs/reiser4/fsdata.h @@ -0,0 +1,203 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#if !defined(__REISER4_FSDATA_H__) +#define __REISER4_FSDATA_H__ + +#include "debug.h" +#include "kassign.h" +#include "seal.h" +#include "type_safe_hash.h" +#include "plugin/file/file.h" +#include "readahead.h" + +/* + * comment about reiser4_dentry_fsdata + * + * + */ + +/* + * locking: fields of per file descriptor readdir_pos and ->f_pos are + * protected by ->i_mutex on inode. Under this lock following invariant + * holds: + * + * file descriptor is "looking" at the entry_no-th directory entry from + * the beginning of directory. This entry has key dir_entry_key and is + * pos-th entry with duplicate-key sequence. + * + */ + +/* logical position within directory */ +struct dir_pos { + /* key of directory entry (actually, part of a key sufficient to + identify directory entry) */ + de_id dir_entry_key; + /* ordinal number of directory entry among all entries with the same + key. (Starting from 0.) */ + unsigned pos; +}; + +struct readdir_pos { + /* f_pos corresponding to this readdir position */ + __u64 fpos; + /* logical position within directory */ + struct dir_pos position; + /* logical number of directory entry within + directory */ + __u64 entry_no; +}; + +/* + * this is used to speed up lookups for directory entry: on initial call to + * ->lookup() seal and coord of directory entry (if found, that is) are stored + * in struct dentry and reused later to avoid tree traversals. 
+ */ +struct de_location { + /* seal covering directory entry */ + seal_t entry_seal; + /* coord of directory entry */ + coord_t entry_coord; + /* ordinal number of directory entry among all entries with the same + key. (Starting from 0.) */ + int pos; +}; + +/** + * reiser4_dentry_fsdata - reiser4-specific data attached to dentries + * + * This is allocated dynamically and released in d_op->d_release() + * + * Currently it only contains cached location (hint) of directory entry, but + * it is expected that other information will be accumulated here. + */ +struct reiser4_dentry_fsdata { + /* + * here will go fields filled by ->lookup() to speedup next + * create/unlink, like blocknr of znode with stat-data, or key of + * stat-data. + */ + struct de_location dec; + int stateless; /* created through reiser4_decode_fh, needs + * special treatment in readdir. */ +}; + +extern int reiser4_init_dentry_fsdata(void); +extern void reiser4_done_dentry_fsdata(void); +extern struct reiser4_dentry_fsdata *reiser4_get_dentry_fsdata(struct dentry *); +extern void reiser4_free_dentry_fsdata(struct dentry *dentry); + +/** + * reiser4_file_fsdata - reiser4-specific data attached to file->private_data + * + * This is allocated dynamically and released in inode->i_fop->release + */ +typedef struct reiser4_file_fsdata { + /* + * pointer back to the struct file which this reiser4_file_fsdata is + * part of + */ + struct file *back; + /* detached cursor for stateless readdir. */ + struct dir_cursor *cursor; + /* + * We need both directory and regular file parts here, because there + * are file system objects that are files and directories. + */ + struct { + /* + * position in directory. It is updated each time directory is + * modified + */ + struct readdir_pos readdir; + /* head of this list is reiser4_inode->lists.readdir_list */ + struct list_head linkage; + } dir; + /* hints to speed up operations with regular files: read and write. */ + struct { + hint_t hint; + } reg; +} reiser4_file_fsdata; + +extern int reiser4_init_file_fsdata(void); +extern void reiser4_done_file_fsdata(void); +extern reiser4_file_fsdata *reiser4_get_file_fsdata(struct file *); +extern void reiser4_free_file_fsdata(struct file *); + +/* + * d_cursor is reiser4_file_fsdata not attached to struct file. d_cursors are + * used to address problem reiser4 has with readdir accesses via NFS. See + * plugin/file_ops_readdir.c for more details. 
+ */ +struct d_cursor_key{ + __u16 cid; + __u64 oid; +}; + +/* + * define structures d_cursor_hash_table d_cursor_hash_link which are used to + * maintain hash table of dir_cursor-s in reiser4's super block + */ +typedef struct dir_cursor dir_cursor; +TYPE_SAFE_HASH_DECLARE(d_cursor, dir_cursor); + +struct dir_cursor { + int ref; + reiser4_file_fsdata *fsdata; + + /* link to reiser4 super block hash table of cursors */ + d_cursor_hash_link hash; + + /* + * this is to link cursors to reiser4 super block's radix tree of + * cursors if there are more than one cursor of the same objectid + */ + struct list_head list; + struct d_cursor_key key; + struct d_cursor_info *info; + /* list of unused cursors */ + struct list_head alist; +}; + +extern int reiser4_init_d_cursor(void); +extern void reiser4_done_d_cursor(void); + +extern int reiser4_init_super_d_info(struct super_block *); +extern void reiser4_done_super_d_info(struct super_block *); + +extern loff_t reiser4_get_dir_fpos(struct file *, loff_t); +extern int reiser4_attach_fsdata(struct file *, loff_t *, struct inode *); +extern void reiser4_detach_fsdata(struct file *); + +/* these are needed for "stateless" readdir. See plugin/file_ops_readdir.c for + more details */ +void reiser4_dispose_cursors(struct inode *inode); +void reiser4_load_cursors(struct inode *inode); +void reiser4_kill_cursors(struct inode *inode); +void reiser4_adjust_dir_file(struct inode *dir, const struct dentry *de, + int offset, int adj); + +/* + * this structure is embedded to reise4_super_info_data. It maintains d_cursors + * (detached readdir state). See plugin/file_ops_readdir.c for more details. + */ +struct d_cursor_info { + d_cursor_hash_table table; + struct radix_tree_root tree; +}; + +/* spinlock protecting readdir cursors */ +extern spinlock_t d_c_lock; + +/* __REISER4_FSDATA_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/init_super.c b/fs/reiser4/init_super.c new file mode 100644 index 000000000000..e9945960ceaa --- /dev/null +++ b/fs/reiser4/init_super.c @@ -0,0 +1,806 @@ +/* Copyright by Hans Reiser, 2003 */ + +#include "super.h" +#include "inode.h" +#include "plugin/plugin_set.h" + +#include <linux/swap.h> + +/** + * init_fs_info - allocate reiser4 specific super block + * @super: super block of filesystem + * + * Allocates and initialize reiser4_super_info_data, attaches it to + * super->s_fs_info, initializes structures maintaining d_cursor-s. + */ +int reiser4_init_fs_info(struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = kzalloc(sizeof(reiser4_super_info_data), + reiser4_ctx_gfp_mask_get()); + if (!sbinfo) + return RETERR(-ENOMEM); + + super->s_fs_info = sbinfo; + super->s_op = NULL; + + ON_DEBUG(INIT_LIST_HEAD(&sbinfo->all_jnodes)); + ON_DEBUG(spin_lock_init(&sbinfo->all_guard)); + + mutex_init(&sbinfo->delete_mutex); + spin_lock_init(&(sbinfo->guard)); + + /* initialize per-super-block d_cursor resources */ + reiser4_init_super_d_info(super); + + return 0; +} + +/** + * Release reiser4 specific super block + * + * release per-super-block d_cursor resources + * free reiser4_super_info_data. 
+ */ +void reiser4_done_fs_info(struct super_block *super) +{ + assert("zam-990", super->s_fs_info != NULL); + + reiser4_done_super_d_info(super); + kfree(super->s_fs_info); + super->s_fs_info = NULL; +} + +/* type of option parseable by parse_option() */ +typedef enum { + /* value of option is arbitrary string */ + OPT_STRING, + + /* + * option specifies bit in a bitmask. When option is set - bit in + * sbinfo->fs_flags is set. Examples are bsdgroups, 32bittimes, mtflush, + * dont_load_bitmap, atomic_write. + */ + OPT_BIT, + + /* + * value of option should conform to sprintf() format. Examples are + * tmgr.atom_max_size=N, tmgr.atom_max_age=N + */ + OPT_FORMAT, + + /* + * option can take one of predefined values. Example is onerror=panic or + * onerror=remount-ro + */ + OPT_ONEOF, + + /* + * option take one of txmod plugin labels. + * Example is "txmod=journal" or "txmod=wa" + */ + OPT_TXMOD, +} opt_type_t; + +#if 0 +struct opt_bitmask_bit { + const char *bit_name; + int bit_nr; +}; +#endif + +#define MAX_ONEOF_LIST 10 + +/* description of option parseable by parse_option() */ +struct opt_desc { + /* option name. + + parsed portion of string has a form "name=value". + */ + const char *name; + /* type of option */ + opt_type_t type; + union { + /* where to store value of string option (type == OPT_STRING) */ + char **string; + /* description of bits for bit option (type == OPT_BIT) */ + struct { + int nr; + void *addr; + } bit; + /* description of format and targets for format option (type + == OPT_FORMAT) */ + struct { + const char *format; + int nr_args; + void *arg1; + void *arg2; + void *arg3; + void *arg4; + } f; + struct { + int *result; + const char *list[MAX_ONEOF_LIST]; + } oneof; + struct { + reiser4_txmod_id *result; + } txmod; + struct { + void *addr; + int nr_bits; + /* struct opt_bitmask_bit *bits; */ + } bitmask; + } u; +}; + +/** + * parse_option - parse one option + * @opt_strin: starting point of parsing + * @opt: option description + * + * foo=bar, + * ^ ^ ^ + * | | +-- replaced to '\0' + * | +-- val_start + * +-- opt_string + * Figures out option type and handles option correspondingly. + */ +static int parse_option(char *opt_string, struct opt_desc *opt) +{ + char *val_start; + int result; + const char *err_msg; + + /* NOTE-NIKITA think about using lib/cmdline.c functions here. 
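+
+	   For example, an OPT_FORMAT option such as "tmgr.atom_max_size=1024"
+	   (declared via push_sb_field_opts() below; the value is only an
+	   illustration) leaves val_start pointing at "1024", and sscanf()
+	   then stores it through u.f.arg1.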
*/ + + val_start = strchr(opt_string, '='); + if (val_start != NULL) { + *val_start = '\0'; + ++val_start; + } + + err_msg = NULL; + result = 0; + switch (opt->type) { + case OPT_STRING: + if (val_start == NULL) { + err_msg = "String arg missing"; + result = RETERR(-EINVAL); + } else + *opt->u.string = val_start; + break; + case OPT_BIT: + if (val_start != NULL) + err_msg = "Value ignored"; + else + set_bit(opt->u.bit.nr, opt->u.bit.addr); + break; + case OPT_FORMAT: + if (val_start == NULL) { + err_msg = "Formatted arg missing"; + result = RETERR(-EINVAL); + break; + } + if (sscanf(val_start, opt->u.f.format, + opt->u.f.arg1, opt->u.f.arg2, opt->u.f.arg3, + opt->u.f.arg4) != opt->u.f.nr_args) { + err_msg = "Wrong conversion"; + result = RETERR(-EINVAL); + } + break; + case OPT_ONEOF: + { + int i = 0; + + if (val_start == NULL) { + err_msg = "Value is missing"; + result = RETERR(-EINVAL); + break; + } + err_msg = "Wrong option value"; + result = RETERR(-EINVAL); + while (opt->u.oneof.list[i]) { + if (!strcmp(opt->u.oneof.list[i], val_start)) { + result = 0; + err_msg = NULL; + *opt->u.oneof.result = i; + break; + } + i++; + } + break; + } + break; + case OPT_TXMOD: + { + reiser4_txmod_id i = 0; + + if (val_start == NULL) { + err_msg = "Value is missing"; + result = RETERR(-EINVAL); + break; + } + err_msg = "Wrong option value"; + result = RETERR(-EINVAL); + while (i < LAST_TXMOD_ID) { + if (!strcmp(txmod_plugins[i].h.label, + val_start)) { + result = 0; + err_msg = NULL; + *opt->u.txmod.result = i; + break; + } + i++; + } + break; + } + default: + wrong_return_value("nikita-2100", "opt -> type"); + break; + } + if (err_msg != NULL) { + warning("nikita-2496", "%s when parsing option \"%s%s%s\"", + err_msg, opt->name, val_start ? "=" : "", + val_start ? : ""); + } + return result; +} + +/** + * parse_options - parse reiser4 mount options + * @opt_string: starting point + * @opts: array of option description + * @nr_opts: number of elements in @opts + * + * Parses comma separated list of reiser4 mount options. + */ +static int parse_options(char *opt_string, struct opt_desc *opts, int nr_opts) +{ + int result; + + result = 0; + while ((result == 0) && opt_string && *opt_string) { + int j; + char *next; + + next = strchr(opt_string, ','); + if (next != NULL) { + *next = '\0'; + ++next; + } + for (j = 0; j < nr_opts; ++j) { + if (!strncmp(opt_string, opts[j].name, + strlen(opts[j].name))) { + result = parse_option(opt_string, &opts[j]); + break; + } + } + if (j == nr_opts) { + warning("nikita-2307", "Unrecognized option: \"%s\"", + opt_string); + /* traditionally, -EINVAL is returned on wrong mount + option */ + result = RETERR(-EINVAL); + } + opt_string = next; + } + return result; +} + +#define NUM_OPT(label, fmt, addr) \ + { \ + .name = (label), \ + .type = OPT_FORMAT, \ + .u = { \ + .f = { \ + .format = (fmt), \ + .nr_args = 1, \ + .arg1 = (addr), \ + .arg2 = NULL, \ + .arg3 = NULL, \ + .arg4 = NULL \ + } \ + } \ + } + +#define SB_FIELD_OPT(field, fmt) NUM_OPT(#field, fmt, &sbinfo->field) + +#define BIT_OPT(label, bitnr) \ + { \ + .name = label, \ + .type = OPT_BIT, \ + .u = { \ + .bit = { \ + .nr = bitnr, \ + .addr = &sbinfo->fs_flags \ + } \ + } \ + } + +#define MAX_NR_OPTIONS (30) + +#if REISER4_DEBUG +# define OPT_ARRAY_CHECK(opt, array) \ + if ((opt) > (array) + MAX_NR_OPTIONS) { \ + warning("zam-1046", "opt array is overloaded"); break; \ + } +#else +# define OPT_ARRAY_CHECK(opt, array) noop +#endif + +#define PUSH_OPT(opt, array, ...) 
\
+do {							\
+	struct opt_desc o = __VA_ARGS__;		\
+	OPT_ARRAY_CHECK(opt, array);			\
+	*(opt) ++ = o;					\
+} while (0)
+
+static noinline void push_sb_field_opts(struct opt_desc **p,
+					struct opt_desc *opts,
+					reiser4_super_info_data *sbinfo)
+{
+#define PUSH_SB_FIELD_OPT(field, format)		\
+	PUSH_OPT(*p, opts, SB_FIELD_OPT(field, format))
+	/*
+	 * tmgr.atom_max_size=N
+	 * Atoms containing more than N blocks will be forced to commit. N is
+	 * decimal.
+	 */
+	PUSH_SB_FIELD_OPT(tmgr.atom_max_size, "%u");
+	/*
+	 * tmgr.atom_max_age=N
+	 * Atoms older than N seconds will be forced to commit. N is decimal.
+	 */
+	PUSH_SB_FIELD_OPT(tmgr.atom_max_age, "%u");
+	/*
+	 * tmgr.atom_min_size=N
+	 * When committing an atom to free dirty pages, force an atom smaller
+	 * than N blocks to fuse with another one.
+	 */
+	PUSH_SB_FIELD_OPT(tmgr.atom_min_size, "%u");
+	/*
+	 * tmgr.atom_max_flushers=N
+	 * Limit of concurrent flushers for one atom. 0 means no limit.
+	 */
+	PUSH_SB_FIELD_OPT(tmgr.atom_max_flushers, "%u");
+	/*
+	 * tree.cbk_cache_slots=N
+	 * Number of slots in the cbk cache.
+	 */
+	PUSH_SB_FIELD_OPT(tree.cbk_cache.nr_slots, "%u");
+	/*
+	 * If flush finds more than FLUSH_RELOCATE_THRESHOLD adjacent dirty
+	 * leaf-level blocks it will force them to be relocated.
+	 */
+	PUSH_SB_FIELD_OPT(flush.relocate_threshold, "%u");
+	/*
+	 * If flush can find a block allocation closer than at most
+	 * FLUSH_RELOCATE_DISTANCE from the preceder it will relocate to that
+	 * position.
+	 */
+	PUSH_SB_FIELD_OPT(flush.relocate_distance, "%u");
+	/*
+	 * If we have written this many or more blocks before encountering a
+	 * busy jnode in flush list - abort flushing hoping that next time we
+	 * get called this jnode will be clean already, and we will save some
+	 * seeks.
+	 */
+	PUSH_SB_FIELD_OPT(flush.written_threshold, "%u");
+	/* The maximum number of nodes to scan left on a level during flush. */
+	PUSH_SB_FIELD_OPT(flush.scan_maxnodes, "%u");
+	/* preferred IO size */
+	PUSH_SB_FIELD_OPT(optimal_io_size, "%u");
+	/* carry flags used for insertion of new nodes */
+	PUSH_SB_FIELD_OPT(tree.carry.new_node_flags, "%u");
+	/* carry flags used for insertion of new extents */
+	PUSH_SB_FIELD_OPT(tree.carry.new_extent_flags, "%u");
+	/* carry flags used for paste operations */
+	PUSH_SB_FIELD_OPT(tree.carry.paste_flags, "%u");
+	/* carry flags used for insert operations */
+	PUSH_SB_FIELD_OPT(tree.carry.insert_flags, "%u");
+
+#ifdef CONFIG_REISER4_BADBLOCKS
+	/*
+	 * Alternative master superblock location in case its original
+	 * location is not writeable/accessible. This is an offset in BYTES.
+	 */
+	PUSH_SB_FIELD_OPT(altsuper, "%lu");
+#endif
+}
+
+/**
+ * reiser4_init_super_data - initialize reiser4 private super block
+ * @super: super block to initialize
+ * @opt_string: list of reiser4 mount options
+ *
+ * Sets various reiser4 parameters to default values. Parses mount options and
+ * overwrites default settings.
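+ *
+ * A purely illustrative example of an @opt_string as it might arrive at
+ * mount time (values made up; option names are the ones defined in this
+ * file):
+ *
+ *   tmgr.atom_max_age=600,txmod=journal,dont_load_bitmap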
+ */ +int reiser4_init_super_data(struct super_block *super, char *opt_string) +{ + int result; + struct opt_desc *opts, *p; + reiser4_super_info_data *sbinfo = get_super_private(super); + + /* initialize super, export, dentry operations */ + sbinfo->ops.super = reiser4_super_operations; + sbinfo->ops.export = reiser4_export_operations; + sbinfo->ops.dentry = reiser4_dentry_operations; + super->s_op = &sbinfo->ops.super; + super->s_export_op = &sbinfo->ops.export; + + /* initialize transaction manager parameters to default values */ + sbinfo->tmgr.atom_max_size = totalram_pages / 4; + sbinfo->tmgr.atom_max_age = REISER4_ATOM_MAX_AGE / HZ; + sbinfo->tmgr.atom_min_size = 256; + sbinfo->tmgr.atom_max_flushers = ATOM_MAX_FLUSHERS; + + /* initialize cbk cache parameter */ + sbinfo->tree.cbk_cache.nr_slots = CBK_CACHE_SLOTS; + + /* initialize flush parameters */ + sbinfo->flush.relocate_threshold = FLUSH_RELOCATE_THRESHOLD; + sbinfo->flush.relocate_distance = FLUSH_RELOCATE_DISTANCE; + sbinfo->flush.written_threshold = FLUSH_WRITTEN_THRESHOLD; + sbinfo->flush.scan_maxnodes = FLUSH_SCAN_MAXNODES; + + sbinfo->optimal_io_size = REISER4_OPTIMAL_IO_SIZE; + + /* preliminary tree initializations */ + sbinfo->tree.super = super; + sbinfo->tree.carry.new_node_flags = REISER4_NEW_NODE_FLAGS; + sbinfo->tree.carry.new_extent_flags = REISER4_NEW_EXTENT_FLAGS; + sbinfo->tree.carry.paste_flags = REISER4_PASTE_FLAGS; + sbinfo->tree.carry.insert_flags = REISER4_INSERT_FLAGS; + rwlock_init(&(sbinfo->tree.tree_lock)); + spin_lock_init(&(sbinfo->tree.epoch_lock)); + + /* initialize default readahead params */ + sbinfo->ra_params.max = totalram_pages / 4; + sbinfo->ra_params.flags = 0; + + /* allocate memory for structure describing reiser4 mount options */ + opts = kmalloc(sizeof(struct opt_desc) * MAX_NR_OPTIONS, + reiser4_ctx_gfp_mask_get()); + if (opts == NULL) + return RETERR(-ENOMEM); + + /* initialize structure describing reiser4 mount options */ + p = opts; + + push_sb_field_opts(&p, opts, sbinfo); + /* turn on BSD-style gid assignment */ + +#define PUSH_BIT_OPT(name, bit) \ + PUSH_OPT(p, opts, BIT_OPT(name, bit)) + + PUSH_BIT_OPT("bsdgroups", REISER4_BSD_GID); + /* turn on 32 bit times */ + PUSH_BIT_OPT("32bittimes", REISER4_32_BIT_TIMES); + /* + * Don't load all bitmap blocks at mount time, it is useful for + * machines with tiny RAM and large disks. 
+ */
+	PUSH_BIT_OPT("dont_load_bitmap", REISER4_DONT_LOAD_BITMAP);
+	/* disable transaction commits during write() */
+	PUSH_BIT_OPT("atomic_write", REISER4_ATOMIC_WRITE);
+	/* enable issuing of discard requests */
+	PUSH_BIT_OPT("discard", REISER4_DISCARD);
+	/* disable hole punching at flush time */
+	PUSH_BIT_OPT("dont_punch_holes", REISER4_DONT_PUNCH_HOLES);
+
+	PUSH_OPT(p, opts,
+		 {
+			 /*
+			  * tree traversal readahead parameters:
+			  * -o readahead:MAXNUM:FLAGS
+			  * MAXNUM - max number of nodes to request readahead for: -1UL
+			  * will set it to max_sane_readahead()
+			  * FLAGS - combination of bits: RA_ADJCENT_ONLY, RA_ALL_LEVELS,
+			  * CONTINUE_ON_PRESENT
+			  */
+			 .name = "readahead",
+			 .type = OPT_FORMAT,
+			 .u = {
+				 .f = {
+					 .format = "%u:%u",
+					 .nr_args = 2,
+					 .arg1 = &sbinfo->ra_params.max,
+					 .arg2 = &sbinfo->ra_params.flags,
+					 .arg3 = NULL,
+					 .arg4 = NULL
+				 }
+			 }
+		 }
+	);
+
+	/* What to do in case of fs error */
+	PUSH_OPT(p, opts,
+		 {
+			 .name = "onerror",
+			 .type = OPT_ONEOF,
+			 .u = {
+				 .oneof = {
+					 .result = &sbinfo->onerror,
+					 .list = {
+						 "remount-ro", "panic", NULL
+					 },
+				 }
+			 }
+		 }
+	);
+
+	/*
+	 * What transaction model (journal, cow, etc) is used to commit
+	 * transactions
+	 */
+	PUSH_OPT(p, opts,
+		 {
+			 .name = "txmod",
+			 .type = OPT_TXMOD,
+			 .u = {
+				 .txmod = {
+					 .result = &sbinfo->txmod
+				 }
+			 }
+		 }
+	);
+
+	/* modify default settings to values set by mount options */
+	result = parse_options(opt_string, opts, p - opts);
+	kfree(opts);
+	if (result != 0)
+		return result;
+
+	/* correct settings to sane values */
+	sbinfo->tmgr.atom_max_age *= HZ;
+	if (sbinfo->tmgr.atom_max_age <= 0)
+		/* overflow */
+		sbinfo->tmgr.atom_max_age = REISER4_ATOM_MAX_AGE;
+
+	/* round optimal io size down to a multiple of 512 bytes */
+	sbinfo->optimal_io_size >>= VFS_BLKSIZE_BITS;
+	sbinfo->optimal_io_size <<= VFS_BLKSIZE_BITS;
+	if (sbinfo->optimal_io_size == 0) {
+		warning("nikita-2497", "optimal_io_size is too small");
+		return RETERR(-EINVAL);
+	}
+	return result;
+}
+
+/**
+ * reiser4_init_read_super - read reiser4 master super block
+ * @super: super block to fill
+ * @silent: if 0 - print warnings
+ *
+ * Reads reiser4 master super block either from predefined location or from
+ * location specified by altsuper mount option, initializes disk format plugin.
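+ *
+ * With the usual 4096-byte block size this means reading block 16 of the
+ * device (the "16-th 4096 block" mentioned in the code below); if the
+ * blocksize recorded in the master super block differs from the current one,
+ * the block size is changed with sb_set_blocksize() and the read is retried.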
+ */ +int reiser4_init_read_super(struct super_block *super, int silent) +{ + struct buffer_head *super_bh; + struct reiser4_master_sb *master_sb; + reiser4_super_info_data *sbinfo = get_super_private(super); + unsigned long blocksize; + + read_super_block: +#ifdef CONFIG_REISER4_BADBLOCKS + if (sbinfo->altsuper) + /* + * read reiser4 master super block at position specified by + * mount option + */ + super_bh = sb_bread(super, + (sector_t)(sbinfo->altsuper / super->s_blocksize)); + else +#endif + /* read reiser4 master super block at 16-th 4096 block */ + super_bh = sb_bread(super, + (sector_t)(REISER4_MAGIC_OFFSET / super->s_blocksize)); + if (!super_bh) + return RETERR(-EIO); + + master_sb = (struct reiser4_master_sb *)super_bh->b_data; + /* check reiser4 magic string */ + if (!strncmp(master_sb->magic, REISER4_SUPER_MAGIC_STRING, + sizeof(REISER4_SUPER_MAGIC_STRING))) { + /* reiser4 master super block contains filesystem blocksize */ + blocksize = le16_to_cpu(get_unaligned(&master_sb->blocksize)); + + if (blocksize != PAGE_SIZE) { + /* + * currenly reiser4's blocksize must be equal to + * pagesize + */ + if (!silent) + warning("nikita-2609", + "%s: wrong block size %ld\n", super->s_id, + blocksize); + brelse(super_bh); + return RETERR(-EINVAL); + } + if (blocksize != super->s_blocksize) { + /* + * filesystem uses different blocksize. Reread master + * super block with correct blocksize + */ + brelse(super_bh); + if (!sb_set_blocksize(super, (int)blocksize)) + return RETERR(-EINVAL); + goto read_super_block; + } + + sbinfo->df_plug = + disk_format_plugin_by_unsafe_id( + le16_to_cpu(get_unaligned(&master_sb->disk_plugin_id))); + if (sbinfo->df_plug == NULL) { + if (!silent) + warning("nikita-26091", + "%s: unknown disk format plugin %d\n", + super->s_id, + le16_to_cpu(get_unaligned(&master_sb->disk_plugin_id))); + brelse(super_bh); + return RETERR(-EINVAL); + } + sbinfo->diskmap_block = le64_to_cpu(get_unaligned(&master_sb->diskmap)); + brelse(super_bh); + return 0; + } + + /* there is no reiser4 on the device */ + if (!silent) + warning("nikita-2608", + "%s: wrong master super block magic", super->s_id); + brelse(super_bh); + return RETERR(-EINVAL); +} + +static struct { + reiser4_plugin_type type; + reiser4_plugin_id id; +} default_plugins[PSET_LAST] = { + [PSET_FILE] = { + .type = REISER4_FILE_PLUGIN_TYPE, + .id = UNIX_FILE_PLUGIN_ID + }, + [PSET_DIR] = { + .type = REISER4_DIR_PLUGIN_TYPE, + .id = HASHED_DIR_PLUGIN_ID + }, + [PSET_HASH] = { + .type = REISER4_HASH_PLUGIN_TYPE, + .id = R5_HASH_ID + }, + [PSET_FIBRATION] = { + .type = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_DOT_O + }, + [PSET_PERM] = { + .type = REISER4_PERM_PLUGIN_TYPE, + .id = NULL_PERM_ID + }, + [PSET_FORMATTING] = { + .type = REISER4_FORMATTING_PLUGIN_TYPE, + .id = SMALL_FILE_FORMATTING_ID + }, + [PSET_SD] = { + .type = REISER4_ITEM_PLUGIN_TYPE, + .id = STATIC_STAT_DATA_ID + }, + [PSET_DIR_ITEM] = { + .type = REISER4_ITEM_PLUGIN_TYPE, + .id = COMPOUND_DIR_ID + }, + [PSET_CIPHER] = { + .type = REISER4_CIPHER_PLUGIN_TYPE, + .id = NONE_CIPHER_ID + }, + [PSET_DIGEST] = { + .type = REISER4_DIGEST_PLUGIN_TYPE, + .id = SHA256_32_DIGEST_ID + }, + [PSET_COMPRESSION] = { + .type = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = LZO1_COMPRESSION_ID + }, + [PSET_COMPRESSION_MODE] = { + .type = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = CONVX_COMPRESSION_MODE_ID + }, + [PSET_CLUSTER] = { + .type = REISER4_CLUSTER_PLUGIN_TYPE, + .id = CLUSTER_64K_ID + }, + [PSET_CREATE] = { + .type = REISER4_FILE_PLUGIN_TYPE, + .id = 
UNIX_FILE_PLUGIN_ID + } +}; + +/* access to default plugin table */ +reiser4_plugin *get_default_plugin(pset_member memb) +{ + return plugin_by_id(default_plugins[memb].type, + default_plugins[memb].id); +} + +/** + * reiser4_init_root_inode - obtain inode of root directory + * @super: super block of filesystem + * + * Obtains inode of root directory (reading it from disk), initializes plugin + * set it was not initialized. + */ +int reiser4_init_root_inode(struct super_block *super) +{ + reiser4_super_info_data *sbinfo = get_super_private(super); + struct inode *inode; + int result = 0; + + inode = reiser4_iget(super, sbinfo->df_plug->root_dir_key(super), 0); + if (IS_ERR(inode)) + return RETERR(PTR_ERR(inode)); + + super->s_root = d_make_root(inode); + if (!super->s_root) { + return RETERR(-ENOMEM); + } + + super->s_root->d_op = &sbinfo->ops.dentry; + + if (!is_inode_loaded(inode)) { + pset_member memb; + plugin_set *pset; + + pset = reiser4_inode_data(inode)->pset; + for (memb = 0; memb < PSET_LAST; ++memb) { + + if (aset_get(pset, memb) != NULL) + continue; + + result = grab_plugin_pset(inode, NULL, memb); + if (result != 0) + break; + + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); + } + + if (result == 0) { + if (REISER4_DEBUG) { + for (memb = 0; memb < PSET_LAST; ++memb) + assert("nikita-3500", + aset_get(pset, memb) != NULL); + } + } else + warning("nikita-3448", "Cannot set plugins of root: %i", + result); + reiser4_iget_complete(inode); + + /* As the default pset kept in the root dir may has been changed + (length is unknown), call update_sd. */ + if (!reiser4_inode_get_flag(inode, REISER4_SDLEN_KNOWN)) { + result = reiser4_grab_space( + inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); + + if (result == 0) + result = reiser4_update_sd(inode); + + all_grabbed2free(); + } + } + + super->s_maxbytes = MAX_LFS_FILESIZE; + return result; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/inode.c b/fs/reiser4/inode.c new file mode 100644 index 000000000000..cc4a401da2ba --- /dev/null +++ b/fs/reiser4/inode.c @@ -0,0 +1,711 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Inode specific operations. 
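+
+   Roughly: reiser4-specific inode flags, the oid <-> i_ino mapping,
+   reading stat-data from the tree (reiser4_iget() and friends), and
+   accessors for the per-inode plugin set all live in this file.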
*/ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "kassign.h" +#include "coord.h" +#include "seal.h" +#include "dscale.h" +#include "plugin/item/item.h" +#include "plugin/security/perm.h" +#include "plugin/plugin.h" +#include "plugin/object.h" +#include "znode.h" +#include "vfs_ops.h" +#include "inode.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/fs.h> /* for struct super_block, address_space */ + +/* return reiser4 internal tree which inode belongs to */ +/* Audited by: green(2002.06.17) */ +reiser4_tree *reiser4_tree_by_inode(const struct inode *inode/* inode queried*/) +{ + assert("nikita-256", inode != NULL); + assert("nikita-257", inode->i_sb != NULL); + return reiser4_get_tree(inode->i_sb); +} + +/* return reiser4-specific inode flags */ +static inline unsigned long *inode_flags(const struct inode *const inode) +{ + assert("nikita-2842", inode != NULL); + return &reiser4_inode_data(inode)->flags; +} + +/* set reiser4-specific flag @f in @inode */ +void reiser4_inode_set_flag(struct inode *inode, reiser4_file_plugin_flags f) +{ + assert("nikita-2248", inode != NULL); + set_bit((int)f, inode_flags(inode)); +} + +/* clear reiser4-specific flag @f in @inode */ +void reiser4_inode_clr_flag(struct inode *inode, reiser4_file_plugin_flags f) +{ + assert("nikita-2250", inode != NULL); + clear_bit((int)f, inode_flags(inode)); +} + +/* true if reiser4-specific flag @f is set in @inode */ +int reiser4_inode_get_flag(const struct inode *inode, + reiser4_file_plugin_flags f) +{ + assert("nikita-2251", inode != NULL); + return test_bit((int)f, inode_flags(inode)); +} + +/* convert oid to inode number */ +ino_t oid_to_ino(oid_t oid) +{ + return (ino_t) oid; +} + +/* convert oid to user visible inode number */ +ino_t oid_to_uino(oid_t oid) +{ + /* reiser4 object is uniquely identified by oid which is 64 bit + quantity. Kernel in-memory inode is indexed (in the hash table) by + 32 bit i_ino field, but this is not a problem, because there is a + way to further distinguish inodes with identical inode numbers + (find_actor supplied to iget()). + + But user space expects unique 32 bit inode number. Obviously this + is impossible. Work-around is to somehow hash oid into user visible + inode number. + */ + oid_t max_ino = (ino_t) ~0; + + if (REISER4_INO_IS_OID || (oid <= max_ino)) + return oid; + else + /* this is remotely similar to algorithm used to find next pid + to use for process: after wrap-around start from some + offset rather than from 0. Idea is that there are some long + living objects with which we don't want to collide. + */ + return REISER4_UINO_SHIFT + ((oid - max_ino) & (max_ino >> 1)); +} + +/* check that "inode" is on reiser4 file-system */ +int is_reiser4_inode(const struct inode *inode/* inode queried */) +{ + return inode != NULL && is_reiser4_super(inode->i_sb); +} + +/* Maximal length of a name that can be stored in directory @inode. + + This is used in check during file creation and lookup. */ +int reiser4_max_filename_len(const struct inode *inode/* inode queried */) +{ + assert("nikita-287", is_reiser4_inode(inode)); + assert("nikita-1710", inode_dir_item_plugin(inode)); + if (inode_dir_item_plugin(inode)->s.dir.max_name_len) + return inode_dir_item_plugin(inode)->s.dir.max_name_len(inode); + else + return 255; +} + +#if REISER4_USE_COLLISION_LIMIT +/* Maximal number of hash collisions for this directory. 
*/ +int max_hash_collisions(const struct inode *dir/* inode queried */) +{ + assert("nikita-1711", dir != NULL); + return reiser4_inode_data(dir)->plugin.max_collisions; +} +#endif /* REISER4_USE_COLLISION_LIMIT */ + +/* Install file, inode, and address_space operation on @inode, depending on + its mode. */ +int setup_inode_ops(struct inode *inode /* inode to intialize */ , + reiser4_object_create_data * data /* parameters to create + * object */ ) +{ + reiser4_super_info_data *sinfo; + file_plugin *fplug; + dir_plugin *dplug; + + fplug = inode_file_plugin(inode); + dplug = inode_dir_plugin(inode); + + sinfo = get_super_private(inode->i_sb); + + switch (inode->i_mode & S_IFMT) { + case S_IFSOCK: + case S_IFBLK: + case S_IFCHR: + case S_IFIFO: + { + dev_t rdev; /* to keep gcc happy */ + + assert("vs-46", fplug != NULL); + /* ugly hack with rdev */ + if (data == NULL) { + rdev = inode->i_rdev; + inode->i_rdev = 0; + } else + rdev = data->rdev; + inode->i_blocks = 0; + assert("vs-42", fplug->h.id == SPECIAL_FILE_PLUGIN_ID); + inode->i_op = file_plugins[fplug->h.id].inode_ops; + /* initialize inode->i_fop and inode->i_rdev for block + and char devices */ + init_special_inode(inode, inode->i_mode, rdev); + /* all address space operations are null */ + inode->i_mapping->a_ops = + file_plugins[fplug->h.id].as_ops; + break; + } + case S_IFLNK: + assert("vs-46", fplug != NULL); + assert("vs-42", fplug->h.id == SYMLINK_FILE_PLUGIN_ID); + inode->i_op = file_plugins[fplug->h.id].inode_ops; + inode->i_fop = NULL; + /* all address space operations are null */ + inode->i_mapping->a_ops = file_plugins[fplug->h.id].as_ops; + break; + case S_IFDIR: + assert("vs-46", dplug != NULL); + assert("vs-43", (dplug->h.id == HASHED_DIR_PLUGIN_ID || + dplug->h.id == SEEKABLE_HASHED_DIR_PLUGIN_ID)); + inode->i_op = dir_plugins[dplug->h.id].inode_ops; + inode->i_fop = dir_plugins[dplug->h.id].file_ops; + inode->i_mapping->a_ops = dir_plugins[dplug->h.id].as_ops; + break; + case S_IFREG: + assert("vs-46", fplug != NULL); + assert("vs-43", (fplug->h.id == UNIX_FILE_PLUGIN_ID || + fplug->h.id == CRYPTCOMPRESS_FILE_PLUGIN_ID)); + inode->i_op = file_plugins[fplug->h.id].inode_ops; + inode->i_fop = file_plugins[fplug->h.id].file_ops; + inode->i_mapping->a_ops = file_plugins[fplug->h.id].as_ops; + break; + default: + warning("nikita-291", "wrong file mode: %o for %llu", + inode->i_mode, + (unsigned long long)get_inode_oid(inode)); + reiser4_make_bad_inode(inode); + return RETERR(-EINVAL); + } + return 0; +} + +/* Initialize inode from disk data. Called with inode locked. + Return inode locked. 
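+
+   In outline: the stat-data item is mapped with zload()/item_body_by_coord()
+   and handed to the stat-data item plugin's ->s.sd.init_inode() method; on
+   success setup_inode_ops() installs the file/dir/address-space operations.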
*/ +static int init_inode(struct inode *inode /* inode to intialise */ , + coord_t *coord/* coord of stat data */) +{ + int result; + item_plugin *iplug; + void *body; + int length; + reiser4_inode *state; + + assert("nikita-292", coord != NULL); + assert("nikita-293", inode != NULL); + + coord_clear_iplug(coord); + result = zload(coord->node); + if (result) + return result; + iplug = item_plugin_by_coord(coord); + body = item_body_by_coord(coord); + length = item_length_by_coord(coord); + + assert("nikita-295", iplug != NULL); + assert("nikita-296", body != NULL); + assert("nikita-297", length > 0); + + /* inode is under I_LOCK now */ + + state = reiser4_inode_data(inode); + /* call stat-data plugin method to load sd content into inode */ + result = iplug->s.sd.init_inode(inode, body, length); + set_plugin(&state->pset, PSET_SD, item_plugin_to_plugin(iplug)); + if (result == 0) { + result = setup_inode_ops(inode, NULL); + if (result == 0 && inode->i_sb->s_root && + inode->i_sb->s_root->d_inode) + result = finish_pset(inode); + } + zrelse(coord->node); + return result; +} + +/* read `inode' from the disk. This is what was previously in + reiserfs_read_inode2(). + + Must be called with inode locked. Return inode still locked. +*/ +static int read_inode(struct inode *inode /* inode to read from disk */ , + const reiser4_key * key /* key of stat data */ , + int silent) +{ + int result; + lock_handle lh; + reiser4_inode *info; + coord_t coord; + + assert("nikita-298", inode != NULL); + assert("nikita-1945", !is_inode_loaded(inode)); + + info = reiser4_inode_data(inode); + assert("nikita-300", info->locality_id != 0); + + coord_init_zero(&coord); + init_lh(&lh); + /* locate stat-data in a tree and return znode locked */ + result = lookup_sd(inode, ZNODE_READ_LOCK, &coord, &lh, key, silent); + assert("nikita-301", !is_inode_loaded(inode)); + if (result == 0) { + /* use stat-data plugin to load sd into inode. */ + result = init_inode(inode, &coord); + if (result == 0) { + /* initialize stat-data seal */ + spin_lock_inode(inode); + reiser4_seal_init(&info->sd_seal, &coord, key); + info->sd_coord = coord; + spin_unlock_inode(inode); + + /* call file plugin's method to initialize plugin + * specific part of inode */ + if (inode_file_plugin(inode)->init_inode_data) + inode_file_plugin(inode)->init_inode_data(inode, + NULL, + 0); + /* load detached directory cursors for stateless + * directory readers (NFS). */ + reiser4_load_cursors(inode); + + /* Check the opened inode for consistency. */ + result = + get_super_private(inode->i_sb)->df_plug-> + check_open(inode); + } + } + /* lookup_sd() doesn't release coord because we want znode + stay read-locked while stat-data fields are accessed in + init_inode() */ + done_lh(&lh); + + if (result != 0) + reiser4_make_bad_inode(inode); + return result; +} + +/* initialise new reiser4 inode being inserted into hash table. */ +static int init_locked_inode(struct inode *inode /* new inode */ , + void *opaque /* key of stat data passed to + * the iget5_locked as cookie */) +{ + reiser4_key *key; + + assert("nikita-1995", inode != NULL); + assert("nikita-1996", opaque != NULL); + key = opaque; + set_inode_oid(inode, get_key_objectid(key)); + reiser4_inode_data(inode)->locality_id = get_key_locality(key); + return 0; +} + +/* reiser4_inode_find_actor() - "find actor" supplied by reiser4 to + iget5_locked(). + + This function is called by iget5_locked() to distinguish reiser4 inodes + having the same inode numbers. 
Such inodes can only exist due to some error + condition. One of them should be bad. Inodes with identical inode numbers + (objectids) are distinguished by their packing locality. + +*/ +static int reiser4_inode_find_actor(struct inode *inode /* inode from hash table + * to check */ , + void *opaque /* "cookie" passed to + * iget5_locked(). This + * is stat-data key */) +{ + reiser4_key *key; + + key = opaque; + return + /* oid is unique, so first term is enough, actually. */ + get_inode_oid(inode) == get_key_objectid(key) && + /* + * also, locality should be checked, but locality is stored in + * the reiser4-specific part of the inode, and actor can be + * called against arbitrary inode that happened to be in this + * hash chain. Hence we first have to check that this is + * reiser4 inode at least. is_reiser4_inode() is probably too + * early to call, as inode may have ->i_op not yet + * initialised. + */ + is_reiser4_super(inode->i_sb) && + /* + * usually objectid is unique, but pseudo files use counter to + * generate objectid. All pseudo files are placed into special + * (otherwise unused) locality. + */ + reiser4_inode_data(inode)->locality_id == get_key_locality(key); +} + +/* hook for kmem_cache_create */ +void loading_init_once(reiser4_inode * info) +{ + mutex_init(&info->loading); +} + +/* for reiser4_alloc_inode */ +void loading_alloc(reiser4_inode * info) +{ + assert("vs-1717", !mutex_is_locked(&info->loading)); +} + +/* for reiser4_destroy */ +void loading_destroy(reiser4_inode * info) +{ + assert("vs-1717a", !mutex_is_locked(&info->loading)); +} + +static void loading_begin(reiser4_inode * info) +{ + mutex_lock(&info->loading); +} + +static void loading_end(reiser4_inode * info) +{ + mutex_unlock(&info->loading); +} + +/** + * reiser4_iget - obtain inode via iget5_locked, read from disk if necessary + * @super: super block of filesystem + * @key: key of inode's stat-data + * @silent: + * + * This is our helper function a la iget(). This is be called by + * lookup_common() and reiser4_read_super(). Return inode locked or error + * encountered. + */ +struct inode *reiser4_iget(struct super_block *super, const reiser4_key *key, + int silent) +{ + struct inode *inode; + int result; + reiser4_inode *info; + + assert("nikita-302", super != NULL); + assert("nikita-303", key != NULL); + + result = 0; + + /* call iget(). Our ->read_inode() is dummy, so this will either + find inode in cache or return uninitialised inode */ + inode = iget5_locked(super, + (unsigned long)get_key_objectid(key), + reiser4_inode_find_actor, + init_locked_inode, (reiser4_key *) key); + if (inode == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + if (is_bad_inode(inode)) { + warning("nikita-304", "Bad inode found"); + reiser4_print_key("key", key); + iput(inode); + return ERR_PTR(RETERR(-EIO)); + } + + info = reiser4_inode_data(inode); + + /* Reiser4 inode state bit REISER4_LOADED is used to distinguish fully + loaded and initialized inode from just allocated inode. If + REISER4_LOADED bit is not set, reiser4_iget() completes loading under + info->loading. 
The place in reiser4 which uses not initialized inode + is the reiser4 repacker, see repacker-related functions in + plugin/item/extent.c */ + if (!is_inode_loaded(inode)) { + loading_begin(info); + if (!is_inode_loaded(inode)) { + /* locking: iget5_locked returns locked inode */ + assert("nikita-1941", !is_inode_loaded(inode)); + assert("nikita-1949", + reiser4_inode_find_actor(inode, + (reiser4_key *) key)); + /* now, inode has objectid as ->i_ino and locality in + reiser4-specific part. This is enough for + read_inode() to read stat data from the disk */ + result = read_inode(inode, key, silent); + } else + loading_end(info); + } + + if (inode->i_state & I_NEW) + unlock_new_inode(inode); + + if (is_bad_inode(inode)) { + assert("vs-1717", result != 0); + loading_end(info); + iput(inode); + inode = ERR_PTR(result); + } else if (REISER4_DEBUG) { + reiser4_key found_key; + + assert("vs-1717", result == 0); + build_sd_key(inode, &found_key); + if (!keyeq(&found_key, key)) { + warning("nikita-305", "Wrong key in sd"); + reiser4_print_key("sought for", key); + reiser4_print_key("found", &found_key); + } + if (inode->i_nlink == 0) { + warning("nikita-3559", "Unlinked inode found: %llu\n", + (unsigned long long)get_inode_oid(inode)); + } + } + return inode; +} + +/* reiser4_iget() may return not fully initialized inode, this function should + * be called after one completes reiser4 inode initializing. */ +void reiser4_iget_complete(struct inode *inode) +{ + assert("zam-988", is_reiser4_inode(inode)); + + if (!is_inode_loaded(inode)) { + reiser4_inode_set_flag(inode, REISER4_LOADED); + loading_end(reiser4_inode_data(inode)); + } +} + +void reiser4_make_bad_inode(struct inode *inode) +{ + assert("nikita-1934", inode != NULL); + + /* clear LOADED bit */ + reiser4_inode_clr_flag(inode, REISER4_LOADED); + make_bad_inode(inode); + return; +} + +file_plugin *inode_file_plugin(const struct inode *inode) +{ + assert("nikita-1997", inode != NULL); + return reiser4_inode_data(inode)->pset->file; +} + +dir_plugin *inode_dir_plugin(const struct inode *inode) +{ + assert("nikita-1998", inode != NULL); + return reiser4_inode_data(inode)->pset->dir; +} + +formatting_plugin *inode_formatting_plugin(const struct inode *inode) +{ + assert("nikita-2000", inode != NULL); + return reiser4_inode_data(inode)->pset->formatting; +} + +hash_plugin *inode_hash_plugin(const struct inode *inode) +{ + assert("nikita-2001", inode != NULL); + return reiser4_inode_data(inode)->pset->hash; +} + +fibration_plugin *inode_fibration_plugin(const struct inode *inode) +{ + assert("nikita-2001", inode != NULL); + return reiser4_inode_data(inode)->pset->fibration; +} + +cipher_plugin *inode_cipher_plugin(const struct inode *inode) +{ + assert("edward-36", inode != NULL); + return reiser4_inode_data(inode)->pset->cipher; +} + +compression_plugin *inode_compression_plugin(const struct inode *inode) +{ + assert("edward-37", inode != NULL); + return reiser4_inode_data(inode)->pset->compression; +} + +compression_mode_plugin *inode_compression_mode_plugin(const struct inode * + inode) +{ + assert("edward-1330", inode != NULL); + return reiser4_inode_data(inode)->pset->compression_mode; +} + +cluster_plugin *inode_cluster_plugin(const struct inode *inode) +{ + assert("edward-1328", inode != NULL); + return reiser4_inode_data(inode)->pset->cluster; +} + +file_plugin *inode_create_plugin(const struct inode *inode) +{ + assert("edward-1329", inode != NULL); + return reiser4_inode_data(inode)->pset->create; +} + +digest_plugin 
*inode_digest_plugin(const struct inode *inode) +{ + assert("edward-86", inode != NULL); + return reiser4_inode_data(inode)->pset->digest; +} + +item_plugin *inode_sd_plugin(const struct inode *inode) +{ + assert("vs-534", inode != NULL); + return reiser4_inode_data(inode)->pset->sd; +} + +item_plugin *inode_dir_item_plugin(const struct inode *inode) +{ + assert("vs-534", inode != NULL); + return reiser4_inode_data(inode)->pset->dir_item; +} + +file_plugin *child_create_plugin(const struct inode *inode) +{ + assert("edward-1329", inode != NULL); + return reiser4_inode_data(inode)->hset->create; +} + +void inode_set_extension(struct inode *inode, sd_ext_bits ext) +{ + reiser4_inode *state; + + assert("nikita-2716", inode != NULL); + assert("nikita-2717", ext < LAST_SD_EXTENSION); + assert("nikita-3491", spin_inode_is_locked(inode)); + + state = reiser4_inode_data(inode); + state->extmask |= 1 << ext; + /* force re-calculation of stat-data length on next call to + update_sd(). */ + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); +} + +void inode_clr_extension(struct inode *inode, sd_ext_bits ext) +{ + reiser4_inode *state; + + assert("vpf-1926", inode != NULL); + assert("vpf-1927", ext < LAST_SD_EXTENSION); + assert("vpf-1928", spin_inode_is_locked(inode)); + + state = reiser4_inode_data(inode); + state->extmask &= ~(1 << ext); + /* force re-calculation of stat-data length on next call to + update_sd(). */ + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); +} + +void inode_check_scale_nolock(struct inode *inode, __u64 old, __u64 new) +{ + assert("edward-1287", inode != NULL); + if (!dscale_fit(old, new)) + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); + return; +} + +void inode_check_scale(struct inode *inode, __u64 old, __u64 new) +{ + assert("nikita-2875", inode != NULL); + spin_lock_inode(inode); + inode_check_scale_nolock(inode, old, new); + spin_unlock_inode(inode); +} + +/* + * initialize ->ordering field of inode. This field defines how file stat-data + * and body is ordered within a tree with respect to other objects within the + * same parent directory. 
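+ *
+ * For a newly created object the ordering comes from the directory entry key
+ * built by the parent's ->build_entry_key(); for an existing object it is
+ * read back from the cached stat-data coord (see the create/!create branches
+ * below).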
+ */ +void +init_inode_ordering(struct inode *inode, + reiser4_object_create_data * crd, int create) +{ + reiser4_key key; + + if (create) { + struct inode *parent; + + parent = crd->parent; + assert("nikita-3224", inode_dir_plugin(parent) != NULL); + inode_dir_plugin(parent)->build_entry_key(parent, + &crd->dentry->d_name, + &key); + } else { + coord_t *coord; + + coord = &reiser4_inode_data(inode)->sd_coord; + coord_clear_iplug(coord); + /* safe to use ->sd_coord, because node is under long term + * lock */ + WITH_DATA(coord->node, item_key_by_coord(coord, &key)); + } + + set_inode_ordering(inode, get_key_ordering(&key)); +} + +znode *inode_get_vroot(struct inode *inode) +{ + reiser4_block_nr blk; + znode *result; + + spin_lock_inode(inode); + blk = reiser4_inode_data(inode)->vroot; + spin_unlock_inode(inode); + if (!disk_addr_eq(&UBER_TREE_ADDR, &blk)) + result = zlook(reiser4_tree_by_inode(inode), &blk); + else + result = NULL; + return result; +} + +void inode_set_vroot(struct inode *inode, znode *vroot) +{ + spin_lock_inode(inode); + reiser4_inode_data(inode)->vroot = *znode_get_block(vroot); + spin_unlock_inode(inode); +} + +#if REISER4_DEBUG + +void reiser4_inode_invariant(const struct inode *inode) +{ + assert("nikita-3077", spin_inode_is_locked(inode)); +} + +int inode_has_no_jnodes(reiser4_inode * r4_inode) +{ + return jnode_tree_by_reiser4_inode(r4_inode)->rnode == NULL && + r4_inode->nr_jnodes == 0; +} + +#endif + +/* true if directory is empty (only contains dot and dotdot) */ +/* FIXME: shouldn't it be dir plugin method? */ +int is_dir_empty(const struct inode *dir) +{ + assert("nikita-1976", dir != NULL); + + /* rely on our method to maintain directory i_size being equal to the + number of entries. */ + return dir->i_size <= 2 ? 0 : RETERR(-ENOTEMPTY); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/inode.h b/fs/reiser4/inode.h new file mode 100644 index 000000000000..c6a627312268 --- /dev/null +++ b/fs/reiser4/inode.h @@ -0,0 +1,506 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ + +/* Inode functions. */ + +#if !defined(__REISER4_INODE_H__) +#define __REISER4_INODE_H__ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "seal.h" +#include "plugin/plugin.h" +#include "plugin/file/cryptcompress.h" +#include "plugin/file/file.h" +#include "plugin/dir/dir.h" +#include "plugin/plugin_set.h" +#include "plugin/security/perm.h" +#include "vfs_ops.h" +#include "jnode.h" +#include "fsdata.h" + +#include <linux/types.h> /* for __u?? , ino_t */ +#include <linux/fs.h> /* for struct super_block, struct + * rw_semaphore, etc */ +#include <linux/spinlock.h> +#include <asm/types.h> + +/* reiser4-specific inode flags. They are "transient" and are not + supposed to be stored on disk. Used to trace "state" of + inode +*/ +typedef enum { + /* this is light-weight inode, inheriting some state from its + parent */ + REISER4_LIGHT_WEIGHT = 0, + /* stat data wasn't yet created */ + REISER4_NO_SD = 1, + /* internal immutable flag. Currently is only used + to avoid race condition during file creation. + See comment in create_object(). */ + REISER4_IMMUTABLE = 2, + /* inode was read from storage */ + REISER4_LOADED = 3, + /* this bit is set for symlinks. inode->i_private points to target + name of symlink. */ + REISER4_GENERIC_PTR_USED = 4, + /* set if size of stat-data item for this inode is known. 
If this is + * set we can avoid recalculating size of stat-data on each update. */ + REISER4_SDLEN_KNOWN = 5, + /* reiser4_inode->crypt points to the crypto stat */ + REISER4_CRYPTO_STAT_LOADED = 6, + /* cryptcompress_inode_data points to the secret key */ + REISER4_SECRET_KEY_INSTALLED = 7, + /* File (possibly) has pages corresponding to the tail items, that + * were created by ->readpage. It is set by mmap_unix_file() and + * sendfile_unix_file(). This bit is inspected by write_unix_file and + * kill-hook of tail items. It is never cleared once set. This bit is + * modified and inspected under i_mutex. */ + REISER4_HAS_MMAP = 8, + REISER4_PART_MIXED = 9, + REISER4_PART_IN_CONV = 10, + /* This flag indicates that file plugin conversion is in progress */ + REISER4_FILE_CONV_IN_PROGRESS = 11 +} reiser4_file_plugin_flags; + +/* state associated with each inode. + reiser4 inode. + + NOTE-NIKITA In 2.5 kernels it is not necessary that all file-system inodes + be of the same size. File-system allocates inodes by itself through + s_op->allocate_inode() method. So, it is possible to adjust size of inode + at the time of its creation. + + Invariants involving parts of this data-type: + + [inode->eflushed] + +*/ + +typedef struct reiser4_inode reiser4_inode; +/* return pointer to reiser4-specific part of inode */ +static inline reiser4_inode *reiser4_inode_data(const struct inode *inode + /* inode queried */ ); + +#if BITS_PER_LONG == 64 + +#define REISER4_INO_IS_OID (1) +typedef struct {; +} oid_hi_t; + +/* BITS_PER_LONG == 64 */ +#else + +#define REISER4_INO_IS_OID (0) +typedef __u32 oid_hi_t; + +/* BITS_PER_LONG == 64 */ +#endif + +struct reiser4_inode { + /* spin lock protecting fields of this structure. */ + spinlock_t guard; + /* main plugin set that control the file + (see comments in plugin/plugin_set.c) */ + plugin_set *pset; + /* plugin set for inheritance + (see comments in plugin/plugin_set.c) */ + plugin_set *hset; + /* high 32 bits of object id */ + oid_hi_t oid_hi; + /* seal for stat-data */ + seal_t sd_seal; + /* locality id for this file */ + oid_t locality_id; +#if REISER4_LARGE_KEY + __u64 ordering; +#endif + /* coord of stat-data in sealed node */ + coord_t sd_coord; + /* bit-mask of stat-data extentions used by this file */ + __u64 extmask; + /* bitmask of non-default plugins for this inode */ + __u16 plugin_mask; + /* bitmask of set heir plugins for this inode. */ + __u16 heir_mask; + union { + struct list_head readdir_list; + struct list_head not_used; + } lists; + /* per-inode flags. Filled by values of reiser4_file_plugin_flags */ + unsigned long flags; + union { + /* fields specific to unix_file plugin */ + struct unix_file_info unix_file_info; + /* fields specific to cryptcompress file plugin */ + struct cryptcompress_info cryptcompress_info; + } file_plugin_data; + + /* this semaphore is to serialize readers and writers of @pset->file + * when file plugin conversion is enabled + */ + struct rw_semaphore conv_sem; + + /* tree of jnodes. Phantom jnodes (ones not attched to any atom) are + tagged in that tree by EFLUSH_TAG_ANONYMOUS */ + struct radix_tree_root jnodes_tree; +#if REISER4_DEBUG + /* number of unformatted node jnodes of this file in jnode hash table */ + unsigned long nr_jnodes; +#endif + + /* block number of virtual root for this object. 
See comment above + * fs/reiser4/search.c:handle_vroot() */ + reiser4_block_nr vroot; + struct mutex loading; +}; + +void loading_init_once(reiser4_inode *); +void loading_alloc(reiser4_inode *); +void loading_destroy(reiser4_inode *); + +struct reiser4_inode_object { + /* private part */ + reiser4_inode p; + /* generic fields not specific to reiser4, but used by VFS */ + struct inode vfs_inode; +}; + +/* return pointer to the reiser4 specific portion of @inode */ +static inline reiser4_inode *reiser4_inode_data(const struct inode *inode + /* inode queried */ ) +{ + assert("nikita-254", inode != NULL); + return &container_of(inode, struct reiser4_inode_object, vfs_inode)->p; +} + +static inline struct inode *inode_by_reiser4_inode(const reiser4_inode * + r4_inode /* inode queried */ + ) +{ + return &container_of(r4_inode, struct reiser4_inode_object, + p)->vfs_inode; +} + +/* + * reiser4 inodes are identified by 64bit object-id (oid_t), but in struct + * inode ->i_ino field is of type ino_t (long) that can be either 32 or 64 + * bits. + * + * If ->i_ino is 32 bits we store remaining 32 bits in reiser4 specific part + * of inode, otherwise whole oid is stored in i_ino. + * + * Wrappers below ([sg]et_inode_oid()) are used to hide this difference. + */ + +#define OID_HI_SHIFT (sizeof(ino_t) * 8) + +#if REISER4_INO_IS_OID + +static inline oid_t get_inode_oid(const struct inode *inode) +{ + return inode->i_ino; +} + +static inline void set_inode_oid(struct inode *inode, oid_t oid) +{ + inode->i_ino = oid; +} + +/* REISER4_INO_IS_OID */ +#else + +static inline oid_t get_inode_oid(const struct inode *inode) +{ + return + ((__u64) reiser4_inode_data(inode)->oid_hi << OID_HI_SHIFT) | + inode->i_ino; +} + +static inline void set_inode_oid(struct inode *inode, oid_t oid) +{ + assert("nikita-2519", inode != NULL); + inode->i_ino = (ino_t) (oid); + reiser4_inode_data(inode)->oid_hi = (oid) >> OID_HI_SHIFT; + assert("nikita-2521", get_inode_oid(inode) == (oid)); +} + +/* REISER4_INO_IS_OID */ +#endif + +static inline oid_t get_inode_locality(const struct inode *inode) +{ + return reiser4_inode_data(inode)->locality_id; +} + +#if REISER4_LARGE_KEY +static inline __u64 get_inode_ordering(const struct inode *inode) +{ + return reiser4_inode_data(inode)->ordering; +} + +static inline void set_inode_ordering(const struct inode *inode, __u64 ordering) +{ + reiser4_inode_data(inode)->ordering = ordering; +} + +#else + +#define get_inode_ordering(inode) (0) +#define set_inode_ordering(inode, val) noop + +#endif + +/* return inode in which @uf_info is embedded */ +static inline struct inode * +unix_file_info_to_inode(const struct unix_file_info *uf_info) +{ + return &container_of(uf_info, struct reiser4_inode_object, + p.file_plugin_data.unix_file_info)->vfs_inode; +} + +extern ino_t oid_to_ino(oid_t oid) __attribute__ ((const)); +extern ino_t oid_to_uino(oid_t oid) __attribute__ ((const)); + +extern reiser4_tree *reiser4_tree_by_inode(const struct inode *inode); + +#if REISER4_DEBUG +extern void reiser4_inode_invariant(const struct inode *inode); +extern int inode_has_no_jnodes(reiser4_inode *); +#else +#define reiser4_inode_invariant(inode) noop +#endif + +static inline int spin_inode_is_locked(const struct inode *inode) +{ + assert_spin_locked(&reiser4_inode_data(inode)->guard); + return 1; +} + +/** + * spin_lock_inode - lock reiser4_inode' embedded spinlock + * @inode: inode to lock + * + * In debug mode it checks that lower priority locks are not held and + * increments reiser4_context's lock counters on 
which lock ordering checking + * is based. + */ +static inline void spin_lock_inode(struct inode *inode) +{ + assert("", LOCK_CNT_NIL(spin_locked)); + /* check lock ordering */ + assert_spin_not_locked(&d_c_lock); + + spin_lock(&reiser4_inode_data(inode)->guard); + + LOCK_CNT_INC(spin_locked_inode); + LOCK_CNT_INC(spin_locked); + + reiser4_inode_invariant(inode); +} + +/** + * spin_unlock_inode - unlock reiser4_inode' embedded spinlock + * @inode: inode to unlock + * + * In debug mode it checks that spinlock is held and decrements + * reiser4_context's lock counters on which lock ordering checking is based. + */ +static inline void spin_unlock_inode(struct inode *inode) +{ + assert_spin_locked(&reiser4_inode_data(inode)->guard); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_inode)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + reiser4_inode_invariant(inode); + + LOCK_CNT_DEC(spin_locked_inode); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&reiser4_inode_data(inode)->guard); +} + +extern znode *inode_get_vroot(struct inode *inode); +extern void inode_set_vroot(struct inode *inode, znode * vroot); + +extern int reiser4_max_filename_len(const struct inode *inode); +extern int max_hash_collisions(const struct inode *dir); +extern void reiser4_unlock_inode(struct inode *inode); +extern int is_reiser4_inode(const struct inode *inode); +extern int setup_inode_ops(struct inode *inode, reiser4_object_create_data *); +extern struct inode *reiser4_iget(struct super_block *super, + const reiser4_key * key, int silent); +extern void reiser4_iget_complete(struct inode *inode); +extern void reiser4_inode_set_flag(struct inode *inode, + reiser4_file_plugin_flags f); +extern void reiser4_inode_clr_flag(struct inode *inode, + reiser4_file_plugin_flags f); +extern int reiser4_inode_get_flag(const struct inode *inode, + reiser4_file_plugin_flags f); + +/* has inode been initialized? 
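+   (i.e. has the REISER4_LOADED bit been set, see reiser4_iget_complete())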
*/ +static inline int +is_inode_loaded(const struct inode *inode/* inode queried */) +{ + assert("nikita-1120", inode != NULL); + return reiser4_inode_get_flag(inode, REISER4_LOADED); +} + +extern file_plugin *inode_file_plugin(const struct inode *inode); +extern dir_plugin *inode_dir_plugin(const struct inode *inode); +extern formatting_plugin *inode_formatting_plugin(const struct inode *inode); +extern hash_plugin *inode_hash_plugin(const struct inode *inode); +extern fibration_plugin *inode_fibration_plugin(const struct inode *inode); +extern cipher_plugin *inode_cipher_plugin(const struct inode *inode); +extern digest_plugin *inode_digest_plugin(const struct inode *inode); +extern compression_plugin *inode_compression_plugin(const struct inode *inode); +extern compression_mode_plugin *inode_compression_mode_plugin(const struct inode + *inode); +extern cluster_plugin *inode_cluster_plugin(const struct inode *inode); +extern file_plugin *inode_create_plugin(const struct inode *inode); +extern item_plugin *inode_sd_plugin(const struct inode *inode); +extern item_plugin *inode_dir_item_plugin(const struct inode *inode); +extern file_plugin *child_create_plugin(const struct inode *inode); + +extern void reiser4_make_bad_inode(struct inode *inode); + +extern void inode_set_extension(struct inode *inode, sd_ext_bits ext); +extern void inode_clr_extension(struct inode *inode, sd_ext_bits ext); +extern void inode_check_scale(struct inode *inode, __u64 old, __u64 new); +extern void inode_check_scale_nolock(struct inode *inode, __u64 old, __u64 new); + +#define INODE_SET_SIZE(i, value) \ +({ \ + struct inode *__i; \ + typeof(value) __v; \ + \ + __i = (i); \ + __v = (value); \ + inode_check_scale(__i, __i->i_size, __v); \ + i_size_write(__i, __v); \ +}) + +/* + * update field @field in inode @i to contain value @value. + */ +#define INODE_SET_FIELD(i, field, value) \ +({ \ + struct inode *__i; \ + typeof(value) __v; \ + \ + __i = (i); \ + __v = (value); \ + inode_check_scale(__i, __i->field, __v); \ + __i->field = __v; \ +}) + +#define INODE_INC_FIELD(i, field) \ +({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->field, __i->field + 1); \ + ++ __i->field; \ +}) + +#define INODE_DEC_FIELD(i, field) \ +({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->field, __i->field - 1); \ + -- __i->field; \ +}) + +/* + * Update field i_nlink in inode @i using library function @op. + */ +#define INODE_SET_NLINK(i, value) \ +({ \ + struct inode *__i; \ + typeof(value) __v; \ + \ + __i = (i); \ + __v = (value); \ + inode_check_scale(__i, __i->i_nlink, __v); \ + set_nlink(__i, __v); \ +}) + +#define INODE_INC_NLINK(i) \ + ({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->i_nlink, __i->i_nlink + 1); \ + inc_nlink(__i); \ +}) + +#define INODE_DROP_NLINK(i) \ + ({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->i_nlink, __i->i_nlink - 1); \ + drop_nlink(__i); \ +}) + +#define INODE_CLEAR_NLINK(i) \ + ({ \ + struct inode *__i; \ + \ + __i = (i); \ + inode_check_scale(__i, __i->i_nlink, 0); \ + clear_nlink(__i); \ +}) + + +static inline void inode_add_blocks(struct inode *inode, __u64 blocks) +{ + inode_add_bytes(inode, blocks << inode->i_blkbits); +} + +static inline void inode_sub_blocks(struct inode *inode, __u64 blocks) +{ + inode_sub_bytes(inode, blocks << inode->i_blkbits); +} + + +/* See comment before reiser4_readdir_common() for description. 
*/ +static inline struct list_head *get_readdir_list(const struct inode *inode) +{ + return &reiser4_inode_data(inode)->lists.readdir_list; +} + +extern void init_inode_ordering(struct inode *inode, + reiser4_object_create_data * crd, int create); + +static inline struct radix_tree_root *jnode_tree_by_inode(struct inode *inode) +{ + return &reiser4_inode_data(inode)->jnodes_tree; +} + +static inline struct radix_tree_root *jnode_tree_by_reiser4_inode(reiser4_inode + *r4_inode) +{ + return &r4_inode->jnodes_tree; +} + +#if REISER4_DEBUG +extern void print_inode(const char *prefix, const struct inode *i); +#endif + +int is_dir_empty(const struct inode *); + +/* __REISER4_INODE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/ioctl.h b/fs/reiser4/ioctl.h new file mode 100644 index 000000000000..b24c6c3dd3d2 --- /dev/null +++ b/fs/reiser4/ioctl.h @@ -0,0 +1,41 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#if !defined(__REISER4_IOCTL_H__) +#define __REISER4_IOCTL_H__ + +#include <linux/fs.h> + +/* + * ioctl(2) command used to "unpack" reiser4 file, that is, convert it into + * extents and fix in this state. This is used by applications that rely on + * + * . files being block aligned, and + * + * . files never migrating on disk + * + * for example, boot loaders (LILO) need this. + * + * This ioctl should be used as + * + * result = ioctl(fd, REISER4_IOC_UNPACK); + * + * File behind fd descriptor will be converted to the extents (if necessary), + * and its stat-data will be updated so that it will never be converted back + * into tails again. + */ +#define REISER4_IOC_UNPACK _IOW(0xCD, 1, long) + +/* __REISER4_IOCTL_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/jnode.c b/fs/reiser4/jnode.c new file mode 100644 index 000000000000..0f1594cd643e --- /dev/null +++ b/fs/reiser4/jnode.c @@ -0,0 +1,1905 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ +/* Jnode manipulation functions. */ +/* Jnode is entity used to track blocks with data and meta-data in reiser4. + + In particular, jnodes are used to track transactional information + associated with each block. Each znode contains jnode as ->zjnode field. + + Jnode stands for either Josh or Journal node. +*/ + +/* + * Taxonomy. + * + * Jnode represents block containing data or meta-data. There are jnodes + * for: + * + * unformatted blocks (jnodes proper). There are plans, however to + * have a handle per extent unit rather than per each unformatted + * block, because there are so many of them. + * + * For bitmaps. Each bitmap is actually represented by two jnodes--one + * for working and another for "commit" data, together forming bnode. + * + * For io-heads. These are used by log writer. + * + * For formatted nodes (znode). See comment at the top of znode.c for + * details specific to the formatted nodes (znodes). + * + * Node data. + * + * Jnode provides access to the data of node it represents. Data are + * stored in a page. Page is kept in a page cache. This means, that jnodes + * are highly interconnected with page cache and VM internals. + * + * jnode has a pointer to page (->pg) containing its data. 
Pointer to data + * themselves is cached in ->data field to avoid frequent calls to + * page_address(). + * + * jnode and page are attached to each other by jnode_attach_page(). This + * function places pointer to jnode in set_page_private(), sets PG_private + * flag and increments page counter. + * + * Opposite operation is performed by page_clear_jnode(). + * + * jnode->pg is protected by jnode spin lock, and page->private is + * protected by page lock. See comment at the top of page_cache.c for + * more. + * + * page can be detached from jnode for two reasons: + * + * . jnode is removed from a tree (file is truncated, of formatted + * node is removed by balancing). + * + * . during memory pressure, VM calls ->releasepage() method + * (reiser4_releasepage()) to evict page from memory. + * + * (there, of course, is also umount, but this is special case we are not + * concerned with here). + * + * To protect jnode page from eviction, one calls jload() function that + * "pins" page in memory (loading it if necessary), increments + * jnode->d_count, and kmap()s page. Page is unpinned through call to + * jrelse(). + * + * Jnode life cycle. + * + * jnode is created, placed in hash table, and, optionally, in per-inode + * radix tree. Page can be attached to jnode, pinned, released, etc. + * + * When jnode is captured into atom its reference counter is + * increased. While being part of an atom, jnode can be "early + * flushed". This means that as part of flush procedure, jnode is placed + * into "relocate set", and its page is submitted to the disk. After io + * completes, page can be detached, then loaded again, re-dirtied, etc. + * + * Thread acquired reference to jnode by calling jref() and releases it by + * jput(). When last reference is removed, jnode is still retained in + * memory (cached) if it has page attached, _unless_ it is scheduled for + * destruction (has JNODE_HEARD_BANSHEE bit set). + * + * Tree read-write lock was used as "existential" lock for jnodes. That is, + * jnode->x_count could be changed from 0 to 1 only under tree write lock, + * that is, tree lock protected unreferenced jnodes stored in the hash + * table, from recycling. + * + * This resulted in high contention on tree lock, because jref()/jput() is + * frequent operation. To ameliorate this problem, RCU is used: when jput() + * is just about to release last reference on jnode it sets JNODE_RIP bit + * on it, and then proceed with jnode destruction (removing jnode from hash + * table, cbk_cache, detaching page, etc.). All places that change jnode + * reference counter from 0 to 1 (jlookup(), zlook(), zget(), and + * cbk_cache_scan_slots()) check for JNODE_RIP bit (this is done by + * jnode_rip_check() function), and pretend that nothing was found in hash + * table if bit is set. + * + * jput defers actual return of jnode into slab cache to some later time + * (by call_rcu()), this guarantees that other threads can safely continue + * working with JNODE_RIP-ped jnode. 
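+ *
+ *    A rough sketch of the usage pattern described above (illustrative only,
+ *    error handling trimmed):
+ *
+ *        jref(node);
+ *        if (jload(node) == 0) {
+ *                ... access the pinned, kmap()-ed page data ...
+ *                jrelse(node);
+ *        }
+ *        jput(node);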
+ * + */ + +#include "reiser4.h" +#include "debug.h" +#include "dformat.h" +#include "jnode.h" +#include "plugin/plugin_header.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +/*#include "jnode.h"*/ +#include "znode.h" +#include "tree.h" +#include "tree_walk.h" +#include "super.h" +#include "inode.h" +#include "page_cache.h" + +#include <asm/uaccess.h> /* UML needs this for PAGE_OFFSET */ +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/pagemap.h> +#include <linux/swap.h> +#include <linux/fs.h> /* for struct address_space */ +#include <linux/writeback.h> /* for inode_wb_list_lock */ + +static struct kmem_cache *_jnode_slab = NULL; + +static void jnode_set_type(jnode * node, jnode_type type); +static int jdelete(jnode * node); +static int jnode_try_drop(jnode * node); + +#if REISER4_DEBUG +static int jnode_invariant(jnode * node, int tlocked, int jlocked); +#endif + +/* true if valid page is attached to jnode */ +static inline int jnode_is_parsed(jnode * node) +{ + return JF_ISSET(node, JNODE_PARSED); +} + +/* hash table support */ + +/* compare two jnode keys for equality. Used by hash-table macros */ +static inline int jnode_key_eq(const struct jnode_key *k1, + const struct jnode_key *k2) +{ + assert("nikita-2350", k1 != NULL); + assert("nikita-2351", k2 != NULL); + + return (k1->index == k2->index && k1->objectid == k2->objectid); +} + +/* Hash jnode by its key (inode plus offset). Used by hash-table macros */ +static inline __u32 jnode_key_hashfn(j_hash_table * table, + const struct jnode_key *key) +{ + assert("nikita-2352", key != NULL); + assert("nikita-3346", IS_POW(table->_buckets)); + + /* yes, this is remarkable simply (where not stupid) hash function. */ + return (key->objectid + key->index) & (table->_buckets - 1); +} + +/* The hash table definition */ +#define KMALLOC(size) reiser4_vmalloc(size) +#define KFREE(ptr, size) vfree(ptr) +TYPE_SAFE_HASH_DEFINE(j, jnode, struct jnode_key, key.j, link.j, + jnode_key_hashfn, jnode_key_eq); +#undef KFREE +#undef KMALLOC + +/* call this to initialise jnode hash table */ +int jnodes_tree_init(reiser4_tree * tree/* tree to initialise jnodes for */) +{ + assert("nikita-2359", tree != NULL); + return j_hash_init(&tree->jhash_table, 16384); +} + +/* call this to destroy jnode hash table. This is called during umount. */ +int jnodes_tree_done(reiser4_tree * tree/* tree to destroy jnodes for */) +{ + j_hash_table *jtable; + jnode *node; + jnode *next; + + assert("nikita-2360", tree != NULL); + + /* + * Scan hash table and free all jnodes. + */ + jtable = &tree->jhash_table; + if (jtable->_table) { + for_all_in_htable(jtable, j, node, next) { + assert("nikita-2361", !atomic_read(&node->x_count)); + jdrop(node); + } + + j_hash_done(&tree->jhash_table); + } + return 0; +} + +/** + * init_jnodes - create jnode cache + * + * Initializes slab cache jnodes. It is part of reiser4 module initialization. + */ +int init_jnodes(void) +{ + assert("umka-168", _jnode_slab == NULL); + + _jnode_slab = kmem_cache_create("jnode", sizeof(jnode), 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (_jnode_slab == NULL) + return RETERR(-ENOMEM); + + return 0; +} + +/** + * done_znodes - delete znode cache + * + * This is called on reiser4 module unloading or system shutdown. + */ +void done_jnodes(void) +{ + destroy_reiser4_cache(&_jnode_slab); +} + +/* Initialize a jnode. 
*/ +void jnode_init(jnode * node, reiser4_tree * tree, jnode_type type) +{ + memset(node, 0, sizeof(jnode)); + ON_DEBUG(node->magic = JMAGIC); + jnode_set_type(node, type); + atomic_set(&node->d_count, 0); + atomic_set(&node->x_count, 0); + spin_lock_init(&node->guard); + spin_lock_init(&node->load); + node->atom = NULL; + node->tree = tree; + INIT_LIST_HEAD(&node->capture_link); + + ASSIGN_NODE_LIST(node, NOT_CAPTURED); + +#if REISER4_DEBUG + { + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(tree->super); + spin_lock_irq(&sbinfo->all_guard); + list_add(&node->jnodes, &sbinfo->all_jnodes); + spin_unlock_irq(&sbinfo->all_guard); + } +#endif +} + +#if REISER4_DEBUG +/* + * Remove jnode from ->all_jnodes list. + */ +static void jnode_done(jnode * node, reiser4_tree * tree) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(tree->super); + + spin_lock_irq(&sbinfo->all_guard); + assert("nikita-2422", !list_empty(&node->jnodes)); + list_del_init(&node->jnodes); + spin_unlock_irq(&sbinfo->all_guard); +} +#endif + +/* return already existing jnode of page */ +jnode *jnode_by_page(struct page *pg) +{ + assert("nikita-2400", PageLocked(pg)); + assert("nikita-2068", PagePrivate(pg)); + assert("nikita-2067", jprivate(pg) != NULL); + return jprivate(pg); +} + +/* exported functions to allocate/free jnode objects outside this file */ +jnode *jalloc(void) +{ + jnode *jal = kmem_cache_alloc(_jnode_slab, reiser4_ctx_gfp_mask_get()); + return jal; +} + +/* return jnode back to the slab allocator */ +inline void jfree(jnode * node) +{ + assert("nikita-2663", (list_empty_careful(&node->capture_link) && + NODE_LIST(node) == NOT_CAPTURED)); + assert("nikita-3222", list_empty(&node->jnodes)); + assert("nikita-3221", jnode_page(node) == NULL); + + /* not yet phash_jnode_destroy(node); */ + + kmem_cache_free(_jnode_slab, node); +} + +/* + * This function is supplied as RCU callback. It actually frees jnode when + * last reference to it is gone. + */ +static void jnode_free_actor(struct rcu_head *head) +{ + jnode *node; + jnode_type jtype; + + node = container_of(head, jnode, rcu); + jtype = jnode_get_type(node); + + ON_DEBUG(jnode_done(node, jnode_get_tree(node))); + + switch (jtype) { + case JNODE_IO_HEAD: + case JNODE_BITMAP: + case JNODE_UNFORMATTED_BLOCK: + jfree(node); + break; + case JNODE_FORMATTED_BLOCK: + zfree(JZNODE(node)); + break; + case JNODE_INODE: + default: + wrong_return_value("nikita-3197", "Wrong jnode type"); + } +} + +/* + * Free a jnode. Post a callback to be executed later through RCU when all + * references to @node are released. + */ +static inline void jnode_free(jnode * node, jnode_type jtype) +{ + if (jtype != JNODE_INODE) { + /*assert("nikita-3219", list_empty(&node->rcu.list)); */ + call_rcu(&node->rcu, jnode_free_actor); + } else + jnode_list_remove(node); +} + +/* allocate new unformatted jnode */ +static jnode *jnew_unformatted(void) +{ + jnode *jal; + + jal = jalloc(); + if (jal == NULL) + return NULL; + + jnode_init(jal, current_tree, JNODE_UNFORMATTED_BLOCK); + jal->key.j.mapping = NULL; + jal->key.j.index = (unsigned long)-1; + jal->key.j.objectid = 0; + return jal; +} + +/* look for jnode with given mapping and offset within hash table */ +jnode *jlookup(reiser4_tree * tree, oid_t objectid, unsigned long index) +{ + struct jnode_key jkey; + jnode *node; + + jkey.objectid = objectid; + jkey.index = index; + + /* + * hash table is _not_ protected by any lock during lookups. All we + * have to do is to disable preemption to keep RCU happy. 
+ */ + + rcu_read_lock(); + node = j_hash_find(&tree->jhash_table, &jkey); + if (node != NULL) { + /* protect @node from recycling */ + jref(node); + assert("nikita-2955", jnode_invariant(node, 0, 0)); + node = jnode_rip_check(tree, node); + } + rcu_read_unlock(); + return node; +} + +/* per inode radix tree of jnodes is protected by tree's read write spin lock */ +static jnode *jfind_nolock(struct address_space *mapping, unsigned long index) +{ + assert("vs-1694", mapping->host != NULL); + + return radix_tree_lookup(jnode_tree_by_inode(mapping->host), index); +} + +jnode *jfind(struct address_space *mapping, unsigned long index) +{ + reiser4_tree *tree; + jnode *node; + + assert("vs-1694", mapping->host != NULL); + tree = reiser4_tree_by_inode(mapping->host); + + read_lock_tree(tree); + node = jfind_nolock(mapping, index); + if (node != NULL) + jref(node); + read_unlock_tree(tree); + return node; +} + +static void inode_attach_jnode(jnode * node) +{ + struct inode *inode; + reiser4_inode *info; + struct radix_tree_root *rtree; + + assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock)); + assert("zam-1043", node->key.j.mapping != NULL); + inode = node->key.j.mapping->host; + info = reiser4_inode_data(inode); + rtree = jnode_tree_by_reiser4_inode(info); + if (rtree->rnode == NULL) { + /* prevent inode from being pruned when it has jnodes attached + to it */ + spin_lock_irq(&inode->i_data.tree_lock); + inode->i_data.nrpages++; + spin_unlock_irq(&inode->i_data.tree_lock); + } + assert("zam-1049", equi(rtree->rnode != NULL, info->nr_jnodes != 0)); + check_me("zam-1045", + !radix_tree_insert(rtree, node->key.j.index, node)); + ON_DEBUG(info->nr_jnodes++); +} + +static void inode_detach_jnode(jnode * node) +{ + struct inode *inode; + reiser4_inode *info; + struct radix_tree_root *rtree; + + assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock)); + assert("zam-1044", node->key.j.mapping != NULL); + inode = node->key.j.mapping->host; + info = reiser4_inode_data(inode); + rtree = jnode_tree_by_reiser4_inode(info); + + assert("zam-1051", info->nr_jnodes != 0); + assert("zam-1052", rtree->rnode != NULL); + ON_DEBUG(info->nr_jnodes--); + + /* delete jnode from inode's radix tree of jnodes */ + check_me("zam-1046", radix_tree_delete(rtree, node->key.j.index)); + if (rtree->rnode == NULL) { + /* inode can be pruned now */ + spin_lock_irq(&inode->i_data.tree_lock); + inode->i_data.nrpages--; + spin_unlock_irq(&inode->i_data.tree_lock); + } +} + +/* put jnode into hash table (where they can be found by flush who does not know + mapping) and to inode's tree of jnodes (where they can be found (hopefully + faster) in places where mapping is known). Currently it is used by + fs/reiser4/plugin/item/extent_file_ops.c:index_extent_jnode when new jnode is + created */ +static void +hash_unformatted_jnode(jnode * node, struct address_space *mapping, + unsigned long index) +{ + j_hash_table *jtable; + + assert("vs-1446", jnode_is_unformatted(node)); + assert("vs-1442", node->key.j.mapping == 0); + assert("vs-1443", node->key.j.objectid == 0); + assert("vs-1444", node->key.j.index == (unsigned long)-1); + assert_rw_write_locked(&(jnode_get_tree(node)->tree_lock)); + + node->key.j.mapping = mapping; + node->key.j.objectid = get_inode_oid(mapping->host); + node->key.j.index = index; + + jtable = &jnode_get_tree(node)->jhash_table; + + /* race with some other thread inserting jnode into the hash table is + * impossible, because we keep the page lock. 
*/ + /* + * following assertion no longer holds because of RCU: it is possible + * jnode is in the hash table, but with JNODE_RIP bit set. + */ + /* assert("nikita-3211", j_hash_find(jtable, &node->key.j) == NULL); */ + j_hash_insert_rcu(jtable, node); + inode_attach_jnode(node); +} + +static void unhash_unformatted_node_nolock(jnode * node) +{ + assert("vs-1683", node->key.j.mapping != NULL); + assert("vs-1684", + node->key.j.objectid == + get_inode_oid(node->key.j.mapping->host)); + + /* remove jnode from hash-table */ + j_hash_remove_rcu(&node->tree->jhash_table, node); + inode_detach_jnode(node); + node->key.j.mapping = NULL; + node->key.j.index = (unsigned long)-1; + node->key.j.objectid = 0; + +} + +/* remove jnode from hash table and from inode's tree of jnodes. This is used in + reiser4_invalidatepage and in kill_hook_extent -> truncate_inode_jnodes -> + reiser4_uncapture_jnode */ +void unhash_unformatted_jnode(jnode * node) +{ + assert("vs-1445", jnode_is_unformatted(node)); + + write_lock_tree(node->tree); + unhash_unformatted_node_nolock(node); + write_unlock_tree(node->tree); +} + +/* + * search hash table for a jnode with given oid and index. If not found, + * allocate new jnode, insert it, and also insert into radix tree for the + * given inode/mapping. + */ +static jnode *find_get_jnode(reiser4_tree * tree, + struct address_space *mapping, + oid_t oid, unsigned long index) +{ + jnode *result; + jnode *shadow; + int preload; + + result = jnew_unformatted(); + + if (unlikely(result == NULL)) + return ERR_PTR(RETERR(-ENOMEM)); + + preload = radix_tree_preload(reiser4_ctx_gfp_mask_get()); + if (preload != 0) + return ERR_PTR(preload); + + write_lock_tree(tree); + shadow = jfind_nolock(mapping, index); + if (likely(shadow == NULL)) { + /* add new jnode to hash table and inode's radix tree of + * jnodes */ + jref(result); + hash_unformatted_jnode(result, mapping, index); + } else { + /* jnode is found in inode's radix tree of jnodes */ + jref(shadow); + jnode_free(result, JNODE_UNFORMATTED_BLOCK); + assert("vs-1498", shadow->key.j.mapping == mapping); + result = shadow; + } + write_unlock_tree(tree); + + assert("nikita-2955", + ergo(result != NULL, jnode_invariant(result, 0, 0))); + radix_tree_preload_end(); + return result; +} + +/* jget() (a la zget() but for unformatted nodes). Returns (and possibly + creates) jnode corresponding to page @pg. jnode is attached to page and + inserted into jnode hash-table. */ +static jnode *do_jget(reiser4_tree * tree, struct page *pg) +{ + /* + * There are two ways to create jnode: starting with pre-existing page + * and without page. + * + * When page already exists, jnode is created + * (jnode_of_page()->do_jget()) under page lock. This is done in + * ->writepage(), or when capturing anonymous page dirtied through + * mmap. + * + * Jnode without page is created by index_extent_jnode(). 
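+	 *
+	 * For illustration only (a sketch, not lifted from an actual caller),
+	 * the page-first path looks roughly like:
+	 *
+	 *	lock_page(page);
+	 *	node = jnode_of_page(page);
+	 *	if (!IS_ERR(node)) {
+	 *		... node is referenced and attached to the page ...
+	 *		jput(node);
+	 *	}
+	 *	unlock_page(page);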
+ * + */ + + jnode *result; + oid_t oid = get_inode_oid(pg->mapping->host); + + assert("umka-176", pg != NULL); + assert("nikita-2394", PageLocked(pg)); + + result = jprivate(pg); + if (likely(result != NULL)) + return jref(result); + + tree = reiser4_tree_by_page(pg); + + /* check hash-table first */ + result = jfind(pg->mapping, pg->index); + if (unlikely(result != NULL)) { + spin_lock_jnode(result); + jnode_attach_page(result, pg); + spin_unlock_jnode(result); + result->key.j.mapping = pg->mapping; + return result; + } + + /* since page is locked, jnode should be allocated with GFP_NOFS flag */ + reiser4_ctx_gfp_mask_force(GFP_NOFS); + result = find_get_jnode(tree, pg->mapping, oid, pg->index); + if (unlikely(IS_ERR(result))) + return result; + /* attach jnode to page */ + spin_lock_jnode(result); + jnode_attach_page(result, pg); + spin_unlock_jnode(result); + return result; +} + +/* + * return jnode for @pg, creating it if necessary. + */ +jnode *jnode_of_page(struct page *pg) +{ + jnode *result; + + assert("nikita-2394", PageLocked(pg)); + + result = do_jget(reiser4_tree_by_page(pg), pg); + + if (REISER4_DEBUG && !IS_ERR(result)) { + assert("nikita-3210", result == jprivate(pg)); + assert("nikita-2046", jnode_page(jprivate(pg)) == pg); + if (jnode_is_unformatted(jprivate(pg))) { + assert("nikita-2364", + jprivate(pg)->key.j.index == pg->index); + assert("nikita-2367", + jprivate(pg)->key.j.mapping == pg->mapping); + assert("nikita-2365", + jprivate(pg)->key.j.objectid == + get_inode_oid(pg->mapping->host)); + assert("vs-1200", + jprivate(pg)->key.j.objectid == + pg->mapping->host->i_ino); + assert("nikita-2356", + jnode_is_unformatted(jnode_by_page(pg))); + } + assert("nikita-2956", jnode_invariant(jprivate(pg), 0, 0)); + } + return result; +} + +/* attach page to jnode: set ->pg pointer in jnode, and ->private one in the + * page.*/ +void jnode_attach_page(jnode * node, struct page *pg) +{ + assert("nikita-2060", node != NULL); + assert("nikita-2061", pg != NULL); + + assert("nikita-2050", jprivate(pg) == 0ul); + assert("nikita-2393", !PagePrivate(pg)); + assert("vs-1741", node->pg == NULL); + + assert("nikita-2396", PageLocked(pg)); + assert_spin_locked(&(node->guard)); + + get_page(pg); + set_page_private(pg, (unsigned long)node); + node->pg = pg; + SetPagePrivate(pg); +} + +/* Dual to jnode_attach_page: break a binding between page and jnode */ +void page_clear_jnode(struct page *page, jnode * node) +{ + assert("nikita-2425", PageLocked(page)); + assert_spin_locked(&(node->guard)); + assert("nikita-2428", PagePrivate(page)); + + assert("nikita-3551", !PageWriteback(page)); + + JF_CLR(node, JNODE_PARSED); + set_page_private(page, 0ul); + ClearPagePrivate(page); + node->pg = NULL; + put_page(page); +} + +#if 0 +/* it is only used in one place to handle error */ +void +page_detach_jnode(struct page *page, struct address_space *mapping, + unsigned long index) +{ + assert("nikita-2395", page != NULL); + + lock_page(page); + if ((page->mapping == mapping) && (page->index == index) + && PagePrivate(page)) { + jnode *node; + + node = jprivate(page); + spin_lock_jnode(node); + page_clear_jnode(page, node); + spin_unlock_jnode(node); + } + unlock_page(page); +} +#endif /* 0 */ + +/* return @node page locked. + + Locking ordering requires that one first takes page lock and afterwards + spin lock on node attached to this page. Sometimes it is necessary to go in + the opposite direction. This is done through standard trylock-and-release + loop. 
+*/ +static struct page *jnode_lock_page(jnode * node) +{ + struct page *page; + + assert("nikita-2052", node != NULL); + assert("nikita-2401", LOCK_CNT_NIL(spin_locked_jnode)); + + while (1) { + + spin_lock_jnode(node); + page = jnode_page(node); + if (page == NULL) + break; + + /* no need to get_page( page ) here, because page cannot + be evicted from memory without detaching it from jnode and + this requires spin lock on jnode that we already hold. + */ + if (trylock_page(page)) { + /* We won a lock on jnode page, proceed. */ + break; + } + + /* Page is locked by someone else. */ + get_page(page); + spin_unlock_jnode(node); + wait_on_page_locked(page); + /* it is possible that page was detached from jnode and + returned to the free pool, or re-assigned while we were + waiting on locked bit. This will be rechecked on the next + loop iteration. + */ + put_page(page); + + /* try again */ + } + return page; +} + +/* + * is JNODE_PARSED bit is not set, call ->parse() method of jnode, to verify + * validness of jnode content. + */ +static inline int jparse(jnode * node) +{ + int result; + + assert("nikita-2466", node != NULL); + + spin_lock_jnode(node); + if (likely(!jnode_is_parsed(node))) { + result = jnode_ops(node)->parse(node); + if (likely(result == 0)) + JF_SET(node, JNODE_PARSED); + } else + result = 0; + spin_unlock_jnode(node); + return result; +} + +/* Lock a page attached to jnode, create and attach page to jnode if it had no + * one. */ +static struct page *jnode_get_page_locked(jnode * node, gfp_t gfp_flags) +{ + struct page *page; + + spin_lock_jnode(node); + page = jnode_page(node); + + if (page == NULL) { + spin_unlock_jnode(node); + page = find_or_create_page(jnode_get_mapping(node), + jnode_get_index(node), gfp_flags); + if (page == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + } else { + if (trylock_page(page)) { + spin_unlock_jnode(node); + return page; + } + get_page(page); + spin_unlock_jnode(node); + lock_page(page); + assert("nikita-3134", page->mapping == jnode_get_mapping(node)); + } + + spin_lock_jnode(node); + if (!jnode_page(node)) + jnode_attach_page(node, page); + spin_unlock_jnode(node); + + put_page(page); + assert("zam-894", jnode_page(node) == page); + return page; +} + +/* Start read operation for jnode's page if page is not up-to-date. */ +static int jnode_start_read(jnode * node, struct page *page) +{ + assert("zam-893", PageLocked(page)); + + if (PageUptodate(page)) { + unlock_page(page); + return 0; + } + return reiser4_page_io(page, node, READ, reiser4_ctx_gfp_mask_get()); +} + +#if REISER4_DEBUG +static void check_jload(jnode * node, struct page *page) +{ + if (jnode_is_znode(node)) { + znode *z = JZNODE(node); + + if (znode_is_any_locked(z)) { + assert("nikita-3253", + z->nr_items == + node_plugin_by_node(z)->num_of_items(z)); + kunmap(page); + } + assert("nikita-3565", znode_invariant(z)); + } +} +#else +#define check_jload(node, page) noop +#endif + +/* prefetch jnode to speed up next call to jload. Call this when you are going + * to call jload() shortly. This will bring appropriate portion of jnode into + * CPU cache. 
*/ +void jload_prefetch(jnode * node) +{ + prefetchw(&node->x_count); +} + +/* load jnode's data into memory */ +int jload_gfp(jnode * node /* node to load */ , + gfp_t gfp_flags /* allocation flags */ , + int do_kmap/* true if page should be kmapped */) +{ + struct page *page; + int result = 0; + int parsed; + + assert("nikita-3010", reiser4_schedulable()); + + prefetchw(&node->pg); + + /* taking d-reference implies taking x-reference. */ + jref(node); + + /* + * acquiring d-reference to @jnode and check for JNODE_PARSED bit + * should be atomic, otherwise there is a race against + * reiser4_releasepage(). + */ + spin_lock(&(node->load)); + add_d_ref(node); + parsed = jnode_is_parsed(node); + spin_unlock(&(node->load)); + + if (unlikely(!parsed)) { + page = jnode_get_page_locked(node, gfp_flags); + if (unlikely(IS_ERR(page))) { + result = PTR_ERR(page); + goto failed; + } + + result = jnode_start_read(node, page); + if (unlikely(result != 0)) + goto failed; + + wait_on_page_locked(page); + if (unlikely(!PageUptodate(page))) { + result = RETERR(-EIO); + goto failed; + } + + if (do_kmap) + node->data = kmap(page); + + result = jparse(node); + if (unlikely(result != 0)) { + if (do_kmap) + kunmap(page); + goto failed; + } + check_jload(node, page); + } else { + page = jnode_page(node); + check_jload(node, page); + if (do_kmap) + node->data = kmap(page); + } + + if (!is_writeout_mode()) + /* We do not mark pages active if jload is called as a part of + * jnode_flush() or reiser4_write_logs(). Both jnode_flush() + * and write_logs() add no value to cached data, there is no + * sense to mark pages as active when they go to disk, it just + * confuses vm scanning routines because clean page could be + * moved out from inactive list as a result of this + * mark_page_accessed() call. */ + mark_page_accessed(page); + + return 0; + +failed: + jrelse_tail(node); + return result; + +} + +/* start asynchronous reading for given jnode's page. */ +int jstartio(jnode * node) +{ + struct page *page; + + page = jnode_get_page_locked(node, reiser4_ctx_gfp_mask_get()); + if (IS_ERR(page)) + return PTR_ERR(page); + + return jnode_start_read(node, page); +} + +/* Initialize a node by calling appropriate plugin instead of reading + * node from disk as in jload(). */ +int jinit_new(jnode * node, gfp_t gfp_flags) +{ + struct page *page; + int result; + + jref(node); + add_d_ref(node); + + page = jnode_get_page_locked(node, gfp_flags); + if (IS_ERR(page)) { + result = PTR_ERR(page); + goto failed; + } + + SetPageUptodate(page); + unlock_page(page); + + node->data = kmap(page); + + if (!jnode_is_parsed(node)) { + jnode_plugin *jplug = jnode_ops(node); + spin_lock_jnode(node); + result = jplug->init(node); + spin_unlock_jnode(node); + if (result) { + kunmap(page); + goto failed; + } + JF_SET(node, JNODE_PARSED); + } + + return 0; + +failed: + jrelse(node); + return result; +} + +/* release a reference to jnode acquired by jload(), decrement ->d_count */ +void jrelse_tail(jnode * node/* jnode to release references to */) +{ + assert("nikita-489", atomic_read(&node->d_count) > 0); + atomic_dec(&node->d_count); + /* release reference acquired in jload_gfp() or jinit_new() */ + jput(node); + if (jnode_is_unformatted(node) || jnode_is_znode(node)) + LOCK_CNT_DEC(d_refs); +} + +/* drop reference to node data. When last reference is dropped, data are + unloaded. 
*/ +void jrelse(jnode * node/* jnode to release references to */) +{ + struct page *page; + + assert("nikita-487", node != NULL); + assert_spin_not_locked(&(node->guard)); + + page = jnode_page(node); + if (likely(page != NULL)) { + /* + * it is safe not to lock jnode here, because at this point + * @node->d_count is greater than zero (if jrelse() is used + * correctly, that is). JNODE_PARSED may be not set yet, if, + * for example, we got here as a result of error handling path + * in jload(). Anyway, page cannot be detached by + * reiser4_releasepage(). truncate will invalidate page + * regardless, but this should not be a problem. + */ + kunmap(page); + } + jrelse_tail(node); +} + +/* called from jput() to wait for io completion */ +static void jnode_finish_io(jnode * node) +{ + struct page *page; + + assert("nikita-2922", node != NULL); + + spin_lock_jnode(node); + page = jnode_page(node); + if (page != NULL) { + get_page(page); + spin_unlock_jnode(node); + wait_on_page_writeback(page); + put_page(page); + } else + spin_unlock_jnode(node); +} + +/* + * This is called by jput() when last reference to jnode is released. This is + * separate function, because we want fast path of jput() to be inline and, + * therefore, small. + */ +void jput_final(jnode * node) +{ + int r_i_p; + + /* A fast check for keeping node in cache. We always keep node in cache + * if its page is present and node was not marked for deletion */ + if (jnode_page(node) != NULL && !JF_ISSET(node, JNODE_HEARD_BANSHEE)) { + rcu_read_unlock(); + return; + } + r_i_p = !JF_TEST_AND_SET(node, JNODE_RIP); + /* + * if r_i_p is true, we were first to set JNODE_RIP on this node. In + * this case it is safe to access node after unlock. + */ + rcu_read_unlock(); + if (r_i_p) { + jnode_finish_io(node); + if (JF_ISSET(node, JNODE_HEARD_BANSHEE)) + /* node is removed from the tree. */ + jdelete(node); + else + jnode_try_drop(node); + } + /* if !r_i_p some other thread is already killing it */ +} + +int jwait_io(jnode * node, int rw) +{ + struct page *page; + int result; + + assert("zam-448", jnode_page(node) != NULL); + + page = jnode_page(node); + + result = 0; + if (rw == READ) { + wait_on_page_locked(page); + } else { + assert("nikita-2227", rw == WRITE); + wait_on_page_writeback(page); + } + if (PageError(page)) + result = RETERR(-EIO); + + return result; +} + +/* + * jnode types and plugins. + * + * jnode by itself is a "base type". There are several different jnode + * flavors, called "jnode types" (see jnode_type for a list). Sometimes code + * has to do different things based on jnode type. In the standard reiser4 way + * this is done by having jnode plugin (see fs/reiser4/plugin.h:jnode_plugin). + * + * Functions below deal with jnode types and define methods of jnode plugin. + * + */ + +/* set jnode type. This is done during jnode initialization. */ +static void jnode_set_type(jnode * node, jnode_type type) +{ + static unsigned long type_to_mask[] = { + [JNODE_UNFORMATTED_BLOCK] = 1, + [JNODE_FORMATTED_BLOCK] = 0, + [JNODE_BITMAP] = 2, + [JNODE_IO_HEAD] = 6, + [JNODE_INODE] = 4 + }; + + assert("zam-647", type < LAST_JNODE_TYPE); + assert("nikita-2815", !jnode_is_loaded(node)); + assert("nikita-3386", node->state == 0); + + node->state |= (type_to_mask[type] << JNODE_TYPE_1); +} + +/* ->init() method of jnode plugin for jnodes that don't require plugin + * specific initialization. 
*/ +static int init_noinit(jnode * node UNUSED_ARG) +{ + return 0; +} + +/* ->parse() method of jnode plugin for jnodes that don't require plugin + * specific pasring. */ +static int parse_noparse(jnode * node UNUSED_ARG) +{ + return 0; +} + +/* ->mapping() method for unformatted jnode */ +struct address_space *mapping_jnode(const jnode * node) +{ + struct address_space *map; + + assert("nikita-2713", node != NULL); + + /* mapping is stored in jnode */ + + map = node->key.j.mapping; + assert("nikita-2714", map != NULL); + assert("nikita-2897", is_reiser4_inode(map->host)); + assert("nikita-2715", get_inode_oid(map->host) == node->key.j.objectid); + return map; +} + +/* ->index() method for unformatted jnodes */ +unsigned long index_jnode(const jnode * node) +{ + /* index is stored in jnode */ + return node->key.j.index; +} + +/* ->remove() method for unformatted jnodes */ +static inline void remove_jnode(jnode * node, reiser4_tree * tree) +{ + /* remove jnode from hash table and radix tree */ + if (node->key.j.mapping) + unhash_unformatted_node_nolock(node); +} + +/* ->mapping() method for znodes */ +static struct address_space *mapping_znode(const jnode * node) +{ + /* all znodes belong to fake inode */ + return reiser4_get_super_fake(jnode_get_tree(node)->super)->i_mapping; +} + +/* ->index() method for znodes */ +static unsigned long index_znode(const jnode * node) +{ + unsigned long addr; + assert("nikita-3317", (1 << znode_shift_order) < sizeof(znode)); + + /* index of znode is just its address (shifted) */ + addr = (unsigned long)node; + return (addr - PAGE_OFFSET) >> znode_shift_order; +} + +/* ->mapping() method for bitmap jnode */ +static struct address_space *mapping_bitmap(const jnode * node) +{ + /* all bitmap blocks belong to special bitmap inode */ + return get_super_private(jnode_get_tree(node)->super)->bitmap-> + i_mapping; +} + +/* ->index() method for jnodes that are indexed by address */ +static unsigned long index_is_address(const jnode * node) +{ + unsigned long ind; + + ind = (unsigned long)node; + return ind - PAGE_OFFSET; +} + +/* resolve race with jput */ +jnode *jnode_rip_sync(reiser4_tree *tree, jnode *node) +{ + /* + * This is used as part of RCU-based jnode handling. + * + * jlookup(), zlook(), zget(), and cbk_cache_scan_slots() have to work + * with unreferenced jnodes (ones with ->x_count == 0). Hash table is + * not protected during this, so concurrent thread may execute + * zget-set-HEARD_BANSHEE-zput, or somehow else cause jnode to be + * freed in jput_final(). To avoid such races, jput_final() sets + * JNODE_RIP on jnode (under tree lock). All places that work with + * unreferenced jnodes call this function. It checks for JNODE_RIP bit + * (first without taking tree lock), and if this bit is set, released + * reference acquired by the current thread and returns NULL. + * + * As a result, if jnode is being concurrently freed, NULL is returned + * and caller should pretend that jnode wasn't found in the first + * place. + * + * Otherwise it's safe to release "rcu-read-lock" and continue with + * jnode. 
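+	 *
+	 * Caller-side protocol, sketched here for illustration (compare
+	 * jlookup() above, which performs this check via jnode_rip_check()):
+	 *
+	 *	rcu_read_lock();
+	 *	node = <look node up in the hash table>;
+	 *	if (node != NULL) {
+	 *		jref(node);
+	 *		node = jnode_rip_sync(tree, node);
+	 *	}
+	 *	rcu_read_unlock();
+	 *	<a NULL result means "not found">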
+ */ + if (unlikely(JF_ISSET(node, JNODE_RIP))) { + read_lock_tree(tree); + if (JF_ISSET(node, JNODE_RIP)) { + dec_x_ref(node); + node = NULL; + } + read_unlock_tree(tree); + } + return node; +} + +reiser4_key *jnode_build_key(const jnode * node, reiser4_key * key) +{ + struct inode *inode; + item_plugin *iplug; + loff_t off; + + assert("nikita-3092", node != NULL); + assert("nikita-3093", key != NULL); + assert("nikita-3094", jnode_is_unformatted(node)); + + off = ((loff_t) index_jnode(node)) << PAGE_SHIFT; + inode = mapping_jnode(node)->host; + + if (node->parent_item_id != 0) + iplug = item_plugin_by_id(node->parent_item_id); + else + iplug = NULL; + + if (iplug != NULL && iplug->f.key_by_offset) + iplug->f.key_by_offset(inode, off, key); + else { + file_plugin *fplug; + + fplug = inode_file_plugin(inode); + assert("zam-1007", fplug != NULL); + assert("zam-1008", fplug->key_by_inode != NULL); + + fplug->key_by_inode(inode, off, key); + } + + return key; +} + +/* ->parse() method for formatted nodes */ +static int parse_znode(jnode * node) +{ + return zparse(JZNODE(node)); +} + +/* ->delete() method for formatted nodes */ +static void delete_znode(jnode * node, reiser4_tree * tree) +{ + znode *z; + + assert_rw_write_locked(&(tree->tree_lock)); + assert("vs-898", JF_ISSET(node, JNODE_HEARD_BANSHEE)); + + z = JZNODE(node); + assert("vs-899", z->c_count == 0); + + /* delete znode from sibling list. */ + sibling_list_remove(z); + + znode_remove(z, tree); +} + +/* ->remove() method for formatted nodes */ +static int remove_znode(jnode * node, reiser4_tree * tree) +{ + znode *z; + + assert_rw_write_locked(&(tree->tree_lock)); + z = JZNODE(node); + + if (z->c_count == 0) { + /* detach znode from sibling list. */ + sibling_list_drop(z); + /* this is called with tree spin-lock held, so call + znode_remove() directly (rather than znode_lock_remove()). */ + znode_remove(z, tree); + return 0; + } + return RETERR(-EBUSY); +} + +/* ->init() method for formatted nodes */ +int init_znode(jnode * node) +{ + znode *z; + + z = JZNODE(node); + /* call node plugin to do actual initialization */ + z->nr_items = 0; + return z->nplug->init(z); +} + +/* ->clone() method for formatted nodes */ +static jnode *clone_formatted(jnode * node) +{ + znode *clone; + + assert("vs-1430", jnode_is_znode(node)); + clone = zalloc(reiser4_ctx_gfp_mask_get()); + if (clone == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + zinit(clone, NULL, current_tree); + jnode_set_block(ZJNODE(clone), jnode_get_block(node)); + /* ZJNODE(clone)->key.z is not initialized */ + clone->level = JZNODE(node)->level; + + return ZJNODE(clone); +} + +/* jplug->clone for unformatted nodes */ +static jnode *clone_unformatted(jnode * node) +{ + jnode *clone; + + assert("vs-1431", jnode_is_unformatted(node)); + clone = jalloc(); + if (clone == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + + jnode_init(clone, current_tree, JNODE_UNFORMATTED_BLOCK); + jnode_set_block(clone, jnode_get_block(node)); + + return clone; + +} + +/* + * Setup jnode plugin methods for various jnode types. 
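+ *
+ * These methods are dispatched through jnode_ops(); for example,
+ * jparse() above calls jnode_ops(node)->parse(node), and
+ * jnode_get_mapping() below is simply jnode_ops(node)->mapping(node).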
+ */ +jnode_plugin jnode_plugins[LAST_JNODE_TYPE] = { + [JNODE_UNFORMATTED_BLOCK] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_UNFORMATTED_BLOCK, + .pops = NULL, + .label = "unformatted", + .desc = "unformatted node", + .linkage = {NULL, NULL} + }, + .init = init_noinit, + .parse = parse_noparse, + .mapping = mapping_jnode, + .index = index_jnode, + .clone = clone_unformatted + }, + [JNODE_FORMATTED_BLOCK] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_FORMATTED_BLOCK, + .pops = NULL, + .label = "formatted", + .desc = "formatted tree node", + .linkage = {NULL, NULL} + }, + .init = init_znode, + .parse = parse_znode, + .mapping = mapping_znode, + .index = index_znode, + .clone = clone_formatted + }, + [JNODE_BITMAP] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_BITMAP, + .pops = NULL, + .label = "bitmap", + .desc = "bitmap node", + .linkage = {NULL, NULL} + }, + .init = init_noinit, + .parse = parse_noparse, + .mapping = mapping_bitmap, + .index = index_is_address, + .clone = NULL + }, + [JNODE_IO_HEAD] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_IO_HEAD, + .pops = NULL, + .label = "io head", + .desc = "io head", + .linkage = {NULL, NULL} + }, + .init = init_noinit, + .parse = parse_noparse, + .mapping = mapping_bitmap, + .index = index_is_address, + .clone = NULL + }, + [JNODE_INODE] = { + .h = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .id = JNODE_INODE, + .pops = NULL, + .label = "inode", + .desc = "inode's builtin jnode", + .linkage = {NULL, NULL} + }, + .init = NULL, + .parse = NULL, + .mapping = NULL, + .index = NULL, + .clone = NULL + } +}; + +/* + * jnode destruction. + * + * Thread may use a jnode after it acquired a reference to it. References are + * counted in ->x_count field. Reference protects jnode from being + * recycled. This is different from protecting jnode data (that are stored in + * jnode page) from being evicted from memory. Data are protected by jload() + * and released by jrelse(). + * + * If thread already possesses a reference to the jnode it can acquire another + * one through jref(). Initial reference is obtained (usually) by locating + * jnode in some indexing structure that depends on jnode type: formatted + * nodes are kept in global hash table, where they are indexed by block + * number, and also in the cbk cache. Unformatted jnodes are also kept in hash + * table, which is indexed by oid and offset within file, and in per-inode + * radix tree. + * + * Reference to jnode is released by jput(). If last reference is released, + * jput_final() is called. This function determines whether jnode has to be + * deleted (this happens when corresponding node is removed from the file + * system, jnode is marked with JNODE_HEARD_BANSHEE bit in this case), or it + * should be just "removed" (deleted from memory). + * + * Jnode destruction is signally delicate dance because of locking and RCU. + */ + +/* + * Returns true if jnode cannot be removed right now. This check is called + * under tree lock. If it returns true, jnode is irrevocably committed to be + * deleted/removed. + */ +static inline int jnode_is_busy(const jnode * node, jnode_type jtype) +{ + /* if other thread managed to acquire a reference to this jnode, don't + * free it. 
*/ + if (atomic_read(&node->x_count) > 0) + return 1; + /* also, don't free znode that has children in memory */ + if (jtype == JNODE_FORMATTED_BLOCK && JZNODE(node)->c_count > 0) + return 1; + return 0; +} + +/* + * this is called as part of removing jnode. Based on jnode type, call + * corresponding function that removes jnode from indices and returns it back + * to the appropriate slab (through RCU). + */ +static inline void +jnode_remove(jnode * node, jnode_type jtype, reiser4_tree * tree) +{ + switch (jtype) { + case JNODE_UNFORMATTED_BLOCK: + remove_jnode(node, tree); + break; + case JNODE_IO_HEAD: + case JNODE_BITMAP: + break; + case JNODE_INODE: + break; + case JNODE_FORMATTED_BLOCK: + remove_znode(node, tree); + break; + default: + wrong_return_value("nikita-3196", "Wrong jnode type"); + } +} + +/* + * this is called as part of deleting jnode. Based on jnode type, call + * corresponding function that removes jnode from indices and returns it back + * to the appropriate slab (through RCU). + * + * This differs from jnode_remove() only for formatted nodes---for them + * sibling list handling is different for removal and deletion. + */ +static inline void +jnode_delete(jnode * node, jnode_type jtype, reiser4_tree * tree UNUSED_ARG) +{ + switch (jtype) { + case JNODE_UNFORMATTED_BLOCK: + remove_jnode(node, tree); + break; + case JNODE_IO_HEAD: + case JNODE_BITMAP: + break; + case JNODE_FORMATTED_BLOCK: + delete_znode(node, tree); + break; + case JNODE_INODE: + default: + wrong_return_value("nikita-3195", "Wrong jnode type"); + } +} + +#if REISER4_DEBUG +/* + * remove jnode from the debugging list of all jnodes hanging off super-block. + */ +void jnode_list_remove(jnode * node) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(jnode_get_tree(node)->super); + + spin_lock_irq(&sbinfo->all_guard); + assert("nikita-2422", !list_empty(&node->jnodes)); + list_del_init(&node->jnodes); + spin_unlock_irq(&sbinfo->all_guard); +} +#endif + +/* + * this is called by jput_final() to remove jnode when last reference to it is + * released. + */ +static int jnode_try_drop(jnode * node) +{ + int result; + reiser4_tree *tree; + jnode_type jtype; + + assert("nikita-2491", node != NULL); + assert("nikita-2583", JF_ISSET(node, JNODE_RIP)); + + tree = jnode_get_tree(node); + jtype = jnode_get_type(node); + + spin_lock_jnode(node); + write_lock_tree(tree); + /* + * if jnode has a page---leave it alone. Memory pressure will + * eventually kill page and jnode. + */ + if (jnode_page(node) != NULL) { + write_unlock_tree(tree); + spin_unlock_jnode(node); + JF_CLR(node, JNODE_RIP); + return RETERR(-EBUSY); + } + + /* re-check ->x_count under tree lock. */ + result = jnode_is_busy(node, jtype); + if (result == 0) { + assert("nikita-2582", !JF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert("jmacd-511/b", atomic_read(&node->d_count) == 0); + + spin_unlock_jnode(node); + /* no page and no references---despatch him. */ + jnode_remove(node, jtype, tree); + write_unlock_tree(tree); + jnode_free(node, jtype); + } else { + /* busy check failed: reference was acquired by concurrent + * thread. 
*/ + write_unlock_tree(tree); + spin_unlock_jnode(node); + JF_CLR(node, JNODE_RIP); + } + return result; +} + +/* jdelete() -- Delete jnode from the tree and file system */ +static int jdelete(jnode * node/* jnode to finish with */) +{ + struct page *page; + int result; + reiser4_tree *tree; + jnode_type jtype; + + assert("nikita-467", node != NULL); + assert("nikita-2531", JF_ISSET(node, JNODE_RIP)); + + jtype = jnode_get_type(node); + + page = jnode_lock_page(node); + assert_spin_locked(&(node->guard)); + + tree = jnode_get_tree(node); + + write_lock_tree(tree); + /* re-check ->x_count under tree lock. */ + result = jnode_is_busy(node, jtype); + if (likely(!result)) { + assert("nikita-2123", JF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert("jmacd-511", atomic_read(&node->d_count) == 0); + + /* detach page */ + if (page != NULL) { + /* + * FIXME this is racy against jnode_extent_write(). + */ + page_clear_jnode(page, node); + } + spin_unlock_jnode(node); + /* goodbye */ + jnode_delete(node, jtype, tree); + write_unlock_tree(tree); + jnode_free(node, jtype); + /* @node is no longer valid pointer */ + if (page != NULL) + reiser4_drop_page(page); + } else { + /* busy check failed: reference was acquired by concurrent + * thread. */ + JF_CLR(node, JNODE_RIP); + write_unlock_tree(tree); + spin_unlock_jnode(node); + if (page != NULL) + unlock_page(page); + } + return result; +} + +/* drop jnode on the floor. + + Return value: + + -EBUSY: failed to drop jnode, because there are still references to it + + 0: successfully dropped jnode + +*/ +static int jdrop_in_tree(jnode * node, reiser4_tree * tree) +{ + struct page *page; + jnode_type jtype; + int result; + + assert("zam-602", node != NULL); + assert_rw_not_read_locked(&(tree->tree_lock)); + assert_rw_not_write_locked(&(tree->tree_lock)); + assert("nikita-2403", !JF_ISSET(node, JNODE_HEARD_BANSHEE)); + + jtype = jnode_get_type(node); + + page = jnode_lock_page(node); + assert_spin_locked(&(node->guard)); + + write_lock_tree(tree); + + /* re-check ->x_count under tree lock. */ + result = jnode_is_busy(node, jtype); + if (!result) { + assert("nikita-2488", page == jnode_page(node)); + assert("nikita-2533", atomic_read(&node->d_count) == 0); + if (page != NULL) { + assert("nikita-2126", !PageDirty(page)); + assert("nikita-2127", PageUptodate(page)); + assert("nikita-2181", PageLocked(page)); + page_clear_jnode(page, node); + } + spin_unlock_jnode(node); + jnode_remove(node, jtype, tree); + write_unlock_tree(tree); + jnode_free(node, jtype); + if (page != NULL) + reiser4_drop_page(page); + } else { + /* busy check failed: reference was acquired by concurrent + * thread. */ + JF_CLR(node, JNODE_RIP); + write_unlock_tree(tree); + spin_unlock_jnode(node); + if (page != NULL) + unlock_page(page); + } + return result; +} + +/* This function frees jnode "if possible". In particular, [dcx]_count has to + be 0 (where applicable). */ +void jdrop(jnode * node) +{ + jdrop_in_tree(node, jnode_get_tree(node)); +} + +/* IO head jnode implementation; The io heads are simple j-nodes with limited + functionality (these j-nodes are not in any hash table) just for reading + from and writing to disk. 
*/ + +jnode *reiser4_alloc_io_head(const reiser4_block_nr * block) +{ + jnode *jal = jalloc(); + + if (jal != NULL) { + jnode_init(jal, current_tree, JNODE_IO_HEAD); + jnode_set_block(jal, block); + } + + jref(jal); + + return jal; +} + +void reiser4_drop_io_head(jnode * node) +{ + assert("zam-648", jnode_get_type(node) == JNODE_IO_HEAD); + + jput(node); + jdrop(node); +} + +/* protect keep jnode data from reiser4_releasepage() */ +void pin_jnode_data(jnode * node) +{ + assert("zam-671", jnode_page(node) != NULL); + get_page(jnode_page(node)); +} + +/* make jnode data free-able again */ +void unpin_jnode_data(jnode * node) +{ + assert("zam-672", jnode_page(node) != NULL); + put_page(jnode_page(node)); +} + +struct address_space *jnode_get_mapping(const jnode * node) +{ + return jnode_ops(node)->mapping(node); +} + +#if REISER4_DEBUG +/* debugging aid: jnode invariant */ +int jnode_invariant_f(const jnode * node, char const **msg) +{ +#define _ergo(ant, con) \ + ((*msg) = "{" #ant "} ergo {" #con "}", ergo((ant), (con))) +#define _check(exp) ((*msg) = #exp, (exp)) + + return _check(node != NULL) && + /* [jnode-queued] */ + /* only relocated node can be queued, except that when znode + * is being deleted, its JNODE_RELOC bit is cleared */ + _ergo(JF_ISSET(node, JNODE_FLUSH_QUEUED), + JF_ISSET(node, JNODE_RELOC) || + JF_ISSET(node, JNODE_HEARD_BANSHEE)) && + _check(node->jnodes.prev != NULL) && + _check(node->jnodes.next != NULL) && + /* [jnode-dirty] invariant */ + /* dirty inode is part of atom */ + _ergo(JF_ISSET(node, JNODE_DIRTY), node->atom != NULL) && + /* [jnode-oid] invariant */ + /* for unformatted node ->objectid and ->mapping fields are + * consistent */ + _ergo(jnode_is_unformatted(node) && node->key.j.mapping != NULL, + node->key.j.objectid == + get_inode_oid(node->key.j.mapping->host)) && + /* [jnode-atom-valid] invariant */ + /* node atom has valid state */ + _ergo(node->atom != NULL, node->atom->stage != ASTAGE_INVALID) && + /* [jnode-page-binding] invariant */ + /* if node points to page, it points back to node */ + _ergo(node->pg != NULL, jprivate(node->pg) == node) && + /* [jnode-refs] invariant */ + /* only referenced jnode can be loaded */ + _check(atomic_read(&node->x_count) >= atomic_read(&node->d_count)); + +} + +static const char *jnode_type_name(jnode_type type) +{ + switch (type) { + case JNODE_UNFORMATTED_BLOCK: + return "unformatted"; + case JNODE_FORMATTED_BLOCK: + return "formatted"; + case JNODE_BITMAP: + return "bitmap"; + case JNODE_IO_HEAD: + return "io head"; + case JNODE_INODE: + return "inode"; + case LAST_JNODE_TYPE: + return "last"; + default:{ + static char unknown[30]; + + sprintf(unknown, "unknown %i", type); + return unknown; + } + } +} + +#define jnode_state_name(node, flag) \ + (JF_ISSET((node), (flag)) ? 
((#flag "|")+6) : "") + +/* debugging aid: output human readable information about @node */ +static void info_jnode(const char *prefix /* prefix to print */ , + const jnode * node/* node to print */) +{ + assert("umka-068", prefix != NULL); + + if (node == NULL) { + printk("%s: null\n", prefix); + return; + } + + printk + ("%s: %p: state: %lx: [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s], level: %i," + " block: %s, d_count: %d, x_count: %d, " + "pg: %p, atom: %p, lock: %i:%i, type: %s, ", prefix, node, + node->state, + jnode_state_name(node, JNODE_PARSED), + jnode_state_name(node, JNODE_HEARD_BANSHEE), + jnode_state_name(node, JNODE_LEFT_CONNECTED), + jnode_state_name(node, JNODE_RIGHT_CONNECTED), + jnode_state_name(node, JNODE_ORPHAN), + jnode_state_name(node, JNODE_CREATED), + jnode_state_name(node, JNODE_RELOC), + jnode_state_name(node, JNODE_OVRWR), + jnode_state_name(node, JNODE_DIRTY), + jnode_state_name(node, JNODE_IS_DYING), + jnode_state_name(node, JNODE_RIP), + jnode_state_name(node, JNODE_MISSED_IN_CAPTURE), + jnode_state_name(node, JNODE_WRITEBACK), + jnode_state_name(node, JNODE_DKSET), + jnode_state_name(node, JNODE_REPACK), + jnode_state_name(node, JNODE_CLUSTER_PAGE), + jnode_get_level(node), sprint_address(jnode_get_block(node)), + atomic_read(&node->d_count), atomic_read(&node->x_count), + jnode_page(node), node->atom, 0, 0, + jnode_type_name(jnode_get_type(node))); + if (jnode_is_unformatted(node)) { + printk("inode: %llu, index: %lu, ", + node->key.j.objectid, node->key.j.index); + } +} + +/* debugging aid: check znode invariant and panic if it doesn't hold */ +static int jnode_invariant(jnode * node, int tlocked, int jlocked) +{ + char const *failed_msg; + int result; + reiser4_tree *tree; + + tree = jnode_get_tree(node); + + assert("umka-063312", node != NULL); + assert("umka-064321", tree != NULL); + + if (!jlocked && !tlocked) + spin_lock_jnode((jnode *) node); + if (!tlocked) + read_lock_tree(jnode_get_tree(node)); + result = jnode_invariant_f(node, &failed_msg); + if (!result) { + info_jnode("corrupted node", node); + warning("jmacd-555", "Condition %s failed", failed_msg); + } + if (!tlocked) + read_unlock_tree(jnode_get_tree(node)); + if (!jlocked && !tlocked) + spin_unlock_jnode((jnode *) node); + return result; +} + +#endif /* REISER4_DEBUG */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/jnode.h b/fs/reiser4/jnode.h new file mode 100644 index 000000000000..9896f5f0ecaa --- /dev/null +++ b/fs/reiser4/jnode.h @@ -0,0 +1,704 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declaration of jnode. See jnode.c for details. 
*/ + +#ifndef __JNODE_H__ +#define __JNODE_H__ + +#include "forward.h" +#include "type_safe_hash.h" +#include "txnmgr.h" +#include "key.h" +#include "debug.h" +#include "dformat.h" +#include "page_cache.h" +#include "context.h" + +#include "plugin/plugin.h" + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/spinlock.h> +#include <asm/atomic.h> +#include <linux/bitops.h> +#include <linux/list.h> +#include <linux/rcupdate.h> + +/* declare hash table of jnodes (jnodes proper, that is, unformatted + nodes) */ +TYPE_SAFE_HASH_DECLARE(j, jnode); + +/* declare hash table of znodes */ +TYPE_SAFE_HASH_DECLARE(z, znode); + +struct jnode_key { + __u64 objectid; + unsigned long index; + struct address_space *mapping; +}; + +/* + Jnode is the "base class" of other nodes in reiser4. It is also happens to + be exactly the node we use for unformatted tree nodes. + + Jnode provides following basic functionality: + + . reference counting and indexing. + + . integration with page cache. Jnode has ->pg reference to which page can + be attached. + + . interface to transaction manager. It is jnode that is kept in transaction + manager lists, attached to atoms, etc. (NOTE-NIKITA one may argue that this + means, there should be special type of jnode for inode.) + + Locking: + + Spin lock: the following fields are protected by the per-jnode spin lock: + + ->state + ->atom + ->capture_link + + Following fields are protected by the global tree lock: + + ->link + ->key.z (content of ->key.z is only changed in znode_rehash()) + ->key.j + + Atomic counters + + ->x_count + ->d_count + + ->pg, and ->data are protected by spin lock for unused jnode and are + immutable for used jnode (one for which fs/reiser4/vfs_ops.c:releasable() + is false). + + ->tree is immutable after creation + + Unclear + + ->blocknr: should be under jnode spin-lock, but current interface is based + on passing of block address. + + If you ever need to spin lock two nodes at once, do this in "natural" + memory order: lock znode with lower address first. (See lock_two_nodes().) + + Invariants involving this data-type: + + [jnode-dirty] + [jnode-refs] + [jnode-oid] + [jnode-queued] + [jnode-atom-valid] + [jnode-page-binding] +*/ + +struct jnode { +#if REISER4_DEBUG +#define JMAGIC 0x52654973 /* "ReIs" */ + int magic; +#endif + /* FIRST CACHE LINE (16 bytes): data used by jload */ + + /* jnode's state: bitwise flags from the reiser4_jnode_state enum. */ + /* 0 */ unsigned long state; + + /* lock, protecting jnode's fields. */ + /* 4 */ spinlock_t load; + + /* counter of references to jnode itself. Increased on jref(). + Decreased on jput(). + */ + /* 8 */ atomic_t x_count; + + /* counter of references to jnode's data. Pin data page(s) in + memory while this is greater than 0. Increased on jload(). + Decreased on jrelse(). + */ + /* 12 */ atomic_t d_count; + + /* SECOND CACHE LINE: data used by hash table lookups */ + + /* 16 */ union { + /* znodes are hashed by block number */ + reiser4_block_nr z; + /* unformatted nodes are hashed by mapping plus offset */ + struct jnode_key j; + } key; + + /* THIRD CACHE LINE */ + + /* 32 */ union { + /* pointers to maintain hash-table */ + z_hash_link z; + j_hash_link j; + } link; + + /* pointer to jnode page. */ + /* 36 */ struct page *pg; + /* pointer to node itself. 
This is page_address(node->pg) when page is + attached to the jnode + */ + /* 40 */ void *data; + + /* 44 */ reiser4_tree *tree; + + /* FOURTH CACHE LINE: atom related fields */ + + /* 48 */ spinlock_t guard; + + /* atom the block is in, if any */ + /* 52 */ txn_atom *atom; + + /* capture list */ + /* 56 */ struct list_head capture_link; + + /* FIFTH CACHE LINE */ + + /* 64 */ struct rcu_head rcu; + /* crosses cache line */ + + /* SIXTH CACHE LINE */ + + /* the real blocknr (where io is going to/from) */ + /* 80 */ reiser4_block_nr blocknr; + /* Parent item type, unformatted and CRC need it for + * offset => key conversion. */ + /* NOTE: this parent_item_id looks like jnode type. */ + /* 88 */ reiser4_plugin_id parent_item_id; + /* 92 */ +#if REISER4_DEBUG + /* list of all jnodes for debugging purposes. */ + struct list_head jnodes; + /* how many times this jnode was written in one transaction */ + int written; + /* this indicates which atom's list the jnode is on */ + atom_list list; +#endif +} __attribute__ ((aligned(16))); + +/* + * jnode types. Enumeration of existing jnode types. + */ +typedef enum { + JNODE_UNFORMATTED_BLOCK, /* unformatted block */ + JNODE_FORMATTED_BLOCK, /* formatted block, znode */ + JNODE_BITMAP, /* bitmap */ + JNODE_IO_HEAD, /* jnode representing a block in the + * wandering log */ + JNODE_INODE, /* jnode embedded into inode */ + LAST_JNODE_TYPE +} jnode_type; + +/* jnode states */ +typedef enum { + /* jnode's page is loaded and data checked */ + JNODE_PARSED = 0, + /* node was deleted, not all locks on it were released. This + node is empty and is going to be removed from the tree + shortly. */ + JNODE_HEARD_BANSHEE = 1, + /* left sibling pointer is valid */ + JNODE_LEFT_CONNECTED = 2, + /* right sibling pointer is valid */ + JNODE_RIGHT_CONNECTED = 3, + + /* znode was just created and doesn't yet have a pointer from + its parent */ + JNODE_ORPHAN = 4, + + /* this node was created by its transaction and has not been assigned + a block address. */ + JNODE_CREATED = 5, + + /* this node is currently relocated */ + JNODE_RELOC = 6, + /* this node is currently wandered */ + JNODE_OVRWR = 7, + + /* this znode has been modified */ + JNODE_DIRTY = 8, + + /* znode lock is being invalidated */ + JNODE_IS_DYING = 9, + + /* THIS PLACE IS INTENTIONALLY LEFT BLANK */ + + /* jnode is queued for flushing. */ + JNODE_FLUSH_QUEUED = 12, + + /* In the following bits jnode type is encoded. */ + JNODE_TYPE_1 = 13, + JNODE_TYPE_2 = 14, + JNODE_TYPE_3 = 15, + + /* jnode is being destroyed */ + JNODE_RIP = 16, + + /* znode was not captured during locking (it might so be because + ->level != LEAF_LEVEL and lock_mode == READ_LOCK) */ + JNODE_MISSED_IN_CAPTURE = 17, + + /* write is in progress */ + JNODE_WRITEBACK = 18, + + /* unused flag */ + JNODE_NEW = 19, + + /* delimiting keys are already set for this znode. */ + JNODE_DKSET = 20, + + /* when this bit is set page and jnode can not be disconnected */ + JNODE_WRITE_PREPARED = 21, + + JNODE_CLUSTER_PAGE = 22, + /* Jnode is marked for repacking, that means the reiser4 flush and the + * block allocator should process this node special way */ + JNODE_REPACK = 23, + /* node should be converted by flush in squalloc phase */ + JNODE_CONVERTIBLE = 24, + /* + * When jnode is dirtied for the first time in given transaction, + * do_jnode_make_dirty() checks whether this jnode can possible became + * member of overwrite set. If so, this bit is set, and one block is + * reserved in the ->flush_reserved space of atom. 
+ * + * This block is "used" (and JNODE_FLUSH_RESERVED bit is cleared) when + * + * (1) flush decides that we want this block to go into relocate + * set after all. + * + * (2) wandering log is allocated (by log writer) + * + * (3) extent is allocated + * + */ + JNODE_FLUSH_RESERVED = 29 +} reiser4_jnode_state; + +/* Macros for accessing the jnode state. */ + +static inline void JF_CLR(jnode * j, int f) +{ + assert("unknown-1", j->magic == JMAGIC); + clear_bit(f, &j->state); +} +static inline int JF_ISSET(const jnode * j, int f) +{ + assert("unknown-2", j->magic == JMAGIC); + return test_bit(f, &((jnode *) j)->state); +} +static inline void JF_SET(jnode * j, int f) +{ + assert("unknown-3", j->magic == JMAGIC); + set_bit(f, &j->state); +} + +static inline int JF_TEST_AND_SET(jnode * j, int f) +{ + assert("unknown-4", j->magic == JMAGIC); + return test_and_set_bit(f, &j->state); +} + +static inline void spin_lock_jnode(jnode *node) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(rw_locked_tree) && + LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_LT(spin_locked_jnode, 2))); + + spin_lock(&(node->guard)); + + LOCK_CNT_INC(spin_locked_jnode); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_unlock_jnode(jnode *node) +{ + assert_spin_locked(&(node->guard)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_jnode)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_jnode); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(node->guard)); +} + +static inline int jnode_is_in_deleteset(const jnode * node) +{ + return JF_ISSET(node, JNODE_RELOC); +} + +extern int init_jnodes(void); +extern void done_jnodes(void); + +/* Jnode routines */ +extern jnode *jalloc(void); +extern void jfree(jnode * node) NONNULL; +extern jnode *jclone(jnode *); +extern jnode *jlookup(reiser4_tree * tree, + oid_t objectid, unsigned long ind) NONNULL; +extern jnode *jfind(struct address_space *, unsigned long index) NONNULL; +extern jnode *jnode_by_page(struct page *pg) NONNULL; +extern jnode *jnode_of_page(struct page *pg) NONNULL; +void jnode_attach_page(jnode * node, struct page *pg); + +void unhash_unformatted_jnode(jnode *); +extern jnode *page_next_jnode(jnode * node) NONNULL; +extern void jnode_init(jnode * node, reiser4_tree * tree, jnode_type) NONNULL; +extern void jnode_make_dirty(jnode * node) NONNULL; +extern void jnode_make_clean(jnode * node) NONNULL; +extern void jnode_make_wander_nolock(jnode * node) NONNULL; +extern void jnode_make_wander(jnode *) NONNULL; +extern void znode_make_reloc(znode * , flush_queue_t *) NONNULL; +extern void unformatted_make_reloc(jnode *, flush_queue_t *) NONNULL; +extern struct address_space *jnode_get_mapping(const jnode * node) NONNULL; + +/** + * jnode_get_block + * @node: jnode to query + * + */ +static inline const reiser4_block_nr *jnode_get_block(const jnode *node) +{ + assert("nikita-528", node != NULL); + + return &node->blocknr; +} + +/** + * jnode_set_block + * @node: jnode to update + * @blocknr: new block nr + */ +static inline void jnode_set_block(jnode *node, const reiser4_block_nr *blocknr) +{ + assert("nikita-2020", node != NULL); + assert("umka-055", blocknr != NULL); + node->blocknr = *blocknr; +} + + +/* block number for IO. Usually this is the same as jnode_get_block(), unless + * jnode was emergency flushed---then block number chosen by eflush is + * used. 
*/ +static inline const reiser4_block_nr *jnode_get_io_block(jnode * node) +{ + assert("nikita-2768", node != NULL); + assert_spin_locked(&(node->guard)); + + return jnode_get_block(node); +} + +/* Jnode flush interface. */ +extern reiser4_blocknr_hint *reiser4_pos_hint(flush_pos_t *pos); +extern flush_queue_t *reiser4_pos_fq(flush_pos_t *pos); + +/* FIXME-VS: these are used in plugin/item/extent.c */ + +/* does extent_get_block have to be called */ +#define jnode_mapped(node) JF_ISSET (node, JNODE_MAPPED) +#define jnode_set_mapped(node) JF_SET (node, JNODE_MAPPED) + +/* the node should be converted during flush squalloc phase */ +#define jnode_convertible(node) JF_ISSET (node, JNODE_CONVERTIBLE) +#define jnode_set_convertible(node) JF_SET (node, JNODE_CONVERTIBLE) + +/* Macros to convert from jnode to znode, znode to jnode. These are macros + because C doesn't allow overloading of const prototypes. */ +#define ZJNODE(x) (&(x)->zjnode) +#define JZNODE(x) \ +({ \ + typeof(x) __tmp_x; \ + \ + __tmp_x = (x); \ + assert("jmacd-1300", jnode_is_znode(__tmp_x)); \ + (znode*) __tmp_x; \ +}) + +extern int jnodes_tree_init(reiser4_tree * tree); +extern int jnodes_tree_done(reiser4_tree * tree); + +#if REISER4_DEBUG + +extern int znode_is_any_locked(const znode * node); +extern void jnode_list_remove(jnode * node); + +#else + +#define jnode_list_remove(node) noop + +#endif + +int znode_is_root(const znode * node) NONNULL; + +/* bump reference counter on @node */ +static inline void add_x_ref(jnode * node/* node to increase x_count of */) +{ + assert("nikita-1911", node != NULL); + + atomic_inc(&node->x_count); + LOCK_CNT_INC(x_refs); +} + +static inline void dec_x_ref(jnode * node) +{ + assert("nikita-3215", node != NULL); + assert("nikita-3216", atomic_read(&node->x_count) > 0); + + atomic_dec(&node->x_count); + assert("nikita-3217", LOCK_CNT_GTZ(x_refs)); + LOCK_CNT_DEC(x_refs); +} + +/* jref() - increase counter of references to jnode/znode (x_count) */ +static inline jnode *jref(jnode * node) +{ + assert("jmacd-508", (node != NULL) && !IS_ERR(node)); + add_x_ref(node); + return node; +} + +/* get the page of jnode */ +static inline struct page *jnode_page(const jnode * node) +{ + return node->pg; +} + +/* return pointer to jnode data */ +static inline char *jdata(const jnode * node) +{ + assert("nikita-1415", node != NULL); + assert("nikita-3198", jnode_page(node) != NULL); + return node->data; +} + +static inline int jnode_is_loaded(const jnode * node) +{ + assert("zam-506", node != NULL); + return atomic_read(&node->d_count) > 0; +} + +extern void page_clear_jnode(struct page *page, jnode * node) NONNULL; + +static inline void jnode_set_reloc(jnode * node) +{ + assert("nikita-2431", node != NULL); + assert("nikita-2432", !JF_ISSET(node, JNODE_OVRWR)); + JF_SET(node, JNODE_RELOC); +} + +/* jload/jwrite/junload give a bread/bwrite/brelse functionality for jnodes */ + +extern int jload_gfp(jnode *, gfp_t, int do_kmap) NONNULL; + +static inline int jload(jnode *node) +{ + return jload_gfp(node, reiser4_ctx_gfp_mask_get(), 1); +} + +extern int jinit_new(jnode *, gfp_t) NONNULL; +extern int jstartio(jnode *) NONNULL; + +extern void jdrop(jnode *) NONNULL; +extern int jwait_io(jnode *, int rw) NONNULL; + +void jload_prefetch(jnode *); + +extern jnode *reiser4_alloc_io_head(const reiser4_block_nr * block) NONNULL; +extern void reiser4_drop_io_head(jnode * node) NONNULL; + +static inline reiser4_tree *jnode_get_tree(const jnode * node) +{ + assert("nikita-2691", node != NULL); + return node->tree; +} + 
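Illustrative aside (not part of the patch): a minimal sketch of how the JF_* state-bit helpers and the jnode guard spinlock defined above combine in practice. The example_set_convertible() helper and its use case are assumptions made purely for illustration.

/*
 * Hypothetical helper: flag a jnode as convertible-by-flush. The bit
 * operations above are already atomic; the guard is taken here only to
 * demonstrate the spin_lock_jnode()/spin_unlock_jnode() pairing that real
 * callers use when several pieces of per-jnode state must stay consistent.
 */
static inline void example_set_convertible(jnode *node)
{
	spin_lock_jnode(node);
	if (!JF_ISSET(node, JNODE_CONVERTIBLE))
		JF_SET(node, JNODE_CONVERTIBLE);
	spin_unlock_jnode(node);
}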
+extern void pin_jnode_data(jnode *); +extern void unpin_jnode_data(jnode *); + +static inline jnode_type jnode_get_type(const jnode * node) +{ + static const unsigned long state_mask = + (1 << JNODE_TYPE_1) | (1 << JNODE_TYPE_2) | (1 << JNODE_TYPE_3); + + static jnode_type mask_to_type[] = { + /* JNODE_TYPE_3 : JNODE_TYPE_2 : JNODE_TYPE_1 */ + + /* 000 */ + [0] = JNODE_FORMATTED_BLOCK, + /* 001 */ + [1] = JNODE_UNFORMATTED_BLOCK, + /* 010 */ + [2] = JNODE_BITMAP, + /* 011 */ + [3] = LAST_JNODE_TYPE, /*invalid */ + /* 100 */ + [4] = JNODE_INODE, + /* 101 */ + [5] = LAST_JNODE_TYPE, + /* 110 */ + [6] = JNODE_IO_HEAD, + /* 111 */ + [7] = LAST_JNODE_TYPE, /* invalid */ + }; + + return mask_to_type[(node->state & state_mask) >> JNODE_TYPE_1]; +} + +/* returns true if node is a znode */ +static inline int jnode_is_znode(const jnode * node) +{ + return jnode_get_type(node) == JNODE_FORMATTED_BLOCK; +} + +static inline int jnode_is_flushprepped(jnode * node) +{ + assert("jmacd-78212", node != NULL); + assert_spin_locked(&(node->guard)); + return !JF_ISSET(node, JNODE_DIRTY) || JF_ISSET(node, JNODE_RELOC) || + JF_ISSET(node, JNODE_OVRWR); +} + +/* Return true if @node has already been processed by the squeeze and allocate + process. This implies the block address has been finalized for the + duration of this atom (or it is clean and will remain in place). If this + returns true you may use the block number as a hint. */ +static inline int jnode_check_flushprepped(jnode * node) +{ + int result; + + /* It must be clean or relocated or wandered. New allocations are set + * to relocate. */ + spin_lock_jnode(node); + result = jnode_is_flushprepped(node); + spin_unlock_jnode(node); + return result; +} + +/* returns true if node is unformatted */ +static inline int jnode_is_unformatted(const jnode * node) +{ + assert("jmacd-0123", node != NULL); + return jnode_get_type(node) == JNODE_UNFORMATTED_BLOCK; +} + +/* returns true if node represents a cluster cache page */ +static inline int jnode_is_cluster_page(const jnode * node) +{ + assert("edward-50", node != NULL); + return (JF_ISSET(node, JNODE_CLUSTER_PAGE)); +} + +/* returns true is node is builtin inode's jnode */ +static inline int jnode_is_inode(const jnode * node) +{ + assert("vs-1240", node != NULL); + return jnode_get_type(node) == JNODE_INODE; +} + +static inline jnode_plugin *jnode_ops_of(const jnode_type type) +{ + assert("nikita-2367", type < LAST_JNODE_TYPE); + return jnode_plugin_by_id((reiser4_plugin_id) type); +} + +static inline jnode_plugin *jnode_ops(const jnode * node) +{ + assert("nikita-2366", node != NULL); + + return jnode_ops_of(jnode_get_type(node)); +} + +/* Get the index of a block. */ +static inline unsigned long jnode_get_index(jnode * node) +{ + return jnode_ops(node)->index(node); +} + +/* return true if "node" is the root */ +static inline int jnode_is_root(const jnode * node) +{ + return jnode_is_znode(node) && znode_is_root(JZNODE(node)); +} + +extern struct address_space *mapping_jnode(const jnode * node); +extern unsigned long index_jnode(const jnode * node); + +static inline void jput(jnode * node); +extern void jput_final(jnode * node); + +/* bump data counter on @node */ +static inline void add_d_ref(jnode * node/* node to increase d_count of */) +{ + assert("nikita-1962", node != NULL); + + atomic_inc(&node->d_count); + if (jnode_is_unformatted(node) || jnode_is_znode(node)) + LOCK_CNT_INC(d_refs); +} + +/* jput() - decrement x_count reference counter on znode. 
+ + Count may drop to 0, jnode stays in cache until memory pressure causes the + eviction of its page. The c_count variable also ensures that children are + pressured out of memory before the parent. The jnode remains hashed as + long as the VM allows its page to stay in memory. +*/ +static inline void jput(jnode * node) +{ + assert("jmacd-509", node != NULL); + assert("jmacd-510", atomic_read(&node->x_count) > 0); + assert("zam-926", reiser4_schedulable()); + LOCK_CNT_DEC(x_refs); + + rcu_read_lock(); + /* + * we don't need any kind of lock here--jput_final() uses RCU. + */ + if (unlikely(atomic_dec_and_test(&node->x_count))) + jput_final(node); + else + rcu_read_unlock(); + assert("nikita-3473", reiser4_schedulable()); +} + +extern void jrelse(jnode * node); +extern void jrelse_tail(jnode * node); + +extern jnode *jnode_rip_sync(reiser4_tree * t, jnode * node); + +/* resolve race with jput */ +static inline jnode *jnode_rip_check(reiser4_tree * tree, jnode * node) +{ + if (unlikely(JF_ISSET(node, JNODE_RIP))) + node = jnode_rip_sync(tree, node); + return node; +} + +extern reiser4_key *jnode_build_key(const jnode *node, reiser4_key * key); + +#if REISER4_DEBUG +extern int jnode_invariant_f(const jnode *node, char const **msg); +#endif + +extern jnode_plugin jnode_plugins[LAST_JNODE_TYPE]; + +/* __JNODE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/kassign.c b/fs/reiser4/kassign.c new file mode 100644 index 000000000000..87a04dc849c0 --- /dev/null +++ b/fs/reiser4/kassign.c @@ -0,0 +1,677 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Key assignment policy implementation */ + +/* + * In reiser4 every piece of file system data and meta-data has a key. Keys + * are used to store information in and retrieve it from reiser4 internal + * tree. In addition to this, keys define _ordering_ of all file system + * information: things having close keys are placed into the same or + * neighboring (in the tree order) nodes of the tree. As our block allocator + * tries to respect tree order (see flush.c), keys also define order in which + * things are laid out on the disk, and hence, affect performance directly. + * + * Obviously, assignment of keys to data and meta-data should be consistent + * across whole file system. Algorithm that calculates a key for a given piece + * of data or meta-data is referred to as "key assignment". + * + * Key assignment is too expensive to be implemented as a plugin (that is, + * with an ability to support different key assignment schemas in the same + * compiled kernel image). As a compromise, all key-assignment functions and + * data-structures are collected in this single file, so that modifications to + * key assignment algorithm can be localized. Additional changes may be + * required in key.[ch]. + * + * Current default reiser4 key assignment algorithm is dubbed "Plan A". As one + * may guess, there is "Plan B" too. + * + */ + +/* + * Additional complication with key assignment implementation is a requirement + * to support different key length. + */ + +/* + * KEY ASSIGNMENT: PLAN A, LONG KEYS. 
+ * + * DIRECTORY ITEMS + * + * | 60 | 4 | 7 |1| 56 | 64 | 64 | + * +--------------+---+---+-+-------------+------------------+-----------------+ + * | dirid | 0 | F |H| prefix-1 | prefix-2 | prefix-3/hash | + * +--------------+---+---+-+-------------+------------------+-----------------+ + * | | | | | + * | 8 bytes | 8 bytes | 8 bytes | 8 bytes | + * + * dirid objectid of directory this item is for + * + * F fibration, see fs/reiser4/plugin/fibration.[ch] + * + * H 1 if last 8 bytes of the key contain hash, + * 0 if last 8 bytes of the key contain prefix-3 + * + * prefix-1 first 7 characters of file name. + * Padded by zeroes if name is not long enough. + * + * prefix-2 next 8 characters of the file name. + * + * prefix-3 next 8 characters of the file name. + * + * hash hash of the rest of file name (i.e., portion of file + * name not included into prefix-1 and prefix-2). + * + * File names shorter than 23 (== 7 + 8 + 8) characters are completely encoded + * in the key. Such file names are called "short". They are distinguished by H + * bit set 0 in the key. + * + * Other file names are "long". For long name, H bit is 1, and first 15 (== 7 + * + 8) characters are encoded in prefix-1 and prefix-2 portions of the + * key. Last 8 bytes of the key are occupied by hash of the remaining + * characters of the name. + * + * This key assignment reaches following important goals: + * + * (1) directory entries are sorted in approximately lexicographical + * order. + * + * (2) collisions (when multiple directory items have the same key), while + * principally unavoidable in a tree with fixed length keys, are rare. + * + * STAT DATA + * + * | 60 | 4 | 64 | 4 | 60 | 64 | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | locality id | 1 | ordering | 0 | objectid | 0 | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | | | | | + * | 8 bytes | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * ordering copy of second 8-byte portion of the key of directory + * entry for the first name of this object. Ordering has a form + * { + * fibration :7; + * h :1; + * prefix1 :56; + * } + * see description of key for directory entry above. + * + * objectid object id for this object + * + * This key assignment policy is designed to keep stat-data in the same order + * as corresponding directory items, thus speeding up readdir/stat types of + * workload. + * + * FILE BODY + * + * | 60 | 4 | 64 | 4 | 60 | 64 | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | locality id | 4 | ordering | 0 | objectid | offset | + * +--------------+---+-----------------+---+--------------+-----------------+ + * | | | | | + * | 8 bytes | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * ordering the same as in the key of stat-data for this object + * + * objectid object id for this object + * + * offset logical offset from the beginning of this file. + * Measured in bytes. + * + * + * KEY ASSIGNMENT: PLAN A, SHORT KEYS. 
+ * + * DIRECTORY ITEMS + * + * | 60 | 4 | 7 |1| 56 | 64 | + * +--------------+---+---+-+-------------+-----------------+ + * | dirid | 0 | F |H| prefix-1 | prefix-2/hash | + * +--------------+---+---+-+-------------+-----------------+ + * | | | | + * | 8 bytes | 8 bytes | 8 bytes | + * + * dirid objectid of directory this item is for + * + * F fibration, see fs/reiser4/plugin/fibration.[ch] + * + * H 1 if last 8 bytes of the key contain hash, + * 0 if last 8 bytes of the key contain prefix-2 + * + * prefix-1 first 7 characters of file name. + * Padded by zeroes if name is not long enough. + * + * prefix-2 next 8 characters of the file name. + * + * hash hash of the rest of file name (i.e., portion of file + * name not included into prefix-1). + * + * File names shorter than 15 (== 7 + 8) characters are completely encoded in + * the key. Such file names are called "short". They are distinguished by H + * bit set in the key. + * + * Other file names are "long". For long name, H bit is 0, and first 7 + * characters are encoded in prefix-1 portion of the key. Last 8 bytes of the + * key are occupied by hash of the remaining characters of the name. + * + * STAT DATA + * + * | 60 | 4 | 4 | 60 | 64 | + * +--------------+---+---+--------------+-----------------+ + * | locality id | 1 | 0 | objectid | 0 | + * +--------------+---+---+--------------+-----------------+ + * | | | | + * | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * objectid object id for this object + * + * FILE BODY + * + * | 60 | 4 | 4 | 60 | 64 | + * +--------------+---+---+--------------+-----------------+ + * | locality id | 4 | 0 | objectid | offset | + * +--------------+---+---+--------------+-----------------+ + * | | | | + * | 8 bytes | 8 bytes | 8 bytes | + * + * locality id object id of a directory where first name was created for + * the object + * + * objectid object id for this object + * + * offset logical offset from the beginning of this file. + * Measured in bytes. + * + * + */ + +#include "debug.h" +#include "key.h" +#include "kassign.h" +#include "vfs_ops.h" +#include "inode.h" +#include "super.h" +#include "dscale.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block, etc */ + +/* bitmask for H bit (see comment at the beginning of this file */ +static const __u64 longname_mark = 0x0100000000000000ull; +/* bitmask for F and H portions of the key. */ +static const __u64 fibration_mask = 0xff00000000000000ull; + +/* return true if name is not completely encoded in @key */ +int is_longname_key(const reiser4_key * key) +{ + __u64 highpart; + + assert("nikita-2863", key != NULL); + if (get_key_type(key) != KEY_FILE_NAME_MINOR) + reiser4_print_key("oops", key); + assert("nikita-2864", get_key_type(key) == KEY_FILE_NAME_MINOR); + + if (REISER4_LARGE_KEY) + highpart = get_key_ordering(key); + else + highpart = get_key_objectid(key); + + return (highpart & longname_mark) ? 1 : 0; +} + +/* return true if @name is too long to be completely encoded in the key */ +int is_longname(const char *name UNUSED_ARG, int len) +{ + if (REISER4_LARGE_KEY) + return len > 23; + else + return len > 15; +} + +/* code ascii string into __u64. + + Put characters of @name into result (@str) one after another starting + from @start_idx-th highest (arithmetically) byte. This produces + endian-safe encoding. memcpy(2) will not do. 
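Illustrative aside (not part of the patch): a self-contained, user-space restatement of the packing rule described above, handy for experimenting with the encoding. pack_string_demo() and the sample value are assumptions for illustration only; the in-tree pack_string() follows in this hunk.

#include <stdio.h>
#include <stdint.h>

/* Same rule as described above: pack up to (8 - start_idx) characters into a
 * 64-bit integer, most significant byte first, leaving the top start_idx
 * bytes free (they carry the fibration/H bits in real directory keys). */
static uint64_t pack_string_demo(const char *name, int start_idx)
{
	uint64_t str = 0;
	unsigned i;

	for (i = 0; i < sizeof(str) - start_idx && name[i]; ++i) {
		str <<= 8;
		str |= (unsigned char)name[i];
	}
	/* shift the packed characters up to the highest unused bytes */
	str <<= (sizeof(str) - i - start_idx) << 3;
	return str;
}

int main(void)
{
	/* "foo" with start_idx == 1 packs to 0x666f6f00000000: the top byte
	 * stays zero for the fibration/H bits and 'f', 'o', 'o' fill the
	 * next three bytes. */
	printf("%#llx\n", (unsigned long long)pack_string_demo("foo", 1));
	return 0;
}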
+ +*/ +static __u64 pack_string(const char *name /* string to encode */ , + int start_idx /* highest byte in result from + * which to start encoding */ ) +{ + unsigned i; + __u64 str; + + str = 0; + for (i = 0; (i < sizeof str - start_idx) && name[i]; ++i) { + str <<= 8; + str |= (unsigned char)name[i]; + } + str <<= (sizeof str - i - start_idx) << 3; + return str; +} + +/* opposite to pack_string(). Takes value produced by pack_string(), restores + * string encoded in it and stores result in @buf */ +char *reiser4_unpack_string(__u64 value, char *buf) +{ + do { + *buf = value >> (64 - 8); + if (*buf) + ++buf; + value <<= 8; + } while (value != 0); + *buf = 0; + return buf; +} + +/* obtain name encoded in @key and store it in @buf */ +char *extract_name_from_key(const reiser4_key * key, char *buf) +{ + char *c; + + assert("nikita-2868", !is_longname_key(key)); + + c = buf; + if (REISER4_LARGE_KEY) { + c = reiser4_unpack_string(get_key_ordering(key) & + ~fibration_mask, c); + c = reiser4_unpack_string(get_key_fulloid(key), c); + } else + c = reiser4_unpack_string(get_key_fulloid(key) & + ~fibration_mask, c); + reiser4_unpack_string(get_key_offset(key), c); + return buf; +} + +/** + * complete_entry_key - calculate entry key by name + * @dir: directory where entry is (or will be) in + * @name: name to calculate key of + * @len: lenth of name + * @result: place to store result in + * + * Sets fields of entry key @result which depend on file name. + * When REISER4_LARGE_KEY is defined three fields of @result are set: ordering, + * objectid and offset. Otherwise, objectid and offset are set. + */ +void complete_entry_key(const struct inode *dir, const char *name, + int len, reiser4_key *result) +{ +#if REISER4_LARGE_KEY + __u64 ordering; + __u64 objectid; + __u64 offset; + + assert("nikita-1139", dir != NULL); + assert("nikita-1142", result != NULL); + assert("nikita-2867", strlen(name) == len); + + /* + * key allocation algorithm for directory entries in case of large + * keys: + * + * If name is not longer than 7 + 8 + 8 = 23 characters, put first 7 + * characters into ordering field of key, next 8 charactes (if any) + * into objectid field of key and next 8 ones (of any) into offset + * field of key + * + * If file name is longer than 23 characters, put first 7 characters + * into key's ordering, next 8 to objectid and hash of remaining + * characters into offset field. + * + * To distinguish above cases, in latter set up unused high bit in + * ordering field. 
+ */ + + /* [0-6] characters to ordering */ + ordering = pack_string(name, 1); + if (len > 7) { + /* [7-14] characters to objectid */ + objectid = pack_string(name + 7, 0); + if (len > 15) { + if (len <= 23) { + /* [15-23] characters to offset */ + offset = pack_string(name + 15, 0); + } else { + /* note in a key the fact that offset contains + * hash */ + ordering |= longname_mark; + + /* offset is the hash of the file name's tail */ + offset = inode_hash_plugin(dir)->hash(name + 15, + len - 15); + } + } else { + offset = 0ull; + } + } else { + objectid = 0ull; + offset = 0ull; + } + + assert("nikita-3480", inode_fibration_plugin(dir) != NULL); + ordering |= inode_fibration_plugin(dir)->fibre(dir, name, len); + + set_key_ordering(result, ordering); + set_key_fulloid(result, objectid); + set_key_offset(result, offset); + return; + +#else + __u64 objectid; + __u64 offset; + + assert("nikita-1139", dir != NULL); + assert("nikita-1142", result != NULL); + assert("nikita-2867", strlen(name) == len); + + /* + * key allocation algorithm for directory entries in case of not large + * keys: + * + * If name is not longer than 7 + 8 = 15 characters, put first 7 + * characters into objectid field of key, next 8 charactes (if any) + * into offset field of key + * + * If file name is longer than 15 characters, put first 7 characters + * into key's objectid, and hash of remaining characters into offset + * field. + * + * To distinguish above cases, in latter set up unused high bit in + * objectid field. + */ + + /* [0-6] characters to objectid */ + objectid = pack_string(name, 1); + if (len > 7) { + if (len <= 15) { + /* [7-14] characters to offset */ + offset = pack_string(name + 7, 0); + } else { + /* note in a key the fact that offset contains hash. */ + objectid |= longname_mark; + + /* offset is the hash of the file name. */ + offset = inode_hash_plugin(dir)->hash(name + 7, + len - 7); + } + } else + offset = 0ull; + + assert("nikita-3480", inode_fibration_plugin(dir) != NULL); + objectid |= inode_fibration_plugin(dir)->fibre(dir, name, len); + + set_key_fulloid(result, objectid); + set_key_offset(result, offset); + return; +#endif /* ! REISER4_LARGE_KEY */ +} + +/* true, if @key is the key of "." */ +int is_dot_key(const reiser4_key * key/* key to check */) +{ + assert("nikita-1717", key != NULL); + assert("nikita-1718", get_key_type(key) == KEY_FILE_NAME_MINOR); + return + (get_key_ordering(key) == 0ull) && + (get_key_objectid(key) == 0ull) && (get_key_offset(key) == 0ull); +} + +/* build key for stat-data. + + return key of stat-data of this object. This should became sd plugin + method in the future. For now, let it be here. + +*/ +reiser4_key *build_sd_key(const struct inode *target /* inode of an object */ , + reiser4_key * result /* resulting key of @target + stat-data */ ) +{ + assert("nikita-261", result != NULL); + + reiser4_key_init(result); + set_key_locality(result, reiser4_inode_data(target)->locality_id); + set_key_ordering(result, get_inode_ordering(target)); + set_key_objectid(result, get_inode_oid(target)); + set_key_type(result, KEY_SD_MINOR); + set_key_offset(result, (__u64) 0); + return result; +} + +/* encode part of key into &obj_key_id + + This encodes into @id part of @key sufficient to restore @key later, + given that latter is key of object (key of stat-data). 
+ + See &obj_key_id +*/ +int build_obj_key_id(const reiser4_key * key /* key to encode */ , + obj_key_id * id/* id where key is encoded in */) +{ + assert("nikita-1151", key != NULL); + assert("nikita-1152", id != NULL); + + memcpy(id, key, sizeof *id); + return 0; +} + +/* encode reference to @obj in @id. + + This is like build_obj_key_id() above, but takes inode as parameter. */ +int build_inode_key_id(const struct inode *obj /* object to build key of */ , + obj_key_id * id/* result */) +{ + reiser4_key sdkey; + + assert("nikita-1166", obj != NULL); + assert("nikita-1167", id != NULL); + + build_sd_key(obj, &sdkey); + build_obj_key_id(&sdkey, id); + return 0; +} + +/* decode @id back into @key + + Restore key of object stat-data from @id. This is dual to + build_obj_key_id() above. +*/ +int extract_key_from_id(const obj_key_id * id /* object key id to extract key + * from */ , + reiser4_key * key/* result */) +{ + assert("nikita-1153", id != NULL); + assert("nikita-1154", key != NULL); + + reiser4_key_init(key); + memcpy(key, id, sizeof *id); + return 0; +} + +/* extract objectid of directory from key of directory entry within said + directory. + */ +oid_t extract_dir_id_from_key(const reiser4_key * de_key /* key of + * directory + * entry */ ) +{ + assert("nikita-1314", de_key != NULL); + return get_key_locality(de_key); +} + +/* encode into @id key of directory entry. + + Encode into @id information sufficient to later distinguish directory + entries within the same directory. This is not whole key, because all + directory entries within directory item share locality which is equal + to objectid of their directory. + +*/ +int build_de_id(const struct inode *dir /* inode of directory */ , + const struct qstr *name /* name to be given to @obj by + * directory entry being + * constructed */ , + de_id * id/* short key of directory entry */) +{ + reiser4_key key; + + assert("nikita-1290", dir != NULL); + assert("nikita-1292", id != NULL); + + /* NOTE-NIKITA this is suboptimal. */ + inode_dir_plugin(dir)->build_entry_key(dir, name, &key); + return build_de_id_by_key(&key, id); +} + +/* encode into @id key of directory entry. + + Encode into @id information sufficient to later distinguish directory + entries within the same directory. This is not whole key, because all + directory entries within directory item share locality which is equal + to objectid of their directory. + +*/ +int build_de_id_by_key(const reiser4_key * entry_key /* full key of directory + * entry */ , + de_id * id/* short key of directory entry */) +{ + memcpy(id, ((__u64 *) entry_key) + 1, sizeof *id); + return 0; +} + +/* restore from @id key of directory entry. + + Function dual to build_de_id(): given @id and locality, build full + key of directory entry within directory item. 
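Illustrative aside (not part of the patch): a sketch, written against the declarations introduced by this patch, of how a directory entry key is shrunk to a de_id and later rebuilt. de_id_roundtrip_demo() is a hypothetical helper used only to show the relationship between these functions.

/*
 * Hypothetical round trip: only the key elements after the first one are kept
 * in the de_id; the locality (the directory's objectid) and the minor type
 * (KEY_FILE_NAME_MINOR) are re-derived when the full key is needed again.
 */
static void de_id_roundtrip_demo(const struct inode *dir,
				 const struct qstr *name)
{
	reiser4_key entry_key;
	reiser4_key restored;
	de_id id;

	inode_dir_plugin(dir)->build_entry_key(dir, name, &entry_key);
	build_de_id_by_key(&entry_key, &id);

	extract_key_from_de_id(extract_dir_id_from_key(&entry_key),
			       &id, &restored);
	assert("demo-1", keyeq(&entry_key, &restored));
}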
+ +*/ +int extract_key_from_de_id(const oid_t locality /* locality of directory + * entry */ , + const de_id * id /* directory entry id */ , + reiser4_key * key/* result */) +{ + /* no need to initialise key here: all fields are overwritten */ + memcpy(((__u64 *) key) + 1, id, sizeof *id); + set_key_locality(key, locality); + set_key_type(key, KEY_FILE_NAME_MINOR); + return 0; +} + +/* compare two &de_id's */ +cmp_t de_id_cmp(const de_id * id1 /* first &de_id to compare */ , + const de_id * id2/* second &de_id to compare */) +{ + /* NOTE-NIKITA ugly implementation */ + reiser4_key k1; + reiser4_key k2; + + extract_key_from_de_id((oid_t) 0, id1, &k1); + extract_key_from_de_id((oid_t) 0, id2, &k2); + return keycmp(&k1, &k2); +} + +/* compare &de_id with key */ +cmp_t de_id_key_cmp(const de_id * id /* directory entry id to compare */ , + const reiser4_key * key/* key to compare */) +{ + cmp_t result; + reiser4_key *k1; + + k1 = (reiser4_key *) (((unsigned long)id) - sizeof key->el[0]); + result = KEY_DIFF_EL(k1, key, 1); + if (result == EQUAL_TO) { + result = KEY_DIFF_EL(k1, key, 2); + if (REISER4_LARGE_KEY && result == EQUAL_TO) + result = KEY_DIFF_EL(k1, key, 3); + } + return result; +} + +/* + * return number of bytes necessary to encode @inode identity. + */ +int inode_onwire_size(const struct inode *inode) +{ + int result; + + result = dscale_bytes_to_write(get_inode_oid(inode)); + result += dscale_bytes_to_write(get_inode_locality(inode)); + + /* + * ordering is large (it usually has highest bits set), so it makes + * little sense to dscale it. + */ + if (REISER4_LARGE_KEY) + result += sizeof(get_inode_ordering(inode)); + return result; +} + +/* + * encode @inode identity at @start + */ +char *build_inode_onwire(const struct inode *inode, char *start) +{ + start += dscale_write(start, get_inode_locality(inode)); + start += dscale_write(start, get_inode_oid(inode)); + + if (REISER4_LARGE_KEY) { + put_unaligned(cpu_to_le64(get_inode_ordering(inode)), (__le64 *)start); + start += sizeof(get_inode_ordering(inode)); + } + return start; +} + +/* + * extract key that was previously encoded by build_inode_onwire() at @addr + */ +char *extract_obj_key_id_from_onwire(char *addr, obj_key_id * key_id) +{ + __u64 val; + + addr += dscale_read(addr, &val); + val = (val << KEY_LOCALITY_SHIFT) | KEY_SD_MINOR; + put_unaligned(cpu_to_le64(val), (__le64 *)key_id->locality); + addr += dscale_read(addr, &val); + put_unaligned(cpu_to_le64(val), (__le64 *)key_id->objectid); +#if REISER4_LARGE_KEY + memcpy(&key_id->ordering, addr, sizeof key_id->ordering); + addr += sizeof key_id->ordering; +#endif + return addr; +} + +/* + * skip a key that was previously encoded by build_inode_onwire() at @addr + * FIXME: handle IO errors. + */ +char * locate_obj_key_id_onwire(char * addr) +{ + /* locality */ + addr += dscale_bytes_to_read(addr); + /* objectid */ + addr += dscale_bytes_to_read(addr); +#if REISER4_LARGE_KEY + addr += sizeof ((obj_key_id *)0)->ordering; +#endif + return addr; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/kassign.h b/fs/reiser4/kassign.h new file mode 100644 index 000000000000..8de30027ca76 --- /dev/null +++ b/fs/reiser4/kassign.h @@ -0,0 +1,111 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Key assignment policy interface. See kassign.c for details. 
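Illustrative aside (not part of the patch): a sketch of the on-wire round trip implemented just above, where the inode identity written by build_inode_onwire() is turned back into a stat-data key. onwire_roundtrip_demo() and the fixed buffer size are assumptions for illustration; real callers size the buffer with inode_onwire_size().

/*
 * Hypothetical round trip over the wire encoding: dscale-encoded locality and
 * objectid (plus raw ordering for large keys) come back as an obj_key_id,
 * which in turn yields the object's stat-data key.
 */
static void onwire_roundtrip_demo(const struct inode *inode)
{
	char wire[64];	/* assumed >= inode_onwire_size(inode) */
	obj_key_id id;
	reiser4_key sd_key;

	build_inode_onwire(inode, wire);
	extract_obj_key_id_from_onwire(wire, &id);
	extract_key_from_id(&id, &sd_key);
	/* sd_key should now match what build_sd_key(inode, &sd_key) builds */
}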
*/ + +#if !defined(__KASSIGN_H__) +#define __KASSIGN_H__ + +#include "forward.h" +#include "key.h" +#include "dformat.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block, etc */ +#include <linux/dcache.h> /* for struct qstr */ + +/* key assignment functions */ + +/* Information from which key of file stat-data can be uniquely + restored. This depends on key assignment policy for + stat-data. Currently it's enough to store object id and locality id + (60+60==120) bits, because minor packing locality and offset of + stat-data key are always known constants: KEY_SD_MINOR and 0 + respectively. For simplicity 4 bits are wasted in each id, and just + two 64 bit integers are stored. + + This field has to be byte-aligned, because we don't want to waste + space in directory entries. There is another side of a coin of + course: we waste CPU and bus bandwidth in stead, by copying data back + and forth. + + Next optimization: &obj_key_id is mainly used to address stat data from + directory entries. Under the assumption that majority of files only have + only name (one hard link) from *the* parent directory it seems reasonable + to only store objectid of stat data and take its locality from key of + directory item. + + This requires some flag to be added to the &obj_key_id to distinguish + between these two cases. Remaining bits in flag byte are then asking to be + used to store file type. + + This optimization requires changes in directory item handling code. + +*/ +typedef struct obj_key_id { + d8 locality[sizeof(__u64)]; + ON_LARGE_KEY(d8 ordering[sizeof(__u64)]; + ) + d8 objectid[sizeof(__u64)]; +} +obj_key_id; + +/* Information sufficient to uniquely identify directory entry within + compressed directory item. + + For alignment issues see &obj_key_id above. 
+*/ +typedef struct de_id { + ON_LARGE_KEY(d8 ordering[sizeof(__u64)];) + d8 objectid[sizeof(__u64)]; + d8 offset[sizeof(__u64)]; +} +de_id; + +extern int inode_onwire_size(const struct inode *obj); +extern char *build_inode_onwire(const struct inode *obj, char *area); +extern char *locate_obj_key_id_onwire(char *area); +extern char *extract_obj_key_id_from_onwire(char *area, obj_key_id * key_id); + +extern int build_inode_key_id(const struct inode *obj, obj_key_id * id); +extern int extract_key_from_id(const obj_key_id * id, reiser4_key * key); +extern int build_obj_key_id(const reiser4_key * key, obj_key_id * id); +extern oid_t extract_dir_id_from_key(const reiser4_key * de_key); +extern int build_de_id(const struct inode *dir, const struct qstr *name, + de_id * id); +extern int build_de_id_by_key(const reiser4_key * entry_key, de_id * id); +extern int extract_key_from_de_id(const oid_t locality, const de_id * id, + reiser4_key * key); +extern cmp_t de_id_cmp(const de_id * id1, const de_id * id2); +extern cmp_t de_id_key_cmp(const de_id * id, const reiser4_key * key); + +extern int build_readdir_key_common(struct file *dir, reiser4_key * result); +extern void build_entry_key_common(const struct inode *dir, + const struct qstr *name, + reiser4_key * result); +extern void build_entry_key_stable_entry(const struct inode *dir, + const struct qstr *name, + reiser4_key * result); +extern int is_dot_key(const reiser4_key * key); +extern reiser4_key *build_sd_key(const struct inode *target, + reiser4_key * result); + +extern int is_longname_key(const reiser4_key * key); +extern int is_longname(const char *name, int len); +extern char *extract_name_from_key(const reiser4_key * key, char *buf); +extern char *reiser4_unpack_string(__u64 value, char *buf); +extern void complete_entry_key(const struct inode *dir, const char *name, + int len, reiser4_key *result); + +/* __KASSIGN_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/key.c b/fs/reiser4/key.c new file mode 100644 index 000000000000..0efd51832d1a --- /dev/null +++ b/fs/reiser4/key.c @@ -0,0 +1,138 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Key manipulations. */ + +#include "debug.h" +#include "key.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/types.h> /* for __u?? */ + +/* Minimal possible key: all components are zero. It is presumed that this is + independent of key scheme. */ +static const reiser4_key MINIMAL_KEY = { + .el = { + 0ull, + ON_LARGE_KEY(0ull,) + 0ull, + 0ull + } +}; + +/* Maximal possible key: all components are ~0. It is presumed that this is + independent of key scheme. */ +static const reiser4_key MAXIMAL_KEY = { + .el = { + __constant_cpu_to_le64(~0ull), + ON_LARGE_KEY(__constant_cpu_to_le64(~0ull),) + __constant_cpu_to_le64(~0ull), + __constant_cpu_to_le64(~0ull) + } +}; + +/* Initialize key. */ +void reiser4_key_init(reiser4_key * key/* key to init */) +{ + assert("nikita-1169", key != NULL); + memset(key, 0, sizeof *key); +} + +/* minimal possible key in the tree. Return pointer to the static storage. */ +const reiser4_key * reiser4_min_key(void) +{ + return &MINIMAL_KEY; +} + +/* maximum possible key in the tree. Return pointer to the static storage. 
*/ +const reiser4_key * reiser4_max_key(void) +{ + return &MAXIMAL_KEY; +} + +#if REISER4_DEBUG +/* debugging aid: print symbolic name of key type */ +static const char *type_name(unsigned int key_type/* key type */) +{ + switch (key_type) { + case KEY_FILE_NAME_MINOR: + return "file name"; + case KEY_SD_MINOR: + return "stat data"; + case KEY_ATTR_NAME_MINOR: + return "attr name"; + case KEY_ATTR_BODY_MINOR: + return "attr body"; + case KEY_BODY_MINOR: + return "file body"; + default: + return "unknown"; + } +} + +/* debugging aid: print human readable information about key */ +void reiser4_print_key(const char *prefix /* prefix to print */ , + const reiser4_key * key/* key to print */) +{ + /* turn bold on */ + /* printf ("\033[1m"); */ + if (key == NULL) + printk("%s: null key\n", prefix); + else { + if (REISER4_LARGE_KEY) + printk("%s: (%Lx:%x:%Lx:%Lx:%Lx:%Lx)", prefix, + get_key_locality(key), + get_key_type(key), + get_key_ordering(key), + get_key_band(key), + get_key_objectid(key), get_key_offset(key)); + else + printk("%s: (%Lx:%x:%Lx:%Lx:%Lx)", prefix, + get_key_locality(key), + get_key_type(key), + get_key_band(key), + get_key_objectid(key), get_key_offset(key)); + /* + * if this is a key of directory entry, try to decode part of + * a name stored in the key, and output it. + */ + if (get_key_type(key) == KEY_FILE_NAME_MINOR) { + char buf[DE_NAME_BUF_LEN]; + char *c; + + c = buf; + c = reiser4_unpack_string(get_key_ordering(key), c); + reiser4_unpack_string(get_key_fulloid(key), c); + printk("[%s", buf); + if (is_longname_key(key)) + /* + * only part of the name is stored in the key. + */ + printk("...]\n"); + else { + /* + * whole name is stored in the key. + */ + reiser4_unpack_string(get_key_offset(key), buf); + printk("%s]\n", buf); + } + } else { + printk("[%s]\n", type_name(get_key_type(key))); + } + } + /* turn bold off */ + /* printf ("\033[m\017"); */ +} + +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/key.h b/fs/reiser4/key.h new file mode 100644 index 000000000000..2ad4ee277e61 --- /dev/null +++ b/fs/reiser4/key.h @@ -0,0 +1,392 @@ +/* Copyright 2000, 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declarations of key-related data-structures and operations on keys. */ + +#if !defined(__REISER4_KEY_H__) +#define __REISER4_KEY_H__ + +#include "dformat.h" +#include "forward.h" +#include "debug.h" + +#include <linux/types.h> /* for __u?? */ + +/* Operations on keys in reiser4 tree */ + +/* No access to any of these fields shall be done except via a + wrapping macro/function, and that wrapping macro/function shall + convert to little endian order. Compare keys will consider cpu byte order. */ + +/* A storage layer implementation difference between a regular unix file body + and its attributes is in the typedef below which causes all of the attributes + of a file to be near in key to all of the other attributes for all of the + files within that directory, and not near to the file itself. It is + interesting to consider whether this is the wrong approach, and whether there + should be no difference at all. For current usage patterns this choice is + probably the right one. 
*/ + +/* possible values for minor packing locality (4 bits required) */ +typedef enum { + /* file name */ + KEY_FILE_NAME_MINOR = 0, + /* stat-data */ + KEY_SD_MINOR = 1, + /* file attribute name */ + KEY_ATTR_NAME_MINOR = 2, + /* file attribute value */ + KEY_ATTR_BODY_MINOR = 3, + /* file body (tail or extent) */ + KEY_BODY_MINOR = 4, +} key_minor_locality; + +/* Everything stored in the tree has a unique key, which means that the tree is + (logically) fully ordered by key. Physical order is determined by dynamic + heuristics that attempt to reflect key order when allocating available space, + and by the repacker. It is stylistically better to put aggregation + information into the key. Thus, if you want to segregate extents from tails, + it is better to give them distinct minor packing localities rather than + changing block_alloc.c to check the node type when deciding where to allocate + the node. + + The need to randomly displace new directories and large files disturbs this + symmetry unfortunately. However, it should be noted that this is a need that + is not clearly established given the existence of a repacker. Also, in our + current implementation tails have a different minor packing locality from + extents, and no files have both extents and tails, so maybe symmetry can be + had without performance cost after all. Symmetry is what we ship for now.... +*/ + +/* Arbitrary major packing localities can be assigned to objects using + the reiser4(filenameA/..packing<=some_number) system call. + + In reiser4, the creat() syscall creates a directory + + whose default flow (that which is referred to if the directory is + read as a file) is the traditional unix file body. + + whose directory plugin is the 'filedir' + + whose major packing locality is that of the parent of the object created. + + The static_stat item is a particular commonly used directory + compression (the one for normal unix files). + + The filedir plugin checks to see if the static_stat item exists. + There is a unique key for static_stat. If yes, then it uses the + static_stat item for all of the values that it contains. The + static_stat item contains a flag for each stat it contains which + indicates whether one should look outside the static_stat item for its + contents. +*/ + +/* offset of fields in reiser4_key. Value of each element of this enum + is index within key (thought as array of __u64's) where this field + is. */ +typedef enum { + /* major "locale", aka dirid. Sits in 1st element */ + KEY_LOCALITY_INDEX = 0, + /* minor "locale", aka item type. Sits in 1st element */ + KEY_TYPE_INDEX = 0, + ON_LARGE_KEY(KEY_ORDERING_INDEX,) + /* "object band". Sits in 2nd element */ + KEY_BAND_INDEX, + /* objectid. Sits in 2nd element */ + KEY_OBJECTID_INDEX = KEY_BAND_INDEX, + /* full objectid. Sits in 2nd element */ + KEY_FULLOID_INDEX = KEY_BAND_INDEX, + /* Offset. Sits in 3rd element */ + KEY_OFFSET_INDEX, + /* Name hash. Sits in 3rd element */ + KEY_HASH_INDEX = KEY_OFFSET_INDEX, + KEY_CACHELINE_END = KEY_OFFSET_INDEX, + KEY_LAST_INDEX +} reiser4_key_field_index; + +/* key in reiser4 internal "balanced" tree. It is just array of three + 64bit integers in disk byte order (little-endian by default). This + array is actually indexed by reiser4_key_field. Each __u64 within + this array is called "element". Logical key component encoded within + elements are called "fields". + + We declare this as union with second component dummy to suppress + inconvenient array<->pointer casts implied in C. 
*/ +union reiser4_key { + __le64 el[KEY_LAST_INDEX]; + int pad; +}; + +/* bitmasks showing where within reiser4_key particular key is stored. */ +/* major locality occupies higher 60 bits of the first element */ +#define KEY_LOCALITY_MASK 0xfffffffffffffff0ull + +/* minor locality occupies lower 4 bits of the first element */ +#define KEY_TYPE_MASK 0xfull + +/* controversial band occupies higher 4 bits of the 2nd element */ +#define KEY_BAND_MASK 0xf000000000000000ull + +/* objectid occupies lower 60 bits of the 2nd element */ +#define KEY_OBJECTID_MASK 0x0fffffffffffffffull + +/* full 64bit objectid*/ +#define KEY_FULLOID_MASK 0xffffffffffffffffull + +/* offset is just 3rd L.M.Nt itself */ +#define KEY_OFFSET_MASK 0xffffffffffffffffull + +/* ordering is whole second element */ +#define KEY_ORDERING_MASK 0xffffffffffffffffull + +/* how many bits key element should be shifted to left to get particular field + */ +typedef enum { + KEY_LOCALITY_SHIFT = 4, + KEY_TYPE_SHIFT = 0, + KEY_BAND_SHIFT = 60, + KEY_OBJECTID_SHIFT = 0, + KEY_FULLOID_SHIFT = 0, + KEY_OFFSET_SHIFT = 0, + KEY_ORDERING_SHIFT = 0, +} reiser4_key_field_shift; + +static inline __u64 +get_key_el(const reiser4_key * key, reiser4_key_field_index off) +{ + assert("nikita-753", key != NULL); + assert("nikita-754", off < KEY_LAST_INDEX); + return le64_to_cpu(get_unaligned(&key->el[off])); +} + +static inline void +set_key_el(reiser4_key * key, reiser4_key_field_index off, __u64 value) +{ + assert("nikita-755", key != NULL); + assert("nikita-756", off < KEY_LAST_INDEX); + put_unaligned(cpu_to_le64(value), &key->el[off]); +} + +/* macro to define getter and setter functions for field F with type T */ +#define DEFINE_KEY_FIELD(L, U, T) \ +static inline T get_key_ ## L(const reiser4_key *key) \ +{ \ + assert("nikita-750", key != NULL); \ + return (T) (get_key_el(key, KEY_ ## U ## _INDEX) & \ + KEY_ ## U ## _MASK) >> KEY_ ## U ## _SHIFT; \ +} \ + \ +static inline void set_key_ ## L(reiser4_key * key, T loc) \ +{ \ + __u64 el; \ + \ + assert("nikita-752", key != NULL); \ + \ + el = get_key_el(key, KEY_ ## U ## _INDEX); \ + /* clear field bits in the key */ \ + el &= ~KEY_ ## U ## _MASK; \ + /* actually it should be \ + \ + el |= ( loc << KEY_ ## U ## _SHIFT ) & KEY_ ## U ## _MASK; \ + \ + but we trust user to never pass values that wouldn't fit \ + into field. Clearing extra bits is one operation, but this \ + function is time-critical. \ + But check this in assertion. 
*/ \ + assert("nikita-759", ((loc << KEY_ ## U ## _SHIFT) & \ + ~KEY_ ## U ## _MASK) == 0); \ + el |= (loc << KEY_ ## U ## _SHIFT); \ + set_key_el(key, KEY_ ## U ## _INDEX, el); \ +} + +typedef __u64 oid_t; + +/* define get_key_locality(), set_key_locality() */ +DEFINE_KEY_FIELD(locality, LOCALITY, oid_t); +/* define get_key_type(), set_key_type() */ +DEFINE_KEY_FIELD(type, TYPE, key_minor_locality); +/* define get_key_band(), set_key_band() */ +DEFINE_KEY_FIELD(band, BAND, __u64); +/* define get_key_objectid(), set_key_objectid() */ +DEFINE_KEY_FIELD(objectid, OBJECTID, oid_t); +/* define get_key_fulloid(), set_key_fulloid() */ +DEFINE_KEY_FIELD(fulloid, FULLOID, oid_t); +/* define get_key_offset(), set_key_offset() */ +DEFINE_KEY_FIELD(offset, OFFSET, __u64); +#if (REISER4_LARGE_KEY) +/* define get_key_ordering(), set_key_ordering() */ +DEFINE_KEY_FIELD(ordering, ORDERING, __u64); +#else +static inline __u64 get_key_ordering(const reiser4_key * key) +{ + return 0; +} + +static inline void set_key_ordering(reiser4_key * key, __u64 val) +{ +} +#endif + +/* key comparison result */ +typedef enum { LESS_THAN = -1, /* if first key is less than second */ + EQUAL_TO = 0, /* if keys are equal */ + GREATER_THAN = +1 /* if first key is greater than second */ +} cmp_t; + +void reiser4_key_init(reiser4_key * key); + +/* minimal possible key in the tree. Return pointer to the static storage. */ +extern const reiser4_key *reiser4_min_key(void); +extern const reiser4_key *reiser4_max_key(void); + +/* helper macro for keycmp() */ +#define KEY_DIFF(k1, k2, field) \ +({ \ + typeof(get_key_ ## field(k1)) f1; \ + typeof(get_key_ ## field(k2)) f2; \ + \ + f1 = get_key_ ## field(k1); \ + f2 = get_key_ ## field(k2); \ + \ + (f1 < f2) ? LESS_THAN : ((f1 == f2) ? EQUAL_TO : GREATER_THAN); \ +}) + +/* helper macro for keycmp() */ +#define KEY_DIFF_EL(k1, k2, off) \ +({ \ + __u64 e1; \ + __u64 e2; \ + \ + e1 = get_key_el(k1, off); \ + e2 = get_key_el(k2, off); \ + \ + (e1 < e2) ? LESS_THAN : ((e1 == e2) ? EQUAL_TO : GREATER_THAN); \ +}) + +/* compare `k1' and `k2'. This function is a heart of "key allocation + policy". All you need to implement new policy is to add yet another + clause here. */ +static inline cmp_t keycmp(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + cmp_t result; + + /* + * This function is the heart of reiser4 tree-routines. Key comparison + * is among most heavily used operations in the file system. + */ + + assert("nikita-439", k1 != NULL); + assert("nikita-440", k2 != NULL); + + /* there is no actual branch here: condition is compile time constant + * and constant folding and propagation ensures that only one branch + * is actually compiled in. */ + + if (REISER4_PLANA_KEY_ALLOCATION) { + /* if physical order of fields in a key is identical + with logical order, we can implement key comparison + as three 64bit comparisons. */ + /* logical order of fields in plan-a: + locality->type->objectid->offset. 
*/ + /* compare locality and type at once */ + result = KEY_DIFF_EL(k1, k2, 0); + if (result == EQUAL_TO) { + /* compare objectid (and band if it's there) */ + result = KEY_DIFF_EL(k1, k2, 1); + /* compare offset */ + if (result == EQUAL_TO) { + result = KEY_DIFF_EL(k1, k2, 2); + if (REISER4_LARGE_KEY && result == EQUAL_TO) + result = KEY_DIFF_EL(k1, k2, 3); + } + } + } else if (REISER4_3_5_KEY_ALLOCATION) { + result = KEY_DIFF(k1, k2, locality); + if (result == EQUAL_TO) { + result = KEY_DIFF(k1, k2, objectid); + if (result == EQUAL_TO) { + result = KEY_DIFF(k1, k2, type); + if (result == EQUAL_TO) + result = KEY_DIFF(k1, k2, offset); + } + } + } else + impossible("nikita-441", "Unknown key allocation scheme!"); + return result; +} + +/* true if @k1 equals @k2 */ +static inline int keyeq(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1879", k1 != NULL); + assert("nikita-1880", k2 != NULL); + return !memcmp(k1, k2, sizeof *k1); +} + +/* true if @k1 is less than @k2 */ +static inline int keylt(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1952", k1 != NULL); + assert("nikita-1953", k2 != NULL); + return keycmp(k1, k2) == LESS_THAN; +} + +/* true if @k1 is less than or equal to @k2 */ +static inline int keyle(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1954", k1 != NULL); + assert("nikita-1955", k2 != NULL); + return keycmp(k1, k2) != GREATER_THAN; +} + +/* true if @k1 is greater than @k2 */ +static inline int keygt(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1959", k1 != NULL); + assert("nikita-1960", k2 != NULL); + return keycmp(k1, k2) == GREATER_THAN; +} + +/* true if @k1 is greater than or equal to @k2 */ +static inline int keyge(const reiser4_key * k1 /* first key to compare */ , + const reiser4_key * k2/* second key to compare */) +{ + assert("nikita-1956", k1 != NULL); + assert("nikita-1957", k2 != NULL); /* October 4: sputnik launched + * November 3: Laika */ + return keycmp(k1, k2) != LESS_THAN; +} + +static inline void prefetchkey(reiser4_key * key) +{ + prefetch(key); + prefetch(&key->el[KEY_CACHELINE_END]); +} + +/* (%Lx:%x:%Lx:%Lx:%Lx:%Lx) = + 1 + 16 + 1 + 1 + 1 + 1 + 1 + 16 + 1 + 16 + 1 + 16 + 1 */ +/* size of a buffer suitable to hold human readable key representation */ +#define KEY_BUF_LEN (80) + +#if REISER4_DEBUG +extern void reiser4_print_key(const char *prefix, const reiser4_key * key); +#else +#define reiser4_print_key(p, k) noop +#endif + +/* __FS_REISERFS_KEY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/ktxnmgrd.c b/fs/reiser4/ktxnmgrd.c new file mode 100644 index 000000000000..b36215b3db43 --- /dev/null +++ b/fs/reiser4/ktxnmgrd.c @@ -0,0 +1,215 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* Transaction manager daemon. */ + +/* + * ktxnmgrd is a kernel daemon responsible for committing transactions. It is + * needed/important for the following reasons: + * + * 1. in reiser4 atom is not committed immediately when last transaction + * handle closes, unless atom is either too old or too large (see + * atom_should_commit()). 
This is done to avoid committing too frequently. + * because: + * + * 2. sometimes we don't want to commit atom when closing last transaction + * handle even if it is old and fat enough. For example, because we are at + * this point under directory semaphore, and committing would stall all + * accesses to this directory. + * + * ktxnmgrd binds its time sleeping on condition variable. When is awakes + * either due to (tunable) timeout or because it was explicitly woken up by + * call to ktxnmgrd_kick(), it scans list of all atoms and commits ones + * eligible. + * + */ + +#include "debug.h" +#include "txnmgr.h" +#include "tree.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/sched.h> /* for struct task_struct */ +#include <linux/wait.h> +#include <linux/suspend.h> +#include <linux/kernel.h> +#include <linux/writeback.h> +#include <linux/kthread.h> +#include <linux/freezer.h> + +static int scan_mgr(struct super_block *); + +/* + * change current->comm so that ps, top, and friends will see changed + * state. This serves no useful purpose whatsoever, but also costs nothing. May + * be it will make lonely system administrator feeling less alone at 3 A.M. + */ +#define set_comm(state) \ + snprintf(current->comm, sizeof(current->comm), \ + "%s:%s:%s", __FUNCTION__, (super)->s_id, (state)) + +/** + * ktxnmgrd - kernel txnmgr daemon + * @arg: pointer to super block + * + * The background transaction manager daemon, started as a kernel thread during + * reiser4 initialization. + */ +static int ktxnmgrd(void *arg) +{ + struct super_block *super; + ktxnmgrd_context *ctx; + txn_mgr *mgr; + int done = 0; + + super = arg; + mgr = &get_super_private(super)->tmgr; + + /* + * do_fork() just copies task_struct into the new thread. ->fs_context + * shouldn't be copied of course. This shouldn't be a problem for the + * rest of the code though. + */ + current->journal_info = NULL; + ctx = mgr->daemon; + while (1) { + try_to_freeze(); + set_comm("wait"); + { + DEFINE_WAIT(__wait); + + prepare_to_wait(&ctx->wait, &__wait, + TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + done = 1; + else + schedule_timeout(ctx->timeout); + finish_wait(&ctx->wait, &__wait); + } + if (done) + break; + set_comm("run"); + spin_lock(&ctx->guard); + /* + * wait timed out or ktxnmgrd was woken up by explicit request + * to commit something. Scan list of atoms in txnmgr and look + * for too old atoms. + */ + do { + ctx->rescan = 0; + scan_mgr(super); + spin_lock(&ctx->guard); + if (ctx->rescan) { + /* + * the list could be modified while ctx + * spinlock was released, we have to repeat + * scanning from the beginning + */ + break; + } + } while (ctx->rescan); + spin_unlock(&ctx->guard); + } + return 0; +} + +#undef set_comm + +/** + * reiser4_init_ktxnmgrd - initialize ktxnmgrd context and start kernel daemon + * @super: pointer to super block + * + * Allocates and initializes ktxnmgrd_context, attaches it to transaction + * manager. Starts kernel txnmgr daemon. This is called on mount. 
+ */ +int reiser4_init_ktxnmgrd(struct super_block *super) +{ + txn_mgr *mgr; + ktxnmgrd_context *ctx; + + mgr = &get_super_private(super)->tmgr; + + assert("zam-1014", mgr->daemon == NULL); + + ctx = kzalloc(sizeof(ktxnmgrd_context), reiser4_ctx_gfp_mask_get()); + if (!ctx) + return RETERR(-ENOMEM); + + assert("nikita-2442", ctx != NULL); + + init_waitqueue_head(&ctx->wait); + + /*kcond_init(&ctx->startup);*/ + spin_lock_init(&ctx->guard); + ctx->timeout = REISER4_TXNMGR_TIMEOUT; + ctx->rescan = 1; + mgr->daemon = ctx; + + ctx->tsk = kthread_run(ktxnmgrd, super, "ktxnmgrd"); + if (IS_ERR(ctx->tsk)) { + int ret = PTR_ERR(ctx->tsk); + mgr->daemon = NULL; + kfree(ctx); + return RETERR(ret); + } + return 0; +} + +void ktxnmgrd_kick(txn_mgr *mgr) +{ + assert("nikita-3234", mgr != NULL); + assert("nikita-3235", mgr->daemon != NULL); + wake_up(&mgr->daemon->wait); +} + +int is_current_ktxnmgrd(void) +{ + return (get_current_super_private()->tmgr.daemon->tsk == current); +} + +/** + * scan_mgr - commit atoms which are to be committed + * @super: super block to commit atoms of + * + * Commits old atoms. + */ +static int scan_mgr(struct super_block *super) +{ + int ret; + reiser4_context ctx; + + init_stack_context(&ctx, super); + + ret = commit_some_atoms(&get_super_private(super)->tmgr); + + reiser4_exit_context(&ctx); + return ret; +} + +/** + * reiser4_done_ktxnmgrd - stop kernel thread and frees ktxnmgrd context + * @mgr: + * + * This is called on umount. Stops ktxnmgrd and free t + */ +void reiser4_done_ktxnmgrd(struct super_block *super) +{ + txn_mgr *mgr; + + mgr = &get_super_private(super)->tmgr; + assert("zam-1012", mgr->daemon != NULL); + + kthread_stop(mgr->daemon->tsk); + kfree(mgr->daemon); + mgr->daemon = NULL; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/ktxnmgrd.h b/fs/reiser4/ktxnmgrd.h new file mode 100644 index 000000000000..d00f1d9e54ed --- /dev/null +++ b/fs/reiser4/ktxnmgrd.h @@ -0,0 +1,52 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Transaction manager daemon. See ktxnmgrd.c for comments. */ + +#ifndef __KTXNMGRD_H__ +#define __KTXNMGRD_H__ + +#include "txnmgr.h" + +#include <linux/fs.h> +#include <linux/wait.h> +#include <linux/completion.h> +#include <linux/spinlock.h> +#include <asm/atomic.h> +#include <linux/sched.h> /* for struct task_struct */ + +/* in this structure all data necessary to start up, shut down and communicate + * with ktxnmgrd are kept. */ +struct ktxnmgrd_context { + /* wait queue head on which ktxnmgrd sleeps */ + wait_queue_head_t wait; + /* spin lock protecting all fields of this structure */ + spinlock_t guard; + /* timeout of sleeping on ->wait */ + signed long timeout; + /* kernel thread running ktxnmgrd */ + struct task_struct *tsk; + /* list of all file systems served by this ktxnmgrd */ + struct list_head queue; + /* should ktxnmgrd repeat scanning of atoms? */ + unsigned int rescan:1; +}; + +extern int reiser4_init_ktxnmgrd(struct super_block *); +extern void reiser4_done_ktxnmgrd(struct super_block *); + +extern void ktxnmgrd_kick(txn_mgr * mgr); +extern int is_current_ktxnmgrd(void); + +/* __KTXNMGRD_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/lock.c b/fs/reiser4/lock.c new file mode 100644 index 000000000000..4af6fd0f9d2a --- /dev/null +++ b/fs/reiser4/lock.c @@ -0,0 +1,1237 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Traditional deadlock avoidance is achieved by acquiring all locks in a single + order. V4 balances the tree from the bottom up, and searches the tree from + the top down, and that is really the way we want it, so tradition won't work + for us. + + Instead we have two lock orderings, a high priority lock ordering, and a low + priority lock ordering. Each node in the tree has a lock in its znode. + + Suppose we have a set of processes which lock (R/W) tree nodes. Each process + has a set (maybe empty) of already locked nodes ("process locked set"). Each + process may have a pending lock request to a node locked by another process. + Note: we lock and unlock, but do not transfer locks: it is possible + transferring locks instead would save some bus locking.... + + Deadlock occurs when we have a loop constructed from process locked sets and + lock request vectors. + + NOTE: The reiser4 "tree" is a tree on disk, but its cached representation in + memory is extended with "znodes" with which we connect nodes with their left + and right neighbors using sibling pointers stored in the znodes. When we + perform balancing operations we often go from left to right and from right to + left. + + +-P1-+ +-P3-+ + |+--+| V1 |+--+| + ||N1|| -------> ||N3|| + |+--+| |+--+| + +----+ +----+ + ^ | + |V2 |V3 + | v + +---------P2---------+ + |+--+ +--+| + ||N2| -------- |N4|| + |+--+ +--+| + +--------------------+ + + We solve this by ensuring that only low priority processes lock in top to + bottom order and from right to left, and high priority processes lock from + bottom to top and left to right. + + ZAM-FIXME-HANS: order not just node locks in this way, order atom locks, and + kill those damn busy loops. + ANSWER(ZAM): atom locks (which are introduced by ASTAGE_CAPTURE_WAIT atom + stage) cannot be ordered that way. There are no rules what nodes can belong + to the atom and what nodes cannot. We cannot define what is right or left + direction, what is top or bottom. We can take immediate parent or side + neighbor of one node, but nobody guarantees that, say, left neighbor node is + not a far right neighbor for other nodes from the same atom. It breaks + deadlock avoidance rules and hi-low priority locking cannot be applied for + atom locks. + + How does it help to avoid deadlocks ? + + Suppose we have a deadlock with n processes. Processes from one priority + class never deadlock because they take locks in one consistent + order. + + So, any possible deadlock loop must have low priority as well as high + priority processes. There are no other lock priority levels except low and + high. We know that any deadlock loop contains at least one node locked by a + low priority process and requested by a high priority process. If this + situation is caught and resolved it is sufficient to avoid deadlocks. + + V4 DEADLOCK PREVENTION ALGORITHM IMPLEMENTATION. + + The deadlock prevention algorithm is based on comparing + priorities of node owners (processes which keep znode locked) and + requesters (processes which want to acquire a lock on znode). We + implement a scheme where low-priority owners yield locks to + high-priority requesters. 
We created a signal passing system that + is used to ask low-priority processes to yield one or more locked + znodes. + + The condition when a znode needs to change its owners is described by the + following formula: + + ############################################# + # # + # (number of high-priority requesters) > 0 # + # AND # + # (numbers of high-priority owners) == 0 # + # # + ############################################# + + Note that a low-priority process delays node releasing if another + high-priority process owns this node. So, slightly more strictly speaking, + to have a deadlock capable cycle you must have a loop in which a high + priority process is waiting on a low priority process to yield a node, which + is slightly different from saying a high priority process is waiting on a + node owned by a low priority process. + + It is enough to avoid deadlocks if we prevent any low-priority process from + falling asleep if its locked set contains a node which satisfies the + deadlock condition. + + That condition is implicitly or explicitly checked in all places where new + high-priority requests may be added or removed from node request queue or + high-priority process takes or releases a lock on node. The main + goal of these checks is to never lose the moment when node becomes "has + wrong owners" and send "must-yield-this-lock" signals to its low-pri owners + at that time. + + The information about received signals is stored in the per-process + structure (lock stack) and analyzed before a low-priority process goes to + sleep but after a "fast" attempt to lock a node fails. Any signal wakes + sleeping process up and forces him to re-check lock status and received + signal info. If "must-yield-this-lock" signals were received the locking + primitive (longterm_lock_znode()) fails with -E_DEADLOCK error code. + + V4 LOCKING DRAWBACKS + + If we have already balanced on one level, and we are propagating our changes + upward to a higher level, it could be very messy to surrender all locks on + the lower level because we put so much computational work into it, and + reverting them to their state before they were locked might be very complex. + We also don't want to acquire all locks before performing balancing because + that would either be almost as much work as the balancing, or it would be + too conservative and lock too much. We want balancing to be done only at + high priority. Yet, we might want to go to the left one node and use some + of its empty space... So we make one attempt at getting the node to the left + using try_lock, and if it fails we do without it, because we didn't really + need it, it was only a nice to have. + + LOCK STRUCTURES DESCRIPTION + + The following data structures are used in the reiser4 locking + implementation: + + All fields related to long-term locking are stored in znode->lock. + + The lock stack is a per thread object. It owns all znodes locked by the + thread. One znode may be locked by several threads in case of read lock or + one znode may be write locked by one thread several times. The special link + objects (lock handles) support n<->m relation between znodes and lock + owners. 
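+
+   For illustration only (a sketch; the real code is link_object() and
+   unlink_object() further down in this file), establishing one such
+   relation is just two list insertions done under the znode's zlock
+   spin lock:
+
+     handle->owner = owner;
+     handle->node  = node;
+     list_add_tail(&handle->locks_link, &owner->locks);
+     list_add(&handle->owners_link, &node->lock.owners);
+
+   one insertion on the lock stack side, one on the znode side; breaking
+   the relation is the two corresponding list deletions.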
+ + <Thread 1> <Thread 2> + + +---------+ +---------+ + | LS1 | | LS2 | + +---------+ +---------+ + ^ ^ + |---------------+ +----------+ + v v v v + +---------+ +---------+ +---------+ +---------+ + | LH1 | | LH2 | | LH3 | | LH4 | + +---------+ +---------+ +---------+ +---------+ + ^ ^ ^ ^ + | +------------+ | + v v v + +---------+ +---------+ +---------+ + | Z1 | | Z2 | | Z3 | + +---------+ +---------+ +---------+ + + Thread 1 locked znodes Z1 and Z2, thread 2 locked znodes Z2 and Z3. The + picture above shows that lock stack LS1 has a list of 2 lock handles LH1 and + LH2, lock stack LS2 has a list with lock handles LH3 and LH4 on it. Znode + Z1 is locked by only one thread, znode has only one lock handle LH1 on its + list, similar situation is for Z3 which is locked by the thread 2 only. Z2 + is locked (for read) twice by different threads and two lock handles are on + its list. Each lock handle represents a single relation of a locking of a + znode by a thread. Locking of a znode is an establishing of a locking + relation between the lock stack and the znode by adding of a new lock handle + to a list of lock handles, the lock stack. The lock stack links all lock + handles for all znodes locked by the lock stack. The znode list groups all + lock handles for all locks stacks which locked the znode. + + Yet another relation may exist between znode and lock owners. If lock + procedure cannot immediately take lock on an object it adds the lock owner + on special `requestors' list belongs to znode. That list represents a + queue of pending lock requests. Because one lock owner may request only + only one lock object at a time, it is a 1->n relation between lock objects + and a lock owner implemented as it is described above. Full information + (priority, pointers to lock and link objects) about each lock request is + stored in lock owner structure in `request' field. + + SHORT_TERM LOCKING + + This is a list of primitive operations over lock stacks / lock handles / + znodes and locking descriptions for them. + + 1. locking / unlocking which is done by two list insertion/deletion, one + to/from znode's list of lock handles, another one is to/from lock stack's + list of lock handles. The first insertion is protected by + znode->lock.guard spinlock. The list owned by the lock stack can be + modified only by thread who owns the lock stack and nobody else can + modify/read it. There is nothing to be protected by a spinlock or + something else. + + 2. adding/removing a lock request to/from znode requesters list. The rule is + that znode->lock.guard spinlock should be taken for this. + + 3. we can traverse list of lock handles and use references to lock stacks who + locked given znode if znode->lock.guard spinlock is taken. + + 4. If a lock stack is associated with a znode as a lock requestor or lock + owner its existence is guaranteed by znode->lock.guard spinlock. Some its + (lock stack's) fields should be protected from being accessed in parallel + by two or more threads. Please look at lock_stack structure definition + for the info how those fields are protected. */ + +/* Znode lock and capturing intertwining. */ +/* In current implementation we capture formatted nodes before locking + them. Take a look on longterm lock znode, reiser4_try_capture() request + precedes locking requests. The longterm_lock_znode function unconditionally + captures znode before even checking of locking conditions. + + Another variant is to capture znode after locking it. 
It was not tested, but + at least one deadlock condition is supposed to be there. One thread has + locked a znode (Node-1) and calls reiser4_try_capture() for it. + reiser4_try_capture() sleeps because znode's atom has CAPTURE_WAIT state. + Second thread is a flushing thread, its current atom is the atom Node-1 + belongs to. Second thread wants to lock Node-1 and sleeps because Node-1 + is locked by the first thread. The described situation is a deadlock. */ + +#include "debug.h" +#include "txnmgr.h" +#include "znode.h" +#include "jnode.h" +#include "tree.h" +#include "plugin/node/node.h" +#include "super.h" + +#include <linux/spinlock.h> + +#if REISER4_DEBUG +static int request_is_deadlock_safe(znode * , znode_lock_mode, + znode_lock_request); +#endif + +/* Returns a lock owner associated with current thread */ +lock_stack *get_current_lock_stack(void) +{ + return &get_current_context()->stack; +} + +/* Wakes up all low priority owners informing them about possible deadlock */ +static void wake_up_all_lopri_owners(znode * node) +{ + lock_handle *handle; + + assert_spin_locked(&(node->lock.guard)); + list_for_each_entry(handle, &node->lock.owners, owners_link) { + assert("nikita-1832", handle->node == node); + /* count this signal in owner->nr_signaled */ + if (!handle->signaled) { + handle->signaled = 1; + atomic_inc(&handle->owner->nr_signaled); + /* Wake up a single process */ + reiser4_wake_up(handle->owner); + } + } +} + +/* Adds a lock to a lock owner, which means creating a link to the lock and + putting the link into the two lists all links are on (the doubly linked list + that forms the lock_stack, and the doubly linked list of links attached + to a lock. +*/ +static inline void +link_object(lock_handle * handle, lock_stack * owner, znode * node) +{ + assert("jmacd-810", handle->owner == NULL); + assert_spin_locked(&(node->lock.guard)); + + handle->owner = owner; + handle->node = node; + + assert("reiser4-4", + ergo(list_empty_careful(&owner->locks), owner->nr_locks == 0)); + + /* add lock handle to the end of lock_stack's list of locks */ + list_add_tail(&handle->locks_link, &owner->locks); + ON_DEBUG(owner->nr_locks++); + reiser4_ctx_gfp_mask_set(); + + /* add lock handle to the head of znode's list of owners */ + list_add(&handle->owners_link, &node->lock.owners); + handle->signaled = 0; +} + +/* Breaks a relation between a lock and its owner */ +static inline void unlink_object(lock_handle * handle) +{ + assert("zam-354", handle->owner != NULL); + assert("nikita-1608", handle->node != NULL); + assert_spin_locked(&(handle->node->lock.guard)); + assert("nikita-1829", handle->owner == get_current_lock_stack()); + assert("reiser4-5", handle->owner->nr_locks > 0); + + /* remove lock handle from lock_stack's list of locks */ + list_del(&handle->locks_link); + ON_DEBUG(handle->owner->nr_locks--); + reiser4_ctx_gfp_mask_set(); + assert("reiser4-6", + ergo(list_empty_careful(&handle->owner->locks), + handle->owner->nr_locks == 0)); + /* remove lock handle from znode's list of owners */ + list_del(&handle->owners_link); + /* indicates that lock handle is free now */ + handle->node = NULL; +#if REISER4_DEBUG + INIT_LIST_HEAD(&handle->locks_link); + INIT_LIST_HEAD(&handle->owners_link); + handle->owner = NULL; +#endif +} + +/* Actually locks an object knowing that we are able to do this */ +static void lock_object(lock_stack * owner) +{ + struct lock_request *request; + znode *node; + + request = &owner->request; + node = request->node; + assert_spin_locked(&(node->lock.guard)); + if 
(request->mode == ZNODE_READ_LOCK) { + node->lock.nr_readers++; + } else { + /* check that we don't switched from read to write lock */ + assert("nikita-1840", node->lock.nr_readers <= 0); + /* We allow recursive locking; a node can be locked several + times for write by same process */ + node->lock.nr_readers--; + } + + link_object(request->handle, owner, node); + + if (owner->curpri) + node->lock.nr_hipri_owners++; +} + +/* Check for recursive write locking */ +static int recursive(lock_stack * owner) +{ + int ret; + znode *node; + lock_handle *lh; + + node = owner->request.node; + + /* Owners list is not empty for a locked node */ + assert("zam-314", !list_empty_careful(&node->lock.owners)); + assert("nikita-1841", owner == get_current_lock_stack()); + assert_spin_locked(&(node->lock.guard)); + + lh = list_entry(node->lock.owners.next, lock_handle, owners_link); + ret = (lh->owner == owner); + + /* Recursive read locking should be done usual way */ + assert("zam-315", !ret || owner->request.mode == ZNODE_WRITE_LOCK); + /* mixing of read/write locks is not allowed */ + assert("zam-341", !ret || znode_is_wlocked(node)); + + return ret; +} + +#if REISER4_DEBUG +/* Returns true if the lock is held by the calling thread. */ +int znode_is_any_locked(const znode * node) +{ + lock_handle *handle; + lock_stack *stack; + int ret; + + if (!znode_is_locked(node)) + return 0; + + stack = get_current_lock_stack(); + + spin_lock_stack(stack); + + ret = 0; + + list_for_each_entry(handle, &stack->locks, locks_link) { + if (handle->node == node) { + ret = 1; + break; + } + } + + spin_unlock_stack(stack); + + return ret; +} + +#endif + +/* Returns true if a write lock is held by the calling thread. */ +int znode_is_write_locked(const znode * node) +{ + lock_stack *stack; + lock_handle *handle; + + assert("jmacd-8765", node != NULL); + + if (!znode_is_wlocked(node)) + return 0; + + stack = get_current_lock_stack(); + + /* + * When znode is write locked, all owner handles point to the same lock + * stack. Get pointer to lock stack from the first lock handle from + * znode's owner list + */ + handle = list_entry(node->lock.owners.next, lock_handle, owners_link); + + return (handle->owner == stack); +} + +/* This "deadlock" condition is the essential part of reiser4 locking + implementation. This condition is checked explicitly by calling + check_deadlock_condition() or implicitly in all places where znode lock + state (set of owners and request queue) is changed. Locking code is + designed to use this condition to trigger procedure of passing object from + low priority owner(s) to high priority one(s). + + The procedure results in passing an event (setting lock_handle->signaled + flag) and counting this event in nr_signaled field of owner's lock stack + object and wakeup owner's process. +*/ +static inline int check_deadlock_condition(znode * node) +{ + assert_spin_locked(&(node->lock.guard)); + return node->lock.nr_hipri_requests > 0 + && node->lock.nr_hipri_owners == 0; +} + +static int check_livelock_condition(znode * node, znode_lock_mode mode) +{ + zlock * lock = &node->lock; + + return mode == ZNODE_READ_LOCK && + lock->nr_readers >= 0 && lock->nr_hipri_write_requests > 0; +} + +/* checks lock/request compatibility */ +static int can_lock_object(lock_stack * owner) +{ + znode *node = owner->request.node; + + assert_spin_locked(&(node->lock.guard)); + + /* See if the node is disconnected. 
*/ + if (unlikely(ZF_ISSET(node, JNODE_IS_DYING))) + return RETERR(-EINVAL); + + /* Do not ever try to take a lock if we are going in low priority + direction and a node have a high priority request without high + priority owners. */ + if (unlikely(!owner->curpri && check_deadlock_condition(node))) + return RETERR(-E_REPEAT); + if (unlikely(owner->curpri && + check_livelock_condition(node, owner->request.mode))) + return RETERR(-E_REPEAT); + if (unlikely(!is_lock_compatible(node, owner->request.mode))) + return RETERR(-E_REPEAT); + return 0; +} + +/* Setting of a high priority to the process. It clears "signaled" flags + because znode locked by high-priority process can't satisfy our "deadlock + condition". */ +static void set_high_priority(lock_stack * owner) +{ + assert("nikita-1846", owner == get_current_lock_stack()); + /* Do nothing if current priority is already high */ + if (!owner->curpri) { + /* We don't need locking for owner->locks list, because, this + * function is only called with the lock stack of the current + * thread, and no other thread can play with owner->locks list + * and/or change ->node pointers of lock handles in this list. + * + * (Interrupts also are not involved.) + */ + lock_handle *item = list_entry(owner->locks.next, lock_handle, + locks_link); + while (&owner->locks != &item->locks_link) { + znode *node = item->node; + + spin_lock_zlock(&node->lock); + + node->lock.nr_hipri_owners++; + + /* we can safely set signaled to zero, because + previous statement (nr_hipri_owners ++) guarantees + that signaled will be never set again. */ + item->signaled = 0; + spin_unlock_zlock(&node->lock); + + item = list_entry(item->locks_link.next, lock_handle, + locks_link); + } + owner->curpri = 1; + atomic_set(&owner->nr_signaled, 0); + } +} + +/* Sets a low priority to the process. */ +static void set_low_priority(lock_stack * owner) +{ + assert("nikita-3075", owner == get_current_lock_stack()); + /* Do nothing if current priority is already low */ + if (owner->curpri) { + /* scan all locks (lock handles) held by @owner, which is + actually current thread, and check whether we are reaching + deadlock possibility anywhere. + */ + lock_handle *handle = list_entry(owner->locks.next, lock_handle, + locks_link); + while (&owner->locks != &handle->locks_link) { + znode *node = handle->node; + spin_lock_zlock(&node->lock); + /* this thread just was hipri owner of @node, so + nr_hipri_owners has to be greater than zero. */ + assert("nikita-1835", node->lock.nr_hipri_owners > 0); + node->lock.nr_hipri_owners--; + /* If we have deadlock condition, adjust a nr_signaled + field. It is enough to set "signaled" flag only for + current process, other low-pri owners will be + signaled and waken up after current process unlocks + this object and any high-priority requestor takes + control. 
*/ + if (check_deadlock_condition(node) + && !handle->signaled) { + handle->signaled = 1; + atomic_inc(&owner->nr_signaled); + } + spin_unlock_zlock(&node->lock); + handle = list_entry(handle->locks_link.next, + lock_handle, locks_link); + } + owner->curpri = 0; + } +} + +static void remove_lock_request(lock_stack * requestor) +{ + zlock * lock = &requestor->request.node->lock; + + if (requestor->curpri) { + assert("nikita-1838", lock->nr_hipri_requests > 0); + lock->nr_hipri_requests--; + if (requestor->request.mode == ZNODE_WRITE_LOCK) + lock->nr_hipri_write_requests--; + } + list_del(&requestor->requestors_link); +} + +static void invalidate_all_lock_requests(znode * node) +{ + lock_stack *requestor, *tmp; + + assert_spin_locked(&(node->lock.guard)); + + list_for_each_entry_safe(requestor, tmp, &node->lock.requestors, + requestors_link) { + remove_lock_request(requestor); + requestor->request.ret_code = -EINVAL; + reiser4_wake_up(requestor); + requestor->request.mode = ZNODE_NO_LOCK; + } +} + +static void dispatch_lock_requests(znode * node) +{ + lock_stack *requestor, *tmp; + + assert_spin_locked(&(node->lock.guard)); + + list_for_each_entry_safe(requestor, tmp, &node->lock.requestors, + requestors_link) { + if (znode_is_write_locked(node)) + break; + if (!can_lock_object(requestor)) { + lock_object(requestor); + remove_lock_request(requestor); + requestor->request.ret_code = 0; + reiser4_wake_up(requestor); + requestor->request.mode = ZNODE_NO_LOCK; + } + } +} + +/* release long-term lock, acquired by longterm_lock_znode() */ +void longterm_unlock_znode(lock_handle * handle) +{ + znode *node = handle->node; + lock_stack *oldowner = handle->owner; + int hipri; + int readers; + int rdelta; + int youdie; + + /* + * this is time-critical and highly optimized code. Modify carefully. + */ + + assert("jmacd-1021", handle != NULL); + assert("jmacd-1022", handle->owner != NULL); + assert("nikita-1392", LOCK_CNT_GTZ(long_term_locked_znode)); + + assert("zam-130", oldowner == get_current_lock_stack()); + + LOCK_CNT_DEC(long_term_locked_znode); + + /* + * to minimize amount of operations performed under lock, pre-compute + * all variables used within critical section. This makes code + * obscure. + */ + + /* was this lock of hi or lo priority */ + hipri = oldowner->curpri ? 1 : 0; + /* number of readers */ + readers = node->lock.nr_readers; + /* +1 if write lock, -1 if read lock */ + rdelta = (readers > 0) ? -1 : +1; + /* true if node is to die and write lock is released */ + youdie = ZF_ISSET(node, JNODE_HEARD_BANSHEE) && (readers < 0); + + spin_lock_zlock(&node->lock); + + assert("zam-101", znode_is_locked(node)); + + /* Adjust a number of high priority owners of this lock */ + assert("nikita-1836", node->lock.nr_hipri_owners >= hipri); + node->lock.nr_hipri_owners -= hipri; + + /* Handle znode deallocation on last write-lock release. */ + if (znode_is_wlocked_once(node)) { + if (youdie) { + forget_znode(handle); + assert("nikita-2191", znode_invariant(node)); + zput(node); + return; + } + } + + if (handle->signaled) + atomic_dec(&oldowner->nr_signaled); + + /* Unlocking means owner<->object link deletion */ + unlink_object(handle); + + /* This is enough to be sure whether an object is completely + unlocked. */ + node->lock.nr_readers += rdelta; + + /* If the node is locked it must have an owners list. Likewise, if + the node is unlocked it must have an empty owners list. 
*/ + assert("zam-319", equi(znode_is_locked(node), + !list_empty_careful(&node->lock.owners))); + +#if REISER4_DEBUG + if (!znode_is_locked(node)) + ++node->times_locked; +#endif + + /* If there are pending lock requests we wake up a requestor */ + if (!znode_is_wlocked(node)) + dispatch_lock_requests(node); + if (check_deadlock_condition(node)) + wake_up_all_lopri_owners(node); + spin_unlock_zlock(&node->lock); + + /* minus one reference from handle->node */ + assert("nikita-2190", znode_invariant(node)); + ON_DEBUG(check_lock_data()); + ON_DEBUG(check_lock_node_data(node)); + zput(node); +} + +/* final portion of longterm-lock */ +static int +lock_tail(lock_stack * owner, int ok, znode_lock_mode mode) +{ + znode *node = owner->request.node; + + assert_spin_locked(&(node->lock.guard)); + + /* If we broke with (ok == 0) it means we can_lock, now do it. */ + if (ok == 0) { + lock_object(owner); + owner->request.mode = 0; + /* count a reference from lockhandle->node + + znode was already referenced at the entry to this function, + hence taking spin-lock here is not necessary (see comment + in the zref()). + */ + zref(node); + + LOCK_CNT_INC(long_term_locked_znode); + } + spin_unlock_zlock(&node->lock); + ON_DEBUG(check_lock_data()); + ON_DEBUG(check_lock_node_data(node)); + return ok; +} + +/* + * version of longterm_znode_lock() optimized for the most common case: read + * lock without any special flags. This is the kind of lock that any tree + * traversal takes on the root node of the tree, which is very frequent. + */ +static int longterm_lock_tryfast(lock_stack * owner) +{ + int result; + znode *node; + zlock *lock; + + node = owner->request.node; + lock = &node->lock; + + assert("nikita-3340", reiser4_schedulable()); + assert("nikita-3341", request_is_deadlock_safe(node, + ZNODE_READ_LOCK, + ZNODE_LOCK_LOPRI)); + spin_lock_zlock(lock); + result = can_lock_object(owner); + spin_unlock_zlock(lock); + + if (likely(result != -EINVAL)) { + spin_lock_znode(node); + result = reiser4_try_capture(ZJNODE(node), ZNODE_READ_LOCK, 0); + spin_unlock_znode(node); + spin_lock_zlock(lock); + if (unlikely(result != 0)) { + owner->request.mode = 0; + } else { + result = can_lock_object(owner); + if (unlikely(result == -E_REPEAT)) { + /* fall back to longterm_lock_znode() */ + spin_unlock_zlock(lock); + return 1; + } + } + return lock_tail(owner, result, ZNODE_READ_LOCK); + } else + return 1; +} + +/* locks given lock object */ +int longterm_lock_znode( + /* local link object (allocated by lock owner + * thread, usually on its own stack) */ + lock_handle * handle, + /* znode we want to lock. */ + znode * node, + /* {ZNODE_READ_LOCK, ZNODE_WRITE_LOCK}; */ + znode_lock_mode mode, + /* {0, -EINVAL, -E_DEADLOCK}, see return codes + description. */ + znode_lock_request request) { + int ret; + int hipri = (request & ZNODE_LOCK_HIPRI) != 0; + int non_blocking = 0; + int has_atom; + txn_capture cap_flags; + zlock *lock; + txn_handle *txnh; + tree_level level; + + /* Get current process context */ + lock_stack *owner = get_current_lock_stack(); + + /* Check that the lock handle is initialized and isn't already being + * used. */ + assert("jmacd-808", handle->owner == NULL); + assert("nikita-3026", reiser4_schedulable()); + assert("nikita-3219", request_is_deadlock_safe(node, mode, request)); + assert("zam-1056", atomic_read(&ZJNODE(node)->x_count) > 0); + /* long term locks are not allowed in the VM contexts (->writepage(), + * prune_{d,i}cache()). 
+ * + * FIXME this doesn't work due to unused-dentry-with-unlinked-inode + * bug caused by d_splice_alias() only working for directories. + */ + assert("nikita-3547", 1 || ((current->flags & PF_MEMALLOC) == 0)); + assert("zam-1055", mode != ZNODE_NO_LOCK); + + cap_flags = 0; + if (request & ZNODE_LOCK_NONBLOCK) { + cap_flags |= TXN_CAPTURE_NONBLOCKING; + non_blocking = 1; + } + + if (request & ZNODE_LOCK_DONT_FUSE) + cap_flags |= TXN_CAPTURE_DONT_FUSE; + + /* If we are changing our process priority we must adjust a number + of high priority owners for each znode that we already lock */ + if (hipri) { + set_high_priority(owner); + } else { + set_low_priority(owner); + } + + level = znode_get_level(node); + + /* Fill request structure with our values. */ + owner->request.mode = mode; + owner->request.handle = handle; + owner->request.node = node; + + txnh = get_current_context()->trans; + lock = &node->lock; + + if (mode == ZNODE_READ_LOCK && request == 0) { + ret = longterm_lock_tryfast(owner); + if (ret <= 0) + return ret; + } + + has_atom = (txnh->atom != NULL); + + /* Synchronize on node's zlock guard lock. */ + spin_lock_zlock(lock); + + if (znode_is_locked(node) && + mode == ZNODE_WRITE_LOCK && recursive(owner)) + return lock_tail(owner, 0, mode); + + for (;;) { + /* Check the lock's availability: if it is unavaiable we get + E_REPEAT, 0 indicates "can_lock", otherwise the node is + invalid. */ + ret = can_lock_object(owner); + + if (unlikely(ret == -EINVAL)) { + /* @node is dying. Leave it alone. */ + break; + } + + if (unlikely(ret == -E_REPEAT && non_blocking)) { + /* either locking of @node by the current thread will + * lead to the deadlock, or lock modes are + * incompatible. */ + break; + } + + assert("nikita-1844", (ret == 0) + || ((ret == -E_REPEAT) && !non_blocking)); + /* If we can get the lock... Try to capture first before + taking the lock. */ + + /* first handle commonest case where node and txnh are already + * in the same atom. */ + /* safe to do without taking locks, because: + * + * 1. read of aligned word is atomic with respect to writes to + * this word + * + * 2. false negatives are handled in reiser4_try_capture(). + * + * 3. false positives are impossible. + * + * PROOF: left as an exercise to the curious reader. + * + * Just kidding. Here is one: + * + * At the time T0 txnh->atom is stored in txnh_atom. + * + * At the time T1 node->atom is stored in node_atom. + * + * At the time T2 we observe that + * + * txnh_atom != NULL && node_atom == txnh_atom. + * + * Imagine that at this moment we acquire node and txnh spin + * lock in this order. Suppose that under spin lock we have + * + * node->atom != txnh->atom, (S1) + * + * at the time T3. + * + * txnh->atom != NULL still, because txnh is open by the + * current thread. + * + * Suppose node->atom == NULL, that is, node was un-captured + * between T1, and T3. But un-capturing of formatted node is + * always preceded by the call to reiser4_invalidate_lock(), + * which marks znode as JNODE_IS_DYING under zlock spin + * lock. Contradiction, because can_lock_object() above checks + * for JNODE_IS_DYING. Hence, node->atom != NULL at T3. + * + * Suppose that node->atom != node_atom, that is, atom, node + * belongs to was fused into another atom: node_atom was fused + * into node->atom. Atom of txnh was equal to node_atom at T2, + * which means that under spin lock, txnh->atom == node->atom, + * because txnh->atom can only follow fusion + * chain. Contradicts S1. + * + * The same for hypothesis txnh->atom != txnh_atom. 
Hence, + * node->atom == node_atom == txnh_atom == txnh->atom. Again + * contradicts S1. Hence S1 is false. QED. + * + */ + + if (likely(has_atom && ZJNODE(node)->atom == txnh->atom)) { + ; + } else { + /* + * unlock zlock spin lock here. It is possible for + * longterm_unlock_znode() to sneak in here, but there + * is no harm: reiser4_invalidate_lock() will mark znode + * as JNODE_IS_DYING and this will be noted by + * can_lock_object() below. + */ + spin_unlock_zlock(lock); + spin_lock_znode(node); + ret = reiser4_try_capture(ZJNODE(node), mode, + cap_flags); + spin_unlock_znode(node); + spin_lock_zlock(lock); + if (unlikely(ret != 0)) { + /* In the failure case, the txnmgr releases + the znode's lock (or in some cases, it was + released a while ago). There's no need to + reacquire it so we should return here, + avoid releasing the lock. */ + owner->request.mode = 0; + break; + } + + /* Check the lock's availability again -- this is + because under some circumstances the capture code + has to release and reacquire the znode spinlock. */ + ret = can_lock_object(owner); + } + + /* This time, a return of (ret == 0) means we can lock, so we + should break out of the loop. */ + if (likely(ret != -E_REPEAT || non_blocking)) + break; + + /* Lock is unavailable, we have to wait. */ + ret = reiser4_prepare_to_sleep(owner); + if (unlikely(ret != 0)) + break; + + assert_spin_locked(&(node->lock.guard)); + if (hipri) { + /* If we are going in high priority direction then + increase high priority requests counter for the + node */ + lock->nr_hipri_requests++; + if (mode == ZNODE_WRITE_LOCK) + lock->nr_hipri_write_requests++; + /* If there are no high priority owners for a node, + then immediately wake up low priority owners, so + they can detect possible deadlock */ + if (lock->nr_hipri_owners == 0) + wake_up_all_lopri_owners(node); + } + list_add_tail(&owner->requestors_link, &lock->requestors); + + /* Ok, here we have prepared a lock request, so unlock + a znode ... */ + spin_unlock_zlock(lock); + /* ... and sleep */ + reiser4_go_to_sleep(owner); + if (owner->request.mode == ZNODE_NO_LOCK) + goto request_is_done; + spin_lock_zlock(lock); + if (owner->request.mode == ZNODE_NO_LOCK) { + spin_unlock_zlock(lock); +request_is_done: + if (owner->request.ret_code == 0) { + LOCK_CNT_INC(long_term_locked_znode); + zref(node); + } + return owner->request.ret_code; + } + remove_lock_request(owner); + } + + return lock_tail(owner, ret, mode); +} + +/* lock object invalidation means changing of lock object state to `INVALID' + and waiting for all other processes to cancel theirs lock requests. */ +void reiser4_invalidate_lock(lock_handle * handle /* path to lock + * owner and lock + * object is being + * invalidated. */ ) +{ + znode *node = handle->node; + lock_stack *owner = handle->owner; + + assert("zam-325", owner == get_current_lock_stack()); + assert("zam-103", znode_is_write_locked(node)); + assert("nikita-1393", !ZF_ISSET(node, JNODE_LEFT_CONNECTED)); + assert("nikita-1793", !ZF_ISSET(node, JNODE_RIGHT_CONNECTED)); + assert("nikita-1394", ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert("nikita-3097", znode_is_wlocked_once(node)); + assert_spin_locked(&(node->lock.guard)); + + if (handle->signaled) + atomic_dec(&owner->nr_signaled); + + ZF_SET(node, JNODE_IS_DYING); + unlink_object(handle); + node->lock.nr_readers = 0; + + invalidate_all_lock_requests(node); + spin_unlock_zlock(&node->lock); +} + +/* Initializes lock_stack. 
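+
+   A lock stack is a per-thread object: it is embedded into the thread's
+   reiser4 context and is normally obtained with get_current_lock_stack(),
+   defined earlier in this file, which returns
+   &get_current_context()->stack.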
*/ +void init_lock_stack(lock_stack * owner /* pointer to + * allocated + * structure. */ ) +{ + INIT_LIST_HEAD(&owner->locks); + INIT_LIST_HEAD(&owner->requestors_link); + spin_lock_init(&owner->sguard); + owner->curpri = 1; + init_waitqueue_head(&owner->wait); +} + +/* Initializes lock object. */ +void reiser4_init_lock(zlock * lock /* pointer on allocated + * uninitialized lock object + * structure. */ ) +{ + memset(lock, 0, sizeof(zlock)); + spin_lock_init(&lock->guard); + INIT_LIST_HEAD(&lock->requestors); + INIT_LIST_HEAD(&lock->owners); +} + +/* Transfer a lock handle (presumably so that variables can be moved between + stack and heap locations). */ +static void +move_lh_internal(lock_handle * new, lock_handle * old, int unlink_old) +{ + znode *node = old->node; + lock_stack *owner = old->owner; + int signaled; + + /* locks_list, modified by link_object() is not protected by + anything. This is valid because only current thread ever modifies + locks_list of its lock_stack. + */ + assert("nikita-1827", owner == get_current_lock_stack()); + assert("nikita-1831", new->owner == NULL); + + spin_lock_zlock(&node->lock); + + signaled = old->signaled; + if (unlink_old) { + unlink_object(old); + } else { + if (node->lock.nr_readers > 0) { + node->lock.nr_readers += 1; + } else { + node->lock.nr_readers -= 1; + } + if (signaled) + atomic_inc(&owner->nr_signaled); + if (owner->curpri) + node->lock.nr_hipri_owners += 1; + LOCK_CNT_INC(long_term_locked_znode); + + zref(node); + } + link_object(new, owner, node); + new->signaled = signaled; + + spin_unlock_zlock(&node->lock); +} + +void move_lh(lock_handle * new, lock_handle * old) +{ + move_lh_internal(new, old, /*unlink_old */ 1); +} + +void copy_lh(lock_handle * new, lock_handle * old) +{ + move_lh_internal(new, old, /*unlink_old */ 0); +} + +/* after getting -E_DEADLOCK we unlock znodes until this function returns false + */ +int reiser4_check_deadlock(void) +{ + lock_stack *owner = get_current_lock_stack(); + return atomic_read(&owner->nr_signaled) != 0; +} + +/* Before going to sleep we re-check "release lock" requests which might come + from threads with hi-pri lock priorities. 
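+   If such a request has already been recorded in nr_signaled we must not
+   sleep: -E_DEADLOCK is returned and the locking primitive backs out;
+   otherwise 0 is returned and the caller may go to sleep.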
*/ +int reiser4_prepare_to_sleep(lock_stack * owner) +{ + assert("nikita-1847", owner == get_current_lock_stack()); + + /* We return -E_DEADLOCK if one or more "give me the lock" messages are + * counted in nr_signaled */ + if (unlikely(atomic_read(&owner->nr_signaled) != 0)) { + assert("zam-959", !owner->curpri); + return RETERR(-E_DEADLOCK); + } + return 0; +} + +/* Wakes up a single thread */ +void __reiser4_wake_up(lock_stack * owner) +{ + atomic_set(&owner->wakeup, 1); + wake_up(&owner->wait); +} + +/* Puts a thread to sleep */ +void reiser4_go_to_sleep(lock_stack * owner) +{ + /* Well, we might sleep here, so holding of any spinlocks is no-no */ + assert("nikita-3027", reiser4_schedulable()); + + wait_event(owner->wait, atomic_read(&owner->wakeup)); + atomic_set(&owner->wakeup, 0); +} + +int lock_stack_isclean(lock_stack * owner) +{ + if (list_empty_careful(&owner->locks)) { + assert("zam-353", atomic_read(&owner->nr_signaled) == 0); + return 1; + } + + return 0; +} + +#if REISER4_DEBUG + +/* + * debugging functions + */ + +static void list_check(struct list_head *head) +{ + struct list_head *pos; + + list_for_each(pos, head) + assert("", (pos->prev != NULL && pos->next != NULL && + pos->prev->next == pos && pos->next->prev == pos)); +} + +/* check consistency of locking data-structures hanging of the @stack */ +static void check_lock_stack(lock_stack * stack) +{ + spin_lock_stack(stack); + /* check that stack->locks is not corrupted */ + list_check(&stack->locks); + spin_unlock_stack(stack); +} + +/* check consistency of locking data structures */ +void check_lock_data(void) +{ + check_lock_stack(&get_current_context()->stack); +} + +/* check consistency of locking data structures for @node */ +void check_lock_node_data(znode * node) +{ + spin_lock_zlock(&node->lock); + list_check(&node->lock.owners); + list_check(&node->lock.requestors); + spin_unlock_zlock(&node->lock); +} + +/* check that given lock request is dead lock safe. This check is, of course, + * not exhaustive. */ +static int +request_is_deadlock_safe(znode * node, znode_lock_mode mode, + znode_lock_request request) +{ + lock_stack *owner; + + owner = get_current_lock_stack(); + /* + * check that hipri lock request is not issued when there are locked + * nodes at the higher levels. + */ + if (request & ZNODE_LOCK_HIPRI && !(request & ZNODE_LOCK_NONBLOCK) && + znode_get_level(node) != 0) { + lock_handle *item; + + list_for_each_entry(item, &owner->locks, locks_link) { + znode *other; + + other = item->node; + + if (znode_get_level(other) == 0) + continue; + if (znode_get_level(other) > znode_get_level(node)) + return 0; + } + } + return 1; +} + +#endif + +/* return pointer to static storage with name of lock_mode. For + debugging */ +const char *lock_mode_name(znode_lock_mode lock/* lock mode to get name of */) +{ + if (lock == ZNODE_READ_LOCK) + return "read"; + else if (lock == ZNODE_WRITE_LOCK) + return "write"; + else { + static char buf[30]; + + sprintf(buf, "unknown: %i", lock); + return buf; + } +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 79 + End: +*/ diff --git a/fs/reiser4/lock.h b/fs/reiser4/lock.h new file mode 100644 index 000000000000..e74ed8faad58 --- /dev/null +++ b/fs/reiser4/lock.h @@ -0,0 +1,250 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Long term locking data structures. See lock.c for details. 
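+
+   Typical usage, sketched here for orientation only (not copied from any
+   particular caller; handling of the -E_DEADLOCK and -EINVAL return codes
+   is elided):
+
+     lock_handle lh;
+     int ret;
+
+     init_lh(&lh);
+     ret = longterm_lock_znode(&lh, node, ZNODE_READ_LOCK, ZNODE_LOCK_LOPRI);
+     if (ret == 0) {
+             ... use the locked node ...
+             done_lh(&lh);
+     }
+
+   where done_lh() calls longterm_unlock_znode() if the handle is still
+   attached to a node.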
*/ + +#ifndef __LOCK_H__ +#define __LOCK_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/node/node.h" +#include "txnmgr.h" +#include "readahead.h" + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */ +#include <asm/atomic.h> +#include <linux/wait.h> + +/* Per-znode lock object */ +struct zlock { + spinlock_t guard; + /* The number of readers if positive; the number of recursively taken + write locks if negative. Protected by zlock spin lock. */ + int nr_readers; + /* A number of processes (lock_stacks) that have this object + locked with high priority */ + unsigned nr_hipri_owners; + /* A number of attempts to lock znode in high priority direction */ + unsigned nr_hipri_requests; + /* A linked list of lock_handle objects that contains pointers + for all lock_stacks which have this lock object locked */ + unsigned nr_hipri_write_requests; + struct list_head owners; + /* A linked list of lock_stacks that wait for this lock */ + struct list_head requestors; +}; + +static inline void spin_lock_zlock(zlock *lock) +{ + /* check that zlock is not locked */ + assert("", LOCK_CNT_NIL(spin_locked_zlock)); + /* check that spinlocks of lower priorities are not held */ + assert("", LOCK_CNT_NIL(spin_locked_stack)); + + spin_lock(&lock->guard); + + LOCK_CNT_INC(spin_locked_zlock); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_unlock_zlock(zlock *lock) +{ + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_zlock)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_zlock); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&lock->guard); +} + +#define lock_is_locked(lock) ((lock)->nr_readers != 0) +#define lock_is_rlocked(lock) ((lock)->nr_readers > 0) +#define lock_is_wlocked(lock) ((lock)->nr_readers < 0) +#define lock_is_wlocked_once(lock) ((lock)->nr_readers == -1) +#define lock_can_be_rlocked(lock) ((lock)->nr_readers >= 0) +#define lock_mode_compatible(lock, mode) \ + (((mode) == ZNODE_WRITE_LOCK && !lock_is_locked(lock)) || \ + ((mode) == ZNODE_READ_LOCK && lock_can_be_rlocked(lock))) + +/* Since we have R/W znode locks we need additional bidirectional `link' + objects to implement n<->m relationship between lock owners and lock + objects. We call them `lock handles'. + + Locking: see lock.c/"SHORT-TERM LOCKING" +*/ +struct lock_handle { + /* This flag indicates that a signal to yield a lock was passed to + lock owner and counted in owner->nr_signalled + + Locking: this is accessed under spin lock on ->node. 
+ */ + int signaled; + /* A link to owner of a lock */ + lock_stack *owner; + /* A link to znode locked */ + znode *node; + /* A list of all locks for a process */ + struct list_head locks_link; + /* A list of all owners for a znode */ + struct list_head owners_link; +}; + +struct lock_request { + /* A pointer to uninitialized link object */ + lock_handle *handle; + /* A pointer to the object we want to lock */ + znode *node; + /* Lock mode (ZNODE_READ_LOCK or ZNODE_WRITE_LOCK) */ + znode_lock_mode mode; + /* how dispatch_lock_requests() returns lock request result code */ + int ret_code; +}; + +/* A lock stack structure for accumulating locks owned by a process */ +struct lock_stack { + /* A guard lock protecting a lock stack */ + spinlock_t sguard; + /* number of znodes which were requested by high priority processes */ + atomic_t nr_signaled; + /* Current priority of a process + + This is only accessed by the current thread and thus requires no + locking. + */ + int curpri; + /* A list of all locks owned by this process. Elements can be added to + * this list only by the current thread. ->node pointers in this list + * can be only changed by the current thread. */ + struct list_head locks; + /* When lock_stack waits for the lock, it puts itself on double-linked + requestors list of that lock */ + struct list_head requestors_link; + /* Current lock request info. + + This is only accessed by the current thread and thus requires no + locking. + */ + struct lock_request request; + /* the following two fields are the lock stack's + * synchronization object to use with the standard linux/wait.h + * interface. See reiser4_go_to_sleep and __reiser4_wake_up for + * usage details. */ + wait_queue_head_t wait; + atomic_t wakeup; +#if REISER4_DEBUG + int nr_locks; /* number of lock handles in the above list */ +#endif +}; + +/* + User-visible znode locking functions +*/ + +extern int longterm_lock_znode(lock_handle * handle, + znode * node, + znode_lock_mode mode, + znode_lock_request request); + +extern void longterm_unlock_znode(lock_handle * handle); + +extern int reiser4_check_deadlock(void); + +extern lock_stack *get_current_lock_stack(void); + +extern void init_lock_stack(lock_stack * owner); +extern void reiser4_init_lock(zlock * lock); + +static inline void init_lh(lock_handle *lh) +{ +#if REISER4_DEBUG + memset(lh, 0, sizeof *lh); + INIT_LIST_HEAD(&lh->locks_link); + INIT_LIST_HEAD(&lh->owners_link); +#else + lh->node = NULL; +#endif +} + +static inline void done_lh(lock_handle *lh) +{ + assert("zam-342", lh != NULL); + if (lh->node != NULL) + longterm_unlock_znode(lh); +} + +extern void move_lh(lock_handle * new, lock_handle * old); +extern void copy_lh(lock_handle * new, lock_handle * old); + +extern int reiser4_prepare_to_sleep(lock_stack * owner); +extern void reiser4_go_to_sleep(lock_stack * owner); +extern void __reiser4_wake_up(lock_stack * owner); + +extern int lock_stack_isclean(lock_stack * owner); + +/* zlock object state check macros: only used in assertions. Both forms imply + that the lock is held by the current thread. 
*/ +extern int znode_is_write_locked(const znode *); +extern void reiser4_invalidate_lock(lock_handle *); + +/* lock ordering is: first take zlock spin lock, then lock stack spin lock */ +#define spin_ordering_pred_stack(stack) \ + (LOCK_CNT_NIL(spin_locked_stack) && \ + LOCK_CNT_NIL(spin_locked_txnmgr) && \ + LOCK_CNT_NIL(spin_locked_inode) && \ + LOCK_CNT_NIL(rw_locked_cbk_cache) && \ + LOCK_CNT_NIL(spin_locked_super_eflush)) + +static inline void spin_lock_stack(lock_stack *stack) +{ + assert("", spin_ordering_pred_stack(stack)); + spin_lock(&(stack->sguard)); + LOCK_CNT_INC(spin_locked_stack); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_unlock_stack(lock_stack *stack) +{ + assert_spin_locked(&(stack->sguard)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_stack)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + LOCK_CNT_DEC(spin_locked_stack); + LOCK_CNT_DEC(spin_locked); + spin_unlock(&(stack->sguard)); +} + +static inline void reiser4_wake_up(lock_stack * owner) +{ + spin_lock_stack(owner); + __reiser4_wake_up(owner); + spin_unlock_stack(owner); +} + +const char *lock_mode_name(znode_lock_mode lock); + +#if REISER4_DEBUG +extern void check_lock_data(void); +extern void check_lock_node_data(znode * node); +#else +#define check_lock_data() noop +#define check_lock_node_data() noop +#endif + +/* __LOCK_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/oid.c b/fs/reiser4/oid.c new file mode 100644 index 000000000000..623f52c6f9d7 --- /dev/null +++ b/fs/reiser4/oid.c @@ -0,0 +1,141 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "debug.h" +#include "super.h" +#include "txnmgr.h" + +/* we used to have oid allocation plugin. It was removed because it + was recognized as providing unneeded level of abstraction. If one + ever will find it useful - look at yet_unneeded_abstractions/oid +*/ + +/* + * initialize in-memory data for oid allocator at @super. @nr_files and @next + * are provided by disk format plugin that reads them from the disk during + * mount. + */ +int oid_init_allocator(struct super_block *super, oid_t nr_files, oid_t next) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(super); + + sbinfo->next_to_use = next; + sbinfo->oids_in_use = nr_files; + return 0; +} + +/* + * allocate oid and return it. ABSOLUTE_MAX_OID is returned when allocator + * runs out of oids. + */ +oid_t oid_allocate(struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + oid_t oid; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + if (sbinfo->next_to_use != ABSOLUTE_MAX_OID) { + oid = sbinfo->next_to_use++; + sbinfo->oids_in_use++; + } else + oid = ABSOLUTE_MAX_OID; + spin_unlock_reiser4_super(sbinfo); + return oid; +} + +/* + * Tell oid allocator that @oid is now free. + */ +int oid_release(struct super_block *super, oid_t oid UNUSED_ARG) +{ + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + sbinfo->oids_in_use--; + spin_unlock_reiser4_super(sbinfo); + return 0; +} + +/* + * return next @oid that would be allocated (i.e., returned by oid_allocate()) + * without actually allocating it. This is used by disk format plugin to save + * oid allocator state on the disk. 
+ */ +oid_t oid_next(const struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + oid_t oid; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + oid = sbinfo->next_to_use; + spin_unlock_reiser4_super(sbinfo); + return oid; +} + +/* + * returns number of currently used oids. This is used by statfs(2) to report + * number of "inodes" and by disk format plugin to save oid allocator state on + * the disk. + */ +long oids_used(const struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + oid_t used; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + used = sbinfo->oids_in_use; + spin_unlock_reiser4_super(sbinfo); + if (used < (__u64) ((long)~0) >> 1) + return (long)used; + else + return (long)-1; +} + +/* + * Count oid as allocated in atom. This is done after call to oid_allocate() + * at the point when we are irrevocably committed to creation of the new file + * (i.e., when oid allocation cannot be any longer rolled back due to some + * error). + */ +void oid_count_allocated(void) +{ + txn_atom *atom; + + atom = get_current_atom_locked(); + atom->nr_objects_created++; + spin_unlock_atom(atom); +} + +/* + * Count oid as free in atom. This is done after call to oid_release() at the + * point when we are irrevocably committed to the deletion of the file (i.e., + * when oid release cannot be any longer rolled back due to some error). + */ +void oid_count_released(void) +{ + txn_atom *atom; + + atom = get_current_atom_locked(); + atom->nr_objects_deleted++; + spin_unlock_atom(atom); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/page_cache.c b/fs/reiser4/page_cache.c new file mode 100644 index 000000000000..8cca578674c3 --- /dev/null +++ b/fs/reiser4/page_cache.c @@ -0,0 +1,691 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Memory pressure hooks. Fake inodes handling. */ + +/* GLOSSARY + + . Formatted and unformatted nodes. + Elements of reiser4 balanced tree to store data and metadata. + Unformatted nodes are pointed to by extent pointers. Such nodes + are used to store data of large objects. Unlike unformatted nodes, + formatted ones have associated format described by node4X plugin. + + . Jnode (or journal node) + The in-memory header which is used to track formatted and unformatted + nodes, bitmap nodes, etc. In particular, jnodes are used to track + transactional information associated with each block(see reiser4/jnode.c + for details). + + . Znode + The in-memory header which is used to track formatted nodes. Contains + embedded jnode (see reiser4/znode.c for details). +*/ + +/* We store all file system meta data (and data, of course) in the page cache. + + What does this mean? In stead of using bread/brelse we create special + "fake" inode (one per super block) and store content of formatted nodes + into pages bound to this inode in the page cache. In newer kernels bread() + already uses inode attached to block device (bd_inode). Advantage of having + our own fake inode is that we can install appropriate methods in its + address_space operations. Such methods are called by VM on memory pressure + (or during background page flushing) and we can use them to react + appropriately. + + In initial version we only support one block per page. Support for multiple + blocks per page is complicated by relocation. 
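+
+   In outline (a sketch only; the real code is reiser4_init_formatted_fake()
+   and init_fake_inode() below), setting up such a fake inode amounts to
+   grabbing an inode by a reserved object id and installing our own
+   address_space operations on it:
+
+     fake = iget_locked(super, oid_to_ino(fake_ino));
+     fake->i_mapping->a_ops = &formatted_fake_as_ops;
+     unlock_new_inode(fake);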
+ + To each page, used by reiser4, jnode is attached. jnode is analogous to + buffer head. Difference is that jnode is bound to the page permanently: + jnode cannot be removed from memory until its backing page is. + + jnode contain pointer to page (->pg field) and page contain pointer to + jnode in ->private field. Pointer from jnode to page is protected to by + jnode's spinlock and pointer from page to jnode is protected by page lock + (PG_locked bit). Lock ordering is: first take page lock, then jnode spin + lock. To go into reverse direction use jnode_lock_page() function that uses + standard try-lock-and-release device. + + Properties: + + 1. when jnode-to-page mapping is established (by jnode_attach_page()), page + reference counter is increased. + + 2. when jnode-to-page mapping is destroyed (by page_clear_jnode(), page + reference counter is decreased. + + 3. on jload() reference counter on jnode page is increased, page is + kmapped and `referenced'. + + 4. on jrelse() inverse operations are performed. + + 5. kmapping/kunmapping of unformatted pages is done by read/write methods. + + DEADLOCKS RELATED TO MEMORY PRESSURE. [OUTDATED. Only interesting + historically.] + + [In the following discussion, `lock' invariably means long term lock on + znode.] (What about page locks?) + + There is some special class of deadlock possibilities related to memory + pressure. Locks acquired by other reiser4 threads are accounted for in + deadlock prevention mechanism (lock.c), but when ->vm_writeback() is + invoked additional hidden arc is added to the locking graph: thread that + tries to allocate memory waits for ->vm_writeback() to finish. If this + thread keeps lock and ->vm_writeback() tries to acquire this lock, deadlock + prevention is useless. + + Another related problem is possibility for ->vm_writeback() to run out of + memory itself. This is not a problem for ext2 and friends, because their + ->vm_writeback() don't allocate much memory, but reiser4 flush is + definitely able to allocate huge amounts of memory. + + It seems that there is no reliable way to cope with the problems above. In + stead it was decided that ->vm_writeback() (as invoked in the kswapd + context) wouldn't perform any flushing itself, but rather should just wake + up some auxiliary thread dedicated for this purpose (or, the same thread + that does periodic commit of old atoms (ktxnmgrd.c)). + + Details: + + 1. Page is called `reclaimable' against particular reiser4 mount F if this + page can be ultimately released by try_to_free_pages() under presumptions + that: + + a. ->vm_writeback() for F is no-op, and + + b. none of the threads accessing F are making any progress, and + + c. other reiser4 mounts obey the same memory reservation protocol as F + (described below). + + For example, clean un-pinned page, or page occupied by ext2 data are + reclaimable against any reiser4 mount. + + When there is more than one reiser4 mount in a system, condition (c) makes + reclaim-ability not easily verifiable beyond trivial cases mentioned above. + + THIS COMMENT IS VALID FOR "MANY BLOCKS ON PAGE" CASE + + Fake inode is used to bound formatted nodes and each node is indexed within + fake inode by its block number. If block size of smaller than page size, it + may so happen that block mapped to the page with formatted node is occupied + by unformatted node or is unallocated. 
This lead to some complications, + because flushing whole page can lead to an incorrect overwrite of + unformatted node that is moreover, can be cached in some other place as + part of the file body. To avoid this, buffers for unformatted nodes are + never marked dirty. Also pages in the fake are never marked dirty. This + rules out usage of ->writepage() as memory pressure hook. In stead + ->releasepage() is used. + + Josh is concerned that page->buffer is going to die. This should not pose + significant problem though, because we need to add some data structures to + the page anyway (jnode) and all necessary book keeping can be put there. + +*/ + +/* Life cycle of pages/nodes. + + jnode contains reference to page and page contains reference back to + jnode. This reference is counted in page ->count. Thus, page bound to jnode + cannot be released back into free pool. + + 1. Formatted nodes. + + 1. formatted node is represented by znode. When new znode is created its + ->pg pointer is NULL initially. + + 2. when node content is loaded into znode (by call to zload()) for the + first time following happens (in call to ->read_node() or + ->allocate_node()): + + 1. new page is added to the page cache. + + 2. this page is attached to znode and its ->count is increased. + + 3. page is kmapped. + + 3. if more calls to zload() follow (without corresponding zrelses), page + counter is left intact and in its stead ->d_count is increased in znode. + + 4. each call to zrelse decreases ->d_count. When ->d_count drops to zero + ->release_node() is called and page is kunmapped as result. + + 5. at some moment node can be captured by a transaction. Its ->x_count + is then increased by transaction manager. + + 6. if node is removed from the tree (empty node with JNODE_HEARD_BANSHEE + bit set) following will happen (also see comment at the top of znode.c): + + 1. when last lock is released, node will be uncaptured from + transaction. This released reference that transaction manager acquired + at the step 5. + + 2. when last reference is released, zput() detects that node is + actually deleted and calls ->delete_node() + operation. page_cache_delete_node() implementation detaches jnode from + page and releases page. + + 7. otherwise (node wasn't removed from the tree), last reference to + znode will be released after transaction manager committed transaction + node was in. This implies squallocing of this node (see + flush.c). Nothing special happens at this point. Znode is still in the + hash table and page is still attached to it. + + 8. znode is actually removed from the memory because of the memory + pressure, or during umount (znodes_tree_done()). Anyway, znode is + removed by the call to zdrop(). At this moment, page is detached from + znode and removed from the inode address space. 
+ +*/ + +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "super.h" +#include "entd.h" +#include "page_cache.h" +#include "ktxnmgrd.h" + +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/mm.h> /* for struct page */ +#include <linux/swap.h> /* for struct page */ +#include <linux/pagemap.h> +#include <linux/bio.h> +#include <linux/writeback.h> +#include <linux/blkdev.h> + +static struct bio *page_bio(struct page *, jnode * , int rw, gfp_t gfp); + +static struct address_space_operations formatted_fake_as_ops; + +static const oid_t fake_ino = 0x1; +static const oid_t bitmap_ino = 0x2; +static const oid_t cc_ino = 0x3; + +static void +init_fake_inode(struct super_block *super, struct inode *fake, + struct inode **pfake) +{ + assert("nikita-2168", fake->i_state & I_NEW); + fake->i_mapping->a_ops = &formatted_fake_as_ops; + inode_attach_wb(fake, NULL); + *pfake = fake; + /* NOTE-NIKITA something else? */ + unlock_new_inode(fake); +} + +/** + * reiser4_init_formatted_fake - iget inodes for formatted nodes and bitmaps + * @super: super block to init fake inode for + * + * Initializes fake inode to which formatted nodes are bound in the page cache + * and inode for bitmaps. + */ +int reiser4_init_formatted_fake(struct super_block *super) +{ + struct inode *fake; + struct inode *bitmap; + struct inode *cc; + reiser4_super_info_data *sinfo; + + assert("nikita-1703", super != NULL); + + sinfo = get_super_private_nocheck(super); + fake = iget_locked(super, oid_to_ino(fake_ino)); + + if (fake != NULL) { + init_fake_inode(super, fake, &sinfo->fake); + + bitmap = iget_locked(super, oid_to_ino(bitmap_ino)); + if (bitmap != NULL) { + init_fake_inode(super, bitmap, &sinfo->bitmap); + + cc = iget_locked(super, oid_to_ino(cc_ino)); + if (cc != NULL) { + init_fake_inode(super, cc, &sinfo->cc); + return 0; + } else { + iput(sinfo->fake); + iput(sinfo->bitmap); + sinfo->fake = NULL; + sinfo->bitmap = NULL; + } + } else { + iput(sinfo->fake); + sinfo->fake = NULL; + } + } + return RETERR(-ENOMEM); +} + +/** + * reiser4_done_formatted_fake - release inode used by formatted nodes and bitmaps + * @super: super block to init fake inode for + * + * Releases inodes which were used as address spaces of bitmap and formatted + * nodes. + */ +void reiser4_done_formatted_fake(struct super_block *super) +{ + reiser4_super_info_data *sinfo; + + sinfo = get_super_private_nocheck(super); + + if (sinfo->fake != NULL) { + iput(sinfo->fake); + sinfo->fake = NULL; + } + + if (sinfo->bitmap != NULL) { + iput(sinfo->bitmap); + sinfo->bitmap = NULL; + } + + if (sinfo->cc != NULL) { + iput(sinfo->cc); + sinfo->cc = NULL; + } + return; +} + +void reiser4_wait_page_writeback(struct page *page) +{ + assert("zam-783", PageLocked(page)); + + do { + unlock_page(page); + wait_on_page_writeback(page); + lock_page(page); + } while (PageWriteback(page)); +} + +/* return tree @page is in */ +reiser4_tree *reiser4_tree_by_page(const struct page *page/* page to query */) +{ + assert("nikita-2461", page != NULL); + return &get_super_private(page->mapping->host->i_sb)->tree; +} + +/* completion handler for single page bio-based read. + + mpage_end_io_read() would also do. But it's static. 
+ +*/ +static void end_bio_single_page_read(struct bio *bio) +{ + struct page *page; + + page = bio->bi_io_vec[0].bv_page; + + if (!bio->bi_status) + SetPageUptodate(page); + else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + bio_put(bio); +} + +/* completion handler for single page bio-based write. + + mpage_end_io_write() would also do. But it's static. + +*/ +static void end_bio_single_page_write(struct bio *bio) +{ + struct page *page; + + page = bio->bi_io_vec[0].bv_page; + + if (bio->bi_status) + SetPageError(page); + end_page_writeback(page); + bio_put(bio); +} + +/* ->readpage() method for formatted nodes */ +static int formatted_readpage(struct file *f UNUSED_ARG, + struct page *page/* page to read */) +{ + assert("nikita-2412", PagePrivate(page) && jprivate(page)); + return reiser4_page_io(page, jprivate(page), READ, + reiser4_ctx_gfp_mask_get()); +} + +/** + * reiser4_page_io - submit single-page bio request + * @page: page to perform io for + * @node: jnode of page + * @rw: read or write + * @gfp: gfp mask for bio allocation + * + * Submits single page read or write. + */ +int reiser4_page_io(struct page *page, jnode *node, int rw, gfp_t gfp) +{ + struct bio *bio; + int result; + + assert("nikita-2094", page != NULL); + assert("nikita-2226", PageLocked(page)); + assert("nikita-2634", node != NULL); + assert("nikita-2893", rw == READ || rw == WRITE); + + if (rw) { + if (unlikely(page->mapping->host->i_sb->s_flags & MS_RDONLY)) { + unlock_page(page); + return 0; + } + } + + bio = page_bio(page, node, rw, gfp); + if (!IS_ERR(bio)) { + if (rw == WRITE) { + set_page_writeback(page); + unlock_page(page); + } + bio_set_op_attrs(bio, rw, 0); + submit_bio(bio); + result = 0; + } else { + unlock_page(page); + result = PTR_ERR(bio); + } + + return result; +} + +/* helper function to construct bio for page */ +static struct bio *page_bio(struct page *page, jnode * node, int rw, gfp_t gfp) +{ + struct bio *bio; + assert("nikita-2092", page != NULL); + assert("nikita-2633", node != NULL); + + /* Simple implementation in the assumption that blocksize == pagesize. + + We only have to submit one block, but submit_bh() will allocate bio + anyway, so lets use all the bells-and-whistles of bio code. + */ + + bio = bio_alloc(gfp, 1); + if (bio != NULL) { + int blksz; + struct super_block *super; + reiser4_block_nr blocknr; + + super = page->mapping->host->i_sb; + assert("nikita-2029", super != NULL); + blksz = super->s_blocksize; + assert("nikita-2028", blksz == (int)PAGE_SIZE); + + spin_lock_jnode(node); + blocknr = *jnode_get_io_block(node); + spin_unlock_jnode(node); + + assert("nikita-2275", blocknr != (reiser4_block_nr) 0); + assert("nikita-2276", !reiser4_blocknr_is_fake(&blocknr)); + + bio_set_dev(bio, super->s_bdev); + /* fill bio->bi_iter.bi_sector before calling bio_add_page(), because + * q->merge_bvec_fn may want to inspect it (see + * drivers/md/linear.c:linear_mergeable_bvec() for example. */ + bio->bi_iter.bi_sector = blocknr * (blksz >> 9); + + if (!bio_add_page(bio, page, blksz, 0)) { + warning("nikita-3452", + "Single page bio cannot be constructed"); + return ERR_PTR(RETERR(-EINVAL)); + } + + /* bio -> bi_idx is filled by bio_init() */ + bio->bi_end_io = (rw == READ) ? 
+ end_bio_single_page_read : end_bio_single_page_write; + + return bio; + } else + return ERR_PTR(RETERR(-ENOMEM)); +} + +#if 0 +static int can_hit_entd(reiser4_context *ctx, struct super_block *s) +{ + if (ctx == NULL || ((unsigned long)ctx->magic) != context_magic) + return 1; + if (ctx->super != s) + return 1; + if (get_super_private(s)->entd.tsk == current) + return 0; + if (!lock_stack_isclean(&ctx->stack)) + return 0; + if (ctx->trans->atom != NULL) + return 0; + return 1; +} +#endif + +/** + * reiser4_writepage - writepage of struct address_space_operations + * @page: page to write + * @wbc: + * + * + */ +/* Common memory pressure notification. */ +int reiser4_writepage(struct page *page, + struct writeback_control *wbc) +{ + /* + * assert("edward-1562", + * can_hit_entd(get_current_context_check(), sb)); + */ + assert("vs-828", PageLocked(page)); + + return write_page_by_ent(page, wbc); +} + +/* ->set_page_dirty() method of formatted address_space */ +static int formatted_set_page_dirty(struct page *page) +{ + assert("nikita-2173", page != NULL); + BUG(); + return __set_page_dirty_nobuffers(page); +} + +/* writepages method of address space operations in reiser4 is used to involve + into transactions pages which are dirtied via mmap. Only regular files can + have such pages. Fake inode is used to access formatted nodes via page + cache. As formatted nodes can never be mmaped, fake inode's writepages has + nothing to do */ +static int +writepages_fake(struct address_space *mapping, struct writeback_control *wbc) +{ + return 0; +} + +/* address space operations for the fake inode */ +static struct address_space_operations formatted_fake_as_ops = { + /* Perform a writeback of a single page as a memory-freeing + * operation. */ + .writepage = reiser4_writepage, + /* this is called to read formatted node */ + .readpage = formatted_readpage, + /* ->sync_page() method of fake inode address space operations. Called + from wait_on_page() and lock_page(). + + This is most annoyingly misnomered method. Actually it is called + from wait_on_page_bit() and lock_page() and its purpose is to + actually start io by jabbing device drivers. + .sync_page = block_sync_page, + */ + /* Write back some dirty pages from this mapping. Called from sync. + called during sync (pdflush) */ + .writepages = writepages_fake, + /* Set a page dirty */ + .set_page_dirty = formatted_set_page_dirty, + /* used for read-ahead. Not applicable */ + .readpages = NULL, + .write_begin = NULL, + .write_end = NULL, + .bmap = NULL, + /* called just before page is being detached from inode mapping and + removed from memory. Called on truncate, cut/squeeze, and + umount. */ + .invalidatepage = reiser4_invalidatepage, + /* this is called by shrink_cache() so that file system can try to + release objects (jnodes, buffers, journal heads) attached to page + and, may be made page itself free-able. + */ + .releasepage = reiser4_releasepage, + .direct_IO = NULL, + .migratepage = reiser4_migratepage +}; + +/* called just before page is released (no longer used by reiser4). Callers: + jdelete() and extent2tail(). 
*/ +void reiser4_drop_page(struct page *page) +{ + assert("nikita-2181", PageLocked(page)); + clear_page_dirty_for_io(page); + ClearPageUptodate(page); +#if defined(PG_skipped) + ClearPageSkipped(page); +#endif + unlock_page(page); +} + +#define JNODE_GANG_SIZE (16) + +/* find all jnodes from range specified and invalidate them */ +static int +truncate_jnodes_range(struct inode *inode, pgoff_t from, pgoff_t count) +{ + reiser4_inode *info; + int truncated_jnodes; + reiser4_tree *tree; + unsigned long index; + unsigned long end; + + if (inode_file_plugin(inode) == + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)) + /* + * No need to get rid of jnodes here: if the single jnode of + * page cluster did not have page, then it was found and killed + * before in + * truncate_complete_page_cluster()->jput()->jput_final(), + * otherwise it will be dropped by reiser4_invalidatepage() + */ + return 0; + truncated_jnodes = 0; + + info = reiser4_inode_data(inode); + tree = reiser4_tree_by_inode(inode); + + index = from; + end = from + count; + + while (1) { + jnode *gang[JNODE_GANG_SIZE]; + int taken; + int i; + jnode *node; + + assert("nikita-3466", index <= end); + + read_lock_tree(tree); + taken = + radix_tree_gang_lookup(jnode_tree_by_reiser4_inode(info), + (void **)gang, index, + JNODE_GANG_SIZE); + for (i = 0; i < taken; ++i) { + node = gang[i]; + if (index_jnode(node) < end) + jref(node); + else + gang[i] = NULL; + } + read_unlock_tree(tree); + + for (i = 0; i < taken; ++i) { + node = gang[i]; + if (node != NULL) { + index = max(index, index_jnode(node)); + spin_lock_jnode(node); + assert("edward-1457", node->pg == NULL); + /* this is always called after + truncate_inode_pages_range(). Therefore, here + jnode can not have page. New pages can not be + created because truncate_jnodes_range goes + under exclusive access on file obtained, + where as new page creation requires + non-exclusive access obtained */ + JF_SET(node, JNODE_HEARD_BANSHEE); + reiser4_uncapture_jnode(node); + unhash_unformatted_jnode(node); + truncated_jnodes++; + jput(node); + } else + break; + } + if (i != taken || taken == 0) + break; + } + return truncated_jnodes; +} + +/* Truncating files in reiser4: problems and solutions. + + VFS calls fs's truncate after it has called truncate_inode_pages() + to get rid of pages corresponding to part of file being truncated. + In reiser4 it may cause existence of unallocated extents which do + not have jnodes. Flush code does not expect that. Solution of this + problem is straightforward. As vfs's truncate is implemented using + setattr operation, it seems reasonable to have ->setattr() that + will cut file body. However, flush code also does not expect dirty + pages without parent items, so it is impossible to cut all items, + then truncate all pages in two steps. We resolve this problem by + cutting items one-by-one. Each such fine-grained step performed + under longterm znode lock calls at the end ->kill_hook() method of + a killed item to remove its binded pages and jnodes. + + The following function is a common part of mentioned kill hooks. + Also, this is called before tail-to-extent conversion (to not manage + few copies of the data). 
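+
+ As an informal illustration (the page range is hypothetical; only the
+ signature of the helper below is assumed), a kill hook that removed an
+ item covering pages [from, from + count) would finish with roughly:
+
+     reiser4_invalidate_pages(inode->i_mapping, from, count, 0);
+
+ which unmaps mmapped copies, truncates the page cache range and then
+ invalidates the corresponding jnodes, in that order.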
+*/ +void reiser4_invalidate_pages(struct address_space *mapping, pgoff_t from, + unsigned long count, int even_cows) +{ + loff_t from_bytes, count_bytes; + + if (count == 0) + return; + from_bytes = ((loff_t) from) << PAGE_SHIFT; + count_bytes = ((loff_t) count) << PAGE_SHIFT; + + unmap_mapping_range(mapping, from_bytes, count_bytes, even_cows); + truncate_inode_pages_range(mapping, from_bytes, + from_bytes + count_bytes - 1); + truncate_jnodes_range(mapping->host, from, count); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/page_cache.h b/fs/reiser4/page_cache.h new file mode 100644 index 000000000000..32106f17b454 --- /dev/null +++ b/fs/reiser4/page_cache.h @@ -0,0 +1,64 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ +/* Memory pressure hooks. Fake inodes handling. See page_cache.c. */ + +#if !defined(__REISER4_PAGE_CACHE_H__) +#define __REISER4_PAGE_CACHE_H__ + +#include "forward.h" +#include "context.h" /* for reiser4_ctx_gfp_mask_get() */ + +#include <linux/fs.h> /* for struct super_block, address_space */ +#include <linux/mm.h> /* for struct page */ +#include <linux/pagemap.h> /* for lock_page() */ +#include <linux/vmalloc.h> /* for __vmalloc() */ + +extern int reiser4_init_formatted_fake(struct super_block *); +extern void reiser4_done_formatted_fake(struct super_block *); + +extern reiser4_tree *reiser4_tree_by_page(const struct page *); + +extern void reiser4_wait_page_writeback(struct page *); +static inline void lock_and_wait_page_writeback(struct page *page) +{ + lock_page(page); + if (unlikely(PageWriteback(page))) + reiser4_wait_page_writeback(page); +} + +#define jprivate(page) ((jnode *)page_private(page)) + +extern int reiser4_page_io(struct page *, jnode *, int rw, gfp_t); +extern void reiser4_drop_page(struct page *); +extern void reiser4_invalidate_pages(struct address_space *, pgoff_t from, + unsigned long count, int even_cows); +extern void capture_reiser4_inodes(struct super_block *, + struct writeback_control *); +static inline void *reiser4_vmalloc(unsigned long size) +{ + return __vmalloc(size, + reiser4_ctx_gfp_mask_get() | __GFP_HIGHMEM, + PAGE_KERNEL); +} + +#define PAGECACHE_TAG_REISER4_MOVED PAGECACHE_TAG_DIRTY + +#if REISER4_DEBUG +extern void print_page(const char *prefix, struct page *page); +#else +#define print_page(prf, p) noop +#endif + +/* __REISER4_PAGE_CACHE_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/Makefile b/fs/reiser4/plugin/Makefile new file mode 100644 index 000000000000..4b2c9f8bab6e --- /dev/null +++ b/fs/reiser4/plugin/Makefile @@ -0,0 +1,26 @@ +obj-$(CONFIG_REISER4_FS) += plugins.o + +plugins-objs := \ + plugin.o \ + plugin_set.o \ + object.o \ + inode_ops.o \ + inode_ops_rename.o \ + file_ops.o \ + file_ops_readdir.o \ + file_plugin_common.o \ + dir_plugin_common.o \ + digest.o \ + hash.o \ + fibration.o \ + tail_policy.o \ + regular.o + +obj-$(CONFIG_REISER4_FS) += item/ +obj-$(CONFIG_REISER4_FS) += file/ +obj-$(CONFIG_REISER4_FS) += dir/ +obj-$(CONFIG_REISER4_FS) += node/ +obj-$(CONFIG_REISER4_FS) += compress/ +obj-$(CONFIG_REISER4_FS) += space/ +obj-$(CONFIG_REISER4_FS) += disk_format/ +obj-$(CONFIG_REISER4_FS) += security/ diff --git a/fs/reiser4/plugin/cluster.c b/fs/reiser4/plugin/cluster.c new file mode 100644 index 000000000000..f54b19ea6887 --- /dev/null +++ b/fs/reiser4/plugin/cluster.c @@ -0,0 +1,72 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Contains reiser4 cluster plugins (see + http://www.namesys.com/cryptcompress_design.html + "Concepts of clustering" for details). */ + +#include "plugin_header.h" +#include "plugin.h" +#include "../inode.h" + +static int change_cluster(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + assert("edward-1324", inode != NULL); + assert("edward-1325", plugin != NULL); + assert("edward-1326", is_reiser4_inode(inode)); + assert("edward-1327", plugin->h.type_id == REISER4_CLUSTER_PLUGIN_TYPE); + + /* Can't change the cluster plugin for already existent regular files */ + if (!plugin_of_group(inode_file_plugin(inode), REISER4_DIRECTORY_FILE)) + return RETERR(-EINVAL); + + /* If matches, nothing to change. */ + if (inode_hash_plugin(inode) != NULL && + inode_hash_plugin(inode)->h.id == plugin->h.id) + return 0; + + return aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_CLUSTER, plugin); +} + +static reiser4_plugin_ops cluster_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = &change_cluster +}; + +#define SUPPORT_CLUSTER(SHIFT, ID, LABEL, DESC) \ + [CLUSTER_ ## ID ## _ID] = { \ + .h = { \ + .type_id = REISER4_CLUSTER_PLUGIN_TYPE, \ + .id = CLUSTER_ ## ID ## _ID, \ + .pops = &cluster_plugin_ops, \ + .label = LABEL, \ + .desc = DESC, \ + .linkage = {NULL, NULL} \ + }, \ + .shift = SHIFT \ + } + +cluster_plugin cluster_plugins[LAST_CLUSTER_ID] = { + SUPPORT_CLUSTER(16, 64K, "64K", "Large"), + SUPPORT_CLUSTER(15, 32K, "32K", "Big"), + SUPPORT_CLUSTER(14, 16K, "16K", "Average"), + SUPPORT_CLUSTER(13, 8K, "8K", "Small"), + SUPPORT_CLUSTER(12, 4K, "4K", "Minimal") +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/cluster.h b/fs/reiser4/plugin/cluster.h new file mode 100644 index 000000000000..6bf931609fca --- /dev/null +++ b/fs/reiser4/plugin/cluster.h @@ -0,0 +1,410 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* This file contains size/offset translators, modulators + and other helper functions. 
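+
+   As a worked example (numbers chosen only for illustration): with 4K pages
+   and a 64K logical cluster (cluster shift 16), cluster_nrpages() below is
+   16, page index 37 lies in logical cluster pg_to_clust(37) = 37 >> 4 = 2,
+   and that cluster starts at page clust_to_pg(2) = 32.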
*/ + +#if !defined(__FS_REISER4_CLUSTER_H__) +#define __FS_REISER4_CLUSTER_H__ + +#include "../inode.h" + +static inline int inode_cluster_shift(struct inode *inode) +{ + assert("edward-92", inode != NULL); + assert("edward-93", reiser4_inode_data(inode) != NULL); + + return inode_cluster_plugin(inode)->shift; +} + +static inline unsigned cluster_nrpages_shift(struct inode *inode) +{ + return inode_cluster_shift(inode) - PAGE_SHIFT; +} + +/* cluster size in page units */ +static inline unsigned cluster_nrpages(struct inode *inode) +{ + return 1U << cluster_nrpages_shift(inode); +} + +static inline size_t inode_cluster_size(struct inode *inode) +{ + assert("edward-96", inode != NULL); + + return 1U << inode_cluster_shift(inode); +} + +static inline cloff_t pg_to_clust(pgoff_t idx, struct inode *inode) +{ + return idx >> cluster_nrpages_shift(inode); +} + +static inline pgoff_t clust_to_pg(cloff_t idx, struct inode *inode) +{ + return idx << cluster_nrpages_shift(inode); +} + +static inline pgoff_t pg_to_clust_to_pg(pgoff_t idx, struct inode *inode) +{ + return clust_to_pg(pg_to_clust(idx, inode), inode); +} + +static inline pgoff_t off_to_pg(loff_t off) +{ + return (off >> PAGE_SHIFT); +} + +static inline loff_t pg_to_off(pgoff_t idx) +{ + return ((loff_t) (idx) << PAGE_SHIFT); +} + +static inline cloff_t off_to_clust(loff_t off, struct inode *inode) +{ + return off >> inode_cluster_shift(inode); +} + +static inline loff_t clust_to_off(cloff_t idx, struct inode *inode) +{ + return (loff_t) idx << inode_cluster_shift(inode); +} + +static inline loff_t off_to_clust_to_off(loff_t off, struct inode *inode) +{ + return clust_to_off(off_to_clust(off, inode), inode); +} + +static inline pgoff_t off_to_clust_to_pg(loff_t off, struct inode *inode) +{ + return clust_to_pg(off_to_clust(off, inode), inode); +} + +static inline unsigned off_to_pgoff(loff_t off) +{ + return off & (PAGE_SIZE - 1); +} + +static inline unsigned off_to_cloff(loff_t off, struct inode *inode) +{ + return off & ((loff_t) (inode_cluster_size(inode)) - 1); +} + +static inline pgoff_t offset_in_clust(struct page *page) +{ + assert("edward-1488", page != NULL); + assert("edward-1489", page->mapping != NULL); + + return page_index(page) & ((cluster_nrpages(page->mapping->host)) - 1); +} + +static inline int first_page_in_cluster(struct page *page) +{ + return offset_in_clust(page) == 0; +} + +static inline int last_page_in_cluster(struct page *page) +{ + return offset_in_clust(page) == + cluster_nrpages(page->mapping->host) - 1; +} + +static inline unsigned +pg_to_off_to_cloff(unsigned long idx, struct inode *inode) +{ + return off_to_cloff(pg_to_off(idx), inode); +} + +/*********************** Size translators **************************/ + +/* Translate linear size. + * New units are (1 << @blk_shift) times larger, then old ones. 
+ * In other words, calculate number of logical blocks, occupied + * by @count elements + */ +static inline unsigned long size_in_blocks(loff_t count, unsigned blkbits) +{ + return (count + (1UL << blkbits) - 1) >> blkbits; +} + +/* size in pages */ +static inline pgoff_t size_in_pages(loff_t size) +{ + return size_in_blocks(size, PAGE_SHIFT); +} + +/* size in logical clusters */ +static inline cloff_t size_in_lc(loff_t size, struct inode *inode) +{ + return size_in_blocks(size, inode_cluster_shift(inode)); +} + +/* size in pages to the size in page clusters */ +static inline cloff_t sp_to_spcl(pgoff_t size, struct inode *inode) +{ + return size_in_blocks(size, cluster_nrpages_shift(inode)); +} + +/*********************** Size modulators ***************************/ + +/* + Modulate linear size by nominated block size and offset. + + The "finite" function (which is zero almost everywhere). + How much is a height of the figure at a position @pos, + when trying to construct rectangle of height (1 << @blkbits), + and square @size. + + ****** + ******* + ******* + ******* + ----------> pos +*/ +static inline unsigned __mbb(loff_t size, unsigned long pos, int blkbits) +{ + unsigned end = size >> blkbits; + if (pos < end) + return 1U << blkbits; + if (unlikely(pos > end)) + return 0; + return size & ~(~0ull << blkbits); +} + +/* the same as above, but block size is page size */ +static inline unsigned __mbp(loff_t size, pgoff_t pos) +{ + return __mbb(size, pos, PAGE_SHIFT); +} + +/* number of file's bytes in the nominated logical cluster */ +static inline unsigned lbytes(cloff_t index, struct inode *inode) +{ + return __mbb(i_size_read(inode), index, inode_cluster_shift(inode)); +} + +/* number of file's bytes in the nominated page */ +static inline unsigned pbytes(pgoff_t index, struct inode *inode) +{ + return __mbp(i_size_read(inode), index); +} + +/** + * number of pages occuped by @win->count bytes starting from + * @win->off at logical cluster defined by @win. This is exactly + * a number of pages to be modified and dirtied in any cluster operation. 
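+ *
+ * As a worked example (4K pages, i.e. PAGE_SHIFT == 12; numbers chosen
+ * only for illustration): win->off = 5000 and win->count = 9000 cover
+ * bytes 5000..13999, i.e. pages 1..3, and the expression below yields
+ * ((5000 + 9000 + 4095) >> 12) - (5000 >> 12) = 4 - 1 = 3 pages.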
+ */ +static inline pgoff_t win_count_to_nrpages(struct reiser4_slide * win) +{ + return ((win->off + win->count + + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT) - + off_to_pg(win->off); +} + +/* return true, if logical cluster is not occupied by the file */ +static inline int new_logical_cluster(struct cluster_handle *clust, + struct inode *inode) +{ + return clust_to_off(clust->index, inode) >= i_size_read(inode); +} + +/* return true, if pages @p1 and @p2 are of the same page cluster */ +static inline int same_page_cluster(struct page *p1, struct page *p2) +{ + assert("edward-1490", p1 != NULL); + assert("edward-1491", p2 != NULL); + assert("edward-1492", p1->mapping != NULL); + assert("edward-1493", p2->mapping != NULL); + + return (pg_to_clust(page_index(p1), p1->mapping->host) == + pg_to_clust(page_index(p2), p2->mapping->host)); +} + +static inline int cluster_is_complete(struct cluster_handle *clust, + struct inode *inode) +{ + return clust->tc.lsize == inode_cluster_size(inode); +} + +static inline void reiser4_slide_init(struct reiser4_slide *win) +{ + assert("edward-1084", win != NULL); + memset(win, 0, sizeof *win); +} + +static inline tfm_action +cluster_get_tfm_act(struct tfm_cluster *tc) +{ + assert("edward-1356", tc != NULL); + return tc->act; +} + +static inline void +cluster_set_tfm_act(struct tfm_cluster *tc, tfm_action act) +{ + assert("edward-1356", tc != NULL); + tc->act = act; +} + +static inline void cluster_init_act(struct cluster_handle *clust, + tfm_action act, + struct reiser4_slide *window) +{ + assert("edward-84", clust != NULL); + memset(clust, 0, sizeof *clust); + cluster_set_tfm_act(&clust->tc, act); + clust->dstat = INVAL_DISK_CLUSTER; + clust->win = window; +} + +static inline void cluster_init_read(struct cluster_handle *clust, + struct reiser4_slide *window) +{ + cluster_init_act(clust, TFMA_READ, window); +} + +static inline void cluster_init_write(struct cluster_handle *clust, + struct reiser4_slide *window) +{ + cluster_init_act(clust, TFMA_WRITE, window); +} + +/* true if @p1 and @p2 are items of the same disk cluster */ +static inline int same_disk_cluster(const coord_t *p1, const coord_t *p2) +{ + /* drop this if you have other items to aggregate */ + assert("edward-1494", item_id_by_coord(p1) == CTAIL_ID); + + return item_plugin_by_coord(p1)->b.mergeable(p1, p2); +} + +static inline int dclust_get_extension_dsize(hint_t *hint) +{ + return hint->ext_coord.extension.ctail.dsize; +} + +static inline void dclust_set_extension_dsize(hint_t *hint, int dsize) +{ + hint->ext_coord.extension.ctail.dsize = dsize; +} + +static inline int dclust_get_extension_shift(hint_t *hint) +{ + return hint->ext_coord.extension.ctail.shift; +} + +static inline int dclust_get_extension_ncount(hint_t *hint) +{ + return hint->ext_coord.extension.ctail.ncount; +} + +static inline void dclust_inc_extension_ncount(hint_t *hint) +{ + hint->ext_coord.extension.ctail.ncount++; +} + +static inline void dclust_init_extension(hint_t *hint) +{ + memset(&hint->ext_coord.extension.ctail, 0, + sizeof(hint->ext_coord.extension.ctail)); +} + +static inline int hint_is_unprepped_dclust(hint_t *hint) +{ + assert("edward-1451", hint_is_valid(hint)); + return dclust_get_extension_shift(hint) == (int)UCTAIL_SHIFT; +} + +static inline void coord_set_between_clusters(coord_t *coord) +{ +#if REISER4_DEBUG + int result; + result = zload(coord->node); + assert("edward-1296", !result); +#endif + if (!coord_is_between_items(coord)) { + coord->between = AFTER_ITEM; + coord->unit_pos = 0; + } +#if 
REISER4_DEBUG + zrelse(coord->node); +#endif +} + +int reiser4_inflate_cluster(struct cluster_handle *, struct inode *); +int find_disk_cluster(struct cluster_handle *, struct inode *, int read, + znode_lock_mode mode); +int checkout_logical_cluster(struct cluster_handle *, jnode * , struct inode *); +int reiser4_deflate_cluster(struct cluster_handle *, struct inode *); +void truncate_complete_page_cluster(struct inode *inode, cloff_t start, + int even_cows); +void invalidate_hint_cluster(struct cluster_handle *clust); +int get_disk_cluster_locked(struct cluster_handle *clust, struct inode *inode, + znode_lock_mode lock_mode); +void reset_cluster_params(struct cluster_handle *clust); +int set_cluster_by_page(struct cluster_handle *clust, struct page *page, + int count); +int prepare_page_cluster(struct inode *inode, struct cluster_handle *clust, + rw_op rw); +void __put_page_cluster(int from, int count, struct page **pages, + struct inode *inode); +void put_page_cluster(struct cluster_handle *clust, + struct inode *inode, rw_op rw); +void put_cluster_handle(struct cluster_handle *clust); +int grab_tfm_stream(struct inode *inode, struct tfm_cluster *tc, + tfm_stream_id id); +int tfm_cluster_is_uptodate(struct tfm_cluster *tc); +void tfm_cluster_set_uptodate(struct tfm_cluster *tc); +void tfm_cluster_clr_uptodate(struct tfm_cluster *tc); + +/* move cluster handle to the target position + specified by the page of index @pgidx */ +static inline void move_cluster_forward(struct cluster_handle *clust, + struct inode *inode, + pgoff_t pgidx) +{ + assert("edward-1297", clust != NULL); + assert("edward-1298", inode != NULL); + + reset_cluster_params(clust); + if (clust->index_valid && + /* Hole in the indices. Hint became invalid and can not be + used by find_cluster_item() even if seal/node versions + will coincide */ + pg_to_clust(pgidx, inode) != clust->index + 1) { + reiser4_unset_hint(clust->hint); + invalidate_hint_cluster(clust); + } + clust->index = pg_to_clust(pgidx, inode); + clust->index_valid = 1; +} + +static inline int alloc_clust_pages(struct cluster_handle *clust, + struct inode *inode) +{ + assert("edward-791", clust != NULL); + assert("edward-792", inode != NULL); + clust->pages = + kmalloc(sizeof(*clust->pages) << inode_cluster_shift(inode), + reiser4_ctx_gfp_mask_get()); + if (!clust->pages) + return -ENOMEM; + return 0; +} + +static inline void free_clust_pages(struct cluster_handle *clust) +{ + kfree(clust->pages); +} + +#endif /* __FS_REISER4_CLUSTER_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/Makefile b/fs/reiser4/plugin/compress/Makefile new file mode 100644 index 000000000000..7fa4adb9621a --- /dev/null +++ b/fs/reiser4/plugin/compress/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_REISER4_FS) += compress_plugins.o + +compress_plugins-objs := \ + compress.o \ + compress_mode.o diff --git a/fs/reiser4/plugin/compress/compress.c b/fs/reiser4/plugin/compress/compress.c new file mode 100644 index 000000000000..ef568a099090 --- /dev/null +++ b/fs/reiser4/plugin/compress/compress.c @@ -0,0 +1,521 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* reiser4 compression transform plugins */ + +#include "../../debug.h" +#include "../../inode.h" +#include "../plugin.h" + +#include <linux/lzo.h> +#include <linux/zstd.h> +#include <linux/zlib.h> +#include <linux/types.h> +#include <linux/hardirq.h> + +static int change_compression(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + assert("edward-1316", inode != NULL); + assert("edward-1317", plugin != NULL); + assert("edward-1318", is_reiser4_inode(inode)); + assert("edward-1319", + plugin->h.type_id == REISER4_COMPRESSION_PLUGIN_TYPE); + + /* cannot change compression plugin of already existing regular object */ + if (!plugin_of_group(inode_file_plugin(inode), REISER4_DIRECTORY_FILE)) + return RETERR(-EINVAL); + + /* If matches, nothing to change. */ + if (inode_hash_plugin(inode) != NULL && + inode_hash_plugin(inode)->h.id == plugin->h.id) + return 0; + + return aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_COMPRESSION, plugin); +} + +static reiser4_plugin_ops compression_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = &change_compression +}; + +/******************************************************************************/ +/* gzip1 compression */ +/******************************************************************************/ + +#define GZIP1_DEF_LEVEL Z_BEST_SPEED +#define GZIP1_DEF_WINBITS 15 +#define GZIP1_DEF_MEMLEVEL MAX_MEM_LEVEL + +static int gzip1_init(void) +{ + return 0; +} + +static int gzip1_overrun(unsigned src_len UNUSED_ARG) +{ + return 0; +} + +static coa_t gzip1_alloc(tfm_action act) +{ + coa_t coa = NULL; + int ret = 0; + switch (act) { + case TFMA_WRITE: /* compress */ + coa = reiser4_vmalloc(zlib_deflate_workspacesize(MAX_WBITS, + MAX_MEM_LEVEL)); + if (!coa) { + ret = -ENOMEM; + break; + } + break; + case TFMA_READ: /* decompress */ + coa = reiser4_vmalloc(zlib_inflate_workspacesize()); + if (!coa) { + ret = -ENOMEM; + break; + } + break; + default: + impossible("edward-767", "unknown tfm action"); + } + if (ret) + return ERR_PTR(ret); + return coa; +} + +static void gzip1_free(coa_t coa, tfm_action act) +{ + assert("edward-769", coa != NULL); + + switch (act) { + case TFMA_WRITE: /* compress */ + vfree(coa); + break; + case TFMA_READ: /* decompress */ + vfree(coa); + break; + default: + impossible("edward-770", "unknown tfm action"); + } + return; +} + +static int gzip1_min_size_deflate(void) +{ + return 64; +} + +static void +gzip1_compress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int ret = 0; + struct z_stream_s stream; + + assert("edward-842", coa != NULL); + assert("edward-875", src_len != 0); + + stream.workspace = coa; + ret = zlib_deflateInit2(&stream, 
GZIP1_DEF_LEVEL, Z_DEFLATED, + -GZIP1_DEF_WINBITS, GZIP1_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); + if (ret != Z_OK) { + warning("edward-771", "zlib_deflateInit2 returned %d\n", ret); + goto rollback; + } + ret = zlib_deflateReset(&stream); + if (ret != Z_OK) { + warning("edward-772", "zlib_deflateReset returned %d\n", ret); + goto rollback; + } + stream.next_in = src_first; + stream.avail_in = src_len; + stream.next_out = dst_first; + stream.avail_out = *dst_len; + + ret = zlib_deflate(&stream, Z_FINISH); + if (ret != Z_STREAM_END) { + if (ret != Z_OK) + warning("edward-773", + "zlib_deflate returned %d\n", ret); + goto rollback; + } + *dst_len = stream.total_out; + return; + rollback: + *dst_len = src_len; + return; +} + +static void +gzip1_decompress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int ret = 0; + struct z_stream_s stream; + + assert("edward-843", coa != NULL); + assert("edward-876", src_len != 0); + + stream.workspace = coa; + ret = zlib_inflateInit2(&stream, -GZIP1_DEF_WINBITS); + if (ret != Z_OK) { + warning("edward-774", "zlib_inflateInit2 returned %d\n", ret); + return; + } + ret = zlib_inflateReset(&stream); + if (ret != Z_OK) { + warning("edward-775", "zlib_inflateReset returned %d\n", ret); + return; + } + + stream.next_in = src_first; + stream.avail_in = src_len; + stream.next_out = dst_first; + stream.avail_out = *dst_len; + + ret = zlib_inflate(&stream, Z_SYNC_FLUSH); + /* + * Work around a bug in zlib, which sometimes wants to taste an extra + * byte when being used in the (undocumented) raw deflate mode. + * (From USAGI). + */ + if (ret == Z_OK && !stream.avail_in && stream.avail_out) { + u8 zerostuff = 0; + stream.next_in = &zerostuff; + stream.avail_in = 1; + ret = zlib_inflate(&stream, Z_FINISH); + } + if (ret != Z_STREAM_END) { + warning("edward-776", "zlib_inflate returned %d\n", ret); + return; + } + *dst_len = stream.total_out; + return; +} + +/******************************************************************************/ +/* lzo1 compression */ +/******************************************************************************/ + +static int lzo1_init(void) +{ + return 0; +} + +static int lzo1_overrun(unsigned in_len) +{ + return in_len / 16 + 64 + 3; +} + +static coa_t lzo1_alloc(tfm_action act) +{ + int ret = 0; + coa_t coa = NULL; + + switch (act) { + case TFMA_WRITE: /* compress */ + coa = reiser4_vmalloc(LZO1X_1_MEM_COMPRESS); + if (!coa) { + ret = -ENOMEM; + break; + } + case TFMA_READ: /* decompress */ + break; + default: + impossible("edward-877", "unknown tfm action"); + } + if (ret) + return ERR_PTR(ret); + return coa; +} + +static void lzo1_free(coa_t coa, tfm_action act) +{ + assert("edward-879", coa != NULL); + + switch (act) { + case TFMA_WRITE: /* compress */ + vfree(coa); + break; + case TFMA_READ: /* decompress */ + impossible("edward-1304", + "trying to free non-allocated workspace"); + default: + impossible("edward-880", "unknown tfm action"); + } + return; +} + +static int lzo1_min_size_deflate(void) +{ + return 256; +} + +static void +lzo1_compress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int result; + + assert("edward-846", coa != NULL); + assert("edward-847", src_len != 0); + + result = lzo1x_1_compress(src_first, src_len, dst_first, dst_len, coa); + if (unlikely(result != LZO_E_OK)) { + warning("edward-849", "lzo1x_1_compress failed\n"); + goto out; + } + if (*dst_len >= src_len) { + //warning("edward-850", "lzo1x_1_compress: incompressible data\n"); 
+ goto out; + } + return; + out: + *dst_len = src_len; + return; +} + +static void +lzo1_decompress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + int result; + + assert("edward-851", coa == NULL); + assert("edward-852", src_len != 0); + + result = lzo1x_decompress_safe(src_first, src_len, dst_first, dst_len); + if (result != LZO_E_OK) + warning("edward-853", "lzo1x_1_decompress failed\n"); + return; +} + +/******************************************************************************/ +/* zstd1 compression */ +/******************************************************************************/ + +typedef struct { + ZSTD_parameters params; + void* workspace; + ZSTD_CCtx* cctx; +} zstd1_coa_c; +typedef struct { + void* workspace; + ZSTD_DCtx* dctx; +} zstd1_coa_d; + +static int zstd1_init(void) +{ + return 0; +} + +static int zstd1_overrun(unsigned src_len UNUSED_ARG) +{ + return ZSTD_compressBound(src_len) - src_len; +} + +static coa_t zstd1_alloc(tfm_action act) +{ + int ret = 0; + size_t workspace_size; + coa_t coa = NULL; + + switch (act) { + case TFMA_WRITE: /* compress */ + coa = reiser4_vmalloc(sizeof(zstd1_coa_c)); + if (!coa) { + ret = -ENOMEM; + break; + } + /* ZSTD benchmark use level 1 as default. Max is 22. */ + ((zstd1_coa_c*)coa)->params = ZSTD_getParams(1, 0, 0); + workspace_size = ZSTD_CCtxWorkspaceBound(((zstd1_coa_c*)coa)->params.cParams); + ((zstd1_coa_c*)coa)->workspace = reiser4_vmalloc(workspace_size); + if (!(((zstd1_coa_c*)coa)->workspace)) { + ret = -ENOMEM; + vfree(coa); + break; + } + ((zstd1_coa_c*)coa)->cctx = ZSTD_initCCtx(((zstd1_coa_c*)coa)->workspace, workspace_size); + if (!(((zstd1_coa_c*)coa)->cctx)) { + ret = -ENOMEM; + vfree(((zstd1_coa_c*)coa)->workspace); + vfree(coa); + break; + } + break; + case TFMA_READ: /* decompress */ + coa = reiser4_vmalloc(sizeof(zstd1_coa_d)); + if (!coa) { + ret = -ENOMEM; + break; + } + workspace_size = ZSTD_DCtxWorkspaceBound(); + ((zstd1_coa_d*)coa)->workspace = reiser4_vmalloc(workspace_size); + if (!(((zstd1_coa_d*)coa)->workspace)) { + ret = -ENOMEM; + vfree(coa); + break; + } + ((zstd1_coa_d*)coa)->dctx = ZSTD_initDCtx(((zstd1_coa_d*)coa)->workspace, workspace_size); + if (!(((zstd1_coa_d*)coa)->dctx)) { + ret = -ENOMEM; + vfree(((zstd1_coa_d*)coa)->workspace); + vfree(coa); + break; + } + break; + default: + impossible("bsinot-1", + "trying to alloc workspace for unknown tfm action"); + } + if (ret) { + warning("bsinot-2", + "alloc workspace for zstd (tfm action = %d) failed\n", + act); + return ERR_PTR(ret); + } + return coa; +} + +static void zstd1_free(coa_t coa, tfm_action act) +{ + assert("bsinot-3", coa != NULL); + + switch (act) { + case TFMA_WRITE: /* compress */ + vfree(((zstd1_coa_c*)coa)->workspace); + vfree(coa); + //printk(KERN_WARNING "free comp memory -- %p\n", coa); + break; + case TFMA_READ: /* decompress */ + vfree(((zstd1_coa_d*)coa)->workspace); + vfree(coa); + //printk(KERN_WARNING "free decomp memory -- %p\n", coa); + break; + default: + impossible("bsinot-4", "unknown tfm action"); + } + return; +} + +static int zstd1_min_size_deflate(void) +{ + return 256; /* I'm not sure about the correct value, so took from LZO1 */ +} + +static void +zstd1_compress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + unsigned int result; + + assert("bsinot-5", coa != NULL); + assert("bsinot-6", src_len != 0); + result = ZSTD_compressCCtx(((zstd1_coa_c*)coa)->cctx, dst_first, *dst_len, src_first, src_len, ((zstd1_coa_c*)coa)->params); + 
if (ZSTD_isError(result)) { + warning("bsinot-7", "zstd1_compressCCtx failed\n"); + goto out; + } + *dst_len = result; + if (*dst_len >= src_len) { + //warning("bsinot-8", "zstd1_compressCCtx: incompressible data\n"); + goto out; + } + return; + out: + *dst_len = src_len; + return; +} + +static void +zstd1_decompress(coa_t coa, __u8 * src_first, size_t src_len, + __u8 * dst_first, size_t *dst_len) +{ + unsigned int result; + + assert("bsinot-9", coa != NULL); + assert("bsinot-10", src_len != 0); + + result = ZSTD_decompressDCtx(((zstd1_coa_d*)coa)->dctx, dst_first, *dst_len, src_first, src_len); + /* Same here. */ + if (ZSTD_isError(result)) + warning("bsinot-11", "zstd1_decompressDCtx failed\n"); + *dst_len = result; + return; +} + + +compression_plugin compression_plugins[LAST_COMPRESSION_ID] = { + [LZO1_COMPRESSION_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = LZO1_COMPRESSION_ID, + .pops = &compression_plugin_ops, + .label = "lzo1", + .desc = "lzo1 compression transform", + .linkage = {NULL, NULL} + }, + .init = lzo1_init, + .overrun = lzo1_overrun, + .alloc = lzo1_alloc, + .free = lzo1_free, + .min_size_deflate = lzo1_min_size_deflate, + .checksum = reiser4_adler32, + .compress = lzo1_compress, + .decompress = lzo1_decompress + }, + [GZIP1_COMPRESSION_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = GZIP1_COMPRESSION_ID, + .pops = &compression_plugin_ops, + .label = "gzip1", + .desc = "gzip1 compression transform", + .linkage = {NULL, NULL} + }, + .init = gzip1_init, + .overrun = gzip1_overrun, + .alloc = gzip1_alloc, + .free = gzip1_free, + .min_size_deflate = gzip1_min_size_deflate, + .checksum = reiser4_adler32, + .compress = gzip1_compress, + .decompress = gzip1_decompress + }, + [ZSTD1_COMPRESSION_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .id = ZSTD1_COMPRESSION_ID, + .pops = &compression_plugin_ops, + .label = "zstd1", + .desc = "zstd1 compression transform", + .linkage = {NULL, NULL} + }, + .init = zstd1_init, + .overrun = zstd1_overrun, + .alloc = zstd1_alloc, + .free = zstd1_free, + .min_size_deflate = zstd1_min_size_deflate, + .checksum = reiser4_adler32, + .compress = zstd1_compress, + .decompress = zstd1_decompress + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/compress.h b/fs/reiser4/plugin/compress/compress.h new file mode 100644 index 000000000000..235273897071 --- /dev/null +++ b/fs/reiser4/plugin/compress/compress.h @@ -0,0 +1,44 @@ +#if !defined( __FS_REISER4_COMPRESS_H__ ) +#define __FS_REISER4_COMPRESS_H__ + +#include <linux/types.h> +#include <linux/string.h> + +/* transform direction */ +typedef enum { + TFMA_READ, /* decrypt, decompress */ + TFMA_WRITE, /* encrypt, compress */ + TFMA_LAST +} tfm_action; + +/* supported compression algorithms */ +typedef enum { + LZO1_COMPRESSION_ID, + GZIP1_COMPRESSION_ID, + ZSTD1_COMPRESSION_ID, + LAST_COMPRESSION_ID, +} reiser4_compression_id; + +/* the same as pgoff, but units are page clusters */ +typedef unsigned long cloff_t; + +/* working data of a (de)compression algorithm */ +typedef void *coa_t; + +/* table for all supported (de)compression algorithms */ +typedef coa_t coa_set[LAST_COMPRESSION_ID][TFMA_LAST]; + +__u32 reiser4_adler32(char *data, __u32 len); + +#endif /* __FS_REISER4_COMPRESS_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/compress_mode.c b/fs/reiser4/plugin/compress/compress_mode.c new file mode 100644 index 000000000000..5e318caf995c --- /dev/null +++ b/fs/reiser4/plugin/compress/compress_mode.c @@ -0,0 +1,162 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* This file contains Reiser4 compression mode plugins. + + Compression mode plugin is a set of handlers called by compressor + at flush time and represent some heuristics including the ones + which are to avoid compression of incompressible data, see + http://www.namesys.com/cryptcompress_design.html for more details. +*/ +#include "../../inode.h" +#include "../plugin.h" + +static int should_deflate_none(struct inode * inode, cloff_t index) +{ + return 0; +} + +static int should_deflate_common(struct inode * inode, cloff_t index) +{ + return compression_is_on(cryptcompress_inode_data(inode)); +} + +static int discard_hook_ultim(struct inode *inode, cloff_t index) +{ + turn_off_compression(cryptcompress_inode_data(inode)); + return 0; +} + +static int discard_hook_lattd(struct inode *inode, cloff_t index) +{ + struct cryptcompress_info * info = cryptcompress_inode_data(inode); + + assert("edward-1462", + get_lattice_factor(info) >= MIN_LATTICE_FACTOR && + get_lattice_factor(info) <= MAX_LATTICE_FACTOR); + + turn_off_compression(info); + if (get_lattice_factor(info) < MAX_LATTICE_FACTOR) + set_lattice_factor(info, get_lattice_factor(info) << 1); + return 0; +} + +static int accept_hook_lattd(struct inode *inode, cloff_t index) +{ + turn_on_compression(cryptcompress_inode_data(inode)); + set_lattice_factor(cryptcompress_inode_data(inode), MIN_LATTICE_FACTOR); + return 0; +} + +/* Check on dynamic lattice, the adaptive compression modes which + defines the following behavior: + + Compression is on: try to compress everything and turn + it off, whenever cluster is incompressible. + + Compression is off: try to compress clusters of indexes + k * FACTOR (k = 0, 1, 2, ...) and turn it on, if some of + them is compressible. If incompressible, then increase FACTOR */ + +/* check if @index belongs to one-dimensional lattice + of sparce factor @factor */ +static int is_on_lattice(cloff_t index, int factor) +{ + return (factor ? 
index % factor == 0: index == 0); +} + +static int should_deflate_lattd(struct inode * inode, cloff_t index) +{ + return should_deflate_common(inode, index) || + is_on_lattice(index, + get_lattice_factor + (cryptcompress_inode_data(inode))); +} + +/* compression mode_plugins */ +compression_mode_plugin compression_mode_plugins[LAST_COMPRESSION_MODE_ID] = { + [NONE_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = NONE_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "none", + .desc = "Compress nothing", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_none, + .accept_hook = NULL, + .discard_hook = NULL + }, + /* Check-on-dynamic-lattice adaptive compression mode */ + [LATTD_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = LATTD_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "lattd", + .desc = "Check on dynamic lattice", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_lattd, + .accept_hook = accept_hook_lattd, + .discard_hook = discard_hook_lattd + }, + /* Check-ultimately compression mode: + Turn off compression forever as soon as we meet + incompressible data */ + [ULTIM_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = ULTIM_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "ultim", + .desc = "Check ultimately", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_common, + .accept_hook = NULL, + .discard_hook = discard_hook_ultim + }, + /* Force-to-compress-everything compression mode */ + [FORCE_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = FORCE_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "force", + .desc = "Force to compress everything", + .linkage = {NULL, NULL} + }, + .should_deflate = NULL, + .accept_hook = NULL, + .discard_hook = NULL + }, + /* Convert-to-extent compression mode. + In this mode items will be converted to extents and management + will be passed to (classic) unix file plugin as soon as ->write() + detects that the first complete logical cluster (of index #0) is + incompressible. */ + [CONVX_COMPRESSION_MODE_ID] = { + .h = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .id = CONVX_COMPRESSION_MODE_ID, + .pops = NULL, + .label = "conv", + .desc = "Convert to extent", + .linkage = {NULL, NULL} + }, + .should_deflate = should_deflate_common, + .accept_hook = NULL, + .discard_hook = NULL + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/compress/lzoconf.h b/fs/reiser4/plugin/compress/lzoconf.h new file mode 100644 index 000000000000..cc0fa4db25b5 --- /dev/null +++ b/fs/reiser4/plugin/compress/lzoconf.h @@ -0,0 +1,216 @@ +/* lzoconf.h -- configuration for the LZO real-time data compression library + adopted for reiser4 compression transform plugin. + + This file is part of the LZO real-time data compression library + and not included in any proprietary licenses of reiser4. 
+ + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + <markus@oberhumer.com> + http://www.oberhumer.com/opensource/lzo/ + */ + +#include <linux/kernel.h> /* for UINT_MAX, ULONG_MAX - edward */ + +#ifndef __LZOCONF_H +#define __LZOCONF_H + +#define LZO_VERSION 0x1080 +#define LZO_VERSION_STRING "1.08" +#define LZO_VERSION_DATE "Jul 12 2002" + +/* internal Autoconf configuration file - only used when building LZO */ + +/*********************************************************************** +// LZO requires a conforming <limits.h> +************************************************************************/ + +#define CHAR_BIT 8 +#define USHRT_MAX 0xffff + +/* workaround a cpp bug under hpux 10.20 */ +#define LZO_0xffffffffL 4294967295ul + +/*********************************************************************** +// architecture defines +************************************************************************/ + +#if !defined(__LZO_i386) +# if defined(__i386__) || defined(__386__) || defined(_M_IX86) +# define __LZO_i386 +# endif +#endif + +/* memory checkers */ +#if !defined(__LZO_CHECKER) +# if defined(__BOUNDS_CHECKING_ON) +# define __LZO_CHECKER +# elif defined(__CHECKER__) +# define __LZO_CHECKER +# elif defined(__INSURE__) +# define __LZO_CHECKER +# elif defined(__PURIFY__) +# define __LZO_CHECKER +# endif +#endif + +/*********************************************************************** +// integral and pointer types +************************************************************************/ + +/* Integral types with 32 bits or more */ +#if !defined(LZO_UINT32_MAX) +# if (UINT_MAX >= LZO_0xffffffffL) + typedef unsigned int lzo_uint32; + typedef int lzo_int32; +# define LZO_UINT32_MAX UINT_MAX +# define LZO_INT32_MAX INT_MAX +# define LZO_INT32_MIN INT_MIN +# elif (ULONG_MAX >= LZO_0xffffffffL) + typedef unsigned long lzo_uint32; + typedef long lzo_int32; +# define LZO_UINT32_MAX ULONG_MAX +# define LZO_INT32_MAX LONG_MAX +# define LZO_INT32_MIN LONG_MIN +# else +# error "lzo_uint32" +# endif +#endif + +/* lzo_uint is used like size_t */ +#if !defined(LZO_UINT_MAX) +# if (UINT_MAX >= LZO_0xffffffffL) + typedef unsigned int lzo_uint; + typedef int lzo_int; +# define LZO_UINT_MAX UINT_MAX +# define LZO_INT_MAX INT_MAX +# define LZO_INT_MIN INT_MIN +# elif (ULONG_MAX >= LZO_0xffffffffL) + typedef unsigned long lzo_uint; + typedef long lzo_int; +# define 
LZO_UINT_MAX ULONG_MAX +# define LZO_INT_MAX LONG_MAX +# define LZO_INT_MIN LONG_MIN +# else +# error "lzo_uint" +# endif +#endif + + typedef int lzo_bool; + +/*********************************************************************** +// memory models +************************************************************************/ + +/* Memory model that allows to access memory at offsets of lzo_uint. */ +#if !defined(__LZO_MMODEL) +# if (LZO_UINT_MAX <= UINT_MAX) +# define __LZO_MMODEL +# else +# error "__LZO_MMODEL" +# endif +#endif + +/* no typedef here because of const-pointer issues */ +#define lzo_byte unsigned char __LZO_MMODEL +#define lzo_bytep unsigned char __LZO_MMODEL * +#define lzo_charp char __LZO_MMODEL * +#define lzo_voidp void __LZO_MMODEL * +#define lzo_shortp short __LZO_MMODEL * +#define lzo_ushortp unsigned short __LZO_MMODEL * +#define lzo_uint32p lzo_uint32 __LZO_MMODEL * +#define lzo_int32p lzo_int32 __LZO_MMODEL * +#define lzo_uintp lzo_uint __LZO_MMODEL * +#define lzo_intp lzo_int __LZO_MMODEL * +#define lzo_voidpp lzo_voidp __LZO_MMODEL * +#define lzo_bytepp lzo_bytep __LZO_MMODEL * + +#ifndef lzo_sizeof_dict_t +# define lzo_sizeof_dict_t sizeof(lzo_bytep) +#endif + +typedef int (*lzo_compress_t) (const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem); + + +/*********************************************************************** +// error codes and prototypes +************************************************************************/ + +/* Error codes for the compression/decompression functions. Negative + * values are errors, positive values will be used for special but + * normal events. + */ +#define LZO_E_OK 0 +#define LZO_E_ERROR (-1) +#define LZO_E_OUT_OF_MEMORY (-2) /* not used right now */ +#define LZO_E_NOT_COMPRESSIBLE (-3) /* not used right now */ +#define LZO_E_INPUT_OVERRUN (-4) +#define LZO_E_OUTPUT_OVERRUN (-5) +#define LZO_E_LOOKBEHIND_OVERRUN (-6) +#define LZO_E_EOF_NOT_FOUND (-7) +#define LZO_E_INPUT_NOT_CONSUMED (-8) + +/* lzo_init() should be the first function you call. + * Check the return code ! + * + * lzo_init() is a macro to allow checking that the library and the + * compiler's view of various types are consistent. + */ +#define lzo_init() __lzo_init2(LZO_VERSION,(int)sizeof(short),(int)sizeof(int),\ + (int)sizeof(long),(int)sizeof(lzo_uint32),(int)sizeof(lzo_uint),\ + (int)lzo_sizeof_dict_t,(int)sizeof(char *),(int)sizeof(lzo_voidp),\ + (int)sizeof(lzo_compress_t)) + extern int __lzo_init2(unsigned, int, int, int, int, int, int, + int, int, int); + +/* checksum functions */ +extern lzo_uint32 lzo_crc32(lzo_uint32 _c, const lzo_byte * _buf, + lzo_uint _len); +/* misc. */ + typedef union { + lzo_bytep p; + lzo_uint u; + } __lzo_pu_u; + typedef union { + lzo_bytep p; + lzo_uint32 u32; + } __lzo_pu32_u; + typedef union { + void *vp; + lzo_bytep bp; + lzo_uint32 u32; + long l; + } lzo_align_t; + +#define LZO_PTR_ALIGN_UP(_ptr,_size) \ + ((_ptr) + (lzo_uint) __lzo_align_gap((const lzo_voidp)(_ptr),(lzo_uint)(_size))) + +/* deprecated - only for backward compatibility */ +#define LZO_ALIGN(_ptr,_size) LZO_PTR_ALIGN_UP(_ptr,_size) + +#endif /* already included */ diff --git a/fs/reiser4/plugin/compress/minilzo.c b/fs/reiser4/plugin/compress/minilzo.c new file mode 100644 index 000000000000..2dba187d9845 --- /dev/null +++ b/fs/reiser4/plugin/compress/minilzo.c @@ -0,0 +1,1967 @@ +/* minilzo.c -- mini subset of the LZO real-time data compression library + adopted for reiser4 compression transform plugin. 
+ + This file is part of the LZO real-time data compression library + and not included in any proprietary licenses of reiser4. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + <markus@oberhumer.com> + http://www.oberhumer.com/opensource/lzo/ + */ + +/* + * NOTE: + * the full LZO package can be found at + * http://www.oberhumer.com/opensource/lzo/ + */ + +#include "../../debug.h" /* for reiser4 assert macro -edward */ + +#define __LZO_IN_MINILZO +#define LZO_BUILD + +#include "minilzo.h" + +#if !defined(MINILZO_VERSION) || (MINILZO_VERSION != 0x1080) +# error "version mismatch in miniLZO source files" +#endif + +#ifndef __LZO_CONF_H +#define __LZO_CONF_H + +# define BOUNDS_CHECKING_OFF_DURING(stmt) stmt +# define BOUNDS_CHECKING_OFF_IN_EXPR(expr) (expr) + +# define HAVE_MEMCMP +# define HAVE_MEMCPY +# define HAVE_MEMMOVE +# define HAVE_MEMSET + +#undef NDEBUG +#if !defined(LZO_DEBUG) +# define NDEBUG +#endif +#if defined(LZO_DEBUG) || !defined(NDEBUG) +# if !defined(NO_STDIO_H) +# include <stdio.h> +# endif +#endif + +#if !defined(LZO_COMPILE_TIME_ASSERT) +# define LZO_COMPILE_TIME_ASSERT(expr) \ + { typedef int __lzo_compile_time_assert_fail[1 - 2 * !(expr)]; } +#endif + +#if !defined(LZO_UNUSED) +# if 1 +# define LZO_UNUSED(var) ((void)&var) +# elif 0 +# define LZO_UNUSED(var) { typedef int __lzo_unused[sizeof(var) ? 2 : 1]; } +# else +# define LZO_UNUSED(parm) (parm = parm) +# endif +#endif + +#if defined(NO_MEMCMP) +# undef HAVE_MEMCMP +#endif + +#if !defined(HAVE_MEMSET) +# undef memset +# define memset lzo_memset +#endif + +# define LZO_BYTE(x) ((unsigned char) ((x) & 0xff)) + +#define LZO_MAX(a,b) ((a) >= (b) ? (a) : (b)) +#define LZO_MIN(a,b) ((a) <= (b) ? (a) : (b)) +#define LZO_MAX3(a,b,c) ((a) >= (b) ? LZO_MAX(a,c) : LZO_MAX(b,c)) +#define LZO_MIN3(a,b,c) ((a) <= (b) ? 
LZO_MIN(a,c) : LZO_MIN(b,c)) + +#define lzo_sizeof(type) ((lzo_uint) (sizeof(type))) + +#define LZO_HIGH(array) ((lzo_uint) (sizeof(array)/sizeof(*(array)))) + +#define LZO_SIZE(bits) (1u << (bits)) +#define LZO_MASK(bits) (LZO_SIZE(bits) - 1) + +#define LZO_LSIZE(bits) (1ul << (bits)) +#define LZO_LMASK(bits) (LZO_LSIZE(bits) - 1) + +#define LZO_USIZE(bits) ((lzo_uint) 1 << (bits)) +#define LZO_UMASK(bits) (LZO_USIZE(bits) - 1) + +#define LZO_STYPE_MAX(b) (((1l << (8*(b)-2)) - 1l) + (1l << (8*(b)-2))) +#define LZO_UTYPE_MAX(b) (((1ul << (8*(b)-1)) - 1ul) + (1ul << (8*(b)-1))) + +#if !defined(SIZEOF_UNSIGNED) +# if (UINT_MAX == 0xffff) +# define SIZEOF_UNSIGNED 2 +# elif (UINT_MAX == LZO_0xffffffffL) +# define SIZEOF_UNSIGNED 4 +# elif (UINT_MAX >= LZO_0xffffffffL) +# define SIZEOF_UNSIGNED 8 +# else +# error "SIZEOF_UNSIGNED" +# endif +#endif + +#if !defined(SIZEOF_UNSIGNED_LONG) +# if (ULONG_MAX == LZO_0xffffffffL) +# define SIZEOF_UNSIGNED_LONG 4 +# elif (ULONG_MAX >= LZO_0xffffffffL) +# define SIZEOF_UNSIGNED_LONG 8 +# else +# error "SIZEOF_UNSIGNED_LONG" +# endif +#endif + +#if !defined(SIZEOF_SIZE_T) +# define SIZEOF_SIZE_T SIZEOF_UNSIGNED +#endif +#if !defined(SIZE_T_MAX) +# define SIZE_T_MAX LZO_UTYPE_MAX(SIZEOF_SIZE_T) +#endif + +#if 1 && defined(__LZO_i386) && (UINT_MAX == LZO_0xffffffffL) +# if !defined(LZO_UNALIGNED_OK_2) && (USHRT_MAX == 0xffff) +# define LZO_UNALIGNED_OK_2 +# endif +# if !defined(LZO_UNALIGNED_OK_4) && (LZO_UINT32_MAX == LZO_0xffffffffL) +# define LZO_UNALIGNED_OK_4 +# endif +#endif + +#if defined(LZO_UNALIGNED_OK_2) || defined(LZO_UNALIGNED_OK_4) +# if !defined(LZO_UNALIGNED_OK) +# define LZO_UNALIGNED_OK +# endif +#endif + +#if defined(__LZO_NO_UNALIGNED) +# undef LZO_UNALIGNED_OK +# undef LZO_UNALIGNED_OK_2 +# undef LZO_UNALIGNED_OK_4 +#endif + +#if defined(LZO_UNALIGNED_OK_2) && (USHRT_MAX != 0xffff) +# error "LZO_UNALIGNED_OK_2 must not be defined on this system" +#endif +#if defined(LZO_UNALIGNED_OK_4) && (LZO_UINT32_MAX != LZO_0xffffffffL) +# error "LZO_UNALIGNED_OK_4 must not be defined on this system" +#endif + +#if defined(__LZO_NO_ALIGNED) +# undef LZO_ALIGNED_OK_4 +#endif + +#if defined(LZO_ALIGNED_OK_4) && (LZO_UINT32_MAX != LZO_0xffffffffL) +# error "LZO_ALIGNED_OK_4 must not be defined on this system" +#endif + +#define LZO_LITTLE_ENDIAN 1234 +#define LZO_BIG_ENDIAN 4321 +#define LZO_PDP_ENDIAN 3412 + +#if !defined(LZO_BYTE_ORDER) +# if defined(MFX_BYTE_ORDER) +# define LZO_BYTE_ORDER MFX_BYTE_ORDER +# elif defined(__LZO_i386) +# define LZO_BYTE_ORDER LZO_LITTLE_ENDIAN +# elif defined(BYTE_ORDER) +# define LZO_BYTE_ORDER BYTE_ORDER +# elif defined(__BYTE_ORDER) +# define LZO_BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#if defined(LZO_BYTE_ORDER) +# if (LZO_BYTE_ORDER != LZO_LITTLE_ENDIAN) && \ + (LZO_BYTE_ORDER != LZO_BIG_ENDIAN) +# error "invalid LZO_BYTE_ORDER" +# endif +#endif + +#if defined(LZO_UNALIGNED_OK) && !defined(LZO_BYTE_ORDER) +# error "LZO_BYTE_ORDER is not defined" +#endif + +#define LZO_OPTIMIZE_GNUC_i386_IS_BUGGY + +#if defined(NDEBUG) && !defined(LZO_DEBUG) && !defined(__LZO_CHECKER) +# if defined(__GNUC__) && defined(__i386__) +# if !defined(LZO_OPTIMIZE_GNUC_i386_IS_BUGGY) +# define LZO_OPTIMIZE_GNUC_i386 +# endif +# endif +#endif + +extern const lzo_uint32 _lzo_crc32_table[256]; + +#define _LZO_STRINGIZE(x) #x +#define _LZO_MEXPAND(x) _LZO_STRINGIZE(x) + +#define _LZO_CONCAT2(a,b) a ## b +#define _LZO_CONCAT3(a,b,c) a ## b ## c +#define _LZO_CONCAT4(a,b,c,d) a ## b ## c ## d +#define _LZO_CONCAT5(a,b,c,d,e) a ## b ## c ## d 
## e + +#define _LZO_ECONCAT2(a,b) _LZO_CONCAT2(a,b) +#define _LZO_ECONCAT3(a,b,c) _LZO_CONCAT3(a,b,c) +#define _LZO_ECONCAT4(a,b,c,d) _LZO_CONCAT4(a,b,c,d) +#define _LZO_ECONCAT5(a,b,c,d,e) _LZO_CONCAT5(a,b,c,d,e) + +#ifndef __LZO_PTR_H +#define __LZO_PTR_H + +#if !defined(lzo_ptrdiff_t) +# if (UINT_MAX >= LZO_0xffffffffL) +typedef ptrdiff_t lzo_ptrdiff_t; +# else +typedef long lzo_ptrdiff_t; +# endif +#endif + +#if !defined(__LZO_HAVE_PTR_T) +# if defined(lzo_ptr_t) +# define __LZO_HAVE_PTR_T +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(SIZEOF_CHAR_P) && defined(SIZEOF_UNSIGNED_LONG) +# if (SIZEOF_CHAR_P == SIZEOF_UNSIGNED_LONG) +typedef unsigned long lzo_ptr_t; +typedef long lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(SIZEOF_CHAR_P) && defined(SIZEOF_UNSIGNED) +# if (SIZEOF_CHAR_P == SIZEOF_UNSIGNED) +typedef unsigned int lzo_ptr_t; +typedef int lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(SIZEOF_CHAR_P) && defined(SIZEOF_UNSIGNED_SHORT) +# if (SIZEOF_CHAR_P == SIZEOF_UNSIGNED_SHORT) +typedef unsigned short lzo_ptr_t; +typedef short lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +# endif +#endif +#if !defined(__LZO_HAVE_PTR_T) +# if defined(LZO_HAVE_CONFIG_H) || defined(SIZEOF_CHAR_P) +# error "no suitable type for lzo_ptr_t" +# else +typedef unsigned long lzo_ptr_t; +typedef long lzo_sptr_t; +# define __LZO_HAVE_PTR_T +# endif +#endif + +#define PTR(a) ((lzo_ptr_t) (a)) +#define PTR_LINEAR(a) PTR(a) +#define PTR_ALIGNED_4(a) ((PTR_LINEAR(a) & 3) == 0) +#define PTR_ALIGNED_8(a) ((PTR_LINEAR(a) & 7) == 0) +#define PTR_ALIGNED2_4(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 3) == 0) +#define PTR_ALIGNED2_8(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 7) == 0) + +#define PTR_LT(a,b) (PTR(a) < PTR(b)) +#define PTR_GE(a,b) (PTR(a) >= PTR(b)) +#define PTR_DIFF(a,b) ((lzo_ptrdiff_t) (PTR(a) - PTR(b))) +#define pd(a,b) ((lzo_uint) ((a)-(b))) + +typedef union { + char a_char; + unsigned char a_uchar; + short a_short; + unsigned short a_ushort; + int a_int; + unsigned int a_uint; + long a_long; + unsigned long a_ulong; + lzo_int a_lzo_int; + lzo_uint a_lzo_uint; + lzo_int32 a_lzo_int32; + lzo_uint32 a_lzo_uint32; + ptrdiff_t a_ptrdiff_t; + lzo_ptrdiff_t a_lzo_ptrdiff_t; + lzo_ptr_t a_lzo_ptr_t; + lzo_voidp a_lzo_voidp; + void *a_void_p; + lzo_bytep a_lzo_bytep; + lzo_bytepp a_lzo_bytepp; + lzo_uintp a_lzo_uintp; + lzo_uint *a_lzo_uint_p; + lzo_uint32p a_lzo_uint32p; + lzo_uint32 *a_lzo_uint32_p; + unsigned char *a_uchar_p; + char *a_char_p; +} lzo_full_align_t; + +#endif +#define LZO_DETERMINISTIC +#define LZO_DICT_USE_PTR +# define lzo_dict_t const lzo_bytep +# define lzo_dict_p lzo_dict_t __LZO_MMODEL * +#if !defined(lzo_moff_t) +#define lzo_moff_t lzo_uint +#endif +#endif +static lzo_ptr_t __lzo_ptr_linear(const lzo_voidp ptr) +{ + return PTR_LINEAR(ptr); +} + +static unsigned __lzo_align_gap(const lzo_voidp ptr, lzo_uint size) +{ + lzo_ptr_t p, s, n; + + assert("lzo-01", size > 0); + + p = __lzo_ptr_linear(ptr); + s = (lzo_ptr_t) (size - 1); + n = (((p + s) / size) * size) - p; + + assert("lzo-02", (long)n >= 0); + assert("lzo-03", n <= s); + + return (unsigned)n; +} + +#ifndef __LZO_UTIL_H +#define __LZO_UTIL_H + +#ifndef __LZO_CONF_H +#endif + +#if 1 && defined(HAVE_MEMCPY) +#define MEMCPY8_DS(dest,src,len) \ + memcpy(dest,src,len); \ + dest += len; \ + src += len +#endif + +#if !defined(MEMCPY8_DS) + +#define MEMCPY8_DS(dest,src,len) \ + { register 
lzo_uint __l = (len) / 8; \ + do { \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + *dest++ = *src++; \ + } while (--__l > 0); } + +#endif + +#define MEMCPY_DS(dest,src,len) \ + do *dest++ = *src++; \ + while (--len > 0) + +#define MEMMOVE_DS(dest,src,len) \ + do *dest++ = *src++; \ + while (--len > 0) + +#if (LZO_UINT_MAX <= SIZE_T_MAX) && defined(HAVE_MEMSET) + +#define BZERO8_PTR(s,l,n) memset((s),0,(lzo_uint)(l)*(n)) + +#else + +#define BZERO8_PTR(s,l,n) \ + lzo_memset((lzo_voidp)(s),0,(lzo_uint)(l)*(n)) + +#endif +#endif + +/* If you use the LZO library in a product, you *must* keep this + * copyright string in the executable of your product. + */ + +static const lzo_byte __lzo_copyright[] = +#if !defined(__LZO_IN_MINLZO) + LZO_VERSION_STRING; +#else + "\n\n\n" + "LZO real-time data compression library.\n" + "Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002 Markus Franz Xaver Johannes Oberhumer\n" + "<markus.oberhumer@jk.uni-linz.ac.at>\n" + "http://www.oberhumer.com/opensource/lzo/\n" + "\n" + "LZO version: v" LZO_VERSION_STRING ", " LZO_VERSION_DATE "\n" + "LZO build date: " __DATE__ " " __TIME__ "\n\n" + "LZO special compilation options:\n" +#ifdef __cplusplus + " __cplusplus\n" +#endif +#if defined(__PIC__) + " __PIC__\n" +#elif defined(__pic__) + " __pic__\n" +#endif +#if (UINT_MAX < LZO_0xffffffffL) + " 16BIT\n" +#endif +#if defined(__LZO_STRICT_16BIT) + " __LZO_STRICT_16BIT\n" +#endif +#if (UINT_MAX > LZO_0xffffffffL) + " UINT_MAX=" _LZO_MEXPAND(UINT_MAX) "\n" +#endif +#if (ULONG_MAX > LZO_0xffffffffL) + " ULONG_MAX=" _LZO_MEXPAND(ULONG_MAX) "\n" +#endif +#if defined(LZO_BYTE_ORDER) + " LZO_BYTE_ORDER=" _LZO_MEXPAND(LZO_BYTE_ORDER) "\n" +#endif +#if defined(LZO_UNALIGNED_OK_2) + " LZO_UNALIGNED_OK_2\n" +#endif +#if defined(LZO_UNALIGNED_OK_4) + " LZO_UNALIGNED_OK_4\n" +#endif +#if defined(LZO_ALIGNED_OK_4) + " LZO_ALIGNED_OK_4\n" +#endif +#if defined(LZO_DICT_USE_PTR) + " LZO_DICT_USE_PTR\n" +#endif +#if defined(__LZO_QUERY_COMPRESS) + " __LZO_QUERY_COMPRESS\n" +#endif +#if defined(__LZO_QUERY_DECOMPRESS) + " __LZO_QUERY_DECOMPRESS\n" +#endif +#if defined(__LZO_IN_MINILZO) + " __LZO_IN_MINILZO\n" +#endif + "\n\n" "$Id: LZO " LZO_VERSION_STRING " built " __DATE__ " " __TIME__ +#if defined(__GNUC__) && defined(__VERSION__) + " by gcc " __VERSION__ +#elif defined(__BORLANDC__) + " by Borland C " _LZO_MEXPAND(__BORLANDC__) +#elif defined(_MSC_VER) + " by Microsoft C " _LZO_MEXPAND(_MSC_VER) +#elif defined(__PUREC__) + " by Pure C " _LZO_MEXPAND(__PUREC__) +#elif defined(__SC__) + " by Symantec C " _LZO_MEXPAND(__SC__) +#elif defined(__TURBOC__) + " by Turbo C " _LZO_MEXPAND(__TURBOC__) +#elif defined(__WATCOMC__) + " by Watcom C " _LZO_MEXPAND(__WATCOMC__) +#endif + " $\n" + "$Copyright: LZO (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002 Markus Franz Xaver Johannes Oberhumer $\n"; +#endif + +#define LZO_BASE 65521u +#define LZO_NMAX 5552 + +#define LZO_DO1(buf,i) {s1 += buf[i]; s2 += s1;} +#define LZO_DO2(buf,i) LZO_DO1(buf,i); LZO_DO1(buf,i+1); +#define LZO_DO4(buf,i) LZO_DO2(buf,i); LZO_DO2(buf,i+2); +#define LZO_DO8(buf,i) LZO_DO4(buf,i); LZO_DO4(buf,i+4); +#define LZO_DO16(buf,i) LZO_DO8(buf,i); LZO_DO8(buf,i+8); + +# define IS_SIGNED(type) (((type) (-1)) < ((type) 0)) +# define IS_UNSIGNED(type) (((type) (-1)) > ((type) 0)) + +#define IS_POWER_OF_2(x) (((x) & ((x) - 1)) == 0) + +static lzo_bool schedule_insns_bug(void); +static lzo_bool strength_reduce_bug(int *); + 
+# define __lzo_assert(x) ((x) ? 1 : 0) + +#undef COMPILE_TIME_ASSERT + +# define COMPILE_TIME_ASSERT(expr) LZO_COMPILE_TIME_ASSERT(expr) + +static lzo_bool basic_integral_check(void) +{ + lzo_bool r = 1; + + COMPILE_TIME_ASSERT(CHAR_BIT == 8); + COMPILE_TIME_ASSERT(sizeof(char) == 1); + COMPILE_TIME_ASSERT(sizeof(short) >= 2); + COMPILE_TIME_ASSERT(sizeof(long) >= 4); + COMPILE_TIME_ASSERT(sizeof(int) >= sizeof(short)); + COMPILE_TIME_ASSERT(sizeof(long) >= sizeof(int)); + + COMPILE_TIME_ASSERT(sizeof(lzo_uint) == sizeof(lzo_int)); + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) == sizeof(lzo_int32)); + + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) >= 4); + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) >= sizeof(unsigned)); +#if defined(__LZO_STRICT_16BIT) + COMPILE_TIME_ASSERT(sizeof(lzo_uint) == 2); +#else + COMPILE_TIME_ASSERT(sizeof(lzo_uint) >= 4); + COMPILE_TIME_ASSERT(sizeof(lzo_uint) >= sizeof(unsigned)); +#endif + +#if (USHRT_MAX == 65535u) + COMPILE_TIME_ASSERT(sizeof(short) == 2); +#elif (USHRT_MAX == LZO_0xffffffffL) + COMPILE_TIME_ASSERT(sizeof(short) == 4); +#elif (USHRT_MAX >= LZO_0xffffffffL) + COMPILE_TIME_ASSERT(sizeof(short) > 4); +#endif + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned char)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned short)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(unsigned long)); + COMPILE_TIME_ASSERT(IS_SIGNED(short)); + COMPILE_TIME_ASSERT(IS_SIGNED(int)); + COMPILE_TIME_ASSERT(IS_SIGNED(long)); + + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_uint32)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_uint)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_int32)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_int)); + + COMPILE_TIME_ASSERT(INT_MAX == LZO_STYPE_MAX(sizeof(int))); + COMPILE_TIME_ASSERT(UINT_MAX == LZO_UTYPE_MAX(sizeof(unsigned))); + COMPILE_TIME_ASSERT(LONG_MAX == LZO_STYPE_MAX(sizeof(long))); + COMPILE_TIME_ASSERT(ULONG_MAX == LZO_UTYPE_MAX(sizeof(unsigned long))); + COMPILE_TIME_ASSERT(USHRT_MAX == LZO_UTYPE_MAX(sizeof(unsigned short))); + COMPILE_TIME_ASSERT(LZO_UINT32_MAX == + LZO_UTYPE_MAX(sizeof(lzo_uint32))); + COMPILE_TIME_ASSERT(LZO_UINT_MAX == LZO_UTYPE_MAX(sizeof(lzo_uint))); + + r &= __lzo_assert(LZO_BYTE(257) == 1); + + return r; +} + +static lzo_bool basic_ptr_check(void) +{ + lzo_bool r = 1; + + COMPILE_TIME_ASSERT(sizeof(char *) >= sizeof(int)); + COMPILE_TIME_ASSERT(sizeof(lzo_byte *) >= sizeof(char *)); + + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) == sizeof(lzo_byte *)); + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) == sizeof(lzo_voidpp)); + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) == sizeof(lzo_bytepp)); + COMPILE_TIME_ASSERT(sizeof(lzo_voidp) >= sizeof(lzo_uint)); + + COMPILE_TIME_ASSERT(sizeof(lzo_ptr_t) == sizeof(lzo_voidp)); + COMPILE_TIME_ASSERT(sizeof(lzo_ptr_t) == sizeof(lzo_sptr_t)); + COMPILE_TIME_ASSERT(sizeof(lzo_ptr_t) >= sizeof(lzo_uint)); + + COMPILE_TIME_ASSERT(sizeof(lzo_ptrdiff_t) >= 4); + COMPILE_TIME_ASSERT(sizeof(lzo_ptrdiff_t) >= sizeof(ptrdiff_t)); + + COMPILE_TIME_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); + COMPILE_TIME_ASSERT(sizeof(lzo_ptrdiff_t) >= sizeof(lzo_uint)); + +#if defined(SIZEOF_CHAR_P) + COMPILE_TIME_ASSERT(SIZEOF_CHAR_P == sizeof(char *)); +#endif +#if defined(SIZEOF_PTRDIFF_T) + COMPILE_TIME_ASSERT(SIZEOF_PTRDIFF_T == sizeof(ptrdiff_t)); +#endif + + COMPILE_TIME_ASSERT(IS_SIGNED(ptrdiff_t)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(size_t)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_ptrdiff_t)); + COMPILE_TIME_ASSERT(IS_SIGNED(lzo_sptr_t)); + COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_ptr_t)); + 
COMPILE_TIME_ASSERT(IS_UNSIGNED(lzo_moff_t)); + + return r; +} + +static lzo_bool ptr_check(void) +{ + lzo_bool r = 1; + int i; + char _wrkmem[10 * sizeof(lzo_byte *) + sizeof(lzo_full_align_t)]; + lzo_bytep wrkmem; + lzo_bytepp dict; + unsigned char x[4 * sizeof(lzo_full_align_t)]; + long d; + lzo_full_align_t a; + lzo_full_align_t u; + + for (i = 0; i < (int)sizeof(x); i++) + x[i] = LZO_BYTE(i); + + wrkmem = + LZO_PTR_ALIGN_UP((lzo_byte *) _wrkmem, sizeof(lzo_full_align_t)); + + u.a_lzo_bytep = wrkmem; + dict = u.a_lzo_bytepp; + + d = (long)((const lzo_bytep)dict - (const lzo_bytep)_wrkmem); + r &= __lzo_assert(d >= 0); + r &= __lzo_assert(d < (long)sizeof(lzo_full_align_t)); + + memset(&a, 0, sizeof(a)); + r &= __lzo_assert(a.a_lzo_voidp == NULL); + + memset(&a, 0xff, sizeof(a)); + r &= __lzo_assert(a.a_ushort == USHRT_MAX); + r &= __lzo_assert(a.a_uint == UINT_MAX); + r &= __lzo_assert(a.a_ulong == ULONG_MAX); + r &= __lzo_assert(a.a_lzo_uint == LZO_UINT_MAX); + r &= __lzo_assert(a.a_lzo_uint32 == LZO_UINT32_MAX); + + if (r == 1) { + for (i = 0; i < 8; i++) + r &= __lzo_assert((const lzo_voidp)(&dict[i]) == + (const + lzo_voidp)(&wrkmem[i * + sizeof(lzo_byte + *)])); + } + + memset(&a, 0, sizeof(a)); + r &= __lzo_assert(a.a_char_p == NULL); + r &= __lzo_assert(a.a_lzo_bytep == NULL); + r &= __lzo_assert(NULL == (void *)0); + if (r == 1) { + for (i = 0; i < 10; i++) + dict[i] = wrkmem; + BZERO8_PTR(dict + 1, sizeof(dict[0]), 8); + r &= __lzo_assert(dict[0] == wrkmem); + for (i = 1; i < 9; i++) + r &= __lzo_assert(dict[i] == NULL); + r &= __lzo_assert(dict[9] == wrkmem); + } + + if (r == 1) { + unsigned k = 1; + const unsigned n = (unsigned)sizeof(lzo_uint32); + lzo_byte *p0; + lzo_byte *p1; + + k += __lzo_align_gap(&x[k], n); + p0 = (lzo_bytep) & x[k]; +#if defined(PTR_LINEAR) + r &= __lzo_assert((PTR_LINEAR(p0) & (n - 1)) == 0); +#else + r &= __lzo_assert(n == 4); + r &= __lzo_assert(PTR_ALIGNED_4(p0)); +#endif + + r &= __lzo_assert(k >= 1); + p1 = (lzo_bytep) & x[1]; + r &= __lzo_assert(PTR_GE(p0, p1)); + + r &= __lzo_assert(k < 1 + n); + p1 = (lzo_bytep) & x[1 + n]; + r &= __lzo_assert(PTR_LT(p0, p1)); + + if (r == 1) { + lzo_uint32 v0, v1; + + u.a_uchar_p = &x[k]; + v0 = *u.a_lzo_uint32_p; + u.a_uchar_p = &x[k + n]; + v1 = *u.a_lzo_uint32_p; + + r &= __lzo_assert(v0 > 0); + r &= __lzo_assert(v1 > 0); + } + } + + return r; +} + +static int _lzo_config_check(void) +{ + lzo_bool r = 1; + int i; + union { + lzo_uint32 a; + unsigned short b; + lzo_uint32 aa[4]; + unsigned char x[4 * sizeof(lzo_full_align_t)]; + } u; + + COMPILE_TIME_ASSERT((int)((unsigned char)((signed char)-1)) == 255); + COMPILE_TIME_ASSERT((((unsigned char)128) << (int)(8 * sizeof(int) - 8)) + < 0); + + r &= basic_integral_check(); + r &= basic_ptr_check(); + if (r != 1) + return LZO_E_ERROR; + + u.a = 0; + u.b = 0; + for (i = 0; i < (int)sizeof(u.x); i++) + u.x[i] = LZO_BYTE(i); + +#if defined(LZO_BYTE_ORDER) + if (r == 1) { +# if (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + lzo_uint32 a = (lzo_uint32) (u.a & LZO_0xffffffffL); + unsigned short b = (unsigned short)(u.b & 0xffff); + r &= __lzo_assert(a == 0x03020100L); + r &= __lzo_assert(b == 0x0100); +# elif (LZO_BYTE_ORDER == LZO_BIG_ENDIAN) + lzo_uint32 a = u.a >> (8 * sizeof(u.a) - 32); + unsigned short b = u.b >> (8 * sizeof(u.b) - 16); + r &= __lzo_assert(a == 0x00010203L); + r &= __lzo_assert(b == 0x0001); +# else +# error "invalid LZO_BYTE_ORDER" +# endif + } +#endif + +#if defined(LZO_UNALIGNED_OK_2) + COMPILE_TIME_ASSERT(sizeof(short) == 2); + if (r == 1) { + 
unsigned short b[4]; + + for (i = 0; i < 4; i++) + b[i] = *(const unsigned short *)&u.x[i]; + +# if (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + r &= __lzo_assert(b[0] == 0x0100); + r &= __lzo_assert(b[1] == 0x0201); + r &= __lzo_assert(b[2] == 0x0302); + r &= __lzo_assert(b[3] == 0x0403); +# elif (LZO_BYTE_ORDER == LZO_BIG_ENDIAN) + r &= __lzo_assert(b[0] == 0x0001); + r &= __lzo_assert(b[1] == 0x0102); + r &= __lzo_assert(b[2] == 0x0203); + r &= __lzo_assert(b[3] == 0x0304); +# endif + } +#endif + +#if defined(LZO_UNALIGNED_OK_4) + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) == 4); + if (r == 1) { + lzo_uint32 a[4]; + + for (i = 0; i < 4; i++) + a[i] = *(const lzo_uint32 *)&u.x[i]; + +# if (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + r &= __lzo_assert(a[0] == 0x03020100L); + r &= __lzo_assert(a[1] == 0x04030201L); + r &= __lzo_assert(a[2] == 0x05040302L); + r &= __lzo_assert(a[3] == 0x06050403L); +# elif (LZO_BYTE_ORDER == LZO_BIG_ENDIAN) + r &= __lzo_assert(a[0] == 0x00010203L); + r &= __lzo_assert(a[1] == 0x01020304L); + r &= __lzo_assert(a[2] == 0x02030405L); + r &= __lzo_assert(a[3] == 0x03040506L); +# endif + } +#endif + +#if defined(LZO_ALIGNED_OK_4) + COMPILE_TIME_ASSERT(sizeof(lzo_uint32) == 4); +#endif + + COMPILE_TIME_ASSERT(lzo_sizeof_dict_t == sizeof(lzo_dict_t)); + + if (r == 1) { + r &= __lzo_assert(!schedule_insns_bug()); + } + + if (r == 1) { + static int x[3]; + static unsigned xn = 3; + register unsigned j; + + for (j = 0; j < xn; j++) + x[j] = (int)j - 3; + r &= __lzo_assert(!strength_reduce_bug(x)); + } + + if (r == 1) { + r &= ptr_check(); + } + + return r == 1 ? LZO_E_OK : LZO_E_ERROR; +} + +static lzo_bool schedule_insns_bug(void) +{ +#if defined(__LZO_CHECKER) + return 0; +#else + const int clone[] = { 1, 2, 0 }; + const int *q; + q = clone; + return (*q) ? 
0 : 1; +#endif +} + +static lzo_bool strength_reduce_bug(int *x) +{ + return x[0] != -3 || x[1] != -2 || x[2] != -1; +} + +#undef COMPILE_TIME_ASSERT + +int __lzo_init2(unsigned v, int s1, int s2, int s3, int s4, int s5, + int s6, int s7, int s8, int s9) +{ + int r; + + if (v == 0) + return LZO_E_ERROR; + + r = (s1 == -1 || s1 == (int)sizeof(short)) && + (s2 == -1 || s2 == (int)sizeof(int)) && + (s3 == -1 || s3 == (int)sizeof(long)) && + (s4 == -1 || s4 == (int)sizeof(lzo_uint32)) && + (s5 == -1 || s5 == (int)sizeof(lzo_uint)) && + (s6 == -1 || s6 == (int)lzo_sizeof_dict_t) && + (s7 == -1 || s7 == (int)sizeof(char *)) && + (s8 == -1 || s8 == (int)sizeof(lzo_voidp)) && + (s9 == -1 || s9 == (int)sizeof(lzo_compress_t)); + if (!r) + return LZO_E_ERROR; + + r = _lzo_config_check(); + if (r != LZO_E_OK) + return r; + + return r; +} + +#define do_compress _lzo1x_1_do_compress + +#define LZO_NEED_DICT_H +#define D_BITS 14 +#define D_INDEX1(d,p) d = DM((0x21*DX3(p,5,5,6)) >> 5) +#define D_INDEX2(d,p) d = (d & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f) + +#ifndef __LZO_CONFIG1X_H +#define __LZO_CONFIG1X_H + +#if !defined(LZO1X) && !defined(LZO1Y) && !defined(LZO1Z) +# define LZO1X +#endif + +#define LZO_EOF_CODE +#undef LZO_DETERMINISTIC + +#define M1_MAX_OFFSET 0x0400 +#ifndef M2_MAX_OFFSET +#define M2_MAX_OFFSET 0x0800 +#endif +#define M3_MAX_OFFSET 0x4000 +#define M4_MAX_OFFSET 0xbfff + +#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET) + +#define M1_MIN_LEN 2 +#define M1_MAX_LEN 2 +#define M2_MIN_LEN 3 +#ifndef M2_MAX_LEN +#define M2_MAX_LEN 8 +#endif +#define M3_MIN_LEN 3 +#define M3_MAX_LEN 33 +#define M4_MIN_LEN 3 +#define M4_MAX_LEN 9 + +#define M1_MARKER 0 +#define M2_MARKER 64 +#define M3_MARKER 32 +#define M4_MARKER 16 + +#ifndef MIN_LOOKAHEAD +#define MIN_LOOKAHEAD (M2_MAX_LEN + 1) +#endif + +#if defined(LZO_NEED_DICT_H) + +#ifndef LZO_HASH +#define LZO_HASH LZO_HASH_LZO_INCREMENTAL_B +#endif +#define DL_MIN_LEN M2_MIN_LEN + +#ifndef __LZO_DICT_H +#define __LZO_DICT_H + +#if !defined(D_BITS) && defined(DBITS) +# define D_BITS DBITS +#endif +#if !defined(D_BITS) +# error "D_BITS is not defined" +#endif +#if (D_BITS < 16) +# define D_SIZE LZO_SIZE(D_BITS) +# define D_MASK LZO_MASK(D_BITS) +#else +# define D_SIZE LZO_USIZE(D_BITS) +# define D_MASK LZO_UMASK(D_BITS) +#endif +#define D_HIGH ((D_MASK >> 1) + 1) + +#if !defined(DD_BITS) +# define DD_BITS 0 +#endif +#define DD_SIZE LZO_SIZE(DD_BITS) +#define DD_MASK LZO_MASK(DD_BITS) + +#if !defined(DL_BITS) +# define DL_BITS (D_BITS - DD_BITS) +#endif +#if (DL_BITS < 16) +# define DL_SIZE LZO_SIZE(DL_BITS) +# define DL_MASK LZO_MASK(DL_BITS) +#else +# define DL_SIZE LZO_USIZE(DL_BITS) +# define DL_MASK LZO_UMASK(DL_BITS) +#endif + +#if (D_BITS != DL_BITS + DD_BITS) +# error "D_BITS does not match" +#endif +#if (D_BITS < 8 || D_BITS > 18) +# error "invalid D_BITS" +#endif +#if (DL_BITS < 8 || DL_BITS > 20) +# error "invalid DL_BITS" +#endif +#if (DD_BITS < 0 || DD_BITS > 6) +# error "invalid DD_BITS" +#endif + +#if !defined(DL_MIN_LEN) +# define DL_MIN_LEN 3 +#endif +#if !defined(DL_SHIFT) +# define DL_SHIFT ((DL_BITS + (DL_MIN_LEN - 1)) / DL_MIN_LEN) +#endif + +#define LZO_HASH_GZIP 1 +#define LZO_HASH_GZIP_INCREMENTAL 2 +#define LZO_HASH_LZO_INCREMENTAL_A 3 +#define LZO_HASH_LZO_INCREMENTAL_B 4 + +#if !defined(LZO_HASH) +# error "choose a hashing strategy" +#endif + +#if (DL_MIN_LEN == 3) +# define _DV2_A(p,shift1,shift2) \ + (((( (lzo_uint32)((p)[0]) << shift1) ^ (p)[1]) << shift2) ^ (p)[2]) +# define _DV2_B(p,shift1,shift2) \ + (((( 
(lzo_uint32)((p)[2]) << shift1) ^ (p)[1]) << shift2) ^ (p)[0]) +# define _DV3_B(p,shift1,shift2,shift3) \ + ((_DV2_B((p)+1,shift1,shift2) << (shift3)) ^ (p)[0]) +#elif (DL_MIN_LEN == 2) +# define _DV2_A(p,shift1,shift2) \ + (( (lzo_uint32)(p[0]) << shift1) ^ p[1]) +# define _DV2_B(p,shift1,shift2) \ + (( (lzo_uint32)(p[1]) << shift1) ^ p[2]) +#else +# error "invalid DL_MIN_LEN" +#endif +#define _DV_A(p,shift) _DV2_A(p,shift,shift) +#define _DV_B(p,shift) _DV2_B(p,shift,shift) +#define DA2(p,s1,s2) \ + (((((lzo_uint32)((p)[2]) << (s2)) + (p)[1]) << (s1)) + (p)[0]) +#define DS2(p,s1,s2) \ + (((((lzo_uint32)((p)[2]) << (s2)) - (p)[1]) << (s1)) - (p)[0]) +#define DX2(p,s1,s2) \ + (((((lzo_uint32)((p)[2]) << (s2)) ^ (p)[1]) << (s1)) ^ (p)[0]) +#define DA3(p,s1,s2,s3) ((DA2((p)+1,s2,s3) << (s1)) + (p)[0]) +#define DS3(p,s1,s2,s3) ((DS2((p)+1,s2,s3) << (s1)) - (p)[0]) +#define DX3(p,s1,s2,s3) ((DX2((p)+1,s2,s3) << (s1)) ^ (p)[0]) +#define DMS(v,s) ((lzo_uint) (((v) & (D_MASK >> (s))) << (s))) +#define DM(v) DMS(v,0) + +#if (LZO_HASH == LZO_HASH_GZIP) +# define _DINDEX(dv,p) (_DV_A((p),DL_SHIFT)) + +#elif (LZO_HASH == LZO_HASH_GZIP_INCREMENTAL) +# define __LZO_HASH_INCREMENTAL +# define DVAL_FIRST(dv,p) dv = _DV_A((p),DL_SHIFT) +# define DVAL_NEXT(dv,p) dv = (((dv) << DL_SHIFT) ^ p[2]) +# define _DINDEX(dv,p) (dv) +# define DVAL_LOOKAHEAD DL_MIN_LEN + +#elif (LZO_HASH == LZO_HASH_LZO_INCREMENTAL_A) +# define __LZO_HASH_INCREMENTAL +# define DVAL_FIRST(dv,p) dv = _DV_A((p),5) +# define DVAL_NEXT(dv,p) \ + dv ^= (lzo_uint32)(p[-1]) << (2*5); dv = (((dv) << 5) ^ p[2]) +# define _DINDEX(dv,p) ((0x9f5f * (dv)) >> 5) +# define DVAL_LOOKAHEAD DL_MIN_LEN + +#elif (LZO_HASH == LZO_HASH_LZO_INCREMENTAL_B) +# define __LZO_HASH_INCREMENTAL +# define DVAL_FIRST(dv,p) dv = _DV_B((p),5) +# define DVAL_NEXT(dv,p) \ + dv ^= p[-1]; dv = (((dv) >> 5) ^ ((lzo_uint32)(p[2]) << (2*5))) +# define _DINDEX(dv,p) ((0x9f5f * (dv)) >> 5) +# define DVAL_LOOKAHEAD DL_MIN_LEN + +#else +# error "choose a hashing strategy" +#endif + +#ifndef DINDEX +#define DINDEX(dv,p) ((lzo_uint)((_DINDEX(dv,p)) & DL_MASK) << DD_BITS) +#endif +#if !defined(DINDEX1) && defined(D_INDEX1) +#define DINDEX1 D_INDEX1 +#endif +#if !defined(DINDEX2) && defined(D_INDEX2) +#define DINDEX2 D_INDEX2 +#endif + +#if !defined(__LZO_HASH_INCREMENTAL) +# define DVAL_FIRST(dv,p) ((void) 0) +# define DVAL_NEXT(dv,p) ((void) 0) +# define DVAL_LOOKAHEAD 0 +#endif + +#if !defined(DVAL_ASSERT) +#if defined(__LZO_HASH_INCREMENTAL) && !defined(NDEBUG) +static void DVAL_ASSERT(lzo_uint32 dv, const lzo_byte * p) +{ + lzo_uint32 df; + DVAL_FIRST(df, (p)); + assert(DINDEX(dv, p) == DINDEX(df, p)); +} +#else +# define DVAL_ASSERT(dv,p) ((void) 0) +#endif +#endif + +# define DENTRY(p,in) (p) +# define GINDEX(m_pos,m_off,dict,dindex,in) m_pos = dict[dindex] + +#if (DD_BITS == 0) + +# define UPDATE_D(dict,drun,dv,p,in) dict[ DINDEX(dv,p) ] = DENTRY(p,in) +# define UPDATE_I(dict,drun,index,p,in) dict[index] = DENTRY(p,in) +# define UPDATE_P(ptr,drun,p,in) (ptr)[0] = DENTRY(p,in) + +#else + +# define UPDATE_D(dict,drun,dv,p,in) \ + dict[ DINDEX(dv,p) + drun++ ] = DENTRY(p,in); drun &= DD_MASK +# define UPDATE_I(dict,drun,index,p,in) \ + dict[ (index) + drun++ ] = DENTRY(p,in); drun &= DD_MASK +# define UPDATE_P(ptr,drun,p,in) \ + (ptr) [ drun++ ] = DENTRY(p,in); drun &= DD_MASK + +#endif + +#define LZO_CHECK_MPOS_DET(m_pos,m_off,in,ip,max_offset) \ + (m_pos == NULL || (m_off = (lzo_moff_t) (ip - m_pos)) > max_offset) + +#define 
LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,max_offset) \ + (BOUNDS_CHECKING_OFF_IN_EXPR( \ + (PTR_LT(m_pos,in) || \ + (m_off = (lzo_moff_t) PTR_DIFF(ip,m_pos)) <= 0 || \ + m_off > max_offset) )) + +#if defined(LZO_DETERMINISTIC) +# define LZO_CHECK_MPOS LZO_CHECK_MPOS_DET +#else +# define LZO_CHECK_MPOS LZO_CHECK_MPOS_NON_DET +#endif +#endif +#endif +#endif +#define DO_COMPRESS lzo1x_1_compress +static +lzo_uint do_compress(const lzo_byte * in, lzo_uint in_len, + lzo_byte * out, lzo_uintp out_len, lzo_voidp wrkmem) +{ + register const lzo_byte *ip; + lzo_byte *op; + const lzo_byte *const in_end = in + in_len; + const lzo_byte *const ip_end = in + in_len - M2_MAX_LEN - 5; + const lzo_byte *ii; + lzo_dict_p const dict = (lzo_dict_p) wrkmem; + + op = out; + ip = in; + ii = ip; + + ip += 4; + for (;;) { + register const lzo_byte *m_pos; + + lzo_moff_t m_off; + lzo_uint m_len; + lzo_uint dindex; + + DINDEX1(dindex, ip); + GINDEX(m_pos, m_off, dict, dindex, in); + if (LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, M4_MAX_OFFSET)) + goto literal; +#if 1 + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + DINDEX2(dindex, ip); +#endif + GINDEX(m_pos, m_off, dict, dindex, in); + if (LZO_CHECK_MPOS_NON_DET(m_pos, m_off, in, ip, M4_MAX_OFFSET)) + goto literal; + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + goto literal; + + try_match: +#if 1 && defined(LZO_UNALIGNED_OK_2) + if (*(const lzo_ushortp)m_pos != *(const lzo_ushortp)ip) { +#else + if (m_pos[0] != ip[0] || m_pos[1] != ip[1]) { +#endif + ; + } else { + if (m_pos[2] == ip[2]) { + goto match; + } else { + ; + } + } + + literal: + UPDATE_I(dict, 0, dindex, ip, in); + ++ip; + if (ip >= ip_end) + break; + continue; + + match: + UPDATE_I(dict, 0, dindex, ip, in); + if (pd(ip, ii) > 0) { + register lzo_uint t = pd(ip, ii); + + if (t <= 3) { + assert("lzo-04", op - 2 > out); + op[-2] |= LZO_BYTE(t); + } else if (t <= 18) + *op++ = LZO_BYTE(t - 3); + else { + register lzo_uint tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + assert("lzo-05", tt > 0); + *op++ = LZO_BYTE(tt); + } + do + *op++ = *ii++; + while (--t > 0); + } + + assert("lzo-06", ii == ip); + ip += 3; + if (m_pos[3] != *ip++ || m_pos[4] != *ip++ || m_pos[5] != *ip++ + || m_pos[6] != *ip++ || m_pos[7] != *ip++ + || m_pos[8] != *ip++ +#ifdef LZO1Y + || m_pos[9] != *ip++ || m_pos[10] != *ip++ + || m_pos[11] != *ip++ || m_pos[12] != *ip++ + || m_pos[13] != *ip++ || m_pos[14] != *ip++ +#endif + ) { + --ip; + m_len = ip - ii; + assert("lzo-07", m_len >= 3); + assert("lzo-08", m_len <= M2_MAX_LEN); + + if (m_off <= M2_MAX_OFFSET) { + m_off -= 1; +#if defined(LZO1X) + *op++ = + LZO_BYTE(((m_len - + 1) << 5) | ((m_off & 7) << 2)); + *op++ = LZO_BYTE(m_off >> 3); +#elif defined(LZO1Y) + *op++ = + LZO_BYTE(((m_len + + 1) << 4) | ((m_off & 3) << 2)); + *op++ = LZO_BYTE(m_off >> 2); +#endif + } else if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + *op++ = LZO_BYTE(M3_MARKER | (m_len - 2)); + goto m3_m4_offset; + } else +#if defined(LZO1X) + { + m_off -= 0x4000; + assert("lzo-09", m_off > 0); + assert("lzo-10", m_off <= 0x7fff); + *op++ = LZO_BYTE(M4_MARKER | + ((m_off & 0x4000) >> 11) | + (m_len - 2)); + goto m3_m4_offset; + } +#elif defined(LZO1Y) + goto m4_match; +#endif + } else { + { + const lzo_byte *end = in_end; + const lzo_byte *m = m_pos + M2_MAX_LEN + 1; + while (ip < end && *m == *ip) + m++, ip++; + m_len = (ip - ii); + } + assert("lzo-11", m_len > M2_MAX_LEN); + + if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + if (m_len <= 
33) + *op++ = + LZO_BYTE(M3_MARKER | (m_len - 2)); + else { + m_len -= 33; + *op++ = M3_MARKER | 0; + goto m3_m4_len; + } + } else { +#if defined(LZO1Y) + m4_match: +#endif + m_off -= 0x4000; + assert("lzo-12", m_off > 0); + assert("lzo-13", m_off <= 0x7fff); + if (m_len <= M4_MAX_LEN) + *op++ = LZO_BYTE(M4_MARKER | + ((m_off & 0x4000) >> + 11) | (m_len - 2)); + else { + m_len -= M4_MAX_LEN; + *op++ = + LZO_BYTE(M4_MARKER | + ((m_off & 0x4000) >> 11)); + m3_m4_len: + while (m_len > 255) { + m_len -= 255; + *op++ = 0; + } + assert("lzo-14", m_len > 0); + *op++ = LZO_BYTE(m_len); + } + } + + m3_m4_offset: + *op++ = LZO_BYTE((m_off & 63) << 2); + *op++ = LZO_BYTE(m_off >> 6); + } + + ii = ip; + if (ip >= ip_end) + break; + } + + *out_len = op - out; + return pd(in_end, ii); +} + +int DO_COMPRESS(const lzo_byte * in, lzo_uint in_len, + lzo_byte * out, lzo_uintp out_len, lzo_voidp wrkmem) +{ + lzo_byte *op = out; + lzo_uint t; + +#if defined(__LZO_QUERY_COMPRESS) + if (__LZO_IS_COMPRESS_QUERY(in, in_len, out, out_len, wrkmem)) + return __LZO_QUERY_COMPRESS(in, in_len, out, out_len, wrkmem, + D_SIZE, lzo_sizeof(lzo_dict_t)); +#endif + + if (in_len <= M2_MAX_LEN + 5) + t = in_len; + else { + t = do_compress(in, in_len, op, out_len, wrkmem); + op += *out_len; + } + + if (t > 0) { + const lzo_byte *ii = in + in_len - t; + + if (op == out && t <= 238) + *op++ = LZO_BYTE(17 + t); + else if (t <= 3) + op[-2] |= LZO_BYTE(t); + else if (t <= 18) + *op++ = LZO_BYTE(t - 3); + else { + lzo_uint tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + assert("lzo-15", tt > 0); + *op++ = LZO_BYTE(tt); + } + do + *op++ = *ii++; + while (--t > 0); + } + + *op++ = M4_MARKER | 1; + *op++ = 0; + *op++ = 0; + + *out_len = op - out; + return LZO_E_OK; +} + +#undef do_compress +#undef DO_COMPRESS +#undef LZO_HASH + +#undef LZO_TEST_DECOMPRESS_OVERRUN +#undef LZO_TEST_DECOMPRESS_OVERRUN_INPUT +#undef LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT +#undef LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND +#undef DO_DECOMPRESS +#define DO_DECOMPRESS lzo1x_decompress + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN) +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_INPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND +# endif +#endif + +#undef TEST_IP +#undef TEST_OP +#undef TEST_LOOKBEHIND +#undef NEED_IP +#undef NEED_OP +#undef HAVE_TEST_IP +#undef HAVE_TEST_OP +#undef HAVE_NEED_IP +#undef HAVE_NEED_OP +#undef HAVE_ANY_IP +#undef HAVE_ANY_OP + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 1) +# define TEST_IP (ip < ip_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 2) +# define NEED_IP(x) \ + if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 1) +# define TEST_OP (op <= op_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 2) +# undef TEST_OP +# define NEED_OP(x) \ + if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define TEST_LOOKBEHIND(m_pos,out) if (m_pos < out) goto lookbehind_overrun +#else +# define TEST_LOOKBEHIND(m_pos,op) ((void) 0) +#endif + +#if !defined(LZO_EOF_CODE) && !defined(TEST_IP) +# define TEST_IP (ip < ip_end) +#endif 
+ +#if defined(TEST_IP) +# define HAVE_TEST_IP +#else +# define TEST_IP 1 +#endif +#if defined(TEST_OP) +# define HAVE_TEST_OP +#else +# define TEST_OP 1 +#endif + +#if defined(NEED_IP) +# define HAVE_NEED_IP +#else +# define NEED_IP(x) ((void) 0) +#endif +#if defined(NEED_OP) +# define HAVE_NEED_OP +#else +# define NEED_OP(x) ((void) 0) +#endif + +#if defined(HAVE_TEST_IP) || defined(HAVE_NEED_IP) +# define HAVE_ANY_IP +#endif +#if defined(HAVE_TEST_OP) || defined(HAVE_NEED_OP) +# define HAVE_ANY_OP +#endif + +#undef __COPY4 +#define __COPY4(dst,src) * (lzo_uint32p)(dst) = * (const lzo_uint32p)(src) + +#undef COPY4 +#if defined(LZO_UNALIGNED_OK_4) +# define COPY4(dst,src) __COPY4(dst,src) +#elif defined(LZO_ALIGNED_OK_4) +# define COPY4(dst,src) __COPY4((lzo_ptr_t)(dst),(lzo_ptr_t)(src)) +#endif + +#if defined(DO_DECOMPRESS) +int DO_DECOMPRESS(const lzo_byte * in, lzo_uint in_len, + lzo_byte * out, lzo_uintp out_len, lzo_voidp wrkmem) +#endif +{ + register lzo_byte *op; + register const lzo_byte *ip; + register lzo_uint t; +#if defined(COPY_DICT) + lzo_uint m_off; + const lzo_byte *dict_end; +#else + register const lzo_byte *m_pos; +#endif + + const lzo_byte *const ip_end = in + in_len; +#if defined(HAVE_ANY_OP) + lzo_byte *const op_end = out + *out_len; +#endif +#if defined(LZO1Z) + lzo_uint last_m_off = 0; +#endif + + LZO_UNUSED(wrkmem); + +#if defined(__LZO_QUERY_DECOMPRESS) + if (__LZO_IS_DECOMPRESS_QUERY(in, in_len, out, out_len, wrkmem)) + return __LZO_QUERY_DECOMPRESS(in, in_len, out, out_len, wrkmem, + 0, 0); +#endif + +#if defined(COPY_DICT) + if (dict) { + if (dict_len > M4_MAX_OFFSET) { + dict += dict_len - M4_MAX_OFFSET; + dict_len = M4_MAX_OFFSET; + } + dict_end = dict + dict_len; + } else { + dict_len = 0; + dict_end = NULL; + } +#endif + + *out_len = 0; + + op = out; + ip = in; + + if (*ip > 17) { + t = *ip++ - 17; + if (t < 4) + goto match_next; + assert("lzo-16", t > 0); + NEED_OP(t); + NEED_IP(t + 1); + do + *op++ = *ip++; + while (--t > 0); + goto first_literal_run; + } + + while (TEST_IP && TEST_OP) { + t = *ip++; + if (t >= 16) + goto match; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + t += 15 + *ip++; + } + assert("lzo-17", t > 0); + NEED_OP(t + 3); + NEED_IP(t + 4); +#if defined(LZO_UNALIGNED_OK_4) || defined(LZO_ALIGNED_OK_4) +#if !defined(LZO_UNALIGNED_OK_4) + if (PTR_ALIGNED2_4(op, ip)) { +#endif + COPY4(op, ip); + op += 4; + ip += 4; + if (--t > 0) { + if (t >= 4) { + do { + COPY4(op, ip); + op += 4; + ip += 4; + t -= 4; + } while (t >= 4); + if (t > 0) + do + *op++ = *ip++; + while (--t > 0); + } else + do + *op++ = *ip++; + while (--t > 0); + } +#if !defined(LZO_UNALIGNED_OK_4) + } else +#endif +#endif +#if !defined(LZO_UNALIGNED_OK_4) + { + *op++ = *ip++; + *op++ = *ip++; + *op++ = *ip++; + do + *op++ = *ip++; + while (--t > 0); + } +#endif + + first_literal_run: + + t = *ip++; + if (t >= 16) + goto match; +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2); + last_m_off = m_off; +#else + m_off = (1 + M2_MAX_OFFSET) + (t >> 2) + (*ip++ << 2); +#endif + NEED_OP(3); + t = 3; + COPY_DICT(t, m_off) +#else +#if defined(LZO1Z) + t = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2); + m_pos = op - t; + last_m_off = t; +#else + m_pos = op - (1 + M2_MAX_OFFSET); + m_pos -= t >> 2; + m_pos -= *ip++ << 2; +#endif + TEST_LOOKBEHIND(m_pos, out); + NEED_OP(3); + *op++ = *m_pos++; + *op++ = *m_pos++; + *op++ = *m_pos; +#endif + goto match_done; + + while (TEST_IP && TEST_OP) { + 
match: + if (t >= 64) { +#if defined(COPY_DICT) +#if defined(LZO1X) + m_off = 1 + ((t >> 2) & 7) + (*ip++ << 3); + t = (t >> 5) - 1; +#elif defined(LZO1Y) + m_off = 1 + ((t >> 2) & 3) + (*ip++ << 2); + t = (t >> 4) - 3; +#elif defined(LZO1Z) + m_off = t & 0x1f; + if (m_off >= 0x1c) + m_off = last_m_off; + else { + m_off = 1 + (m_off << 6) + (*ip++ >> 2); + last_m_off = m_off; + } + t = (t >> 5) - 1; +#endif +#else +#if defined(LZO1X) + m_pos = op - 1; + m_pos -= (t >> 2) & 7; + m_pos -= *ip++ << 3; + t = (t >> 5) - 1; +#elif defined(LZO1Y) + m_pos = op - 1; + m_pos -= (t >> 2) & 3; + m_pos -= *ip++ << 2; + t = (t >> 4) - 3; +#elif defined(LZO1Z) + { + lzo_uint off = t & 0x1f; + m_pos = op; + if (off >= 0x1c) { + assert(last_m_off > 0); + m_pos -= last_m_off; + } else { + off = + 1 + (off << 6) + + (*ip++ >> 2); + m_pos -= off; + last_m_off = off; + } + } + t = (t >> 5) - 1; +#endif + TEST_LOOKBEHIND(m_pos, out); + assert("lzo-18", t > 0); + NEED_OP(t + 3 - 1); + goto copy_match; +#endif + } else if (t >= 32) { + t &= 31; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + t += 31 + *ip++; + } +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = 1 + (ip[0] << 6) + (ip[1] >> 2); + last_m_off = m_off; +#else + m_off = 1 + (ip[0] >> 2) + (ip[1] << 6); +#endif +#else +#if defined(LZO1Z) + { + lzo_uint off = + 1 + (ip[0] << 6) + (ip[1] >> 2); + m_pos = op - off; + last_m_off = off; + } +#elif defined(LZO_UNALIGNED_OK_2) && (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + m_pos = op - 1; + m_pos -= (*(const lzo_ushortp)ip) >> 2; +#else + m_pos = op - 1; + m_pos -= (ip[0] >> 2) + (ip[1] << 6); +#endif +#endif + ip += 2; + } else if (t >= 16) { +#if defined(COPY_DICT) + m_off = (t & 8) << 11; +#else + m_pos = op; + m_pos -= (t & 8) << 11; +#endif + t &= 7; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + t += 7 + *ip++; + } +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off += (ip[0] << 6) + (ip[1] >> 2); +#else + m_off += (ip[0] >> 2) + (ip[1] << 6); +#endif + ip += 2; + if (m_off == 0) + goto eof_found; + m_off += 0x4000; +#if defined(LZO1Z) + last_m_off = m_off; +#endif +#else +#if defined(LZO1Z) + m_pos -= (ip[0] << 6) + (ip[1] >> 2); +#elif defined(LZO_UNALIGNED_OK_2) && (LZO_BYTE_ORDER == LZO_LITTLE_ENDIAN) + m_pos -= (*(const lzo_ushortp)ip) >> 2; +#else + m_pos -= (ip[0] >> 2) + (ip[1] << 6); +#endif + ip += 2; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; +#if defined(LZO1Z) + last_m_off = op - m_pos; +#endif +#endif + } else { +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = 1 + (t << 6) + (*ip++ >> 2); + last_m_off = m_off; +#else + m_off = 1 + (t >> 2) + (*ip++ << 2); +#endif + NEED_OP(2); + t = 2; + COPY_DICT(t, m_off) +#else +#if defined(LZO1Z) + t = 1 + (t << 6) + (*ip++ >> 2); + m_pos = op - t; + last_m_off = t; +#else + m_pos = op - 1; + m_pos -= t >> 2; + m_pos -= *ip++ << 2; +#endif + TEST_LOOKBEHIND(m_pos, out); + NEED_OP(2); + *op++ = *m_pos++; + *op++ = *m_pos; +#endif + goto match_done; + } + +#if defined(COPY_DICT) + + NEED_OP(t + 3 - 1); + t += 3 - 1; + COPY_DICT(t, m_off) +#else + + TEST_LOOKBEHIND(m_pos, out); + assert("lzo-19", t > 0); + NEED_OP(t + 3 - 1); +#if defined(LZO_UNALIGNED_OK_4) || defined(LZO_ALIGNED_OK_4) +#if !defined(LZO_UNALIGNED_OK_4) + if (t >= 2 * 4 - (3 - 1) && PTR_ALIGNED2_4(op, m_pos)) { + assert((op - m_pos) >= 4); +#else + if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { +#endif + COPY4(op, m_pos); + op += 4; + m_pos += 4; + t -= 4 - (3 - 1); + do { + 
COPY4(op, m_pos); + op += 4; + m_pos += 4; + t -= 4; + } while (t >= 4); + if (t > 0) + do + *op++ = *m_pos++; + while (--t > 0); + } else +#endif + { + copy_match: + *op++ = *m_pos++; + *op++ = *m_pos++; + do + *op++ = *m_pos++; + while (--t > 0); + } + +#endif + + match_done: +#if defined(LZO1Z) + t = ip[-1] & 3; +#else + t = ip[-2] & 3; +#endif + if (t == 0) + break; + + match_next: + assert("lzo-20", t > 0); + NEED_OP(t); + NEED_IP(t + 1); + do + *op++ = *ip++; + while (--t > 0); + t = *ip++; + } + } + +#if defined(HAVE_TEST_IP) || defined(HAVE_TEST_OP) + *out_len = op - out; + return LZO_E_EOF_NOT_FOUND; +#endif + + eof_found: + assert("lzo-21", t == 1); + *out_len = op - out; + return (ip == ip_end ? LZO_E_OK : + (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); + +#if defined(HAVE_NEED_IP) + input_overrun: + *out_len = op - out; + return LZO_E_INPUT_OVERRUN; +#endif + +#if defined(HAVE_NEED_OP) + output_overrun: + *out_len = op - out; + return LZO_E_OUTPUT_OVERRUN; +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) + lookbehind_overrun: + *out_len = op - out; + return LZO_E_LOOKBEHIND_OVERRUN; +#endif +} + +#define LZO_TEST_DECOMPRESS_OVERRUN +#undef DO_DECOMPRESS +#define DO_DECOMPRESS lzo1x_decompress_safe + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN) +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_INPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# define LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT 2 +# endif +# if !defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND +# endif +#endif + +#undef TEST_IP +#undef TEST_OP +#undef TEST_LOOKBEHIND +#undef NEED_IP +#undef NEED_OP +#undef HAVE_TEST_IP +#undef HAVE_TEST_OP +#undef HAVE_NEED_IP +#undef HAVE_NEED_OP +#undef HAVE_ANY_IP +#undef HAVE_ANY_OP + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_INPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 1) +# define TEST_IP (ip < ip_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_INPUT >= 2) +# define NEED_IP(x) \ + if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT) +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 1) +# define TEST_OP (op <= op_end) +# endif +# if (LZO_TEST_DECOMPRESS_OVERRUN_OUTPUT >= 2) +# undef TEST_OP +# define NEED_OP(x) \ + if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun +# endif +#endif + +#if defined(LZO_TEST_DECOMPRESS_OVERRUN_LOOKBEHIND) +# define TEST_LOOKBEHIND(m_pos,out) if (m_pos < out) goto lookbehind_overrun +#else +# define TEST_LOOKBEHIND(m_pos,op) ((void) 0) +#endif + +#if !defined(LZO_EOF_CODE) && !defined(TEST_IP) +# define TEST_IP (ip < ip_end) +#endif + +#if defined(TEST_IP) +# define HAVE_TEST_IP +#else +# define TEST_IP 1 +#endif +#if defined(TEST_OP) +# define HAVE_TEST_OP +#else +# define TEST_OP 1 +#endif + +#if defined(NEED_IP) +# define HAVE_NEED_IP +#else +# define NEED_IP(x) ((void) 0) +#endif +#if defined(NEED_OP) +# define HAVE_NEED_OP +#else +# define NEED_OP(x) ((void) 0) +#endif + +#if defined(HAVE_TEST_IP) || defined(HAVE_NEED_IP) +# define HAVE_ANY_IP +#endif +#if defined(HAVE_TEST_OP) || defined(HAVE_NEED_OP) +# define HAVE_ANY_OP +#endif + +#undef __COPY4 +#define __COPY4(dst,src) * (lzo_uint32p)(dst) = * (const lzo_uint32p)(src) + +#undef COPY4 +#if defined(LZO_UNALIGNED_OK_4) +# define COPY4(dst,src) __COPY4(dst,src) +#elif defined(LZO_ALIGNED_OK_4) +# define COPY4(dst,src) __COPY4((lzo_ptr_t)(dst),(lzo_ptr_t)(src)) +#endif + 
+/***** End of minilzo.c *****/ diff --git a/fs/reiser4/plugin/compress/minilzo.h b/fs/reiser4/plugin/compress/minilzo.h new file mode 100644 index 000000000000..6a470012db04 --- /dev/null +++ b/fs/reiser4/plugin/compress/minilzo.h @@ -0,0 +1,70 @@ +/* minilzo.h -- mini subset of the LZO real-time data compression library + adopted for reiser4 compression transform plugin. + + This file is part of the LZO real-time data compression library + and not included in any proprietary licenses of reiser4. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + <markus@oberhumer.com> + http://www.oberhumer.com/opensource/lzo/ + */ + +/* + * NOTE: + * the full LZO package can be found at + * http://www.oberhumer.com/opensource/lzo/ + */ + +#ifndef __MINILZO_H +#define __MINILZO_H + +#define MINILZO_VERSION 0x1080 + +#include "lzoconf.h" + +/* Memory required for the wrkmem parameter. + * When the required size is 0, you can also pass a NULL pointer. 
+ */ + +#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS +#define LZO1X_1_MEM_COMPRESS ((lzo_uint32) (16384L * lzo_sizeof_dict_t)) +#define LZO1X_MEM_DECOMPRESS (0) + +/* compression */ +extern int lzo1x_1_compress(const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem); +/* decompression */ +extern int lzo1x_decompress(const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem /* NOT USED */); +/* safe decompression with overrun testing */ +extern int lzo1x_decompress_safe(const lzo_byte * src, lzo_uint src_len, + lzo_byte * dst, lzo_uintp dst_len, + lzo_voidp wrkmem /* NOT USED */ ); + +#endif /* already included */ diff --git a/fs/reiser4/plugin/crypto/cipher.c b/fs/reiser4/plugin/crypto/cipher.c new file mode 100644 index 000000000000..e9181541ef6c --- /dev/null +++ b/fs/reiser4/plugin/crypto/cipher.c @@ -0,0 +1,37 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, + licensing governed by reiser4/README */ +/* Reiser4 cipher transform plugins */ + +#include "../../debug.h" +#include "../plugin.h" + +cipher_plugin cipher_plugins[LAST_CIPHER_ID] = { + [NONE_CIPHER_ID] = { + .h = { + .type_id = REISER4_CIPHER_PLUGIN_TYPE, + .id = NONE_CIPHER_ID, + .pops = NULL, + .label = "none", + .desc = "no cipher transform", + .linkage = {NULL, NULL} + }, + .alloc = NULL, + .free = NULL, + .scale = NULL, + .align_stream = NULL, + .setkey = NULL, + .encrypt = NULL, + .decrypt = NULL + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/crypto/cipher.h b/fs/reiser4/plugin/crypto/cipher.h new file mode 100644 index 000000000000..a7920e0a1e95 --- /dev/null +++ b/fs/reiser4/plugin/crypto/cipher.h @@ -0,0 +1,55 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* This file contains definitions for the objects operated + by reiser4 key manager, which is something like keyring + wrapped by appropriate reiser4 plugin */ + +#if !defined( __FS_REISER4_CRYPT_H__ ) +#define __FS_REISER4_CRYPT_H__ + +#include <linux/crypto.h> + +/* key info imported from user space */ +struct reiser4_crypto_data { + int keysize; /* uninstantiated key size */ + __u8 * key; /* uninstantiated key */ + int keyid_size; /* size of passphrase */ + __u8 * keyid; /* passphrase */ +}; + +/* This object contains all needed infrastructure to implement + cipher transform. This is operated (allocating, inheriting, + validating, binding to host inode, etc..) by reiser4 key manager. + + This info can be allocated in two cases: + 1. importing a key from user space. + 2. reading inode from disk */ +struct reiser4_crypto_info { + struct inode * host; + struct crypto_hash * digest; + struct crypto_blkcipher * cipher; +#if 0 + cipher_key_plugin * kplug; /* key manager */ +#endif + __u8 * keyid; /* key fingerprint, created by digest plugin, + using uninstantiated key and passphrase. 
+ supposed to be stored in disk stat-data */ + int inst; /* this indicates if the cipher key is + instantiated (case 1 above) */ + int keysize; /* uninstantiated key size (bytes), supposed + to be stored in disk stat-data */ + int keyload_count; /* number of the objects which has this + crypto-stat attached */ +}; + +#endif /* __FS_REISER4_CRYPT_H__ */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/crypto/digest.c b/fs/reiser4/plugin/crypto/digest.c new file mode 100644 index 000000000000..7508917d4407 --- /dev/null +++ b/fs/reiser4/plugin/crypto/digest.c @@ -0,0 +1,58 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* reiser4 digest transform plugin (is used by cryptcompress object plugin) */ +/* EDWARD-FIXME-HANS: and it does what? a digest is a what? */ +#include "../../debug.h" +#include "../plugin_header.h" +#include "../plugin.h" +#include "../file/cryptcompress.h" + +#include <linux/types.h> + +extern digest_plugin digest_plugins[LAST_DIGEST_ID]; + +static struct crypto_hash * alloc_sha256 (void) +{ +#if REISER4_SHA256 + return crypto_alloc_hash ("sha256", 0, CRYPTO_ALG_ASYNC); +#else + warning("edward-1418", "sha256 unsupported"); + return ERR_PTR(-EINVAL); +#endif +} + +static void free_sha256 (struct crypto_hash * tfm) +{ +#if REISER4_SHA256 + crypto_free_hash(tfm); +#endif + return; +} + +/* digest plugins */ +digest_plugin digest_plugins[LAST_DIGEST_ID] = { + [SHA256_32_DIGEST_ID] = { + .h = { + .type_id = REISER4_DIGEST_PLUGIN_TYPE, + .id = SHA256_32_DIGEST_ID, + .pops = NULL, + .label = "sha256_32", + .desc = "sha256_32 digest transform", + .linkage = {NULL, NULL} + }, + .fipsize = sizeof(__u32), + .alloc = alloc_sha256, + .free = free_sha256 + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/dir/Makefile b/fs/reiser4/plugin/dir/Makefile new file mode 100644 index 000000000000..ed370b1ed093 --- /dev/null +++ b/fs/reiser4/plugin/dir/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_REISER4_FS) += dir_plugins.o + +dir_plugins-objs := \ + hashed_dir.o \ + seekable_dir.o diff --git a/fs/reiser4/plugin/dir/dir.h b/fs/reiser4/plugin/dir/dir.h new file mode 100644 index 000000000000..4a91ebeb3232 --- /dev/null +++ b/fs/reiser4/plugin/dir/dir.h @@ -0,0 +1,36 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* this file contains declarations of methods implementing directory plugins */ + +#if !defined( __REISER4_DIR_H__ ) +#define __REISER4_DIR_H__ + +/*#include "../../key.h" + +#include <linux/fs.h>*/ + +/* declarations of functions implementing HASHED_DIR_PLUGIN_ID dir plugin */ + +/* "hashed" directory methods of dir plugin */ +void build_entry_key_hashed(const struct inode *, const struct qstr *, + reiser4_key *); + +/* declarations of functions implementing SEEKABLE_HASHED_DIR_PLUGIN_ID dir plugin */ + +/* "seekable" directory methods of dir plugin */ +void build_entry_key_seekable(const struct inode *, const struct qstr *, + reiser4_key *); + +/* __REISER4_DIR_H__ */ +#endif + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/dir/hashed_dir.c b/fs/reiser4/plugin/dir/hashed_dir.c new file mode 100644 
index 000000000000..0f34824dbae4 --- /dev/null +++ b/fs/reiser4/plugin/dir/hashed_dir.c @@ -0,0 +1,81 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Directory plugin using hashes (see fs/reiser4/plugin/hash.c) to map file + names to the files. */ + +/* + * Hashed directory logically consists of persistent directory + * entries. Directory entry is a pair of a file name and a key of stat-data of + * a file that has this name in the given directory. + * + * Directory entries are stored in the tree in the form of directory + * items. Directory item should implement dir_entry_ops portion of item plugin + * interface (see plugin/item/item.h). Hashed directory interacts with + * directory item plugin exclusively through dir_entry_ops operations. + * + * Currently there are two implementations of directory items: "simple + * directory item" (plugin/item/sde.[ch]), and "compound directory item" + * (plugin/item/cde.[ch]) with the latter being the default. + * + * There is, however some delicate way through which directory code interferes + * with item plugin: key assignment policy. A key for a directory item is + * chosen by directory code, and as described in kassign.c, this key contains + * a portion of file name. Directory item uses this knowledge to avoid storing + * this portion of file name twice: in the key and in the directory item body. + * + */ + +#include "../../inode.h" + +void complete_entry_key(const struct inode *, const char *name, + int len, reiser4_key * result); + +/* this is implementation of build_entry_key method of dir + plugin for HASHED_DIR_PLUGIN_ID + */ +void build_entry_key_hashed(const struct inode *dir, /* directory where entry is + * (or will be) in.*/ + const struct qstr *qname, /* name of file referenced + * by this entry */ + reiser4_key * result /* resulting key of directory + * entry */ ) +{ + const char *name; + int len; + + assert("nikita-1139", dir != NULL); + assert("nikita-1140", qname != NULL); + assert("nikita-1141", qname->name != NULL); + assert("nikita-1142", result != NULL); + + name = qname->name; + len = qname->len; + + assert("nikita-2867", strlen(name) == len); + + reiser4_key_init(result); + /* locality of directory entry's key is objectid of parent + directory */ + set_key_locality(result, get_inode_oid(dir)); + /* minor packing locality is constant */ + set_key_type(result, KEY_FILE_NAME_MINOR); + /* dot is special case---we always want it to be first entry in + a directory. Actually, we just want to have smallest + directory entry. + */ + if (len == 1 && name[0] == '.') + return; + + /* initialize part of entry key which depends on file name */ + complete_entry_key(dir, name, len, result); +} + +/* Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/dir/seekable_dir.c b/fs/reiser4/plugin/dir/seekable_dir.c new file mode 100644 index 000000000000..c1c6c4cc400f --- /dev/null +++ b/fs/reiser4/plugin/dir/seekable_dir.c @@ -0,0 +1,46 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "../../inode.h" + +/* this is implementation of build_entry_key method of dir + plugin for SEEKABLE_HASHED_DIR_PLUGIN_ID + This is for directories where we want repeatable and restartable readdir() + even in case 32bit user level struct dirent (readdir(3)). 
+*/ +void +build_entry_key_seekable(const struct inode *dir, const struct qstr *name, + reiser4_key * result) +{ + oid_t objectid; + + assert("nikita-2283", dir != NULL); + assert("nikita-2284", name != NULL); + assert("nikita-2285", name->name != NULL); + assert("nikita-2286", result != NULL); + + reiser4_key_init(result); + /* locality of directory entry's key is objectid of parent + directory */ + set_key_locality(result, get_inode_oid(dir)); + /* minor packing locality is constant */ + set_key_type(result, KEY_FILE_NAME_MINOR); + /* dot is special case---we always want it to be first entry in + a directory. Actually, we just want to have smallest + directory entry. + */ + if ((name->len == 1) && (name->name[0] == '.')) + return; + + /* objectid of key is 31 lowest bits of hash. */ + objectid = + inode_hash_plugin(dir)->hash(name->name, + (int)name->len) & 0x7fffffff; + + assert("nikita-2303", !(objectid & ~KEY_OBJECTID_MASK)); + set_key_objectid(result, objectid); + + /* offset is always 0. */ + set_key_offset(result, (__u64) 0); + return; +} diff --git a/fs/reiser4/plugin/dir_plugin_common.c b/fs/reiser4/plugin/dir_plugin_common.c new file mode 100644 index 000000000000..5e72fa670adb --- /dev/null +++ b/fs/reiser4/plugin/dir_plugin_common.c @@ -0,0 +1,865 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + reiser4/README */ + +/* this file contains typical implementations for most of methods of + directory plugin +*/ + +#include "../inode.h" + +int reiser4_find_entry(struct inode *dir, struct dentry *name, + lock_handle * , znode_lock_mode, reiser4_dir_entry_desc *); +int reiser4_lookup_name(struct inode *parent, struct dentry *dentry, + reiser4_key * key); +void check_light_weight(struct inode *inode, struct inode *parent); + +/* this is common implementation of get_parent method of dir plugin + this is used by NFS kernel server to "climb" up directory tree to + check permissions + */ +struct dentry *get_parent_common(struct inode *child) +{ + struct super_block *s; + struct inode *parent; + struct dentry dotdot; + struct dentry *dentry; + reiser4_key key; + int result; + + /* + * lookup dotdot entry. + */ + + s = child->i_sb; + memset(&dotdot, 0, sizeof(dotdot)); + dotdot.d_name.name = ".."; + dotdot.d_name.len = 2; + dotdot.d_op = &get_super_private(s)->ops.dentry; + + result = reiser4_lookup_name(child, &dotdot, &key); + if (result != 0) + return ERR_PTR(result); + + parent = reiser4_iget(s, &key, 1); + if (!IS_ERR(parent)) { + /* + * FIXME-NIKITA dubious: attributes are inherited from @child + * to @parent. But: + * + * (*) this is the only this we can do + * + * (*) attributes of light-weight object are inherited + * from a parent through which object was looked up first, + * so it is ambiguous anyway. 
+ * + */ + check_light_weight(parent, child); + reiser4_iget_complete(parent); + dentry = d_obtain_alias(parent); + if (!IS_ERR(dentry)) + dentry->d_op = &get_super_private(s)->ops.dentry; + } else if (PTR_ERR(parent) == -ENOENT) + dentry = ERR_PTR(RETERR(-ESTALE)); + else + dentry = (void *)parent; + return dentry; +} + +/* this is common implementation of is_name_acceptable method of dir + plugin + */ +int is_name_acceptable_common(const struct inode *inode, /* directory to check*/ + const char *name UNUSED_ARG, /* name to check */ + int len/* @name's length */) +{ + assert("nikita-733", inode != NULL); + assert("nikita-734", name != NULL); + assert("nikita-735", len > 0); + + return len <= reiser4_max_filename_len(inode); +} + +/* there is no common implementation of build_entry_key method of dir + plugin. See plugin/dir/hashed_dir.c:build_entry_key_hashed() or + plugin/dir/seekable.c:build_entry_key_seekable() for example +*/ + +/* this is common implementation of build_readdir_key method of dir + plugin + see reiser4_readdir_common for more details +*/ +int build_readdir_key_common(struct file *dir /* directory being read */ , + reiser4_key * result/* where to store key */) +{ + reiser4_file_fsdata *fdata; + struct inode *inode; + + assert("nikita-1361", dir != NULL); + assert("nikita-1362", result != NULL); + assert("nikita-1363", dir->f_path.dentry != NULL); + inode = file_inode(dir); + assert("nikita-1373", inode != NULL); + + fdata = reiser4_get_file_fsdata(dir); + if (IS_ERR(fdata)) + return PTR_ERR(fdata); + assert("nikita-1364", fdata != NULL); + return extract_key_from_de_id(get_inode_oid(inode), + &fdata->dir.readdir.position. + dir_entry_key, result); + +} + +void reiser4_adjust_dir_file(struct inode *, const struct dentry *, int offset, + int adj); + +/* this is common implementation of add_entry method of dir plugin +*/ +int reiser4_add_entry_common(struct inode *object, /* directory to add new name + * in */ + struct dentry *where, /* new name */ + reiser4_object_create_data * data, /* parameters of + * new object */ + reiser4_dir_entry_desc * entry /* parameters of + * new directory + * entry */) +{ + int result; + coord_t *coord; + lock_handle lh; + struct reiser4_dentry_fsdata *fsdata; + reiser4_block_nr reserve; + + assert("nikita-1114", object != NULL); + assert("nikita-1250", where != NULL); + + fsdata = reiser4_get_dentry_fsdata(where); + if (unlikely(IS_ERR(fsdata))) + return PTR_ERR(fsdata); + + reserve = inode_dir_plugin(object)->estimate.add_entry(object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + init_lh(&lh); + coord = &fsdata->dec.entry_coord; + coord_clear_iplug(coord); + + /* check for this entry in a directory. This is plugin method. */ + result = reiser4_find_entry(object, where, &lh, ZNODE_WRITE_LOCK, + entry); + if (likely(result == -ENOENT)) { + /* add new entry. Just pass control to the directory + item plugin. 
*/ + assert("nikita-1709", inode_dir_item_plugin(object)); + assert("nikita-2230", coord->node == lh.node); + reiser4_seal_done(&fsdata->dec.entry_seal); + result = + inode_dir_item_plugin(object)->s.dir.add_entry(object, + coord, &lh, + where, + entry); + if (result == 0) { + reiser4_adjust_dir_file(object, where, + fsdata->dec.pos + 1, +1); + INODE_INC_FIELD(object, i_size); + } + } else if (result == 0) { + assert("nikita-2232", coord->node == lh.node); + result = RETERR(-EEXIST); + } + done_lh(&lh); + + return result; +} + +/** + * rem_entry - remove entry from directory item + * @dir: + * @dentry: + * @entry: + * @coord: + * @lh: + * + * Checks that coordinate @coord is set properly and calls item plugin + * method to cut entry. + */ +static int +rem_entry(struct inode *dir, struct dentry *dentry, + reiser4_dir_entry_desc * entry, coord_t *coord, lock_handle * lh) +{ + item_plugin *iplug; + struct inode *child; + + iplug = inode_dir_item_plugin(dir); + child = dentry->d_inode; + assert("nikita-3399", child != NULL); + + /* check that we are really destroying an entry for @child */ + if (REISER4_DEBUG) { + int result; + reiser4_key key; + + result = iplug->s.dir.extract_key(coord, &key); + if (result != 0) + return result; + if (get_key_objectid(&key) != get_inode_oid(child)) { + warning("nikita-3397", + "rem_entry: %#llx != %#llx\n", + get_key_objectid(&key), + (unsigned long long)get_inode_oid(child)); + return RETERR(-EIO); + } + } + return iplug->s.dir.rem_entry(dir, &dentry->d_name, coord, lh, entry); +} + +/** + * reiser4_rem_entry_common - remove entry from a directory + * @dir: directory to remove entry from + * @where: name that is being removed + * @entry: description of entry being removed + * + * This is common implementation of rem_entry method of dir plugin. + */ +int reiser4_rem_entry_common(struct inode *dir, + struct dentry *dentry, + reiser4_dir_entry_desc * entry) +{ + int result; + coord_t *coord; + lock_handle lh; + struct reiser4_dentry_fsdata *fsdata; + __u64 tograb; + + assert("nikita-1124", dir != NULL); + assert("nikita-1125", dentry != NULL); + + tograb = inode_dir_plugin(dir)->estimate.rem_entry(dir); + result = reiser4_grab_space(tograb, BA_CAN_COMMIT | BA_RESERVED); + if (result != 0) + return RETERR(-ENOSPC); + + init_lh(&lh); + + /* check for this entry in a directory. This is plugin method. */ + result = reiser4_find_entry(dir, dentry, &lh, ZNODE_WRITE_LOCK, entry); + fsdata = reiser4_get_dentry_fsdata(dentry); + if (IS_ERR(fsdata)) { + done_lh(&lh); + return PTR_ERR(fsdata); + } + + coord = &fsdata->dec.entry_coord; + + assert("nikita-3404", + get_inode_oid(dentry->d_inode) != get_inode_oid(dir) || + dir->i_size <= 1); + + coord_clear_iplug(coord); + if (result == 0) { + /* remove entry. Just pass control to the directory item + plugin. 
*/ + assert("vs-542", inode_dir_item_plugin(dir)); + reiser4_seal_done(&fsdata->dec.entry_seal); + reiser4_adjust_dir_file(dir, dentry, fsdata->dec.pos, -1); + result = + WITH_COORD(coord, + rem_entry(dir, dentry, entry, coord, &lh)); + if (result == 0) { + if (dir->i_size >= 1) + INODE_DEC_FIELD(dir, i_size); + else { + warning("nikita-2509", "Dir %llu is runt", + (unsigned long long) + get_inode_oid(dir)); + result = RETERR(-EIO); + } + + assert("nikita-3405", dentry->d_inode->i_nlink != 1 || + dentry->d_inode->i_size != 2 || + inode_dir_plugin(dentry->d_inode) == NULL); + } + } + done_lh(&lh); + + return result; +} + +static reiser4_block_nr estimate_init(struct inode *parent, + struct inode *object); +static int create_dot_dotdot(struct inode *object, struct inode *parent); + +/* this is common implementation of init method of dir plugin + create "." and ".." entries +*/ +int reiser4_dir_init_common(struct inode *object, /* new directory */ + struct inode *parent, /* parent directory */ + reiser4_object_create_data * data /* info passed + * to us, this + * is filled by + * reiser4() + * syscall in + * particular */) +{ + reiser4_block_nr reserve; + + assert("nikita-680", object != NULL); + assert("nikita-681", S_ISDIR(object->i_mode)); + assert("nikita-682", parent != NULL); + assert("nikita-684", data != NULL); + assert("nikita-686", data->id == DIRECTORY_FILE_PLUGIN_ID); + assert("nikita-687", object->i_mode & S_IFDIR); + + reserve = estimate_init(parent, object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + return create_dot_dotdot(object, parent); +} + +/* this is common implementation of done method of dir plugin + remove "." entry +*/ +int reiser4_dir_done_common(struct inode *object/* object being deleted */) +{ + int result; + reiser4_block_nr reserve; + struct dentry goodby_dots; + reiser4_dir_entry_desc entry; + + assert("nikita-1449", object != NULL); + + if (reiser4_inode_get_flag(object, REISER4_NO_SD)) + return 0; + + /* of course, this can be rewritten to sweep everything in one + reiser4_cut_tree(). 
*/ + memset(&entry, 0, sizeof entry); + + /* FIXME: this done method is called from reiser4_delete_dir_common + * which reserved space already */ + reserve = inode_dir_plugin(object)->estimate.rem_entry(object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT | BA_RESERVED)) + return RETERR(-ENOSPC); + + memset(&goodby_dots, 0, sizeof goodby_dots); + entry.obj = goodby_dots.d_inode = object; + goodby_dots.d_name.name = "."; + goodby_dots.d_name.len = 1; + result = reiser4_rem_entry_common(object, &goodby_dots, &entry); + reiser4_free_dentry_fsdata(&goodby_dots); + if (unlikely(result != 0 && result != -ENOMEM && result != -ENOENT)) + warning("nikita-2252", "Cannot remove dot of %lli: %i", + (unsigned long long)get_inode_oid(object), result); + return 0; +} + +/* this is common implementation of attach method of dir plugin +*/ +int reiser4_attach_common(struct inode *child UNUSED_ARG, + struct inode *parent UNUSED_ARG) +{ + assert("nikita-2647", child != NULL); + assert("nikita-2648", parent != NULL); + + return 0; +} + +/* this is common implementation of detach method of dir plugin + remove "..", decrease nlink on parent +*/ +int reiser4_detach_common(struct inode *object, struct inode *parent) +{ + int result; + struct dentry goodby_dots; + reiser4_dir_entry_desc entry; + + assert("nikita-2885", object != NULL); + assert("nikita-2886", !reiser4_inode_get_flag(object, REISER4_NO_SD)); + + memset(&entry, 0, sizeof entry); + + /* NOTE-NIKITA this only works if @parent is -the- parent of + @object, viz. object whose key is stored in dotdot + entry. Wouldn't work with hard-links on directories. */ + memset(&goodby_dots, 0, sizeof goodby_dots); + entry.obj = goodby_dots.d_inode = parent; + goodby_dots.d_name.name = ".."; + goodby_dots.d_name.len = 2; + result = reiser4_rem_entry_common(object, &goodby_dots, &entry); + reiser4_free_dentry_fsdata(&goodby_dots); + if (result == 0) { + /* the dot should be the only entry remaining at this time... */ + assert("nikita-3400", + object->i_size == 1 && object->i_nlink <= 2); +#if 0 + /* and, together with the only name directory can have, they + * provides for the last 2 remaining references. If we get + * here as part of error handling during mkdir, @object + * possibly has no name yet, so its nlink == 1. If we get here + * from rename (targeting empty directory), it has no name + * already, so its nlink == 1. */ + assert("nikita-3401", + object->i_nlink == 2 || object->i_nlink == 1); +#endif + + /* decrement nlink of directory removed ".." 
pointed + to */ + reiser4_del_nlink(parent, NULL, 0); + } + return result; +} + +/* this is common implementation of estimate.add_entry method of + dir plugin + estimation of adding entry which supposes that entry is inserting a + unit into item +*/ +reiser4_block_nr estimate_add_entry_common(const struct inode *inode) +{ + return estimate_one_insert_into_item(reiser4_tree_by_inode(inode)); +} + +/* this is common implementation of estimate.rem_entry method of dir + plugin +*/ +reiser4_block_nr estimate_rem_entry_common(const struct inode *inode) +{ + return estimate_one_item_removal(reiser4_tree_by_inode(inode)); +} + +/* this is common implementation of estimate.unlink method of dir + plugin +*/ +reiser4_block_nr +dir_estimate_unlink_common(const struct inode *parent, + const struct inode *object) +{ + reiser4_block_nr res; + + /* hashed_rem_entry(object) */ + res = inode_dir_plugin(object)->estimate.rem_entry(object); + /* del_nlink(parent) */ + res += 2 * inode_file_plugin(parent)->estimate.update(parent); + + return res; +} + +/* + * helper for inode_ops ->lookup() and dir plugin's ->get_parent() + * methods: if @inode is a light-weight file, setup its credentials + * that are not stored in the stat-data in this case + */ +void check_light_weight(struct inode *inode, struct inode *parent) +{ + if (reiser4_inode_get_flag(inode, REISER4_LIGHT_WEIGHT)) { + inode->i_uid = parent->i_uid; + inode->i_gid = parent->i_gid; + /* clear light-weight flag. If inode would be read by any + other name, [ug]id wouldn't change. */ + reiser4_inode_clr_flag(inode, REISER4_LIGHT_WEIGHT); + } +} + +/* looks for name specified in @dentry in directory @parent and if name is + found - key of object found entry points to is stored in @entry->key */ +int reiser4_lookup_name(struct inode *parent, /* inode of directory to lookup + * for name in */ + struct dentry *dentry, /* name to look for */ + reiser4_key * key/* place to store key */) +{ + int result; + coord_t *coord; + lock_handle lh; + const char *name; + int len; + reiser4_dir_entry_desc entry; + struct reiser4_dentry_fsdata *fsdata; + + assert("nikita-1247", parent != NULL); + assert("nikita-1248", dentry != NULL); + assert("nikita-1123", dentry->d_name.name != NULL); + assert("vs-1486", + dentry->d_op == &get_super_private(parent->i_sb)->ops.dentry); + + name = dentry->d_name.name; + len = dentry->d_name.len; + + if (!inode_dir_plugin(parent)->is_name_acceptable(parent, name, len)) + /* some arbitrary error code to return */ + return RETERR(-ENAMETOOLONG); + + fsdata = reiser4_get_dentry_fsdata(dentry); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + + coord = &fsdata->dec.entry_coord; + coord_clear_iplug(coord); + init_lh(&lh); + + /* find entry in a directory. This is plugin method. */ + result = reiser4_find_entry(parent, dentry, &lh, ZNODE_READ_LOCK, + &entry); + if (result == 0) { + /* entry was found, extract object key from it. */ + result = + WITH_COORD(coord, + item_plugin_by_coord(coord)->s.dir. 
+ extract_key(coord, key)); + } + done_lh(&lh); + return result; + +} + +/* helper for reiser4_dir_init_common(): estimate number of blocks to reserve */ +static reiser4_block_nr +estimate_init(struct inode *parent, struct inode *object) +{ + reiser4_block_nr res = 0; + + assert("vpf-321", parent != NULL); + assert("vpf-322", object != NULL); + + /* hashed_add_entry(object) */ + res += inode_dir_plugin(object)->estimate.add_entry(object); + /* reiser4_add_nlink(object) */ + res += inode_file_plugin(object)->estimate.update(object); + /* hashed_add_entry(object) */ + res += inode_dir_plugin(object)->estimate.add_entry(object); + /* reiser4_add_nlink(parent) */ + res += inode_file_plugin(parent)->estimate.update(parent); + + return 0; +} + +/* helper function for reiser4_dir_init_common(). Create "." and ".." */ +static int create_dot_dotdot(struct inode *object/* object to create dot and + * dotdot for */ , + struct inode *parent/* parent of @object */) +{ + int result; + struct dentry dots_entry; + reiser4_dir_entry_desc entry; + + assert("nikita-688", object != NULL); + assert("nikita-689", S_ISDIR(object->i_mode)); + assert("nikita-691", parent != NULL); + + /* We store dot and dotdot as normal directory entries. This is + not necessary, because almost all information stored in them + is already in the stat-data of directory, the only thing + being missed is objectid of grand-parent directory that can + easily be added there as extension. + + But it is done the way it is done, because not storing dot + and dotdot will lead to the following complications: + + . special case handling in ->lookup(). + . addition of another extension to the sd. + . dependency on key allocation policy for stat data. + + */ + + memset(&entry, 0, sizeof entry); + memset(&dots_entry, 0, sizeof dots_entry); + entry.obj = dots_entry.d_inode = object; + dots_entry.d_name.name = "."; + dots_entry.d_name.len = 1; + result = reiser4_add_entry_common(object, &dots_entry, NULL, &entry); + reiser4_free_dentry_fsdata(&dots_entry); + + if (result == 0) { + result = reiser4_add_nlink(object, object, 0); + if (result == 0) { + entry.obj = dots_entry.d_inode = parent; + dots_entry.d_name.name = ".."; + dots_entry.d_name.len = 2; + result = reiser4_add_entry_common(object, + &dots_entry, NULL, &entry); + reiser4_free_dentry_fsdata(&dots_entry); + /* if creation of ".." failed, iput() will delete + object with ".". */ + if (result == 0) { + result = reiser4_add_nlink(parent, object, 0); + if (result != 0) + /* + * if we failed to bump i_nlink, try + * to remove ".." + */ + reiser4_detach_common(object, parent); + } + } + } + + if (result != 0) { + /* + * in the case of error, at least update stat-data so that, + * ->i_nlink updates are not lingering. + */ + reiser4_update_sd(object); + reiser4_update_sd(parent); + } + + return result; +} + +/* + * return 0 iff @coord contains a directory entry for the file with the name + * @name. 
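+ *
+ * (A note on space reservation around these helpers: as the comments in
+ * estimate_init() above indicate, initializing a new directory needs room
+ * for two entry insertions ("." and "..") plus stat-data updates of the
+ * new directory and of its parent. A minimal standalone sketch of such an
+ * estimate, with hypothetical toy_* costs rather than the reiser4
+ * estimate methods:
+ *
+ *    typedef unsigned long long toy_blocks;
+ *
+ *    static toy_blocks toy_estimate_dir_init(toy_blocks add_entry_cost,
+ *                                            toy_blocks sd_update_cost)
+ *    {
+ *        // "." and ".." insertions, plus nlink/stat-data updates on
+ *        // the new directory and on its parent
+ *        return 2 * add_entry_cost + 2 * sd_update_cost;
+ *    })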
+ */ +static int +check_item(const struct inode *dir, const coord_t *coord, const char *name) +{ + item_plugin *iplug; + char buf[DE_NAME_BUF_LEN]; + + iplug = item_plugin_by_coord(coord); + if (iplug == NULL) { + warning("nikita-1135", "Cannot get item plugin"); + print_coord("coord", coord, 1); + return RETERR(-EIO); + } else if (item_id_by_coord(coord) != + item_id_by_plugin(inode_dir_item_plugin(dir))) { + /* item id of current item does not match to id of items a + directory is built of */ + warning("nikita-1136", "Wrong item plugin"); + print_coord("coord", coord, 1); + return RETERR(-EIO); + } + assert("nikita-1137", iplug->s.dir.extract_name); + + /* Compare name stored in this entry with name we are looking for. + + NOTE-NIKITA Here should go code for support of something like + unicode, code tables, etc. + */ + return !!strcmp(name, iplug->s.dir.extract_name(coord, buf)); +} + +static int +check_entry(const struct inode *dir, coord_t *coord, const struct qstr *name) +{ + return WITH_COORD(coord, check_item(dir, coord, name->name)); +} + +/* + * argument package used by entry_actor to scan entries with identical keys. + */ +struct entry_actor_args { + /* name we are looking for */ + const char *name; + /* key of directory entry. entry_actor() scans through sequence of + * items/units having the same key */ + reiser4_key *key; + /* how many entries with duplicate key was scanned so far. */ + int non_uniq; +#if REISER4_USE_COLLISION_LIMIT + /* scan limit */ + int max_non_uniq; +#endif + /* return parameter: set to true, if ->name wasn't found */ + int not_found; + /* what type of lock to take when moving to the next node during + * scan */ + znode_lock_mode mode; + + /* last coord that was visited during scan */ + coord_t last_coord; + /* last node locked during scan */ + lock_handle last_lh; + /* inode of directory */ + const struct inode *inode; +}; + +/* Function called by reiser4_find_entry() to look for given name + in the directory. */ +static int entry_actor(reiser4_tree * tree UNUSED_ARG /* tree being scanned */ , + coord_t *coord /* current coord */ , + lock_handle * lh /* current lock handle */ , + void *entry_actor_arg/* argument to scan */) +{ + reiser4_key unit_key; + struct entry_actor_args *args; + + assert("nikita-1131", tree != NULL); + assert("nikita-1132", coord != NULL); + assert("nikita-1133", entry_actor_arg != NULL); + + args = entry_actor_arg; + ++args->non_uniq; +#if REISER4_USE_COLLISION_LIMIT + if (args->non_uniq > args->max_non_uniq) { + args->not_found = 1; + /* hash collision overflow. */ + return RETERR(-EBUSY); + } +#endif + + /* + * did we just reach the end of the sequence of items/units with + * identical keys? + */ + if (!keyeq(args->key, unit_key_by_coord(coord, &unit_key))) { + assert("nikita-1791", + keylt(args->key, unit_key_by_coord(coord, &unit_key))); + args->not_found = 1; + args->last_coord.between = AFTER_UNIT; + return 0; + } + + coord_dup(&args->last_coord, coord); + /* + * did scan just moved to the next node? + */ + if (args->last_lh.node != lh->node) { + int lock_result; + + /* + * if so, lock new node with the mode requested by the caller + */ + done_lh(&args->last_lh); + assert("nikita-1896", znode_is_any_locked(lh->node)); + lock_result = longterm_lock_znode(&args->last_lh, lh->node, + args->mode, ZNODE_LOCK_HIPRI); + if (lock_result != 0) + return lock_result; + } + return check_item(args->inode, coord, args->name); +} + +/* Look for given @name within directory @dir. 
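+
+   As a picture of the collision handling described below (several names
+   may share one entry key, and lookup walks the run of equal keys while
+   comparing names), here is a minimal standalone sketch over a flat,
+   key-sorted array; the toy_* names are hypothetical and this is not the
+   reiser4 tree API:
+
+       #include <string.h>
+
+       struct toy_entry { unsigned long long key; const char *name; };
+
+       // return the index of the entry called "name", or -1 if absent
+       static int toy_find_entry(const struct toy_entry *e, int n,
+                                 unsigned long long key, const char *name)
+       {
+               int i = 0;
+
+               while (i < n && e[i].key < key)        // entries sorted by key
+                       i++;
+               for (; i < n && e[i].key == key; i++)  // scan the equal-key run
+                       if (strcmp(e[i].name, name) == 0)
+                               return i;
+               return -1;                             // not found
+       }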
+ + This is called during lookup, creation and removal of directory + entries and on reiser4_rename_common + + First calculate key that directory entry for @name would have. Search + for this key in the tree. If such key is found, scan all items with + the same key, checking name in each directory entry along the way. +*/ +int reiser4_find_entry(struct inode *dir, /* directory to scan */ + struct dentry *de, /* name to search for */ + lock_handle * lh, /* resulting lock handle */ + znode_lock_mode mode, /* required lock mode */ + reiser4_dir_entry_desc * entry /* parameters of found + directory entry */) +{ + const struct qstr *name; + seal_t *seal; + coord_t *coord; + int result; + __u32 flags; + struct de_location *dec; + struct reiser4_dentry_fsdata *fsdata; + + assert("nikita-1130", lh != NULL); + assert("nikita-1128", dir != NULL); + + name = &de->d_name; + assert("nikita-1129", name != NULL); + + /* dentry private data don't require lock, because dentry + manipulations are protected by i_mutex on parent. + + This is not so for inodes, because there is no -the- parent in + inode case. + */ + fsdata = reiser4_get_dentry_fsdata(de); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + dec = &fsdata->dec; + + coord = &dec->entry_coord; + coord_clear_iplug(coord); + seal = &dec->entry_seal; + /* compose key of directory entry for @name */ + inode_dir_plugin(dir)->build_entry_key(dir, name, &entry->key); + + if (reiser4_seal_is_set(seal)) { + /* check seal */ + result = reiser4_seal_validate(seal, coord, &entry->key, + lh, mode, ZNODE_LOCK_LOPRI); + if (result == 0) { + /* key was found. Check that it is really item we are + looking for. */ + result = check_entry(dir, coord, name); + if (result == 0) + return 0; + } + } + flags = (mode == ZNODE_WRITE_LOCK) ? CBK_FOR_INSERT : 0; + /* + * find place in the tree where directory item should be located. + */ + result = reiser4_object_lookup(dir, &entry->key, coord, lh, mode, + FIND_EXACT, LEAF_LEVEL, LEAF_LEVEL, + flags, NULL/*ra_info */); + if (result == CBK_COORD_FOUND) { + struct entry_actor_args arg; + + /* fast path: no hash collisions */ + result = check_entry(dir, coord, name); + if (result == 0) { + reiser4_seal_init(seal, coord, &entry->key); + dec->pos = 0; + } else if (result > 0) { + /* Iterate through all units with the same keys. */ + arg.name = name->name; + arg.key = &entry->key; + arg.not_found = 0; + arg.non_uniq = 0; +#if REISER4_USE_COLLISION_LIMIT + arg.max_non_uniq = max_hash_collisions(dir); + assert("nikita-2851", arg.max_non_uniq > 1); +#endif + arg.mode = mode; + arg.inode = dir; + coord_init_zero(&arg.last_coord); + init_lh(&arg.last_lh); + + result = reiser4_iterate_tree + (reiser4_tree_by_inode(dir), + coord, lh, + entry_actor, &arg, mode, 1); + /* if end of the tree or extent was reached during + scanning. 
*/ + if (arg.not_found || (result == -E_NO_NEIGHBOR)) { + /* step back */ + done_lh(lh); + + result = zload(arg.last_coord.node); + if (result == 0) { + coord_clear_iplug(&arg.last_coord); + coord_dup(coord, &arg.last_coord); + move_lh(lh, &arg.last_lh); + result = RETERR(-ENOENT); + zrelse(arg.last_coord.node); + --arg.non_uniq; + } + } + + done_lh(&arg.last_lh); + if (result == 0) + reiser4_seal_init(seal, coord, &entry->key); + + if (result == 0 || result == -ENOENT) { + assert("nikita-2580", arg.non_uniq > 0); + dec->pos = arg.non_uniq - 1; + } + } + } else + dec->pos = -1; + return result; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/Makefile b/fs/reiser4/plugin/disk_format/Makefile new file mode 100644 index 000000000000..e4e9e54f278d --- /dev/null +++ b/fs/reiser4/plugin/disk_format/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_REISER4_FS) += df_plugins.o + +df_plugins-objs := \ + disk_format40.o \ + disk_format.o diff --git a/fs/reiser4/plugin/disk_format/disk_format.c b/fs/reiser4/plugin/disk_format/disk_format.c new file mode 100644 index 000000000000..d7851063c821 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format.c @@ -0,0 +1,38 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../plugin_header.h" +#include "disk_format40.h" +#include "disk_format.h" +#include "../plugin.h" + +/* initialization of disk layout plugins */ +disk_format_plugin format_plugins[LAST_FORMAT_ID] = { + [FORMAT40_ID] = { + .h = { + .type_id = REISER4_FORMAT_PLUGIN_TYPE, + .id = FORMAT40_ID, + .pops = NULL, + .label = "reiser40", + .desc = "standard disk layout for reiser40", + .linkage = {NULL, NULL} + }, + .init_format = init_format_format40, + .root_dir_key = root_dir_key_format40, + .release = release_format40, + .log_super = log_super_format40, + .check_open = check_open_format40, + .version_update = version_update_format40 + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/disk_format.h b/fs/reiser4/plugin/disk_format/disk_format.h new file mode 100644 index 000000000000..b9c53acede72 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format.h @@ -0,0 +1,27 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* identifiers for disk layouts, they are also used as indexes in array of disk + plugins */ + +#if !defined( __REISER4_DISK_FORMAT_H__ ) +#define __REISER4_DISK_FORMAT_H__ + +typedef enum { + /* standard reiser4 disk layout plugin id */ + FORMAT40_ID, + LAST_FORMAT_ID +} disk_format_id; + +/* __REISER4_DISK_FORMAT_H__ */ +#endif + +/* Make Linus happy. 
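+
+   As an aside on the format_plugins[] table defined in disk_format.c
+   above: disk layouts are dispatched through an array of method tables
+   indexed by this disk_format_id enum, so selecting a format is a plain
+   array lookup. A minimal standalone sketch of the pattern, with
+   hypothetical toy_* types rather than the reiser4 plugin structures:
+
+       struct toy_format_ops {
+               const char *label;
+               int (*init)(void);
+       };
+
+       enum toy_format_id { TOY_FORMAT40_ID, TOY_LAST_FORMAT_ID };
+
+       static const struct toy_format_ops toy_formats[TOY_LAST_FORMAT_ID] = {
+               [TOY_FORMAT40_ID] = { .label = "reiser40", .init = 0 },
+       };
+
+       static const struct toy_format_ops *toy_format_by_id(int id)
+       {
+               // ids double as array indexes, as in format_plugins[] above
+               return (id >= 0 && id < TOY_LAST_FORMAT_ID) ? &toy_formats[id] : 0;
+       }
+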
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/disk_format40.c b/fs/reiser4/plugin/disk_format/disk_format40.c new file mode 100644 index 000000000000..b572f14d69a8 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format40.c @@ -0,0 +1,664 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../../dformat.h" +#include "../../key.h" +#include "../node/node.h" +#include "../space/space_allocator.h" +#include "disk_format40.h" +#include "../plugin.h" +#include "../../txnmgr.h" +#include "../../jnode.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../wander.h" +#include "../../inode.h" +#include "../../ktxnmgrd.h" +#include "../../status_flags.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block */ +#include <linux/buffer_head.h> + +/* reiser 4.0 default disk layout */ + +/* Amount of free blocks needed to perform release_format40 when fs gets + mounted RW: 1 for SB, 1 for non-leaves in overwrite set, 2 for tx header + & tx record. */ +#define RELEASE_RESERVED 4 + +/* This flag indicates that backup should be updated + (the update is performed by fsck) */ +#define FORMAT40_UPDATE_BACKUP (1 << 31) + +/* functions to access fields of format40_disk_super_block */ +static __u64 get_format40_block_count(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->block_count)); +} + +static __u64 get_format40_free_blocks(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->free_blocks)); +} + +static __u64 get_format40_root_block(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->root_block)); +} + +static __u16 get_format40_tree_height(const format40_disk_super_block * sb) +{ + return le16_to_cpu(get_unaligned(&sb->tree_height)); +} + +static __u64 get_format40_file_count(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->file_count)); +} + +static __u64 get_format40_oid(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->oid)); +} + +static __u32 get_format40_mkfs_id(const format40_disk_super_block * sb) +{ + return le32_to_cpu(get_unaligned(&sb->mkfs_id)); +} + +static __u32 get_format40_node_plugin_id(const format40_disk_super_block * sb) +{ + return le32_to_cpu(get_unaligned(&sb->node_pid)); +} + +static __u64 get_format40_flags(const format40_disk_super_block * sb) +{ + return le64_to_cpu(get_unaligned(&sb->flags)); +} + +static __u32 get_format40_version(const format40_disk_super_block * sb) +{ + return le32_to_cpu(get_unaligned(&sb->version)) & + ~FORMAT40_UPDATE_BACKUP; +} + +static int update_backup_version(const format40_disk_super_block * sb) +{ + return (le32_to_cpu(get_unaligned(&sb->version)) & + FORMAT40_UPDATE_BACKUP); +} + +static int update_disk_version_minor(const format40_disk_super_block * sb) +{ + return (get_format40_version(sb) < get_release_number_minor()); +} + +static int incomplete_compatibility(const format40_disk_super_block * sb) +{ + return (get_format40_version(sb) > get_release_number_minor()); +} + +static format40_super_info *get_sb_info(struct super_block *super) +{ + return &get_super_private(super)->u.format40; +} + +static int consult_diskmap(struct super_block *s) +{ + format40_super_info *info; + journal_location *jloc; + + info = 
get_sb_info(s); + jloc = &get_super_private(s)->jloc; + /* Default format-specific locations, if there is nothing in + * diskmap */ + jloc->footer = FORMAT40_JOURNAL_FOOTER_BLOCKNR; + jloc->header = FORMAT40_JOURNAL_HEADER_BLOCKNR; + info->loc.super = FORMAT40_OFFSET / s->s_blocksize; +#ifdef CONFIG_REISER4_BADBLOCKS + reiser4_get_diskmap_value(FORMAT40_PLUGIN_DISKMAP_ID, FORMAT40_JF, + &jloc->footer); + reiser4_get_diskmap_value(FORMAT40_PLUGIN_DISKMAP_ID, FORMAT40_JH, + &jloc->header); + reiser4_get_diskmap_value(FORMAT40_PLUGIN_DISKMAP_ID, FORMAT40_SUPER, + &info->loc.super); +#endif + return 0; +} + +/* find any valid super block of disk_format40 (even if the first + super block is destroyed), will change block numbers of actual journal header/footer (jf/jh) + if needed */ +static struct buffer_head *find_a_disk_format40_super_block(struct super_block + *s) +{ + struct buffer_head *super_bh; + format40_disk_super_block *disk_sb; + format40_super_info *info; + + assert("umka-487", s != NULL); + + info = get_sb_info(s); + + super_bh = sb_bread(s, info->loc.super); + if (super_bh == NULL) + return ERR_PTR(RETERR(-EIO)); + + disk_sb = (format40_disk_super_block *) super_bh->b_data; + if (strncmp(disk_sb->magic, FORMAT40_MAGIC, sizeof(FORMAT40_MAGIC))) { + brelse(super_bh); + return ERR_PTR(RETERR(-EINVAL)); + } + + reiser4_set_block_count(s, le64_to_cpu(get_unaligned(&disk_sb->block_count))); + reiser4_set_data_blocks(s, le64_to_cpu(get_unaligned(&disk_sb->block_count)) - + le64_to_cpu(get_unaligned(&disk_sb->free_blocks))); + reiser4_set_free_blocks(s, le64_to_cpu(get_unaligned(&disk_sb->free_blocks))); + + return super_bh; +} + +/* find the most recent version of super block. This is called after journal is + replayed */ +static struct buffer_head *read_super_block(struct super_block *s UNUSED_ARG) +{ + /* Here the most recent superblock copy has to be read. However, as + journal replay isn't complete, we are using + find_a_disk_format40_super_block() function. 
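+
+          (A side note on the accessors above: every get_format40_*()
+          helper reads an on-disk field that is little-endian and possibly
+          unaligned, hence the get_unaligned() + le64_to_cpu() pattern. A
+          minimal standalone sketch of the same idea by explicit byte
+          assembly, without the kernel helpers; the offsets quoted are the
+          ones documented in disk_format40.h:
+
+              #include <stdint.h>
+
+              // read a little-endian u64 at byte offset "off" of a raw block
+              static uint64_t toy_le64_at(const unsigned char *blk, unsigned off)
+              {
+                      uint64_t v = 0;
+
+                      for (int i = 7; i >= 0; i--)
+                              v = (v << 8) | blk[off + i];
+                      return v;
+              }
+
+              // e.g. block_count is at offset 0 and free_blocks at offset 8
+              // uint64_t bc = toy_le64_at(sb_data, 0);
+          )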
*/ + return find_a_disk_format40_super_block(s); +} + +static int get_super_jnode(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + jnode *sb_jnode; + int ret; + + sb_jnode = reiser4_alloc_io_head(&get_sb_info(s)->loc.super); + + ret = jload(sb_jnode); + + if (ret) { + reiser4_drop_io_head(sb_jnode); + return ret; + } + + pin_jnode_data(sb_jnode); + jrelse(sb_jnode); + + sbinfo->u.format40.sb_jnode = sb_jnode; + + return 0; +} + +static void done_super_jnode(struct super_block *s) +{ + jnode *sb_jnode = get_super_private(s)->u.format40.sb_jnode; + + if (sb_jnode) { + unpin_jnode_data(sb_jnode); + reiser4_drop_io_head(sb_jnode); + } +} + +typedef enum format40_init_stage { + NONE_DONE = 0, + CONSULT_DISKMAP, + FIND_A_SUPER, + INIT_JOURNAL_INFO, + INIT_STATUS, + JOURNAL_REPLAY, + READ_SUPER, + KEY_CHECK, + INIT_OID, + INIT_TREE, + JOURNAL_RECOVER, + INIT_SA, + INIT_JNODE, + ALL_DONE +} format40_init_stage; + +static format40_disk_super_block *copy_sb(const struct buffer_head *super_bh) +{ + format40_disk_super_block *sb_copy; + + sb_copy = kmalloc(sizeof(format40_disk_super_block), + reiser4_ctx_gfp_mask_get()); + if (sb_copy == NULL) + return ERR_PTR(RETERR(-ENOMEM)); + memcpy(sb_copy, ((format40_disk_super_block *) super_bh->b_data), + sizeof(format40_disk_super_block)); + return sb_copy; +} + +static int check_key_format(const format40_disk_super_block *sb_copy) +{ + if (!equi(REISER4_LARGE_KEY, + get_format40_flags(sb_copy) & (1 << FORMAT40_LARGE_KEYS))) { + warning("nikita-3228", "Key format mismatch. " + "Only %s keys are supported.", + REISER4_LARGE_KEY ? "large" : "small"); + return RETERR(-EINVAL); + } + return 0; +} + +/** + * try_init_format40 + * @super: + * @stage: + * + */ +static int try_init_format40(struct super_block *super, + format40_init_stage *stage) +{ + int result; + struct buffer_head *super_bh; + reiser4_super_info_data *sbinfo; + format40_disk_super_block *sb_copy; + tree_level height; + reiser4_block_nr root_block; + node_plugin *nplug; + + assert("vs-475", super != NULL); + assert("vs-474", get_super_private(super)); + + *stage = NONE_DONE; + + result = consult_diskmap(super); + if (result) + return result; + *stage = CONSULT_DISKMAP; + + super_bh = find_a_disk_format40_super_block(super); + if (IS_ERR(super_bh)) + return PTR_ERR(super_bh); + brelse(super_bh); + *stage = FIND_A_SUPER; + + /* ok, we are sure that filesystem format is a format40 format */ + + /* map jnodes for journal control blocks (header, footer) to disk */ + result = reiser4_init_journal_info(super); + if (result) + return result; + *stage = INIT_JOURNAL_INFO; + + /* ok, we are sure that filesystem format is a format40 format */ + /* Now check it's state */ + result = reiser4_status_init(FORMAT40_STATUS_BLOCKNR); + if (result != 0 && result != -EINVAL) + /* -EINVAL means there is no magic, so probably just old + * fs. 
*/ + return result; + *stage = INIT_STATUS; + + result = reiser4_status_query(NULL, NULL); + if (result == REISER4_STATUS_MOUNT_WARN) + notice("vpf-1363", "Warning: mounting %s with errors.", + super->s_id); + if (result == REISER4_STATUS_MOUNT_RO) { + notice("vpf-1364", "Warning: mounting %s with fatal errors," + " forcing read-only mount.", super->s_id); + super->s_flags |= MS_RDONLY; + } + result = reiser4_journal_replay(super); + if (result) + return result; + *stage = JOURNAL_REPLAY; + + super_bh = read_super_block(super); + if (IS_ERR(super_bh)) + return PTR_ERR(super_bh); + *stage = READ_SUPER; + + /* allocate and make a copy of format40_disk_super_block */ + sb_copy = copy_sb(super_bh); + brelse(super_bh); + + if (IS_ERR(sb_copy)) + return PTR_ERR(sb_copy); + printk("reiser4: %s: found disk format 4.0.%u.\n", + super->s_id, + get_format40_version(sb_copy)); + if (incomplete_compatibility(sb_copy)) + printk("reiser4: %s: format version number (4.0.%u) is " + "greater than release number (4.%u.%u) of reiser4 " + "kernel module. Some objects of the volume can be " + "inaccessible.\n", + super->s_id, + get_format40_version(sb_copy), + get_release_number_major(), + get_release_number_minor()); + /* make sure that key format of kernel and filesystem match */ + result = check_key_format(sb_copy); + if (result) { + kfree(sb_copy); + return result; + } + *stage = KEY_CHECK; + + result = oid_init_allocator(super, get_format40_file_count(sb_copy), + get_format40_oid(sb_copy)); + if (result) { + kfree(sb_copy); + return result; + } + *stage = INIT_OID; + + /* get things necessary to init reiser4_tree */ + root_block = get_format40_root_block(sb_copy); + height = get_format40_tree_height(sb_copy); + nplug = node_plugin_by_id(get_format40_node_plugin_id(sb_copy)); + + /* initialize reiser4_super_info_data */ + sbinfo = get_super_private(super); + assert("", sbinfo->tree.super == super); + /* init reiser4_tree for the filesystem */ + result = reiser4_init_tree(&sbinfo->tree, &root_block, height, nplug); + if (result) { + kfree(sb_copy); + return result; + } + *stage = INIT_TREE; + + /* + * initialize reiser4_super_info_data with data from format40 super + * block + */ + sbinfo->default_uid = 0; + sbinfo->default_gid = 0; + sbinfo->mkfs_id = get_format40_mkfs_id(sb_copy); + /* number of blocks in filesystem and reserved space */ + reiser4_set_block_count(super, get_format40_block_count(sb_copy)); + sbinfo->blocks_free = get_format40_free_blocks(sb_copy); + sbinfo->version = get_format40_version(sb_copy); + + if (update_backup_version(sb_copy)) + printk("reiser4: %s: use 'fsck.reiser4 --fix' " + "to complete disk format upgrade.\n", super->s_id); + kfree(sb_copy); + + sbinfo->fsuid = 0; + sbinfo->fs_flags |= (1 << REISER4_ADG); /* hard links for directories + * are not supported */ + sbinfo->fs_flags |= (1 << REISER4_ONE_NODE_PLUGIN); /* all nodes in + * layout 40 are + * of one + * plugin */ + /* sbinfo->tmgr is initialized already */ + + /* recover sb data which were logged separately from sb block */ + + /* NOTE-NIKITA: reiser4_journal_recover_sb_data() calls + * oid_init_allocator() and reiser4_set_free_blocks() with new + * data. What's the reason to call them above? */ + result = reiser4_journal_recover_sb_data(super); + if (result != 0) + return result; + *stage = JOURNAL_RECOVER; + + /* + * Set number of used blocks. The number of used blocks is not stored + * neither in on-disk super block nor in the journal footer blocks. 
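+	 * (as the rest of this comment explains, that count is derived
+	 * rather than stored: used = total - free; for example, with
+	 * block_count == 1000 and free_blocks == 250 the data-block
+	 * counter below is set to 750)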
At + * this moment actual values of total blocks and free block counters + * are set in the reiser4 super block (in-memory structure) and we can + * calculate number of used blocks from them. + */ + reiser4_set_data_blocks(super, + reiser4_block_count(super) - + reiser4_free_blocks(super)); + +#if REISER4_DEBUG + sbinfo->min_blocks_used = 16 /* reserved area */ + + 2 /* super blocks */ + + 2 /* journal footer and header */ ; +#endif + + /* init disk space allocator */ + result = sa_init_allocator(reiser4_get_space_allocator(super), + super, NULL); + if (result) + return result; + *stage = INIT_SA; + + result = get_super_jnode(super); + if (result == 0) + *stage = ALL_DONE; + return result; +} + +/* plugin->u.format.get_ready */ +int init_format_format40(struct super_block *s, void *data UNUSED_ARG) +{ + int result; + format40_init_stage stage; + + result = try_init_format40(s, &stage); + switch (stage) { + case ALL_DONE: + assert("nikita-3458", result == 0); + break; + case INIT_JNODE: + done_super_jnode(s); + case INIT_SA: + sa_destroy_allocator(reiser4_get_space_allocator(s), s); + case JOURNAL_RECOVER: + case INIT_TREE: + reiser4_done_tree(&get_super_private(s)->tree); + case INIT_OID: + case KEY_CHECK: + case READ_SUPER: + case JOURNAL_REPLAY: + case INIT_STATUS: + reiser4_status_finish(); + case INIT_JOURNAL_INFO: + reiser4_done_journal_info(s); + case FIND_A_SUPER: + case CONSULT_DISKMAP: + case NONE_DONE: + break; + default: + impossible("nikita-3457", "init stage: %i", stage); + } + + if (!rofs_super(s) && reiser4_free_blocks(s) < RELEASE_RESERVED) + return RETERR(-ENOSPC); + + return result; +} + +static void pack_format40_super(const struct super_block *s, char *data) +{ + format40_disk_super_block *super_data = + (format40_disk_super_block *) data; + + reiser4_super_info_data *sbinfo = get_super_private(s); + + assert("zam-591", data != NULL); + + put_unaligned(cpu_to_le64(reiser4_free_committed_blocks(s)), + &super_data->free_blocks); + + put_unaligned(cpu_to_le64(sbinfo->tree.root_block), + &super_data->root_block); + + put_unaligned(cpu_to_le64(oid_next(s)), + &super_data->oid); + + put_unaligned(cpu_to_le64(oids_used(s)), + &super_data->file_count); + + put_unaligned(cpu_to_le16(sbinfo->tree.height), + &super_data->tree_height); + + if (update_disk_version_minor(super_data)) { + __u32 version = PLUGIN_LIBRARY_VERSION | FORMAT40_UPDATE_BACKUP; + + put_unaligned(cpu_to_le32(version), &super_data->version); + } +} + +/* plugin->u.format.log_super + return a jnode which should be added to transaction when the super block + gets logged */ +jnode *log_super_format40(struct super_block *s) +{ + jnode *sb_jnode; + + sb_jnode = get_super_private(s)->u.format40.sb_jnode; + + jload(sb_jnode); + + pack_format40_super(s, jdata(sb_jnode)); + + jrelse(sb_jnode); + + return sb_jnode; +} + +/* plugin->u.format.release */ +int release_format40(struct super_block *s) +{ + int ret; + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(s); + assert("zam-579", sbinfo != NULL); + + if (!rofs_super(s)) { + ret = reiser4_capture_super_block(s); + if (ret != 0) + warning("vs-898", + "reiser4_capture_super_block failed: %d", + ret); + + ret = txnmgr_force_commit_all(s, 1); + if (ret != 0) + warning("jmacd-74438", "txn_force failed: %d", ret); + + all_grabbed2free(); + } + + sa_destroy_allocator(&sbinfo->space_allocator, s); + reiser4_done_journal_info(s); + done_super_jnode(s); + + rcu_barrier(); + reiser4_done_tree(&sbinfo->tree); + /* call finish_rcu(), because some znode were "released" 
in + * reiser4_done_tree(). */ + rcu_barrier(); + + return 0; +} + +#define FORMAT40_ROOT_LOCALITY 41 +#define FORMAT40_ROOT_OBJECTID 42 + +/* plugin->u.format.root_dir_key */ +const reiser4_key *root_dir_key_format40(const struct super_block *super + UNUSED_ARG) +{ + static const reiser4_key FORMAT40_ROOT_DIR_KEY = { + .el = { + __constant_cpu_to_le64((FORMAT40_ROOT_LOCALITY << 4) | KEY_SD_MINOR), +#if REISER4_LARGE_KEY + ON_LARGE_KEY(0ull,) +#endif + __constant_cpu_to_le64(FORMAT40_ROOT_OBJECTID), + 0ull + } + }; + + return &FORMAT40_ROOT_DIR_KEY; +} + +/* plugin->u.format.check_open. + Check the opened object for validness. For now it checks for the valid oid & + locality only, can be improved later and it its work may depend on the mount + options. */ +int check_open_format40(const struct inode *object) +{ + oid_t max, oid; + + max = oid_next(object->i_sb) - 1; + + /* Check the oid. */ + oid = get_inode_oid(object); + if (oid > max) { + warning("vpf-1360", "The object with the oid %llu " + "greater then the max used oid %llu found.", + (unsigned long long)oid, (unsigned long long)max); + + return RETERR(-EIO); + } + + /* Check the locality. */ + oid = reiser4_inode_data(object)->locality_id; + if (oid > max) { + warning("vpf-1361", "The object with the locality %llu " + "greater then the max used oid %llu found.", + (unsigned long long)oid, (unsigned long long)max); + + return RETERR(-EIO); + } + + return 0; +} + +/* + * plugin->u.format.version_update + * Upgrade minor disk format version number + */ +int version_update_format40(struct super_block *super) { + txn_handle * trans; + lock_handle lh; + txn_atom *atom; + int ret; + + /* Nothing to do if RO mount or the on-disk version is not less. */ + if (super->s_flags & MS_RDONLY) + return 0; + + if (get_super_private(super)->version >= get_release_number_minor()) + return 0; + + printk("reiser4: %s: upgrading disk format to 4.0.%u.\n", + super->s_id, + get_release_number_minor()); + printk("reiser4: %s: use 'fsck.reiser4 --fix' " + "to complete disk format upgrade.\n", super->s_id); + + /* Mark the uber znode dirty to call log_super on write_logs. */ + init_lh(&lh); + ret = get_uber_znode(reiser4_get_tree(super), ZNODE_WRITE_LOCK, + ZNODE_LOCK_HIPRI, &lh); + if (ret != 0) + return ret; + + znode_make_dirty(lh.node); + done_lh(&lh); + + /* Update the backup blocks. */ + + /* Force write_logs immediately. */ + trans = get_current_context()->trans; + atom = get_current_atom_locked(); + assert("vpf-1906", atom != NULL); + + spin_lock_txnh(trans); + return force_commit_atom(trans); +} + +/* Make Linus happy. 
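+
+   A side note on the error handling in init_format_format40() above:
+   try_init_format40() records how far it got in a format40_init_stage,
+   and on failure init_format_format40() falls through a switch from that
+   stage downwards, undoing each completed step. A minimal standalone
+   sketch of the pattern, with hypothetical toy_* stages and teardown
+   calls rather than the reiser4 ones:
+
+       enum toy_stage { TOY_NONE, TOY_A_DONE, TOY_B_DONE, TOY_ALL_DONE };
+
+       static void toy_undo_b(void) { }
+       static void toy_undo_a(void) { }
+
+       static void toy_rollback(enum toy_stage stage)
+       {
+               switch (stage) {
+               case TOY_ALL_DONE:
+               case TOY_B_DONE:
+                       toy_undo_b();
+                       // fall through
+               case TOY_A_DONE:
+                       toy_undo_a();
+                       // fall through
+               case TOY_NONE:
+                       break;
+               }
+       }
+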
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/disk_format/disk_format40.h b/fs/reiser4/plugin/disk_format/disk_format40.h new file mode 100644 index 000000000000..f91f6c4327d8 --- /dev/null +++ b/fs/reiser4/plugin/disk_format/disk_format40.h @@ -0,0 +1,111 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* this file contains: + - definition of ondisk super block of standart disk layout for + reiser 4.0 (layout 40) + - definition of layout 40 specific portion of in-core super block + - declarations of functions implementing methods of layout plugin + for layout 40 + - declarations of functions used to get/set fields in layout 40 super block +*/ + +#ifndef __DISK_FORMAT40_H__ +#define __DISK_FORMAT40_H__ + +/* magic for default reiser4 layout */ +#define FORMAT40_MAGIC "ReIsEr40FoRmAt" +#define FORMAT40_OFFSET (REISER4_MASTER_OFFSET + PAGE_SIZE) + +#include "../../dformat.h" + +#include <linux/fs.h> /* for struct super_block */ + +typedef enum { + FORMAT40_LARGE_KEYS +} format40_flags; + +/* ondisk super block for format 40. It is 512 bytes long */ +typedef struct format40_disk_super_block { + /* 0 */ d64 block_count; + /* number of block in a filesystem */ + /* 8 */ d64 free_blocks; + /* number of free blocks */ + /* 16 */ d64 root_block; + /* filesystem tree root block */ + /* 24 */ d64 oid; + /* smallest free objectid */ + /* 32 */ d64 file_count; + /* number of files in a filesystem */ + /* 40 */ d64 flushes; + /* number of times super block was + flushed. Needed if format 40 + will have few super blocks */ + /* 48 */ d32 mkfs_id; + /* unique identifier of fs */ + /* 52 */ char magic[16]; + /* magic string ReIsEr40FoRmAt */ + /* 68 */ d16 tree_height; + /* height of filesystem tree */ + /* 70 */ d16 formatting_policy; + /* not used anymore */ + /* 72 */ d64 flags; + /* 80 */ d32 version; + /* on-disk format version number + initially assigned by mkfs as the greatest format40 + version number supported by reiser4progs and updated + in mount time in accordance with the greatest format40 + version number supported by kernel. + Is used by fsck to catch possible corruption and + for various compatibility issues */ + /* 84 */ d32 node_pid; + /* node plugin id */ + /* 88 */ char not_used[424]; +} format40_disk_super_block; + +/* format 40 specific part of reiser4_super_info_data */ +typedef struct format40_super_info { +/* format40_disk_super_block actual_sb; */ + jnode *sb_jnode; + struct { + reiser4_block_nr super; + } loc; +} format40_super_info; + +/* Defines for journal header and footer respectively. */ +#define FORMAT40_JOURNAL_HEADER_BLOCKNR \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 3) + +#define FORMAT40_JOURNAL_FOOTER_BLOCKNR \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 4) + +#define FORMAT40_STATUS_BLOCKNR \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 5) + +/* Diskmap declarations */ +#define FORMAT40_PLUGIN_DISKMAP_ID ((REISER4_FORMAT_PLUGIN_TYPE<<16) | (FORMAT40_ID)) +#define FORMAT40_SUPER 1 +#define FORMAT40_JH 2 +#define FORMAT40_JF 3 + +/* declarations of functions implementing methods of layout plugin for + format 40. 
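+
+   (a side note on the 512-byte on-disk super block declared above: the
+   named fields span offsets 0..87, block_count at 0 through node_pid at
+   84, and the remaining 424 bytes are reserved. A minimal standalone
+   sketch of how such a fixed layout can be checked at compile time,
+   plain C rather than the kernel's BUILD_BUG_ON:
+
+       struct toy_sb_layout {
+               unsigned char named_fields[88];
+               unsigned char not_used[424];
+       };
+
+       // refuses to compile unless the layout is exactly 512 bytes
+       typedef char toy_sb_layout_is_512_bytes[
+               sizeof(struct toy_sb_layout) == 512 ? 1 : -1];
+   )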
The functions theirself are in disk_format40.c */ +extern int init_format_format40(struct super_block *, void *data); +extern const reiser4_key *root_dir_key_format40(const struct super_block *); +extern int release_format40(struct super_block *s); +extern jnode *log_super_format40(struct super_block *s); +extern int check_open_format40(const struct inode *object); +extern int version_update_format40(struct super_block *super); + +/* __DISK_FORMAT40_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/fibration.c b/fs/reiser4/plugin/fibration.c new file mode 100644 index 000000000000..690dac4b83a1 --- /dev/null +++ b/fs/reiser4/plugin/fibration.c @@ -0,0 +1,175 @@ +/* Copyright 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Directory fibrations */ + +/* + * Suppose we have a directory tree with sources of some project. During + * compilation .o files are created within this tree. This makes access + * to the original source files less efficient, because source files are + * now "diluted" by object files: default directory plugin uses prefix + * of a file name as a part of the key for directory entry (and this + * part is also inherited by the key of file body). This means that + * foo.o will be located close to foo.c and foo.h in the tree. + * + * To avoid this effect directory plugin fill highest 7 (unused + * originally) bits of the second component of the directory entry key + * by bit-pattern depending on the file name (see + * fs/reiser4/kassign.c:build_entry_key_common()). These bits are called + * "fibre". Fibre of the file name key is inherited by key of stat data + * and keys of file body (in the case of REISER4_LARGE_KEY). + * + * Fibre for a given file is chosen by per-directory fibration + * plugin. Names within given fibre are ordered lexicographically. + */ + +#include "../debug.h" +#include "plugin_header.h" +#include "plugin.h" +#include "../super.h" +#include "../inode.h" + +#include <linux/types.h> + +static const int fibre_shift = 57; + +#define FIBRE_NO(n) (((__u64)(n)) << fibre_shift) + +/* + * Trivial fibration: all files of directory are just ordered + * lexicographically. + */ +static __u64 fibre_trivial(const struct inode *dir, const char *name, int len) +{ + return FIBRE_NO(0); +} + +/* + * dot-o fibration: place .o files after all others. + */ +static __u64 fibre_dot_o(const struct inode *dir, const char *name, int len) +{ + /* special treatment for .*\.o */ + if (len > 2 && name[len - 1] == 'o' && name[len - 2] == '.') + return FIBRE_NO(1); + else + return FIBRE_NO(0); +} + +/* + * ext.1 fibration: subdivide directory into 128 fibrations one for each + * 7bit extension character (file "foo.h" goes into fibre "h"), plus + * default fibre for the rest. + */ +static __u64 fibre_ext_1(const struct inode *dir, const char *name, int len) +{ + if (len > 2 && name[len - 2] == '.') + return FIBRE_NO(name[len - 1]); + else + return FIBRE_NO(0); +} + +/* + * ext.3 fibration: try to separate files with different 3-character + * extensions from each other. 
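+ *
+ * A side note on where these fibre values end up: FIBRE_NO() above
+ * shifts the value into the 7 highest bits of the key component (shift
+ * by 57), so entries with a larger fibre sort after entries with a
+ * smaller one, while names inside one fibre stay in lexicographic
+ * order. A minimal standalone sketch, with hypothetical toy_* names
+ * rather than the reiser4 key code:
+ *
+ *    #include <stdint.h>
+ *
+ *    #define TOY_FIBRE_SHIFT 57
+ *    #define TOY_FIBRE_NO(n) (((uint64_t)(n)) << TOY_FIBRE_SHIFT)
+ *
+ *    // e.g. the dot-o hook gives .o files fibre 1 and everything else
+ *    // fibre 0, so all .o entries sort after the rest of the directory
+ *    static uint64_t toy_apply_fibre(uint64_t key_component, unsigned fibre)
+ *    {
+ *        return key_component | TOY_FIBRE_NO(fibre);
+ *    }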
+ */ +static __u64 fibre_ext_3(const struct inode *dir, const char *name, int len) +{ + if (len > 4 && name[len - 4] == '.') + return FIBRE_NO(name[len - 3] + name[len - 2] + name[len - 1]); + else + return FIBRE_NO(0); +} + +static int change_fibration(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + int result; + + assert("nikita-3503", inode != NULL); + assert("nikita-3504", plugin != NULL); + + assert("nikita-3505", is_reiser4_inode(inode)); + assert("nikita-3506", inode_dir_plugin(inode) != NULL); + assert("nikita-3507", + plugin->h.type_id == REISER4_FIBRATION_PLUGIN_TYPE); + + result = 0; + if (inode_fibration_plugin(inode) == NULL || + inode_fibration_plugin(inode)->h.id != plugin->h.id) { + if (is_dir_empty(inode) == 0) + result = aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_FIBRATION, plugin); + else + result = RETERR(-ENOTEMPTY); + + } + return result; +} + +static reiser4_plugin_ops fibration_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = change_fibration +}; + +/* fibration plugins */ +fibration_plugin fibration_plugins[LAST_FIBRATION_ID] = { + [FIBRATION_LEXICOGRAPHIC] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_LEXICOGRAPHIC, + .pops = &fibration_plugin_ops, + .label = "lexicographic", + .desc = "no fibration", + .linkage = {NULL, NULL} + }, + .fibre = fibre_trivial + }, + [FIBRATION_DOT_O] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_DOT_O, + .pops = &fibration_plugin_ops, + .label = "dot-o", + .desc = "fibrate .o files separately", + .linkage = {NULL, NULL} + }, + .fibre = fibre_dot_o + }, + [FIBRATION_EXT_1] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_EXT_1, + .pops = &fibration_plugin_ops, + .label = "ext-1", + .desc = "fibrate file by single character extension", + .linkage = {NULL, NULL} + }, + .fibre = fibre_ext_1 + }, + [FIBRATION_EXT_3] = { + .h = { + .type_id = REISER4_FIBRATION_PLUGIN_TYPE, + .id = FIBRATION_EXT_3, + .pops = &fibration_plugin_ops, + .label = "ext-3", + .desc = "fibrate file by three character extension", + .linkage = {NULL, NULL} + }, + .fibre = fibre_ext_3 + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/fibration.h b/fs/reiser4/plugin/fibration.h new file mode 100644 index 000000000000..5ff1800ef705 --- /dev/null +++ b/fs/reiser4/plugin/fibration.h @@ -0,0 +1,37 @@ +/* Copyright 2004 by Hans Reiser, licensing governed by reiser4/README */ + +/* Fibration plugin used by hashed directory plugin to segment content + * of directory. See fs/reiser4/plugin/fibration.c for more on this. */ + +#if !defined(__FS_REISER4_PLUGIN_FIBRATION_H__) +#define __FS_REISER4_PLUGIN_FIBRATION_H__ + +#include "plugin_header.h" + +typedef struct fibration_plugin { + /* generic fields */ + plugin_header h; + + __u64(*fibre) (const struct inode *dir, const char *name, int len); +} fibration_plugin; + +typedef enum { + FIBRATION_LEXICOGRAPHIC, + FIBRATION_DOT_O, + FIBRATION_EXT_1, + FIBRATION_EXT_3, + LAST_FIBRATION_ID +} reiser4_fibration_id; + +/* __FS_REISER4_PLUGIN_FIBRATION_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/file/Makefile b/fs/reiser4/plugin/file/Makefile new file mode 100644 index 000000000000..134fa7aa20b9 --- /dev/null +++ b/fs/reiser4/plugin/file/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_REISER4_FS) += file_plugins.o + +file_plugins-objs := \ + file.o \ + tail_conversion.o \ + symlink.o \ + cryptcompress.o diff --git a/fs/reiser4/plugin/file/cryptcompress.c b/fs/reiser4/plugin/file/cryptcompress.c new file mode 100644 index 000000000000..440277cfd011 --- /dev/null +++ b/fs/reiser4/plugin/file/cryptcompress.c @@ -0,0 +1,3797 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + reiser4/README */ +/* + * Written by Edward Shishkin. + * + * Implementations of inode/file/address_space operations + * specific for cryptcompress file plugin which manages + * regular files built of compressed and(or) encrypted bodies. + * See http://dev.namesys.com/CryptcompressPlugin for details. + */ + +#include "../../inode.h" +#include "../cluster.h" +#include "../object.h" +#include "../../tree_walk.h" +#include "cryptcompress.h" + +#include <linux/pagevec.h> +#include <asm/uaccess.h> +#include <linux/swap.h> +#include <linux/writeback.h> +#include <linux/random.h> +#include <linux/scatterlist.h> + +/* + Managing primary and secondary caches by Reiser4 + cryptcompress file plugin. Synchronization scheme. + + + +------------------+ + +------------------->| tfm stream | + | | (compressed data)| + flush | +------------------+ + +-----------------+ | + |(->)longterm lock| V +--+ writepages() | | +-***-+ reiser4 +---+ + | | +--+ | *** | storage tree | | + | | | +-***-+ (primary cache)| | +u | write() (secondary| cache) V / | \ | | +s | ----> +----+ +----+ +----+ +----+ +-***** ******* **----+ ----> | d | +e | | | |page cluster | | | **disk cluster** | | i | +r | <---- +----+ +----+ +----+ +----+ +-***** **********----+ <---- | s | + | read() ^ ^ | | k | + | | (->)longterm lock| | page_io()| | + | | +------+ | | +--+ readpages() | | +---+ + | V + | +------------------+ + +--------------------| tfm stream | + | (plain text) | + +------------------+ +*/ + +/* get cryptcompress specific portion of inode */ +struct cryptcompress_info *cryptcompress_inode_data(const struct inode *inode) +{ + return &reiser4_inode_data(inode)->file_plugin_data.cryptcompress_info; +} + +/* plugin->u.file.init_inode_data */ +void init_inode_data_cryptcompress(struct inode *inode, + reiser4_object_create_data * crd, + int create) +{ + struct cryptcompress_info *data; + + data = cryptcompress_inode_data(inode); + assert("edward-685", data != NULL); + + memset(data, 0, sizeof(*data)); + + mutex_init(&data->checkin_mutex); + data->trunc_index = ULONG_MAX; + turn_on_compression(data); + set_lattice_factor(data, MIN_LATTICE_FACTOR); + init_inode_ordering(inode, crd, create); +} + +/* The following is a part of reiser4 cipher key manager + which is called when opening/creating a cryptcompress file */ + +/* get/set cipher key info */ +struct reiser4_crypto_info * inode_crypto_info (struct inode * inode) +{ + assert("edward-90", inode != NULL); + assert("edward-91", reiser4_inode_data(inode) != NULL); + return cryptcompress_inode_data(inode)->crypt; +} + +static void set_inode_crypto_info (struct inode * inode, + struct reiser4_crypto_info * info) +{ + cryptcompress_inode_data(inode)->crypt = info; +} + +/* allocate a cipher key info */ +struct reiser4_crypto_info * 
reiser4_alloc_crypto_info (struct inode * inode) +{ + struct reiser4_crypto_info *info; + int fipsize; + + info = kzalloc(sizeof(*info), reiser4_ctx_gfp_mask_get()); + if (!info) + return ERR_PTR(-ENOMEM); + + fipsize = inode_digest_plugin(inode)->fipsize; + info->keyid = kmalloc(fipsize, reiser4_ctx_gfp_mask_get()); + if (!info->keyid) { + kfree(info); + return ERR_PTR(-ENOMEM); + } + info->host = inode; + return info; +} + +#if 0 +/* allocate/free low-level info for cipher and digest + transforms */ +static int alloc_crypto_tfms(struct reiser4_crypto_info * info) +{ + struct crypto_blkcipher * ctfm = NULL; + struct crypto_hash * dtfm = NULL; + cipher_plugin * cplug = inode_cipher_plugin(info->host); + digest_plugin * dplug = inode_digest_plugin(info->host); + + if (cplug->alloc) { + ctfm = cplug->alloc(); + if (IS_ERR(ctfm)) { + warning("edward-1364", + "Can not allocate info for %s\n", + cplug->h.desc); + return RETERR(PTR_ERR(ctfm)); + } + } + info_set_cipher(info, ctfm); + if (dplug->alloc) { + dtfm = dplug->alloc(); + if (IS_ERR(dtfm)) { + warning("edward-1365", + "Can not allocate info for %s\n", + dplug->h.desc); + goto unhappy_with_digest; + } + } + info_set_digest(info, dtfm); + return 0; + unhappy_with_digest: + if (cplug->free) { + cplug->free(ctfm); + info_set_cipher(info, NULL); + } + return RETERR(PTR_ERR(dtfm)); +} +#endif + +static void +free_crypto_tfms(struct reiser4_crypto_info * info) +{ + assert("edward-1366", info != NULL); + if (!info_get_cipher(info)) { + assert("edward-1601", !info_get_digest(info)); + return; + } + inode_cipher_plugin(info->host)->free(info_get_cipher(info)); + info_set_cipher(info, NULL); + inode_digest_plugin(info->host)->free(info_get_digest(info)); + info_set_digest(info, NULL); + return; +} + +#if 0 +/* create a key fingerprint for disk stat-data */ +static int create_keyid (struct reiser4_crypto_info * info, + struct reiser4_crypto_data * data) +{ + int ret = -ENOMEM; + size_t blk, pad; + __u8 * dmem; + __u8 * cmem; + struct hash_desc ddesc; + struct blkcipher_desc cdesc; + struct scatterlist sg; + + assert("edward-1367", info != NULL); + assert("edward-1368", info->keyid != NULL); + + ddesc.tfm = info_get_digest(info); + ddesc.flags = 0; + cdesc.tfm = info_get_cipher(info); + cdesc.flags = 0; + + dmem = kmalloc((size_t)crypto_hash_digestsize(ddesc.tfm), + reiser4_ctx_gfp_mask_get()); + if (!dmem) + goto exit1; + + blk = crypto_blkcipher_blocksize(cdesc.tfm); + + pad = data->keyid_size % blk; + pad = (pad ? 
blk - pad : 0); + + cmem = kmalloc((size_t)data->keyid_size + pad, + reiser4_ctx_gfp_mask_get()); + if (!cmem) + goto exit2; + memcpy(cmem, data->keyid, data->keyid_size); + memset(cmem + data->keyid_size, 0, pad); + + sg_init_one(&sg, cmem, data->keyid_size + pad); + + ret = crypto_blkcipher_encrypt(&cdesc, &sg, &sg, + data->keyid_size + pad); + if (ret) { + warning("edward-1369", + "encryption failed flags=%x\n", cdesc.flags); + goto exit3; + } + ret = crypto_hash_digest(&ddesc, &sg, sg.length, dmem); + if (ret) { + warning("edward-1602", + "digest failed flags=%x\n", ddesc.flags); + goto exit3; + } + memcpy(info->keyid, dmem, inode_digest_plugin(info->host)->fipsize); + exit3: + kfree(cmem); + exit2: + kfree(dmem); + exit1: + return ret; +} +#endif + +static void destroy_keyid(struct reiser4_crypto_info * info) +{ + assert("edward-1370", info != NULL); + assert("edward-1371", info->keyid != NULL); + kfree(info->keyid); + return; +} + +static void __free_crypto_info (struct inode * inode) +{ + struct reiser4_crypto_info * info = inode_crypto_info(inode); + assert("edward-1372", info != NULL); + + free_crypto_tfms(info); + destroy_keyid(info); + kfree(info); +} + +#if 0 +static void instantiate_crypto_info(struct reiser4_crypto_info * info) +{ + assert("edward-1373", info != NULL); + assert("edward-1374", info->inst == 0); + info->inst = 1; +} +#endif + +static void uninstantiate_crypto_info(struct reiser4_crypto_info * info) +{ + assert("edward-1375", info != NULL); + info->inst = 0; +} + +#if 0 +static int is_crypto_info_instantiated(struct reiser4_crypto_info * info) +{ + return info->inst; +} + +static int inode_has_cipher_key(struct inode * inode) +{ + assert("edward-1376", inode != NULL); + return inode_crypto_info(inode) && + is_crypto_info_instantiated(inode_crypto_info(inode)); +} +#endif + +static void free_crypto_info (struct inode * inode) +{ + uninstantiate_crypto_info(inode_crypto_info(inode)); + __free_crypto_info(inode); +} + +static int need_cipher(struct inode * inode) +{ + return inode_cipher_plugin(inode) != + cipher_plugin_by_id(NONE_CIPHER_ID); +} + +/* Parse @data which contains a (uninstantiated) cipher key imported + from user space, create a low-level cipher info and attach it to + the @object. 
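+
+   (a side note on the padding used by create_keyid() above: the key-id
+   buffer is rounded up to the cipher block size before encryption, i.e.
+   pad = blk - (len % blk), or 0 when len is already a multiple. A
+   minimal standalone sketch of that calculation, plain C with
+   hypothetical sizes:
+
+       #include <stddef.h>
+
+       static size_t toy_cipher_pad(size_t len, size_t blk)
+       {
+               size_t rem = len % blk;
+
+               return rem ? blk - rem : 0;
+       }
+
+       // e.g. toy_cipher_pad(20, 16) == 12 and toy_cipher_pad(32, 16) == 0
+   )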
If success, then info contains an instantiated key */ +#if 0 +struct reiser4_crypto_info * create_crypto_info(struct inode * object, + struct reiser4_crypto_data * data) +{ + int ret; + struct reiser4_crypto_info * info; + + assert("edward-1377", data != NULL); + assert("edward-1378", need_cipher(object)); + + if (inode_file_plugin(object) != + file_plugin_by_id(DIRECTORY_FILE_PLUGIN_ID)) + return ERR_PTR(-EINVAL); + + info = reiser4_alloc_crypto_info(object); + if (IS_ERR(info)) + return info; + ret = alloc_crypto_tfms(info); + if (ret) + goto err; + /* instantiating a key */ + ret = crypto_blkcipher_setkey(info_get_cipher(info), + data->key, + data->keysize); + if (ret) { + warning("edward-1379", + "setkey failed flags=%x", + crypto_blkcipher_get_flags(info_get_cipher(info))); + goto err; + } + info->keysize = data->keysize; + ret = create_keyid(info, data); + if (ret) + goto err; + instantiate_crypto_info(info); + return info; + err: + __free_crypto_info(object); + return ERR_PTR(ret); +} +#endif + +/* increment/decrement a load counter when + attaching/detaching the crypto-stat to any object */ +static void load_crypto_info(struct reiser4_crypto_info * info) +{ + assert("edward-1380", info != NULL); + inc_keyload_count(info); +} + +static void unload_crypto_info(struct inode * inode) +{ + struct reiser4_crypto_info * info = inode_crypto_info(inode); + assert("edward-1381", info->keyload_count > 0); + + dec_keyload_count(inode_crypto_info(inode)); + if (info->keyload_count == 0) + /* final release */ + free_crypto_info(inode); +} + +/* attach/detach an existing crypto-stat */ +void reiser4_attach_crypto_info(struct inode * inode, + struct reiser4_crypto_info * info) +{ + assert("edward-1382", inode != NULL); + assert("edward-1383", info != NULL); + assert("edward-1384", inode_crypto_info(inode) == NULL); + + set_inode_crypto_info(inode, info); + load_crypto_info(info); +} + +/* returns true, if crypto stat can be attached to the @host */ +#if REISER4_DEBUG +static int host_allows_crypto_info(struct inode * host) +{ + int ret; + file_plugin * fplug = inode_file_plugin(host); + + switch (fplug->h.id) { + case CRYPTCOMPRESS_FILE_PLUGIN_ID: + ret = 1; + break; + default: + ret = 0; + } + return ret; +} +#endif /* REISER4_DEBUG */ + +static void reiser4_detach_crypto_info(struct inode * inode) +{ + assert("edward-1385", inode != NULL); + assert("edward-1386", host_allows_crypto_info(inode)); + + if (inode_crypto_info(inode)) + unload_crypto_info(inode); + set_inode_crypto_info(inode, NULL); +} + +#if 0 + +/* compare fingerprints of @child and @parent */ +static int keyid_eq(struct reiser4_crypto_info * child, + struct reiser4_crypto_info * parent) +{ + return !memcmp(child->keyid, + parent->keyid, + info_digest_plugin(parent)->fipsize); +} + +/* check if a crypto-stat (which is bound to @parent) can be inherited */ +int can_inherit_crypto_cryptcompress(struct inode *child, struct inode *parent) +{ + if (!need_cipher(child)) + return 0; + /* the child is created */ + if (!inode_crypto_info(child)) + return 1; + /* the child is looked up */ + if (!inode_crypto_info(parent)) + return 0; + return (inode_cipher_plugin(child) == inode_cipher_plugin(parent) && + inode_digest_plugin(child) == inode_digest_plugin(parent) && + inode_crypto_info(child)->keysize == + inode_crypto_info(parent)->keysize && + keyid_eq(inode_crypto_info(child), inode_crypto_info(parent))); +} +#endif + +/* helper functions for ->create() method of the cryptcompress plugin */ +static int inode_set_crypto(struct inode * 
object) +{ + reiser4_inode * info; + if (!inode_crypto_info(object)) { + if (need_cipher(object)) + return RETERR(-EINVAL); + /* the file is not to be encrypted */ + return 0; + } + info = reiser4_inode_data(object); + info->extmask |= (1 << CRYPTO_STAT); + return 0; +} + +static int inode_init_compression(struct inode * object) +{ + int result = 0; + assert("edward-1461", object != NULL); + if (inode_compression_plugin(object)->init) + result = inode_compression_plugin(object)->init(); + return result; +} + +static int inode_check_cluster(struct inode * object) +{ + assert("edward-696", object != NULL); + + if (unlikely(inode_cluster_size(object) < PAGE_SIZE)) { + warning("edward-1320", "Can not support '%s' " + "logical clusters (less then page size)", + inode_cluster_plugin(object)->h.label); + return RETERR(-EINVAL); + } + if (unlikely(inode_cluster_shift(object)) >= BITS_PER_BYTE*sizeof(int)){ + warning("edward-1463", "Can not support '%s' " + "logical clusters (too big for transform)", + inode_cluster_plugin(object)->h.label); + return RETERR(-EINVAL); + } + return 0; +} + +/* plugin->destroy_inode() */ +void destroy_inode_cryptcompress(struct inode * inode) +{ + assert("edward-1464", INODE_PGCOUNT(inode) == 0); + reiser4_detach_crypto_info(inode); + return; +} + +/* plugin->create_object(): +. install plugins +. attach crypto info if specified +. attach compression info if specified +. attach cluster info +*/ +int create_object_cryptcompress(struct inode *object, struct inode *parent, + reiser4_object_create_data * data) +{ + int result; + reiser4_inode *info; + + assert("edward-23", object != NULL); + assert("edward-24", parent != NULL); + assert("edward-30", data != NULL); + assert("edward-26", reiser4_inode_get_flag(object, REISER4_NO_SD)); + assert("edward-27", data->id == CRYPTCOMPRESS_FILE_PLUGIN_ID); + + info = reiser4_inode_data(object); + + assert("edward-29", info != NULL); + + /* set file bit */ + info->plugin_mask |= (1 << PSET_FILE); + + /* set crypto */ + result = inode_set_crypto(object); + if (result) + goto error; + /* set compression */ + result = inode_init_compression(object); + if (result) + goto error; + /* set cluster */ + result = inode_check_cluster(object); + if (result) + goto error; + + /* save everything in disk stat-data */ + result = write_sd_by_inode_common(object); + if (!result) + return 0; + error: + reiser4_detach_crypto_info(object); + return result; +} + +/* plugin->open() */ +int open_cryptcompress(struct inode * inode, struct file * file) +{ + return 0; +} + +/* returns a blocksize, the attribute of a cipher algorithm */ +static unsigned int +cipher_blocksize(struct inode * inode) +{ + assert("edward-758", need_cipher(inode)); + assert("edward-1400", inode_crypto_info(inode) != NULL); + return crypto_blkcipher_blocksize + (info_get_cipher(inode_crypto_info(inode))); +} + +/* returns offset translated by scale factor of the crypto-algorithm */ +static loff_t inode_scaled_offset (struct inode * inode, + const loff_t src_off /* input offset */) +{ + assert("edward-97", inode != NULL); + + if (!need_cipher(inode) || + src_off == get_key_offset(reiser4_min_key()) || + src_off == get_key_offset(reiser4_max_key())) + return src_off; + + return inode_cipher_plugin(inode)->scale(inode, + cipher_blocksize(inode), + src_off); +} + +/* returns disk cluster size */ +size_t inode_scaled_cluster_size(struct inode * inode) +{ + assert("edward-110", inode != NULL); + + return inode_scaled_offset(inode, inode_cluster_size(inode)); +} + +/* set number of cluster 
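
inode_check_cluster() above only accepts logical clusters that are at least one page large and whose shift fits the transform machinery; the per-cluster page counts used further down are derived from that geometry. A small sketch of the round-up arithmetic, assuming 4 KiB pages and a size_in_pages()-style helper (names and values are illustrative, not taken from the patch):

#include <stdio.h>

#define PAGE_SIZE_ 4096UL       /* assumed page size for the illustration */

/* size_in_pages() analogue: number of pages needed to hold @bytes */
static unsigned long size_in_pages_(unsigned long bytes)
{
    return (bytes + PAGE_SIZE_ - 1) / PAGE_SIZE_;
}

int main(void)
{
    unsigned long cluster_size = 64 * 1024;   /* assumed 64K logical cluster */
    unsigned long nrpages = size_in_pages_(cluster_size);

    /* inode_check_cluster() rejects clusters smaller than a page */
    printf("pages per cluster: %lu, ok: %d\n",
           nrpages, cluster_size >= PAGE_SIZE_);
    return 0;
}
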
pages */ +static void set_cluster_nrpages(struct cluster_handle * clust, + struct inode *inode) +{ + struct reiser4_slide * win; + + assert("edward-180", clust != NULL); + assert("edward-1040", inode != NULL); + + clust->old_nrpages = size_in_pages(lbytes(clust->index, inode)); + win = clust->win; + if (!win) { + clust->nr_pages = size_in_pages(lbytes(clust->index, inode)); + return; + } + assert("edward-1176", clust->op != LC_INVAL); + assert("edward-1064", win->off + win->count + win->delta != 0); + + if (win->stat == HOLE_WINDOW && + win->off == 0 && win->count == inode_cluster_size(inode)) { + /* special case: writing a "fake" logical cluster */ + clust->nr_pages = 0; + return; + } + clust->nr_pages = size_in_pages(max(win->off + win->count + win->delta, + lbytes(clust->index, inode))); + return; +} + +/* plugin->key_by_inode() + build key of a disk cluster */ +int key_by_inode_cryptcompress(struct inode *inode, loff_t off, + reiser4_key * key) +{ + assert("edward-64", inode != 0); + + if (likely(off != get_key_offset(reiser4_max_key()))) + off = off_to_clust_to_off(off, inode); + if (inode_crypto_info(inode)) + off = inode_scaled_offset(inode, off); + + key_by_inode_and_offset_common(inode, 0, key); + set_key_offset(key, (__u64)off); + return 0; +} + +/* plugin->flow_by_inode() */ +/* flow is used to read/write disk clusters */ +int flow_by_inode_cryptcompress(struct inode *inode, const char __user * buf, + int user, /* 1: @buf is of user space, + 0: kernel space */ + loff_t size, /* @buf size */ + loff_t off, /* offset to start io from */ + rw_op op, /* READ or WRITE */ + flow_t * f /* resulting flow */) +{ + assert("edward-436", f != NULL); + assert("edward-149", inode != NULL); + assert("edward-150", inode_file_plugin(inode) != NULL); + assert("edward-1465", user == 0); /* we use flow to read/write + disk clusters located in + kernel space */ + f->length = size; + memcpy(&f->data, &buf, sizeof(buf)); + f->user = user; + f->op = op; + + return key_by_inode_cryptcompress(inode, off, &f->key); +} + +static int +cryptcompress_hint_validate(hint_t * hint, const reiser4_key * key, + znode_lock_mode lock_mode) +{ + coord_t *coord; + + assert("edward-704", hint != NULL); + assert("edward-1089", !hint_is_valid(hint)); + assert("edward-706", hint->lh.owner == NULL); + + coord = &hint->ext_coord.coord; + + if (!hint || !hint_is_set(hint) || hint->mode != lock_mode) + /* hint either not set or set by different operation */ + return RETERR(-E_REPEAT); + + if (get_key_offset(key) != hint->offset) + /* hint is set for different key */ + return RETERR(-E_REPEAT); + + assert("edward-707", reiser4_schedulable()); + + return reiser4_seal_validate(&hint->seal, &hint->ext_coord.coord, + key, &hint->lh, lock_mode, + ZNODE_LOCK_LOPRI); +} + +/* reserve disk space when writing a logical cluster */ +static int reserve4cluster(struct inode *inode, struct cluster_handle *clust) +{ + int result = 0; + + assert("edward-965", reiser4_schedulable()); + assert("edward-439", inode != NULL); + assert("edward-440", clust != NULL); + assert("edward-441", clust->pages != NULL); + + if (clust->nr_pages == 0) { + assert("edward-1152", clust->win != NULL); + assert("edward-1153", clust->win->stat == HOLE_WINDOW); + /* don't reserve disk space for fake logical cluster */ + return 0; + } + assert("edward-442", jprivate(clust->pages[0]) != NULL); + + result = reiser4_grab_space_force(estimate_insert_cluster(inode) + + estimate_update_cluster(inode), + BA_CAN_COMMIT); + if (result) + return result; + clust->reserved = 1; + 
grabbed2cluster_reserved(estimate_insert_cluster(inode) + + estimate_update_cluster(inode)); +#if REISER4_DEBUG + clust->reserved_prepped = estimate_update_cluster(inode); + clust->reserved_unprepped = estimate_insert_cluster(inode); +#endif + /* there can be space grabbed by txnmgr_force_commit_all */ + return 0; +} + +/* free reserved disk space if writing a logical cluster fails */ +static void free_reserved4cluster(struct inode *inode, + struct cluster_handle *ch, int count) +{ + assert("edward-967", ch->reserved == 1); + + cluster_reserved2free(count); + ch->reserved = 0; +} + +/* + * The core search procedure of the cryptcompress plugin. + * If returned value is not cbk_errored, then current position + * is locked. + */ +static int find_cluster_item(hint_t * hint, + const reiser4_key * key, /* key of the item we are + looking for */ + znode_lock_mode lock_mode /* which lock */ , + ra_info_t * ra_info, lookup_bias bias, __u32 flags) +{ + int result; + reiser4_key ikey; + coord_t *coord = &hint->ext_coord.coord; + coord_t orig = *coord; + + assert("edward-152", hint != NULL); + + if (!hint_is_valid(hint)) { + result = cryptcompress_hint_validate(hint, key, lock_mode); + if (result == -E_REPEAT) + goto traverse_tree; + else if (result) { + assert("edward-1216", 0); + return result; + } + hint_set_valid(hint); + } + assert("edward-709", znode_is_any_locked(coord->node)); + /* + * Hint is valid, so we perform in-place lookup. + * It means we just need to check if the next item in + * the tree (relative to the current position @coord) + * has key @key. + * + * Valid hint means in particular, that node is not + * empty and at least one its item has been processed + */ + if (equal_to_rdk(coord->node, key)) { + /* + * Look for the item in the right neighbor + */ + lock_handle lh_right; + + init_lh(&lh_right); + result = reiser4_get_right_neighbor(&lh_right, coord->node, + znode_is_wlocked(coord->node) ? + ZNODE_WRITE_LOCK : ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result) { + done_lh(&lh_right); + reiser4_unset_hint(hint); + if (result == -E_NO_NEIGHBOR) + return RETERR(-EIO); + return result; + } + assert("edward-1218", + equal_to_ldk(lh_right.node, key)); + result = zload(lh_right.node); + if (result) { + done_lh(&lh_right); + reiser4_unset_hint(hint); + return result; + } + coord_init_first_unit_nocheck(coord, lh_right.node); + + if (!coord_is_existing_item(coord)) { + zrelse(lh_right.node); + done_lh(&lh_right); + goto traverse_tree; + } + item_key_by_coord(coord, &ikey); + zrelse(coord->node); + if (unlikely(!keyeq(key, &ikey))) { + warning("edward-1608", + "Expected item not found. 
Fsck?"); + done_lh(&lh_right); + goto not_found; + } + /* + * item has been found in the right neighbor; + * move lock to the right + */ + done_lh(&hint->lh); + move_lh(&hint->lh, &lh_right); + + dclust_inc_extension_ncount(hint); + + return CBK_COORD_FOUND; + } else { + /* + * Look for the item in the current node + */ + coord->item_pos++; + coord->unit_pos = 0; + coord->between = AT_UNIT; + + result = zload(coord->node); + if (result) { + done_lh(&hint->lh); + return result; + } + if (!coord_is_existing_item(coord)) { + zrelse(coord->node); + goto not_found; + } + item_key_by_coord(coord, &ikey); + zrelse(coord->node); + if (!keyeq(key, &ikey)) + goto not_found; + /* + * item has been found in the current node + */ + dclust_inc_extension_ncount(hint); + + return CBK_COORD_FOUND; + } + not_found: + /* + * The tree doesn't contain an item with @key; + * roll back the coord + */ + *coord = orig; + ON_DEBUG(coord_update_v(coord)); + return CBK_COORD_NOTFOUND; + + traverse_tree: + + reiser4_unset_hint(hint); + dclust_init_extension(hint); + coord_init_zero(coord); + + assert("edward-713", hint->lh.owner == NULL); + assert("edward-714", reiser4_schedulable()); + + result = coord_by_key(current_tree, key, coord, &hint->lh, + lock_mode, bias, LEAF_LEVEL, LEAF_LEVEL, + CBK_UNIQUE | flags, ra_info); + if (cbk_errored(result)) + return result; + if(result == CBK_COORD_FOUND) + dclust_inc_extension_ncount(hint); + hint_set_valid(hint); + return result; +} + +/* This function is called by deflate[inflate] manager when + creating a transformed/plain stream to check if we should + create/cut some overhead. If this returns true, then @oh + contains the size of this overhead. + */ +static int need_cut_or_align(struct inode * inode, + struct cluster_handle * ch, rw_op rw, int * oh) +{ + struct tfm_cluster * tc = &ch->tc; + switch (rw) { + case WRITE_OP: /* estimate align */ + *oh = tc->len % cipher_blocksize(inode); + if (*oh != 0) + return 1; + break; + case READ_OP: /* estimate cut */ + *oh = *(tfm_output_data(ch) + tc->len - 1); + break; + default: + impossible("edward-1401", "bad option"); + } + return (tc->len != tc->lsize); +} + +/* create/cut an overhead of transformed/plain stream */ +static void align_or_cut_overhead(struct inode * inode, + struct cluster_handle * ch, rw_op rw) +{ + unsigned int oh; + cipher_plugin * cplug = inode_cipher_plugin(inode); + + assert("edward-1402", need_cipher(inode)); + + if (!need_cut_or_align(inode, ch, rw, &oh)) + return; + switch (rw) { + case WRITE_OP: /* do align */ + ch->tc.len += + cplug->align_stream(tfm_input_data(ch) + + ch->tc.len, ch->tc.len, + cipher_blocksize(inode)); + *(tfm_input_data(ch) + ch->tc.len - 1) = + cipher_blocksize(inode) - oh; + break; + case READ_OP: /* do cut */ + assert("edward-1403", oh <= cipher_blocksize(inode)); + ch->tc.len -= oh; + break; + default: + impossible("edward-1404", "bad option"); + } + return; +} + +static unsigned max_cipher_overhead(struct inode * inode) +{ + if (!need_cipher(inode) || !inode_cipher_plugin(inode)->align_stream) + return 0; + return cipher_blocksize(inode); +} + +static int deflate_overhead(struct inode *inode) +{ + return (inode_compression_plugin(inode)-> + checksum ? 
DC_CHECKSUM_SIZE : 0); +} + +static unsigned deflate_overrun(struct inode * inode, int ilen) +{ + return coa_overrun(inode_compression_plugin(inode), ilen); +} + +static bool is_all_zero(char const* mem, size_t size) +{ + while (size-- > 0) + if (*mem++) + return false; + return true; +} + +static inline bool should_punch_hole(struct tfm_cluster *tc) +{ + if (0 && + !reiser4_is_set(reiser4_get_current_sb(), REISER4_DONT_PUNCH_HOLES) + && is_all_zero(tfm_stream_data(tc, INPUT_STREAM), tc->lsize)) { + + tc->hole = 1; + return true; + } + return false; +} + +/* Estimating compressibility of a logical cluster by various + policies represented by compression mode plugin. + If this returns false, then compressor won't be called for + the cluster of index @index. +*/ +static int should_compress(struct tfm_cluster *tc, cloff_t index, + struct inode *inode) +{ + compression_plugin *cplug = inode_compression_plugin(inode); + compression_mode_plugin *mplug = inode_compression_mode_plugin(inode); + + assert("edward-1321", tc->len != 0); + assert("edward-1322", cplug != NULL); + assert("edward-1323", mplug != NULL); + + if (should_punch_hole(tc)) + /* + * we are about to punch a hole, + * so don't compress data + */ + return 0; + return /* estimate by size */ + (cplug->min_size_deflate ? + tc->len >= cplug->min_size_deflate() : + 1) && + /* estimate by compression mode plugin */ + (mplug->should_deflate ? + mplug->should_deflate(inode, index) : + 1); +} + +/* Evaluating results of compression transform. + Returns true, if we need to accept this results */ +static int save_compressed(int size_before, int size_after, struct inode *inode) +{ + return (size_after + deflate_overhead(inode) + + max_cipher_overhead(inode) < size_before); +} + +/* Guess result of the evaluation above */ +static int need_inflate(struct cluster_handle * ch, struct inode * inode, + int encrypted /* is cluster encrypted */ ) +{ + struct tfm_cluster * tc = &ch->tc; + + assert("edward-142", tc != 0); + assert("edward-143", inode != NULL); + + return tc->len < + (encrypted ? + inode_scaled_offset(inode, tc->lsize) : + tc->lsize); +} + +/* If results of compression were accepted, then we add + a checksum to catch possible disk cluster corruption. + The following is a format of the data stored in disk clusters: + + data This is (transformed) logical cluster. + cipher_overhead This is created by ->align() method + of cipher plugin. May be absent. + checksum (4) This is created by ->checksum method + of compression plugin to check + integrity. May be absent. + + Crypto overhead format: + + data + control_byte (1) contains aligned overhead size: + 1 <= overhead <= cipher_blksize +*/ +/* Append a checksum at the end of a transformed stream */ +static void dc_set_checksum(compression_plugin * cplug, struct tfm_cluster * tc) +{ + __u32 checksum; + + assert("edward-1309", tc != NULL); + assert("edward-1310", tc->len > 0); + assert("edward-1311", cplug->checksum != NULL); + + checksum = cplug->checksum(tfm_stream_data(tc, OUTPUT_STREAM), tc->len); + put_unaligned(cpu_to_le32(checksum), + (d32 *)(tfm_stream_data(tc, OUTPUT_STREAM) + tc->len)); + tc->len += (int)DC_CHECKSUM_SIZE; +} + +/* Check a disk cluster checksum. 
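
The disk-cluster framing described in the comment above (transformed data, optional cipher overhead whose last byte records the pad size, optional 4-byte checksum trailer) can be exercised in isolation. The sketch below mirrors the append/verify/strip behaviour of dc_set_checksum() and dc_check_checksum(); toy_checksum() is only a stand-in for the compression plugin's ->checksum method, not the real algorithm:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in checksum; the real plugin supplies its own ->checksum method. */
static uint32_t toy_checksum(const unsigned char *p, size_t n)
{
    uint32_t c = 0;
    while (n--)
        c = c * 31 + *p++;
    return c;
}

/* Append a little-endian 32-bit trailer, as dc_set_checksum() does. */
static size_t add_trailer(unsigned char *buf, size_t len)
{
    uint32_t c = toy_checksum(buf, len);

    buf[len + 0] = c & 0xff;
    buf[len + 1] = (c >> 8) & 0xff;
    buf[len + 2] = (c >> 16) & 0xff;
    buf[len + 3] = (c >> 24) & 0xff;
    return len + 4;
}

/* Verify and strip the trailer, as dc_check_checksum() does; 0 on success. */
static int check_trailer(unsigned char *buf, size_t *len)
{
    uint32_t stored;

    if (*len <= 4)
        return 1;
    stored = (uint32_t)buf[*len - 4] | ((uint32_t)buf[*len - 3] << 8) |
             ((uint32_t)buf[*len - 2] << 16) | ((uint32_t)buf[*len - 1] << 24);
    if (stored != toy_checksum(buf, *len - 4))
        return 1;
    *len -= 4;
    return 0;
}

int main(void)
{
    unsigned char buf[32] = "compressed bytes";
    size_t len = strlen((char *)buf);

    len = add_trailer(buf, len);
    printf("verify: %s\n", check_trailer(buf, &len) ? "bad" : "ok");
    return 0;
}
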
+ Returns 0 if checksum is correct, otherwise returns 1 */ +static int dc_check_checksum(compression_plugin * cplug, struct tfm_cluster * tc) +{ + assert("edward-1312", tc != NULL); + assert("edward-1313", tc->len > (int)DC_CHECKSUM_SIZE); + assert("edward-1314", cplug->checksum != NULL); + + if (cplug->checksum(tfm_stream_data(tc, INPUT_STREAM), + tc->len - (int)DC_CHECKSUM_SIZE) != + le32_to_cpu(get_unaligned((d32 *) + (tfm_stream_data(tc, INPUT_STREAM) + + tc->len - (int)DC_CHECKSUM_SIZE)))) { + warning("edward-156", + "Bad disk cluster checksum %d, (should be %d) Fsck?\n", + (int)le32_to_cpu + (get_unaligned((d32 *) + (tfm_stream_data(tc, INPUT_STREAM) + + tc->len - (int)DC_CHECKSUM_SIZE))), + (int)cplug->checksum + (tfm_stream_data(tc, INPUT_STREAM), + tc->len - (int)DC_CHECKSUM_SIZE)); + return 1; + } + tc->len -= (int)DC_CHECKSUM_SIZE; + return 0; +} + +/* get input/output stream for some transform action */ +int grab_tfm_stream(struct inode * inode, struct tfm_cluster * tc, + tfm_stream_id id) +{ + size_t size = inode_scaled_cluster_size(inode); + + assert("edward-901", tc != NULL); + assert("edward-1027", inode_compression_plugin(inode) != NULL); + + if (cluster_get_tfm_act(tc) == TFMA_WRITE) + size += deflate_overrun(inode, inode_cluster_size(inode)); + + if (!get_tfm_stream(tc, id) && id == INPUT_STREAM) + alternate_streams(tc); + if (!get_tfm_stream(tc, id)) + return alloc_tfm_stream(tc, size, id); + + assert("edward-902", tfm_stream_is_set(tc, id)); + + if (tfm_stream_size(tc, id) < size) + return realloc_tfm_stream(tc, size, id); + return 0; +} + +/* Common deflate manager */ +int reiser4_deflate_cluster(struct cluster_handle * clust, struct inode * inode) +{ + int result = 0; + int compressed = 0; + int encrypted = 0; + struct tfm_cluster * tc = &clust->tc; + compression_plugin * coplug; + + assert("edward-401", inode != NULL); + assert("edward-903", tfm_stream_is_set(tc, INPUT_STREAM)); + assert("edward-1348", cluster_get_tfm_act(tc) == TFMA_WRITE); + assert("edward-498", !tfm_cluster_is_uptodate(tc)); + + coplug = inode_compression_plugin(inode); + if (should_compress(tc, clust->index, inode)) { + /* try to compress, discard bad results */ + size_t dst_len; + compression_mode_plugin * mplug = + inode_compression_mode_plugin(inode); + assert("edward-602", coplug != NULL); + assert("edward-1423", coplug->compress != NULL); + + result = grab_coa(tc, coplug); + if (result) + /* + * can not allocate memory to perform + * compression, leave data uncompressed + */ + goto cipher; + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) { + warning("edward-1425", + "alloc stream failed with ret=%d, skipped compression", + result); + goto cipher; + } + dst_len = tfm_stream_size(tc, OUTPUT_STREAM); + coplug->compress(get_coa(tc, coplug->h.id, tc->act), + tfm_input_data(clust), tc->len, + tfm_output_data(clust), &dst_len); + /* make sure we didn't overwrite extra bytes */ + assert("edward-603", + dst_len <= tfm_stream_size(tc, OUTPUT_STREAM)); + + /* evaluate results of compression transform */ + if (save_compressed(tc->len, dst_len, inode)) { + /* good result, accept */ + tc->len = dst_len; + if (mplug->accept_hook != NULL) { + result = mplug->accept_hook(inode, clust->index); + if (result) + warning("edward-1426", + "accept_hook failed with ret=%d", + result); + } + compressed = 1; + } + else { + /* bad result, discard */ +#if 0 + if (cluster_is_complete(clust, inode)) + warning("edward-1496", + "incompressible cluster %lu (inode %llu)", + clust->index, + (unsigned long 
long)get_inode_oid(inode)); +#endif + if (mplug->discard_hook != NULL && + cluster_is_complete(clust, inode)) { + result = mplug->discard_hook(inode, + clust->index); + if (result) + warning("edward-1427", + "discard_hook failed with ret=%d", + result); + } + } + } + cipher: + if (need_cipher(inode)) { + cipher_plugin * ciplug; + struct blkcipher_desc desc; + struct scatterlist src; + struct scatterlist dst; + + ciplug = inode_cipher_plugin(inode); + desc.tfm = info_get_cipher(inode_crypto_info(inode)); + desc.flags = 0; + if (compressed) + alternate_streams(tc); + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + return result; + + align_or_cut_overhead(inode, clust, WRITE_OP); + sg_init_one(&src, tfm_input_data(clust), tc->len); + sg_init_one(&dst, tfm_output_data(clust), tc->len); + + result = crypto_blkcipher_encrypt(&desc, &dst, &src, tc->len); + if (result) { + warning("edward-1405", + "encryption failed flags=%x\n", desc.flags); + return result; + } + encrypted = 1; + } + if (compressed && coplug->checksum != NULL) + dc_set_checksum(coplug, tc); + if (!compressed && !encrypted) + alternate_streams(tc); + return result; +} + +/* Common inflate manager. */ +int reiser4_inflate_cluster(struct cluster_handle * clust, struct inode * inode) +{ + int result = 0; + int transformed = 0; + struct tfm_cluster * tc = &clust->tc; + compression_plugin * coplug; + + assert("edward-905", inode != NULL); + assert("edward-1178", clust->dstat == PREP_DISK_CLUSTER); + assert("edward-906", tfm_stream_is_set(&clust->tc, INPUT_STREAM)); + assert("edward-1349", tc->act == TFMA_READ); + assert("edward-907", !tfm_cluster_is_uptodate(tc)); + + /* Handle a checksum (if any) */ + coplug = inode_compression_plugin(inode); + if (need_inflate(clust, inode, need_cipher(inode)) && + coplug->checksum != NULL) { + result = dc_check_checksum(coplug, tc); + if (unlikely(result)) { + warning("edward-1460", + "Inode %llu: disk cluster %lu looks corrupted", + (unsigned long long)get_inode_oid(inode), + clust->index); + return RETERR(-EIO); + } + } + if (need_cipher(inode)) { + cipher_plugin * ciplug; + struct blkcipher_desc desc; + struct scatterlist src; + struct scatterlist dst; + + ciplug = inode_cipher_plugin(inode); + desc.tfm = info_get_cipher(inode_crypto_info(inode)); + desc.flags = 0; + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + return result; + assert("edward-909", tfm_cluster_is_set(tc)); + + sg_init_one(&src, tfm_input_data(clust), tc->len); + sg_init_one(&dst, tfm_output_data(clust), tc->len); + + result = crypto_blkcipher_decrypt(&desc, &dst, &src, tc->len); + if (result) { + warning("edward-1600", "decrypt failed flags=%x\n", + desc.flags); + return result; + } + align_or_cut_overhead(inode, clust, READ_OP); + transformed = 1; + } + if (need_inflate(clust, inode, 0)) { + size_t dst_len = inode_cluster_size(inode); + if(transformed) + alternate_streams(tc); + + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + return result; + assert("edward-1305", coplug->decompress != NULL); + assert("edward-910", tfm_cluster_is_set(tc)); + + coplug->decompress(get_coa(tc, coplug->h.id, tc->act), + tfm_input_data(clust), tc->len, + tfm_output_data(clust), &dst_len); + /* check length */ + tc->len = dst_len; + assert("edward-157", dst_len == tc->lsize); + transformed = 1; + } + if (!transformed) + alternate_streams(tc); + return result; +} + +/* This is implementation of readpage method of struct + address_space_operations for cryptcompress plugin. 
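
The deflate manager above keeps a compressed stream only when save_compressed() says it still wins after the checksum and the worst-case cipher padding are accounted for; otherwise the result is discarded and the plain data is written. A self-contained illustration of that acceptance rule (the overhead values are assumptions for the example, not constants from the patch):

#include <stdio.h>

/* Mirrors the shape of save_compressed(): accept only if the compressed
 * stream plus its per-cluster overheads is still smaller than the input. */
static int accept_compressed(int before, int after,
                             int checksum_overhead, int cipher_overhead)
{
    return after + checksum_overhead + cipher_overhead < before;
}

int main(void)
{
    printf("%d\n", accept_compressed(65536, 40000, 4, 16));  /* 1: accept  */
    printf("%d\n", accept_compressed(65536, 65530, 4, 16));  /* 0: discard */
    return 0;
}
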
*/ +int readpage_cryptcompress(struct file *file, struct page *page) +{ + reiser4_context *ctx; + struct cluster_handle clust; + item_plugin *iplug; + int result; + + assert("edward-88", PageLocked(page)); + assert("vs-976", !PageUptodate(page)); + assert("edward-89", page->mapping && page->mapping->host); + + ctx = reiser4_init_context(page->mapping->host->i_sb); + if (IS_ERR(ctx)) { + unlock_page(page); + return PTR_ERR(ctx); + } + assert("edward-113", + ergo(file != NULL, + page->mapping == file_inode(file)->i_mapping)); + + if (PageUptodate(page)) { + warning("edward-1338", "page is already uptodate\n"); + unlock_page(page); + reiser4_exit_context(ctx); + return 0; + } + cluster_init_read(&clust, NULL); + clust.file = file; + iplug = item_plugin_by_id(CTAIL_ID); + if (!iplug->s.file.readpage) { + unlock_page(page); + put_cluster_handle(&clust); + reiser4_exit_context(ctx); + return -EINVAL; + } + result = iplug->s.file.readpage(&clust, page); + + put_cluster_handle(&clust); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* number of pages to check in */ +static int get_new_nrpages(struct cluster_handle * clust) +{ + switch (clust->op) { + case LC_APPOV: + case LC_EXPAND: + return clust->nr_pages; + case LC_SHRINK: + assert("edward-1179", clust->win != NULL); + return size_in_pages(clust->win->off + clust->win->count); + default: + impossible("edward-1180", "bad page cluster option"); + return 0; + } +} + +static void set_cluster_pages_dirty(struct cluster_handle * clust, + struct inode * inode) +{ + int i; + struct page *pg; + int nrpages = get_new_nrpages(clust); + + for (i = 0; i < nrpages; i++) { + + pg = clust->pages[i]; + assert("edward-968", pg != NULL); + lock_page(pg); + assert("edward-1065", PageUptodate(pg)); + set_page_dirty_notag(pg); + unlock_page(pg); + mark_page_accessed(pg); + } +} + +/* Grab a page cluster for read/write operations. + Attach a jnode for write operations (when preparing for modifications, which + are supposed to be committed). + + We allocate only one jnode per page cluster; this jnode is binded to the + first page of this cluster, so we have an extra-reference that will be put + as soon as jnode is evicted from memory), other references will be cleaned + up in flush time (assume that check in page cluster was successful). 
+*/ +int grab_page_cluster(struct inode * inode, + struct cluster_handle * clust, rw_op rw) +{ + int i; + int result = 0; + jnode *node = NULL; + + assert("edward-182", clust != NULL); + assert("edward-183", clust->pages != NULL); + assert("edward-1466", clust->node == NULL); + assert("edward-1428", inode != NULL); + assert("edward-1429", inode->i_mapping != NULL); + assert("edward-184", clust->nr_pages <= cluster_nrpages(inode)); + + if (clust->nr_pages == 0) + return 0; + + for (i = 0; i < clust->nr_pages; i++) { + + assert("edward-1044", clust->pages[i] == NULL); + + clust->pages[i] = + find_or_create_page(inode->i_mapping, + clust_to_pg(clust->index, inode) + i, + reiser4_ctx_gfp_mask_get()); + if (!clust->pages[i]) { + result = RETERR(-ENOMEM); + break; + } + if (i == 0 && rw == WRITE_OP) { + node = jnode_of_page(clust->pages[i]); + if (IS_ERR(node)) { + result = PTR_ERR(node); + unlock_page(clust->pages[i]); + break; + } + JF_SET(node, JNODE_CLUSTER_PAGE); + assert("edward-920", jprivate(clust->pages[0])); + } + INODE_PGCOUNT_INC(inode); + unlock_page(clust->pages[i]); + } + if (unlikely(result)) { + while (i) { + put_cluster_page(clust->pages[--i]); + INODE_PGCOUNT_DEC(inode); + } + if (node && !IS_ERR(node)) + jput(node); + return result; + } + clust->node = node; + return 0; +} + +static void truncate_page_cluster_range(struct inode * inode, + struct page ** pages, + cloff_t index, + int from, int count, + int even_cows) +{ + assert("edward-1467", count > 0); + reiser4_invalidate_pages(inode->i_mapping, + clust_to_pg(index, inode) + from, + count, even_cows); +} + +/* Put @count pages starting from @from offset */ +void __put_page_cluster(int from, int count, + struct page ** pages, struct inode * inode) +{ + int i; + assert("edward-1468", pages != NULL); + assert("edward-1469", inode != NULL); + assert("edward-1470", from >= 0 && count >= 0); + + for (i = 0; i < count; i++) { + assert("edward-1471", pages[from + i] != NULL); + assert("edward-1472", + pages[from + i]->index == pages[from]->index + i); + + put_cluster_page(pages[from + i]); + INODE_PGCOUNT_DEC(inode); + } +} + +/* + * This is dual to grab_page_cluster, + * however if @rw == WRITE_OP, then we call this function + * only if something is failed before checkin page cluster. + */ +void put_page_cluster(struct cluster_handle * clust, + struct inode * inode, rw_op rw) +{ + assert("edward-445", clust != NULL); + assert("edward-922", clust->pages != NULL); + assert("edward-446", + ergo(clust->nr_pages != 0, clust->pages[0] != NULL)); + + __put_page_cluster(0, clust->nr_pages, clust->pages, inode); + if (rw == WRITE_OP) { + if (unlikely(clust->node)) { + assert("edward-447", + clust->node == jprivate(clust->pages[0])); + jput(clust->node); + clust->node = NULL; + } + } +} + +#if REISER4_DEBUG +int cryptcompress_inode_ok(struct inode *inode) +{ + if (!(reiser4_inode_data(inode)->plugin_mask & (1 << PSET_FILE))) + return 0; + if (!cluster_shift_ok(inode_cluster_shift(inode))) + return 0; + return 1; +} + +static int window_ok(struct reiser4_slide * win, struct inode *inode) +{ + assert("edward-1115", win != NULL); + assert("edward-1116", ergo(win->delta, win->stat == HOLE_WINDOW)); + + return (win->off != inode_cluster_size(inode)) && + (win->off + win->count + win->delta <= inode_cluster_size(inode)); +} + +static int cluster_ok(struct cluster_handle * clust, struct inode *inode) +{ + assert("edward-279", clust != NULL); + + if (!clust->pages) + return 0; + return (clust->win ? 
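
grab_page_cluster() and __put_page_cluster() above keep one extra reference per grabbed page and unwind cleanly if page allocation fails part-way through the cluster. The following toy model shows the same discipline with an array of counters standing in for struct page references (all names are illustrative, not part of the patch):

#include <stdio.h>

#define NRPAGES 4

static int refcount[NRPAGES];   /* stand-ins for per-page reference counts */

/* Take one reference per page; on failure release the ones already taken,
 * mirroring the unwind loop in grab_page_cluster(). */
static int grab_cluster(int fail_at)
{
    int i;

    for (i = 0; i < NRPAGES; i++) {
        if (i == fail_at)
            goto unwind;        /* simulated find_or_create_page() failure */
        refcount[i]++;
    }
    return 0;
unwind:
    while (i)
        refcount[--i]--;
    return -1;
}

static void put_cluster(void)
{
    for (int i = 0; i < NRPAGES; i++)
        refcount[i]--;          /* __put_page_cluster() analogue */
}

int main(void)
{
    grab_cluster(-1);           /* success: every page holds one extra ref */
    put_cluster();
    grab_cluster(2);            /* failure: no references are leaked */
    for (int i = 0; i < NRPAGES; i++)
        printf("page %d refcount %d\n", i, refcount[i]);  /* all zero */
    return 0;
}
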
window_ok(clust->win, inode) : 1); +} +#if 0 +static int pages_truncate_ok(struct inode *inode, pgoff_t start) +{ + int found; + struct page * page; + + + found = find_get_pages(inode->i_mapping, &start, 1, &page); + if (found) + put_cluster_page(page); + return !found; +} +#else +#define pages_truncate_ok(inode, start) 1 +#endif + +static int jnode_truncate_ok(struct inode *inode, cloff_t index) +{ + jnode *node; + node = jlookup(current_tree, get_inode_oid(inode), + clust_to_pg(index, inode)); + if (likely(!node)) + return 1; + jput(node); + return 0; +} +#endif + +/* guess next window stat */ +static inline window_stat next_window_stat(struct reiser4_slide * win) +{ + assert("edward-1130", win != NULL); + return ((win->stat == HOLE_WINDOW && win->delta == 0) ? + HOLE_WINDOW : DATA_WINDOW); +} + +/* guess and set next cluster index and window params */ +static void move_update_window(struct inode * inode, + struct cluster_handle * clust, + loff_t file_off, loff_t to_file) +{ + struct reiser4_slide * win; + + assert("edward-185", clust != NULL); + assert("edward-438", clust->pages != NULL); + assert("edward-281", cluster_ok(clust, inode)); + + win = clust->win; + if (!win) + return; + + switch (win->stat) { + case DATA_WINDOW: + /* increment */ + clust->index++; + win->stat = DATA_WINDOW; + win->off = 0; + win->count = min((loff_t)inode_cluster_size(inode), to_file); + break; + case HOLE_WINDOW: + switch (next_window_stat(win)) { + case HOLE_WINDOW: + /* skip */ + clust->index = off_to_clust(file_off, inode); + win->stat = HOLE_WINDOW; + win->off = 0; + win->count = off_to_cloff(file_off, inode); + win->delta = min((loff_t)(inode_cluster_size(inode) - + win->count), to_file); + break; + case DATA_WINDOW: + /* stay */ + win->stat = DATA_WINDOW; + /* off+count+delta=inv */ + win->off = win->off + win->count; + win->count = win->delta; + win->delta = 0; + break; + default: + impossible("edward-282", "wrong next window state"); + } + break; + default: + impossible("edward-283", "wrong current window state"); + } + assert("edward-1068", cluster_ok(clust, inode)); +} + +static int update_sd_cryptcompress(struct inode *inode) +{ + int result = 0; + + assert("edward-978", reiser4_schedulable()); + + result = reiser4_grab_space_force(/* one for stat data update */ + estimate_update_common(inode), + BA_CAN_COMMIT); + if (result) + return result; + if (!IS_NOCMTIME(inode)) + inode->i_ctime = inode->i_mtime = current_time(inode); + + result = reiser4_update_sd(inode); + + if (unlikely(result != 0)) + warning("edward-1573", + "Can not update stat-data: %i. FSCK?", + result); + return result; +} + +static void uncapture_cluster_jnode(jnode * node) +{ + txn_atom *atom; + + assert_spin_locked(&(node->guard)); + + atom = jnode_get_atom(node); + if (atom == NULL) { + assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY)); + spin_unlock_jnode(node); + return; + } + reiser4_uncapture_block(node); + spin_unlock_atom(atom); + jput(node); +} + +static void put_found_pages(struct page **pages, int nr) +{ + int i; + for (i = 0; i < nr; i++) { + assert("edward-1045", pages[i] != NULL); + put_cluster_page(pages[i]); + } +} + +/* Lifecycle of a logical cluster in the system. + * + * + * Logical cluster of a cryptcompress file is represented in the system by + * . page cluster (in memory, primary cache, contains plain text); + * . disk cluster (in memory, secondary cache, contains transformed text). + * Primary cache is to reduce number of transform operations (compression, + * encryption), i.e. 
to implement transform-caching strategy. + * Secondary cache is to reduce number of I/O operations, i.e. for usual + * write-caching strategy. Page cluster is a set of pages, i.e. mapping of + * a logical cluster to the primary cache. Disk cluster is a set of items + * of the same type defined by some reiser4 item plugin id. + * + * 1. Performing modifications + * + * Every modification of a cryptcompress file is considered as a set of + * operations performed on file's logical clusters. Every such "atomic" + * modification is truncate, append and(or) overwrite some bytes of a + * logical cluster performed in the primary cache with the following + * synchronization with the secondary cache (in flush time). Disk clusters, + * which live in the secondary cache, are supposed to be synchronized with + * disk. The mechanism of synchronization of primary and secondary caches + * includes so-called checkin/checkout technique described below. + * + * 2. Submitting modifications + * + * Each page cluster has associated jnode (a special in-memory header to + * keep a track of transactions in reiser4), which is attached to its first + * page when grabbing page cluster for modifications (see grab_page_cluster). + * Submitting modifications (see checkin_logical_cluster) is going per logical + * cluster and includes: + * . checkin_cluster_size; + * . checkin_page_cluster. + * checkin_cluster_size() is resolved to file size update (which completely + * defines new size of logical cluster (number of file's bytes in a logical + * cluster). + * checkin_page_cluster() captures jnode of a page cluster and installs + * jnode's dirty flag (if needed) to indicate that modifications are + * successfully checked in. + * + * 3. Checking out modifications + * + * Is going per logical cluster in flush time (see checkout_logical_cluster). + * This is the time of synchronizing primary and secondary caches. + * checkout_logical_cluster() includes: + * . checkout_page_cluster (retrieving checked in pages). + * . uncapture jnode (including clear dirty flag and unlock) + * + * 4. Committing modifications + * + * Proceeding a synchronization of primary and secondary caches. When checking + * out page cluster (the phase above) pages are locked/flushed/unlocked + * one-by-one in ascending order of their indexes to contiguous stream, which + * is supposed to be transformed (compressed, encrypted), chopped up into items + * and committed to disk as a disk cluster. + * + * 5. Managing page references + * + * Every checked in page have a special additional "control" reference, + * which is dropped at checkout. We need this to avoid unexpected evicting + * pages from memory before checkout. Control references are managed so + * they are not accumulated with every checkin: + * + * 0 + * checkin -> 1 + * 0 -> checkout + * checkin -> 1 + * checkin -> 1 + * checkin -> 1 + * 0 -> checkout + * ... + * + * Every page cluster has its own unique "cluster lock". Update/drop + * references are serialized via this lock. Number of checked in cluster + * pages is calculated by i_size under cluster lock. File size is updated + * at every checkin action also under cluster lock (except cases of + * appending/truncating fake logical clusters). + * + * Proof of correctness: + * + * Since we update file size under cluster lock, in the case of non-fake + * logical cluster with its lock held we do have expected number of checked + * in pages. 
On the other hand, append/truncate of fake logical clusters + * doesn't change number of checked in pages of any cluster. + * + * NOTE-EDWARD: As cluster lock we use guard (spinlock_t) of its jnode. + * Currently, I don't see any reason to create a special lock for those + * needs. + */ + +static inline void lock_cluster(jnode * node) +{ + spin_lock_jnode(node); +} + +static inline void unlock_cluster(jnode * node) +{ + spin_unlock_jnode(node); +} + +static inline void unlock_cluster_uncapture(jnode * node) +{ + uncapture_cluster_jnode(node); +} + +/* Set new file size by window. Cluster lock is required. */ +static void checkin_file_size(struct cluster_handle * clust, + struct inode * inode) +{ + loff_t new_size; + struct reiser4_slide * win; + + assert("edward-1181", clust != NULL); + assert("edward-1182", inode != NULL); + assert("edward-1473", clust->pages != NULL); + assert("edward-1474", clust->pages[0] != NULL); + assert("edward-1475", jprivate(clust->pages[0]) != NULL); + assert_spin_locked(&(jprivate(clust->pages[0])->guard)); + + + win = clust->win; + assert("edward-1183", win != NULL); + + new_size = clust_to_off(clust->index, inode) + win->off; + + switch (clust->op) { + case LC_APPOV: + case LC_EXPAND: + if (new_size + win->count <= i_size_read(inode)) + /* overwrite only */ + return; + new_size += win->count; + break; + case LC_SHRINK: + break; + default: + impossible("edward-1184", "bad page cluster option"); + break; + } + inode_check_scale_nolock(inode, i_size_read(inode), new_size); + i_size_write(inode, new_size); + return; +} + +static inline void checkin_cluster_size(struct cluster_handle * clust, + struct inode * inode) +{ + if (clust->win) + checkin_file_size(clust, inode); +} + +static int checkin_page_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + int result; + jnode * node; + int old_nrpages = clust->old_nrpages; + int new_nrpages = get_new_nrpages(clust); + + node = clust->node; + + assert("edward-221", node != NULL); + assert("edward-971", clust->reserved == 1); + assert("edward-1263", + clust->reserved_prepped == estimate_update_cluster(inode)); + assert("edward-1264", clust->reserved_unprepped == 0); + + if (JF_ISSET(node, JNODE_DIRTY)) { + /* + * page cluster was checked in, but not yet + * checked out, so release related resources + */ + free_reserved4cluster(inode, clust, + estimate_update_cluster(inode)); + __put_page_cluster(0, clust->old_nrpages, + clust->pages, inode); + } else { + result = capture_cluster_jnode(node); + if (unlikely(result)) { + unlock_cluster(node); + return result; + } + jnode_make_dirty_locked(node); + clust->reserved = 0; + } + unlock_cluster(node); + + if (new_nrpages < old_nrpages) { + /* truncate >= 1 complete pages */ + __put_page_cluster(new_nrpages, + old_nrpages - new_nrpages, + clust->pages, inode); + truncate_page_cluster_range(inode, + clust->pages, clust->index, + new_nrpages, + old_nrpages - new_nrpages, + 0); + } +#if REISER4_DEBUG + clust->reserved_prepped -= estimate_update_cluster(inode); +#endif + return 0; +} + +/* Submit modifications of a logical cluster */ +static int checkin_logical_cluster(struct cluster_handle * clust, + struct inode *inode) +{ + int result = 0; + jnode * node; + + node = clust->node; + + assert("edward-1035", node != NULL); + assert("edward-1029", clust != NULL); + assert("edward-1030", clust->reserved == 1); + assert("edward-1031", clust->nr_pages != 0); + assert("edward-1032", clust->pages != NULL); + assert("edward-1033", clust->pages[0] != NULL); + 
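
The lifecycle comment above describes how the single "control" reference behaves across checkin/checkout: the first checkin after a checkout takes it, repeated checkins reuse it rather than accumulating more, and checkout drops it again (the 0 -> 1 -> 0 diagram). A toy model of that rule, with an integer flag standing in for the dirty jnode (illustration only, not part of the patch):

#include <stdio.h>

struct cluster_state {
    int dirty;      /* stands for JNODE_DIRTY */
    int ctrl_refs;  /* extra "control" references currently held */
};

static void checkin(struct cluster_state *c)
{
    if (!c->dirty) {
        c->dirty = 1;
        c->ctrl_refs++;     /* 0 -> 1 */
    }                       /* already dirty: reuse the existing reference */
}

static void checkout(struct cluster_state *c)
{
    if (c->dirty) {
        c->dirty = 0;
        c->ctrl_refs--;     /* 1 -> 0 */
    }
}

int main(void)
{
    struct cluster_state c = { 0, 0 };

    checkin(&c); checkin(&c); checkin(&c);
    printf("after 3 checkins: %d extra ref(s)\n", c.ctrl_refs);  /* 1 */
    checkout(&c);
    printf("after checkout:   %d extra ref(s)\n", c.ctrl_refs);  /* 0 */
    return 0;
}
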
assert("edward-1446", jnode_is_cluster_page(node)); + assert("edward-1476", node == jprivate(clust->pages[0])); + + lock_cluster(node); + checkin_cluster_size(clust, inode); + /* + * this will unlock the cluster + */ + result = checkin_page_cluster(clust, inode); + jput(node); + clust->node = NULL; + return result; +} + +/* + * Retrieve size of logical cluster that was checked in at + * the latest modifying session (cluster lock is required) + */ +static inline void checkout_cluster_size(struct cluster_handle * clust, + struct inode * inode) +{ + struct tfm_cluster *tc = &clust->tc; + + tc->len = lbytes(clust->index, inode); + assert("edward-1478", tc->len != 0); +} + +/* + * Retrieve a page cluster with the latest submitted modifications + * and flush its pages to previously allocated contiguous stream. + */ +static void checkout_page_cluster(struct cluster_handle * clust, + jnode * node, struct inode * inode) +{ + int i; + int found; + int to_put; + pgoff_t page_index = clust_to_pg(clust->index, inode); + struct tfm_cluster *tc = &clust->tc; + + /* find and put checked in pages: cluster is locked, + * so we must get expected number (to_put) of pages + */ + to_put = size_in_pages(lbytes(clust->index, inode)); + found = find_get_pages(inode->i_mapping, &page_index, + to_put, clust->pages); + BUG_ON(found != to_put); + + __put_page_cluster(0, to_put, clust->pages, inode); + unlock_cluster_uncapture(node); + + /* Flush found pages. + * + * Note, that we don't disable modifications while flushing, + * moreover, some found pages can be truncated, as we have + * released cluster lock. + */ + for (i = 0; i < found; i++) { + int in_page; + char * data; + assert("edward-1479", + clust->pages[i]->index == clust->pages[0]->index + i); + + lock_page(clust->pages[i]); + if (!PageUptodate(clust->pages[i])) { + /* page was truncated */ + assert("edward-1480", + i_size_read(inode) <= page_offset(clust->pages[i])); + assert("edward-1481", + clust->pages[i]->mapping != inode->i_mapping); + unlock_page(clust->pages[i]); + break; + } + /* Update the number of bytes in the logical cluster, + * as it could be partially truncated. Note, that only + * partial truncate is possible (complete truncate can + * not go here, as it is performed via ->kill_hook() + * called by cut_file_items(), and the last one must + * wait for znode locked with parent coord). + */ + checkout_cluster_size(clust, inode); + + /* this can be zero, as new file size is + checked in before truncating pages */ + in_page = __mbp(tc->len, i); + + data = kmap_atomic(clust->pages[i]); + memcpy(tfm_stream_data(tc, INPUT_STREAM) + pg_to_off(i), + data, in_page); + kunmap_atomic(data); + /* + * modifications have been checked out and will be + * committed later. Anyway, the dirty status of the + * page is no longer relevant. However, the uptodate + * status of the page is still relevant! 
+ */ + if (PageDirty(clust->pages[i])) + cancel_dirty_page(clust->pages[i]); + + unlock_page(clust->pages[i]); + + if (in_page < PAGE_SIZE) + /* end of the file */ + break; + } + put_found_pages(clust->pages, found); /* find_get_pages */ + tc->lsize = tc->len; + return; +} + +/* Check out modifications of a logical cluster */ +int checkout_logical_cluster(struct cluster_handle * clust, + jnode * node, struct inode *inode) +{ + int result; + struct tfm_cluster *tc = &clust->tc; + + assert("edward-980", node != NULL); + assert("edward-236", inode != NULL); + assert("edward-237", clust != NULL); + assert("edward-240", !clust->win); + assert("edward-241", reiser4_schedulable()); + assert("edward-718", cryptcompress_inode_ok(inode)); + + result = grab_tfm_stream(inode, tc, INPUT_STREAM); + if (result) { + warning("edward-1430", "alloc stream failed with ret=%d", + result); + return RETERR(-E_REPEAT); + } + lock_cluster(node); + + if (unlikely(!JF_ISSET(node, JNODE_DIRTY))) { + /* race with another flush */ + warning("edward-982", + "checking out logical cluster %lu of inode %llu: " + "jnode is not dirty", clust->index, + (unsigned long long)get_inode_oid(inode)); + unlock_cluster(node); + return RETERR(-E_REPEAT); + } + cluster_reserved2grabbed(estimate_update_cluster(inode)); + + /* this will unlock cluster */ + checkout_page_cluster(clust, node, inode); + return 0; +} + +/* set hint for the cluster of the index @index */ +static void set_hint_cluster(struct inode *inode, hint_t * hint, + cloff_t index, znode_lock_mode mode) +{ + reiser4_key key; + assert("edward-722", cryptcompress_inode_ok(inode)); + assert("edward-723", + inode_file_plugin(inode) == + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + + inode_file_plugin(inode)->key_by_inode(inode, + clust_to_off(index, inode), + &key); + + reiser4_seal_init(&hint->seal, &hint->ext_coord.coord, &key); + hint->offset = get_key_offset(&key); + hint->mode = mode; +} + +void invalidate_hint_cluster(struct cluster_handle * clust) +{ + assert("edward-1291", clust != NULL); + assert("edward-1292", clust->hint != NULL); + + done_lh(&clust->hint->lh); + hint_clr_valid(clust->hint); +} + +static void put_hint_cluster(struct cluster_handle * clust, + struct inode *inode, znode_lock_mode mode) +{ + assert("edward-1286", clust != NULL); + assert("edward-1287", clust->hint != NULL); + + set_hint_cluster(inode, clust->hint, clust->index + 1, mode); + invalidate_hint_cluster(clust); +} + +static int balance_dirty_page_cluster(struct cluster_handle * clust, + struct inode *inode, loff_t off, + loff_t to_file, + int nr_dirtied) +{ + int result; + struct cryptcompress_info * info; + + assert("edward-724", inode != NULL); + assert("edward-725", cryptcompress_inode_ok(inode)); + assert("edward-1547", nr_dirtied <= cluster_nrpages(inode)); + + /* set next window params */ + move_update_window(inode, clust, off, to_file); + + result = update_sd_cryptcompress(inode); + if (result) + return result; + assert("edward-726", clust->hint->lh.owner == NULL); + info = cryptcompress_inode_data(inode); + + if (nr_dirtied == 0) + return 0; + mutex_unlock(&info->checkin_mutex); + reiser4_throttle_write(inode); + mutex_lock(&info->checkin_mutex); + return 0; +} + +/* + * Check in part of a hole within a logical cluster + */ +static int write_hole(struct inode *inode, struct cluster_handle * clust, + loff_t file_off, loff_t to_file) +{ + int result = 0; + unsigned cl_off, cl_count = 0; + unsigned to_pg, pg_off; + struct reiser4_slide * win; + + assert("edward-190", clust != 
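
checkout_page_cluster() above copies each checked-out page into the contiguous input stream using __mbp(tc->len, i), which is not defined in this hunk; the sketch below assumes it returns the number of the cluster's bytes that land in page i, so copying naturally stops at the first short page. Under that assumption:

#include <stdio.h>

#define PAGE_SIZE_ 4096UL       /* assumed page size */

/* Assumed semantics of __mbp(): how many of the cluster's @len bytes fall
 * into page @i of the page cluster. */
static unsigned long bytes_in_page(unsigned long len, unsigned long i)
{
    unsigned long off = i * PAGE_SIZE_;

    if (len <= off)
        return 0;
    return (len - off) < PAGE_SIZE_ ? len - off : PAGE_SIZE_;
}

int main(void)
{
    unsigned long len = 10000;  /* a partially filled logical cluster */

    for (unsigned long i = 0; i < 4; i++)
        printf("page %lu: %lu bytes\n", i, bytes_in_page(len, i));
    /* 4096, 4096, 1808, 0 -- copying stops at the first short page */
    return 0;
}
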
NULL); + assert("edward-1069", clust->win != NULL); + assert("edward-191", inode != NULL); + assert("edward-727", cryptcompress_inode_ok(inode)); + assert("edward-1171", clust->dstat != INVAL_DISK_CLUSTER); + assert("edward-1154", + ergo(clust->dstat != FAKE_DISK_CLUSTER, clust->reserved == 1)); + + win = clust->win; + + assert("edward-1070", win != NULL); + assert("edward-201", win->stat == HOLE_WINDOW); + assert("edward-192", cluster_ok(clust, inode)); + + if (win->off == 0 && win->count == inode_cluster_size(inode)) { + /* + * This part of the hole occupies the whole logical + * cluster, so it won't be represented by any items. + * Nothing to submit. + */ + move_update_window(inode, clust, file_off, to_file); + return 0; + } + /* + * This part of the hole starts not at logical cluster + * boundary, so it has to be converted to zeros and written to disk + */ + cl_count = win->count; /* number of zeroes to write */ + cl_off = win->off; + pg_off = off_to_pgoff(win->off); + + while (cl_count) { + struct page *page; + page = clust->pages[off_to_pg(cl_off)]; + + assert("edward-284", page != NULL); + + to_pg = min((typeof(pg_off))PAGE_SIZE - pg_off, cl_count); + lock_page(page); + zero_user(page, pg_off, to_pg); + SetPageUptodate(page); + set_page_dirty_notag(page); + mark_page_accessed(page); + unlock_page(page); + + cl_off += to_pg; + cl_count -= to_pg; + pg_off = 0; + } + if (win->delta == 0) { + /* only zeroes in this window, try to capture + */ + result = checkin_logical_cluster(clust, inode); + if (result) + return result; + put_hint_cluster(clust, inode, ZNODE_WRITE_LOCK); + result = balance_dirty_page_cluster(clust, + inode, file_off, to_file, + win_count_to_nrpages(win)); + } else + move_update_window(inode, clust, file_off, to_file); + return result; +} + +/* + The main disk search procedure for cryptcompress plugin, which + . scans all items of disk cluster with the lock mode @mode + . maybe reads each one (if @read) + . maybe makes its znode dirty (if write lock mode was specified) + + NOTE-EDWARD: Callers should handle the case when disk cluster + is incomplete (-EIO) +*/ +int find_disk_cluster(struct cluster_handle * clust, + struct inode *inode, int read, znode_lock_mode mode) +{ + flow_t f; + hint_t *hint; + int result = 0; + int was_grabbed; + ra_info_t ra_info; + file_plugin *fplug; + item_plugin *iplug; + struct tfm_cluster *tc; + struct cryptcompress_info * info; + + assert("edward-138", clust != NULL); + assert("edward-728", clust->hint != NULL); + assert("edward-226", reiser4_schedulable()); + assert("edward-137", inode != NULL); + assert("edward-729", cryptcompress_inode_ok(inode)); + + hint = clust->hint; + fplug = inode_file_plugin(inode); + was_grabbed = get_current_context()->grabbed_blocks; + info = cryptcompress_inode_data(inode); + tc = &clust->tc; + + assert("edward-462", !tfm_cluster_is_uptodate(tc)); + assert("edward-461", ergo(read, tfm_stream_is_set(tc, INPUT_STREAM))); + + dclust_init_extension(hint); + + /* set key of the first disk cluster item */ + fplug->flow_by_inode(inode, + (read ? 
(char __user *)tfm_stream_data(tc, INPUT_STREAM) : NULL), + 0 /* kernel space */ , + inode_scaled_cluster_size(inode), + clust_to_off(clust->index, inode), READ_OP, &f); + if (mode == ZNODE_WRITE_LOCK) { + /* reserve for flush to make dirty all the leaf nodes + which contain disk cluster */ + result = + reiser4_grab_space_force(estimate_dirty_cluster(inode), + BA_CAN_COMMIT); + if (result) + goto out; + } + + ra_info.key_to_stop = f.key; + set_key_offset(&ra_info.key_to_stop, get_key_offset(reiser4_max_key())); + + while (f.length) { + result = find_cluster_item(hint, &f.key, mode, + NULL, FIND_EXACT, + (mode == ZNODE_WRITE_LOCK ? + CBK_FOR_INSERT : 0)); + switch (result) { + case CBK_COORD_NOTFOUND: + result = 0; + if (inode_scaled_offset + (inode, clust_to_off(clust->index, inode)) == + get_key_offset(&f.key)) { + /* first item not found, this is treated + as disk cluster is absent */ + clust->dstat = FAKE_DISK_CLUSTER; + goto out; + } + /* we are outside the cluster, stop search here */ + assert("edward-146", + f.length != inode_scaled_cluster_size(inode)); + goto ok; + case CBK_COORD_FOUND: + assert("edward-148", + hint->ext_coord.coord.between == AT_UNIT); + assert("edward-460", + hint->ext_coord.coord.unit_pos == 0); + + coord_clear_iplug(&hint->ext_coord.coord); + result = zload_ra(hint->ext_coord.coord.node, &ra_info); + if (unlikely(result)) + goto out; + iplug = item_plugin_by_coord(&hint->ext_coord.coord); + assert("edward-147", + item_id_by_coord(&hint->ext_coord.coord) == + CTAIL_ID); + + result = iplug->s.file.read(NULL, &f, hint); + if (result) { + zrelse(hint->ext_coord.coord.node); + goto out; + } + if (mode == ZNODE_WRITE_LOCK) { + /* Don't make dirty more nodes then it was + estimated (see comments before + estimate_dirty_cluster). Missed nodes will be + read up in flush time if they are evicted from + memory */ + if (dclust_get_extension_ncount(hint) <= + estimate_dirty_cluster(inode)) + znode_make_dirty(hint->ext_coord.coord.node); + + znode_set_convertible(hint->ext_coord.coord. + node); + } + zrelse(hint->ext_coord.coord.node); + break; + default: + goto out; + } + } + ok: + /* at least one item was found */ + /* NOTE-EDWARD: Callers should handle the case + when disk cluster is incomplete (-EIO) */ + tc->len = inode_scaled_cluster_size(inode) - f.length; + tc->lsize = lbytes(clust->index, inode); + assert("edward-1196", tc->len > 0); + assert("edward-1406", tc->lsize > 0); + + if (hint_is_unprepped_dclust(clust->hint)) { + clust->dstat = UNPR_DISK_CLUSTER; + } else if (clust->index == info->trunc_index) { + clust->dstat = TRNC_DISK_CLUSTER; + } else { + clust->dstat = PREP_DISK_CLUSTER; + dclust_set_extension_dsize(clust->hint, tc->len); + } + out: + assert("edward-1339", + get_current_context()->grabbed_blocks >= was_grabbed); + grabbed2free(get_current_context(), + get_current_super_private(), + get_current_context()->grabbed_blocks - was_grabbed); + return result; +} + +int get_disk_cluster_locked(struct cluster_handle * clust, struct inode *inode, + znode_lock_mode lock_mode) +{ + reiser4_key key; + ra_info_t ra_info; + + assert("edward-730", reiser4_schedulable()); + assert("edward-731", clust != NULL); + assert("edward-732", inode != NULL); + + if (hint_is_valid(clust->hint)) { + assert("edward-1293", clust->dstat != INVAL_DISK_CLUSTER); + assert("edward-1294", + znode_is_write_locked(clust->hint->lh.node)); + /* already have a valid locked position */ + return (clust->dstat == + FAKE_DISK_CLUSTER ? 
CBK_COORD_NOTFOUND : + CBK_COORD_FOUND); + } + key_by_inode_cryptcompress(inode, clust_to_off(clust->index, inode), + &key); + ra_info.key_to_stop = key; + set_key_offset(&ra_info.key_to_stop, get_key_offset(reiser4_max_key())); + + return find_cluster_item(clust->hint, &key, lock_mode, NULL, FIND_EXACT, + CBK_FOR_INSERT); +} + +/* Read needed cluster pages before modifying. + If success, @clust->hint contains locked position in the tree. + Also: + . find and set disk cluster state + . make disk cluster dirty if its state is not FAKE_DISK_CLUSTER. +*/ +static int read_some_cluster_pages(struct inode * inode, + struct cluster_handle * clust) +{ + int i; + int result = 0; + item_plugin *iplug; + struct reiser4_slide * win = clust->win; + znode_lock_mode mode = ZNODE_WRITE_LOCK; + + iplug = item_plugin_by_id(CTAIL_ID); + + assert("edward-924", !tfm_cluster_is_uptodate(&clust->tc)); + +#if REISER4_DEBUG + if (clust->nr_pages == 0) { + /* start write hole from fake disk cluster */ + assert("edward-1117", win != NULL); + assert("edward-1118", win->stat == HOLE_WINDOW); + assert("edward-1119", new_logical_cluster(clust, inode)); + } +#endif + if (new_logical_cluster(clust, inode)) { + /* + new page cluster is about to be written, nothing to read, + */ + assert("edward-734", reiser4_schedulable()); + assert("edward-735", clust->hint->lh.owner == NULL); + + if (clust->nr_pages) { + int off; + struct page * pg; + assert("edward-1419", clust->pages != NULL); + pg = clust->pages[clust->nr_pages - 1]; + assert("edward-1420", pg != NULL); + off = off_to_pgoff(win->off+win->count+win->delta); + if (off) { + lock_page(pg); + zero_user_segment(pg, off, PAGE_SIZE); + unlock_page(pg); + } + } + clust->dstat = FAKE_DISK_CLUSTER; + return 0; + } + /* + Here we should search for disk cluster to figure out its real state. + Also there is one more important reason to do disk search: we need + to make disk cluster _dirty_ if it exists + */ + + /* if windows is specified, read the only pages + that will be modified partially */ + + for (i = 0; i < clust->nr_pages; i++) { + struct page *pg = clust->pages[i]; + + lock_page(pg); + if (PageUptodate(pg)) { + unlock_page(pg); + continue; + } + unlock_page(pg); + + if (win && + i >= size_in_pages(win->off) && + i < off_to_pg(win->off + win->count + win->delta)) + /* page will be completely overwritten */ + continue; + + if (win && (i == clust->nr_pages - 1) && + /* the last page is + partially modified, + not uptodate .. */ + (size_in_pages(i_size_read(inode)) <= pg->index)) { + /* .. 
and appended, + so set zeroes to the rest */ + int offset; + lock_page(pg); + assert("edward-1260", + size_in_pages(win->off + win->count + + win->delta) - 1 == i); + + offset = + off_to_pgoff(win->off + win->count + win->delta); + zero_user_segment(pg, offset, PAGE_SIZE); + unlock_page(pg); + /* still not uptodate */ + break; + } + lock_page(pg); + result = do_readpage_ctail(inode, clust, pg, mode); + + assert("edward-1526", ergo(!result, PageUptodate(pg))); + unlock_page(pg); + if (result) { + warning("edward-219", "do_readpage_ctail failed"); + goto out; + } + } + if (!tfm_cluster_is_uptodate(&clust->tc)) { + /* disk cluster unclaimed, but we need to make its znodes dirty + * to make flush update convert its content + */ + result = find_disk_cluster(clust, inode, + 0 /* do not read items */, + mode); + } + out: + tfm_cluster_clr_uptodate(&clust->tc); + return result; +} + +static int should_create_unprepped_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + assert("edward-737", clust != NULL); + + switch (clust->dstat) { + case PREP_DISK_CLUSTER: + case UNPR_DISK_CLUSTER: + return 0; + case FAKE_DISK_CLUSTER: + if (clust->win && + clust->win->stat == HOLE_WINDOW && clust->nr_pages == 0) { + assert("edward-1172", + new_logical_cluster(clust, inode)); + return 0; + } + return 1; + default: + impossible("edward-1173", "bad disk cluster state"); + return 0; + } +} + +static int cryptcompress_make_unprepped_cluster(struct cluster_handle * clust, + struct inode *inode) +{ + int result; + + assert("edward-1123", reiser4_schedulable()); + assert("edward-737", clust != NULL); + assert("edward-738", inode != NULL); + assert("edward-739", cryptcompress_inode_ok(inode)); + assert("edward-1053", clust->hint != NULL); + + if (!should_create_unprepped_cluster(clust, inode)) { + if (clust->reserved) { + cluster_reserved2free(estimate_insert_cluster(inode)); +#if REISER4_DEBUG + assert("edward-1267", + clust->reserved_unprepped == + estimate_insert_cluster(inode)); + clust->reserved_unprepped -= + estimate_insert_cluster(inode); +#endif + } + return 0; + } + assert("edward-1268", clust->reserved); + cluster_reserved2grabbed(estimate_insert_cluster(inode)); +#if REISER4_DEBUG + assert("edward-1441", + clust->reserved_unprepped == estimate_insert_cluster(inode)); + clust->reserved_unprepped -= estimate_insert_cluster(inode); +#endif + result = ctail_insert_unprepped_cluster(clust, inode); + if (result) + return result; + + inode_add_bytes(inode, inode_cluster_size(inode)); + + assert("edward-743", cryptcompress_inode_ok(inode)); + assert("edward-744", znode_is_write_locked(clust->hint->lh.node)); + + clust->dstat = UNPR_DISK_CLUSTER; + return 0; +} + +/* . Grab page cluster for read, write, setattr, etc. operations; + * . Truncate its complete pages, if needed; + */ +int prepare_page_cluster(struct inode * inode, struct cluster_handle * clust, + rw_op rw) +{ + assert("edward-177", inode != NULL); + assert("edward-741", cryptcompress_inode_ok(inode)); + assert("edward-740", clust->pages != NULL); + + set_cluster_nrpages(clust, inode); + reset_cluster_pgset(clust, cluster_nrpages(inode)); + return grab_page_cluster(inode, clust, rw); +} + +/* Truncate complete page cluster of index @index. + * This is called by ->kill_hook() method of item + * plugin when deleting a disk cluster of such index. 
+ */ +void truncate_complete_page_cluster(struct inode *inode, cloff_t index, + int even_cows) +{ + int found; + int nr_pages; + jnode *node; + pgoff_t page_index = clust_to_pg(index, inode); + struct page *pages[MAX_CLUSTER_NRPAGES]; + + node = jlookup(current_tree, get_inode_oid(inode), + clust_to_pg(index, inode)); + nr_pages = size_in_pages(lbytes(index, inode)); + assert("edward-1483", nr_pages != 0); + if (!node) + goto truncate; + found = find_get_pages(inode->i_mapping, &page_index, + cluster_nrpages(inode), pages); + if (!found) { + assert("edward-1484", jnode_truncate_ok(inode, index)); + return; + } + lock_cluster(node); + + if (reiser4_inode_get_flag(inode, REISER4_FILE_CONV_IN_PROGRESS) + && index == 0) + /* converting to unix_file is in progress */ + JF_CLR(node, JNODE_CLUSTER_PAGE); + if (JF_ISSET(node, JNODE_DIRTY)) { + /* + * @nr_pages were checked in, but not yet checked out - + * we need to release them. (also there can be pages + * attached to page cache by read(), etc. - don't take + * them into account). + */ + assert("edward-1198", found >= nr_pages); + + /* free disk space grabbed for disk cluster converting */ + cluster_reserved2grabbed(estimate_update_cluster(inode)); + grabbed2free(get_current_context(), + get_current_super_private(), + estimate_update_cluster(inode)); + __put_page_cluster(0, nr_pages, pages, inode); + + /* This will clear dirty bit, uncapture and unlock jnode */ + unlock_cluster_uncapture(node); + } else + unlock_cluster(node); + jput(node); /* jlookup */ + put_found_pages(pages, found); /* find_get_pages */ + truncate: + if (reiser4_inode_get_flag(inode, REISER4_FILE_CONV_IN_PROGRESS) && + index == 0) + return; + truncate_page_cluster_range(inode, pages, index, 0, + cluster_nrpages(inode), + even_cows); + assert("edward-1201", + ergo(!reiser4_inode_get_flag(inode, + REISER4_FILE_CONV_IN_PROGRESS), + jnode_truncate_ok(inode, index))); + return; +} + +/* + * Set cluster handle @clust of a logical cluster before + * modifications which are supposed to be committed. + * + * . grab cluster pages; + * . reserve disk space; + * . maybe read pages from disk and set the disk cluster dirty; + * . maybe write hole and check in (partially zeroed) logical cluster; + * . create 'unprepped' disk cluster for new or fake logical one. 
+ */ +static int prepare_logical_cluster(struct inode *inode, + loff_t file_off, /* write position + in the file */ + loff_t to_file, /* bytes of users data + to write to the file */ + struct cluster_handle * clust, + logical_cluster_op op) +{ + int result = 0; + struct reiser4_slide * win = clust->win; + + reset_cluster_params(clust); + cluster_set_tfm_act(&clust->tc, TFMA_READ); +#if REISER4_DEBUG + clust->ctx = get_current_context(); +#endif + assert("edward-1190", op != LC_INVAL); + + clust->op = op; + + result = prepare_page_cluster(inode, clust, WRITE_OP); + if (result) + return result; + assert("edward-1447", + ergo(clust->nr_pages != 0, jprivate(clust->pages[0]))); + assert("edward-1448", + ergo(clust->nr_pages != 0, + jnode_is_cluster_page(jprivate(clust->pages[0])))); + + result = reserve4cluster(inode, clust); + if (result) + goto out; + + result = read_some_cluster_pages(inode, clust); + + if (result || + /* + * don't submit data modifications + * when expanding or shrinking holes + */ + (op == LC_SHRINK && clust->dstat == FAKE_DISK_CLUSTER) || + (op == LC_EXPAND && clust->dstat == FAKE_DISK_CLUSTER)){ + free_reserved4cluster(inode, + clust, + estimate_update_cluster(inode) + + estimate_insert_cluster(inode)); + goto out; + } + assert("edward-1124", clust->dstat != INVAL_DISK_CLUSTER); + + result = cryptcompress_make_unprepped_cluster(clust, inode); + if (result) + goto error; + if (win && win->stat == HOLE_WINDOW) { + result = write_hole(inode, clust, file_off, to_file); + if (result) + goto error; + } + return 0; + error: + free_reserved4cluster(inode, clust, + estimate_update_cluster(inode)); + out: + put_page_cluster(clust, inode, WRITE_OP); + return result; +} + +/* set window by two offsets */ +static void set_window(struct cluster_handle * clust, + struct reiser4_slide * win, struct inode *inode, + loff_t o1, loff_t o2) +{ + assert("edward-295", clust != NULL); + assert("edward-296", inode != NULL); + assert("edward-1071", win != NULL); + assert("edward-297", o1 <= o2); + + clust->index = off_to_clust(o1, inode); + + win->off = off_to_cloff(o1, inode); + win->count = min((loff_t)(inode_cluster_size(inode) - win->off), + o2 - o1); + win->delta = 0; + + clust->win = win; +} + +static int set_window_and_cluster(struct inode *inode, + struct cluster_handle * clust, + struct reiser4_slide * win, size_t length, + loff_t file_off) +{ + int result; + + assert("edward-197", clust != NULL); + assert("edward-1072", win != NULL); + assert("edward-198", inode != NULL); + + result = alloc_cluster_pgset(clust, cluster_nrpages(inode)); + if (result) + return result; + + if (file_off > i_size_read(inode)) { + /* Uhmm, hole in cryptcompress file... 
*/ + loff_t hole_size; + hole_size = file_off - inode->i_size; + + set_window(clust, win, inode, inode->i_size, file_off); + win->stat = HOLE_WINDOW; + if (win->off + hole_size < inode_cluster_size(inode)) + /* there is also user's data to append to the hole */ + win->delta = min(inode_cluster_size(inode) - + (win->off + win->count), length); + return 0; + } + set_window(clust, win, inode, file_off, file_off + length); + win->stat = DATA_WINDOW; + return 0; +} + +int set_cluster_by_page(struct cluster_handle * clust, struct page * page, + int count) +{ + int result = 0; + int (*setting_actor)(struct cluster_handle * clust, int count); + + assert("edward-1358", clust != NULL); + assert("edward-1359", page != NULL); + assert("edward-1360", page->mapping != NULL); + assert("edward-1361", page->mapping->host != NULL); + + setting_actor = + (clust->pages ? reset_cluster_pgset : alloc_cluster_pgset); + result = setting_actor(clust, count); + clust->index = pg_to_clust(page->index, page->mapping->host); + return result; +} + +/* reset all the params that not get updated */ +void reset_cluster_params(struct cluster_handle * clust) +{ + assert("edward-197", clust != NULL); + + clust->dstat = INVAL_DISK_CLUSTER; + clust->tc.uptodate = 0; + clust->tc.len = 0; +} + +/* the heart of write_cryptcompress */ +static loff_t do_write_cryptcompress(struct file *file, struct inode *inode, + const char __user *buf, size_t to_write, + loff_t pos, struct dispatch_context *cont) +{ + int i; + hint_t *hint; + int result = 0; + size_t count; + struct reiser4_slide win; + struct cluster_handle clust; + struct cryptcompress_info * info; + + assert("edward-154", buf != NULL); + assert("edward-161", reiser4_schedulable()); + assert("edward-748", cryptcompress_inode_ok(inode)); + assert("edward-159", current_blocksize == PAGE_SIZE); + assert("edward-1274", get_current_context()->grabbed_blocks == 0); + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + + result = load_file_hint(file, hint); + if (result) { + kfree(hint); + return result; + } + count = to_write; + + reiser4_slide_init(&win); + cluster_init_read(&clust, &win); + clust.hint = hint; + info = cryptcompress_inode_data(inode); + + mutex_lock(&info->checkin_mutex); + + result = set_window_and_cluster(inode, &clust, &win, to_write, pos); + if (result) + goto out; + + if (next_window_stat(&win) == HOLE_WINDOW) { + /* write hole in this iteration + separated from the loop below */ + result = write_dispatch_hook(file, inode, + pos, &clust, cont); + if (result) + goto out; + result = prepare_logical_cluster(inode, pos, count, &clust, + LC_APPOV); + if (result) + goto out; + } + do { + const char __user * src; + unsigned page_off, to_page; + + assert("edward-750", reiser4_schedulable()); + + result = write_dispatch_hook(file, inode, + pos + to_write - count, + &clust, cont); + if (result) + goto out; + if (cont->state == DISPATCH_ASSIGNED_NEW) + /* done_lh was called in write_dispatch_hook */ + goto out_no_longterm_lock; + + result = prepare_logical_cluster(inode, pos, count, &clust, + LC_APPOV); + if (result) + goto out; + + assert("edward-751", cryptcompress_inode_ok(inode)); + assert("edward-204", win.stat == DATA_WINDOW); + assert("edward-1288", hint_is_valid(clust.hint)); + assert("edward-752", + znode_is_write_locked(hint->ext_coord.coord.node)); + put_hint_cluster(&clust, inode, ZNODE_WRITE_LOCK); + + /* set write position in page */ + page_off = off_to_pgoff(win.off); + + /* copy user's data to cluster 
pages */ + for (i = off_to_pg(win.off), src = buf; + i < size_in_pages(win.off + win.count); + i++, src += to_page) { + to_page = __mbp(win.off + win.count, i) - page_off; + assert("edward-1039", + page_off + to_page <= PAGE_SIZE); + assert("edward-287", clust.pages[i] != NULL); + + fault_in_pages_readable(src, to_page); + + lock_page(clust.pages[i]); + result = + __copy_from_user((char *)kmap(clust.pages[i]) + + page_off, src, to_page); + kunmap(clust.pages[i]); + if (unlikely(result)) { + unlock_page(clust.pages[i]); + result = -EFAULT; + goto err2; + } + SetPageUptodate(clust.pages[i]); + set_page_dirty_notag(clust.pages[i]); + flush_dcache_page(clust.pages[i]); + mark_page_accessed(clust.pages[i]); + unlock_page(clust.pages[i]); + page_off = 0; + } + assert("edward-753", cryptcompress_inode_ok(inode)); + + result = checkin_logical_cluster(&clust, inode); + if (result) + goto err2; + + buf += win.count; + count -= win.count; + + result = balance_dirty_page_cluster(&clust, inode, 0, count, + win_count_to_nrpages(&win)); + if (result) + goto err1; + assert("edward-755", hint->lh.owner == NULL); + reset_cluster_params(&clust); + continue; + err2: + put_page_cluster(&clust, inode, WRITE_OP); + err1: + if (clust.reserved) + free_reserved4cluster(inode, + &clust, + estimate_update_cluster(inode)); + break; + } while (count); + out: + done_lh(&hint->lh); + save_file_hint(file, hint); + out_no_longterm_lock: + mutex_unlock(&info->checkin_mutex); + kfree(hint); + put_cluster_handle(&clust); + assert("edward-195", + ergo((to_write == count), + (result < 0 || cont->state == DISPATCH_ASSIGNED_NEW))); + return (to_write - count) ? (to_write - count) : result; +} + +/** + * plugin->write() + * @file: file to write to + * @buf: address of user-space buffer + * @read_amount: number of bytes to write + * @off: position in file to write to + */ +ssize_t write_cryptcompress(struct file *file, const char __user *buf, + size_t count, loff_t *off, + struct dispatch_context *cont) +{ + ssize_t result; + struct inode *inode; + reiser4_context *ctx; + loff_t pos = *off; + struct cryptcompress_info *info; + + assert("edward-1449", cont->state == DISPATCH_INVAL_STATE); + + inode = file_inode(file); + assert("edward-196", cryptcompress_inode_ok(inode)); + + info = cryptcompress_inode_data(inode); + ctx = get_current_context(); + + result = file_remove_privs(file); + if (unlikely(result != 0)) { + context_set_commit_async(ctx); + return result; + } + /* remove_suid might create a transaction */ + reiser4_txn_restart(ctx); + + result = do_write_cryptcompress(file, inode, buf, count, pos, cont); + + if (unlikely(result < 0)) { + context_set_commit_async(ctx); + return result; + } + /* update position in a file */ + *off = pos + result; + return result; +} + +/* plugin->readpages */ +int readpages_cryptcompress(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + reiser4_context * ctx; + int ret; + + ctx = reiser4_init_context(mapping->host->i_sb); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto err; + } + /* cryptcompress file can be built of ctail items only */ + ret = readpages_ctail(file, mapping, pages); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + if (ret) { +err: + put_pages_list(pages); + } + return ret; +} + +static reiser4_block_nr cryptcompress_estimate_read(struct inode *inode) +{ + /* reserve one block to update stat data item */ + assert("edward-1193", + inode_file_plugin(inode)->estimate.update == + estimate_update_common); + return 
estimate_update_common(inode); +} + +/** + * plugin->read + * @file: file to read from + * @buf: address of user-space buffer + * @read_amount: number of bytes to read + * @off: position in file to read from + */ +ssize_t read_cryptcompress(struct file * file, char __user *buf, size_t size, + loff_t * off) +{ + ssize_t result; + struct inode *inode; + reiser4_context *ctx; + struct cryptcompress_info *info; + reiser4_block_nr needed; + + inode = file_inode(file); + assert("edward-1194", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + info = cryptcompress_inode_data(inode); + needed = cryptcompress_estimate_read(inode); + + result = reiser4_grab_space(needed, BA_CAN_COMMIT); + if (result != 0) { + reiser4_exit_context(ctx); + return result; + } + result = new_sync_read(file, buf, size, off); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return result; +} + +/* Set left coord when unit is not found after node_lookup() + This takes into account that there can be holes in a sequence + of disk clusters */ + +static void adjust_left_coord(coord_t * left_coord) +{ + switch (left_coord->between) { + case AFTER_UNIT: + left_coord->between = AFTER_ITEM; + case AFTER_ITEM: + case BEFORE_UNIT: + break; + default: + impossible("edward-1204", "bad left coord to cut"); + } + return; +} + +#define CRC_CUT_TREE_MIN_ITERATIONS 64 + +/* plugin->cut_tree_worker */ +int cut_tree_worker_cryptcompress(tap_t * tap, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, + struct inode *object, int truncate, + int *progress) +{ + lock_handle next_node_lock; + coord_t left_coord; + int result; + + assert("edward-1158", tap->coord->node != NULL); + assert("edward-1159", znode_is_write_locked(tap->coord->node)); + assert("edward-1160", znode_get_level(tap->coord->node) == LEAF_LEVEL); + + *progress = 0; + init_lh(&next_node_lock); + + while (1) { + znode *node; /* node from which items are cut */ + node_plugin *nplug; /* node plugin for @node */ + + node = tap->coord->node; + + /* Move next_node_lock to the next node on the left. */ + result = + reiser4_get_left_neighbor(&next_node_lock, node, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result != 0 && result != -E_NO_NEIGHBOR) + break; + /* FIXME-EDWARD: Check can we delete the node as a whole. 
*/ + result = reiser4_tap_load(tap); + if (result) + return result; + + /* Prepare the second (right) point for cut_node() */ + if (*progress) + coord_init_last_unit(tap->coord, node); + + else if (item_plugin_by_coord(tap->coord)->b.lookup == NULL) + /* set rightmost unit for the items without lookup method */ + tap->coord->unit_pos = coord_last_unit_pos(tap->coord); + + nplug = node->nplug; + + assert("edward-1161", nplug); + assert("edward-1162", nplug->lookup); + + /* left_coord is leftmost unit cut from @node */ + result = nplug->lookup(node, from_key, FIND_EXACT, &left_coord); + + if (IS_CBKERR(result)) + break; + + if (result == CBK_COORD_NOTFOUND) + adjust_left_coord(&left_coord); + + /* adjust coordinates so that they are set to existing units */ + if (coord_set_to_right(&left_coord) + || coord_set_to_left(tap->coord)) { + result = 0; + break; + } + + if (coord_compare(&left_coord, tap->coord) == + COORD_CMP_ON_RIGHT) { + /* keys from @from_key to @to_key are not in the tree */ + result = 0; + break; + } + + /* cut data from one node */ + *smallest_removed = *reiser4_min_key(); + result = kill_node_content(&left_coord, + tap->coord, + from_key, + to_key, + smallest_removed, + next_node_lock.node, + object, truncate); + reiser4_tap_relse(tap); + + if (result) + break; + + ++(*progress); + + /* Check whether all items with keys >= from_key were removed + * from the tree. */ + if (keyle(smallest_removed, from_key)) + /* result = 0; */ + break; + + if (next_node_lock.node == NULL) + break; + + result = reiser4_tap_move(tap, &next_node_lock); + done_lh(&next_node_lock); + if (result) + break; + + /* Break long cut_tree operation (deletion of a large file) if + * atom requires commit. */ + if (*progress > CRC_CUT_TREE_MIN_ITERATIONS + && current_atom_should_commit()) { + result = -E_REPEAT; + break; + } + } + done_lh(&next_node_lock); + return result; +} + +static int expand_cryptcompress(struct inode *inode /* old size */, + loff_t new_size) +{ + int result = 0; + hint_t *hint; + lock_handle *lh; + loff_t hole_size; + int nr_zeroes; + struct reiser4_slide win; + struct cluster_handle clust; + + assert("edward-1133", inode->i_size < new_size); + assert("edward-1134", reiser4_schedulable()); + assert("edward-1135", cryptcompress_inode_ok(inode)); + assert("edward-1136", current_blocksize == PAGE_SIZE); + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + hint_init_zero(hint); + lh = &hint->lh; + + reiser4_slide_init(&win); + cluster_init_read(&clust, &win); + clust.hint = hint; + + if (off_to_cloff(inode->i_size, inode) == 0) + goto append_hole; + /* + * It can happen that + * a part of the hole will be converted + * to zeros. 
If so, it should be submitted + */ + result = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (result) + goto out; + hole_size = new_size - inode->i_size; + nr_zeroes = inode_cluster_size(inode) - + off_to_cloff(inode->i_size, inode); + if (nr_zeroes > hole_size) + nr_zeroes = hole_size; + + set_window(&clust, &win, inode, inode->i_size, + inode->i_size + nr_zeroes); + win.stat = HOLE_WINDOW; + + assert("edward-1137", + clust.index == off_to_clust(inode->i_size, inode)); + + result = prepare_logical_cluster(inode, 0, 0, &clust, LC_EXPAND); + if (result) + goto out; + assert("edward-1139", + clust.dstat == PREP_DISK_CLUSTER || + clust.dstat == UNPR_DISK_CLUSTER || + clust.dstat == FAKE_DISK_CLUSTER); + + assert("edward-1431", hole_size >= nr_zeroes); + + append_hole: + INODE_SET_SIZE(inode, new_size); + out: + done_lh(lh); + kfree(hint); + put_cluster_handle(&clust); + return result; +} + +static int update_size_actor(struct inode *inode, + loff_t new_size, int update_sd) +{ + if (new_size & ((loff_t) (inode_cluster_size(inode)) - 1)) + /* + * cut not at logical cluster boundary, + * size will be updated by write_hole() + */ + return 0; + else + return reiser4_update_file_size(inode, new_size, update_sd); +} + +static int prune_cryptcompress(struct inode *inode, + loff_t new_size, int update_sd) +{ + int result = 0; + unsigned nr_zeros; + loff_t to_prune; + loff_t old_size; + cloff_t from_idx; + cloff_t to_idx; + + hint_t *hint; + lock_handle *lh; + struct reiser4_slide win; + struct cluster_handle clust; + + assert("edward-1140", inode->i_size >= new_size); + assert("edward-1141", reiser4_schedulable()); + assert("edward-1142", cryptcompress_inode_ok(inode)); + assert("edward-1143", current_blocksize == PAGE_SIZE); + + old_size = inode->i_size; + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + hint_init_zero(hint); + lh = &hint->lh; + + reiser4_slide_init(&win); + cluster_init_read(&clust, &win); + clust.hint = hint; + + /* + * index of the leftmost logical cluster + * that will be completely truncated + */ + from_idx = size_in_lc(new_size, inode); + to_idx = size_in_lc(inode->i_size, inode); + /* + * truncate all complete disk clusters starting from @from_idx + */ + assert("edward-1174", from_idx <= to_idx); + + old_size = inode->i_size; + if (from_idx != to_idx) { + struct cryptcompress_info *info; + info = cryptcompress_inode_data(inode); + + result = cut_file_items(inode, + clust_to_off(from_idx, inode), + update_sd, + clust_to_off(to_idx, inode), + update_size_actor); + info->trunc_index = ULONG_MAX; + if (unlikely(result == CBK_COORD_NOTFOUND)) + result = 0; + if (unlikely(result)) + goto out; + } + if (off_to_cloff(new_size, inode) == 0) + goto truncate_hole; + + assert("edward-1146", new_size < inode->i_size); + + to_prune = inode->i_size - new_size; + /* + * Partial truncate of the last logical cluster. + * Partial hole will be converted to zeros. 
The resulted + * logical cluster will be captured and submitted to disk + */ + result = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (result) + goto out; + + nr_zeros = off_to_pgoff(new_size); + if (nr_zeros) + nr_zeros = PAGE_SIZE - nr_zeros; + + set_window(&clust, &win, inode, new_size, new_size + nr_zeros); + win.stat = HOLE_WINDOW; + + assert("edward-1149", clust.index == from_idx - 1); + + result = prepare_logical_cluster(inode, 0, 0, &clust, LC_SHRINK); + if (result) + goto out; + assert("edward-1151", + clust.dstat == PREP_DISK_CLUSTER || + clust.dstat == UNPR_DISK_CLUSTER || + clust.dstat == FAKE_DISK_CLUSTER); + truncate_hole: + /* + * drop all the pages that don't have jnodes (i.e. pages + * which can not be truncated by cut_file_items() because + * of holes represented by fake disk clusters) including + * the pages of partially truncated cluster which was + * released by prepare_logical_cluster() + */ + INODE_SET_SIZE(inode, new_size); + truncate_inode_pages(inode->i_mapping, new_size); + out: + assert("edward-1497", + pages_truncate_ok(inode, size_in_pages(new_size))); + + done_lh(lh); + kfree(hint); + put_cluster_handle(&clust); + return result; +} + +/** + * Capture a pager cluster. + * @clust must be set up by a caller. + */ +static int capture_page_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + int result; + + assert("edward-1073", clust != NULL); + assert("edward-1074", inode != NULL); + assert("edward-1075", clust->dstat == INVAL_DISK_CLUSTER); + + result = prepare_logical_cluster(inode, 0, 0, clust, LC_APPOV); + if (result) + return result; + + set_cluster_pages_dirty(clust, inode); + result = checkin_logical_cluster(clust, inode); + put_hint_cluster(clust, inode, ZNODE_WRITE_LOCK); + if (unlikely(result)) + put_page_cluster(clust, inode, WRITE_OP); + return result; +} + +/* Starting from @index find tagged pages of the same page cluster. + * Clear the tag for each of them. Return number of found pages. 
+ */ +static int find_anon_page_cluster(struct address_space * mapping, + pgoff_t * index, struct page ** pages) +{ + int i = 0; + int found; + spin_lock_irq(&mapping->tree_lock); + do { + /* looking for one page */ + found = radix_tree_gang_lookup_tag(&mapping->page_tree, + (void **)&pages[i], + *index, 1, + PAGECACHE_TAG_REISER4_MOVED); + if (!found) + break; + if (!same_page_cluster(pages[0], pages[i])) + break; + + /* found */ + get_page(pages[i]); + *index = pages[i]->index + 1; + + radix_tree_tag_clear(&mapping->page_tree, + pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + if (last_page_in_cluster(pages[i++])) + break; + } while (1); + spin_unlock_irq(&mapping->tree_lock); + return i; +} + +#define MAX_PAGES_TO_CAPTURE (1024) + +/* Capture anonymous page clusters */ +static int capture_anon_pages(struct address_space * mapping, pgoff_t * index, + int to_capture) +{ + int count = 0; + int found = 0; + int result = 0; + hint_t *hint; + lock_handle *lh; + struct inode * inode; + struct cluster_handle clust; + struct page * pages[MAX_CLUSTER_NRPAGES]; + + assert("edward-1127", mapping != NULL); + assert("edward-1128", mapping->host != NULL); + assert("edward-1440", mapping->host->i_mapping == mapping); + + inode = mapping->host; + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + hint_init_zero(hint); + lh = &hint->lh; + + cluster_init_read(&clust, NULL /* no sliding window */); + clust.hint = hint; + + result = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (result) + goto out; + + while (to_capture > 0) { + found = find_anon_page_cluster(mapping, index, pages); + if (!found) { + *index = (pgoff_t) - 1; + break; + } + move_cluster_forward(&clust, inode, pages[0]->index); + result = capture_page_cluster(&clust, inode); + + put_found_pages(pages, found); /* find_anon_page_cluster */ + if (result) + break; + to_capture -= clust.nr_pages; + count += clust.nr_pages; + } + if (result) { + warning("edward-1077", + "Capture failed (inode %llu, result=%i, captured=%d)\n", + (unsigned long long)get_inode_oid(inode), result, count); + } else { + assert("edward-1078", ergo(found > 0, count > 0)); + if (to_capture <= 0) + /* there may be left more pages */ + __mark_inode_dirty(inode, I_DIRTY_PAGES); + result = count; + } + out: + done_lh(lh); + kfree(hint); + put_cluster_handle(&clust); + return result; +} + +/* Returns true if inode's mapping has dirty pages + which do not belong to any atom */ +static int cryptcompress_inode_has_anon_pages(struct inode *inode) +{ + int result; + spin_lock_irq(&inode->i_mapping->tree_lock); + result = radix_tree_tagged(&inode->i_mapping->page_tree, + PAGECACHE_TAG_REISER4_MOVED); + spin_unlock_irq(&inode->i_mapping->tree_lock); + return result; +} + +/* plugin->writepages */ +int writepages_cryptcompress(struct address_space *mapping, + struct writeback_control *wbc) +{ + int result = 0; + long to_capture; + pgoff_t nrpages; + pgoff_t index = 0; + struct inode *inode; + struct cryptcompress_info *info; + + inode = mapping->host; + if (!cryptcompress_inode_has_anon_pages(inode)) + goto end; + info = cryptcompress_inode_data(inode); + nrpages = size_in_pages(i_size_read(inode)); + + if (wbc->sync_mode != WB_SYNC_ALL) + to_capture = min(wbc->nr_to_write, (long)MAX_PAGES_TO_CAPTURE); + else + to_capture = MAX_PAGES_TO_CAPTURE; + do { + reiser4_context *ctx; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + result = PTR_ERR(ctx); + break; + } + /* avoid recursive calls to 
->sync_inodes */ + ctx->nobalance = 1; + + assert("edward-1079", + lock_stack_isclean(get_current_lock_stack())); + + reiser4_txn_restart_current(); + + if (get_current_context()->entd) { + if (mutex_trylock(&info->checkin_mutex) == 0) { + /* the mutex might be occupied by + entd caller */ + result = RETERR(-EBUSY); + reiser4_exit_context(ctx); + break; + } + } else + mutex_lock(&info->checkin_mutex); + + result = capture_anon_pages(inode->i_mapping, &index, + to_capture); + mutex_unlock(&info->checkin_mutex); + + if (result < 0) { + reiser4_exit_context(ctx); + break; + } + wbc->nr_to_write -= result; + if (wbc->sync_mode != WB_SYNC_ALL) { + reiser4_exit_context(ctx); + break; + } + result = txnmgr_force_commit_all(inode->i_sb, 0); + reiser4_exit_context(ctx); + } while (result >= 0 && index < nrpages); + + end: + if (is_in_reiser4_context()) { + if (get_current_context()->nr_captured >= CAPTURE_APAGE_BURST) { + /* there are already pages to flush, flush them out, + do not delay until end of reiser4_sync_inodes */ + reiser4_writeout(inode->i_sb, wbc); + get_current_context()->nr_captured = 0; + } + } + return result; +} + +/* plugin->ioctl */ +int ioctl_cryptcompress(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return RETERR(-ENOTTY); +} + +/* plugin->mmap */ +int mmap_cryptcompress(struct file *file, struct vm_area_struct *vma) +{ + int result; + struct inode *inode; + reiser4_context *ctx; + + inode = file_inode(file); + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + /* + * generic_file_mmap will do update_atime. Grab space for stat data + * update. + */ + result = reiser4_grab_space_force + (inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); + if (result) { + reiser4_exit_context(ctx); + return result; + } + result = generic_file_mmap(file, vma); + reiser4_exit_context(ctx); + return result; +} + +/* plugin->delete_object */ +int delete_object_cryptcompress(struct inode *inode) +{ + int result; + struct cryptcompress_info * info; + + assert("edward-429", inode->i_nlink == 0); + + reiser4_txn_restart_current(); + info = cryptcompress_inode_data(inode); + + mutex_lock(&info->checkin_mutex); + result = prune_cryptcompress(inode, 0, 0); + mutex_unlock(&info->checkin_mutex); + + if (result) { + warning("edward-430", + "cannot truncate cryptcompress file %lli: %i", + (unsigned long long)get_inode_oid(inode), + result); + } + /* and remove stat data */ + return reiser4_delete_object_common(inode); +} + +/* + * plugin->setattr + * This implements actual truncate (see comments in reiser4/page_cache.c) + */ +int setattr_cryptcompress(struct dentry *dentry, struct iattr *attr) +{ + int result; + struct inode *inode; + struct cryptcompress_info * info; + + inode = dentry->d_inode; + info = cryptcompress_inode_data(inode); + + if (attr->ia_valid & ATTR_SIZE) { + if (i_size_read(inode) != attr->ia_size) { + reiser4_context *ctx; + loff_t old_size; + + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + result = setattr_dispatch_hook(inode); + if (result) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + old_size = i_size_read(inode); + inode_check_scale(inode, old_size, attr->ia_size); + + mutex_lock(&info->checkin_mutex); + if (attr->ia_size > inode->i_size) + result = expand_cryptcompress(inode, + attr->ia_size); + else + result = prune_cryptcompress(inode, + attr->ia_size, + 1/* update sd */); + mutex_unlock(&info->checkin_mutex); + if (result) { 
+ warning("edward-1192", + "truncate_cryptcompress failed: oid %lli, " + "old size %lld, new size %lld, retval %d", + (unsigned long long) + get_inode_oid(inode), old_size, + attr->ia_size, result); + } + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + } else + result = 0; + } else + result = reiser4_setattr_common(dentry, attr); + return result; +} + +/* plugin->release */ +int release_cryptcompress(struct inode *inode, struct file *file) +{ + reiser4_context *ctx = reiser4_init_context(inode->i_sb); + + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + reiser4_free_file_fsdata(file); + reiser4_exit_context(ctx); + return 0; +} + +/* plugin->write_begin() */ +int write_begin_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata) +{ + int ret = -ENOMEM; + char *buf; + hint_t *hint; + struct inode *inode; + struct reiser4_slide *win; + struct cluster_handle *clust; + struct cryptcompress_info *info; + reiser4_context *ctx; + + ctx = get_current_context(); + inode = page->mapping->host; + info = cryptcompress_inode_data(inode); + + assert("edward-1564", PageLocked(page)); + buf = kmalloc(sizeof(*clust) + + sizeof(*win) + + sizeof(*hint), + reiser4_ctx_gfp_mask_get()); + if (!buf) + goto err2; + clust = (struct cluster_handle *)buf; + win = (struct reiser4_slide *)(buf + sizeof(*clust)); + hint = (hint_t *)(buf + sizeof(*clust) + sizeof(*win)); + + hint_init_zero(hint); + cluster_init_read(clust, NULL); + clust->hint = hint; + + mutex_lock(&info->checkin_mutex); + + ret = set_window_and_cluster(inode, clust, win, len, pos); + if (ret) + goto err1; + unlock_page(page); + ret = prepare_logical_cluster(inode, pos, len, clust, LC_APPOV); + done_lh(&hint->lh); + assert("edward-1565", lock_stack_isclean(get_current_lock_stack())); + lock_page(page); + if (ret) { + SetPageError(page); + ClearPageUptodate(page); + goto err0; + } + /* + * Success. All resources (including checkin_mutex) + * will be released in ->write_end() + */ + ctx->locked_page = page; + *fsdata = (void *)buf; + + return 0; + err0: + put_cluster_handle(clust); + err1: + mutex_unlock(&info->checkin_mutex); + kfree(buf); + err2: + assert("edward-1568", !ret); + return ret; +} + +/* plugin->write_end() */ +int write_end_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata) +{ + int ret; + hint_t *hint; + struct inode *inode; + struct cluster_handle *clust; + struct cryptcompress_info *info; + reiser4_context *ctx; + + assert("edward-1566", + lock_stack_isclean(get_current_lock_stack())); + ctx = get_current_context(); + inode = page->mapping->host; + info = cryptcompress_inode_data(inode); + clust = (struct cluster_handle *)fsdata; + hint = clust->hint; + + unlock_page(page); + ctx->locked_page = NULL; + set_cluster_pages_dirty(clust, inode); + ret = checkin_logical_cluster(clust, inode); + if (ret) { + SetPageError(page); + goto exit; + } + exit: + mutex_unlock(&info->checkin_mutex); + + put_cluster_handle(clust); + + if (pos + copied > inode->i_size) { + /* + * i_size has been updated in + * checkin_logical_cluster + */ + ret = reiser4_update_sd(inode); + if (unlikely(ret != 0)) + warning("edward-1603", + "Can not update stat-data: %i. 
FSCK?", + ret); + } + kfree(fsdata); + return ret; +} + +/* plugin->bmap */ +sector_t bmap_cryptcompress(struct address_space *mapping, sector_t lblock) +{ + return -EINVAL; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/cryptcompress.h b/fs/reiser4/plugin/file/cryptcompress.h new file mode 100644 index 000000000000..fbdd85f157a2 --- /dev/null +++ b/fs/reiser4/plugin/file/cryptcompress.h @@ -0,0 +1,619 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* See http://www.namesys.com/cryptcompress_design.html */ + +#if !defined( __FS_REISER4_CRYPTCOMPRESS_H__ ) +#define __FS_REISER4_CRYPTCOMPRESS_H__ + +#include "../../page_cache.h" +#include "../compress/compress.h" +#include "../crypto/cipher.h" + +#include <linux/pagemap.h> + +#define MIN_CLUSTER_SHIFT PAGE_SHIFT +#define MAX_CLUSTER_SHIFT 16 +#define MAX_CLUSTER_NRPAGES (1U << MAX_CLUSTER_SHIFT >> PAGE_SHIFT) +#define DC_CHECKSUM_SIZE 4 + +#define MIN_LATTICE_FACTOR 1 +#define MAX_LATTICE_FACTOR 32 + +/* this mask contains all non-standard plugins that might + be present in reiser4-specific part of inode managed by + cryptcompress file plugin */ +#define cryptcompress_mask \ + ((1 << PSET_FILE) | \ + (1 << PSET_CLUSTER) | \ + (1 << PSET_CIPHER) | \ + (1 << PSET_DIGEST) | \ + (1 << PSET_COMPRESSION) | \ + (1 << PSET_COMPRESSION_MODE)) + +#if REISER4_DEBUG +static inline int cluster_shift_ok(int shift) +{ + return (shift >= MIN_CLUSTER_SHIFT) && (shift <= MAX_CLUSTER_SHIFT); +} +#endif + +#if REISER4_DEBUG +#define INODE_PGCOUNT(inode) \ +({ \ + assert("edward-1530", inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); \ + atomic_read(&cryptcompress_inode_data(inode)->pgcount); \ + }) +#define INODE_PGCOUNT_INC(inode) \ +do { \ + assert("edward-1531", inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); \ + atomic_inc(&cryptcompress_inode_data(inode)->pgcount); \ +} while (0) +#define INODE_PGCOUNT_DEC(inode) \ +do { \ + if (inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)) \ + atomic_dec(&cryptcompress_inode_data(inode)->pgcount); \ +} while (0) +#else +#define INODE_PGCOUNT(inode) (0) +#define INODE_PGCOUNT_INC(inode) +#define INODE_PGCOUNT_DEC(inode) +#endif /* REISER4_DEBUG */ + +struct tfm_stream { + __u8 *data; + size_t size; +}; + +typedef enum { + INPUT_STREAM, + OUTPUT_STREAM, + LAST_STREAM +} tfm_stream_id; + +typedef struct tfm_stream * tfm_unit[LAST_STREAM]; + +static inline __u8 *ts_data(struct tfm_stream * stm) +{ + assert("edward-928", stm != NULL); + return stm->data; +} + +static inline size_t ts_size(struct tfm_stream * stm) +{ + assert("edward-929", stm != NULL); + return stm->size; +} + +static inline void set_ts_size(struct tfm_stream * stm, size_t size) +{ + assert("edward-930", stm != NULL); + + stm->size = size; +} + +static inline int alloc_ts(struct tfm_stream ** stm) +{ + assert("edward-931", stm); + assert("edward-932", *stm == NULL); + + *stm = kzalloc(sizeof(**stm), reiser4_ctx_gfp_mask_get()); + if (!*stm) + return -ENOMEM; + return 0; +} + +static inline void free_ts(struct tfm_stream * stm) +{ + assert("edward-933", !ts_data(stm)); + assert("edward-934", !ts_size(stm)); + + kfree(stm); +} + +static inline int alloc_ts_data(struct tfm_stream * stm, size_t size) +{ + assert("edward-935", !ts_data(stm)); + assert("edward-936", !ts_size(stm)); + 
assert("edward-937", size != 0); + + stm->data = reiser4_vmalloc(size); + if (!stm->data) + return -ENOMEM; + set_ts_size(stm, size); + return 0; +} + +static inline void free_ts_data(struct tfm_stream * stm) +{ + assert("edward-938", equi(ts_data(stm), ts_size(stm))); + + if (ts_data(stm)) + vfree(ts_data(stm)); + memset(stm, 0, sizeof *stm); +} + +/* Write modes for item conversion in flush convert phase */ +typedef enum { + CTAIL_INVAL_CONVERT_MODE = 0, + CTAIL_APPEND_ITEM = 1, + CTAIL_OVERWRITE_ITEM = 2, + CTAIL_CUT_ITEM = 3 +} ctail_convert_mode_t; + +typedef enum { + LC_INVAL = 0, /* invalid value */ + LC_APPOV = 1, /* append and/or overwrite */ + LC_EXPAND = 2, /* expanding truncate */ + LC_SHRINK = 3 /* shrinking truncate */ +} logical_cluster_op; + +/* Transform cluster. + * Intermediate state between page cluster and disk cluster + * Is used for data transform (compression/encryption) + */ +struct tfm_cluster { + coa_set coa; /* compression algorithms info */ + tfm_unit tun; /* plain and transformed streams */ + tfm_action act; + int uptodate; + int lsize; /* number of bytes in logical cluster */ + int len; /* length of the transform stream */ + unsigned int hole:1; /* should punch hole */ +}; + +static inline coa_t get_coa(struct tfm_cluster * tc, reiser4_compression_id id, + tfm_action act) +{ + return tc->coa[id][act]; +} + +static inline void set_coa(struct tfm_cluster * tc, reiser4_compression_id id, + tfm_action act, coa_t coa) +{ + tc->coa[id][act] = coa; +} + +static inline int alloc_coa(struct tfm_cluster * tc, compression_plugin * cplug) +{ + coa_t coa; + + coa = cplug->alloc(tc->act); + if (IS_ERR(coa)) + return PTR_ERR(coa); + set_coa(tc, cplug->h.id, tc->act, coa); + return 0; +} + +static inline int +grab_coa(struct tfm_cluster * tc, compression_plugin * cplug) +{ + return (cplug->alloc && !get_coa(tc, cplug->h.id, tc->act) ? 
+ alloc_coa(tc, cplug) : 0); +} + +static inline void free_coa_set(struct tfm_cluster * tc) +{ + tfm_action j; + reiser4_compression_id i; + compression_plugin *cplug; + + assert("edward-810", tc != NULL); + + for (j = 0; j < TFMA_LAST; j++) + for (i = 0; i < LAST_COMPRESSION_ID; i++) { + if (!get_coa(tc, i, j)) + continue; + cplug = compression_plugin_by_id(i); + assert("edward-812", cplug->free != NULL); + cplug->free(get_coa(tc, i, j), j); + set_coa(tc, i, j, 0); + } + return; +} + +static inline struct tfm_stream * get_tfm_stream(struct tfm_cluster * tc, + tfm_stream_id id) +{ + return tc->tun[id]; +} + +static inline void set_tfm_stream(struct tfm_cluster * tc, + tfm_stream_id id, struct tfm_stream * ts) +{ + tc->tun[id] = ts; +} + +static inline __u8 *tfm_stream_data(struct tfm_cluster * tc, tfm_stream_id id) +{ + return ts_data(get_tfm_stream(tc, id)); +} + +static inline void set_tfm_stream_data(struct tfm_cluster * tc, + tfm_stream_id id, __u8 * data) +{ + get_tfm_stream(tc, id)->data = data; +} + +static inline size_t tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id) +{ + return ts_size(get_tfm_stream(tc, id)); +} + +static inline void +set_tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id, size_t size) +{ + get_tfm_stream(tc, id)->size = size; +} + +static inline int +alloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id) +{ + assert("edward-939", tc != NULL); + assert("edward-940", !get_tfm_stream(tc, id)); + + tc->tun[id] = kzalloc(sizeof(struct tfm_stream), + reiser4_ctx_gfp_mask_get()); + if (!tc->tun[id]) + return -ENOMEM; + return alloc_ts_data(get_tfm_stream(tc, id), size); +} + +static inline int +realloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id) +{ + assert("edward-941", tfm_stream_size(tc, id) < size); + free_ts_data(get_tfm_stream(tc, id)); + return alloc_ts_data(get_tfm_stream(tc, id), size); +} + +static inline void free_tfm_stream(struct tfm_cluster * tc, tfm_stream_id id) +{ + free_ts_data(get_tfm_stream(tc, id)); + free_ts(get_tfm_stream(tc, id)); + set_tfm_stream(tc, id, 0); +} + +static inline unsigned coa_overrun(compression_plugin * cplug, int ilen) +{ + return (cplug->overrun != NULL ? 
cplug->overrun(ilen) : 0); +} + +static inline void free_tfm_unit(struct tfm_cluster * tc) +{ + tfm_stream_id id; + for (id = 0; id < LAST_STREAM; id++) { + if (!get_tfm_stream(tc, id)) + continue; + free_tfm_stream(tc, id); + } +} + +static inline void put_tfm_cluster(struct tfm_cluster * tc) +{ + assert("edward-942", tc != NULL); + free_coa_set(tc); + free_tfm_unit(tc); +} + +static inline int tfm_cluster_is_uptodate(struct tfm_cluster * tc) +{ + assert("edward-943", tc != NULL); + assert("edward-944", tc->uptodate == 0 || tc->uptodate == 1); + return (tc->uptodate == 1); +} + +static inline void tfm_cluster_set_uptodate(struct tfm_cluster * tc) +{ + assert("edward-945", tc != NULL); + assert("edward-946", tc->uptodate == 0 || tc->uptodate == 1); + tc->uptodate = 1; + return; +} + +static inline void tfm_cluster_clr_uptodate(struct tfm_cluster * tc) +{ + assert("edward-947", tc != NULL); + assert("edward-948", tc->uptodate == 0 || tc->uptodate == 1); + tc->uptodate = 0; + return; +} + +static inline int tfm_stream_is_set(struct tfm_cluster * tc, tfm_stream_id id) +{ + return (get_tfm_stream(tc, id) && + tfm_stream_data(tc, id) && tfm_stream_size(tc, id)); +} + +static inline int tfm_cluster_is_set(struct tfm_cluster * tc) +{ + int i; + for (i = 0; i < LAST_STREAM; i++) + if (!tfm_stream_is_set(tc, i)) + return 0; + return 1; +} + +static inline void alternate_streams(struct tfm_cluster * tc) +{ + struct tfm_stream *tmp = get_tfm_stream(tc, INPUT_STREAM); + + set_tfm_stream(tc, INPUT_STREAM, get_tfm_stream(tc, OUTPUT_STREAM)); + set_tfm_stream(tc, OUTPUT_STREAM, tmp); +} + +/* Set of states to indicate a kind of data + * that will be written to the window */ +typedef enum { + DATA_WINDOW, /* user's data */ + HOLE_WINDOW /* zeroes (such kind of data can be written + * if we start to write from offset > i_size) */ +} window_stat; + +/* Window (of logical cluster size) discretely sliding along a file. + * Is used to locate hole region in a logical cluster to be properly + * represented on disk. + * We split a write to cryptcompress file into writes to its logical + * clusters. Before writing to a logical cluster we set a window, i.e. + * calculate values of the following fields: + */ +struct reiser4_slide { + unsigned off; /* offset to write from */ + unsigned count; /* number of bytes to write */ + unsigned delta; /* number of bytes to append to the hole */ + window_stat stat; /* what kind of data will be written starting + from @off */ +}; + +/* Possible states of a disk cluster */ +typedef enum { + INVAL_DISK_CLUSTER, /* unknown state */ + PREP_DISK_CLUSTER, /* disk cluster got converted by flush + * at least 1 time */ + UNPR_DISK_CLUSTER, /* disk cluster just created and should be + * converted by flush */ + FAKE_DISK_CLUSTER, /* disk cluster doesn't exist neither in memory + * nor on disk */ + TRNC_DISK_CLUSTER /* disk cluster is partially truncated */ +} disk_cluster_stat; + +/* The following structure represents various stages of the same logical + * cluster of index @index: + * . fixed slide + * . page cluster (stage in primary cache) + * . transform cluster (transition stage) + * . disk cluster (stage in secondary cache) + * This structure is used in transition and synchronizing operations, e.g. + * transform cluster is a transition state when synchronizing page cluster + * and disk cluster. + * FIXME: Encapsulate page cluster, disk cluster. 
+ */ +struct cluster_handle { + cloff_t index; /* offset in a file (unit is a cluster size) */ + int index_valid; /* for validating the index above, if needed */ + struct file *file; /* host file */ + + /* logical cluster */ + struct reiser4_slide *win; /* sliding window to locate holes */ + logical_cluster_op op; /* logical cluster operation (truncate or + append/overwrite) */ + /* transform cluster */ + struct tfm_cluster tc; /* contains all needed info to synchronize + page cluster and disk cluster) */ + /* page cluster */ + int nr_pages; /* number of pages of current checkin action */ + int old_nrpages; /* number of pages of last checkin action */ + struct page **pages; /* attached pages */ + jnode * node; /* jnode for capture */ + + /* disk cluster */ + hint_t *hint; /* current position in the tree */ + disk_cluster_stat dstat; /* state of the current disk cluster */ + int reserved; /* is space for disk cluster reserved */ +#if REISER4_DEBUG + reiser4_context *ctx; + int reserved_prepped; + int reserved_unprepped; +#endif + +}; + +static inline __u8 * tfm_input_data (struct cluster_handle * clust) +{ + return tfm_stream_data(&clust->tc, INPUT_STREAM); +} + +static inline __u8 * tfm_output_data (struct cluster_handle * clust) +{ + return tfm_stream_data(&clust->tc, OUTPUT_STREAM); +} + +static inline int reset_cluster_pgset(struct cluster_handle * clust, + int nrpages) +{ + assert("edward-1057", clust->pages != NULL); + memset(clust->pages, 0, sizeof(*clust->pages) * nrpages); + return 0; +} + +static inline int alloc_cluster_pgset(struct cluster_handle * clust, + int nrpages) +{ + assert("edward-949", clust != NULL); + assert("edward-1362", clust->pages == NULL); + assert("edward-950", nrpages != 0 && nrpages <= MAX_CLUSTER_NRPAGES); + + clust->pages = kzalloc(sizeof(*clust->pages) * nrpages, + reiser4_ctx_gfp_mask_get()); + if (!clust->pages) + return RETERR(-ENOMEM); + return 0; +} + +static inline void move_cluster_pgset(struct cluster_handle *clust, + struct page ***pages, int * nr_pages) +{ + assert("edward-1545", clust != NULL && clust->pages != NULL); + assert("edward-1546", pages != NULL && *pages == NULL); + *pages = clust->pages; + *nr_pages = clust->nr_pages; + clust->pages = NULL; +} + +static inline void free_cluster_pgset(struct cluster_handle * clust) +{ + assert("edward-951", clust->pages != NULL); + kfree(clust->pages); + clust->pages = NULL; +} + +static inline void put_cluster_handle(struct cluster_handle * clust) +{ + assert("edward-435", clust != NULL); + + put_tfm_cluster(&clust->tc); + if (clust->pages) + free_cluster_pgset(clust); + memset(clust, 0, sizeof *clust); +} + +static inline void inc_keyload_count(struct reiser4_crypto_info * data) +{ + assert("edward-1410", data != NULL); + data->keyload_count++; +} + +static inline void dec_keyload_count(struct reiser4_crypto_info * data) +{ + assert("edward-1411", data != NULL); + assert("edward-1412", data->keyload_count > 0); + data->keyload_count--; +} + +static inline int capture_cluster_jnode(jnode * node) +{ + return reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); +} + +/* cryptcompress specific part of reiser4_inode */ +struct cryptcompress_info { + struct mutex checkin_mutex; /* This is to serialize + * checkin_logical_cluster operations */ + cloff_t trunc_index; /* Index of the leftmost truncated disk + * cluster (to resolve races with read) */ + struct reiser4_crypto_info *crypt; + /* + * the following 2 fields are controlled by compression mode plugin + */ + int compress_toggle; /* Current status of 
compressibility */ + int lattice_factor; /* Factor of dynamic lattice. FIXME: Have + * a compression_toggle to keep the factor + */ +#if REISER4_DEBUG + atomic_t pgcount; /* number of grabbed pages */ +#endif +}; + +static inline void set_compression_toggle (struct cryptcompress_info * info, int val) +{ + info->compress_toggle = val; +} + +static inline int get_compression_toggle (struct cryptcompress_info * info) +{ + return info->compress_toggle; +} + +static inline int compression_is_on(struct cryptcompress_info * info) +{ + return get_compression_toggle(info) == 1; +} + +static inline void turn_on_compression(struct cryptcompress_info * info) +{ + set_compression_toggle(info, 1); +} + +static inline void turn_off_compression(struct cryptcompress_info * info) +{ + set_compression_toggle(info, 0); +} + +static inline void set_lattice_factor(struct cryptcompress_info * info, int val) +{ + info->lattice_factor = val; +} + +static inline int get_lattice_factor(struct cryptcompress_info * info) +{ + return info->lattice_factor; +} + +struct cryptcompress_info *cryptcompress_inode_data(const struct inode *); +int equal_to_rdk(znode *, const reiser4_key *); +int goto_right_neighbor(coord_t *, lock_handle *); +int cryptcompress_inode_ok(struct inode *inode); +int coord_is_unprepped_ctail(const coord_t * coord); +extern int do_readpage_ctail(struct inode *, struct cluster_handle *, + struct page * page, znode_lock_mode mode); +extern int ctail_insert_unprepped_cluster(struct cluster_handle * clust, + struct inode * inode); +extern int readpages_cryptcompress(struct file*, struct address_space*, + struct list_head*, unsigned); +int bind_cryptcompress(struct inode *child, struct inode *parent); +void destroy_inode_cryptcompress(struct inode * inode); +int grab_page_cluster(struct inode *inode, struct cluster_handle * clust, + rw_op rw); +int write_dispatch_hook(struct file *file, struct inode * inode, + loff_t pos, struct cluster_handle * clust, + struct dispatch_context * cont); +int setattr_dispatch_hook(struct inode * inode); +struct reiser4_crypto_info * inode_crypto_info(struct inode * inode); +void inherit_crypto_info_common(struct inode * parent, struct inode * object, + int (*can_inherit)(struct inode * child, + struct inode * parent)); +void reiser4_attach_crypto_info(struct inode * inode, + struct reiser4_crypto_info * info); +void change_crypto_info(struct inode * inode, struct reiser4_crypto_info * new); +struct reiser4_crypto_info * reiser4_alloc_crypto_info (struct inode * inode); + +static inline struct crypto_blkcipher * info_get_cipher(struct reiser4_crypto_info * info) +{ + return info->cipher; +} + +static inline void info_set_cipher(struct reiser4_crypto_info * info, + struct crypto_blkcipher * tfm) +{ + info->cipher = tfm; +} + +static inline struct crypto_hash * info_get_digest(struct reiser4_crypto_info * info) +{ + return info->digest; +} + +static inline void info_set_digest(struct reiser4_crypto_info * info, + struct crypto_hash * tfm) +{ + info->digest = tfm; +} + +static inline void put_cluster_page(struct page * page) +{ + put_page(page); +} + +#endif /* __FS_REISER4_CRYPTCOMPRESS_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/file.c b/fs/reiser4/plugin/file/file.c new file mode 100644 index 000000000000..9da0744cc750 --- /dev/null +++ b/fs/reiser4/plugin/file/file.c @@ -0,0 +1,2796 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * this file contains implementations of inode/file/address_space/file plugin + * operations specific for "unix file plugin" (plugin id is + * UNIX_FILE_PLUGIN_ID). "Unix file" is either built of tail items only + * (FORMATTING_ID) or of extent items only (EXTENT_POINTER_ID) or empty (have + * no items but stat data) + */ + +#include "../../inode.h" +#include "../../super.h" +#include "../../tree_walk.h" +#include "../../carry.h" +#include "../../page_cache.h" +#include "../../ioctl.h" +#include "../object.h" +#include "../cluster.h" +#include "../../safe_link.h" + +#include <linux/writeback.h> +#include <linux/pagevec.h> +#include <linux/syscalls.h> + + +static int unpack(struct file *file, struct inode *inode, int forever); +static void drop_access(struct unix_file_info *); +static int hint_validate(hint_t * hint, const reiser4_key * key, int check_key, + znode_lock_mode lock_mode); + +/* Get exclusive access and make sure that file is not partially + * converted (It may happen that another process is doing tail + * conversion. If so, wait until it completes) + */ +static inline void get_exclusive_access_careful(struct unix_file_info * uf_info, + struct inode *inode) +{ + do { + get_exclusive_access(uf_info); + if (!reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)) + break; + drop_exclusive_access(uf_info); + schedule(); + } while (1); +} + +/* get unix file plugin specific portion of inode */ +struct unix_file_info *unix_file_inode_data(const struct inode *inode) +{ + return &reiser4_inode_data(inode)->file_plugin_data.unix_file_info; +} + +/** + * equal_to_rdk - compare key and znode's right delimiting key + * @node: node whose right delimiting key to compare with @key + * @key: key to compare with @node's right delimiting key + * + * Returns true if @key is equal to right delimiting key of @node. + */ +int equal_to_rdk(znode *node, const reiser4_key *key) +{ + int result; + + read_lock_dk(znode_get_tree(node)); + result = keyeq(key, znode_get_rd_key(node)); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +#if REISER4_DEBUG + +/** + * equal_to_ldk - compare key and znode's left delimiting key + * @node: node whose left delimiting key to compare with @key + * @key: key to compare with @node's left delimiting key + * + * Returns true if @key is equal to left delimiting key of @node. + */ +int equal_to_ldk(znode *node, const reiser4_key *key) +{ + int result; + + read_lock_dk(znode_get_tree(node)); + result = keyeq(key, znode_get_ld_key(node)); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +/** + * check_coord - check whether coord corresponds to key + * @coord: coord to check + * @key: key @coord has to correspond to + * + * Returns true if @coord is set as if it was set as result of lookup with @key + * in coord->node. 
+ */ +static int check_coord(const coord_t *coord, const reiser4_key *key) +{ + coord_t twin; + + node_plugin_by_node(coord->node)->lookup(coord->node, key, + FIND_MAX_NOT_MORE_THAN, &twin); + return coords_equal(coord, &twin); +} + +#endif /* REISER4_DEBUG */ + +/** + * init_uf_coord - initialize extended coord + * @uf_coord: + * @lh: + * + * + */ +void init_uf_coord(uf_coord_t *uf_coord, lock_handle *lh) +{ + coord_init_zero(&uf_coord->coord); + coord_clear_iplug(&uf_coord->coord); + uf_coord->lh = lh; + init_lh(lh); + memset(&uf_coord->extension, 0, sizeof(uf_coord->extension)); + uf_coord->valid = 0; +} + +static void validate_extended_coord(uf_coord_t *uf_coord, loff_t offset) +{ + assert("vs-1333", uf_coord->valid == 0); + + if (coord_is_between_items(&uf_coord->coord)) + return; + + assert("vs-1348", + item_plugin_by_coord(&uf_coord->coord)->s.file. + init_coord_extension); + + item_body_by_coord(&uf_coord->coord); + item_plugin_by_coord(&uf_coord->coord)->s.file. + init_coord_extension(uf_coord, offset); +} + +/** + * goto_right_neighbor - lock right neighbor, drop current node lock + * @coord: + * @lh: + * + * Obtain lock on right neighbor and drop lock on current node. + */ +int goto_right_neighbor(coord_t *coord, lock_handle *lh) +{ + int result; + lock_handle lh_right; + + assert("vs-1100", znode_is_locked(coord->node)); + + init_lh(&lh_right); + result = reiser4_get_right_neighbor(&lh_right, coord->node, + znode_is_wlocked(coord->node) ? + ZNODE_WRITE_LOCK : ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result) { + done_lh(&lh_right); + return result; + } + + /* + * we hold two longterm locks on neighboring nodes. Unlock left of + * them + */ + done_lh(lh); + + coord_init_first_unit_nocheck(coord, lh_right.node); + move_lh(lh, &lh_right); + + return 0; + +} + +/** + * set_file_state + * @uf_info: + * @cbk_result: + * @level: + * + * This is to be used by find_file_item and in find_file_state to + * determine real state of file + */ +static void set_file_state(struct unix_file_info *uf_info, int cbk_result, + tree_level level) +{ + if (cbk_errored(cbk_result)) + /* error happened in find_file_item */ + return; + + assert("vs-1164", level == LEAF_LEVEL || level == TWIG_LEVEL); + + if (uf_info->container == UF_CONTAINER_UNKNOWN) { + if (cbk_result == CBK_COORD_NOTFOUND) + uf_info->container = UF_CONTAINER_EMPTY; + else if (level == LEAF_LEVEL) + uf_info->container = UF_CONTAINER_TAILS; + else + uf_info->container = UF_CONTAINER_EXTENTS; + } else { + /* + * file state is known, check whether it is set correctly if + * file is not being tail converted + */ + if (!reiser4_inode_get_flag(unix_file_info_to_inode(uf_info), + REISER4_PART_IN_CONV)) { + assert("vs-1162", + ergo(level == LEAF_LEVEL && + cbk_result == CBK_COORD_FOUND, + uf_info->container == UF_CONTAINER_TAILS)); + assert("vs-1165", + ergo(level == TWIG_LEVEL && + cbk_result == CBK_COORD_FOUND, + uf_info->container == UF_CONTAINER_EXTENTS)); + } + } +} + +int find_file_item_nohint(coord_t *coord, lock_handle *lh, + const reiser4_key *key, znode_lock_mode lock_mode, + struct inode *inode) +{ + return reiser4_object_lookup(inode, key, coord, lh, lock_mode, + FIND_MAX_NOT_MORE_THAN, + TWIG_LEVEL, LEAF_LEVEL, + (lock_mode == ZNODE_READ_LOCK) ? 
CBK_UNIQUE : + (CBK_UNIQUE | CBK_FOR_INSERT), + NULL /* ra_info */ ); +} + +/** + * find_file_item - look for file item in the tree + * @hint: provides coordinate, lock handle, seal + * @key: key for search + * @mode: mode of lock to put on returned node + * @ra_info: + * @inode: + * + * This finds position in the tree corresponding to @key. It first tries to use + * @hint's seal if it is set. + */ +int find_file_item(hint_t *hint, const reiser4_key *key, + znode_lock_mode lock_mode, + struct inode *inode) +{ + int result; + coord_t *coord; + lock_handle *lh; + + assert("nikita-3030", reiser4_schedulable()); + assert("vs-1707", hint != NULL); + assert("vs-47", inode != NULL); + + coord = &hint->ext_coord.coord; + lh = hint->ext_coord.lh; + init_lh(lh); + + result = hint_validate(hint, key, 1 /* check key */, lock_mode); + if (!result) { + if (coord->between == AFTER_UNIT && + equal_to_rdk(coord->node, key)) { + result = goto_right_neighbor(coord, lh); + if (result == -E_NO_NEIGHBOR) + return RETERR(-EIO); + if (result) + return result; + assert("vs-1152", equal_to_ldk(coord->node, key)); + /* + * we moved to different node. Invalidate coord + * extension, zload is necessary to init it again + */ + hint->ext_coord.valid = 0; + } + + set_file_state(unix_file_inode_data(inode), CBK_COORD_FOUND, + znode_get_level(coord->node)); + + return CBK_COORD_FOUND; + } + + coord_init_zero(coord); + result = find_file_item_nohint(coord, lh, key, lock_mode, inode); + set_file_state(unix_file_inode_data(inode), result, + znode_get_level(coord->node)); + + /* FIXME: we might already have coord extension initialized */ + hint->ext_coord.valid = 0; + return result; +} + +void hint_init_zero(hint_t * hint) +{ + memset(hint, 0, sizeof(*hint)); + init_lh(&hint->lh); + hint->ext_coord.lh = &hint->lh; +} + +static int find_file_state(struct inode *inode, struct unix_file_info *uf_info) +{ + int result; + reiser4_key key; + coord_t coord; + lock_handle lh; + + assert("vs-1628", ea_obtained(uf_info)); + + if (uf_info->container == UF_CONTAINER_UNKNOWN) { + key_by_inode_and_offset_common(inode, 0, &key); + init_lh(&lh); + result = find_file_item_nohint(&coord, &lh, &key, + ZNODE_READ_LOCK, inode); + set_file_state(uf_info, result, znode_get_level(coord.node)); + done_lh(&lh); + if (!cbk_errored(result)) + result = 0; + } else + result = 0; + assert("vs-1074", + ergo(result == 0, uf_info->container != UF_CONTAINER_UNKNOWN)); + reiser4_txn_restart_current(); + return result; +} + +/** + * Estimate and reserve space needed to truncate page + * which gets partially truncated: one block for page + * itself, stat-data update (estimate_one_insert_into_item) + * and one item insertion (estimate_one_insert_into_item) + * which may happen if page corresponds to hole extent and + * unallocated one will have to be created + */ +static int reserve_partial_page(reiser4_tree * tree) +{ + grab_space_enable(); + return reiser4_grab_reserved(reiser4_get_current_sb(), + 1 + + 2 * estimate_one_insert_into_item(tree), + BA_CAN_COMMIT); +} + +/* estimate and reserve space needed to cut one item and update one stat data */ +static int reserve_cut_iteration(reiser4_tree * tree) +{ + __u64 estimate = estimate_one_item_removal(tree) + + estimate_one_insert_into_item(tree); + + assert("nikita-3172", lock_stack_isclean(get_current_lock_stack())); + + grab_space_enable(); + /* We need to double our estimate now that we can delete more than one + node. 
*/ + return reiser4_grab_reserved(reiser4_get_current_sb(), estimate * 2, + BA_CAN_COMMIT); +} + +int reiser4_update_file_size(struct inode *inode, loff_t new_size, + int update_sd) +{ + int result = 0; + + INODE_SET_SIZE(inode, new_size); + if (update_sd) { + inode->i_ctime = inode->i_mtime = current_time(inode); + result = reiser4_update_sd(inode); + } + return result; +} + +/** + * Cut file items one by one starting from the last one until + * new file size (inode->i_size) is reached. Reserve space + * and update file stat data on every single cut from the tree + */ +int cut_file_items(struct inode *inode, loff_t new_size, + int update_sd, loff_t cur_size, + int (*update_actor) (struct inode *, loff_t, int)) +{ + reiser4_key from_key, to_key; + reiser4_key smallest_removed; + file_plugin *fplug = inode_file_plugin(inode); + int result; + int progress = 0; + + assert("vs-1248", + fplug == file_plugin_by_id(UNIX_FILE_PLUGIN_ID) || + fplug == file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + + fplug->key_by_inode(inode, new_size, &from_key); + to_key = from_key; + set_key_offset(&to_key, cur_size - 1 /*get_key_offset(reiser4_max_key()) */ ); + /* this loop normally runs just once */ + while (1) { + result = reserve_cut_iteration(reiser4_tree_by_inode(inode)); + if (result) + break; + + result = reiser4_cut_tree_object(current_tree, &from_key, &to_key, + &smallest_removed, inode, 1, + &progress); + if (result == -E_REPEAT) { + /** + * -E_REPEAT is a signal to interrupt a long + * file truncation process + */ + if (progress) { + result = update_actor(inode, + get_key_offset(&smallest_removed), + update_sd); + if (result) + break; + } + /* the below does up(sbinfo->delete_mutex). + * Do not get folled */ + reiser4_release_reserved(inode->i_sb); + /** + * reiser4_cut_tree_object() was interrupted probably + * because current atom requires commit, we have to + * release transaction handle to allow atom commit. + */ + reiser4_txn_restart_current(); + continue; + } + if (result + && !(result == CBK_COORD_NOTFOUND && new_size == 0 + && inode->i_size == 0)) + break; + + set_key_offset(&smallest_removed, new_size); + /* Final sd update after the file gets its correct size */ + result = update_actor(inode, get_key_offset(&smallest_removed), + update_sd); + break; + } + + /* the below does up(sbinfo->delete_mutex). Do not get folled */ + reiser4_release_reserved(inode->i_sb); + + return result; +} + +int find_or_create_extent(struct page *page); + +/* part of truncate_file_body: it is called when truncate is used to make file + shorter */ +static int shorten_file(struct inode *inode, loff_t new_size) +{ + int result; + struct page *page; + int padd_from; + unsigned long index; + struct unix_file_info *uf_info; + + /* + * all items of ordinary reiser4 file are grouped together. That is why + * we can use reiser4_cut_tree. 
Plan B files (for instance) can not be + * truncated that simply + */ + result = cut_file_items(inode, new_size, 1 /*update_sd */ , + get_key_offset(reiser4_max_key()), + reiser4_update_file_size); + if (result) + return result; + + uf_info = unix_file_inode_data(inode); + assert("vs-1105", new_size == inode->i_size); + if (new_size == 0) { + uf_info->container = UF_CONTAINER_EMPTY; + return 0; + } + + result = find_file_state(inode, uf_info); + if (result) + return result; + if (uf_info->container == UF_CONTAINER_TAILS) + /* + * No need to worry about zeroing last page after new file + * end + */ + return 0; + + padd_from = inode->i_size & (PAGE_SIZE - 1); + if (!padd_from) + /* file is truncated to page boundary */ + return 0; + + result = reserve_partial_page(reiser4_tree_by_inode(inode)); + if (result) { + reiser4_release_reserved(inode->i_sb); + return result; + } + + /* last page is partially truncated - zero its content */ + index = (inode->i_size >> PAGE_SHIFT); + page = read_mapping_page(inode->i_mapping, index, NULL); + if (IS_ERR(page)) { + /* + * the below does up(sbinfo->delete_mutex). Do not get + * confused + */ + reiser4_release_reserved(inode->i_sb); + if (likely(PTR_ERR(page) == -EINVAL)) { + /* looks like file is built of tail items */ + return 0; + } + return PTR_ERR(page); + } + wait_on_page_locked(page); + if (!PageUptodate(page)) { + put_page(page); + /* + * the below does up(sbinfo->delete_mutex). Do not get + * confused + */ + reiser4_release_reserved(inode->i_sb); + return RETERR(-EIO); + } + + /* + * if page correspons to hole extent unit - unallocated one will be + * created here. This is not necessary + */ + result = find_or_create_extent(page); + + /* + * FIXME: cut_file_items has already updated inode. Probably it would + * be better to update it here when file is really truncated + */ + if (result) { + put_page(page); + /* + * the below does up(sbinfo->delete_mutex). Do not get + * confused + */ + reiser4_release_reserved(inode->i_sb); + return result; + } + + lock_page(page); + assert("vs-1066", PageLocked(page)); + zero_user_segment(page, padd_from, PAGE_SIZE); + unlock_page(page); + put_page(page); + /* the below does up(sbinfo->delete_mutex). Do not get confused */ + reiser4_release_reserved(inode->i_sb); + return 0; +} + +/** + * should_have_notail + * @uf_info: + * @new_size: + * + * Calls formatting plugin to see whether file of size @new_size has to be + * stored in unformatted nodes or in tail items. 0 is returned for later case. + */ +static int should_have_notail(const struct unix_file_info *uf_info, loff_t new_size) +{ + if (!uf_info->tplug) + return 1; + return !uf_info->tplug->have_tail(unix_file_info_to_inode(uf_info), + new_size); + +} + +/** + * truncate_file_body - change length of file + * @inode: inode of file + * @new_size: new file length + * + * Adjusts items file @inode is built of to match @new_size. It may either cut + * items or add them to represent a hole at the end of file. The caller has to + * obtain exclusive access to the file. + */ +static int truncate_file_body(struct inode *inode, struct iattr *attr) +{ + int result; + loff_t new_size = attr->ia_size; + + if (inode->i_size < new_size) { + /* expanding truncate */ + struct unix_file_info *uf_info = unix_file_inode_data(inode); + + result = find_file_state(inode, uf_info); + if (result) + return result; + + if (should_have_notail(uf_info, new_size)) { + /* + * file of size @new_size has to be built of + * extents. 
If it is built of tails - convert to + * extents + */ + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * if file is being convered by another process + * - wait until it completes + */ + while (1) { + if (reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)) { + drop_exclusive_access(uf_info); + schedule(); + get_exclusive_access(uf_info); + continue; + } + break; + } + + if (uf_info->container == UF_CONTAINER_TAILS) { + result = tail2extent(uf_info); + if (result) + return result; + } + } + result = reiser4_write_extent(NULL, inode, NULL, + 0, &new_size); + if (result) + return result; + uf_info->container = UF_CONTAINER_EXTENTS; + } else { + if (uf_info->container == UF_CONTAINER_EXTENTS) { + result = reiser4_write_extent(NULL, inode, NULL, + 0, &new_size); + if (result) + return result; + } else { + result = reiser4_write_tail(NULL, inode, NULL, + 0, &new_size); + if (result) + return result; + uf_info->container = UF_CONTAINER_TAILS; + } + } + BUG_ON(result > 0); + result = reiser4_update_file_size(inode, new_size, 1); + BUG_ON(result != 0); + } else + result = shorten_file(inode, new_size); + return result; +} + +/** + * load_file_hint - copy hint from struct file to local variable + * @file: file to get hint from + * @hint: structure to fill + * + * Reiser4 specific portion of struct file may contain information (hint) + * stored on exiting from previous read or write. That information includes + * seal of znode and coord within that znode where previous read or write + * stopped. This function copies that information to @hint if it was stored or + * initializes @hint by 0s otherwise. + */ +int load_file_hint(struct file *file, hint_t *hint) +{ + reiser4_file_fsdata *fsdata; + + if (file) { + fsdata = reiser4_get_file_fsdata(file); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + + spin_lock_inode(file_inode(file)); + if (reiser4_seal_is_set(&fsdata->reg.hint.seal)) { + memcpy(hint, &fsdata->reg.hint, sizeof(*hint)); + init_lh(&hint->lh); + hint->ext_coord.lh = &hint->lh; + spin_unlock_inode(file_inode(file)); + /* + * force re-validation of the coord on the first + * iteration of the read/write loop. + */ + hint->ext_coord.valid = 0; + assert("nikita-19892", + coords_equal(&hint->seal.coord1, + &hint->ext_coord.coord)); + return 0; + } + memset(&fsdata->reg.hint, 0, sizeof(hint_t)); + spin_unlock_inode(file_inode(file)); + } + hint_init_zero(hint); + return 0; +} + +/** + * save_file_hint - copy hint to reiser4 private struct file's part + * @file: file to save hint in + * @hint: hint to save + * + * This copies @hint to reiser4 private part of struct file. It can help + * speedup future accesses to the file. + */ +void save_file_hint(struct file *file, const hint_t *hint) +{ + reiser4_file_fsdata *fsdata; + + assert("edward-1337", hint != NULL); + + if (!file || !reiser4_seal_is_set(&hint->seal)) + return; + fsdata = reiser4_get_file_fsdata(file); + assert("vs-965", !IS_ERR(fsdata)); + assert("nikita-19891", + coords_equal(&hint->seal.coord1, &hint->ext_coord.coord)); + assert("vs-30", hint->lh.owner == NULL); + spin_lock_inode(file_inode(file)); + fsdata->reg.hint = *hint; + spin_unlock_inode(file_inode(file)); + return; +} + +void reiser4_unset_hint(hint_t * hint) +{ + assert("vs-1315", hint); + hint->ext_coord.valid = 0; + reiser4_seal_done(&hint->seal); + done_lh(&hint->lh); +} + +/* coord must be set properly. 
So, that reiser4_set_hint + has nothing to do */ +void reiser4_set_hint(hint_t * hint, const reiser4_key * key, + znode_lock_mode mode) +{ + ON_DEBUG(coord_t * coord = &hint->ext_coord.coord); + assert("vs-1207", WITH_DATA(coord->node, check_coord(coord, key))); + + reiser4_seal_init(&hint->seal, &hint->ext_coord.coord, key); + hint->offset = get_key_offset(key); + hint->mode = mode; + done_lh(&hint->lh); +} + +int hint_is_set(const hint_t * hint) +{ + return reiser4_seal_is_set(&hint->seal); +} + +#if REISER4_DEBUG +static int all_but_offset_key_eq(const reiser4_key * k1, const reiser4_key * k2) +{ + return (get_key_locality(k1) == get_key_locality(k2) && + get_key_type(k1) == get_key_type(k2) && + get_key_band(k1) == get_key_band(k2) && + get_key_ordering(k1) == get_key_ordering(k2) && + get_key_objectid(k1) == get_key_objectid(k2)); +} +#endif + +static int +hint_validate(hint_t * hint, const reiser4_key * key, int check_key, + znode_lock_mode lock_mode) +{ + if (!hint || !hint_is_set(hint) || hint->mode != lock_mode) + /* hint either not set or set by different operation */ + return RETERR(-E_REPEAT); + + assert("vs-1277", all_but_offset_key_eq(key, &hint->seal.key)); + + if (check_key && get_key_offset(key) != hint->offset) + /* hint is set for different key */ + return RETERR(-E_REPEAT); + + assert("vs-31", hint->ext_coord.lh == &hint->lh); + return reiser4_seal_validate(&hint->seal, &hint->ext_coord.coord, key, + hint->ext_coord.lh, lock_mode, + ZNODE_LOCK_LOPRI); +} + +/** + * Look for place at twig level for extent corresponding to page, + * call extent's writepage method to create unallocated extent if + * it does not exist yet, initialize jnode, capture page + */ +int find_or_create_extent(struct page *page) +{ + int result; + struct inode *inode; + int plugged_hole; + + jnode *node; + + assert("vs-1065", page->mapping && page->mapping->host); + inode = page->mapping->host; + + lock_page(page); + node = jnode_of_page(page); + if (IS_ERR(node)) { + unlock_page(page); + return PTR_ERR(node); + } + JF_SET(node, JNODE_WRITE_PREPARED); + unlock_page(page); + + if (node->blocknr == 0) { + plugged_hole = 0; + result = reiser4_update_extent(inode, node, page_offset(page), + &plugged_hole); + if (result) { + JF_CLR(node, JNODE_WRITE_PREPARED); + jput(node); + warning("edward-1549", + "reiser4_update_extent failed: %d", result); + return result; + } + if (plugged_hole) + reiser4_update_sd(inode); + } else { + spin_lock_jnode(node); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + } + + BUG_ON(node->atom == NULL); + JF_CLR(node, JNODE_WRITE_PREPARED); + + if (get_current_context()->entd) { + entd_context *ent = get_entd_context(node->tree->super); + + if (ent->cur_request->page == page) + /* the following reference will be + dropped in reiser4_writeout */ + ent->cur_request->node = jref(node); + } + jput(node); + return 0; +} + +/** + * has_anonymous_pages - check whether inode has pages dirtied via mmap + * @inode: inode to check + * + * Returns true if inode's mapping has dirty pages which do not belong to any + * atom. Those are either tagged PAGECACHE_TAG_REISER4_MOVED in mapping's page + * tree or were eflushed and can be found via jnodes tagged + * EFLUSH_TAG_ANONYMOUS in radix tree of jnodes. 
+ */ +static int has_anonymous_pages(struct inode *inode) +{ + int result; + + spin_lock_irq(&inode->i_mapping->tree_lock); + result = radix_tree_tagged(&inode->i_mapping->page_tree, PAGECACHE_TAG_REISER4_MOVED); + spin_unlock_irq(&inode->i_mapping->tree_lock); + return result; +} + +/** + * capture_page_and_create_extent - + * @page: page to be captured + * + * Grabs space for extent creation and stat data update and calls function to + * do actual work. + * Exclusive, or non-exclusive lock must be held. + */ +static int capture_page_and_create_extent(struct page *page) +{ + int result; + struct inode *inode; + + assert("vs-1084", page->mapping && page->mapping->host); + inode = page->mapping->host; + assert("vs-1139", + unix_file_inode_data(inode)->container == UF_CONTAINER_EXTENTS); + /* page belongs to file */ + assert("vs-1393", + inode->i_size > page_offset(page)); + + /* page capture may require extent creation (if it does not exist yet) + and stat data's update (number of blocks changes on extent + creation) */ + grab_space_enable(); + result = reiser4_grab_space(2 * estimate_one_insert_into_item + (reiser4_tree_by_inode(inode)), + BA_CAN_COMMIT); + if (likely(!result)) + result = find_or_create_extent(page); + + if (result != 0) + SetPageError(page); + return result; +} + +/* + * Support for "anonymous" pages and jnodes. + * + * When file is write-accessed through mmap pages can be dirtied from the user + * level. In this case kernel is not notified until one of following happens: + * + * (1) msync() + * + * (2) truncate() (either explicit or through unlink) + * + * (3) VM scanner starts reclaiming mapped pages, dirtying them before + * starting write-back. + * + * As a result of (3) ->writepage may be called on a dirty page without + * jnode. Such page is called "anonymous" in reiser4. Certain work-loads + * (iozone) generate huge number of anonymous pages. + * + * reiser4_sync_sb() method tries to insert anonymous pages into + * tree. This is done by capture_anonymous_*() functions below. + */ + +/** + * capture_anonymous_page - involve page into transaction + * @pg: page to deal with + * + * Takes care that @page has corresponding metadata in the tree, creates jnode + * for @page and captures it. On success 1 is returned. + */ +static int capture_anonymous_page(struct page *page) +{ + int result; + + if (PageWriteback(page)) + /* FIXME: do nothing? */ + return 0; + + result = capture_page_and_create_extent(page); + if (result == 0) { + result = 1; + } else + warning("nikita-3329", + "Cannot capture anon page: %i", result); + + return result; +} + +/** + * capture_anonymous_pages - find and capture pages dirtied via mmap + * @mapping: address space where to look for pages + * @index: start index + * @to_capture: maximum number of pages to capture + * + * Looks for pages tagged REISER4_MOVED starting from the *@index-th page, + * captures (involves into atom) them, returns number of captured pages, + * updates @index to next page after the last captured one. 
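+ * Pages that could not be captured get their PAGECACHE_TAG_REISER4_MOVED tag set back, so they are retried on a later writeback pass.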
+ */ +static int +capture_anonymous_pages(struct address_space *mapping, pgoff_t *index, + unsigned int to_capture) +{ + int result; + struct pagevec pvec; + unsigned int i, count; + int nr; + + pagevec_init(&pvec, 0); + count = min(pagevec_space(&pvec), to_capture); + nr = 0; + + /* find pages tagged MOVED */ + spin_lock_irq(&mapping->tree_lock); + pvec.nr = radix_tree_gang_lookup_tag(&mapping->page_tree, + (void **)pvec.pages, *index, count, + PAGECACHE_TAG_REISER4_MOVED); + if (pagevec_count(&pvec) == 0) { + /* + * there are no pages tagged MOVED in mapping->page_tree + * starting from *index + */ + spin_unlock_irq(&mapping->tree_lock); + *index = (pgoff_t)-1; + return 0; + } + + /* clear MOVED tag for all found pages */ + for (i = 0; i < pagevec_count(&pvec); i++) { + get_page(pvec.pages[i]); + radix_tree_tag_clear(&mapping->page_tree, pvec.pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + } + spin_unlock_irq(&mapping->tree_lock); + + + *index = pvec.pages[i - 1]->index + 1; + + for (i = 0; i < pagevec_count(&pvec); i++) { + result = capture_anonymous_page(pvec.pages[i]); + if (result == 1) + nr++; + else { + if (result < 0) { + warning("vs-1454", + "failed to capture page: " + "result=%d, captured=%d)\n", + result, i); + + /* + * set MOVED tag to all pages which left not + * captured + */ + spin_lock_irq(&mapping->tree_lock); + for (; i < pagevec_count(&pvec); i ++) { + radix_tree_tag_set(&mapping->page_tree, + pvec.pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + } + spin_unlock_irq(&mapping->tree_lock); + + pagevec_release(&pvec); + return result; + } else { + /* + * result == 0. capture_anonymous_page returns + * 0 for Writeback-ed page. Set MOVED tag on + * that page + */ + spin_lock_irq(&mapping->tree_lock); + radix_tree_tag_set(&mapping->page_tree, + pvec.pages[i]->index, + PAGECACHE_TAG_REISER4_MOVED); + spin_unlock_irq(&mapping->tree_lock); + if (i == 0) + *index = pvec.pages[0]->index; + else + *index = pvec.pages[i - 1]->index + 1; + } + } + } + pagevec_release(&pvec); + return nr; +} + +/** + * capture_anonymous_jnodes - find and capture anonymous jnodes + * @mapping: address space where to look for jnodes + * @from: start index + * @to: end index + * @to_capture: maximum number of jnodes to capture + * + * Looks for jnodes tagged EFLUSH_TAG_ANONYMOUS in inode's tree of jnodes in + * the range of indexes @from-@to and captures them, returns number of captured + * jnodes, updates @from to next jnode after the last captured one. + */ +static int +capture_anonymous_jnodes(struct address_space *mapping, + pgoff_t *from, pgoff_t to, int to_capture) +{ + *from = to; + return 0; +} + +/* + * Commit atom of the jnode of a page. + */ +static int sync_page(struct page *page) +{ + int result; + do { + jnode *node; + txn_atom *atom; + + lock_page(page); + node = jprivate(page); + if (node != NULL) { + spin_lock_jnode(node); + atom = jnode_get_atom(node); + spin_unlock_jnode(node); + } else + atom = NULL; + unlock_page(page); + result = reiser4_sync_atom(atom); + } while (result == -E_REPEAT); + /* + * ZAM-FIXME-HANS: document the logic of this loop, is it just to + * handle the case where more pages get added to the atom while we are + * syncing it? + */ + assert("nikita-3485", ergo(result == 0, + get_current_context()->trans->atom == NULL)); + return result; +} + +/* + * Commit atoms of pages on @pages list. 
+ * call sync_page for each page from mapping's page tree + */ +static int sync_page_list(struct inode *inode) +{ + int result; + struct address_space *mapping; + unsigned long from; /* start index for radix_tree_gang_lookup */ + unsigned int found; /* return value for radix_tree_gang_lookup */ + + mapping = inode->i_mapping; + from = 0; + result = 0; + spin_lock_irq(&mapping->tree_lock); + while (result == 0) { + struct page *page; + + found = + radix_tree_gang_lookup(&mapping->page_tree, (void **)&page, + from, 1); + assert("edward-1550", found < 2); + if (found == 0) + break; + /** + * page may not leave radix tree because it is protected from + * truncating by inode->i_mutex locked by sys_fsync + */ + get_page(page); + spin_unlock_irq(&mapping->tree_lock); + + from = page->index + 1; + + result = sync_page(page); + + put_page(page); + spin_lock_irq(&mapping->tree_lock); + } + + spin_unlock_irq(&mapping->tree_lock); + return result; +} + +static int commit_file_atoms(struct inode *inode) +{ + int result; + struct unix_file_info *uf_info; + + uf_info = unix_file_inode_data(inode); + + get_exclusive_access(uf_info); + /* + * find what items file is made from + */ + result = find_file_state(inode, uf_info); + drop_exclusive_access(uf_info); + if (result != 0) + return result; + + /* + * file state cannot change because we are under ->i_mutex + */ + switch (uf_info->container) { + case UF_CONTAINER_EXTENTS: + /* find_file_state might open join an atom */ + reiser4_txn_restart_current(); + result = + /* + * when we are called by + * filemap_fdatawrite-> + * do_writepages()-> + * reiser4_writepages_dispatch() + * + * inode->i_mapping->dirty_pages are spices into + * ->io_pages, leaving ->dirty_pages dirty. + * + * When we are called from + * reiser4_fsync()->sync_unix_file(), we have to + * commit atoms of all pages on the ->dirty_list. + * + * So for simplicity we just commit ->io_pages and + * ->dirty_pages. + */ + sync_page_list(inode); + break; + case UF_CONTAINER_TAILS: + /* + * NOTE-NIKITA probably we can be smarter for tails. For now + * just commit all existing atoms. + */ + result = txnmgr_force_commit_all(inode->i_sb, 0); + break; + case UF_CONTAINER_EMPTY: + result = 0; + break; + case UF_CONTAINER_UNKNOWN: + default: + result = -EIO; + break; + } + + /* + * commit current transaction: there can be captured nodes from + * find_file_state() and finish_conversion(). + */ + reiser4_txn_restart_current(); + return result; +} + +/** + * writepages_unix_file - writepages of struct address_space_operations + * @mapping: + * @wbc: + * + * This captures anonymous pages and anonymous jnodes. Anonymous pages are + * pages which are dirtied via mmapping. Anonymous jnodes are ones which were + * created by reiser4_writepage. 
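+ * Capturing attaches them to an atom, so that a later atom commit writes them to disk.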
+ */ +int writepages_unix_file(struct address_space *mapping, + struct writeback_control *wbc) +{ + int result; + struct unix_file_info *uf_info; + pgoff_t pindex, jindex, nr_pages; + long to_capture; + struct inode *inode; + + inode = mapping->host; + if (!has_anonymous_pages(inode)) { + result = 0; + goto end; + } + jindex = pindex = wbc->range_start >> PAGE_SHIFT; + result = 0; + nr_pages = size_in_pages(i_size_read(inode)); + + uf_info = unix_file_inode_data(inode); + + do { + reiser4_context *ctx; + + if (wbc->sync_mode != WB_SYNC_ALL) + to_capture = min(wbc->nr_to_write, CAPTURE_APAGE_BURST); + else + to_capture = CAPTURE_APAGE_BURST; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + result = PTR_ERR(ctx); + break; + } + /* avoid recursive calls to ->sync_inodes */ + ctx->nobalance = 1; + assert("zam-760", lock_stack_isclean(get_current_lock_stack())); + assert("edward-1551", LOCK_CNT_NIL(inode_sem_w)); + assert("edward-1552", LOCK_CNT_NIL(inode_sem_r)); + + reiser4_txn_restart_current(); + + /* we have to get nonexclusive access to the file */ + if (get_current_context()->entd) { + /* + * use nonblocking version of nonexclusive_access to + * avoid deadlock which might look like the following: + * process P1 holds NEA on file F1 and called entd to + * reclaim some memory. Entd works for P1 and is going + * to capture pages of file F2. To do that entd has to + * get NEA to F2. F2 is held by process P2 which also + * called entd. But entd is serving P1 at the moment + * and P2 has to wait. Process P3 trying to get EA to + * file F2. Existence of pending EA request to file F2 + * makes impossible for entd to get NEA to file + * F2. Neither of these process can continue. Using + * nonblocking version of gettign NEA is supposed to + * avoid this deadlock. 
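+ * If the nonblocking attempt fails, we bail out with -EBUSY just below.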
+ */ + if (try_to_get_nonexclusive_access(uf_info) == 0) { + result = RETERR(-EBUSY); + reiser4_exit_context(ctx); + break; + } + } else + get_nonexclusive_access(uf_info); + + while (to_capture > 0) { + pgoff_t start; + + assert("vs-1727", jindex <= pindex); + if (pindex == jindex) { + start = pindex; + result = + capture_anonymous_pages(inode->i_mapping, + &pindex, + to_capture); + if (result <= 0) + break; + to_capture -= result; + wbc->nr_to_write -= result; + if (start + result == pindex) { + jindex = pindex; + continue; + } + if (to_capture <= 0) + break; + } + /* deal with anonymous jnodes between jindex and pindex */ + result = + capture_anonymous_jnodes(inode->i_mapping, &jindex, + pindex, to_capture); + if (result < 0) + break; + to_capture -= result; + get_current_context()->nr_captured += result; + + if (jindex == (pgoff_t) - 1) { + assert("vs-1728", pindex == (pgoff_t) - 1); + break; + } + } + if (to_capture <= 0) + /* there may be left more pages */ + __mark_inode_dirty(inode, I_DIRTY_PAGES); + + drop_nonexclusive_access(uf_info); + if (result < 0) { + /* error happened */ + reiser4_exit_context(ctx); + return result; + } + if (wbc->sync_mode != WB_SYNC_ALL) { + reiser4_exit_context(ctx); + return 0; + } + result = commit_file_atoms(inode); + reiser4_exit_context(ctx); + if (pindex >= nr_pages && jindex == pindex) + break; + } while (1); + + end: + if (is_in_reiser4_context()) { + if (get_current_context()->nr_captured >= CAPTURE_APAGE_BURST) { + /* + * there are already pages to flush, flush them out, do + * not delay until end of reiser4_sync_inodes + */ + reiser4_writeout(inode->i_sb, wbc); + get_current_context()->nr_captured = 0; + } + } + return result; +} + +/** + * readpage_unix_file_nolock - readpage of struct address_space_operations + * @file: + * @page: + * + * Compose a key and search for item containing information about @page + * data. If item is found - its readpage method is called. + */ +int readpage_unix_file(struct file *file, struct page *page) +{ + reiser4_context *ctx; + int result; + struct inode *inode; + reiser4_key key; + item_plugin *iplug; + hint_t *hint; + lock_handle *lh; + coord_t *coord; + + assert("vs-1062", PageLocked(page)); + assert("vs-976", !PageUptodate(page)); + assert("vs-1061", page->mapping && page->mapping->host); + + if (page->mapping->host->i_size <= page_offset(page)) { + /* page is out of file */ + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + + inode = page->mapping->host; + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + unlock_page(page); + return PTR_ERR(ctx); + } + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) { + unlock_page(page); + reiser4_exit_context(ctx); + return RETERR(-ENOMEM); + } + + result = load_file_hint(file, hint); + if (result) { + kfree(hint); + unlock_page(page); + reiser4_exit_context(ctx); + return result; + } + lh = &hint->lh; + + /* get key of first byte of the page */ + key_by_inode_and_offset_common(inode, page_offset(page), &key); + + /* look for file metadata corresponding to first byte of page */ + get_page(page); + unlock_page(page); + result = find_file_item(hint, &key, ZNODE_READ_LOCK, inode); + lock_page(page); + put_page(page); + + if (page->mapping == NULL) { + /* + * readpage allows truncate to run concurrently. 
Page was + * truncated while it was not locked + */ + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return -EINVAL; + } + + if (result != CBK_COORD_FOUND || hint->ext_coord.coord.between != AT_UNIT) { + if (result == CBK_COORD_FOUND && + hint->ext_coord.coord.between != AT_UNIT) + /* file is truncated */ + result = -EINVAL; + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; + } + + /* + * item corresponding to page is found. It can not be removed because + * znode lock is held + */ + if (PageUptodate(page)) { + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return 0; + } + + coord = &hint->ext_coord.coord; + result = zload(coord->node); + if (result) { + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; + } + + validate_extended_coord(&hint->ext_coord, page_offset(page)); + + if (!coord_is_existing_unit(coord)) { + /* this indicates corruption */ + warning("vs-280", + "Looking for page %lu of file %llu (size %lli). " + "No file items found (%d). File is corrupted?\n", + page->index, (unsigned long long)get_inode_oid(inode), + inode->i_size, result); + zrelse(coord->node); + done_lh(lh); + kfree(hint); + unlock_page(page); + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return RETERR(-EIO); + } + + /* + * get plugin of found item or use plugin if extent if there are no + * one + */ + iplug = item_plugin_by_coord(coord); + if (iplug->s.file.readpage) + result = iplug->s.file.readpage(coord, page); + else + result = RETERR(-EINVAL); + + if (!result) { + set_key_offset(&key, + (loff_t) (page->index + 1) << PAGE_SHIFT); + /* FIXME should call reiser4_set_hint() */ + reiser4_unset_hint(hint); + } else { + unlock_page(page); + reiser4_unset_hint(hint); + } + assert("vs-979", + ergo(result == 0, (PageLocked(page) || PageUptodate(page)))); + assert("vs-9791", ergo(result != 0, !PageLocked(page))); + + zrelse(coord->node); + done_lh(lh); + + save_file_hint(file, hint); + kfree(hint); + + /* + * FIXME: explain why it is needed. HINT: page allocation in write can + * not be done when atom is not NULL because reiser4_writepage can not + * kick entd and have to eflush + */ + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return result; +} + +struct uf_readpages_context { + lock_handle lh; + coord_t coord; +}; + +/* + * A callback function for readpages_unix_file/read_cache_pages. + * We don't take non-exclusive access. If an item different from + * extent pointer is found in some iteration, then return error + * (-EINVAL). + * + * @data -- a pointer to reiser4_readpages_context object, + * to save the twig lock and the coord between + * read_cache_page iterations. + * @page -- page to start read. + */ +static int readpages_filler(void * data, struct page * page) +{ + struct uf_readpages_context *rc = data; + jnode * node; + int ret = 0; + reiser4_extent *ext; + __u64 ext_index; + int cbk_done = 0; + struct address_space *mapping = page->mapping; + + if (PageUptodate(page)) { + unlock_page(page); + return 0; + } + get_page(page); + + if (rc->lh.node == 0) { + /* no twig lock - have to do tree search. 
*/ + reiser4_key key; + repeat: + unlock_page(page); + key_by_inode_and_offset_common( + mapping->host, page_offset(page), &key); + ret = coord_by_key( + &get_super_private(mapping->host->i_sb)->tree, + &key, &rc->coord, &rc->lh, + ZNODE_READ_LOCK, FIND_EXACT, + TWIG_LEVEL, TWIG_LEVEL, CBK_UNIQUE, NULL); + if (unlikely(ret)) + goto exit; + lock_page(page); + if (PageUptodate(page)) + goto unlock; + cbk_done = 1; + } + ret = zload(rc->coord.node); + if (unlikely(ret)) + goto unlock; + if (!coord_is_existing_item(&rc->coord)) { + zrelse(rc->coord.node); + ret = RETERR(-ENOENT); + goto unlock; + } + if (!item_is_extent(&rc->coord)) { + /* + * ->readpages() is not + * defined for tail items + */ + zrelse(rc->coord.node); + ret = RETERR(-EINVAL); + goto unlock; + } + ext = extent_by_coord(&rc->coord); + ext_index = extent_unit_index(&rc->coord); + if (page->index < ext_index || + page->index >= ext_index + extent_get_width(ext)) { + /* the page index doesn't belong to the extent unit + which the coord points to - release the lock and + repeat with tree search. */ + zrelse(rc->coord.node); + done_lh(&rc->lh); + /* we can be here after a CBK call only in case of + corruption of the tree or the tree lookup algorithm bug. */ + if (unlikely(cbk_done)) { + ret = RETERR(-EIO); + goto unlock; + } + goto repeat; + } + node = jnode_of_page(page); + if (unlikely(IS_ERR(node))) { + zrelse(rc->coord.node); + ret = PTR_ERR(node); + goto unlock; + } + ret = reiser4_do_readpage_extent(ext, page->index - ext_index, page); + jput(node); + zrelse(rc->coord.node); + if (likely(!ret)) + goto exit; + unlock: + unlock_page(page); + exit: + put_page(page); + return ret; +} + +/** + * readpages_unix_file - called by the readahead code, starts reading for each + * page of given list of pages + */ +int readpages_unix_file(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + reiser4_context *ctx; + struct uf_readpages_context rc; + int ret; + + ctx = reiser4_init_context(mapping->host->i_sb); + if (IS_ERR(ctx)) { + put_pages_list(pages); + return PTR_ERR(ctx); + } + init_lh(&rc.lh); + ret = read_cache_pages(mapping, pages, readpages_filler, &rc); + done_lh(&rc.lh); + + context_set_commit_async(ctx); + /* close the transaction to protect further page allocation from deadlocks */ + reiser4_txn_restart(ctx); + reiser4_exit_context(ctx); + return ret; +} + +static reiser4_block_nr unix_file_estimate_read(struct inode *inode, + loff_t count UNUSED_ARG) +{ + /* We should reserve one block, because of updating of the stat data + item */ + assert("vs-1249", + inode_file_plugin(inode)->estimate.update == + estimate_update_common); + return estimate_update_common(inode); +} + +/* this is called with nonexclusive access obtained, + file's container can not change */ +static ssize_t do_read_compound_file(hint_t *hint, struct file *file, + char __user *buf, size_t count, + loff_t *off) +{ + int result; + struct inode *inode; + flow_t flow; + coord_t *coord; + znode *loaded; + + inode = file_inode(file); + + /* build flow */ + assert("vs-1250", + inode_file_plugin(inode)->flow_by_inode == + flow_by_inode_unix_file); + result = flow_by_inode_unix_file(inode, buf, 1 /* user space */, + count, *off, READ_OP, &flow); + if (unlikely(result)) + return result; + + /* get seal and coord sealed with it from reiser4 private data + of struct file. The coord will tell us where our last read + of this file finished, and the seal will help to determine + if that location is still valid. 
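+ If the seal is no longer valid, find_file_item() falls back to a full tree lookup.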
+ */ + coord = &hint->ext_coord.coord; + while (flow.length && result == 0) { + result = find_file_item(hint, &flow.key, + ZNODE_READ_LOCK, inode); + if (cbk_errored(result)) + /* error happened */ + break; + + if (coord->between != AT_UNIT) { + /* there were no items corresponding to given offset */ + done_lh(hint->ext_coord.lh); + break; + } + + loaded = coord->node; + result = zload(loaded); + if (unlikely(result)) { + done_lh(hint->ext_coord.lh); + break; + } + + if (hint->ext_coord.valid == 0) + validate_extended_coord(&hint->ext_coord, + get_key_offset(&flow.key)); + + assert("vs-4", hint->ext_coord.valid == 1); + assert("vs-33", hint->ext_coord.lh == &hint->lh); + /* call item's read method */ + result = item_plugin_by_coord(coord)->s.file.read(file, + &flow, + hint); + zrelse(loaded); + done_lh(hint->ext_coord.lh); + } + return (count - flow.length) ? (count - flow.length) : result; +} + +static ssize_t read_compound_file(struct file*, char __user*, size_t, loff_t*); + +/** + * unix-file specific ->read() method + * of struct file_operations. + */ +ssize_t read_unix_file(struct file *file, char __user *buf, + size_t read_amount, loff_t *off) +{ + reiser4_context *ctx; + ssize_t result; + struct inode *inode; + struct unix_file_info *uf_info; + + if (unlikely(read_amount == 0)) + return 0; + + inode = file_inode(file); + assert("vs-972", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + result = reiser4_grab_space_force(unix_file_estimate_read(inode, + read_amount), BA_CAN_COMMIT); + if (unlikely(result != 0)) + goto out2; + + uf_info = unix_file_inode_data(inode); + + if (uf_info->container == UF_CONTAINER_UNKNOWN) { + get_exclusive_access(uf_info); + result = find_file_state(inode, uf_info); + if (unlikely(result != 0)) + goto out; + } + else + get_nonexclusive_access(uf_info); + + switch (uf_info->container) { + case UF_CONTAINER_EXTENTS: + if (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + result = new_sync_read(file, buf, read_amount, off); + break; + } + case UF_CONTAINER_TAILS: + case UF_CONTAINER_UNKNOWN: + result = read_compound_file(file, buf, read_amount, off); + break; + case UF_CONTAINER_EMPTY: + result = 0; + } + out: + drop_access(uf_info); + out2: + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* + * Read a file, which contains tails and, maybe, + * extents. + * + * Sometimes file can consist of items of both types + * (extents and tails). It can happen, e.g. because + * of failed tail conversion. Also the conversion code + * may release exclusive lock before calling + * balance_dirty_pages(). + * + * In this case applying a generic VFS library function + * would be suboptimal. We use our own "light-weigth" + * version below. 
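+ * It copies data in page-aligned chunks, re-using the cached hint between iterations.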
+ */ +static ssize_t read_compound_file(struct file *file, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t result = 0; + struct inode *inode; + hint_t *hint; + struct unix_file_info *uf_info; + size_t to_read; + size_t was_read = 0; + loff_t i_size; + + inode = file_inode(file); + assert("vs-972", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + i_size = i_size_read(inode); + if (*off >= i_size) + /* position to read from is past the end of file */ + goto exit; + if (*off + count > i_size) + count = i_size - *off; + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) + return RETERR(-ENOMEM); + + result = load_file_hint(file, hint); + if (result) { + kfree(hint); + return result; + } + uf_info = unix_file_inode_data(inode); + + /* read by page-aligned chunks */ + to_read = PAGE_SIZE - (*off & (loff_t)(PAGE_SIZE - 1)); + if (to_read > count) + to_read = count; + while (count > 0) { + reiser4_txn_restart_current(); + /* + * faultin user page + */ + result = fault_in_pages_writeable(buf, to_read); + if (result) + return RETERR(-EFAULT); + + result = do_read_compound_file(hint, file, buf, to_read, off); + if (result < 0) + break; + count -= result; + buf += result; + + /* update position in a file */ + *off += result; + /* total number of read bytes */ + was_read += result; + to_read = count; + if (to_read > PAGE_SIZE) + to_read = PAGE_SIZE; + } + done_lh(&hint->lh); + save_file_hint(file, hint); + kfree(hint); + if (was_read) + file_accessed(file); + exit: + return was_read ? was_read : result; +} + +/* This function takes care about @file's pages. First of all it checks if + filesystems readonly and if so gets out. Otherwise, it throws out all + pages of file if it was mapped for read and going to be mapped for write + and consists of tails. This is done in order to not manage few copies + of the data (first in page cache and second one in tails them selves) + for the case of mapping files consisting tails. + + Here also tail2extent conversion is performed if it is allowed and file + is going to be written or mapped for write. This functions may be called + from write_unix_file() or mmap_unix_file(). */ +static int check_pages_unix_file(struct file *file, struct inode *inode) +{ + reiser4_invalidate_pages(inode->i_mapping, 0, + (inode->i_size + PAGE_SIZE - + 1) >> PAGE_SHIFT, 0); + return unpack(file, inode, 0 /* not forever */ ); +} + +/** + * mmap_unix_file - mmap of struct file_operations + * @file: file to mmap + * @vma: + * + * This is implementation of vfs's mmap method of struct file_operations for + * unix file plugin. It converts file to extent if necessary. Sets + * reiser4_inode's flag - REISER4_HAS_MMAP. + */ +int mmap_unix_file(struct file *file, struct vm_area_struct *vma) +{ + reiser4_context *ctx; + int result; + struct inode *inode; + struct unix_file_info *uf_info; + reiser4_block_nr needed; + + inode = file_inode(file); + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + uf_info = unix_file_inode_data(inode); + + get_exclusive_access_careful(uf_info, inode); + + if (!IS_RDONLY(inode) && (vma->vm_flags & (VM_MAYWRITE | VM_SHARED))) { + /* + * we need file built of extent items. If it is still built of + * tail items we have to convert it. 
Find what items the file + * is built of + */ + result = find_file_state(inode, uf_info); + if (result != 0) { + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; + } + + assert("vs-1648", (uf_info->container == UF_CONTAINER_TAILS || + uf_info->container == UF_CONTAINER_EXTENTS || + uf_info->container == UF_CONTAINER_EMPTY)); + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * invalidate all pages and convert file from tails to + * extents + */ + result = check_pages_unix_file(file, inode); + if (result) { + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; + } + } + } + + /* + * generic_file_mmap will do update_atime. Grab space for stat data + * update. + */ + needed = inode_file_plugin(inode)->estimate.update(inode); + result = reiser4_grab_space_force(needed, BA_CAN_COMMIT); + if (result) { + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; + } + + result = generic_file_mmap(file, vma); + if (result == 0) { + /* mark file as having mapping. */ + reiser4_inode_set_flag(inode, REISER4_HAS_MMAP); + } + + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; +} + +/** + * find_first_item + * @inode: + * + * Finds file item which is responsible for first byte in the file. + */ +static int find_first_item(struct inode *inode) +{ + coord_t coord; + lock_handle lh; + reiser4_key key; + int result; + + coord_init_zero(&coord); + init_lh(&lh); + inode_file_plugin(inode)->key_by_inode(inode, 0, &key); + result = find_file_item_nohint(&coord, &lh, &key, ZNODE_READ_LOCK, + inode); + if (result == CBK_COORD_FOUND) { + if (coord.between == AT_UNIT) { + result = zload(coord.node); + if (result == 0) { + result = item_id_by_coord(&coord); + zrelse(coord.node); + if (result != EXTENT_POINTER_ID && + result != FORMATTING_ID) + result = RETERR(-EIO); + } + } else + result = RETERR(-EIO); + } + done_lh(&lh); + return result; +} + +/** + * open_unix_file + * @inode: + * @file: + * + * If filesystem is not readonly - complete uncompleted tail conversion if + * there was one + */ +int open_unix_file(struct inode *inode, struct file *file) +{ + int result; + reiser4_context *ctx; + struct unix_file_info *uf_info; + + if (IS_RDONLY(inode)) + return 0; + + if (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) + return 0; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + uf_info = unix_file_inode_data(inode); + + get_exclusive_access_careful(uf_info, inode); + + if (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + /* + * other process completed the conversion + */ + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return 0; + } + + /* + * file left in semi converted state after unclean shutdown or another + * thread is doing conversion and dropped exclusive access which doing + * balance dirty pages. Complete the conversion + */ + result = find_first_item(inode); + if (result == EXTENT_POINTER_ID) + /* + * first item is extent, therefore there was incomplete + * tail2extent conversion. Complete it + */ + result = tail2extent(unix_file_inode_data(inode)); + else if (result == FORMATTING_ID) + /* + * first item is formatting item, therefore there was + * incomplete extent2tail conversion. 
Complete it + */ + result = extent2tail(file, unix_file_inode_data(inode)); + else + result = -EIO; + + assert("vs-1712", + ergo(result == 0, + (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED) && + !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)))); + drop_exclusive_access(uf_info); + reiser4_exit_context(ctx); + return result; +} + +#define NEITHER_OBTAINED 0 +#define EA_OBTAINED 1 +#define NEA_OBTAINED 2 + +static void drop_access(struct unix_file_info *uf_info) +{ + if (uf_info->exclusive_use) + drop_exclusive_access(uf_info); + else + drop_nonexclusive_access(uf_info); +} + +#define debug_wuf(format, ...) printk("%s: %d: %s: " format "\n", \ + __FILE__, __LINE__, __FUNCTION__, ## __VA_ARGS__) + +/** + * write_unix_file - private ->write() method of unix_file plugin. + * + * @file: file to write to + * @buf: address of user-space buffer + * @count: number of bytes to write + * @pos: position in file to write to + * @cont: unused argument, as we don't perform plugin conversion when being + * managed by unix_file plugin. + */ +ssize_t write_unix_file(struct file *file, + const char __user *buf, + size_t count, loff_t *pos, + struct dispatch_context *cont) +{ + int result; + reiser4_context *ctx; + struct inode *inode; + struct unix_file_info *uf_info; + ssize_t written; + int to_write = PAGE_SIZE * WRITE_GRANULARITY; + size_t left; + ssize_t (*write_op)(struct file *, struct inode *, + const char __user *, size_t, + loff_t *pos); + int ea; + int enospc = 0; /* item plugin ->write() returned ENOSPC */ + loff_t new_size; + + ctx = get_current_context(); + inode = file_inode(file); + + assert("vs-947", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + assert("vs-9471", (!reiser4_inode_get_flag(inode, REISER4_PART_MIXED))); + + result = file_remove_privs(file); + if (result) { + context_set_commit_async(ctx); + return result; + } + /* remove_suid might create a transaction */ + reiser4_txn_restart(ctx); + + uf_info = unix_file_inode_data(inode); + + written = 0; + left = count; + ea = NEITHER_OBTAINED; + enospc = 0; + + new_size = i_size_read(inode); + if (*pos + count > new_size) + new_size = *pos + count; + + while (left) { + int update_sd = 0; + if (left < to_write) + to_write = left; + + if (uf_info->container == UF_CONTAINER_EMPTY) { + get_exclusive_access(uf_info); + ea = EA_OBTAINED; + if (uf_info->container != UF_CONTAINER_EMPTY) { + /* file is made not empty by another process */ + drop_exclusive_access(uf_info); + ea = NEITHER_OBTAINED; + continue; + } + } else if (uf_info->container == UF_CONTAINER_UNKNOWN) { + /* + * get exclusive access directly just to not have to + * re-obtain it if file will appear empty + */ + get_exclusive_access(uf_info); + ea = EA_OBTAINED; + result = find_file_state(inode, uf_info); + if (result) { + drop_exclusive_access(uf_info); + ea = NEITHER_OBTAINED; + break; + } + } else { + get_nonexclusive_access(uf_info); + ea = NEA_OBTAINED; + } + + /* either EA or NEA is obtained. 
Choose item write method */ + if (uf_info->container == UF_CONTAINER_EXTENTS) { + /* file is built of extent items */ + write_op = reiser4_write_extent; + } else if (uf_info->container == UF_CONTAINER_EMPTY) { + /* file is empty */ + if (should_have_notail(uf_info, new_size)) + write_op = reiser4_write_extent; + else + write_op = reiser4_write_tail; + } else { + /* file is built of tail items */ + if (should_have_notail(uf_info, new_size)) { + if (ea == NEA_OBTAINED) { + drop_nonexclusive_access(uf_info); + get_exclusive_access(uf_info); + ea = EA_OBTAINED; + } + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * if file is being convered by another + * process - wait until it completes + */ + while (1) { + if (reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)) { + drop_exclusive_access(uf_info); + schedule(); + get_exclusive_access(uf_info); + continue; + } + break; + } + if (uf_info->container == UF_CONTAINER_TAILS) { + result = tail2extent(uf_info); + if (result) { + drop_exclusive_access(uf_info); + context_set_commit_async(ctx); + break; + } + } + } + drop_exclusive_access(uf_info); + ea = NEITHER_OBTAINED; + continue; + } + write_op = reiser4_write_tail; + } + + written = write_op(file, inode, buf, to_write, pos); + if (written == -ENOSPC && !enospc) { + drop_access(uf_info); + txnmgr_force_commit_all(inode->i_sb, 0); + enospc = 1; + continue; + } + if (written < 0) { + /* + * If this is -ENOSPC, then it happened + * second time, so don't try to free space + * once again. + */ + drop_access(uf_info); + result = written; + break; + } + /* something is written. */ + if (enospc) + enospc = 0; + if (uf_info->container == UF_CONTAINER_EMPTY) { + assert("edward-1553", ea == EA_OBTAINED); + uf_info->container = + (write_op == reiser4_write_extent) ? + UF_CONTAINER_EXTENTS : UF_CONTAINER_TAILS; + } + assert("edward-1554", + ergo(uf_info->container == UF_CONTAINER_EXTENTS, + write_op == reiser4_write_extent)); + assert("edward-1555", + ergo(uf_info->container == UF_CONTAINER_TAILS, + write_op == reiser4_write_tail)); + if (*pos + written > inode->i_size) { + INODE_SET_FIELD(inode, i_size, *pos + written); + update_sd = 1; + } + if (!IS_NOCMTIME(inode)) { + inode->i_ctime = inode->i_mtime = current_time(inode); + update_sd = 1; + } + if (update_sd) { + /* + * space for update_sd was reserved in write_op + */ + result = reiser4_update_sd(inode); + if (result) { + warning("edward-1574", + "Can not update stat-data: %i. FSCK?", + result); + drop_access(uf_info); + context_set_commit_async(ctx); + break; + } + } + drop_access(uf_info); + ea = NEITHER_OBTAINED; + + /* + * tell VM how many pages were dirtied. Maybe number of pages + * which were dirty already should not be counted + */ + reiser4_throttle_write(inode); + left -= written; + buf += written; + *pos += written; + } + if (result == 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + reiser4_txn_restart_current(); + grab_space_enable(); + result = reiser4_sync_file_common(file, 0, LONG_MAX, + 0 /* data and stat data */); + if (result) + warning("reiser4-7", "failed to sync file %llu", + (unsigned long long)get_inode_oid(inode)); + } + /* + * return number of written bytes or error code if nothing is + * written. Note, that it does not work correctly in case when + * sync_unix_file returns error + */ + return (count - left) ? 
(count - left) : result; +} + +/** + * release_unix_file - release of struct file_operations + * @inode: inode of released file + * @file: file to release + * + * Implementation of release method of struct file_operations for unix file + * plugin. If last reference to indode is released - convert all extent items + * into tail items if necessary. Frees reiser4 specific file data. + */ +int release_unix_file(struct inode *inode, struct file *file) +{ + reiser4_context *ctx; + struct unix_file_info *uf_info; + int result; + int in_reiser4; + + in_reiser4 = is_in_reiser4_context(); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + result = 0; + if (in_reiser4 == 0) { + uf_info = unix_file_inode_data(inode); + + get_exclusive_access_careful(uf_info, inode); + if (file->f_path.dentry->d_lockref.count == 1 && + uf_info->container == UF_CONTAINER_EXTENTS && + !should_have_notail(uf_info, inode->i_size) && + !rofs_inode(inode)) { + result = extent2tail(file, uf_info); + if (result != 0) { + context_set_commit_async(ctx); + warning("nikita-3233", + "Failed (%d) to convert in %s (%llu)", + result, __FUNCTION__, + (unsigned long long) + get_inode_oid(inode)); + } + } + drop_exclusive_access(uf_info); + } else { + /* + we are within reiser4 context already. How latter is + possible? Simple: + + (gdb) bt + #0 get_exclusive_access () + #2 0xc01e56d3 in release_unix_file () + #3 0xc01c3643 in reiser4_release () + #4 0xc014cae0 in __fput () + #5 0xc013ffc3 in remove_vm_struct () + #6 0xc0141786 in exit_mmap () + #7 0xc0118480 in mmput () + #8 0xc0133205 in oom_kill () + #9 0xc01332d1 in out_of_memory () + #10 0xc013bc1d in try_to_free_pages () + #11 0xc013427b in __alloc_pages () + #12 0xc013f058 in do_anonymous_page () + #13 0xc013f19d in do_no_page () + #14 0xc013f60e in handle_mm_fault () + #15 0xc01131e5 in do_page_fault () + #16 0xc0104935 in error_code () + #17 0xc025c0c6 in __copy_to_user_ll () + #18 0xc01d496f in reiser4_read_tail () + #19 0xc01e4def in read_unix_file () + #20 0xc01c3504 in reiser4_read () + #21 0xc014bd4f in vfs_read () + #22 0xc014bf66 in sys_read () + */ + warning("vs-44", "out of memory?"); + } + + reiser4_free_file_fsdata(file); + + reiser4_exit_context(ctx); + return result; +} + +static void set_file_notail(struct inode *inode) +{ + reiser4_inode *state; + formatting_plugin *tplug; + + state = reiser4_inode_data(inode); + tplug = formatting_plugin_by_id(NEVER_TAILS_FORMATTING_ID); + force_plugin_pset(inode, PSET_FORMATTING, (reiser4_plugin *)tplug); +} + +/* if file is built of tails - convert it to extents */ +static int unpack(struct file *filp, struct inode *inode, int forever) +{ + int result = 0; + struct unix_file_info *uf_info; + + uf_info = unix_file_inode_data(inode); + assert("vs-1628", ea_obtained(uf_info)); + + result = find_file_state(inode, uf_info); + if (result) + return result; + assert("vs-1074", uf_info->container != UF_CONTAINER_UNKNOWN); + + if (uf_info->container == UF_CONTAINER_TAILS) { + /* + * if file is being convered by another process - wait until it + * completes + */ + while (1) { + if (reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)) { + drop_exclusive_access(uf_info); + schedule(); + get_exclusive_access(uf_info); + continue; + } + break; + } + if (uf_info->container == UF_CONTAINER_TAILS) { + result = tail2extent(uf_info); + if (result) + return result; + } + } + if (forever) { + /* safe new formatting plugin in stat data */ + __u64 tograb; + + set_file_notail(inode); + + grab_space_enable(); + 
tograb = inode_file_plugin(inode)->estimate.update(inode);
+		result = reiser4_grab_space(tograb, BA_CAN_COMMIT);
+		if (result == 0)
+			result = reiser4_update_sd(inode);
+	}
+
+	return result;
+}
+
+/* Implementation of vfs' ioctl method of struct file_operations for unix file
+   plugin
+*/
+int ioctl_unix_file(struct file *filp, unsigned int cmd,
+		    unsigned long arg UNUSED_ARG)
+{
+	reiser4_context *ctx;
+	int result;
+	struct inode *inode = filp->f_path.dentry->d_inode;
+
+	ctx = reiser4_init_context(inode->i_sb);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	switch (cmd) {
+	case REISER4_IOC_UNPACK:
+		get_exclusive_access(unix_file_inode_data(inode));
+		result = unpack(filp, inode, 1 /* forever */ );
+		drop_exclusive_access(unix_file_inode_data(inode));
+		break;
+
+	default:
+		result = RETERR(-ENOTTY);
+		break;
+	}
+	reiser4_exit_context(ctx);
+	return result;
+}
+
+/* Implementation of vfs' bmap method of struct address_space_operations for
+   unix file plugin
+*/
+sector_t bmap_unix_file(struct address_space * mapping, sector_t lblock)
+{
+	reiser4_context *ctx;
+	sector_t result;
+	reiser4_key key;
+	coord_t coord;
+	lock_handle lh;
+	struct inode *inode;
+	item_plugin *iplug;
+	sector_t block;
+
+	inode = mapping->host;
+
+	ctx = reiser4_init_context(inode->i_sb);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+	key_by_inode_and_offset_common(inode,
+				       (loff_t) lblock * current_blocksize,
+				       &key);
+
+	init_lh(&lh);
+	result =
+	    find_file_item_nohint(&coord, &lh, &key, ZNODE_READ_LOCK, inode);
+	if (cbk_errored(result)) {
+		done_lh(&lh);
+		reiser4_exit_context(ctx);
+		return result;
+	}
+
+	result = zload(coord.node);
+	if (result) {
+		done_lh(&lh);
+		reiser4_exit_context(ctx);
+		return result;
+	}
+
+	iplug = item_plugin_by_coord(&coord);
+	if (iplug->s.file.get_block) {
+		result = iplug->s.file.get_block(&coord, lblock, &block);
+		if (result == 0)
+			result = block;
+	} else
+		result = RETERR(-EINVAL);
+
+	zrelse(coord.node);
+	done_lh(&lh);
+	reiser4_exit_context(ctx);
+	return result;
+}
+
+/**
+ * flow_by_inode_unix_file - initialize structure flow
+ * @inode: inode of file for which read or write is about to be performed
+ * @buf: buffer to perform read to or write from
+ * @user: flag showing whether @buf is user space or kernel space
+ * @size: size of buffer @buf
+ * @off: start offset for read or write
+ * @op: READ or WRITE
+ * @flow: flow to initialize
+ *
+ * Initializes fields of @flow: key, size of data, i/o mode (read or write).
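+ *
+ * A minimal usage sketch (hypothetical caller, not taken from this file;
+ * it assumes READ_OP is the rw_op value used for reads and that the caller
+ * checks the return value):
+ *
+ *	flow_t f;
+ *
+ *	result = flow_by_inode_unix_file(inode, buf, 1, count, *off,
+ *					 READ_OP, &f);
+ *	if (result == 0)
+ *		f.key now addresses byte *off of the file body and
+ *		f.length == count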
+ */ +int flow_by_inode_unix_file(struct inode *inode, + const char __user *buf, int user, + loff_t size, loff_t off, + rw_op op, flow_t *flow) +{ + assert("nikita-1100", inode != NULL); + + flow->length = size; + memcpy(&flow->data, &buf, sizeof(buf)); + flow->user = user; + flow->op = op; + assert("nikita-1931", inode_file_plugin(inode) != NULL); + assert("nikita-1932", + inode_file_plugin(inode)->key_by_inode == + key_by_inode_and_offset_common); + /* calculate key of write position and insert it into flow->key */ + return key_by_inode_and_offset_common(inode, off, &flow->key); +} + +/* plugin->u.file.set_plug_in_sd = NULL + plugin->u.file.set_plug_in_inode = NULL + plugin->u.file.create_blank_sd = NULL */ +/* plugin->u.file.delete */ +/* + plugin->u.file.add_link = reiser4_add_link_common + plugin->u.file.rem_link = NULL */ + +/* plugin->u.file.owns_item + this is common_file_owns_item with assertion */ +/* Audited by: green(2002.06.15) */ +int +owns_item_unix_file(const struct inode *inode /* object to check against */ , + const coord_t * coord /* coord to check */ ) +{ + int result; + + result = owns_item_common(inode, coord); + if (!result) + return 0; + if (!plugin_of_group(item_plugin_by_coord(coord), + UNIX_FILE_METADATA_ITEM_TYPE)) + return 0; + assert("vs-547", + item_id_by_coord(coord) == EXTENT_POINTER_ID || + item_id_by_coord(coord) == FORMATTING_ID); + return 1; +} + +static int setattr_truncate(struct inode *inode, struct iattr *attr) +{ + int result; + int s_result; + loff_t old_size; + reiser4_tree *tree; + + inode_check_scale(inode, inode->i_size, attr->ia_size); + + old_size = inode->i_size; + tree = reiser4_tree_by_inode(inode); + + result = safe_link_grab(tree, BA_CAN_COMMIT); + if (result == 0) + result = safe_link_add(inode, SAFE_TRUNCATE); + if (result == 0) + result = truncate_file_body(inode, attr); + if (result) + warning("vs-1588", "truncate_file failed: oid %lli, " + "old size %lld, new size %lld, retval %d", + (unsigned long long)get_inode_oid(inode), + old_size, attr->ia_size, result); + + s_result = safe_link_grab(tree, BA_CAN_COMMIT); + if (s_result == 0) + s_result = + safe_link_del(tree, get_inode_oid(inode), SAFE_TRUNCATE); + if (s_result != 0) { + warning("nikita-3417", "Cannot kill safelink %lli: %i", + (unsigned long long)get_inode_oid(inode), s_result); + } + safe_link_release(tree); + return result; +} + +/* plugin->u.file.setattr method */ +/* This calls inode_setattr and if truncate is in effect it also takes + exclusive inode access to avoid races */ +int setattr_unix_file(struct dentry *dentry, /* Object to change attributes */ + struct iattr *attr /* change description */ ) +{ + int result; + + if (attr->ia_valid & ATTR_SIZE) { + reiser4_context *ctx; + struct unix_file_info *uf_info; + + /* truncate does reservation itself and requires exclusive + access obtained */ + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + uf_info = unix_file_inode_data(dentry->d_inode); + get_exclusive_access_careful(uf_info, dentry->d_inode); + result = setattr_truncate(dentry->d_inode, attr); + drop_exclusive_access(uf_info); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + } else + result = reiser4_setattr_common(dentry, attr); + + return result; +} + +/* plugin->u.file.init_inode_data */ +void +init_inode_data_unix_file(struct inode *inode, + reiser4_object_create_data * crd, int create) +{ + struct unix_file_info *data; + + data = unix_file_inode_data(inode); + data->container = create ? 
UF_CONTAINER_EMPTY : UF_CONTAINER_UNKNOWN; + init_rwsem(&data->latch); + data->tplug = inode_formatting_plugin(inode); + data->exclusive_use = 0; + +#if REISER4_DEBUG + data->ea_owner = NULL; + atomic_set(&data->nr_neas, 0); +#endif + init_inode_ordering(inode, crd, create); +} + +/** + * delete_unix_file - delete_object of file_plugin + * @inode: inode to be deleted + * + * Truncates file to length 0, removes stat data and safe link. + */ +int delete_object_unix_file(struct inode *inode) +{ + struct unix_file_info *uf_info; + int result; + + if (reiser4_inode_get_flag(inode, REISER4_NO_SD)) + return 0; + + /* truncate file bogy first */ + uf_info = unix_file_inode_data(inode); + get_exclusive_access(uf_info); + result = shorten_file(inode, 0 /* size */ ); + drop_exclusive_access(uf_info); + + if (result) + warning("edward-1556", + "failed to truncate file (%llu) on removal: %d", + get_inode_oid(inode), result); + + /* remove stat data and safe link */ + return reiser4_delete_object_common(inode); +} + +static int do_write_begin(struct file *file, struct page *page, + loff_t pos, unsigned len) +{ + int ret; + if (len == PAGE_SIZE || PageUptodate(page)) + return 0; + + ret = readpage_unix_file(file, page); + if (ret) { + SetPageError(page); + ClearPageUptodate(page); + /* All reiser4 readpage() implementations should return the + * page locked in case of error. */ + assert("nikita-3472", PageLocked(page)); + return ret; + } + /* + * ->readpage() either: + * + * 1. starts IO against @page. @page is locked for IO in + * this case. + * + * 2. doesn't start IO. @page is unlocked. + * + * In either case, page should be locked. + */ + lock_page(page); + /* + * IO (if any) is completed at this point. Check for IO + * errors. + */ + if (!PageUptodate(page)) + return RETERR(-EIO); + return ret; +} + +/* plugin->write_begin() */ +int write_begin_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata) +{ + int ret; + struct inode * inode; + struct unix_file_info *info; + + inode = file_inode(file); + info = unix_file_inode_data(inode); + + ret = reiser4_grab_space_force(estimate_one_insert_into_item + (reiser4_tree_by_inode(inode)), + BA_CAN_COMMIT); + if (ret) + return ret; + get_exclusive_access(info); + ret = find_file_state(file_inode(file), info); + if (unlikely(ret != 0)) { + drop_exclusive_access(info); + return ret; + } + if (info->container == UF_CONTAINER_TAILS) { + ret = tail2extent(info); + if (ret) { + warning("edward-1575", + "tail conversion failed: %d", ret); + drop_exclusive_access(info); + return ret; + } + } + ret = do_write_begin(file, page, pos, len); + if (unlikely(ret != 0)) + drop_exclusive_access(info); + /* else exclusive access will be dropped in ->write_end() */ + return ret; +} + +/* plugin->write_end() */ +int write_end_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata) +{ + int ret; + struct inode *inode; + struct unix_file_info *info; + + inode = file_inode(file); + info = unix_file_inode_data(inode); + + unlock_page(page); + ret = find_or_create_extent(page); + if (ret) { + SetPageError(page); + goto exit; + } + if (pos + copied > inode->i_size) { + INODE_SET_FIELD(inode, i_size, pos + copied); + ret = reiser4_update_sd(inode); + if (unlikely(ret != 0)) + warning("edward-1604", + "Can not update stat-data: %i. 
FSCK?", + ret); + } + exit: + drop_exclusive_access(info); + return ret; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/file/file.h b/fs/reiser4/plugin/file/file.h new file mode 100644 index 000000000000..523f86855dbc --- /dev/null +++ b/fs/reiser4/plugin/file/file.h @@ -0,0 +1,322 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* this file contains declarations of methods implementing + file plugins (UNIX_FILE_PLUGIN_ID, CRYPTCOMPRESS_FILE_PLUGIN_ID + and SYMLINK_FILE_PLUGIN_ID) */ + +#if !defined( __REISER4_FILE_H__ ) +#define __REISER4_FILE_H__ + +/* possible states in dispatching process */ +typedef enum { + DISPATCH_INVAL_STATE, /* invalid state */ + DISPATCH_POINT, /* dispatching point has been achieved */ + DISPATCH_REMAINS_OLD, /* made a decision to manage by old plugin */ + DISPATCH_ASSIGNED_NEW /* a new plugin has been assigned */ +} dispatch_state; + +struct dispatch_context { + int nr_pages; + struct page **pages; + dispatch_state state; +}; + +/* + * Declarations of methods provided for VFS. + */ + +/* inode operations */ +int reiser4_setattr_dispatch(struct dentry *, struct iattr *); + +/* file operations */ +ssize_t reiser4_read_dispatch(struct file *, char __user *buf, + size_t count, loff_t *off); +ssize_t reiser4_write_dispatch(struct file *, const char __user *buf, + size_t count, loff_t * off); +long reiser4_ioctl_dispatch(struct file *filp, unsigned int cmd, + unsigned long arg); +int reiser4_mmap_dispatch(struct file *, struct vm_area_struct *); +int reiser4_open_dispatch(struct inode *inode, struct file *file); +int reiser4_release_dispatch(struct inode *, struct file *); +int reiser4_sync_file_common(struct file *, loff_t, loff_t, int datasync); + +/* address space operations */ +int reiser4_readpage_dispatch(struct file *, struct page *); +int reiser4_readpages_dispatch(struct file *, struct address_space *, + struct list_head *, unsigned); +int reiser4_writepages_dispatch(struct address_space *, + struct writeback_control *); +int reiser4_write_begin_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +int reiser4_write_end_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); +sector_t reiser4_bmap_dispatch(struct address_space *, sector_t lblock); + +/* + * Private methods of unix-file plugin + * (UNIX_FILE_PLUGIN_ID) + */ + +/* private inode operations */ +int setattr_unix_file(struct dentry *, struct iattr *); + +/* private file operations */ + +ssize_t read_unix_file(struct file *, char __user *buf, size_t read_amount, + loff_t *off); +ssize_t write_unix_file(struct file *, const char __user *buf, size_t write_amount, + loff_t * off, struct dispatch_context * cont); +int ioctl_unix_file(struct file *, unsigned int cmd, unsigned long arg); +int mmap_unix_file(struct file *, struct vm_area_struct *); +int open_unix_file(struct inode *, struct file *); +int release_unix_file(struct inode *, struct file *); + +/* private address space operations */ +int readpage_unix_file(struct file *, struct page *); +int readpages_unix_file(struct file*, struct address_space*, struct list_head*, + unsigned); +int writepages_unix_file(struct address_space *, struct writeback_control *); +int 
write_begin_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata); +int write_end_unix_file(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata); +sector_t bmap_unix_file(struct address_space *, sector_t lblock); + +/* other private methods */ +int delete_object_unix_file(struct inode *); +int flow_by_inode_unix_file(struct inode *, const char __user *buf, + int user, loff_t, loff_t, rw_op, flow_t *); +int owns_item_unix_file(const struct inode *, const coord_t *); +void init_inode_data_unix_file(struct inode *, reiser4_object_create_data *, + int create); + +/* + * Private methods of cryptcompress file plugin + * (CRYPTCOMPRESS_FILE_PLUGIN_ID) + */ + +/* private inode operations */ +int setattr_cryptcompress(struct dentry *, struct iattr *); + +/* private file operations */ +ssize_t read_cryptcompress(struct file *, char __user *buf, + size_t count, loff_t *off); +ssize_t write_cryptcompress(struct file *, const char __user *buf, + size_t count, loff_t * off, + struct dispatch_context *cont); +int ioctl_cryptcompress(struct file *, unsigned int cmd, unsigned long arg); +int mmap_cryptcompress(struct file *, struct vm_area_struct *); +int open_cryptcompress(struct inode *, struct file *); +int release_cryptcompress(struct inode *, struct file *); + +/* private address space operations */ +int readpage_cryptcompress(struct file *, struct page *); +int readpages_cryptcompress(struct file*, struct address_space*, + struct list_head*, unsigned); +int writepages_cryptcompress(struct address_space *, + struct writeback_control *); +int write_begin_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata); +int write_end_cryptcompress(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata); +sector_t bmap_cryptcompress(struct address_space *, sector_t lblock); + +/* other private methods */ +int flow_by_inode_cryptcompress(struct inode *, const char __user *buf, + int user, loff_t, loff_t, rw_op, flow_t *); +int key_by_inode_cryptcompress(struct inode *, loff_t off, reiser4_key *); +int create_object_cryptcompress(struct inode *, struct inode *, + reiser4_object_create_data *); +int delete_object_cryptcompress(struct inode *); +void init_inode_data_cryptcompress(struct inode *, reiser4_object_create_data *, + int create); +int cut_tree_worker_cryptcompress(tap_t *, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, + struct inode *object, int truncate, + int *progress); +void destroy_inode_cryptcompress(struct inode *); + +/* + * Private methods of symlink file plugin + * (SYMLINK_FILE_PLUGIN_ID) + */ +int reiser4_create_symlink(struct inode *symlink, struct inode *dir, + reiser4_object_create_data *); +void destroy_inode_symlink(struct inode *); + +/* + * all the write into unix file is performed by item write method. 
Write method + * of unix file plugin only decides which item plugin (extent or tail) and in + * which mode (one from the enum below) to call + */ +typedef enum { + FIRST_ITEM = 1, + APPEND_ITEM = 2, + OVERWRITE_ITEM = 3 +} write_mode_t; + +/* unix file may be in one the following states */ +typedef enum { + UF_CONTAINER_UNKNOWN = 0, + UF_CONTAINER_TAILS = 1, + UF_CONTAINER_EXTENTS = 2, + UF_CONTAINER_EMPTY = 3 +} file_container_t; + +struct formatting_plugin; +struct inode; + +/* unix file plugin specific part of reiser4 inode */ +struct unix_file_info { + /* + * this read-write lock protects file containerization change. Accesses + * which do not change file containerization (see file_container_t) + * (read, readpage, writepage, write (until tail conversion is + * involved)) take read-lock. Accesses which modify file + * containerization (truncate, conversion from tail to extent and back) + * take write-lock. + */ + struct rw_semaphore latch; + /* this enum specifies which items are used to build the file */ + file_container_t container; + /* + * plugin which controls when file is to be converted to extents and + * back to tail + */ + struct formatting_plugin *tplug; + /* if this is set, file is in exclusive use */ + int exclusive_use; +#if REISER4_DEBUG + /* pointer to task struct of thread owning exclusive access to file */ + void *ea_owner; + atomic_t nr_neas; + void *last_reader; +#endif +}; + +struct unix_file_info *unix_file_inode_data(const struct inode *inode); +void get_exclusive_access(struct unix_file_info *); +void drop_exclusive_access(struct unix_file_info *); +void get_nonexclusive_access(struct unix_file_info *); +void drop_nonexclusive_access(struct unix_file_info *); +int try_to_get_nonexclusive_access(struct unix_file_info *); +int find_file_item(hint_t *, const reiser4_key *, znode_lock_mode, + struct inode *); +int find_file_item_nohint(coord_t *, lock_handle *, + const reiser4_key *, znode_lock_mode, + struct inode *); + +int load_file_hint(struct file *, hint_t *); +void save_file_hint(struct file *, const hint_t *); + +#include "../item/extent.h" +#include "../item/tail.h" +#include "../item/ctail.h" + +struct uf_coord { + coord_t coord; + lock_handle *lh; + int valid; + union { + struct extent_coord_extension extent; + struct tail_coord_extension tail; + struct ctail_coord_extension ctail; + } extension; +}; + +#include "../../forward.h" +#include "../../seal.h" +#include "../../lock.h" + +/* + * This structure is used to speed up file operations (reads and writes). A + * hint is a suggestion about where a key resolved to last time. A seal + * indicates whether a node has been modified since a hint was last recorded. + * You check the seal, and if the seal is still valid, you can use the hint + * without traversing the tree again. 
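+ *
+ * Illustrative pattern only (not a verbatim call sequence from this file;
+ * the actual seal check is done with the helpers from seal.h):
+ *
+ *	hint_t hint;
+ *
+ *	load_file_hint(file, &hint);
+ *	if (hint_is_set(&hint) and the seal in hint.seal is still valid)
+ *		use the coordinates cached in hint.ext_coord;
+ *	else
+ *		do a full tree lookup, then reiser4_set_hint(&hint, &key, mode);
+ *	save_file_hint(file, &hint);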
+ */ +struct hint { + seal_t seal; /* a seal over last file item accessed */ + uf_coord_t ext_coord; + loff_t offset; + znode_lock_mode mode; + lock_handle lh; +}; + +static inline int hint_is_valid(hint_t * hint) +{ + return hint->ext_coord.valid; +} + +static inline void hint_set_valid(hint_t * hint) +{ + hint->ext_coord.valid = 1; +} + +static inline void hint_clr_valid(hint_t * hint) +{ + hint->ext_coord.valid = 0; +} + +int load_file_hint(struct file *, hint_t *); +void save_file_hint(struct file *, const hint_t *); +void hint_init_zero(hint_t *); +void reiser4_set_hint(hint_t *, const reiser4_key *, znode_lock_mode); +int hint_is_set(const hint_t *); +void reiser4_unset_hint(hint_t *); + +int reiser4_update_file_size(struct inode *, loff_t, int update_sd); +int cut_file_items(struct inode *, loff_t new_size, + int update_sd, loff_t cur_size, + int (*update_actor) (struct inode *, loff_t, int)); +#if REISER4_DEBUG + +/* return 1 is exclusive access is obtained, 0 - otherwise */ +static inline int ea_obtained(struct unix_file_info * uf_info) +{ + int ret; + + ret = down_read_trylock(&uf_info->latch); + if (ret) + up_read(&uf_info->latch); + return !ret; +} + +#endif + +#define WRITE_GRANULARITY 32 + +int tail2extent(struct unix_file_info *); +int extent2tail(struct file *, struct unix_file_info *); + +int goto_right_neighbor(coord_t *, lock_handle *); +int find_or_create_extent(struct page *); +int equal_to_ldk(znode *, const reiser4_key *); + +void init_uf_coord(uf_coord_t *uf_coord, lock_handle *lh); + +static inline int cbk_errored(int cbk_result) +{ + return (cbk_result != CBK_COORD_NOTFOUND + && cbk_result != CBK_COORD_FOUND); +} + +/* __REISER4_FILE_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: +*/ diff --git a/fs/reiser4/plugin/file/file_conversion.c b/fs/reiser4/plugin/file/file_conversion.c new file mode 100644 index 000000000000..cda538bb405f --- /dev/null +++ b/fs/reiser4/plugin/file/file_conversion.c @@ -0,0 +1,755 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, + licensing governed by reiser4/README */ + +/** + * This file contains dispatching hooks, and conversion methods, which + * implement transitions in the FILE interface. + * + * Dispatching hook makes a decision (at dispatching point) about the + * most reasonable plugin. Such decision is made in accordance with some + * O(1)-heuristic. + * + * We implement a transition CRYPTCOMPRESS -> UNIX_FILE for files with + * incompressible data. Current heuristic to estimate compressibility is + * very simple: if first complete logical cluster (64K by default) of a + * file is incompressible, then we make a decision, that the whole file + * is incompressible. + * + * To enable dispatching we install a special "magic" compression mode + * plugin CONVX_COMPRESSION_MODE_ID at file creation time. + * + * Note, that we don't perform back conversion (UNIX_FILE->CRYPTCOMPRESS) + * because of compatibility reasons). + * + * In conversion time we protect CS, the conversion set (file's (meta)data + * and plugin table (pset)) via special per-inode rw-semaphore (conv_sem). + * The methods which implement conversion are CS writers. The methods of FS + * interface (file_operations, inode_operations, address_space_operations) + * are CS readers. 
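+ *
+ * Schematically, a passive (reader-side) section reduces to the following
+ * (simplified sketch; the real logic, including the should_protect() check,
+ * lives in the PROT_PASSIVE macros below):
+ *
+ *	down_read(&reiser4_inode_data(inode)->conv_sem);
+ *	result = inode_file_plugin(inode)->method(...);
+ *	up_read(&reiser4_inode_data(inode)->conv_sem);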
+ */ + +#include <linux/uio.h> +#include "../../inode.h" +#include "../cluster.h" +#include "file.h" + +#define conversion_enabled(inode) \ + (inode_compression_mode_plugin(inode) == \ + compression_mode_plugin_by_id(CONVX_COMPRESSION_MODE_ID)) + +/** + * Located sections (readers and writers of @pset) are not permanently + * critical: cryptcompress file can be converted only if the conversion + * is enabled (see the macrio above). Also we don't perform back + * conversion. The following helper macro is a sanity check to decide + * if we need the protection (locks are always additional overheads). + */ +#define should_protect(inode) \ + (inode_file_plugin(inode) == \ + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID) && \ + conversion_enabled(inode)) +/** + * To avoid confusion with read/write file operations, we'll speak about + * "passive" protection for FCS readers and "active" protection for FCS + * writers. All methods with active or passive protection have suffix + * "careful". + */ +/** + * Macros for passive protection. + * + * Construct invariant operation to be supplied to VFS. + * The macro accepts the following lexemes: + * @type - type of the value represented by the compound statement; + * @method - name of an operation to be supplied to VFS (reiser4 file + * plugin also should contain a method with such name). + */ +#define PROT_PASSIVE(type, method, args) \ +({ \ + type _result; \ + struct rw_semaphore * guard = \ + &reiser4_inode_data(inode)->conv_sem; \ + \ + if (should_protect(inode)) { \ + down_read(guard); \ + if (!should_protect(inode)) \ + up_read(guard); \ + } \ + _result = inode_file_plugin(inode)->method args; \ + if (should_protect(inode)) \ + up_read(guard); \ + _result; \ +}) + +#define PROT_PASSIVE_VOID(method, args) \ +({ \ + struct rw_semaphore * guard = \ + &reiser4_inode_data(inode)->conv_sem; \ + \ + if (should_protect(inode)) { \ + down_read(guard); \ + if (!should_protect(inode)) \ + up_read(guard); \ + } \ + inode_file_plugin(inode)->method args; \ + \ + if (should_protect(inode)) \ + up_read(guard); \ +}) + +/* Pass management to the unix-file plugin with "notail" policy */ +static int __cryptcompress2unixfile(struct file *file, struct inode * inode) +{ + int result; + reiser4_inode *info; + struct unix_file_info * uf; + info = reiser4_inode_data(inode); + + result = aset_set_unsafe(&info->pset, + PSET_FILE, + (reiser4_plugin *) + file_plugin_by_id(UNIX_FILE_PLUGIN_ID)); + if (result) + return result; + result = aset_set_unsafe(&info->pset, + PSET_FORMATTING, + (reiser4_plugin *) + formatting_plugin_by_id(NEVER_TAILS_FORMATTING_ID)); + if (result) + return result; + /* get rid of non-standard plugins */ + info->plugin_mask &= ~cryptcompress_mask; + /* get rid of plugin stat-data extension */ + info->extmask &= ~(1 << PLUGIN_STAT); + + reiser4_inode_clr_flag(inode, REISER4_SDLEN_KNOWN); + + /* FIXME use init_inode_data_unix_file() instead, + but aviod init_inode_ordering() */ + /* Init unix-file specific part of inode */ + uf = unix_file_inode_data(inode); + uf->container = UF_CONTAINER_UNKNOWN; + init_rwsem(&uf->latch); + uf->tplug = inode_formatting_plugin(inode); + uf->exclusive_use = 0; +#if REISER4_DEBUG + uf->ea_owner = NULL; + atomic_set(&uf->nr_neas, 0); +#endif + /** + * we was carefull for file_ops, inode_ops and as_ops + * to be invariant for plugin conversion, so there is + * no need to update ones already installed in the + * vfs's residence. 
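+ *
+ * (That is: the methods registered with the VFS are the reiser4_*_dispatch
+ * wrappers, which look up the file plugin on every call, so nothing cached
+ * by the VFS needs to be touched when the plugin changes.)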
+ */ + return 0; +} + +#if REISER4_DEBUG +static int disabled_conversion_inode_ok(struct inode * inode) +{ + __u64 extmask = reiser4_inode_data(inode)->extmask; + __u16 plugin_mask = reiser4_inode_data(inode)->plugin_mask; + + return ((extmask & (1 << LIGHT_WEIGHT_STAT)) && + (extmask & (1 << UNIX_STAT)) && + (extmask & (1 << LARGE_TIMES_STAT)) && + (extmask & (1 << PLUGIN_STAT)) && + (plugin_mask & (1 << PSET_COMPRESSION_MODE))); +} +#endif + +/** + * Disable future attempts to schedule/convert file plugin. + * This function is called by plugin schedule hooks. + * + * To disable conversion we assign any compression mode plugin id + * different from CONVX_COMPRESSION_MODE_ID. + */ +static int disable_conversion(struct inode * inode) +{ + int result; + result = + force_plugin_pset(inode, + PSET_COMPRESSION_MODE, + (reiser4_plugin *)compression_mode_plugin_by_id + (LATTD_COMPRESSION_MODE_ID)); + assert("edward-1500", + ergo(!result, disabled_conversion_inode_ok(inode))); + return result; +} + +/** + * Check if we really have achieved plugin scheduling point + */ +static int check_dispatch_point(struct inode * inode, + loff_t pos /* position in the + file to write from */, + struct cluster_handle * clust, + struct dispatch_context * cont) +{ + assert("edward-1505", conversion_enabled(inode)); + /* + * if file size is more then cluster size, then compressible + * status must be figured out (i.e. compression was disabled, + * or file plugin was converted to unix_file) + */ + assert("edward-1506", inode->i_size <= inode_cluster_size(inode)); + + if (pos > inode->i_size) + /* first logical cluster will contain a (partial) hole */ + return disable_conversion(inode); + if (pos < inode_cluster_size(inode)) + /* writing to the first logical cluster */ + return 0; + /* + * here we have: + * cluster_size <= pos <= i_size <= cluster_size, + * and, hence, pos == i_size == cluster_size + */ + assert("edward-1498", + pos == inode->i_size && + pos == inode_cluster_size(inode)); + assert("edward-1539", cont != NULL); + assert("edward-1540", cont->state == DISPATCH_INVAL_STATE); + + cont->state = DISPATCH_POINT; + return 0; +} + +static void start_check_compressibility(struct inode * inode, + struct cluster_handle * clust, + hint_t * hint) +{ + assert("edward-1507", clust->index == 1); + assert("edward-1508", !tfm_cluster_is_uptodate(&clust->tc)); + assert("edward-1509", cluster_get_tfm_act(&clust->tc) == TFMA_READ); + + hint_init_zero(hint); + clust->hint = hint; + clust->index --; + clust->nr_pages = size_in_pages(lbytes(clust->index, inode)); + + /* first logical cluster (of index #0) must be complete */ + assert("edward-1510", lbytes(clust->index, inode) == + inode_cluster_size(inode)); +} + +static void finish_check_compressibility(struct inode * inode, + struct cluster_handle * clust, + hint_t * hint) +{ + reiser4_unset_hint(clust->hint); + clust->hint = hint; + clust->index ++; +} + +#if REISER4_DEBUG +static int prepped_dclust_ok(hint_t * hint) +{ + reiser4_key key; + coord_t * coord = &hint->ext_coord.coord; + + item_key_by_coord(coord, &key); + return (item_id_by_coord(coord) == CTAIL_ID && + !coord_is_unprepped_ctail(coord) && + (get_key_offset(&key) + nr_units_ctail(coord) == + dclust_get_extension_dsize(hint))); +} +#endif + +#define fifty_persent(size) (size >> 1) +/* evaluation of data compressibility */ +#define data_is_compressible(osize, isize) \ + (osize < fifty_persent(isize)) + +/** + * A simple O(1)-heuristic for compressibility. + * This is called not more then one time per file's life. 
+ * Read first logical cluster (of index #0) and estimate its compressibility. + * Save estimation result in @cont. + */ +static int read_check_compressibility(struct inode * inode, + struct cluster_handle * clust, + struct dispatch_context * cont) +{ + int i; + int result; + size_t dst_len; + hint_t tmp_hint; + hint_t * cur_hint = clust->hint; + assert("edward-1541", cont->state == DISPATCH_POINT); + + start_check_compressibility(inode, clust, &tmp_hint); + + reset_cluster_pgset(clust, cluster_nrpages(inode)); + result = grab_page_cluster(inode, clust, READ_OP); + if (result) + return result; + /* Read page cluster here */ + for (i = 0; i < clust->nr_pages; i++) { + struct page *page = clust->pages[i]; + lock_page(page); + result = do_readpage_ctail(inode, clust, page, + ZNODE_READ_LOCK); + unlock_page(page); + if (result) + goto error; + } + tfm_cluster_clr_uptodate(&clust->tc); + + cluster_set_tfm_act(&clust->tc, TFMA_WRITE); + + if (hint_is_valid(&tmp_hint) && !hint_is_unprepped_dclust(&tmp_hint)) { + /* lenght of compressed data is known, no need to compress */ + assert("edward-1511", + znode_is_any_locked(tmp_hint.lh.node)); + assert("edward-1512", + WITH_DATA(tmp_hint.ext_coord.coord.node, + prepped_dclust_ok(&tmp_hint))); + dst_len = dclust_get_extension_dsize(&tmp_hint); + } + else { + struct tfm_cluster * tc = &clust->tc; + compression_plugin * cplug = inode_compression_plugin(inode); + result = grab_tfm_stream(inode, tc, INPUT_STREAM); + if (result) + goto error; + for (i = 0; i < clust->nr_pages; i++) { + char *data; + lock_page(clust->pages[i]); + BUG_ON(!PageUptodate(clust->pages[i])); + data = kmap(clust->pages[i]); + memcpy(tfm_stream_data(tc, INPUT_STREAM) + pg_to_off(i), + data, PAGE_SIZE); + kunmap(clust->pages[i]); + unlock_page(clust->pages[i]); + } + result = grab_tfm_stream(inode, tc, OUTPUT_STREAM); + if (result) + goto error; + result = grab_coa(tc, cplug); + if (result) + goto error; + tc->len = tc->lsize = lbytes(clust->index, inode); + assert("edward-1513", tc->len == inode_cluster_size(inode)); + dst_len = tfm_stream_size(tc, OUTPUT_STREAM); + cplug->compress(get_coa(tc, cplug->h.id, tc->act), + tfm_input_data(clust), tc->len, + tfm_output_data(clust), &dst_len); + assert("edward-1514", + dst_len <= tfm_stream_size(tc, OUTPUT_STREAM)); + } + finish_check_compressibility(inode, clust, cur_hint); + cont->state = + (data_is_compressible(dst_len, inode_cluster_size(inode)) ? + DISPATCH_REMAINS_OLD : + DISPATCH_ASSIGNED_NEW); + return 0; + error: + put_page_cluster(clust, inode, READ_OP); + return result; +} + +/* Cut disk cluster of index @idx */ +static int cut_disk_cluster(struct inode * inode, cloff_t idx) +{ + reiser4_key from, to; + assert("edward-1515", inode_file_plugin(inode) == + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + key_by_inode_cryptcompress(inode, clust_to_off(idx, inode), &from); + to = from; + set_key_offset(&to, + get_key_offset(&from) + inode_cluster_size(inode) - 1); + return reiser4_cut_tree(reiser4_tree_by_inode(inode), + &from, &to, inode, 0); +} + +static int reserve_cryptcompress2unixfile(struct inode *inode) +{ + reiser4_block_nr unformatted_nodes; + reiser4_tree *tree; + + tree = reiser4_tree_by_inode(inode); + + /* number of unformatted nodes which will be created */ + unformatted_nodes = cluster_nrpages(inode); /* N */ + + /* + * space required for one iteration of extent->tail conversion: + * + * 1. kill ctail items + * + * 2. insert N unformatted nodes + * + * 3. insert N (worst-case single-block + * extents) extent units. 
+ * + * 4. drilling to the leaf level by coord_by_key() + * + * 5. possible update of stat-data + * + */ + grab_space_enable(); + return reiser4_grab_space + (2 * tree->height + + unformatted_nodes + + unformatted_nodes * estimate_one_insert_into_item(tree) + + 1 + estimate_one_insert_item(tree) + + inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); +} + +/** + * Convert cryptcompress file plugin to unix_file plugin. + */ +static int cryptcompress2unixfile(struct file *file, struct inode *inode, + struct dispatch_context *cont) +{ + int i; + int result = 0; + struct cryptcompress_info *cr_info; + struct unix_file_info *uf_info; + assert("edward-1516", cont->pages[0]->index == 0); + + /* release all cryptcompress-specific resources */ + cr_info = cryptcompress_inode_data(inode); + result = reserve_cryptcompress2unixfile(inode); + if (result) + goto out; + /* tell kill_hook to not truncate pages */ + reiser4_inode_set_flag(inode, REISER4_FILE_CONV_IN_PROGRESS); + result = cut_disk_cluster(inode, 0); + if (result) + goto out; + /* captured jnode of cluster and assotiated resources (pages, + reserved disk space) were released by ->kill_hook() method + of the item plugin */ + + result = __cryptcompress2unixfile(file, inode); + if (result) + goto out; + /* At this point file is managed by unix file plugin */ + + uf_info = unix_file_inode_data(inode); + + assert("edward-1518", + ergo(jprivate(cont->pages[0]), + !jnode_is_cluster_page(jprivate(cont->pages[0])))); + for(i = 0; i < cont->nr_pages; i++) { + assert("edward-1519", cont->pages[i]); + assert("edward-1520", PageUptodate(cont->pages[i])); + + result = find_or_create_extent(cont->pages[i]); + if (result) + break; + } + if (unlikely(result)) + goto out; + uf_info->container = UF_CONTAINER_EXTENTS; + result = reiser4_update_sd(inode); + out: + all_grabbed2free(); + return result; +} + +#define convert_file_plugin cryptcompress2unixfile + +/** + * This is called by ->write() method of a cryptcompress file plugin. + * Make a decision about the most reasonable file plugin id to manage + * the file. + */ +int write_dispatch_hook(struct file *file, struct inode *inode, + loff_t pos, struct cluster_handle *clust, + struct dispatch_context *cont) +{ + int result; + if (!conversion_enabled(inode)) + return 0; + result = check_dispatch_point(inode, pos, clust, cont); + if (result || cont->state != DISPATCH_POINT) + return result; + result = read_check_compressibility(inode, clust, cont); + if (result) + return result; + if (cont->state == DISPATCH_REMAINS_OLD) { + put_page_cluster(clust, inode, READ_OP); + return disable_conversion(inode); + } + assert("edward-1543", cont->state == DISPATCH_ASSIGNED_NEW); + /* + * page cluster is grabbed and uptodate. It will be + * released with a pgset after plugin conversion is + * finished, see put_dispatch_context(). + */ + reiser4_unset_hint(clust->hint); + move_cluster_pgset(clust, &cont->pages, &cont->nr_pages); + return 0; +} + +/** + * This is called by ->setattr() method of cryptcompress file plugin. 
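+ *
+ * An explicit attribute change on a still-convertible file simply disables
+ * any future cryptcompress->unix_file conversion for it (see
+ * disable_conversion() above).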
+ */ +int setattr_dispatch_hook(struct inode * inode) +{ + if (conversion_enabled(inode)) + return disable_conversion(inode); + return 0; +} + +static inline void init_dispatch_context(struct dispatch_context * cont) +{ + memset(cont, 0, sizeof(*cont)); +} + +static inline void done_dispatch_context(struct dispatch_context * cont, + struct inode * inode) +{ + if (cont->pages) { + __put_page_cluster(0, cont->nr_pages, cont->pages, inode); + kfree(cont->pages); + } +} + +static inline ssize_t reiser4_write_checks(struct file *file, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t result; + struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; + struct kiocb iocb; + struct iov_iter iter; + + init_sync_kiocb(&iocb, file); + iocb.ki_pos = *off; + iov_iter_init(&iter, WRITE, &iov, 1, count); + + result = generic_write_checks(&iocb, &iter); + *off = iocb.ki_pos; + return result; +} + +/* + * ->write() VFS file operation + * + * performs "intelligent" conversion in the FILE interface. + * Write a file in 3 steps (2d and 3d steps are optional). + */ +ssize_t reiser4_write_dispatch(struct file *file, const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t result; + reiser4_context *ctx; + ssize_t written_old = 0; /* bytes written with initial plugin */ + ssize_t written_new = 0; /* bytes written with new plugin */ + struct dispatch_context cont; + struct inode * inode = file_inode(file); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + current->backing_dev_info = inode_to_bdi(inode); + init_dispatch_context(&cont); + inode_lock(inode); + + result = reiser4_write_checks(file, buf, count, off); + if (unlikely(result <= 0)) + goto exit; + /** + * First step. + * Start write with initial file plugin. + * Keep a plugin schedule status at @cont (if any). + */ + written_old = inode_file_plugin(inode)->write(file, + buf, + count, + off, + &cont); + if (cont.state != DISPATCH_ASSIGNED_NEW || written_old < 0) + goto exit; + /** + * Second step. + * New file plugin has been scheduled. + * Perform conversion to the new plugin. + */ + down_read(&reiser4_inode_data(inode)->conv_sem); + result = convert_file_plugin(file, inode, &cont); + up_read(&reiser4_inode_data(inode)->conv_sem); + if (result) { + warning("edward-1544", + "Inode %llu: file plugin conversion failed (%d)", + (unsigned long long)get_inode_oid(inode), + (int)result); + goto exit; + } + reiser4_txn_restart(ctx); + /** + * Third step: + * Finish write with the new file plugin. + */ + assert("edward-1536", + inode_file_plugin(inode) == + file_plugin_by_id(UNIX_FILE_PLUGIN_ID)); + + written_new = inode_file_plugin(inode)->write(file, + buf + written_old, + count - written_old, + off, + NULL); + exit: + inode_unlock(inode); + done_dispatch_context(&cont, inode); + current->backing_dev_info = NULL; + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return written_old + (written_new < 0 ? 0 : written_new); +} + +/* + * Dispatchers with "passive" protection for: + * + * ->open(); + * ->read(); + * ->ioctl(); + * ->mmap(); + * ->release(); + * ->bmap(). 
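+ *
+ * Each of these takes the read side of conv_sem (via PROT_PASSIVE above)
+ * around the corresponding file plugin method, so a concurrent plugin
+ * conversion cannot switch methods in the middle of a call.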
+ */ + +int reiser4_open_dispatch(struct inode *inode, struct file *file) +{ + return PROT_PASSIVE(int, open, (inode, file)); +} + +ssize_t reiser4_read_dispatch(struct file * file, char __user * buf, + size_t size, loff_t * off) +{ + struct inode * inode = file_inode(file); + return PROT_PASSIVE(ssize_t, read, (file, buf, size, off)); +} + +long reiser4_ioctl_dispatch(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct inode * inode = file_inode(filp); + return PROT_PASSIVE(int, ioctl, (filp, cmd, arg)); +} + +int reiser4_mmap_dispatch(struct file *file, struct vm_area_struct *vma) +{ + struct inode *inode = file_inode(file); + return PROT_PASSIVE(int, mmap, (file, vma)); +} + +int reiser4_release_dispatch(struct inode *inode, struct file *file) +{ + return PROT_PASSIVE(int, release, (inode, file)); +} + +sector_t reiser4_bmap_dispatch(struct address_space * mapping, sector_t lblock) +{ + struct inode *inode = mapping->host; + return PROT_PASSIVE(sector_t, bmap, (mapping, lblock)); +} + +/** + * NOTE: The following two methods are + * used only for loopback functionality. + * reiser4_write_end() can not cope with + * short writes for now. + */ +int reiser4_write_begin_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned len, + unsigned flags, + struct page **pagep, + void **fsdata) +{ + int ret = 0; + struct page *page; + pgoff_t index; + reiser4_context *ctx; + struct inode * inode = file_inode(file); + + index = pos >> PAGE_SHIFT; + page = grab_cache_page_write_begin(mapping, index, + flags & AOP_FLAG_NOFS); + *pagep = page; + if (!page) + return -ENOMEM; + + ctx = reiser4_init_context(file_inode(file)->i_sb); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto err2; + } + ret = reiser4_grab_space_force(/* for update_sd: + * one when updating file size and + * one when updating mtime/ctime */ + 2 * estimate_update_common(inode), + BA_CAN_COMMIT); + if (ret) + goto err1; + ret = PROT_PASSIVE(int, write_begin, (file, page, pos, len, fsdata)); + if (unlikely(ret)) + goto err1; + /* Success. Resorces will be released in write_end_dispatch */ + return 0; + err1: + reiser4_exit_context(ctx); + err2: + unlock_page(page); + put_page(page); + return ret; +} + +int reiser4_write_end_dispatch(struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned len, + unsigned copied, + struct page *page, + void *fsdata) +{ + int ret; + reiser4_context *ctx; + struct inode *inode = page->mapping->host; + + assert("umka-3101", file != NULL); + assert("umka-3102", page != NULL); + assert("umka-3093", PageLocked(page)); + + ctx = get_current_context(); + + SetPageUptodate(page); + set_page_dirty_notag(page); + + ret = PROT_PASSIVE(int, write_end, (file, page, pos, copied, fsdata)); + put_page(page); + + /* don't commit transaction under inode semaphore */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return ret == 0 ? 
copied : ret; +} + +/* + * Dispatchers without protection + */ +int reiser4_setattr_dispatch(struct dentry *dentry, struct iattr *attr) +{ + return inode_file_plugin(dentry->d_inode)->setattr(dentry, attr); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/invert.c b/fs/reiser4/plugin/file/invert.c new file mode 100644 index 000000000000..73498787b74f --- /dev/null +++ b/fs/reiser4/plugin/file/invert.c @@ -0,0 +1,493 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Suppose you want to conveniently read and write a large variety of small files conveniently within a single emacs + buffer, without having a separate buffer for each 8 byte or so file. Inverts are the way to do that. An invert + provides you with the contents of a set of subfiles plus its own contents. It is a file which inherits other files + when you read it, and allows you to write to it and through it to the files that it inherits from. In order for it + to know which subfiles each part of your write should go into, there must be delimiters indicating that. It tries to + make that easy for you by providing those delimiters in what you read from it. + + When you read it, an invert performs an inverted assignment. Instead of taking an assignment command and writing a + bunch of files, it takes a bunch of files and composes an assignment command for you to read from it that if executed + would create those files. But which files? Well, that must be specified in the body of the invert using a special + syntax, and that specification is called the invert of the assignment. + + When written to, an invert performs the assignment command that is written + to it, and modifies its own body to contain the invert of that + assignment. + + In other words, writing to an invert file what you have read from it + is the identity operation. + + Malformed assignments cause write errors. Partial writes are not + supported in v4.0, but will be. + + Example: + + If an invert contains: + + /filenameA/<>+"(some text stored in the invert)+/filenameB/<> + +====================== +Each element in this definition should be an invert, and all files +should be called recursively - too. This is bad. If one of the +included files in not a regular or invert file, then we can't read +main file. + +I think to make it is possible easier: + +internal structure of invert file should be like symlink file. But +read and write method should be explitely indicated in i/o operation.. + +By default we read and write (if probably) as symlink and if we +specify ..invert at reading time that too we can specify it at write time. + +example: +/my_invert_file/..invert<- ( (/filenameA<-"(The contents of filenameA))+"(some text stored in the invert)+(/filenameB<-"(The contents of filenameB) ) ) +will create /my_invert_file as invert, and will creat /filenameA and /filenameB with specified body. + +read of /my_invert_file/..invert will be +/filenameA<-"(The contents of filenameA)+"(some text stored in the invert)+/filenameB<-"(The contents of filenameB) + +but read of /my_invert_file/ will be +The contents of filenameAsome text stored in the invertThe contents of filenameB + +we also can creat this file as +/my_invert_file/<-/filenameA+"(some text stored in the invert)+/filenameB +will create /my_invert_file , and use existing files /filenameA and /filenameB. 
+ +and when we will read it will be as previously invert file. + +This is correct? + + vv +DEMIDOV-FIXME-HANS: + +Maybe you are right, but then you must disable writes to /my_invert_file/ and only allow writes to /my_invert_file/..invert + +Do you agree? Discuss it on reiserfs-list.... + +-Hans +======================= + + Then a read will return: + + /filenameA<-"(The contents of filenameA)+"(some text stored in the invert)+/filenameB<-"(The contents of filenameB) + + and a write of the line above to the invert will set the contents of + the invert and filenameA and filenameB to their original values. + + Note that the contents of an invert have no influence on the effect + of a write unless the write is a partial write (and a write of a + shorter file without using truncate first is a partial write). + + truncate() has no effect on filenameA and filenameB, it merely + resets the value of the invert. + + Writes to subfiles via the invert are implemented by preceding them + with truncates. + + Parse failures cause write failures. + + Questions to ponder: should the invert be acted on prior to file + close when writing to an open filedescriptor? + + Example: + + If an invert contains: + + "(This text and a pair of quotes are all that is here.) + +Then a read will return: + + "(This text and a pair of quotes are all that is here.) + +*/ + +/* OPEN method places a struct file in memory associated with invert body + and returns something like file descriptor to the user for the future access + to the invert file. + During opening we parse the body of invert and get a list of the 'entryes' + (that describes all its subfiles) and place pointer on the first struct in + reiserfs-specific part of invert inode (arbitrary decision). + + Each subfile is described by the struct inv_entry that has a pointer @sd on + in-core based stat-data and a pointer on struct file @f (if we find that the + subfile uses more then one unformated node (arbitrary decision), we load + struct file in memory, otherwise we load base stat-data (and maybe 1-2 bytes + of some other information we need) + + Since READ and WRITE methods for inverts were formulated in assignment + language, they don't contain arguments 'size' and 'offset' that make sense + only in ordinary read/write methods. + + READ method is a combination of two methods: + 1) ordinary read method (with offset=0, lenght = @f->...->i_size) for entries + with @f != 0, this method uses pointer on struct file as an argument + 2) read method for inode-less files with @sd != 0, this method uses + in-core based stat-data instead struct file as an argument. + in the first case we don't use pagecache, just copy data that we got after + cbk() into userspace. + + WRITE method for invert files is more complex. + Besides declared WRITE-interface in assignment languageb above we need + to have an opportunity to edit unwrapped body of invert file with some + text editor, it means we need GENERIC WRITE METHOD for invert file: + + my_invert_file/..invert <- "string" + + this method parses "string" and looks for correct subfile signatures, also + the parsing process splits this "string" on the set of flows in accordance + with the set of subfiles specified by this signarure. + The found list of signatures #S is compared with the opened one #I of invert + file. If it doesn't have this one (#I==0, it will be so for instance if we + have just create this invert file) the write method assignes found signature + (#I=#S;) to the invert file. 
Then if #I==#S, generic write method splits + itself to the some write methods for ordinary or light-weight, or call itself + recursively for invert files with corresponding flows. + I am not sure, but the list of signatures looks like what mr.Demidov means + by 'delimiters'. + + The cases when #S<#I (#I<#S) (in the sense of set-theory) are also available + and cause delete (create new) subfiles (arbitrary decision - it may looks + too complex, but this interface will be the completest). The order of entries + of list #S (#I) and inherited order on #I (#S) must coincide. + The other parsing results give malformed signature that aborts READ method + and releases all resources. + + Format of subfile (entry) signature: + + "START_MAGIC"<>(TYPE="...",LOOKUP_ARG="...")SUBFILE_BODY"END_MAGIC" + + Legend: + + START_MAGIC - keyword indicates the start of subfile signature; + + <> indicates the start of 'subfile metadata', that is the pair + (TYPE="...",LOOKUP_ARG="...") in parenthesis separated by comma. + + TYPE - the string "type" indicates the start of one of the three words: + - ORDINARY_FILE, + - LIGHT_WEIGHT_FILE, + - INVERT_FILE; + + LOOKUP_ARG - lookup argument depends on previous type: + */ + + /************************************************************/ + /* TYPE * LOOKUP ARGUMENT */ + /************************************************************/ + /* LIGH_WEIGHT_FILE * stat-data key */ + /************************************************************/ + /* ORDINARY_FILE * filename */ + /************************************************************/ + /* INVERT_FILE * filename */ + /************************************************************/ + + /* where: + *stat-data key - the string contains stat data key of this subfile, it will be + passed to fast-access lookup method for light-weight files; + *filename - pathname of this subfile, iyt well be passed to VFS lookup methods + for ordinary and invert files; + + SUBFILE_BODY - data of this subfile (it will go to the flow) + END_MAGIC - the keyword indicates the end of subfile signature. + + The other simbols inside the signature interpreted as 'unformatted content', + which is available with VFS's read_link() (arbitraruy decision). + + NOTE: Parse method for a body of invert file uses mentioned signatures _without_ + subfile bodies. + + Now the only unclear thing is WRITE in regular light-weight subfile A that we + can describe only in assignment language: + + A <- "some_string" + + I guess we don't want to change stat-data and body items of file A + if this file exist, and size(A) != size("some_string") because this operation is + expencive, so we only do the partial write if size(A) > size("some_string") + and do truncate of the "some_string", and then do A <- "truncated string", if + size(A) < size("some_string"). This decision is also arbitrary.. 
+ */ + +/* here is infrastructure for formated flows */ + +#define SUBFILE_HEADER_MAGIC 0x19196605 +#define FLOW_HEADER_MAGIC 0x01194304 + +#include "../plugin.h" +#include "../../debug.h" +#include "../../forward.h" +#include "../object.h" +#include "../item/item.h" +#include "../item/static_stat.h" +#include "../../dformat.h" +#include "../znode.h" +#include "../inode.h" + +#include <linux/types.h> +#include <linux/fs.h> /* for struct file */ +#include <linux/list.h> /* for struct list_head */ + +typedef enum { + LIGHT_WEIGHT_FILE, + ORDINARY_FILE, + INVERT_FILE +} inv_entry_type; + +typedef struct flow_header { + d32 fl_magic; + d16 fl_nr; /* number of subfiles in the flow */ +}; + +typedef struct subfile_header { + d32 sh_magic; /* subfile magic */ + d16 sh_type; /* type of subfile: light-weight, ordinary, invert */ + d16 sh_arg_len; /* lenght of lookup argument (filename, key) */ + d32 sh_body_len; /* lenght of subfile body */ +}; + +/* functions to get/set fields of flow header */ + +static void fl_set_magic(flow_header * fh, __u32 value) +{ + cputod32(value, &fh->fh_magic); +} + +static __u32 fl_get_magic(flow_header * fh) +{ + return d32tocpu(&fh->fh_magic); +} +static void fl_set_number(flow_header * fh, __u16 value) +{ + cputod16(value, &fh->fh_nr); +} +static unsigned fl_get_number(flow_header * fh) +{ + return d16tocpu(&fh->fh_nr); +} + +/* functions to get/set fields of subfile header */ + +static void sh_set_magic(subfile_header * sh, __u32 value) +{ + cputod32(value, &sh->sh_magic); +} + +static __u32 sh_get_magic(subfile_header * sh) +{ + return d32tocpu(&sh->sh_magic); +} +static void sh_set_type(subfile_header * sh, __u16 value) +{ + cputod16(value, &sh->sh_magic); +} +static unsigned sh_get_type(subfile_header * sh) +{ + return d16tocpu(&sh->sh_magic); +} +static void sh_set_arg_len(subfile_header * sh, __u16 value) +{ + cputod16(value, &sh->sh_arg_len); +} +static unsigned sh_get_arg_len(subfile_header * sh) +{ + return d16tocpu(&sh->sh_arg_len); +} +static void sh_set_body_len(subfile_header * sh, __u32 value) +{ + cputod32(value, &sh->sh_body_len); +} + +static __u32 sh_get_body_len(subfile_header * sh) +{ + return d32tocpu(&sh->sh_body_len); +} + +/* in-core minimal stat-data, light-weight analog of inode */ + +struct incore_sd_base { + umode_t isd_mode; + nlink_t isd_nlink; + loff_t isd_size; + char *isd_data; /* 'subflow' to write */ +}; + +/* open invert create a list of invert entries, + every entry is represented by structure inv_entry */ + +struct inv_entry { + struct list_head *ie_list; + struct file *ie_file; /* this is NULL if the file doesn't + have unformated nodes */ + struct incore_sd_base *ie_sd; /* inode-less analog of struct file */ +}; + +/* allocate and init invert entry */ + +static struct inv_entry *allocate_inv_entry(void) +{ + struct inv_entry *inv_entry; + + inv_entry = reiser4_kmalloc(sizeof(struct inv_entry), GFP_KERNEL); + if (!inv_entry) + return ERR_PTR(RETERR(-ENOMEM)); + inv_entry->ie_file = NULL; + inv_entry->ie_sd = NULL; + INIT_LIST_HEAD(&inv_entry->ie_list); + return inv_entry; +} + +static int put_inv_entry(struct inv_entry *ientry) +{ + int result = 0; + + assert("edward-96", ientry != NULL); + assert("edward-97", ientry->ie_list != NULL); + + list_del(ientry->ie_list); + if (ientry->ie_sd != NULL) { + kfree(ientry->ie_sd); + kfree(ientry); + } + if (ientry->ie_file != NULL) + result = filp_close(ientry->file, NULL); + return result; +} + +static int allocate_incore_sd_base(struct inv_entry *inv_entry) +{ + struct incore_sd_base 
*isd_base assert("edward-98", inv_entry != NULL); + assert("edward-99", inv_entry->ie_inode = NULL); + assert("edward-100", inv_entry->ie_sd = NULL); + + isd_base = reiser4_kmalloc(sizeof(struct incore_sd_base), GFP_KERNEL); + if (!isd_base) + return RETERR(-ENOMEM); + inv_entry->ie_sd = isd_base; + return 0; +} + +/* this can be installed as ->init_inv_entry () method of + item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). + Copies data from on-disk stat-data format into light-weight analog of inode . + Doesn't hanlde stat-data extensions. */ + +static void sd_base_load(struct inv_entry *inv_entry, char *sd) +{ + reiser4_stat_data_base *sd_base; + + assert("edward-101", inv_entry != NULL); + assert("edward-101", inv_entry->ie_sd != NULL); + assert("edward-102", sd != NULL); + + sd_base = (reiser4_stat_data_base *) sd; + inv_entry->incore_sd_base->isd_mode = d16tocpu(&sd_base->mode); + inv_entry->incore_sd_base->isd_nlink = d32tocpu(&sd_base->nlink); + inv_entry->incore_sd_base->isd_size = d64tocpu(&sd_base->size); + inv_entry->incore_sd_base->isd_data = NULL; +} + +/* initialise incore stat-data */ + +static void init_incore_sd_base(struct inv_entry *inv_entry, coord_t * coord) +{ + reiser4_plugin *plugin = item_plugin_by_coord(coord); + void *body = item_body_by_coord(coord); + + assert("edward-103", inv_entry != NULL); + assert("edward-104", plugin != NULL); + assert("edward-105", body != NULL); + + sd_base_load(inv_entry, body); +} + +/* takes a key or filename and allocates new invert_entry, + init and adds it into the list, + we use lookup_sd_by_key() for light-weight files and VFS lookup by filename */ + +int get_inv_entry(struct inode *invert_inode, /* inode of invert's body */ + inv_entry_type type, /* LIGHT-WEIGHT or ORDINARY */ + const reiser4_key * key, /* key of invert entry stat-data */ + char *filename, /* filename of the file to be opened */ + int flags, int mode) +{ + int result; + struct inv_entry *ientry; + + assert("edward-107", invert_inode != NULL); + + ientry = allocate_inv_entry(); + if (IS_ERR(ientry)) + return (PTR_ERR(ientry)); + + if (type == LIGHT_WEIGHT_FILE) { + coord_t coord; + lock_handle lh; + + assert("edward-108", key != NULL); + + init_coord(&coord); + init_lh(&lh); + result = + lookup_sd_by_key(reiser4_tree_by_inode(invert_inode), + ZNODE_READ_LOCK, &coord, &lh, key); + if (result == 0) + init_incore_sd_base(ientry, coord); + + done_lh(&lh); + done_coord(&coord); + return (result); + } else { + struct file *file = filp_open(filename, flags, mode); + /* FIXME_EDWARD here we need to check if we + did't follow to any mount point */ + + assert("edward-108", filename != NULL); + + if (IS_ERR(file)) + return (PTR_ERR(file)); + ientry->ie_file = file; + return 0; + } +} + +/* takes inode of invert, reads the body of this invert, parses it, + opens all invert entries and return pointer on the first inv_entry */ + +struct inv_entry *open_invert(struct file *invert_file) +{ + +} + +ssize_t subfile_read(struct *invert_entry, flow * f) +{ + +} + +ssize_t subfile_write(struct *invert_entry, flow * f) +{ + +} + +ssize_t invert_read(struct *file, flow * f) +{ + +} + +ssize_t invert_write(struct *file, flow * f) +{ + +} + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/symfile.c b/fs/reiser4/plugin/file/symfile.c new file mode 100644 index 000000000000..814dfb8b2cf8 --- /dev/null +++ b/fs/reiser4/plugin/file/symfile.c @@ -0,0 +1,87 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Symfiles are a generalization of Unix symlinks. + + A symfile when read behaves as though you took its contents and + substituted them into the reiser4 naming system as the right hand side + of an assignment, and then read that which you had assigned to it. + + A key issue for symfiles is how to implement writes through to + subfiles. In general, one must have some method of determining what + of that which is written to the symfile is written to what subfile. + This can be done by use of custom plugin methods written by users, or + by using a few general methods we provide for those willing to endure + the insertion of delimiters into what is read. + + Writing to symfiles without delimiters to denote what is written to + what subfile is not supported by any plugins we provide in this + release. Our most sophisticated support for writes is that embodied + by the invert plugin (see invert.c). + + A read only version of the /etc/passwd file might be + constructed as a symfile whose contents are as follows: + + /etc/passwd/userlines/* + + or + + /etc/passwd/userlines/demidov+/etc/passwd/userlines/edward+/etc/passwd/userlines/reiser+/etc/passwd/userlines/root + + or + + /etc/passwd/userlines/(demidov+edward+reiser+root) + + A symfile with contents + + /filenameA+"(some text stored in the uninvertable symfile)+/filenameB + + will return when read + + The contents of filenameAsome text stored in the uninvertable symfileThe contents of filenameB + + and write of what has been read will not be possible to implement as + an identity operation because there are no delimiters denoting the + boundaries of what is to be written to what subfile. + + Note that one could make this a read/write symfile if one specified + delimiters, and the write method understood those delimiters delimited + what was written to subfiles. + + So, specifying the symfile in a manner that allows writes: + + /etc/passwd/userlines/demidov+"( + )+/etc/passwd/userlines/edward+"( + )+/etc/passwd/userlines/reiser+"( + )+/etc/passwd/userlines/root+"( + ) + + or + + /etc/passwd/userlines/(demidov+"( + )+edward+"( + )+reiser+"( + )+root+"( + )) + + and the file demidov might be specified as: + + /etc/passwd/userlines/demidov/username+"(:)+/etc/passwd/userlines/demidov/password+"(:)+/etc/passwd/userlines/demidov/userid+"(:)+/etc/passwd/userlines/demidov/groupid+"(:)+/etc/passwd/userlines/demidov/gecos+"(:)+/etc/passwd/userlines/demidov/home+"(:)+/etc/passwd/userlines/demidov/shell + + or + + /etc/passwd/userlines/demidov/(username+"(:)+password+"(:)+userid+"(:)+groupid+"(:)+gecos+"(:)+home+"(:)+shell) + + Notice that if the file demidov has a carriage return in it, the + parsing fails, but then if you put carriage returns in the wrong place + in a normal /etc/passwd file it breaks things also. + + Note that it is forbidden to have no text between two interpolations + if one wants to be able to define what parts of a write go to what + subfiles referenced in an interpolation. 
+ + If one wants to be able to add new lines by writing to the file, one + must either write a custom plugin for /etc/passwd that knows how to + name an added line, or one must use an invert, or one must use a more + sophisticated symfile syntax that we are not planning to write for + version 4.0. +*/ diff --git a/fs/reiser4/plugin/file/symlink.c b/fs/reiser4/plugin/file/symlink.c new file mode 100644 index 000000000000..bcf3ef80c4dc --- /dev/null +++ b/fs/reiser4/plugin/file/symlink.c @@ -0,0 +1,95 @@ +/* Copyright 2002, 2003, 2005 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../inode.h" + +#include <linux/types.h> +#include <linux/fs.h> + +/* file plugin methods specific for symlink files + (SYMLINK_FILE_PLUGIN_ID) */ + +/* this is implementation of create_object method of file plugin for + SYMLINK_FILE_PLUGIN_ID + */ + +/** + * reiser4_create_symlink - create_object of file plugin for SYMLINK_FILE_PLUGIN_ID + * @symlink: inode of symlink object + * @dir: inode of parent directory + * @info: parameters of new object + * + * Inserts stat data with symlink extension where into the tree. + */ +int reiser4_create_symlink(struct inode *symlink, + struct inode *dir UNUSED_ARG, + reiser4_object_create_data *data /* info passed to us + * this is filled by + * reiser4() syscall + * in particular */) +{ + int result; + + assert("nikita-680", symlink != NULL); + assert("nikita-681", S_ISLNK(symlink->i_mode)); + assert("nikita-685", reiser4_inode_get_flag(symlink, REISER4_NO_SD)); + assert("nikita-682", dir != NULL); + assert("nikita-684", data != NULL); + assert("nikita-686", data->id == SYMLINK_FILE_PLUGIN_ID); + + /* + * stat data of symlink has symlink extension in which we store + * symlink content, that is, path symlink is pointing to. 
+ */ + reiser4_inode_data(symlink)->extmask |= (1 << SYMLINK_STAT); + + assert("vs-838", symlink->i_private == NULL); + symlink->i_private = (void *)data->name; + + assert("vs-843", symlink->i_size == 0); + INODE_SET_FIELD(symlink, i_size, strlen(data->name)); + + /* insert stat data appended with data->name */ + result = inode_file_plugin(symlink)->write_sd_by_inode(symlink); + if (result) { + /* FIXME-VS: Make sure that symlink->i_private is not attached + to kmalloced data */ + INODE_SET_FIELD(symlink, i_size, 0); + } else { + assert("vs-849", symlink->i_private + && reiser4_inode_get_flag(symlink, + REISER4_GENERIC_PTR_USED)); + assert("vs-850", + !memcmp((char *)symlink->i_private, data->name, + (size_t) symlink->i_size + 1)); + } + return result; +} + +/* this is implementation of destroy_inode method of file plugin for + SYMLINK_FILE_PLUGIN_ID + */ +void destroy_inode_symlink(struct inode *inode) +{ + assert("edward-799", + inode_file_plugin(inode) == + file_plugin_by_id(SYMLINK_FILE_PLUGIN_ID)); + assert("edward-800", !is_bad_inode(inode) && is_inode_loaded(inode)); + assert("edward-801", reiser4_inode_get_flag(inode, + REISER4_GENERIC_PTR_USED)); + assert("vs-839", S_ISLNK(inode->i_mode)); + + kfree(inode->i_private); + inode->i_private = NULL; + reiser4_inode_clr_flag(inode, REISER4_GENERIC_PTR_USED); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/file/tail_conversion.c b/fs/reiser4/plugin/file/tail_conversion.c new file mode 100644 index 000000000000..a21e464845a4 --- /dev/null +++ b/fs/reiser4/plugin/file/tail_conversion.c @@ -0,0 +1,763 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../inode.h" +#include "../../super.h" +#include "../../page_cache.h" +#include "../../carry.h" +#include "../../safe_link.h" +#include "../../vfs_ops.h" + +#include <linux/writeback.h> + +/* this file contains: + tail2extent and extent2tail */ + +/* exclusive access to a file is acquired when file state changes: tail2extent, empty2tail, extent2tail, etc */ +void get_exclusive_access(struct unix_file_info * uf_info) +{ + assert("nikita-3028", reiser4_schedulable()); + assert("nikita-3047", LOCK_CNT_NIL(inode_sem_w)); + assert("nikita-3048", LOCK_CNT_NIL(inode_sem_r)); + /* + * "deadlock avoidance": sometimes we commit a transaction under + * rw-semaphore on a file. Such commit can deadlock with another + * thread that captured some block (hence preventing atom from being + * committed) and waits on rw-semaphore. + */ + reiser4_txn_restart_current(); + LOCK_CNT_INC(inode_sem_w); + down_write(&uf_info->latch); + uf_info->exclusive_use = 1; + assert("vs-1713", uf_info->ea_owner == NULL); + assert("vs-1713", atomic_read(&uf_info->nr_neas) == 0); + ON_DEBUG(uf_info->ea_owner = current); +} + +void drop_exclusive_access(struct unix_file_info * uf_info) +{ + assert("vs-1714", uf_info->ea_owner == current); + assert("vs-1715", atomic_read(&uf_info->nr_neas) == 0); + ON_DEBUG(uf_info->ea_owner = NULL); + uf_info->exclusive_use = 0; + up_write(&uf_info->latch); + assert("nikita-3049", LOCK_CNT_NIL(inode_sem_r)); + assert("nikita-3049", LOCK_CNT_GTZ(inode_sem_w)); + LOCK_CNT_DEC(inode_sem_w); + reiser4_txn_restart_current(); +} + +/** + * nea_grabbed - do something when file semaphore is down_read-ed + * @uf_info: + * + * This is called when nonexclisive access is obtained on file. 
All it does is + * for debugging purposes. + */ +static void nea_grabbed(struct unix_file_info *uf_info) +{ +#if REISER4_DEBUG + LOCK_CNT_INC(inode_sem_r); + assert("vs-1716", uf_info->ea_owner == NULL); + atomic_inc(&uf_info->nr_neas); + uf_info->last_reader = current; +#endif +} + +/** + * get_nonexclusive_access - get nonexclusive access to a file + * @uf_info: unix file specific part of inode to obtain access to + * + * Nonexclusive access is obtained on a file before read, write, readpage. + */ +void get_nonexclusive_access(struct unix_file_info *uf_info) +{ + assert("nikita-3029", reiser4_schedulable()); + assert("nikita-3361", get_current_context()->trans->atom == NULL); + + down_read(&uf_info->latch); + nea_grabbed(uf_info); +} + +/** + * try_to_get_nonexclusive_access - try to get nonexclusive access to a file + * @uf_info: unix file specific part of inode to obtain access to + * + * Non-blocking version of nonexclusive access obtaining. + */ +int try_to_get_nonexclusive_access(struct unix_file_info *uf_info) +{ + int result; + + result = down_read_trylock(&uf_info->latch); + if (result) + nea_grabbed(uf_info); + return result; +} + +void drop_nonexclusive_access(struct unix_file_info * uf_info) +{ + assert("vs-1718", uf_info->ea_owner == NULL); + assert("vs-1719", atomic_read(&uf_info->nr_neas) > 0); + ON_DEBUG(atomic_dec(&uf_info->nr_neas)); + + up_read(&uf_info->latch); + + LOCK_CNT_DEC(inode_sem_r); + reiser4_txn_restart_current(); +} + +/* part of tail2extent. Cut all items covering @count bytes starting from + @offset */ +/* Audited by: green(2002.06.15) */ +static int cut_formatting_items(struct inode *inode, loff_t offset, int count) +{ + reiser4_key from, to; + + /* AUDIT: How about putting an assertion here, what would check + all provided range is covered by tail items only? */ + /* key of first byte in the range to be cut */ + inode_file_plugin(inode)->key_by_inode(inode, offset, &from); + + /* key of last byte in that range */ + to = from; + set_key_offset(&to, (__u64) (offset + count - 1)); + + /* cut everything between those keys */ + return reiser4_cut_tree(reiser4_tree_by_inode(inode), &from, &to, + inode, 0); +} + +static void release_all_pages(struct page **pages, unsigned nr_pages) +{ + unsigned i; + + for (i = 0; i < nr_pages; i++) { + if (pages[i] == NULL) { +#if REISER4_DEBUG + unsigned j; + for (j = i + 1; j < nr_pages; j++) + assert("vs-1620", pages[j] == NULL); +#endif + break; + } + put_page(pages[i]); + pages[i] = NULL; + } +} + +/* part of tail2extent. replace tail items with extent one. Content of tail + items (@count bytes) being cut are copied already into + pages. 
extent_writepage method is called to create extents corresponding to + those pages */ +static int replace(struct inode *inode, struct page **pages, unsigned nr_pages, int count) +{ + int result; + unsigned i; + STORE_COUNTERS; + + if (nr_pages == 0) + return 0; + + assert("vs-596", pages[0]); + + /* cut copied items */ + result = cut_formatting_items(inode, page_offset(pages[0]), count); + if (result) + return result; + + CHECK_COUNTERS; + + /* put into tree replacement for just removed items: extent item, namely */ + for (i = 0; i < nr_pages; i++) { + result = add_to_page_cache_lru(pages[i], inode->i_mapping, + pages[i]->index, + mapping_gfp_mask(inode-> + i_mapping)); + if (result) + break; + SetPageUptodate(pages[i]); + set_page_dirty_notag(pages[i]); + unlock_page(pages[i]); + result = find_or_create_extent(pages[i]); + if (result) { + /* + * Unsuccess in critical place: + * tail has been removed, + * but extent hasn't been created + */ + warning("edward-1572", + "Report the error code %i to developers. Run FSCK", + result); + break; + } + } + return result; +} + +#define TAIL2EXTENT_PAGE_NUM 3 /* number of pages to fill before cutting tail + * items */ + +static int reserve_tail2extent_iteration(struct inode *inode) +{ + reiser4_block_nr unformatted_nodes; + reiser4_tree *tree; + + tree = reiser4_tree_by_inode(inode); + + /* number of unformatted nodes which will be created */ + unformatted_nodes = TAIL2EXTENT_PAGE_NUM; + + /* + * space required for one iteration of extent->tail conversion: + * + * 1. kill N tail items + * + * 2. insert TAIL2EXTENT_PAGE_NUM unformatted nodes + * + * 3. insert TAIL2EXTENT_PAGE_NUM (worst-case single-block + * extents) extent units. + * + * 4. drilling to the leaf level by coord_by_key() + * + * 5. possible update of stat-data + * + */ + grab_space_enable(); + return reiser4_grab_space + (2 * tree->height + + TAIL2EXTENT_PAGE_NUM + + TAIL2EXTENT_PAGE_NUM * estimate_one_insert_into_item(tree) + + 1 + estimate_one_insert_item(tree) + + inode_file_plugin(inode)->estimate.update(inode), BA_CAN_COMMIT); +} + +/* clear stat data's flag indicating that conversion is being converted */ +static int complete_conversion(struct inode *inode) +{ + int result; + + grab_space_enable(); + result = + reiser4_grab_space(inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT); + if (result == 0) { + reiser4_inode_clr_flag(inode, REISER4_PART_MIXED); + result = reiser4_update_sd(inode); + } + if (result) + warning("vs-1696", "Failed to clear converting bit of %llu: %i", + (unsigned long long)get_inode_oid(inode), result); + return 0; +} + +/** + * find_start + * @inode: + * @id: + * @offset: + * + * this is used by tail2extent and extent2tail to detect where previous + * uncompleted conversion stopped + */ +static int find_start(struct inode *inode, reiser4_plugin_id id, __u64 *offset) +{ + int result; + lock_handle lh; + coord_t coord; + struct unix_file_info *ufo; + int found; + reiser4_key key; + + ufo = unix_file_inode_data(inode); + init_lh(&lh); + result = 0; + found = 0; + inode_file_plugin(inode)->key_by_inode(inode, *offset, &key); + do { + init_lh(&lh); + result = find_file_item_nohint(&coord, &lh, &key, + ZNODE_READ_LOCK, inode); + + if (result == CBK_COORD_FOUND) { + if (coord.between == AT_UNIT) { + /*coord_clear_iplug(&coord); */ + result = zload(coord.node); + if (result == 0) { + if (item_id_by_coord(&coord) == id) + found = 1; + else + item_plugin_by_coord(&coord)->s. 
+ file.append_key(&coord, + &key); + zrelse(coord.node); + } + } else + result = RETERR(-ENOENT); + } + done_lh(&lh); + } while (result == 0 && !found); + *offset = get_key_offset(&key); + return result; +} + +/** + * tail2extent + * @uf_info: + * + * + */ +int tail2extent(struct unix_file_info *uf_info) +{ + int result; + reiser4_key key; /* key of next byte to be moved to page */ + char *p_data; /* data of page */ + unsigned page_off = 0, /* offset within the page where to copy data */ + count; /* number of bytes of item which can be + * copied to page */ + struct page *pages[TAIL2EXTENT_PAGE_NUM]; + struct page *page; + int done; /* set to 1 when all file is read */ + char *item; + int i; + struct inode *inode; + int first_iteration; + int bytes; + __u64 offset; + + assert("nikita-3362", ea_obtained(uf_info)); + inode = unix_file_info_to_inode(uf_info); + assert("nikita-3412", !IS_RDONLY(inode)); + assert("vs-1649", uf_info->container != UF_CONTAINER_EXTENTS); + assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)); + + offset = 0; + first_iteration = 1; + result = 0; + if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + /* + * file is marked on disk as there was a conversion which did + * not complete due to either crash or some error. Find which + * offset tail conversion stopped at + */ + result = find_start(inode, FORMATTING_ID, &offset); + if (result == -ENOENT) { + /* no tail items found, everything is converted */ + uf_info->container = UF_CONTAINER_EXTENTS; + complete_conversion(inode); + return 0; + } else if (result != 0) + /* some other error */ + return result; + first_iteration = 0; + } + + reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV); + + /* get key of first byte of a file */ + inode_file_plugin(inode)->key_by_inode(inode, offset, &key); + + done = 0; + while (done == 0) { + memset(pages, 0, sizeof(pages)); + result = reserve_tail2extent_iteration(inode); + if (result != 0) { + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + goto out; + } + if (first_iteration) { + reiser4_inode_set_flag(inode, REISER4_PART_MIXED); + reiser4_update_sd(inode); + first_iteration = 0; + } + bytes = 0; + for (i = 0; i < sizeof_array(pages) && done == 0; i++) { + assert("vs-598", + (get_key_offset(&key) & ~PAGE_MASK) == 0); + page = alloc_page(reiser4_ctx_gfp_mask_get()); + if (!page) { + result = RETERR(-ENOMEM); + goto error; + } + + page->index = + (unsigned long)(get_key_offset(&key) >> + PAGE_SHIFT); + /* + * usually when one is going to longterm lock znode (as + * find_file_item does, for instance) he must not hold + * locked pages. However, there is an exception for + * case tail2extent. Pages appearing here are not + * reachable to everyone else, they are clean, they do + * not have jnodes attached so keeping them locked do + * not risk deadlock appearance + */ + assert("vs-983", !PagePrivate(page)); + reiser4_invalidate_pages(inode->i_mapping, page->index, + 1, 0); + + for (page_off = 0; page_off < PAGE_SIZE;) { + coord_t coord; + lock_handle lh; + + /* get next item */ + /* FIXME: we might want to readahead here */ + init_lh(&lh); + result = + find_file_item_nohint(&coord, &lh, &key, + ZNODE_READ_LOCK, + inode); + if (result != CBK_COORD_FOUND) { + /* + * error happened of not items of file + * were found + */ + done_lh(&lh); + put_page(page); + goto error; + } + + if (coord.between == AFTER_UNIT) { + /* + * end of file is reached. 
Padd page + * with zeros + */ + done_lh(&lh); + done = 1; + p_data = kmap_atomic(page); + memset(p_data + page_off, 0, + PAGE_SIZE - page_off); + kunmap_atomic(p_data); + break; + } + + result = zload(coord.node); + if (result) { + put_page(page); + done_lh(&lh); + goto error; + } + assert("vs-856", coord.between == AT_UNIT); + item = ((char *)item_body_by_coord(&coord)) + + coord.unit_pos; + + /* how many bytes to copy */ + count = + item_length_by_coord(&coord) - + coord.unit_pos; + /* limit length of copy to end of page */ + if (count > PAGE_SIZE - page_off) + count = PAGE_SIZE - page_off; + + /* + * copy item (as much as will fit starting from + * the beginning of the item) into the page + */ + p_data = kmap_atomic(page); + memcpy(p_data + page_off, item, count); + kunmap_atomic(p_data); + + page_off += count; + bytes += count; + set_key_offset(&key, + get_key_offset(&key) + count); + + zrelse(coord.node); + done_lh(&lh); + } /* end of loop which fills one page by content of + * formatting items */ + + if (page_off) { + /* something was copied into page */ + pages[i] = page; + } else { + put_page(page); + assert("vs-1648", done == 1); + break; + } + } /* end of loop through pages of one conversion iteration */ + + if (i > 0) { + result = replace(inode, pages, i, bytes); + release_all_pages(pages, sizeof_array(pages)); + if (result) + goto error; + /* + * We have to drop exclusive access to avoid deadlock + * which may happen because called by reiser4_writepages + * capture_unix_file requires to get non-exclusive + * access to a file. It is safe to drop EA in the middle + * of tail2extent conversion because write_unix_file, + * setattr_unix_file(truncate), mmap_unix_file, + * release_unix_file(extent2tail) checks if conversion + * is not in progress (see comments before + * get_exclusive_access_careful(). + * Other processes that acquire non-exclusive access + * (read_unix_file, reiser4_writepages, etc) should work + * on partially converted files. + */ + drop_exclusive_access(uf_info); + /* throttle the conversion */ + reiser4_throttle_write(inode); + get_exclusive_access(uf_info); + + /* + * nobody is allowed to complete conversion but a + * process which started it + */ + assert("", reiser4_inode_get_flag(inode, + REISER4_PART_MIXED)); + } + } + if (result == 0) { + /* file is converted to extent items */ + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + assert("vs-1697", reiser4_inode_get_flag(inode, + REISER4_PART_MIXED)); + + uf_info->container = UF_CONTAINER_EXTENTS; + complete_conversion(inode); + } else { + /* + * conversion is not complete. Inode was already marked as + * REISER4_PART_MIXED and stat-data were updated at the first + * iteration of the loop above. + */ + error: + release_all_pages(pages, sizeof_array(pages)); + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + warning("edward-1548", "Partial conversion of %llu: %i", + (unsigned long long)get_inode_oid(inode), result); + } + + out: + /* this flag should be cleared, otherwise get_exclusive_access_careful() + will fall into infinite loop */ + assert("edward-1549", !reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)); + return result; +} + +static int reserve_extent2tail_iteration(struct inode *inode) +{ + reiser4_tree *tree; + + tree = reiser4_tree_by_inode(inode); + /* + * reserve blocks for (in this order): + * + * 1. removal of extent item + * + * 2. insertion of tail by insert_flow() + * + * 3. drilling to the leaf level by coord_by_key() + * + * 4. 
possible update of stat-data + */ + grab_space_enable(); + return reiser4_grab_space + (estimate_one_item_removal(tree) + + estimate_insert_flow(tree->height) + + 1 + estimate_one_insert_item(tree) + + inode_file_plugin(inode)->estimate.update(inode), BA_CAN_COMMIT); +} + +/* for every page of file: read page, cut part of extent pointing to this page, + put data of page tree by tail item */ +int extent2tail(struct file * file, struct unix_file_info *uf_info) +{ + int result; + struct inode *inode; + struct page *page; + unsigned long num_pages, i; + unsigned long start_page; + reiser4_key from; + reiser4_key to; + unsigned count; + __u64 offset; + + assert("nikita-3362", ea_obtained(uf_info)); + inode = unix_file_info_to_inode(uf_info); + assert("nikita-3412", !IS_RDONLY(inode)); + assert("vs-1649", uf_info->container != UF_CONTAINER_TAILS); + assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)); + + offset = 0; + if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { + /* + * file is marked on disk as there was a conversion which did + * not complete due to either crash or some error. Find which + * offset tail conversion stopped at + */ + result = find_start(inode, EXTENT_POINTER_ID, &offset); + if (result == -ENOENT) { + /* no extent found, everything is converted */ + uf_info->container = UF_CONTAINER_TAILS; + complete_conversion(inode); + return 0; + } else if (result != 0) + /* some other error */ + return result; + } + reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV); + + /* number of pages in the file */ + num_pages = + (inode->i_size + - offset + PAGE_SIZE - 1) >> PAGE_SHIFT; + start_page = offset >> PAGE_SHIFT; + + inode_file_plugin(inode)->key_by_inode(inode, offset, &from); + to = from; + + result = 0; + for (i = 0; i < num_pages; i++) { + __u64 start_byte; + + result = reserve_extent2tail_iteration(inode); + if (result != 0) + break; + if (i == 0 && offset == 0) { + reiser4_inode_set_flag(inode, REISER4_PART_MIXED); + reiser4_update_sd(inode); + } + + page = read_mapping_page(inode->i_mapping, + (unsigned)(i + start_page), NULL); + if (IS_ERR(page)) { + result = PTR_ERR(page); + warning("edward-1569", + "Can not read page %lu of %lu: %i", + i, num_pages, result); + break; + } + + wait_on_page_locked(page); + + if (!PageUptodate(page)) { + put_page(page); + result = RETERR(-EIO); + break; + } + + /* cut part of file we have read */ + start_byte = (__u64) ((i + start_page) << PAGE_SHIFT); + set_key_offset(&from, start_byte); + set_key_offset(&to, start_byte + PAGE_SIZE - 1); + /* + * reiser4_cut_tree_object() returns -E_REPEAT to allow atom + * commits during over-long truncates. But + * extent->tail conversion should be performed in one + * transaction. + */ + result = reiser4_cut_tree(reiser4_tree_by_inode(inode), &from, + &to, inode, 0); + + if (result) { + put_page(page); + warning("edward-1570", + "Can not delete converted chunk: %i", + result); + break; + } + + /* put page data into tree via tail_write */ + count = PAGE_SIZE; + if ((i == (num_pages - 1)) && + (inode->i_size & ~PAGE_MASK)) + /* last page can be incompleted */ + count = (inode->i_size & ~PAGE_MASK); + while (count) { + loff_t pos = start_byte; + + assert("edward-1537", + file != NULL && file->f_path.dentry != NULL); + assert("edward-1538", + file_inode(file) == inode); + + result = reiser4_write_tail_noreserve(file, inode, + (char __user *)kmap(page), + count, &pos); + kunmap(page); + /* FIXME: + may be put_file_hint() instead ? 
*/ + reiser4_free_file_fsdata(file); + if (result <= 0) { + /* + * Unsuccess in critical place: + * extent has been removed, + * but tail hasn't been created + */ + warning("edward-1571", + "Report the error code %i to developers. Run FSCK", + result); + put_page(page); + reiser4_inode_clr_flag(inode, + REISER4_PART_IN_CONV); + return result; + } + count -= result; + } + + /* release page */ + lock_page(page); + /* page is already detached from jnode and mapping. */ + assert("vs-1086", page->mapping == NULL); + assert("nikita-2690", + (!PagePrivate(page) && jprivate(page) == 0)); + /* waiting for writeback completion with page lock held is + * perfectly valid. */ + wait_on_page_writeback(page); + reiser4_drop_page(page); + /* release reference taken by read_cache_page() above */ + put_page(page); + + drop_exclusive_access(uf_info); + /* throttle the conversion */ + reiser4_throttle_write(inode); + get_exclusive_access(uf_info); + /* + * nobody is allowed to complete conversion but a process which + * started it + */ + assert("", reiser4_inode_get_flag(inode, REISER4_PART_MIXED)); + } + + reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); + + if (i == num_pages) { + /* file is converted to formatted items */ + assert("vs-1698", reiser4_inode_get_flag(inode, + REISER4_PART_MIXED)); + assert("vs-1260", + inode_has_no_jnodes(reiser4_inode_data(inode))); + + uf_info->container = UF_CONTAINER_TAILS; + complete_conversion(inode); + return 0; + } + /* + * conversion is not complete. Inode was already marked as + * REISER4_PART_MIXED and stat-data were updated at the first + * iteration of the loop above. + */ + warning("nikita-2282", + "Partial conversion of %llu: %lu of %lu: %i", + (unsigned long long)get_inode_oid(inode), i, + num_pages, result); + + /* this flag should be cleared, otherwise get_exclusive_access_careful() + will fall into infinite loop */ + assert("edward-1550", !reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)); + return result; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/file_ops.c b/fs/reiser4/plugin/file_ops.c new file mode 100644 index 000000000000..fbdb8c365e5a --- /dev/null +++ b/fs/reiser4/plugin/file_ops.c @@ -0,0 +1,119 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + reiser4/README */ + +/* this file contains typical implementations for some of methods of + struct file_operations and of struct address_space_operations +*/ + +#include "../inode.h" +#include "object.h" + +/* file operations */ + +/* implementation of vfs's llseek method of struct file_operations for + typical directory can be found in file_ops_readdir.c +*/ +loff_t reiser4_llseek_dir_common(struct file *, loff_t, int origin); + +/* implementation of vfs's iterate method of struct file_operations for + typical directory can be found in file_ops_readdir.c +*/ +int reiser4_iterate_common(struct file *, struct dir_context *); + +/** + * reiser4_release_dir_common - release of struct file_operations + * @inode: inode of released file + * @file: file to release + * + * Implementation of release method of struct file_operations for typical + * directory. All it does is freeing of reiser4 specific file data. 
+*/ +int reiser4_release_dir_common(struct inode *inode, struct file *file) +{ + reiser4_context *ctx; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + reiser4_free_file_fsdata(file); + reiser4_exit_context(ctx); + return 0; +} + +/* this is common implementation of vfs's fsync method of struct + file_operations +*/ +int reiser4_sync_common(struct file *file, loff_t start, + loff_t end, int datasync) +{ + reiser4_context *ctx; + int result; + struct dentry *dentry = file->f_path.dentry; + + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + result = txnmgr_force_commit_all(dentry->d_inode->i_sb, 0); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* + * common sync method for regular files. + * + * We are trying to be smart here. Instead of committing all atoms (original + * solution), we scan dirty pages of this file and commit all atoms they are + * part of. + * + * Situation is complicated by anonymous pages: i.e., extent-less pages + * dirtied through mmap. Fortunately sys_fsync() first calls + * filemap_fdatawrite() that will ultimately call reiser4_writepages_dispatch, + * insert all missing extents and capture anonymous pages. + */ +int reiser4_sync_file_common(struct file *file, loff_t start, loff_t end, int datasync) +{ + reiser4_context *ctx; + txn_atom *atom; + reiser4_block_nr reserve; + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = file->f_mapping->host; + + int err = filemap_write_and_wait_range(file->f_mapping->host->i_mapping, start, end); + if (err) + return err; + + ctx = reiser4_init_context(dentry->d_inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + inode_lock(inode); + + reserve = estimate_update_common(dentry->d_inode); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) { + reiser4_exit_context(ctx); + inode_unlock(inode); + return RETERR(-ENOSPC); + } + write_sd_by_inode_common(dentry->d_inode); + + atom = get_current_atom_locked(); + spin_lock_txnh(ctx->trans); + force_commit_atom(ctx->trans); + reiser4_exit_context(ctx); + inode_unlock(inode); + + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/file_ops_readdir.c b/fs/reiser4/plugin/file_ops_readdir.c new file mode 100644 index 000000000000..0cde411eab18 --- /dev/null +++ b/fs/reiser4/plugin/file_ops_readdir.c @@ -0,0 +1,658 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "../inode.h" + +/* return true, iff @coord points to the valid directory item that is part of + * @inode directory. */ +static int is_valid_dir_coord(struct inode *inode, coord_t *coord) +{ + return plugin_of_group(item_plugin_by_coord(coord), + DIR_ENTRY_ITEM_TYPE) && + inode_file_plugin(inode)->owns_item(inode, coord); +} + +/* compare two logical positions within the same directory */ +static cmp_t dir_pos_cmp(const struct dir_pos *p1, const struct dir_pos *p2) +{ + cmp_t result; + + assert("nikita-2534", p1 != NULL); + assert("nikita-2535", p2 != NULL); + + result = de_id_cmp(&p1->dir_entry_key, &p2->dir_entry_key); + if (result == EQUAL_TO) { + int diff; + + diff = p1->pos - p2->pos; + result = + (diff < 0) ? LESS_THAN : (diff ? GREATER_THAN : EQUAL_TO); + } + return result; +} + +/* see comment before reiser4_readdir_common() for overview of why "adjustment" + * is necessary. 
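+ *
+ * A concrete case: a descriptor in the middle of readdir sits at logical
+ * entry_no N. If another process unlinks a name that collates before that
+ * position, every remaining entry shifts left by one, so entry_no must be
+ * decremented (and the duplicate-key counter adjusted when the keys
+ * coincide); otherwise the next getdents(2) would skip or repeat a name.
+ * That is what adjust_dir_pos() below does with @adj.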
*/ +static void +adjust_dir_pos(struct file *dir, struct readdir_pos *readdir_spot, + const struct dir_pos *mod_point, int adj) +{ + struct dir_pos *pos; + + /* + * new directory entry was added (adj == +1) or removed (adj == -1) at + * the @mod_point. Directory file descriptor @dir is doing readdir and + * is currently positioned at @readdir_spot. Latter has to be updated + * to maintain stable readdir. + */ + /* directory is positioned to the beginning. */ + if (readdir_spot->entry_no == 0) + return; + + pos = &readdir_spot->position; + switch (dir_pos_cmp(mod_point, pos)) { + case LESS_THAN: + /* @mod_pos is _before_ @readdir_spot, that is, entry was + * added/removed on the left (in key order) of current + * position. */ + /* logical number of directory entry readdir is "looking" at + * changes */ + readdir_spot->entry_no += adj; + assert("nikita-2577", + ergo(dir != NULL, + reiser4_get_dir_fpos(dir, dir->f_pos) + adj >= 0)); + if (de_id_cmp(&pos->dir_entry_key, + &mod_point->dir_entry_key) == EQUAL_TO) { + assert("nikita-2575", mod_point->pos < pos->pos); + /* + * if entry added/removed has the same key as current + * for readdir, update counter of duplicate keys in + * @readdir_spot. + */ + pos->pos += adj; + } + break; + case GREATER_THAN: + /* directory is modified after @pos: nothing to do. */ + break; + case EQUAL_TO: + /* cannot insert an entry readdir is looking at, because it + already exists. */ + assert("nikita-2576", adj < 0); + /* directory entry to which @pos points to is being + removed. + + NOTE-NIKITA: Right thing to do is to update @pos to point + to the next entry. This is complex (we are under spin-lock + for one thing). Just rewind it to the beginning. Next + readdir will have to scan the beginning of + directory. Proper solution is to use semaphore in + spin lock's stead and use rewind_right() here. + + NOTE-NIKITA: now, semaphore is used, so... + */ + memset(readdir_spot, 0, sizeof *readdir_spot); + } +} + +/* scan all file-descriptors for this directory and adjust their + positions respectively. Should be used by implementations of + add_entry and rem_entry of dir plugin */ +void reiser4_adjust_dir_file(struct inode *dir, const struct dentry *de, + int offset, int adj) +{ + reiser4_file_fsdata *scan; + struct dir_pos mod_point; + + assert("nikita-2536", dir != NULL); + assert("nikita-2538", de != NULL); + assert("nikita-2539", adj != 0); + + build_de_id(dir, &de->d_name, &mod_point.dir_entry_key); + mod_point.pos = offset; + + spin_lock_inode(dir); + + /* + * new entry was added/removed in directory @dir. Scan all file + * descriptors for @dir that are currently involved into @readdir and + * update them. + */ + + list_for_each_entry(scan, get_readdir_list(dir), dir.linkage) + adjust_dir_pos(scan->back, &scan->dir.readdir, &mod_point, adj); + + spin_unlock_inode(dir); +} + +/* + * traverse tree to start/continue readdir from the readdir position @pos. 
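+ * (The directory plugin's build_readdir_key() provides the key of the
+ * first entry; the tap is positioned there by reiser4_object_lookup() and
+ * then rewound right by pos->position.pos to step over entries that share
+ * that key.)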
+ */ +static int dir_go_to(struct file *dir, struct readdir_pos *pos, tap_t *tap) +{ + reiser4_key key; + int result; + struct inode *inode; + + assert("nikita-2554", pos != NULL); + + inode = file_inode(dir); + result = inode_dir_plugin(inode)->build_readdir_key(dir, &key); + if (result != 0) + return result; + result = reiser4_object_lookup(inode, + &key, + tap->coord, + tap->lh, + tap->mode, + FIND_EXACT, + LEAF_LEVEL, LEAF_LEVEL, + 0, &tap->ra_info); + if (result == CBK_COORD_FOUND) + result = rewind_right(tap, (int)pos->position.pos); + else { + tap->coord->node = NULL; + done_lh(tap->lh); + result = RETERR(-EIO); + } + return result; +} + +/* + * handling of non-unique keys: calculate at what ordinal position within + * sequence of directory items with identical keys @pos is. + */ +static int set_pos(struct inode *inode, struct readdir_pos *pos, tap_t *tap) +{ + int result; + coord_t coord; + lock_handle lh; + tap_t scan; + de_id *did; + reiser4_key de_key; + + coord_init_zero(&coord); + init_lh(&lh); + reiser4_tap_init(&scan, &coord, &lh, ZNODE_READ_LOCK); + reiser4_tap_copy(&scan, tap); + reiser4_tap_load(&scan); + pos->position.pos = 0; + + did = &pos->position.dir_entry_key; + + if (is_valid_dir_coord(inode, scan.coord)) { + + build_de_id_by_key(unit_key_by_coord(scan.coord, &de_key), did); + + while (1) { + + result = go_prev_unit(&scan); + if (result != 0) + break; + + if (!is_valid_dir_coord(inode, scan.coord)) { + result = -EINVAL; + break; + } + + /* get key of directory entry */ + unit_key_by_coord(scan.coord, &de_key); + if (de_id_key_cmp(did, &de_key) != EQUAL_TO) { + /* duplicate-sequence is over */ + break; + } + pos->position.pos++; + } + } else + result = RETERR(-ENOENT); + reiser4_tap_relse(&scan); + reiser4_tap_done(&scan); + return result; +} + +/* + * "rewind" directory to @offset, i.e., set @pos and @tap correspondingly. + */ +static int dir_rewind(struct file *dir, loff_t *fpos, struct readdir_pos *pos, tap_t *tap) +{ + __u64 destination; + __s64 shift; + int result; + struct inode *inode; + loff_t dirpos; + + assert("nikita-2553", dir != NULL); + assert("nikita-2548", pos != NULL); + assert("nikita-2551", tap->coord != NULL); + assert("nikita-2552", tap->lh != NULL); + + dirpos = reiser4_get_dir_fpos(dir, *fpos); + shift = dirpos - pos->fpos; + /* this is logical directory entry within @dir which we are rewinding + * to */ + destination = pos->entry_no + shift; + + inode = file_inode(dir); + if (dirpos < 0) + return RETERR(-EINVAL); + else if (destination == 0ll || dirpos == 0) { + /* rewind to the beginning of directory */ + memset(pos, 0, sizeof *pos); + return dir_go_to(dir, pos, tap); + } else if (destination >= inode->i_size) + return RETERR(-ENOENT); + + if (shift < 0) { + /* I am afraid of negative numbers */ + shift = -shift; + /* rewinding to the left */ + if (shift <= (int)pos->position.pos) { + /* destination is within sequence of entries with + duplicate keys. */ + result = dir_go_to(dir, pos, tap); + } else { + shift -= pos->position.pos; + while (1) { + /* repetitions: deadlock is possible when + going to the left. 
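+				   If rewind_left() below fails with
+				   -E_DEADLOCK, the tap is dropped and the
+				   lookup is simply restarted from
+				   dir_go_to().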
*/ + result = dir_go_to(dir, pos, tap); + if (result == 0) { + result = rewind_left(tap, shift); + if (result == -E_DEADLOCK) { + reiser4_tap_done(tap); + continue; + } + } + break; + } + } + } else { + /* rewinding to the right */ + result = dir_go_to(dir, pos, tap); + if (result == 0) + result = rewind_right(tap, shift); + } + if (result == 0) { + result = set_pos(inode, pos, tap); + if (result == 0) { + /* update pos->position.pos */ + pos->entry_no = destination; + pos->fpos = dirpos; + } + } + return result; +} + +/* + * Function that is called by common_readdir() on each directory entry while + * doing readdir. ->filldir callback may block, so we had to release long term + * lock while calling it. To avoid repeating tree traversal, seal is used. If + * seal is broken, we return -E_REPEAT. Node is unlocked in this case. + * + * Whether node is unlocked in case of any other error is undefined. It is + * guaranteed to be still locked if success (0) is returned. + * + * When ->filldir() wants no more, feed_entry() returns 1, and node is + * unlocked. + */ +static int +feed_entry(tap_t *tap, struct dir_context *context) +{ + item_plugin *iplug; + char *name; + reiser4_key sd_key; + int result; + char buf[DE_NAME_BUF_LEN]; + char name_buf[32]; + char *local_name; + unsigned file_type; + seal_t seal; + coord_t *coord; + reiser4_key entry_key; + + coord = tap->coord; + iplug = item_plugin_by_coord(coord); + + /* pointer to name within the node */ + name = iplug->s.dir.extract_name(coord, buf); + assert("nikita-1371", name != NULL); + + /* key of object the entry points to */ + if (iplug->s.dir.extract_key(coord, &sd_key) != 0) + return RETERR(-EIO); + + /* we must release longterm znode lock before calling filldir to avoid + deadlock which may happen if filldir causes page fault. So, copy + name to intermediate buffer */ + if (strlen(name) + 1 > sizeof(name_buf)) { + local_name = kmalloc(strlen(name) + 1, + reiser4_ctx_gfp_mask_get()); + if (local_name == NULL) + return RETERR(-ENOMEM); + } else + local_name = name_buf; + + strcpy(local_name, name); + file_type = iplug->s.dir.extract_file_type(coord); + + unit_key_by_coord(coord, &entry_key); + reiser4_seal_init(&seal, coord, &entry_key); + + longterm_unlock_znode(tap->lh); + + /* + * send information about directory entry to the ->filldir() filler + * supplied to us by caller (VFS). + * + * ->filldir is entitled to do weird things. For example, ->filldir + * supplied by knfsd re-enters file system. Make sure no locks are + * held. + */ + assert("nikita-3436", lock_stack_isclean(get_current_lock_stack())); + + reiser4_txn_restart_current(); + if (!dir_emit(context, name, (int)strlen(name), + /* inode number of object bounden by this entry */ + oid_to_uino(get_key_objectid(&sd_key)), file_type)) + /* ->filldir() is satisfied. (no space in buffer, IOW) */ + result = 1; + else + result = reiser4_seal_validate(&seal, coord, &entry_key, + tap->lh, tap->mode, + ZNODE_LOCK_HIPRI); + + if (local_name != name_buf) + kfree(local_name); + + return result; +} + +static void move_entry(struct readdir_pos *pos, coord_t *coord) +{ + reiser4_key de_key; + de_id *did; + + /* update @pos */ + ++pos->entry_no; + did = &pos->position.dir_entry_key; + + /* get key of directory entry */ + unit_key_by_coord(coord, &de_key); + + if (de_id_key_cmp(did, &de_key) == EQUAL_TO) + /* we are within sequence of directory entries + with duplicate keys. 
*/ + ++pos->position.pos; + else { + pos->position.pos = 0; + build_de_id_by_key(&de_key, did); + } + ++pos->fpos; +} + +/* + * STATELESS READDIR + * + * readdir support in reiser4 relies on ability to update readdir_pos embedded + * into reiser4_file_fsdata on each directory modification (name insertion and + * removal), see reiser4_readdir_common() function below. This obviously doesn't + * work when reiser4 is accessed over NFS, because NFS doesn't keep any state + * across client READDIR requests for the same directory. + * + * To address this we maintain a "pool" of detached reiser4_file_fsdata + * (d_cursor). Whenever NFS readdir request comes, we detect this, and try to + * find detached reiser4_file_fsdata corresponding to previous readdir + * request. In other words, additional state is maintained on the + * server. (This is somewhat contrary to the design goals of NFS protocol.) + * + * To efficiently detect when our ->readdir() method is called by NFS server, + * dentry is marked as "stateless" in reiser4_decode_fh() (this is checked by + * file_is_stateless() function). + * + * To find out d_cursor in the pool, we encode client id (cid) in the highest + * bits of NFS readdir cookie: when first readdir request comes to the given + * directory from the given client, cookie is set to 0. This situation is + * detected, global cid_counter is incremented, and stored in highest bits of + * all direntry offsets returned to the client, including last one. As the + * only valid readdir cookie is one obtained as direntry->offset, we are + * guaranteed that next readdir request (continuing current one) will have + * current cid in the highest bits of starting readdir cookie. All d_cursors + * are hashed into per-super-block hash table by (oid, cid) key. + * + * In addition d_cursors are placed into per-super-block radix tree where they + * are keyed by oid alone. This is necessary to efficiently remove them during + * rmdir. + * + * At last, currently unused d_cursors are linked into special list. This list + * is used d_cursor_shrink to reclaim d_cursors on memory pressure. + * + */ + +/* + * prepare for readdir. + * + * NOTE: @f->f_pos may be out-of-date (iterate() vs readdir()). + * @fpos is effective position. + */ +static int dir_readdir_init(struct file *f, loff_t* fpos, tap_t *tap, + struct readdir_pos **pos) +{ + struct inode *inode; + reiser4_file_fsdata *fsdata; + int result; + + assert("nikita-1359", f != NULL); + inode = file_inode(f); + assert("nikita-1360", inode != NULL); + + if (!S_ISDIR(inode->i_mode)) + return RETERR(-ENOTDIR); + + /* try to find detached readdir state */ + result = reiser4_attach_fsdata(f, fpos, inode); + if (result != 0) + return result; + + fsdata = reiser4_get_file_fsdata(f); + assert("nikita-2571", fsdata != NULL); + if (IS_ERR(fsdata)) + return PTR_ERR(fsdata); + + /* add file descriptor to the readdir list hanging of directory + * inode. This list is used to scan "readdirs-in-progress" while + * inserting or removing names in the directory. */ + spin_lock_inode(inode); + if (list_empty_careful(&fsdata->dir.linkage)) + list_add(&fsdata->dir.linkage, get_readdir_list(inode)); + *pos = &fsdata->dir.readdir; + spin_unlock_inode(inode); + + /* move @tap to the current position */ + return dir_rewind(f, fpos, *pos, tap); +} + +/* this is implementation of vfs's llseek method of struct file_operations for + typical directory + See comment before reiser4_iterate_common() for explanation. 
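+
+   A directory "offset" here is a logical entry number, not a byte offset,
+   so after default_llseek_unlocked() updates ->f_pos the new position is
+   translated back into a readdir_pos by dir_readdir_init() below.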
+*/ +loff_t reiser4_llseek_dir_common(struct file *file, loff_t off, int origin) +{ + reiser4_context *ctx; + loff_t result; + struct inode *inode; + + inode = file_inode(file); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + inode_lock(inode); + + /* update ->f_pos */ + result = default_llseek_unlocked(file, off, origin); + if (result >= 0) { + int ff; + coord_t coord; + lock_handle lh; + tap_t tap; + struct readdir_pos *pos; + + coord_init_zero(&coord); + init_lh(&lh); + reiser4_tap_init(&tap, &coord, &lh, ZNODE_READ_LOCK); + + ff = dir_readdir_init(file, &file->f_pos, &tap, &pos); + reiser4_detach_fsdata(file); + if (ff != 0) + result = (loff_t) ff; + reiser4_tap_done(&tap); + } + reiser4_detach_fsdata(file); + inode_unlock(inode); + + reiser4_exit_context(ctx); + return result; +} + +/* this is common implementation of vfs's readdir method of struct + file_operations + + readdir problems: + + readdir(2)/getdents(2) interface is based on implicit assumption that + readdir can be restarted from any particular point by supplying file system + with off_t-full of data. That is, file system fills ->d_off field in struct + dirent and later user passes ->d_off to the seekdir(3), which is, actually, + implemented by glibc as lseek(2) on directory. + + Reiser4 cannot restart readdir from 64 bits of data, because two last + components of the key of directory entry are unknown, which given 128 bits: + locality and type fields in the key of directory entry are always known, to + start readdir() from given point objectid and offset fields have to be + filled. + + Traditional UNIX API for scanning through directory + (readdir/seekdir/telldir/opendir/closedir/rewindir/getdents) is based on the + assumption that directory is structured very much like regular file, in + particular, it is implied that each name within given directory (directory + entry) can be uniquely identified by scalar offset and that such offset is + stable across the life-time of the name is identifies. + + This is manifestly not so for reiser4. In reiser4 the only stable unique + identifies for the directory entry is its key that doesn't fit into + seekdir/telldir API. + + solution: + + Within each file descriptor participating in readdir-ing of directory + plugin/dir/dir.h:readdir_pos is maintained. This structure keeps track of + the "current" directory entry that file descriptor looks at. It contains a + key of directory entry (plus some additional info to deal with non-unique + keys that we wouldn't dwell onto here) and a logical position of this + directory entry starting from the beginning of the directory, that is + ordinal number of this entry in the readdir order. + + Obviously this logical position is not stable in the face of directory + modifications. To work around this, on each addition or removal of directory + entry all file descriptors for directory inode are scanned and their + readdir_pos are updated accordingly (adjust_dir_pos()). 
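+
+   In short: ->llseek() and ->iterate() treat f_pos as "number of entries
+   already returned", while the (key, duplicate-counter) pair stored in
+   readdir_pos is what actually anchors the scan; dir_rewind() converts an
+   arbitrary f_pos back into such an anchor.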
+*/ +int reiser4_iterate_common(struct file *f /* directory file being read */, + struct dir_context *context /* callback data passed to us by VFS */) +{ + reiser4_context *ctx; + int result; + struct inode *inode; + coord_t coord; + lock_handle lh; + tap_t tap; + struct readdir_pos *pos; + + assert("nikita-1359", f != NULL); + inode = file_inode(f); + assert("nikita-1360", inode != NULL); + + if (!S_ISDIR(inode->i_mode)) + return RETERR(-ENOTDIR); + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + coord_init_zero(&coord); + init_lh(&lh); + reiser4_tap_init(&tap, &coord, &lh, ZNODE_READ_LOCK); + + reiser4_readdir_readahead_init(inode, &tap); + +repeat: + result = dir_readdir_init(f, &context->pos, &tap, &pos); + if (result == 0) { + result = reiser4_tap_load(&tap); + /* scan entries one by one feeding them to @filld */ + while (result == 0) { + coord_t *coord; + + coord = tap.coord; + assert("nikita-2572", coord_is_existing_unit(coord)); + assert("nikita-3227", is_valid_dir_coord(inode, coord)); + + result = feed_entry(&tap, context); + if (result > 0) { + break; + } else if (result == 0) { + ++context->pos; + result = go_next_unit(&tap); + if (result == -E_NO_NEIGHBOR || + result == -ENOENT) { + result = 0; + break; + } else if (result == 0) { + if (is_valid_dir_coord(inode, coord)) + move_entry(pos, coord); + else + break; + } + } else if (result == -E_REPEAT) { + /* feed_entry() had to restart. */ + ++context->pos; + reiser4_tap_relse(&tap); + goto repeat; + } else + warning("vs-1617", + "reiser4_readdir_common: unexpected error %d", + result); + } + reiser4_tap_relse(&tap); + + if (result >= 0) + f->f_version = inode->i_version; + } else if (result == -E_NO_NEIGHBOR || result == -ENOENT) + result = 0; + reiser4_tap_done(&tap); + reiser4_detach_fsdata(f); + + /* try to update directory's atime */ + if (reiser4_grab_space_force(inode_file_plugin(inode)->estimate.update(inode), + BA_CAN_COMMIT) != 0) + warning("", "failed to update atime on readdir: %llu", + get_inode_oid(inode)); + else + file_accessed(f); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return (result <= 0) ? 
result : 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/file_plugin_common.c b/fs/reiser4/plugin/file_plugin_common.c new file mode 100644 index 000000000000..706732dd393f --- /dev/null +++ b/fs/reiser4/plugin/file_plugin_common.c @@ -0,0 +1,1004 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + reiser4/README */ + +/* this file contains typical implementations for most of methods of + file plugin +*/ + +#include "../inode.h" +#include "object.h" +#include "../safe_link.h" + +static int insert_new_sd(struct inode *inode); +static int update_sd(struct inode *inode); + +/* this is common implementation of write_sd_by_inode method of file plugin + either insert stat data or update it + */ +int write_sd_by_inode_common(struct inode *inode/* object to save */) +{ + int result; + + assert("nikita-730", inode != NULL); + + if (reiser4_inode_get_flag(inode, REISER4_NO_SD)) + /* object doesn't have stat-data yet */ + result = insert_new_sd(inode); + else + result = update_sd(inode); + if (result != 0 && result != -ENAMETOOLONG && result != -ENOMEM) + /* Don't issue warnings about "name is too long" */ + warning("nikita-2221", "Failed to save sd for %llu: %i", + (unsigned long long)get_inode_oid(inode), result); + return result; +} + +/* this is common implementation of key_by_inode method of file plugin + */ +int +key_by_inode_and_offset_common(struct inode *inode, loff_t off, + reiser4_key * key) +{ + reiser4_key_init(key); + set_key_locality(key, reiser4_inode_data(inode)->locality_id); + set_key_ordering(key, get_inode_ordering(inode)); + set_key_objectid(key, get_inode_oid(inode)); /*FIXME: inode->i_ino */ + set_key_type(key, KEY_BODY_MINOR); + set_key_offset(key, (__u64) off); + return 0; +} + +/* this is common implementation of set_plug_in_inode method of file plugin + */ +int set_plug_in_inode_common(struct inode *object /* inode to set plugin on */ , + struct inode *parent /* parent object */ , + reiser4_object_create_data * data /* creational + * data */ ) +{ + __u64 mask; + + object->i_mode = data->mode; + /* this should be plugin decision */ + object->i_uid = current_fsuid(); + object->i_mtime = object->i_atime = object->i_ctime = current_time(object); + + /* support for BSD style group-id assignment. 
See mount's manual page + description of bsdgroups ext2 mount options for more details */ + if (reiser4_is_set(object->i_sb, REISER4_BSD_GID)) + object->i_gid = parent->i_gid; + else if (parent->i_mode & S_ISGID) { + /* parent directory has sguid bit */ + object->i_gid = parent->i_gid; + if (S_ISDIR(object->i_mode)) + /* sguid is inherited by sub-directories */ + object->i_mode |= S_ISGID; + } else + object->i_gid = current_fsgid(); + + /* this object doesn't have stat-data yet */ + reiser4_inode_set_flag(object, REISER4_NO_SD); +#if 0 + /* this is now called after all inode plugins are initialized: + do_create_vfs_child after adjust_to_parent */ + /* setup inode and file-operations for this inode */ + setup_inode_ops(object, data); +#endif + reiser4_seal_init(&reiser4_inode_data(object)->sd_seal, NULL, NULL); + mask = (1 << UNIX_STAT) | (1 << LIGHT_WEIGHT_STAT); + if (!reiser4_is_set(object->i_sb, REISER4_32_BIT_TIMES)) + mask |= (1 << LARGE_TIMES_STAT); + + reiser4_inode_data(object)->extmask = mask; + return 0; +} + +/* this is common implementation of adjust_to_parent method of file plugin for + regular files + */ +int adjust_to_parent_common(struct inode *object /* new object */ , + struct inode *parent /* parent directory */ , + struct inode *root/* root directory */) +{ + assert("nikita-2165", object != NULL); + if (parent == NULL) + parent = root; + assert("nikita-2069", parent != NULL); + + /* + * inherit missing plugins from parent + */ + + grab_plugin_pset(object, parent, PSET_FILE); + grab_plugin_pset(object, parent, PSET_SD); + grab_plugin_pset(object, parent, PSET_FORMATTING); + grab_plugin_pset(object, parent, PSET_PERM); + return 0; +} + +/* this is common implementation of adjust_to_parent method of file plugin for + typical directories + */ +int adjust_to_parent_common_dir(struct inode *object /* new object */ , + struct inode *parent /* parent directory */ , + struct inode *root/* root directory */) +{ + int result = 0; + pset_member memb; + + assert("nikita-2166", object != NULL); + if (parent == NULL) + parent = root; + assert("nikita-2167", parent != NULL); + + /* + * inherit missing plugins from parent + */ + for (memb = 0; memb < PSET_LAST; ++memb) { + result = grab_plugin_pset(object, parent, memb); + if (result != 0) + break; + } + return result; +} + +int adjust_to_parent_cryptcompress(struct inode *object /* new object */ , + struct inode *parent /* parent directory */, + struct inode *root/* root directory */) +{ + int result; + result = adjust_to_parent_common(object, parent, root); + if (result) + return result; + assert("edward-1416", parent != NULL); + + grab_plugin_pset(object, parent, PSET_CLUSTER); + grab_plugin_pset(object, parent, PSET_CIPHER); + grab_plugin_pset(object, parent, PSET_DIGEST); + grab_plugin_pset(object, parent, PSET_COMPRESSION); + grab_plugin_pset(object, parent, PSET_COMPRESSION_MODE); + + return 0; +} + +/* this is common implementation of create_object method of file plugin + */ +int reiser4_create_object_common(struct inode *object, struct inode *parent, + reiser4_object_create_data * data) +{ + reiser4_block_nr reserve; + assert("nikita-744", object != NULL); + assert("nikita-745", parent != NULL); + assert("nikita-747", data != NULL); + assert("nikita-748", reiser4_inode_get_flag(object, REISER4_NO_SD)); + + reserve = estimate_create_common(object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + return write_sd_by_inode_common(object); +} + +static int common_object_delete_no_reserve(struct inode 
*inode); + +/** + * reiser4_delete_object_common - delete_object of file_plugin + * @inode: inode to be deleted + * + * This is common implementation of delete_object method of file_plugin. It + * applies to object its deletion consists of removing two items - stat data + * and safe-link. + */ +int reiser4_delete_object_common(struct inode *inode) +{ + int result; + + assert("nikita-1477", inode != NULL); + /* FIXME: if file body deletion failed (i/o error, for instance), + inode->i_size can be != 0 here */ + assert("nikita-3420", inode->i_size == 0 || S_ISLNK(inode->i_mode)); + assert("nikita-3421", inode->i_nlink == 0); + + if (!reiser4_inode_get_flag(inode, REISER4_NO_SD)) { + reiser4_block_nr reserve; + + /* grab space which is needed to remove 2 items from the tree: + stat data and safe-link */ + reserve = 2 * + estimate_one_item_removal(reiser4_tree_by_inode(inode)); + if (reiser4_grab_space_force(reserve, + BA_RESERVED | BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + result = common_object_delete_no_reserve(inode); + } else + result = 0; + return result; +} + +/** + * reiser4_delete_dir_common - delete_object of file_plugin + * @inode: inode to be deleted + * + * This is common implementation of delete_object method of file_plugin for + * typical directory. It calls done method of dir_plugin to remove "." and + * removes stat data and safe-link. + */ +int reiser4_delete_dir_common(struct inode *inode) +{ + int result; + dir_plugin *dplug; + + assert("", (get_current_context() && + get_current_context()->trans->atom == NULL)); + + dplug = inode_dir_plugin(inode); + assert("vs-1101", dplug && dplug->done); + + /* kill cursors which might be attached to inode */ + reiser4_kill_cursors(inode); + + /* grab space enough for removing two items */ + if (reiser4_grab_space + (2 * estimate_one_item_removal(reiser4_tree_by_inode(inode)), + BA_RESERVED | BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + result = dplug->done(inode); + if (!result) + result = common_object_delete_no_reserve(inode); + return result; +} + +/* this is common implementation of add_link method of file plugin + */ +int reiser4_add_link_common(struct inode *object, struct inode *parent) +{ + /* + * increment ->i_nlink and update ->i_ctime + */ + + INODE_INC_NLINK(object); + object->i_ctime = current_time(object); + return 0; +} + +/* this is common implementation of rem_link method of file plugin + */ +int reiser4_rem_link_common(struct inode *object, struct inode *parent) +{ + assert("nikita-2021", object != NULL); + assert("nikita-2163", object->i_nlink > 0); + + /* + * decrement ->i_nlink and update ->i_ctime + */ + + INODE_DROP_NLINK(object); + object->i_ctime = current_time(object); + return 0; +} + +/* this is common implementation of rem_link method of file plugin for typical + directory +*/ +int rem_link_common_dir(struct inode *object, struct inode *parent UNUSED_ARG) +{ + assert("nikita-20211", object != NULL); + assert("nikita-21631", object->i_nlink > 0); + + /* + * decrement ->i_nlink and update ->i_ctime + */ + if(object->i_nlink == 2) + INODE_SET_NLINK(object, 0); + + else + INODE_DROP_NLINK(object); + object->i_ctime = current_time(object); + return 0; +} + +/* this is common implementation of owns_item method of file plugin + compare objectids of keys in inode and coord */ +int owns_item_common(const struct inode *inode, /* object to check + * against */ + const coord_t *coord/* coord to check */) +{ + reiser4_key item_key; + reiser4_key file_key; + + assert("nikita-760", inode != NULL); + assert("nikita-761", 
coord != NULL); + + return coord_is_existing_item(coord) && + (get_key_objectid(build_sd_key(inode, &file_key)) == + get_key_objectid(item_key_by_coord(coord, &item_key))); +} + +/* this is common implementation of owns_item method of file plugin + for typical directory +*/ +int owns_item_common_dir(const struct inode *inode,/* object to check against */ + const coord_t *coord/* coord of item to check */) +{ + reiser4_key item_key; + + assert("nikita-1335", inode != NULL); + assert("nikita-1334", coord != NULL); + + if (plugin_of_group(item_plugin_by_coord(coord), DIR_ENTRY_ITEM_TYPE)) + return get_key_locality(item_key_by_coord(coord, &item_key)) == + get_inode_oid(inode); + else + return owns_item_common(inode, coord); +} + +/* this is common implementation of can_add_link method of file plugin + checks whether yet another hard links to this object can be added +*/ +int can_add_link_common(const struct inode *object/* object to check */) +{ + assert("nikita-732", object != NULL); + + /* inode->i_nlink is unsigned int, so just check for integer + overflow */ + return object->i_nlink + 1 != 0; +} + +/* this is common implementation of can_rem_link method of file plugin for + typical directory +*/ +int can_rem_link_common_dir(const struct inode *inode) +{ + /* is_dir_empty() returns 0 is dir is empty */ + return !is_dir_empty(inode); +} + +/* this is common implementation of detach method of file plugin for typical + directory +*/ +int reiser4_detach_common_dir(struct inode *child, struct inode *parent) +{ + dir_plugin *dplug; + + dplug = inode_dir_plugin(child); + assert("nikita-2883", dplug != NULL); + assert("nikita-2884", dplug->detach != NULL); + return dplug->detach(child, parent); +} + +/* this is common implementation of bind method of file plugin for typical + directory +*/ +int reiser4_bind_common_dir(struct inode *child, struct inode *parent) +{ + dir_plugin *dplug; + + dplug = inode_dir_plugin(child); + assert("nikita-2646", dplug != NULL); + return dplug->attach(child, parent); +} + +static int process_truncate(struct inode *, __u64 size); + +/* this is common implementation of safelink method of file plugin + */ +int safelink_common(struct inode *object, reiser4_safe_link_t link, __u64 value) +{ + int result; + + assert("vs-1705", get_current_context()->trans->atom == NULL); + if (link == SAFE_UNLINK) + /* nothing to do. iput() in the caller (process_safelink) will + * finish with file */ + result = 0; + else if (link == SAFE_TRUNCATE) + result = process_truncate(object, value); + else { + warning("nikita-3438", "Unrecognized safe-link type: %i", link); + result = RETERR(-EIO); + } + return result; +} + +/* this is common implementation of estimate.create method of file plugin + can be used when object creation involves insertion of one item (usually stat + data) into tree +*/ +reiser4_block_nr estimate_create_common(const struct inode *object) +{ + return estimate_one_insert_item(reiser4_tree_by_inode(object)); +} + +/* this is common implementation of estimate.create method of file plugin for + typical directory + can be used when directory creation involves insertion of two items (usually + stat data and item containing "." 
and "..") into tree +*/ +reiser4_block_nr estimate_create_common_dir(const struct inode *object) +{ + return 2 * estimate_one_insert_item(reiser4_tree_by_inode(object)); +} + +/* this is common implementation of estimate.update method of file plugin + can be used when stat data update does not do more than inserting a unit + into a stat data item which is probably true for most cases +*/ +reiser4_block_nr estimate_update_common(const struct inode *inode) +{ + return estimate_one_insert_into_item(reiser4_tree_by_inode(inode)); +} + +/* this is common implementation of estimate.unlink method of file plugin + */ +reiser4_block_nr +estimate_unlink_common(const struct inode *object UNUSED_ARG, + const struct inode *parent UNUSED_ARG) +{ + return 0; +} + +/* this is common implementation of estimate.unlink method of file plugin for + typical directory +*/ +reiser4_block_nr +estimate_unlink_common_dir(const struct inode *object, + const struct inode *parent) +{ + dir_plugin *dplug; + + dplug = inode_dir_plugin(object); + assert("nikita-2888", dplug != NULL); + assert("nikita-2887", dplug->estimate.unlink != NULL); + return dplug->estimate.unlink(object, parent); +} + +char *wire_write_common(struct inode *inode, char *start) +{ + return build_inode_onwire(inode, start); +} + +char *wire_read_common(char *addr, reiser4_object_on_wire * obj) +{ + if (!obj) + return locate_obj_key_id_onwire(addr); + return extract_obj_key_id_from_onwire(addr, &obj->u.std.key_id); +} + +struct dentry *wire_get_common(struct super_block *sb, + reiser4_object_on_wire * obj) +{ + struct inode *inode; + struct dentry *dentry; + reiser4_key key; + + extract_key_from_id(&obj->u.std.key_id, &key); + inode = reiser4_iget(sb, &key, 1); + if (!IS_ERR(inode)) { + reiser4_iget_complete(inode); + dentry = d_obtain_alias(inode); + if (!IS_ERR(dentry)) + dentry->d_op = &get_super_private(sb)->ops.dentry; + } else if (PTR_ERR(inode) == -ENOENT) + /* + * inode wasn't found at the key encoded in the file + * handle. Hence, file handle is stale. + */ + dentry = ERR_PTR(RETERR(-ESTALE)); + else + dentry = (void *)inode; + return dentry; +} + +int wire_size_common(struct inode *inode) +{ + return inode_onwire_size(inode); +} + +void wire_done_common(reiser4_object_on_wire * obj) +{ + /* nothing to do */ +} + +/* helper function to print errors */ +static void key_warning(const reiser4_key * key /* key to print */ , + const struct inode *inode, + int code/* error code to print */) +{ + assert("nikita-716", key != NULL); + + if (code != -ENOMEM) { + warning("nikita-717", "Error for inode %llu (%i)", + (unsigned long long)get_key_objectid(key), code); + reiser4_print_key("for key", key); + } +} + +/* NIKITA-FIXME-HANS: perhaps this function belongs in another file? 
*/ +#if REISER4_DEBUG +static void +check_inode_seal(const struct inode *inode, + const coord_t *coord, const reiser4_key * key) +{ + reiser4_key unit_key; + + unit_key_by_coord(coord, &unit_key); + assert("nikita-2752", + WITH_DATA_RET(coord->node, 1, keyeq(key, &unit_key))); + assert("nikita-2753", get_inode_oid(inode) == get_key_objectid(key)); +} + +static void check_sd_coord(coord_t *coord, const reiser4_key * key) +{ + reiser4_key ukey; + + coord_clear_iplug(coord); + if (zload(coord->node)) + return; + + if (!coord_is_existing_unit(coord) || + !item_plugin_by_coord(coord) || + !keyeq(unit_key_by_coord(coord, &ukey), key) || + (znode_get_level(coord->node) != LEAF_LEVEL) || + !item_is_statdata(coord)) { + warning("nikita-1901", "Conspicuous seal"); + reiser4_print_key("key", key); + print_coord("coord", coord, 1); + impossible("nikita-2877", "no way"); + } + zrelse(coord->node); +} + +#else +#define check_inode_seal(inode, coord, key) noop +#define check_sd_coord(coord, key) noop +#endif + +/* insert new stat-data into tree. Called with inode state + locked. Return inode state locked. */ +static int insert_new_sd(struct inode *inode/* inode to create sd for */) +{ + int result; + reiser4_key key; + coord_t coord; + reiser4_item_data data; + char *area; + reiser4_inode *ref; + lock_handle lh; + oid_t oid; + + assert("nikita-723", inode != NULL); + assert("nikita-3406", reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + ref = reiser4_inode_data(inode); + spin_lock_inode(inode); + + if (ref->plugin_mask != 0) + /* inode has non-standard plugins */ + inode_set_extension(inode, PLUGIN_STAT); + /* + * prepare specification of new item to be inserted + */ + + data.iplug = inode_sd_plugin(inode); + data.length = data.iplug->s.sd.save_len(inode); + spin_unlock_inode(inode); + + data.data = NULL; + data.user = 0; +/* could be optimized for case where there is only one node format in + * use in the filesystem, probably there are lots of such + * places we could optimize for only one node layout.... -Hans */ + if (data.length > reiser4_tree_by_inode(inode)->nplug->max_item_size()) { + /* This is silly check, but we don't know actual node where + insertion will go into. */ + return RETERR(-ENAMETOOLONG); + } + oid = oid_allocate(inode->i_sb); +/* NIKITA-FIXME-HANS: what is your opinion on whether this error check should be + * encapsulated into oid_allocate? */ + if (oid == ABSOLUTE_MAX_OID) + return RETERR(-EOVERFLOW); + + set_inode_oid(inode, oid); + + coord_init_zero(&coord); + init_lh(&lh); + + result = insert_by_key(reiser4_tree_by_inode(inode), + build_sd_key(inode, &key), &data, &coord, &lh, + /* stat data lives on a leaf level */ + LEAF_LEVEL, CBK_UNIQUE); + + /* we don't want to re-check that somebody didn't insert + stat-data while we were doing io, because if it did, + insert_by_key() returned error. */ + /* but what _is_ possible is that plugin for inode's stat-data, + list of non-standard plugins or their state would change + during io, so that stat-data wouldn't fit into sd. To avoid + this race we keep inode_state lock. This lock has to be + taken each time you access inode in a way that would cause + changes in sd size: changing plugins etc. + */ + + if (result == IBK_INSERT_OK) { + coord_clear_iplug(&coord); + result = zload(coord.node); + if (result == 0) { + /* have we really inserted stat data? */ + assert("nikita-725", item_is_statdata(&coord)); + + /* inode was just created. It is inserted into hash + table, but no directory entry was yet inserted into + parent. 
So, inode is inaccessible through + ->lookup(). All places that directly grab inode + from hash-table (like old knfsd), should check + IMMUTABLE flag that is set by common_create_child. + */ + assert("nikita-3240", data.iplug != NULL); + assert("nikita-3241", data.iplug->s.sd.save != NULL); + area = item_body_by_coord(&coord); + result = data.iplug->s.sd.save(inode, &area); + znode_make_dirty(coord.node); + if (result == 0) { + /* object has stat-data now */ + reiser4_inode_clr_flag(inode, REISER4_NO_SD); + reiser4_inode_set_flag(inode, + REISER4_SDLEN_KNOWN); + /* initialise stat-data seal */ + reiser4_seal_init(&ref->sd_seal, &coord, &key); + ref->sd_coord = coord; + check_inode_seal(inode, &coord, &key); + } else if (result != -ENOMEM) + /* + * convert any other error code to -EIO to + * avoid confusing user level with unexpected + * errors. + */ + result = RETERR(-EIO); + zrelse(coord.node); + } + } + done_lh(&lh); + + if (result != 0) + key_warning(&key, inode, result); + else + oid_count_allocated(); + + return result; +} + +/* find sd of inode in a tree, deal with errors */ +int lookup_sd(struct inode *inode /* inode to look sd for */ , + znode_lock_mode lock_mode /* lock mode */ , + coord_t *coord /* resulting coord */ , + lock_handle * lh /* resulting lock handle */ , + const reiser4_key * key /* resulting key */ , + int silent) +{ + int result; + __u32 flags; + + assert("nikita-1692", inode != NULL); + assert("nikita-1693", coord != NULL); + assert("nikita-1694", key != NULL); + + /* look for the object's stat data in a tree. + This returns in "node" pointer to a locked znode and in "pos" + position of an item found in node. Both are only valid if + coord_found is returned. */ + flags = (lock_mode == ZNODE_WRITE_LOCK) ? CBK_FOR_INSERT : 0; + flags |= CBK_UNIQUE; + /* + * traverse tree to find stat data. We cannot use vroot here, because + * it only covers _body_ of the file, and stat data don't belong + * there. 
+ */ + result = coord_by_key(reiser4_tree_by_inode(inode), + key, + coord, + lh, + lock_mode, + FIND_EXACT, LEAF_LEVEL, LEAF_LEVEL, flags, NULL); + if (REISER4_DEBUG && result == 0) + check_sd_coord(coord, key); + + if (result != 0 && !silent) + key_warning(key, inode, result); + return result; +} + +static int +locate_inode_sd(struct inode *inode, + reiser4_key * key, coord_t *coord, lock_handle * lh) +{ + reiser4_inode *state; + seal_t seal; + int result; + + assert("nikita-3483", inode != NULL); + + state = reiser4_inode_data(inode); + spin_lock_inode(inode); + *coord = state->sd_coord; + coord_clear_iplug(coord); + seal = state->sd_seal; + spin_unlock_inode(inode); + + build_sd_key(inode, key); + /* first, try to use seal */ + if (reiser4_seal_is_set(&seal)) { + result = reiser4_seal_validate(&seal, + coord, + key, + lh, ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (result == 0) { + check_sd_coord(coord, key); + return 0; + } + } + /* hint is invalid, + * so traverse tree + */ + coord_init_zero(coord); + return lookup_sd(inode, ZNODE_WRITE_LOCK, coord, lh, key, 0); +} + +#if REISER4_DEBUG +static int all_but_offset_key_eq(const reiser4_key * k1, const reiser4_key * k2) +{ + return (get_key_locality(k1) == get_key_locality(k2) && + get_key_type(k1) == get_key_type(k2) && + get_key_band(k1) == get_key_band(k2) && + get_key_ordering(k1) == get_key_ordering(k2) && + get_key_objectid(k1) == get_key_objectid(k2)); +} + +#include "../tree_walk.h" + +/* make some checks before and after stat-data resize operation */ +static int check_sd_resize(struct inode *inode, coord_t *coord, + int length, int progress/* 1 means after resize */) +{ + int ret = 0; + lock_handle left_lock; + coord_t left_coord; + reiser4_key left_key; + reiser4_key key; + + if (inode_file_plugin(inode) != + file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)) + return 0; + if (!length) + return 0; + if (coord->item_pos != 0) + return 0; + + init_lh(&left_lock); + ret = reiser4_get_left_neighbor(&left_lock, + coord->node, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (ret == -E_REPEAT || ret == -E_NO_NEIGHBOR || + ret == -ENOENT || ret == -EINVAL + || ret == -E_DEADLOCK) { + ret = 0; + goto exit; + } + ret = zload(left_lock.node); + if (ret) + goto exit; + coord_init_last_unit(&left_coord, left_lock.node); + item_key_by_coord(&left_coord, &left_key); + item_key_by_coord(coord, &key); + + if (all_but_offset_key_eq(&key, &left_key)) + /* corruption occured */ + ret = 1; + zrelse(left_lock.node); + exit: + done_lh(&left_lock); + return ret; +} +#endif + +/* update stat-data at @coord */ +static int +update_sd_at(struct inode *inode, coord_t *coord, reiser4_key * key, + lock_handle * lh) +{ + int result; + reiser4_item_data data; + char *area; + reiser4_inode *state; + znode *loaded; + + state = reiser4_inode_data(inode); + + coord_clear_iplug(coord); + result = zload(coord->node); + if (result != 0) + return result; + loaded = coord->node; + + spin_lock_inode(inode); + assert("nikita-728", inode_sd_plugin(inode) != NULL); + data.iplug = inode_sd_plugin(inode); + + /* if inode has non-standard plugins, add appropriate stat data + * extension */ + if (state->extmask & (1 << PLUGIN_STAT)) { + if (state->plugin_mask == 0) + inode_clr_extension(inode, PLUGIN_STAT); + } else if (state->plugin_mask != 0) + inode_set_extension(inode, PLUGIN_STAT); + + if (state->extmask & (1 << HEIR_STAT)) { + if (state->heir_mask == 0) + inode_clr_extension(inode, HEIR_STAT); + } else if (state->heir_mask != 0) + inode_set_extension(inode, 
HEIR_STAT); + + /* data.length is how much space to add to (or remove + from if negative) sd */ + if (!reiser4_inode_get_flag(inode, REISER4_SDLEN_KNOWN)) { + /* recalculate stat-data length */ + data.length = + data.iplug->s.sd.save_len(inode) - + item_length_by_coord(coord); + reiser4_inode_set_flag(inode, REISER4_SDLEN_KNOWN); + } else + data.length = 0; + spin_unlock_inode(inode); + + /* if on-disk stat data is of different length than required + for this inode, resize it */ + + if (data.length != 0) { + data.data = NULL; + data.user = 0; + + assert("edward-1441", + !check_sd_resize(inode, coord, + data.length, 0/* before resize */)); + + /* insertion code requires that insertion point (coord) was + * between units. */ + coord->between = AFTER_UNIT; + result = reiser4_resize_item(coord, &data, key, lh, + COPI_DONT_SHIFT_LEFT); + if (result != 0) { + key_warning(key, inode, result); + zrelse(loaded); + return result; + } + if (loaded != coord->node) { + /* reiser4_resize_item moved coord to another node. + Zload it */ + zrelse(loaded); + coord_clear_iplug(coord); + result = zload(coord->node); + if (result != 0) + return result; + loaded = coord->node; + } + assert("edward-1442", + !check_sd_resize(inode, coord, + data.length, 1/* after resize */)); + } + area = item_body_by_coord(coord); + spin_lock_inode(inode); + result = data.iplug->s.sd.save(inode, &area); + znode_make_dirty(coord->node); + + /* re-initialise stat-data seal */ + + /* + * coord.between was possibly skewed from AT_UNIT when stat-data size + * was changed and new extensions were pasted into item. + */ + coord->between = AT_UNIT; + reiser4_seal_init(&state->sd_seal, coord, key); + state->sd_coord = *coord; + spin_unlock_inode(inode); + check_inode_seal(inode, coord, key); + zrelse(loaded); + return result; +} + +/* Update existing stat-data in a tree. Called with inode state locked. Return + inode state locked. */ +static int update_sd(struct inode *inode/* inode to update sd for */) +{ + int result; + reiser4_key key; + coord_t coord; + lock_handle lh; + + assert("nikita-726", inode != NULL); + + /* no stat-data, nothing to update?! */ + assert("nikita-3482", !reiser4_inode_get_flag(inode, REISER4_NO_SD)); + + init_lh(&lh); + + result = locate_inode_sd(inode, &key, &coord, &lh); + if (result == 0) + result = update_sd_at(inode, &coord, &key, &lh); + done_lh(&lh); + + return result; +} + +/* helper for reiser4_delete_object_common and reiser4_delete_dir_common. + Remove object stat data. 
Space for that must be reserved by caller before +*/ +static int +common_object_delete_no_reserve(struct inode *inode/* object to remove */) +{ + int result; + + assert("nikita-1477", inode != NULL); + + if (!reiser4_inode_get_flag(inode, REISER4_NO_SD)) { + reiser4_key sd_key; + + build_sd_key(inode, &sd_key); + result = + reiser4_cut_tree(reiser4_tree_by_inode(inode), + &sd_key, &sd_key, NULL, 0); + if (result == 0) { + reiser4_inode_set_flag(inode, REISER4_NO_SD); + result = oid_release(inode->i_sb, get_inode_oid(inode)); + if (result == 0) { + oid_count_released(); + + result = safe_link_del(reiser4_tree_by_inode(inode), + get_inode_oid(inode), + SAFE_UNLINK); + } + } + } else + result = 0; + return result; +} + +/* helper for safelink_common */ +static int process_truncate(struct inode *inode, __u64 size) +{ + int result; + struct iattr attr; + file_plugin *fplug; + reiser4_context *ctx; + struct dentry dentry; + + assert("vs-21", is_in_reiser4_context()); + ctx = reiser4_init_context(inode->i_sb); + assert("vs-22", !IS_ERR(ctx)); + + attr.ia_size = size; + attr.ia_valid = ATTR_SIZE | ATTR_CTIME; + fplug = inode_file_plugin(inode); + + inode_lock(inode); + assert("vs-1704", get_current_context()->trans->atom == NULL); + dentry.d_inode = inode; + result = inode->i_op->setattr(&dentry, &attr); + inode_unlock(inode); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + + return result; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/hash.c b/fs/reiser4/plugin/hash.c new file mode 100644 index 000000000000..999f7b1eca4d --- /dev/null +++ b/fs/reiser4/plugin/hash.c @@ -0,0 +1,352 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Hash functions */ + +#include "../debug.h" +#include "plugin_header.h" +#include "plugin.h" +#include "../super.h" +#include "../inode.h" + +#include <linux/types.h> + +/* old rupasov (yura) hash */ +static __u64 hash_rupasov(const unsigned char *name /* name to hash */ , + int len/* @name's length */) +{ + int i; + int j; + int pow; + __u64 a; + __u64 c; + + assert("nikita-672", name != NULL); + assert("nikita-673", len >= 0); + + for (pow = 1, i = 1; i < len; ++i) + pow = pow * 10; + + if (len == 1) + a = name[0] - 48; + else + a = (name[0] - 48) * pow; + + for (i = 1; i < len; ++i) { + c = name[i] - 48; + for (pow = 1, j = i; j < len - 1; ++j) + pow = pow * 10; + a = a + c * pow; + } + for (; i < 40; ++i) { + c = '0' - 48; + for (pow = 1, j = i; j < len - 1; ++j) + pow = pow * 10; + a = a + c * pow; + } + + for (; i < 256; ++i) { + c = i; + for (pow = 1, j = i; j < len - 1; ++j) + pow = pow * 10; + a = a + c * pow; + } + + a = a << 7; + return a; +} + +/* r5 hash */ +static __u64 hash_r5(const unsigned char *name /* name to hash */ , + int len UNUSED_ARG/* @name's length */) +{ + __u64 a = 0; + + assert("nikita-674", name != NULL); + assert("nikita-675", len >= 0); + + while (*name) { + a += *name << 4; + a += *name >> 4; + a *= 11; + name++; + } + return a; +} + +/* Keyed 32-bit hash function using TEA in a Davis-Meyer function + H0 = Key + Hi = E Mi(Hi-1) + Hi-1 + + (see Applied Cryptography, 2nd edition, p448). + + Jeremy Fitzhardinge <jeremy@zip.com.au> 1998 + + Jeremy has agreed to the contents of reiserfs/README. -Hans + + This code was blindly upgraded to __u64 by s/__u32/__u64/g. 
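
   In outline, the construction referred to above can be restated in
   stand-alone user-space C as in the sketch below. The 16-byte name block
   plays the role of the TEA key, the running hash pair is the plaintext,
   and the cipher output is added back into the hash (the Davis-Meyer
   feed-forward). The sketch uses the textbook 32-round, 32-bit TEA rather
   than the widened, reduced-round variant implemented further down, so it
   illustrates the scheme only, not the exact values this plugin produces;
   the initial state reuses k[0] and k[1] from the code below, and the
   sample block in main() is arbitrary.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	// Textbook TEA block cipher: encrypt the two 32-bit words in v[]
	// under the 128-bit key k[].
	static void tea_encrypt(uint32_t v[2], const uint32_t k[4])
	{
		uint32_t v0 = v[0], v1 = v[1], sum = 0;
		int i;

		for (i = 0; i < 32; i++) {
			sum += 0x9e3779b9u;
			v0 += ((v1 << 4) + k[0]) ^ (v1 + sum) ^ ((v1 >> 5) + k[1]);
			v1 += ((v0 << 4) + k[2]) ^ (v0 + sum) ^ ((v0 >> 5) + k[3]);
		}
		v[0] = v0;
		v[1] = v1;
	}

	// One Davis-Meyer step: H_i = E_Mi(H_{i-1}) + H_{i-1}. The message
	// block is used as the cipher key, the previous hash state is the
	// plaintext, and the result is added back into the state.
	static void davis_meyer_step(uint32_t h[2], const uint32_t msg[4])
	{
		uint32_t t[2] = { h[0], h[1] };

		tea_encrypt(t, msg);
		h[0] += t[0];
		h[1] += t[1];
	}

	int main(void)
	{
		uint32_t h[2] = { 0x9464a485u, 0x542e1a94u };	// initial state
		uint32_t block[4] = { 0 };			// one padded name block

		memcpy(block, "example", 7);
		davis_meyer_step(h, block);
		printf("%08x%08x\n", (unsigned)h[0], (unsigned)h[1]);
		return 0;
	}
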
+*/ +static __u64 hash_tea(const unsigned char *name /* name to hash */ , + int len/* @name's length */) +{ + __u64 k[] = { 0x9464a485u, 0x542e1a94u, 0x3e846bffu, 0xb75bcfc3u }; + + __u64 h0 = k[0], h1 = k[1]; + __u64 a, b, c, d; + __u64 pad; + int i; + + assert("nikita-676", name != NULL); + assert("nikita-677", len >= 0); + +#define DELTA 0x9E3779B9u +#define FULLROUNDS 10 /* 32 is overkill, 16 is strong crypto */ +#define PARTROUNDS 6 /* 6 gets complete mixing */ + +/* a, b, c, d - data; h0, h1 - accumulated hash */ +#define TEACORE(rounds) \ + do { \ + __u64 sum = 0; \ + int n = rounds; \ + __u64 b0, b1; \ + \ + b0 = h0; \ + b1 = h1; \ + \ + do { \ + sum += DELTA; \ + b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \ + b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \ + } while (--n); \ + \ + h0 += b0; \ + h1 += b1; \ + } while (0) + + pad = (__u64) len | ((__u64) len << 8); + pad |= pad << 16; + + while (len >= 16) { + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + b = (__u64) name[4] | (__u64) name[5] << 8 | (__u64) name[6] << + 16 | (__u64) name[7] << 24; + c = (__u64) name[8] | (__u64) name[9] << 8 | (__u64) name[10] << + 16 | (__u64) name[11] << 24; + d = (__u64) name[12] | (__u64) name[13] << 8 | (__u64) name[14] + << 16 | (__u64) name[15] << 24; + + TEACORE(PARTROUNDS); + + len -= 16; + name += 16; + } + + if (len >= 12) { + /* assert(len < 16); */ + if (len >= 16) + *(int *)0 = 0; + + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + b = (__u64) name[4] | (__u64) name[5] << 8 | (__u64) name[6] << + 16 | (__u64) name[7] << 24; + c = (__u64) name[8] | (__u64) name[9] << 8 | (__u64) name[10] << + 16 | (__u64) name[11] << 24; + + d = pad; + for (i = 12; i < len; i++) { + d <<= 8; + d |= name[i]; + } + } else if (len >= 8) { + /* assert(len < 12); */ + if (len >= 12) + *(int *)0 = 0; + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + b = (__u64) name[4] | (__u64) name[5] << 8 | (__u64) name[6] << + 16 | (__u64) name[7] << 24; + + c = d = pad; + for (i = 8; i < len; i++) { + c <<= 8; + c |= name[i]; + } + } else if (len >= 4) { + /* assert(len < 8); */ + if (len >= 8) + *(int *)0 = 0; + a = (__u64) name[0] | (__u64) name[1] << 8 | (__u64) name[2] << + 16 | (__u64) name[3] << 24; + + b = c = d = pad; + for (i = 4; i < len; i++) { + b <<= 8; + b |= name[i]; + } + } else { + /* assert(len < 4); */ + if (len >= 4) + *(int *)0 = 0; + a = b = c = d = pad; + for (i = 0; i < len; i++) { + a <<= 8; + a |= name[i]; + } + } + + TEACORE(FULLROUNDS); + +/* return 0;*/ + return h0 ^ h1; + +} + +/* classical 64 bit Fowler/Noll/Vo-1 (FNV-1) hash. + + See http://www.isthe.com/chongo/tech/comp/fnv/ for details. + + Excerpts: + + FNV hashes are designed to be fast while maintaining a low collision + rate. + + [This version also seems to preserve lexicographical order locally.] + + FNV hash algorithms and source code have been released into the public + domain. 
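
   As a quick way to see what values the plugin computes, a minimal
   stand-alone user-space version of the same FNV-1 loop is sketched below
   (multiply by the 64-bit FNV prime, then xor in the next octet, starting
   from the standard offset basis). Like the kernel routine that follows,
   it stops at the terminating NUL and ignores any length argument; the
   name passed in main() is just an arbitrary sample, not anything this
   plugin depends on.

	#include <stdint.h>
	#include <stdio.h>

	// FNV-1 (not FNV-1a): multiply first, then xor in the octet.
	static uint64_t fnv1_hash(const unsigned char *name)
	{
		uint64_t a = 0xcbf29ce484222325ull;		// offset basis
		const uint64_t fnv_64_prime = 0x100000001b3ull;

		for (; *name; ++name) {
			a *= fnv_64_prime;
			a ^= (uint64_t)*name;
		}
		return a;
	}

	int main(void)
	{
		printf("%016llx\n",
		       (unsigned long long)fnv1_hash(
				(const unsigned char *)"lost+found"));
		return 0;
	}
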
+ +*/ +static __u64 hash_fnv1(const unsigned char *name /* name to hash */ , + int len UNUSED_ARG/* @name's length */) +{ + unsigned long long a = 0xcbf29ce484222325ull; + const unsigned long long fnv_64_prime = 0x100000001b3ull; + + assert("nikita-678", name != NULL); + assert("nikita-679", len >= 0); + + /* FNV-1 hash each octet in the buffer */ + for (; *name; ++name) { + /* multiply by the 32 bit FNV magic prime mod 2^64 */ + a *= fnv_64_prime; + /* xor the bottom with the current octet */ + a ^= (unsigned long long)(*name); + } + /* return our new hash value */ + return a; +} + +/* degenerate hash function used to simplify testing of non-unique key + handling */ +static __u64 hash_deg(const unsigned char *name UNUSED_ARG /* name to hash */ , + int len UNUSED_ARG/* @name's length */) +{ + return 0xc0c0c0c010101010ull; +} + +static int change_hash(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + int result; + + assert("nikita-3503", inode != NULL); + assert("nikita-3504", plugin != NULL); + + assert("nikita-3505", is_reiser4_inode(inode)); + assert("nikita-3507", plugin->h.type_id == REISER4_HASH_PLUGIN_TYPE); + + if (!plugin_of_group(inode_file_plugin(inode), REISER4_DIRECTORY_FILE)) + return RETERR(-EINVAL); + + result = 0; + if (inode_hash_plugin(inode) == NULL || + inode_hash_plugin(inode)->h.id != plugin->h.id) { + if (is_dir_empty(inode) == 0) + result = aset_set_unsafe(&reiser4_inode_data(inode)->pset, + PSET_HASH, plugin); + else + result = RETERR(-ENOTEMPTY); + + } + return result; +} + +static reiser4_plugin_ops hash_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = change_hash +}; + +/* hash plugins */ +hash_plugin hash_plugins[LAST_HASH_ID] = { + [RUPASOV_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = RUPASOV_HASH_ID, + .pops = &hash_plugin_ops, + .label = "rupasov", + .desc = "Original Yura's hash", + .linkage = {NULL, NULL} + }, + .hash = hash_rupasov + }, + [R5_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = R5_HASH_ID, + .pops = &hash_plugin_ops, + .label = "r5", + .desc = "r5 hash", + .linkage = {NULL, NULL} + }, + .hash = hash_r5 + }, + [TEA_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = TEA_HASH_ID, + .pops = &hash_plugin_ops, + .label = "tea", + .desc = "tea hash", + .linkage = {NULL, NULL} + }, + .hash = hash_tea + }, + [FNV1_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = FNV1_HASH_ID, + .pops = &hash_plugin_ops, + .label = "fnv1", + .desc = "fnv1 hash", + .linkage = {NULL, NULL} + }, + .hash = hash_fnv1 + }, + [DEGENERATE_HASH_ID] = { + .h = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .id = DEGENERATE_HASH_ID, + .pops = &hash_plugin_ops, + .label = "degenerate hash", + .desc = "Degenerate hash: only for testing", + .linkage = {NULL, NULL} + }, + .hash = hash_deg + } +}; + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/inode_ops.c b/fs/reiser4/plugin/inode_ops.c new file mode 100644 index 000000000000..64b50723fd76 --- /dev/null +++ b/fs/reiser4/plugin/inode_ops.c @@ -0,0 +1,891 @@ +/* + * Copyright 2005 by Hans Reiser, licensing governed by reiser4/README + */ + +/* + * this file contains typical implementations for most of methods of struct + * inode_operations + */ + +#include "../inode.h" +#include "../safe_link.h" + +#include <linux/namei.h> + +static int create_vfs_object(struct inode *parent, struct dentry *dentry, + reiser4_object_create_data *data); + +/** + * reiser4_create_common - create of inode operations + * @parent: inode of parent directory + * @dentry: dentry of new object to create + * @mode: the permissions to use + * @exclusive: + * + * This is common implementation of vfs's create method of struct + * inode_operations. + * Creates regular file using file plugin from parent directory plugin set. + */ +int reiser4_create_common(struct inode *parent, struct dentry *dentry, + umode_t mode, bool exclusive) +{ + reiser4_object_create_data data; + file_plugin *fplug; + + memset(&data, 0, sizeof data); + data.mode = S_IFREG | mode; + fplug = child_create_plugin(parent) ? : inode_create_plugin(parent); + if (!plugin_of_group(fplug, REISER4_REGULAR_FILE)) { + warning("vpf-1900", "'%s' is not a regular file plugin.", + fplug->h.label); + return RETERR(-EIO); + } + data.id = fplug->h.id; + return create_vfs_object(parent, dentry, &data); +} + +int reiser4_lookup_name(struct inode *dir, struct dentry *, reiser4_key *); +void check_light_weight(struct inode *inode, struct inode *parent); + +/** + * reiser4_lookup_common - lookup of inode operations + * @parent: inode of directory to lookup into + * @dentry: name to look for + * @flags: + * + * This is common implementation of vfs's lookup method of struct + * inode_operations. + */ +struct dentry *reiser4_lookup_common(struct inode *parent, + struct dentry *dentry, + unsigned int flags) +{ + reiser4_context *ctx; + int result; + struct dentry *new; + struct inode *inode; + reiser4_dir_entry_desc entry; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return (struct dentry *)ctx; + + /* set up operations on dentry. */ + dentry->d_op = &get_super_private(parent->i_sb)->ops.dentry; + + result = reiser4_lookup_name(parent, dentry, &entry.key); + if (result) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + if (result == -ENOENT) { + /* object not found */ + if (!IS_DEADDIR(parent)) + d_add(dentry, NULL); + return NULL; + } + return ERR_PTR(result); + } + + inode = reiser4_iget(parent->i_sb, &entry.key, 0); + if (IS_ERR(inode)) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return ERR_PTR(PTR_ERR(inode)); + } + + /* success */ + check_light_weight(inode, parent); + new = d_splice_alias(inode, dentry); + reiser4_iget_complete(inode); + + /* prevent balance_dirty_pages() from being called: we don't want to + * do this under directory i_mutex. 
*/ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return new; +} + +static reiser4_block_nr common_estimate_link(struct inode *parent, + struct inode *object); +int reiser4_update_dir(struct inode *); + +static inline void reiser4_check_immutable(struct inode *inode) +{ + do { + if (!reiser4_inode_get_flag(inode, REISER4_IMMUTABLE)) + break; + yield(); + } while (1); +} + +/** + * reiser4_link_common - link of inode operations + * @existing: dentry of object which is to get new name + * @parent: directory where new name is to be created + * @newname: new name + * + * This is common implementation of vfs's link method of struct + * inode_operations. + */ +int reiser4_link_common(struct dentry *existing, struct inode *parent, + struct dentry *newname) +{ + reiser4_context *ctx; + int result; + struct inode *object; + dir_plugin *parent_dplug; + reiser4_dir_entry_desc entry; + reiser4_object_create_data data; + reiser4_block_nr reserve; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + assert("nikita-1431", existing != NULL); + assert("nikita-1432", parent != NULL); + assert("nikita-1433", newname != NULL); + + object = existing->d_inode; + assert("nikita-1434", object != NULL); + + /* check for race with create_object() */ + reiser4_check_immutable(object); + + parent_dplug = inode_dir_plugin(parent); + + memset(&entry, 0, sizeof entry); + entry.obj = object; + + data.mode = object->i_mode; + data.id = inode_file_plugin(object)->h.id; + + reserve = common_estimate_link(parent, existing->d_inode); + if ((__s64) reserve < 0) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return reserve; + } + + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return RETERR(-ENOSPC); + } + + /* + * Subtle race handling: sys_link() doesn't take i_mutex on @parent. It + * means that link(2) can race against unlink(2) or rename(2), and + * inode is dead (->i_nlink == 0) when reiser4_link() is entered. + * + * For such inode we have to undo special processing done in + * reiser4_unlink() viz. creation of safe-link. + */ + if (unlikely(object->i_nlink == 0)) { + result = safe_link_del(reiser4_tree_by_inode(object), + get_inode_oid(object), SAFE_UNLINK); + if (result != 0) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + } + + /* increment nlink of @existing and update its stat data */ + result = reiser4_add_nlink(object, parent, 1); + if (result == 0) { + /* add entry to the parent */ + result = + parent_dplug->add_entry(parent, newname, &data, &entry); + if (result != 0) { + /* failed to add entry to the parent, decrement nlink + of @existing */ + reiser4_del_nlink(object, parent, 1); + /* + * now, if that failed, we have a file with too big + * nlink---space leak, much better than directory + * entry pointing to nowhere + */ + } + } + if (result == 0) { + atomic_inc(&object->i_count); + /* + * Upon successful completion, link() shall mark for update + * the st_ctime field of the file. Also, the st_ctime and + * st_mtime fields of the directory that contains the new + * entry shall be marked for update. 
--SUS + */ + result = reiser4_update_dir(parent); + } + if (result == 0) + d_instantiate(newname, existing->d_inode); + + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +static int unlink_check_and_grab(struct inode *parent, struct dentry *victim); + +/** + * reiser4_unlink_common - unlink of inode operations + * @parent: inode of directory to remove name from + * @victim: name to be removed + * + * This is common implementation of vfs's unlink method of struct + * inode_operations. + */ +int reiser4_unlink_common(struct inode *parent, struct dentry *victim) +{ + reiser4_context *ctx; + int result; + struct inode *object; + file_plugin *fplug; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + object = victim->d_inode; + fplug = inode_file_plugin(object); + assert("nikita-2882", fplug->detach != NULL); + + result = unlink_check_and_grab(parent, victim); + if (result != 0) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + result = fplug->detach(object, parent); + if (result == 0) { + dir_plugin *parent_dplug; + reiser4_dir_entry_desc entry; + + parent_dplug = inode_dir_plugin(parent); + memset(&entry, 0, sizeof entry); + + /* first, delete directory entry */ + result = parent_dplug->rem_entry(parent, victim, &entry); + if (result == 0) { + /* + * if name was removed successfully, we _have_ to + * return 0 from this function, because upper level + * caller (vfs_{rmdir,unlink}) expect this. + * + * now that directory entry is removed, update + * stat-data + */ + reiser4_del_nlink(object, parent, 1); + /* + * Upon successful completion, unlink() shall mark for + * update the st_ctime and st_mtime fields of the + * parent directory. Also, if the file's link count is + * not 0, the st_ctime field of the file shall be + * marked for update. --SUS + */ + reiser4_update_dir(parent); + /* add safe-link for this file */ + if (object->i_nlink == 0) + safe_link_add(object, SAFE_UNLINK); + } + } + + if (unlikely(result != 0)) { + if (result != -ENOMEM) + warning("nikita-3398", "Cannot unlink %llu (%i)", + (unsigned long long)get_inode_oid(object), + result); + /* if operation failed commit pending inode modifications to + * the stat-data */ + reiser4_update_sd(object); + reiser4_update_sd(parent); + } + + reiser4_release_reserved(object->i_sb); + + /* @object's i_ctime was updated by ->rem_link() method(). */ + + /* @victim can be already removed from the disk by this time. Inode is + then marked so that iput() wouldn't try to remove stat data. But + inode itself is still there. + */ + + /* + * we cannot release directory semaphore here, because name has + * already been deleted, but dentry (@victim) still exists. Prevent + * balance_dirty_pages() from being called on exiting this context: we + * don't want to do this under directory i_mutex. + */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/** + * reiser4_symlink_common - symlink of inode operations + * @parent: inode of parent directory + * @dentry: dentry of object to be created + * @linkname: string symlink is to contain + * + * This is common implementation of vfs's symlink method of struct + * inode_operations. + * Creates object using file plugin SYMLINK_FILE_PLUGIN_ID. 
+ */ +int reiser4_symlink_common(struct inode *parent, struct dentry *dentry, + const char *linkname) +{ + reiser4_object_create_data data; + + memset(&data, 0, sizeof data); + data.name = linkname; + data.id = SYMLINK_FILE_PLUGIN_ID; + data.mode = S_IFLNK | S_IRWXUGO; + return create_vfs_object(parent, dentry, &data); +} + +/** + * reiser4_mkdir_common - mkdir of inode operations + * @parent: inode of parent directory + * @dentry: dentry of object to be created + * @mode: the permissions to use + * + * This is common implementation of vfs's mkdir method of struct + * inode_operations. + * Creates object using file plugin DIRECTORY_FILE_PLUGIN_ID. + */ +int reiser4_mkdir_common(struct inode *parent, struct dentry *dentry, umode_t mode) +{ + reiser4_object_create_data data; + + memset(&data, 0, sizeof data); + data.mode = S_IFDIR | mode; + data.id = DIRECTORY_FILE_PLUGIN_ID; + return create_vfs_object(parent, dentry, &data); +} + +/** + * reiser4_mknod_common - mknod of inode operations + * @parent: inode of parent directory + * @dentry: dentry of object to be created + * @mode: the permissions to use and file type + * @rdev: minor and major of new device file + * + * This is common implementation of vfs's mknod method of struct + * inode_operations. + * Creates object using file plugin SPECIAL_FILE_PLUGIN_ID. + */ +int reiser4_mknod_common(struct inode *parent, struct dentry *dentry, + umode_t mode, dev_t rdev) +{ + reiser4_object_create_data data; + + memset(&data, 0, sizeof data); + data.mode = mode; + data.rdev = rdev; + data.id = SPECIAL_FILE_PLUGIN_ID; + return create_vfs_object(parent, dentry, &data); +} + +/* + * implementation of vfs's rename method of struct inode_operations for typical + * directory is in inode_ops_rename.c + */ + +/** + * reiser4_get_link_common: ->get_link() of inode_operations + * @dentry: dentry of symlink + * + * Assumes that inode's i_private points to the content of symbolic link. + */ +const char *reiser4_get_link_common(struct dentry *dentry, + struct inode *inode, + struct delayed_call *done) +{ + if (!dentry) + return ERR_PTR(-ECHILD); + + assert("vs-851", S_ISLNK(dentry->d_inode->i_mode)); + + if (!dentry->d_inode->i_private || + !reiser4_inode_get_flag(dentry->d_inode, REISER4_GENERIC_PTR_USED)) + return ERR_PTR(RETERR(-EINVAL)); + + return dentry->d_inode->i_private; +} + +/** + * reiser4_permission_common - permission of inode operations + * @inode: inode to check permissions for + * @mask: mode bits to check permissions for + * @flags: + * + * Uses generic function to check for rwx permissions. + */ +int reiser4_permission_common(struct inode *inode, int mask) +{ + // generic_permission() says that it's rcu-aware... +#if 0 + if (mask & MAY_NOT_BLOCK) + return -ECHILD; +#endif + return generic_permission(inode, mask); +} + +static int setattr_reserve(reiser4_tree *); + +/* this is common implementation of vfs's setattr method of struct + inode_operations +*/ +int reiser4_setattr_common(struct dentry *dentry, struct iattr *attr) +{ + reiser4_context *ctx; + struct inode *inode; + int result; + + inode = dentry->d_inode; + result = setattr_prepare(dentry, attr); + if (result) + return result; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + assert("nikita-3119", !(attr->ia_valid & ATTR_SIZE)); + + /* + * grab disk space and call standard + * setattr_copy(); + * mark_inode_dirty(). 
+ */ + result = setattr_reserve(reiser4_tree_by_inode(inode)); + if (!result) { + setattr_copy(inode, attr); + mark_inode_dirty(inode); + result = reiser4_update_sd(inode); + } + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +/* this is common implementation of vfs's getattr method of struct + inode_operations +*/ +int reiser4_getattr_common(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags) +{ + struct inode *obj; + + assert("nikita-2298", path != NULL); + assert("nikita-2299", stat != NULL); + + obj = d_inode(path->dentry); + + stat->dev = obj->i_sb->s_dev; + stat->ino = oid_to_uino(get_inode_oid(obj)); + stat->mode = obj->i_mode; + /* don't confuse userland with huge nlink. This is not entirely + * correct, because nlink_t is not necessary 16 bit signed. */ + stat->nlink = min(obj->i_nlink, (typeof(obj->i_nlink)) 0x7fff); + stat->uid = obj->i_uid; + stat->gid = obj->i_gid; + stat->rdev = obj->i_rdev; + stat->atime = obj->i_atime; + stat->mtime = obj->i_mtime; + stat->ctime = obj->i_ctime; + stat->size = obj->i_size; + stat->blocks = + (inode_get_bytes(obj) + VFS_BLKSIZE - 1) >> VFS_BLKSIZE_BITS; + /* "preferred" blocksize for efficient file system I/O */ + stat->blksize = get_super_private(obj->i_sb)->optimal_io_size; + + return 0; +} + +/* Estimate the maximum amount of nodes which might be allocated or changed on + typical new object creation. Typical creation consists of calling create + method of file plugin, adding directory entry to parent and update parent + directory's stat data. +*/ +static reiser4_block_nr estimate_create_vfs_object(struct inode *parent, + /* parent object */ + struct inode *object + /* object */) +{ + assert("vpf-309", parent != NULL); + assert("vpf-307", object != NULL); + + return + /* object creation estimation */ + inode_file_plugin(object)->estimate.create(object) + + /* stat data of parent directory estimation */ + inode_file_plugin(parent)->estimate.update(parent) + + /* adding entry estimation */ + inode_dir_plugin(parent)->estimate.add_entry(parent) + + /* to undo in the case of failure */ + inode_dir_plugin(parent)->estimate.rem_entry(parent); +} + +/* Create child in directory. + + . get object's plugin + . get fresh inode + . initialize inode + . add object's stat-data + . initialize object's directory + . add entry to the parent + . 
instantiate dentry + +*/ +static int do_create_vfs_child(reiser4_object_create_data * data,/* parameters + of new + object */ + struct inode **retobj) +{ + int result; + + struct dentry *dentry; /* parent object */ + struct inode *parent; /* new name */ + + dir_plugin *par_dir; /* directory plugin on the parent */ + dir_plugin *obj_dir; /* directory plugin on the new object */ + file_plugin *obj_plug; /* object plugin on the new object */ + struct inode *object; /* new object */ + reiser4_block_nr reserve; + + reiser4_dir_entry_desc entry; /* new directory entry */ + + assert("nikita-1420", data != NULL); + parent = data->parent; + dentry = data->dentry; + + assert("nikita-1418", parent != NULL); + assert("nikita-1419", dentry != NULL); + + /* check, that name is acceptable for parent */ + par_dir = inode_dir_plugin(parent); + if (par_dir->is_name_acceptable && + !par_dir->is_name_acceptable(parent, + dentry->d_name.name, + (int)dentry->d_name.len)) + return RETERR(-ENAMETOOLONG); + + result = 0; + obj_plug = file_plugin_by_id((int)data->id); + if (obj_plug == NULL) { + warning("nikita-430", "Cannot find plugin %i", data->id); + return RETERR(-ENOENT); + } + object = new_inode(parent->i_sb); + if (object == NULL) + return RETERR(-ENOMEM); + /* new_inode() initializes i_ino to "arbitrary" value. Reset it to 0, + * to simplify error handling: if some error occurs before i_ino is + * initialized with oid, i_ino should already be set to some + * distinguished value. */ + object->i_ino = 0; + + /* So that on error iput will be called. */ + *retobj = object; + + memset(&entry, 0, sizeof entry); + entry.obj = object; + + set_plugin(&reiser4_inode_data(object)->pset, PSET_FILE, + file_plugin_to_plugin(obj_plug)); + result = obj_plug->set_plug_in_inode(object, parent, data); + if (result) { + warning("nikita-431", "Cannot install plugin %i on %llx", + data->id, (unsigned long long)get_inode_oid(object)); + return result; + } + + /* reget plugin after installation */ + obj_plug = inode_file_plugin(object); + + if (obj_plug->create_object == NULL) { + return RETERR(-EPERM); + } + + /* if any of hash, tail, sd or permission plugins for newly created + object are not set yet set them here inheriting them from parent + directory + */ + assert("nikita-2070", obj_plug->adjust_to_parent != NULL); + result = obj_plug->adjust_to_parent(object, + parent, + object->i_sb->s_root->d_inode); + if (result == 0) + result = finish_pset(object); + if (result != 0) { + warning("nikita-432", "Cannot inherit from %llx to %llx", + (unsigned long long)get_inode_oid(parent), + (unsigned long long)get_inode_oid(object)); + return result; + } + + /* setup inode and file-operations for this inode */ + setup_inode_ops(object, data); + + /* call file plugin's method to initialize plugin specific part of + * inode */ + if (obj_plug->init_inode_data) + obj_plug->init_inode_data(object, data, 1/*create */); + + /* obtain directory plugin (if any) for new object. */ + obj_dir = inode_dir_plugin(object); + if (obj_dir != NULL && obj_dir->init == NULL) { + return RETERR(-EPERM); + } + + reiser4_inode_data(object)->locality_id = get_inode_oid(parent); + + reserve = estimate_create_vfs_object(parent, object); + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) { + return RETERR(-ENOSPC); + } + + /* mark inode `immutable'. We disable changes to the file being + created until valid directory entry for it is inserted. 
Otherwise, + if file were expanded and insertion of directory entry fails, we + have to remove file, but we only alloted enough space in + transaction to remove _empty_ file. 3.x code used to remove stat + data in different transaction thus possibly leaking disk space on + crash. This all only matters if it's possible to access file + without name, for example, by inode number + */ + reiser4_inode_set_flag(object, REISER4_IMMUTABLE); + + /* create empty object, this includes allocation of new objectid. For + directories this implies creation of dot and dotdot */ + assert("nikita-2265", reiser4_inode_get_flag(object, REISER4_NO_SD)); + + /* mark inode as `loaded'. From this point onward + reiser4_delete_inode() will try to remove its stat-data. */ + reiser4_inode_set_flag(object, REISER4_LOADED); + + result = obj_plug->create_object(object, parent, data); + if (result != 0) { + reiser4_inode_clr_flag(object, REISER4_IMMUTABLE); + if (result != -ENAMETOOLONG && result != -ENOMEM) + warning("nikita-2219", + "Failed to create sd for %llu", + (unsigned long long)get_inode_oid(object)); + return result; + } + + if (obj_dir != NULL) + result = obj_dir->init(object, parent, data); + if (result == 0) { + assert("nikita-434", !reiser4_inode_get_flag(object, + REISER4_NO_SD)); + /* insert inode into VFS hash table */ + insert_inode_hash(object); + /* create entry */ + result = par_dir->add_entry(parent, dentry, data, &entry); + if (result == 0) { + /* If O_CREAT is set and the file did not previously + exist, upon successful completion, open() shall + mark for update the st_atime, st_ctime, and + st_mtime fields of the file and the st_ctime and + st_mtime fields of the parent directory. --SUS + */ + object->i_ctime = current_time(object); + reiser4_update_dir(parent); + } + if (result != 0) + /* cleanup failure to add entry */ + obj_plug->detach(object, parent); + } else if (result != -ENOMEM) + warning("nikita-2219", "Failed to initialize dir for %llu: %i", + (unsigned long long)get_inode_oid(object), result); + + /* + * update stat-data, committing all pending modifications to the inode + * fields. + */ + reiser4_update_sd(object); + if (result != 0) { + /* if everything was ok (result == 0), parent stat-data is + * already updated above (update_parent_dir()) */ + reiser4_update_sd(parent); + /* failure to create entry, remove object */ + obj_plug->delete_object(object); + } + + /* file has name now, clear immutable flag */ + reiser4_inode_clr_flag(object, REISER4_IMMUTABLE); + + /* on error, iput() will call ->delete_inode(). We should keep track + of the existence of stat-data for this inode and avoid attempt to + remove it in reiser4_delete_inode(). 
This is accomplished through + REISER4_NO_SD bit in inode.u.reiser4_i.plugin.flags + */ + return result; +} + +/* this is helper for common implementations of reiser4_mkdir, reiser4_create, + reiser4_mknod and reiser4_symlink +*/ +static int +create_vfs_object(struct inode *parent, + struct dentry *dentry, reiser4_object_create_data * data) +{ + reiser4_context *ctx; + int result; + struct inode *child; + + ctx = reiser4_init_context(parent->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + context_set_commit_async(ctx); + + data->parent = parent; + data->dentry = dentry; + child = NULL; + result = do_create_vfs_child(data, &child); + if (unlikely(result != 0)) { + if (child != NULL) { + /* for unlinked inode accounting in iput() */ + clear_nlink(child); + reiser4_make_bad_inode(child); + iput(child); + } + } else + d_instantiate(dentry, child); + + reiser4_exit_context(ctx); + return result; +} + +/** + * helper for link_common. Estimate disk space necessary to add a link + * from @parent to @object + */ +static reiser4_block_nr common_estimate_link(struct inode *parent /* parent + * directory + */, + struct inode *object /* object to + * which new + * link is + * being + * created */) +{ + reiser4_block_nr res = 0; + file_plugin *fplug; + dir_plugin *dplug; + + assert("vpf-317", object != NULL); + assert("vpf-318", parent != NULL); + + fplug = inode_file_plugin(object); + dplug = inode_dir_plugin(parent); + /* VS-FIXME-HANS: why do we do fplug->estimate.update(object) twice + * instead of multiplying by 2? */ + /* reiser4_add_nlink(object) */ + res += fplug->estimate.update(object); + /* add_entry(parent) */ + res += dplug->estimate.add_entry(parent); + /* reiser4_del_nlink(object) */ + res += fplug->estimate.update(object); + /* update_dir(parent) */ + res += inode_file_plugin(parent)->estimate.update(parent); + /* safe-link */ + res += estimate_one_item_removal(reiser4_tree_by_inode(object)); + + return res; +} + +/* Estimate disk space necessary to remove a link between @parent and + @object. +*/ +static reiser4_block_nr estimate_unlink(struct inode *parent /* parent + * directory */, + struct inode *object /* object to which + * new link is + * being created + */) +{ + reiser4_block_nr res = 0; + file_plugin *fplug; + dir_plugin *dplug; + + assert("vpf-317", object != NULL); + assert("vpf-318", parent != NULL); + + fplug = inode_file_plugin(object); + dplug = inode_dir_plugin(parent); + + /* rem_entry(parent) */ + res += dplug->estimate.rem_entry(parent); + /* reiser4_del_nlink(object) */ + res += fplug->estimate.update(object); + /* update_dir(parent) */ + res += inode_file_plugin(parent)->estimate.update(parent); + /* fplug->unlink */ + res += fplug->estimate.unlink(object, parent); + /* safe-link */ + res += estimate_one_insert_item(reiser4_tree_by_inode(object)); + + return res; +} + +/* helper for reiser4_unlink_common. Estimate and grab space for unlink. 
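
   The estimate-then-grab sequence used here follows the same reservation
   discipline as the other helpers in this file: compute an upper bound on
   the number of blocks the operation may dirty, reserve that many before
   touching the tree, and fail with -ENOSPC up front rather than half-way
   through the change. A reduced, hypothetical example of the shape (the
   function name and the particular estimates summed are illustrative only,
   not something added by this patch):

	static int example_reserve_for_removal(struct inode *parent,
					       struct inode *child)
	{
		reiser4_block_nr needed;

		// upper bound: remove one directory entry and update the
		// child's stat data
		needed = inode_dir_plugin(parent)->estimate.rem_entry(parent) +
			 inode_file_plugin(child)->estimate.update(child);

		// BA_CAN_COMMIT lets the grab force a transaction commit if
		// that is what it takes to free enough reserved space
		if (reiser4_grab_space(needed, BA_CAN_COMMIT))
			return RETERR(-ENOSPC);

		return 0;
	}
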
*/ +static int unlink_check_and_grab(struct inode *parent, struct dentry *victim) +{ + file_plugin *fplug; + struct inode *child; + int result; + + result = 0; + child = victim->d_inode; + fplug = inode_file_plugin(child); + + /* check for race with create_object() */ + reiser4_check_immutable(child); + + /* object being deleted should have stat data */ + assert("vs-949", !reiser4_inode_get_flag(child, REISER4_NO_SD)); + + /* ask object plugin */ + if (fplug->can_rem_link != NULL && !fplug->can_rem_link(child)) + return RETERR(-ENOTEMPTY); + + result = (int)estimate_unlink(parent, child); + if (result < 0) + return result; + + return reiser4_grab_reserved(child->i_sb, result, BA_CAN_COMMIT); +} + +/* helper for reiser4_setattr_common */ +static int setattr_reserve(reiser4_tree * tree) +{ + assert("vs-1096", is_grab_enabled(get_current_context())); + return reiser4_grab_space(estimate_one_insert_into_item(tree), + BA_CAN_COMMIT); +} + +/* helper function. Standards require that for many file-system operations + on success ctime and mtime of parent directory is to be updated. */ +int reiser4_update_dir(struct inode *dir) +{ + assert("nikita-2525", dir != NULL); + + dir->i_ctime = dir->i_mtime = current_time(dir); + return reiser4_update_sd(dir); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/inode_ops_rename.c b/fs/reiser4/plugin/inode_ops_rename.c new file mode 100644 index 000000000000..bedc86fd69ae --- /dev/null +++ b/fs/reiser4/plugin/inode_ops_rename.c @@ -0,0 +1,958 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "../inode.h" +#include "../safe_link.h" + +static const char *possible_leak = "Possible disk space leak."; + +/* re-bind existing name at @from_coord in @from_dir to point to @to_inode. + + Helper function called from hashed_rename() */ +static int replace_name(struct inode *to_inode, /* inode where @from_coord is + * to be re-targeted at */ + struct inode *from_dir, /* directory where @from_coord + * lives */ + struct inode *from_inode, /* inode @from_coord + * originally point to */ + coord_t *from_coord, /* where directory entry is in + * the tree */ + lock_handle * from_lh/* lock handle on @from_coord */) +{ + item_plugin *from_item; + int result; + znode *node; + + coord_clear_iplug(from_coord); + node = from_coord->node; + result = zload(node); + if (result != 0) + return result; + from_item = item_plugin_by_coord(from_coord); + if (plugin_of_group(item_plugin_by_coord(from_coord), + DIR_ENTRY_ITEM_TYPE)) { + reiser4_key to_key; + + build_sd_key(to_inode, &to_key); + + /* everything is found and prepared to change directory entry + at @from_coord to point to @to_inode. + + @to_inode is just about to get new name, so bump its link + counter. + + */ + result = reiser4_add_nlink(to_inode, from_dir, 0); + if (result != 0) { + /* Don't issue warning: this may be plain -EMLINK */ + zrelse(node); + return result; + } + + result = + from_item->s.dir.update_key(from_coord, &to_key, from_lh); + if (result != 0) { + reiser4_del_nlink(to_inode, from_dir, 0); + zrelse(node); + return result; + } + + /* @from_inode just lost its name, he-he. + + If @from_inode was directory, it contained dotdot pointing + to @from_dir. @from_dir i_nlink will be decreased when + iput() will be called on @from_inode. 
+ + If file-system is not ADG (hard-links are + supported on directories), iput(from_inode) will not remove + @from_inode, and thus above is incorrect, but hard-links on + directories are problematic in many other respects. + */ + result = reiser4_del_nlink(from_inode, from_dir, 0); + if (result != 0) { + warning("nikita-2330", + "Cannot remove link from source: %i. %s", + result, possible_leak); + } + /* Has to return success, because entry is already + * modified. */ + result = 0; + + /* NOTE-NIKITA consider calling plugin method in stead of + accessing inode fields directly. */ + from_dir->i_mtime = current_time(from_dir); + } else { + warning("nikita-2326", "Unexpected item type"); + result = RETERR(-EIO); + } + zrelse(node); + return result; +} + +/* add new entry pointing to @inode into @dir at @coord, locked by @lh + + Helper function used by hashed_rename(). */ +static int add_name(struct inode *inode, /* inode where @coord is to be + * re-targeted at */ + struct inode *dir, /* directory where @coord lives */ + struct dentry *name, /* new name */ + coord_t *coord, /* where directory entry is in the tree + */ + lock_handle * lh, /* lock handle on @coord */ + int is_dir/* true, if @inode is directory */) +{ + int result; + reiser4_dir_entry_desc entry; + + assert("nikita-2333", lh->node == coord->node); + assert("nikita-2334", is_dir == S_ISDIR(inode->i_mode)); + + memset(&entry, 0, sizeof entry); + entry.obj = inode; + /* build key of directory entry description */ + inode_dir_plugin(dir)->build_entry_key(dir, &name->d_name, &entry.key); + + /* ext2 does this in different order: first inserts new entry, + then increases directory nlink. We don't want do this, + because reiser4_add_nlink() calls ->add_link() plugin + method that can fail for whatever reason, leaving as with + cleanup problems. + */ + /* @inode is getting new name */ + reiser4_add_nlink(inode, dir, 0); + /* create @new_name in @new_dir pointing to + @old_inode */ + result = WITH_COORD(coord, + inode_dir_item_plugin(dir)->s.dir.add_entry(dir, + coord, + lh, + name, + &entry)); + if (result != 0) { + int result2; + result2 = reiser4_del_nlink(inode, dir, 0); + if (result2 != 0) { + warning("nikita-2327", + "Cannot drop link on %lli %i. %s", + (unsigned long long)get_inode_oid(inode), + result2, possible_leak); + } + } else + INODE_INC_FIELD(dir, i_size); + return result; +} + +static reiser4_block_nr estimate_rename(struct inode *old_dir, /* directory + * where @old is + * located */ + struct dentry *old_name,/* old name */ + struct inode *new_dir, /* directory + * where @new is + * located */ + struct dentry *new_name /* new name */) +{ + reiser4_block_nr res1, res2; + dir_plugin * p_parent_old, *p_parent_new; + file_plugin * p_child_old, *p_child_new; + + assert("vpf-311", old_dir != NULL); + assert("vpf-312", new_dir != NULL); + assert("vpf-313", old_name != NULL); + assert("vpf-314", new_name != NULL); + + p_parent_old = inode_dir_plugin(old_dir); + p_parent_new = inode_dir_plugin(new_dir); + p_child_old = inode_file_plugin(old_name->d_inode); + if (new_name->d_inode) + p_child_new = inode_file_plugin(new_name->d_inode); + else + p_child_new = NULL; + + /* find_entry - can insert one leaf. 
*/ + res1 = res2 = 1; + + /* replace_name */ + { + /* reiser4_add_nlink(p_child_old) and + * reiser4_del_nlink(p_child_old) */ + res1 += 2 * p_child_old->estimate.update(old_name->d_inode); + /* update key */ + res1 += 1; + /* reiser4_del_nlink(p_child_new) */ + if (p_child_new) + res1 += p_child_new->estimate.update(new_name->d_inode); + } + + /* else add_name */ + { + /* reiser4_add_nlink(p_parent_new) and + * reiser4_del_nlink(p_parent_new) */ + res2 += + 2 * inode_file_plugin(new_dir)->estimate.update(new_dir); + /* reiser4_add_nlink(p_parent_old) */ + res2 += p_child_old->estimate.update(old_name->d_inode); + /* add_entry(p_parent_new) */ + res2 += p_parent_new->estimate.add_entry(new_dir); + /* reiser4_del_nlink(p_parent_old) */ + res2 += p_child_old->estimate.update(old_name->d_inode); + } + + res1 = res1 < res2 ? res2 : res1; + + /* reiser4_write_sd(p_parent_new) */ + res1 += inode_file_plugin(new_dir)->estimate.update(new_dir); + + /* reiser4_write_sd(p_child_new) */ + if (p_child_new) + res1 += p_child_new->estimate.update(new_name->d_inode); + + /* hashed_rem_entry(p_parent_old) */ + res1 += p_parent_old->estimate.rem_entry(old_dir); + + /* reiser4_del_nlink(p_child_old) */ + res1 += p_child_old->estimate.update(old_name->d_inode); + + /* replace_name */ + { + /* reiser4_add_nlink(p_parent_dir_new) */ + res1 += inode_file_plugin(new_dir)->estimate.update(new_dir); + /* update_key */ + res1 += 1; + /* reiser4_del_nlink(p_parent_new) */ + res1 += inode_file_plugin(new_dir)->estimate.update(new_dir); + /* reiser4_del_nlink(p_parent_old) */ + res1 += inode_file_plugin(old_dir)->estimate.update(old_dir); + } + + /* reiser4_write_sd(p_parent_old) */ + res1 += inode_file_plugin(old_dir)->estimate.update(old_dir); + + /* reiser4_write_sd(p_child_old) */ + res1 += p_child_old->estimate.update(old_name->d_inode); + + return res1; +} + +static int hashed_rename_estimate_and_grab(struct inode *old_dir, /* directory + * where @old + * is located + */ + struct dentry *old_name,/* old name + */ + struct inode *new_dir, /* directory + * where @new + * is located + */ + struct dentry *new_name /* new name + */) +{ + reiser4_block_nr reserve; + + reserve = estimate_rename(old_dir, old_name, new_dir, new_name); + + if (reiser4_grab_space(reserve, BA_CAN_COMMIT)) + return RETERR(-ENOSPC); + + return 0; +} + +/* check whether @old_inode and @new_inode can be moved within file system + * tree. This singles out attempts to rename pseudo-files, for example. */ +static int can_rename(struct inode *old_dir, struct inode *old_inode, + struct inode *new_dir, struct inode *new_inode) +{ + file_plugin *fplug; + dir_plugin *dplug; + + assert("nikita-3370", old_inode != NULL); + + dplug = inode_dir_plugin(new_dir); + fplug = inode_file_plugin(old_inode); + + if (dplug == NULL) + return RETERR(-ENOTDIR); + else if (new_dir->i_op->create == NULL) + return RETERR(-EPERM); + else if (!fplug->can_add_link(old_inode)) + return RETERR(-EMLINK); + else if (new_inode != NULL) { + fplug = inode_file_plugin(new_inode); + if (fplug->can_rem_link != NULL && + !fplug->can_rem_link(new_inode)) + return RETERR(-EBUSY); + } + return 0; +} + +int reiser4_find_entry(struct inode *, struct dentry *, lock_handle * , + znode_lock_mode, reiser4_dir_entry_desc *); +int reiser4_update_dir(struct inode *); + +/* this is common implementation of vfs's rename2 method of struct + inode_operations + See comments in the body. 
+ + It is arguable that this function can be made generic so, that it + will be applicable to any kind of directory plugin that deals with + directories composed out of directory entries. The only obstacle + here is that we don't have any data-type to represent directory + entry. This should be re-considered when more than one different + directory plugin will be implemented. +*/ +int reiser4_rename2_common(struct inode *old_dir /* directory where @old + * is located */ , + struct dentry *old_name /* old name */ , + struct inode *new_dir /* directory where @new + * is located */ , + struct dentry *new_name /* new name */ , + unsigned flags /* specific flags */) +{ + /* From `The Open Group Base Specifications Issue 6' + + If either the old or new argument names a symbolic link, rename() + shall operate on the symbolic link itself, and shall not resolve + the last component of the argument. If the old argument and the new + argument resolve to the same existing file, rename() shall return + successfully and perform no other action. + + [this is done by VFS: vfs_rename()] + + If the old argument points to the pathname of a file that is not a + directory, the new argument shall not point to the pathname of a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the link named by the new argument exists, it shall + be removed and old renamed to new. In this case, a link named new + shall remain visible to other processes throughout the renaming + operation and refer either to the file referred to by new or old + before the operation began. + + [we should assure this] + + Write access permission is required for + both the directory containing old and the directory containing new. + + [checked by VFS: vfs_rename->may_delete(), may_create()] + + If the old argument points to the pathname of a directory, the new + argument shall not point to the pathname of a file that is not a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the directory named by the new argument exists, it + shall be removed and old renamed to new. In this case, a link named + new shall exist throughout the renaming operation and shall refer + either to the directory referred to by new or old before the + operation began. + + [we should assure this] + + If new names an existing directory, it shall be + required to be an empty directory. + + [we should check this] + + If the old argument points to a pathname of a symbolic link, the + symbolic link shall be renamed. If the new argument points to a + pathname of a symbolic link, the symbolic link shall be removed. + + The new pathname shall not contain a path prefix that names + old. Write access permission is required for the directory + containing old and the directory containing new. If the old + argument points to the pathname of a directory, write access + permission may be required for the directory named by old, and, if + it exists, the directory named by new. + + [checked by VFS: vfs_rename(), vfs_rename_dir()] + + If the link named by the new argument exists and the file's link + count becomes 0 when it is removed and no process has the file + open, the space occupied by the file shall be freed and the file + shall no longer be accessible. If one or more processes have the + file open when the last link is removed, the link shall be removed + before rename() returns, but the removal of the file contents shall + be postponed until all references to the file are closed. 
+ + [iput() handles this, but we can do this manually, a la + reiser4_unlink()] + + Upon successful completion, rename() shall mark for update the + st_ctime and st_mtime fields of the parent directory of each file. + + [N/A] + + */ + + /* From Documentation/filesystems/vfs.txt: + + rename2: this has an additional flags argument compared to rename. + f no flags are supported by the filesystem then this method + need not be implemented. If some flags are supported then the + filesystem must return -EINVAL for any unsupported or unknown + flags. Currently the following flags are implemented: + (1) RENAME_NOREPLACE: this flag indicates that if the target + of the rename exists the rename should fail with -EEXIST + instead of replacing the target. The VFS already checks for + existence, so for local filesystems the RENAME_NOREPLACE + implementation is equivalent to plain rename. + (2) RENAME_EXCHANGE: exchange source and target. Both must + exist; this is checked by the VFS. Unlike plain rename, + source and target may be of different type. + */ + + static const unsigned supported_flags = RENAME_NOREPLACE; + + reiser4_context *ctx; + int result; + int is_dir; /* is @old_name directory */ + + struct inode *old_inode; + struct inode *new_inode; + coord_t *new_coord; + + struct reiser4_dentry_fsdata *new_fsdata; + dir_plugin *dplug; + file_plugin *fplug; + + reiser4_dir_entry_desc *old_entry, *new_entry, *dotdot_entry; + lock_handle * new_lh, *dotdot_lh; + struct dentry *dotdot_name; + struct reiser4_dentry_fsdata *dataonstack; + + ctx = reiser4_init_context(old_dir->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + /* + * Check rename2() flags. + * + * "If some flags are supported then the filesystem must return + * -EINVAL for any unsupported or unknown flags." + * + * We support: + * - RENAME_NOREPLACE (no-op) + */ + if ((flags & supported_flags) != flags) + return RETERR(-EINVAL); + + old_entry = kzalloc(3 * sizeof(*old_entry) + 2 * sizeof(*new_lh) + + sizeof(*dotdot_name) + sizeof(*dataonstack), + reiser4_ctx_gfp_mask_get()); + if (!old_entry) { + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return RETERR(-ENOMEM); + } + + new_entry = old_entry + 1; + dotdot_entry = old_entry + 2; + new_lh = (lock_handle *)(old_entry + 3); + dotdot_lh = new_lh + 1; + dotdot_name = (struct dentry *)(new_lh + 2); + dataonstack = (struct reiser4_dentry_fsdata *)(dotdot_name + 1); + + assert("nikita-2318", old_dir != NULL); + assert("nikita-2319", new_dir != NULL); + assert("nikita-2320", old_name != NULL); + assert("nikita-2321", new_name != NULL); + + old_inode = old_name->d_inode; + new_inode = new_name->d_inode; + + dplug = inode_dir_plugin(old_dir); + fplug = NULL; + + new_fsdata = reiser4_get_dentry_fsdata(new_name); + if (IS_ERR(new_fsdata)) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return PTR_ERR(new_fsdata); + } + + new_coord = &new_fsdata->dec.entry_coord; + coord_clear_iplug(new_coord); + + is_dir = S_ISDIR(old_inode->i_mode); + + assert("nikita-3461", old_inode->i_nlink >= 1 + !!is_dir); + + /* if target is existing directory and it's not empty---return error. + + This check is done specifically, because is_dir_empty() requires + tree traversal and have to be done before locks are taken. 
+ */ + if (is_dir && new_inode != NULL && is_dir_empty(new_inode) != 0) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return RETERR(-ENOTEMPTY); + } + + result = can_rename(old_dir, old_inode, new_dir, new_inode); + if (result != 0) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + result = hashed_rename_estimate_and_grab(old_dir, old_name, + new_dir, new_name); + if (result != 0) { + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + init_lh(new_lh); + + /* find entry for @new_name */ + result = reiser4_find_entry(new_dir, new_name, new_lh, ZNODE_WRITE_LOCK, + new_entry); + + if (IS_CBKERR(result)) { + done_lh(new_lh); + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; + } + + reiser4_seal_done(&new_fsdata->dec.entry_seal); + + /* add or replace name for @old_inode as @new_name */ + if (new_inode != NULL) { + /* target (@new_name) exists. */ + /* Not clear what to do with objects that are + both directories and files at the same time. */ + if (result == CBK_COORD_FOUND) { + result = replace_name(old_inode, + new_dir, + new_inode, new_coord, new_lh); + if (result == 0) + fplug = inode_file_plugin(new_inode); + } else if (result == CBK_COORD_NOTFOUND) { + /* VFS told us that @new_name is bound to existing + inode, but we failed to find directory entry. */ + warning("nikita-2324", "Target not found"); + result = RETERR(-ENOENT); + } + } else { + /* target (@new_name) doesn't exists. */ + if (result == CBK_COORD_NOTFOUND) + result = add_name(old_inode, + new_dir, + new_name, new_coord, new_lh, is_dir); + else if (result == CBK_COORD_FOUND) { + /* VFS told us that @new_name is "negative" dentry, + but we found directory entry. */ + warning("nikita-2331", "Target found unexpectedly"); + result = RETERR(-EIO); + } + } + + assert("nikita-3462", ergo(result == 0, + old_inode->i_nlink >= 2 + !!is_dir)); + + /* We are done with all modifications to the @new_dir, release lock on + node. */ + done_lh(new_lh); + + if (fplug != NULL) { + /* detach @new_inode from name-space */ + result = fplug->detach(new_inode, new_dir); + if (result != 0) + warning("nikita-2330", "Cannot detach %lli: %i. %s", + (unsigned long long)get_inode_oid(new_inode), + result, possible_leak); + } + + if (new_inode != NULL) + reiser4_update_sd(new_inode); + + if (result == 0) { + old_entry->obj = old_inode; + + dplug->build_entry_key(old_dir, + &old_name->d_name, &old_entry->key); + + /* At this stage new name was introduced for + @old_inode. @old_inode, @new_dir, and @new_inode i_nlink + counters were updated. + + We want to remove @old_name now. If @old_inode wasn't + directory this is simple. + */ + result = dplug->rem_entry(old_dir, old_name, old_entry); + if (result != 0 && result != -ENOMEM) { + warning("nikita-2335", + "Cannot remove old name: %i", result); + } else { + result = reiser4_del_nlink(old_inode, old_dir, 0); + if (result != 0 && result != -ENOMEM) { + warning("nikita-2337", + "Cannot drop link on old: %i", result); + } + } + + if (result == 0 && is_dir) { + /* @old_inode is directory. We also have to update + dotdot entry. 
*/ + coord_t *dotdot_coord; + + memset(dataonstack, 0, sizeof(*dataonstack)); + memset(dotdot_entry, 0, sizeof(*dotdot_entry)); + dotdot_entry->obj = old_dir; + memset(dotdot_name, 0, sizeof(*dotdot_name)); + dotdot_name->d_name.name = ".."; + dotdot_name->d_name.len = 2; + /* + * allocate ->d_fsdata on the stack to avoid using + * reiser4_get_dentry_fsdata(). Locking is not needed, + * because dentry is private to the current thread. + */ + dotdot_name->d_fsdata = dataonstack; + init_lh(dotdot_lh); + + dotdot_coord = &dataonstack->dec.entry_coord; + coord_clear_iplug(dotdot_coord); + + result = reiser4_find_entry(old_inode, dotdot_name, + dotdot_lh, ZNODE_WRITE_LOCK, + dotdot_entry); + if (result == 0) { + /* replace_name() decreases i_nlink on + * @old_dir */ + result = replace_name(new_dir, + old_inode, + old_dir, + dotdot_coord, dotdot_lh); + } else + result = RETERR(-EIO); + done_lh(dotdot_lh); + } + } + reiser4_update_dir(new_dir); + reiser4_update_dir(old_dir); + reiser4_update_sd(old_inode); + if (result == 0) { + file_plugin *fplug; + + if (new_inode != NULL) { + /* add safe-link for target file (in case we removed + * last reference to the poor fellow */ + fplug = inode_file_plugin(new_inode); + if (new_inode->i_nlink == 0) + result = safe_link_add(new_inode, SAFE_UNLINK); + } + } + kfree(old_entry); + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} + +#if 0 +int reiser4_rename_common(struct inode *old_dir /* directory where @old + * is located */ , + struct dentry *old_name /* old name */ , + struct inode *new_dir /* directory where @new + * is located */ , + struct dentry *new_name/* new name */) +{ + /* From `The Open Group Base Specifications Issue 6' + + If either the old or new argument names a symbolic link, rename() + shall operate on the symbolic link itself, and shall not resolve + the last component of the argument. If the old argument and the new + argument resolve to the same existing file, rename() shall return + successfully and perform no other action. + + [this is done by VFS: vfs_rename()] + + If the old argument points to the pathname of a file that is not a + directory, the new argument shall not point to the pathname of a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the link named by the new argument exists, it shall + be removed and old renamed to new. In this case, a link named new + shall remain visible to other processes throughout the renaming + operation and refer either to the file referred to by new or old + before the operation began. + + [we should assure this] + + Write access permission is required for + both the directory containing old and the directory containing new. + + [checked by VFS: vfs_rename->may_delete(), may_create()] + + If the old argument points to the pathname of a directory, the new + argument shall not point to the pathname of a file that is not a + directory. + + [checked by VFS: vfs_rename->may_delete()] + + If the directory named by the new argument exists, it + shall be removed and old renamed to new. In this case, a link named + new shall exist throughout the renaming operation and shall refer + either to the directory referred to by new or old before the + operation began. + + [we should assure this] + + If new names an existing directory, it shall be + required to be an empty directory. + + [we should check this] + + If the old argument points to a pathname of a symbolic link, the + symbolic link shall be renamed. 
If the new argument points to a + pathname of a symbolic link, the symbolic link shall be removed. + + The new pathname shall not contain a path prefix that names + old. Write access permission is required for the directory + containing old and the directory containing new. If the old + argument points to the pathname of a directory, write access + permission may be required for the directory named by old, and, if + it exists, the directory named by new. + + [checked by VFS: vfs_rename(), vfs_rename_dir()] + + If the link named by the new argument exists and the file's link + count becomes 0 when it is removed and no process has the file + open, the space occupied by the file shall be freed and the file + shall no longer be accessible. If one or more processes have the + file open when the last link is removed, the link shall be removed + before rename() returns, but the removal of the file contents shall + be postponed until all references to the file are closed. + + [iput() handles this, but we can do this manually, a la + reiser4_unlink()] + + Upon successful completion, rename() shall mark for update the + st_ctime and st_mtime fields of the parent directory of each file. + + [N/A] + + */ + reiser4_context *ctx; + int result; + int is_dir; /* is @old_name directory */ + struct inode *old_inode; + struct inode *new_inode; + reiser4_dir_entry_desc old_entry; + reiser4_dir_entry_desc new_entry; + coord_t *new_coord; + struct reiser4_dentry_fsdata *new_fsdata; + lock_handle new_lh; + dir_plugin *dplug; + file_plugin *fplug; + + ctx = reiser4_init_context(old_dir->i_sb); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + assert("nikita-2318", old_dir != NULL); + assert("nikita-2319", new_dir != NULL); + assert("nikita-2320", old_name != NULL); + assert("nikita-2321", new_name != NULL); + + old_inode = old_name->d_inode; + new_inode = new_name->d_inode; + + dplug = inode_dir_plugin(old_dir); + fplug = NULL; + + new_fsdata = reiser4_get_dentry_fsdata(new_name); + if (IS_ERR(new_fsdata)) { + result = PTR_ERR(new_fsdata); + goto exit; + } + + new_coord = &new_fsdata->dec.entry_coord; + coord_clear_iplug(new_coord); + + is_dir = S_ISDIR(old_inode->i_mode); + + assert("nikita-3461", old_inode->i_nlink >= 1 + !!is_dir); + + /* if target is existing directory and it's not empty---return error. + + This check is done specifically, because is_dir_empty() requires + tree traversal and have to be done before locks are taken. + */ + if (is_dir && new_inode != NULL && is_dir_empty(new_inode) != 0) + return RETERR(-ENOTEMPTY); + + result = can_rename(old_dir, old_inode, new_dir, new_inode); + if (result != 0) + goto exit; + + result = hashed_rename_estimate_and_grab(old_dir, old_name, + new_dir, new_name); + if (result != 0) + goto exit; + + init_lh(&new_lh); + + /* find entry for @new_name */ + result = reiser4_find_entry(new_dir, new_name, &new_lh, + ZNODE_WRITE_LOCK, &new_entry); + + if (IS_CBKERR(result)) { + done_lh(&new_lh); + goto exit; + } + + reiser4_seal_done(&new_fsdata->dec.entry_seal); + + /* add or replace name for @old_inode as @new_name */ + if (new_inode != NULL) { + /* target (@new_name) exists. */ + /* Not clear what to do with objects that are + both directories and files at the same time. 
*/ + if (result == CBK_COORD_FOUND) { + result = replace_name(old_inode, + new_dir, + new_inode, new_coord, &new_lh); + if (result == 0) + fplug = inode_file_plugin(new_inode); + } else if (result == CBK_COORD_NOTFOUND) { + /* VFS told us that @new_name is bound to existing + inode, but we failed to find directory entry. */ + warning("nikita-2324", "Target not found"); + result = RETERR(-ENOENT); + } + } else { + /* target (@new_name) doesn't exists. */ + if (result == CBK_COORD_NOTFOUND) + result = add_name(old_inode, + new_dir, + new_name, new_coord, &new_lh, is_dir); + else if (result == CBK_COORD_FOUND) { + /* VFS told us that @new_name is "negative" dentry, + but we found directory entry. */ + warning("nikita-2331", "Target found unexpectedly"); + result = RETERR(-EIO); + } + } + + assert("nikita-3462", ergo(result == 0, + old_inode->i_nlink >= 2 + !!is_dir)); + + /* We are done with all modifications to the @new_dir, release lock on + node. */ + done_lh(&new_lh); + + if (fplug != NULL) { + /* detach @new_inode from name-space */ + result = fplug->detach(new_inode, new_dir); + if (result != 0) + warning("nikita-2330", "Cannot detach %lli: %i. %s", + (unsigned long long)get_inode_oid(new_inode), + result, possible_leak); + } + + if (new_inode != NULL) + reiser4_update_sd(new_inode); + + if (result == 0) { + memset(&old_entry, 0, sizeof old_entry); + old_entry.obj = old_inode; + + dplug->build_entry_key(old_dir, + &old_name->d_name, &old_entry.key); + + /* At this stage new name was introduced for + @old_inode. @old_inode, @new_dir, and @new_inode i_nlink + counters were updated. + + We want to remove @old_name now. If @old_inode wasn't + directory this is simple. + */ + result = dplug->rem_entry(old_dir, old_name, &old_entry); + /*result = rem_entry_hashed(old_dir, old_name, &old_entry); */ + if (result != 0 && result != -ENOMEM) { + warning("nikita-2335", + "Cannot remove old name: %i", result); + } else { + result = reiser4_del_nlink(old_inode, old_dir, 0); + if (result != 0 && result != -ENOMEM) { + warning("nikita-2337", + "Cannot drop link on old: %i", result); + } + } + + if (result == 0 && is_dir) { + /* @old_inode is directory. We also have to update + dotdot entry. */ + coord_t *dotdot_coord; + lock_handle dotdot_lh; + struct dentry dotdot_name; + reiser4_dir_entry_desc dotdot_entry; + struct reiser4_dentry_fsdata dataonstack; + struct reiser4_dentry_fsdata *fsdata; + + memset(&dataonstack, 0, sizeof dataonstack); + memset(&dotdot_entry, 0, sizeof dotdot_entry); + dotdot_entry.obj = old_dir; + memset(&dotdot_name, 0, sizeof dotdot_name); + dotdot_name.d_name.name = ".."; + dotdot_name.d_name.len = 2; + /* + * allocate ->d_fsdata on the stack to avoid using + * reiser4_get_dentry_fsdata(). Locking is not needed, + * because dentry is private to the current thread. 
+ */ + dotdot_name.d_fsdata = &dataonstack; + init_lh(&dotdot_lh); + + fsdata = &dataonstack; + dotdot_coord = &fsdata->dec.entry_coord; + coord_clear_iplug(dotdot_coord); + + result = reiser4_find_entry(old_inode, + &dotdot_name, + &dotdot_lh, + ZNODE_WRITE_LOCK, + &dotdot_entry); + if (result == 0) { + /* replace_name() decreases i_nlink on + * @old_dir */ + result = replace_name(new_dir, + old_inode, + old_dir, + dotdot_coord, &dotdot_lh); + } else + result = RETERR(-EIO); + done_lh(&dotdot_lh); + } + } + reiser4_update_dir(new_dir); + reiser4_update_dir(old_dir); + reiser4_update_sd(old_inode); + if (result == 0) { + file_plugin *fplug; + + if (new_inode != NULL) { + /* add safe-link for target file (in case we removed + * last reference to the poor fellow */ + fplug = inode_file_plugin(new_inode); + if (new_inode->i_nlink == 0) + result = safe_link_add(new_inode, SAFE_UNLINK); + } + } +exit: + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + return result; +} +#endif diff --git a/fs/reiser4/plugin/item/Makefile b/fs/reiser4/plugin/item/Makefile new file mode 100644 index 000000000000..1bae6238722e --- /dev/null +++ b/fs/reiser4/plugin/item/Makefile @@ -0,0 +1,18 @@ +obj-$(CONFIG_REISER4_FS) += item_plugins.o + +item_plugins-objs := \ + item.o \ + static_stat.o \ + sde.o \ + cde.o \ + blackbox.o \ + internal.o \ + tail.o \ + ctail.o \ + extent.o \ + extent_item_ops.o \ + extent_file_ops.o \ + extent_flush_ops.o + + + diff --git a/fs/reiser4/plugin/item/acl.h b/fs/reiser4/plugin/item/acl.h new file mode 100644 index 000000000000..f26762a1c287 --- /dev/null +++ b/fs/reiser4/plugin/item/acl.h @@ -0,0 +1,66 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry. */ + +#if !defined( __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ ) +#define __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" + +#include <linux/fs.h> +#include <linux/dcache.h> /* for struct dentry */ + +typedef struct directory_entry_format { + /* key of object stat-data. It's not necessary to store whole + key here, because it's always key of stat-data, so minor + packing locality and offset can be omitted here. But this + relies on particular key allocation scheme for stat-data, so, + for extensibility sake, whole key can be stored here. + + We store key as array of bytes, because we don't want 8-byte + alignment of dir entries. + */ + obj_key_id id; + /* file name. Null terminated string. 
*/ + d8 name[0]; +} directory_entry_format; + +void print_de(const char *prefix, coord_t * coord); +int extract_key_de(const coord_t * coord, reiser4_key * key); +int update_key_de(const coord_t * coord, const reiser4_key * key, + lock_handle * lh); +char *extract_name_de(const coord_t * coord, char *buf); +unsigned extract_file_type_de(const coord_t * coord); +int add_entry_de(struct inode *dir, coord_t * coord, + lock_handle * lh, const struct dentry *name, + reiser4_dir_entry_desc * entry); +int rem_entry_de(struct inode *dir, const struct qstr *name, coord_t * coord, + lock_handle * lh, reiser4_dir_entry_desc * entry); +int max_name_len_de(const struct inode *dir); + +int de_rem_and_shrink(struct inode *dir, coord_t * coord, int length); + +char *extract_dent_name(const coord_t * coord, + directory_entry_format * dent, char *buf); + +#if REISER4_LARGE_KEY +#define DE_NAME_BUF_LEN (24) +#else +#define DE_NAME_BUF_LEN (16) +#endif + +/* __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/blackbox.c b/fs/reiser4/plugin/item/blackbox.c new file mode 100644 index 000000000000..f13ff64572c5 --- /dev/null +++ b/fs/reiser4/plugin/item/blackbox.c @@ -0,0 +1,142 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Black box item implementation */ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../coord.h" +#include "../../tree.h" +#include "../../lock.h" + +#include "blackbox.h" +#include "item.h" +#include "../plugin.h" + +int +store_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length) +{ + int result; + reiser4_item_data idata; + coord_t coord; + lock_handle lh; + + memset(&idata, 0, sizeof idata); + + idata.data = data; + idata.user = 0; + idata.length = length; + idata.iplug = item_plugin_by_id(BLACK_BOX_ID); + + init_lh(&lh); + result = insert_by_key(tree, key, + &idata, &coord, &lh, LEAF_LEVEL, CBK_UNIQUE); + + assert("nikita-3413", + ergo(result == 0, + WITH_COORD(&coord, + item_length_by_coord(&coord) == length))); + + done_lh(&lh); + return result; +} + +int +load_black_box(reiser4_tree * tree, + reiser4_key * key, void *data, int length, int exact) +{ + int result; + coord_t coord; + lock_handle lh; + + init_lh(&lh); + result = coord_by_key(tree, key, + &coord, &lh, ZNODE_READ_LOCK, + exact ? FIND_EXACT : FIND_MAX_NOT_MORE_THAN, + LEAF_LEVEL, LEAF_LEVEL, CBK_UNIQUE, NULL); + + if (result == 0) { + int ilen; + + result = zload(coord.node); + if (result == 0) { + ilen = item_length_by_coord(&coord); + if (ilen <= length) { + memcpy(data, item_body_by_coord(&coord), ilen); + unit_key_by_coord(&coord, key); + } else if (exact) { + /* + * item is larger than buffer provided by the + * user. Only issue a warning if @exact is + * set. If @exact is false, we are iterating + * over all safe-links and here we are reaching + * the end of the iteration. 
+ */ + warning("nikita-3415", + "Wrong black box length: %i > %i", + ilen, length); + result = RETERR(-EIO); + } + zrelse(coord.node); + } + } + + done_lh(&lh); + return result; + +} + +int +update_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length) +{ + int result; + coord_t coord; + lock_handle lh; + + init_lh(&lh); + result = coord_by_key(tree, key, + &coord, &lh, ZNODE_READ_LOCK, + FIND_EXACT, + LEAF_LEVEL, LEAF_LEVEL, CBK_UNIQUE, NULL); + if (result == 0) { + int ilen; + + result = zload(coord.node); + if (result == 0) { + ilen = item_length_by_coord(&coord); + if (length <= ilen) { + memcpy(item_body_by_coord(&coord), data, + length); + } else { + warning("nikita-3437", + "Wrong black box length: %i < %i", + ilen, length); + result = RETERR(-EIO); + } + zrelse(coord.node); + } + } + + done_lh(&lh); + return result; + +} + +int kill_black_box(reiser4_tree * tree, const reiser4_key * key) +{ + return reiser4_cut_tree(tree, key, key, NULL, 1); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/blackbox.h b/fs/reiser4/plugin/item/blackbox.h new file mode 100644 index 000000000000..f5b7af382dc7 --- /dev/null +++ b/fs/reiser4/plugin/item/blackbox.h @@ -0,0 +1,33 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* "Black box" entry to fixed-width contain user supplied data */ + +#if !defined( __FS_REISER4_BLACK_BOX_H__ ) +#define __FS_REISER4_BLACK_BOX_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" + +extern int store_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length); +extern int load_black_box(reiser4_tree * tree, + reiser4_key * key, void *data, int length, int exact); +extern int kill_black_box(reiser4_tree * tree, const reiser4_key * key); +extern int update_black_box(reiser4_tree * tree, + const reiser4_key * key, void *data, int length); + +/* __FS_REISER4_BLACK_BOX_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/cde.c b/fs/reiser4/plugin/item/cde.c new file mode 100644 index 000000000000..e9afd144c39c --- /dev/null +++ b/fs/reiser4/plugin/item/cde.c @@ -0,0 +1,1004 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry implementation */ + +/* DESCRIPTION: + + This is "compound" directory item plugin implementation. This directory + item type is compound (as opposed to the "simple directory item" in + fs/reiser4/plugin/item/sde.[ch]), because it consists of several directory + entries. + + The reason behind this decision is disk space efficiency: all directory + entries inside the same directory have identical fragment in their + keys. This, of course, depends on key assignment policy. In our default key + assignment policy, all directory entries have the same locality which is + equal to the object id of their directory. + + Composing directory item out of several directory entries for the same + directory allows us to store said key fragment only once. That is, this is + some ad hoc form of key compression (stem compression) that is implemented + here, because general key compression is not supposed to be implemented in + v4.0. 
+ + Another decision that was made regarding all directory item plugins, is + that they will store entry keys unaligned. This is for that sake of disk + space efficiency again. + + In should be noted, that storing keys unaligned increases CPU consumption, + at least on some architectures. + + Internal on-disk structure of the compound directory item is the following: + + HEADER cde_item_format. Here number of entries is stored. + ENTRY_HEADER_0 cde_unit_header. Here part of entry key and + ENTRY_HEADER_1 offset of entry body are stored. + ENTRY_HEADER_2 (basically two last parts of key) + ... + ENTRY_HEADER_N + ENTRY_BODY_0 directory_entry_format. Here part of stat data key and + ENTRY_BODY_1 NUL-terminated name are stored. + ENTRY_BODY_2 (part of statadta key in the + sence that since all SDs have + zero offset, this offset is not + stored on disk). + ... + ENTRY_BODY_N + + When it comes to the balancing, each directory entry in compound directory + item is unit, that is, something that can be cut from one item and pasted + into another item of the same type. Handling of unit cut and paste is major + reason for the complexity of code below. + +*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" +#include "../../coord.h" +#include "sde.h" +#include "cde.h" +#include "item.h" +#include "../node/node.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../carry.h" +#include "../../tree.h" +#include "../../inode.h" + +#include <linux/fs.h> /* for struct inode */ +#include <linux/dcache.h> /* for struct dentry */ + +#if 0 +#define CHECKME(coord) \ +({ \ + const char *message; \ + coord_t dup; \ + \ + coord_dup_nocheck(&dup, (coord)); \ + dup.unit_pos = 0; \ + assert("nikita-2871", cde_check(&dup, &message) == 0); \ +}) +#else +#define CHECKME(coord) noop +#endif + +/* return body of compound directory item at @coord */ +static inline cde_item_format *formatted_at(const coord_t * coord) +{ + assert("nikita-1282", coord != NULL); + return item_body_by_coord(coord); +} + +/* return entry header at @coord */ +static inline cde_unit_header *header_at(const coord_t * + coord /* coord of item */ , + int idx /* index of unit */ ) +{ + assert("nikita-1283", coord != NULL); + return &formatted_at(coord)->entry[idx]; +} + +/* return number of units in compound directory item at @coord */ +static int units(const coord_t * coord /* coord of item */ ) +{ + return le16_to_cpu(get_unaligned(&formatted_at(coord)->num_of_entries)); +} + +/* return offset of the body of @idx-th entry in @coord */ +static unsigned int offset_of(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ ) +{ + if (idx < units(coord)) + return le16_to_cpu(get_unaligned(&header_at(coord, idx)->offset)); + else if (idx == units(coord)) + return item_length_by_coord(coord); + else + impossible("nikita-1308", "Wrong idx"); + return 0; +} + +/* set offset of the body of @idx-th entry in @coord */ +static void set_offset(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ , + unsigned int offset /* new offset */ ) +{ + put_unaligned(cpu_to_le16((__u16) offset), &header_at(coord, idx)->offset); +} + +static void adj_offset(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ , + int delta /* offset change */ ) +{ + d16 *doffset; + __u16 offset; + + doffset = &header_at(coord, idx)->offset; + offset = le16_to_cpu(get_unaligned(doffset)); + offset += delta; + 
put_unaligned(cpu_to_le16((__u16) offset), doffset); +} + +/* return pointer to @offset-th byte from the beginning of @coord */ +static char *address(const coord_t * coord /* coord of item */ , + int offset) +{ + return ((char *)item_body_by_coord(coord)) + offset; +} + +/* return pointer to the body of @idx-th entry in @coord */ +static directory_entry_format *entry_at(const coord_t * coord /* coord of + * item */ , + int idx /* index of unit */ ) +{ + return (directory_entry_format *) address(coord, + (int)offset_of(coord, idx)); +} + +/* return number of unit referenced by @coord */ +static int idx_of(const coord_t * coord /* coord of item */ ) +{ + assert("nikita-1285", coord != NULL); + return coord->unit_pos; +} + +/* find position where entry with @entry_key would be inserted into @coord */ +static int find(const coord_t * coord /* coord of item */ , + const reiser4_key * entry_key /* key to look for */ , + cmp_t * last /* result of last comparison */ ) +{ + int entries; + + int left; + int right; + + cde_unit_header *header; + + assert("nikita-1295", coord != NULL); + assert("nikita-1296", entry_key != NULL); + assert("nikita-1297", last != NULL); + + entries = units(coord); + left = 0; + right = entries - 1; + while (right - left >= REISER4_SEQ_SEARCH_BREAK) { + int median; + + median = (left + right) >> 1; + + header = header_at(coord, median); + *last = de_id_key_cmp(&header->hash, entry_key); + switch (*last) { + case LESS_THAN: + left = median; + break; + case GREATER_THAN: + right = median; + break; + case EQUAL_TO:{ + do { + median--; + header--; + } while (median >= 0 && + de_id_key_cmp(&header->hash, + entry_key) == EQUAL_TO); + return median + 1; + } + } + } + header = header_at(coord, left); + for (; left < entries; ++left, ++header) { + prefetch(header + 1); + *last = de_id_key_cmp(&header->hash, entry_key); + if (*last != LESS_THAN) + break; + } + if (left < entries) + return left; + else + return RETERR(-ENOENT); + +} + +/* expand @coord as to accommodate for insertion of @no new entries starting + from @pos, with total bodies size @size. 
*/ +static int expand_item(const coord_t * coord /* coord of item */ , + int pos /* unit position */ , int no /* number of new + * units*/ , + int size /* total size of new units' data */ , + unsigned int data_size /* free space already reserved + * in the item for insertion */ ) +{ + int entries; + cde_unit_header *header; + char *dent; + int i; + + assert("nikita-1310", coord != NULL); + assert("nikita-1311", pos >= 0); + assert("nikita-1312", no > 0); + assert("nikita-1313", data_size >= no * sizeof(directory_entry_format)); + assert("nikita-1343", + item_length_by_coord(coord) >= + (int)(size + data_size + no * sizeof *header)); + + entries = units(coord); + + if (pos == entries) + dent = address(coord, size); + else + dent = (char *)entry_at(coord, pos); + /* place where new header will be in */ + header = header_at(coord, pos); + /* free space for new entry headers */ + memmove(header + no, header, + (unsigned)(address(coord, size) - (char *)header)); + /* if adding to the end initialise first new header */ + if (pos == entries) { + set_offset(coord, pos, (unsigned)size); + } + + /* adjust entry pointer and size */ + dent = dent + no * sizeof *header; + size += no * sizeof *header; + /* free space for new entries */ + memmove(dent + data_size, dent, + (unsigned)(address(coord, size) - dent)); + + /* increase counter */ + entries += no; + put_unaligned(cpu_to_le16((__u16) entries), &formatted_at(coord)->num_of_entries); + + /* [ 0 ... pos ] entries were shifted by no * ( sizeof *header ) + bytes. */ + for (i = 0; i <= pos; ++i) + adj_offset(coord, i, no * sizeof *header); + /* [ pos + no ... +\infty ) entries were shifted by ( no * + sizeof *header + data_size ) bytes */ + for (i = pos + no; i < entries; ++i) + adj_offset(coord, i, no * sizeof *header + data_size); + return 0; +} + +/* insert new @entry into item */ +static int expand(const coord_t * coord /* coord of item */ , + struct cde_entry * entry /* entry to insert */ , + int len /* length of @entry data */ , + int *pos /* position to insert */ , + reiser4_dir_entry_desc * dir_entry /* parameters for new + * entry */ ) +{ + cmp_t cmp_res; + int datasize; + + *pos = find(coord, &dir_entry->key, &cmp_res); + if (*pos < 0) + *pos = units(coord); + + datasize = sizeof(directory_entry_format); + if (is_longname(entry->name->name, entry->name->len)) + datasize += entry->name->len + 1; + + expand_item(coord, *pos, 1, item_length_by_coord(coord) - len, + datasize); + return 0; +} + +/* paste body of @entry into item */ +static int paste_entry(const coord_t * coord /* coord of item */ , + struct cde_entry * entry /* new entry */ , + int pos /* position to insert */ , + reiser4_dir_entry_desc * dir_entry /* parameters for + * new entry */ ) +{ + cde_unit_header *header; + directory_entry_format *dent; + const char *name; + int len; + + header = header_at(coord, pos); + dent = entry_at(coord, pos); + + build_de_id_by_key(&dir_entry->key, &header->hash); + build_inode_key_id(entry->obj, &dent->id); + /* AUDIT unsafe strcpy() operation! 
It should be replaced with + much less CPU hungry + memcpy( ( char * ) dent -> name, entry -> name -> name , entry -> name -> len ); + + Also a more major thing is that there should be a way to figure out + amount of space in dent -> name and be able to check that we are + not going to overwrite more than we supposed to */ + name = entry->name->name; + len = entry->name->len; + if (is_longname(name, len)) { + strcpy((unsigned char *)dent->name, name); + put_unaligned(0, &dent->name[len]); + } + return 0; +} + +/* estimate how much space is necessary in item to insert/paste set of entries + described in @data. */ +int estimate_cde(const coord_t * coord /* coord of item */ , + const reiser4_item_data * data /* parameters for new item */ ) +{ + struct cde_entry_data *e; + int result; + int i; + + e = (struct cde_entry_data *) data->data; + + assert("nikita-1288", e != NULL); + assert("nikita-1289", e->num_of_entries >= 0); + + if (coord == NULL) + /* insert */ + result = sizeof(cde_item_format); + else + /* paste */ + result = 0; + + result += e->num_of_entries * + (sizeof(cde_unit_header) + sizeof(directory_entry_format)); + for (i = 0; i < e->num_of_entries; ++i) { + const char *name; + int len; + + name = e->entry[i].name->name; + len = e->entry[i].name->len; + assert("nikita-2054", strlen(name) == len); + if (is_longname(name, len)) + result += len + 1; + } + ((reiser4_item_data *) data)->length = result; + return result; +} + +/* ->nr_units() method for this item plugin. */ +pos_in_node_t nr_units_cde(const coord_t * coord /* coord of item */ ) +{ + return units(coord); +} + +/* ->unit_key() method for this item plugin. */ +reiser4_key *unit_key_cde(const coord_t * coord /* coord of item */ , + reiser4_key * key /* resulting key */ ) +{ + assert("nikita-1452", coord != NULL); + assert("nikita-1345", idx_of(coord) < units(coord)); + assert("nikita-1346", key != NULL); + + item_key_by_coord(coord, key); + extract_key_from_de_id(extract_dir_id_from_key(key), + &header_at(coord, idx_of(coord))->hash, key); + return key; +} + +/* mergeable_cde(): implementation of ->mergeable() item method. + + Two directory items are mergeable iff they are from the same + directory. That simple. + +*/ +int mergeable_cde(const coord_t * p1 /* coord of first item */ , + const coord_t * p2 /* coord of second item */ ) +{ + reiser4_key k1; + reiser4_key k2; + + assert("nikita-1339", p1 != NULL); + assert("nikita-1340", p2 != NULL); + + return + (item_plugin_by_coord(p1) == item_plugin_by_coord(p2)) && + (extract_dir_id_from_key(item_key_by_coord(p1, &k1)) == + extract_dir_id_from_key(item_key_by_coord(p2, &k2))); + +} + +/* ->max_key_inside() method for this item plugin. */ +reiser4_key *max_key_inside_cde(const coord_t * coord /* coord of item */ , + reiser4_key * result /* resulting key */ ) +{ + assert("nikita-1342", coord != NULL); + + item_key_by_coord(coord, result); + set_key_ordering(result, get_key_ordering(reiser4_max_key())); + set_key_fulloid(result, get_key_fulloid(reiser4_max_key())); + set_key_offset(result, get_key_offset(reiser4_max_key())); + return result; +} + +/* @data contains data which are to be put into tree */ +int can_contain_key_cde(const coord_t * coord /* coord of item */ , + const reiser4_key * key /* key to check */ , + const reiser4_item_data * data /* parameters of new + * item/unit being + * created */ ) +{ + reiser4_key item_key; + + /* FIXME-VS: do not rely on anything but iplug field of @data. 
Only + data->iplug is initialized */ + assert("vs-457", data && data->iplug); +/* assert( "vs-553", data -> user == 0 );*/ + item_key_by_coord(coord, &item_key); + + return (item_plugin_by_coord(coord) == data->iplug) && + (extract_dir_id_from_key(&item_key) == + extract_dir_id_from_key(key)); +} + +#if REISER4_DEBUG +/* cde_check ->check() method for compressed directory items + + used for debugging, every item should have here the most complete + possible check of the consistency of the item that the inventor can + construct +*/ +int reiser4_check_cde(const coord_t * coord /* coord of item to check */, + const char **error /* where to store error message */) +{ + int i; + int result; + char *item_start; + char *item_end; + reiser4_key key; + + coord_t c; + + assert("nikita-1357", coord != NULL); + assert("nikita-1358", error != NULL); + + if (!ergo(coord->item_pos != 0, + is_dot_key(item_key_by_coord(coord, &key)))) { + *error = "CDE doesn't start with dot"; + return -1; + } + item_start = item_body_by_coord(coord); + item_end = item_start + item_length_by_coord(coord); + + coord_dup(&c, coord); + result = 0; + for (i = 0; i < units(coord); ++i) { + directory_entry_format *entry; + + if ((char *)(header_at(coord, i) + 1) > + item_end - units(coord) * sizeof *entry) { + *error = "CDE header is out of bounds"; + result = -1; + break; + } + entry = entry_at(coord, i); + if ((char *)entry < item_start + sizeof(cde_item_format)) { + *error = "CDE header is too low"; + result = -1; + break; + } + if ((char *)(entry + 1) > item_end) { + *error = "CDE header is too high"; + result = -1; + break; + } + } + + return result; +} +#endif + +/* ->init() method for this item plugin. */ +int init_cde(coord_t * coord /* coord of item */ , + coord_t * from UNUSED_ARG, reiser4_item_data * data /* structure used for insertion */ + UNUSED_ARG) +{ + put_unaligned(cpu_to_le16(0), &formatted_at(coord)->num_of_entries); + return 0; +} + +/* ->lookup() method for this item plugin. */ +lookup_result lookup_cde(const reiser4_key * key /* key to search for */ , + lookup_bias bias /* search bias */ , + coord_t * coord /* coord of item to lookup in */ ) +{ + cmp_t last_comp; + int pos; + + reiser4_key utmost_key; + + assert("nikita-1293", coord != NULL); + assert("nikita-1294", key != NULL); + + CHECKME(coord); + + if (keygt(item_key_by_coord(coord, &utmost_key), key)) { + coord->unit_pos = 0; + coord->between = BEFORE_UNIT; + return CBK_COORD_NOTFOUND; + } + pos = find(coord, key, &last_comp); + if (pos >= 0) { + coord->unit_pos = (int)pos; + switch (last_comp) { + case EQUAL_TO: + coord->between = AT_UNIT; + return CBK_COORD_FOUND; + case GREATER_THAN: + coord->between = BEFORE_UNIT; + return RETERR(-ENOENT); + case LESS_THAN: + default: + impossible("nikita-1298", "Broken find"); + return RETERR(-EIO); + } + } else { + coord->unit_pos = units(coord) - 1; + coord->between = AFTER_UNIT; + return (bias == + FIND_MAX_NOT_MORE_THAN) ? CBK_COORD_FOUND : + CBK_COORD_NOTFOUND; + } +} + +/* ->paste() method for this item plugin. 
*/ +int paste_cde(coord_t * coord /* coord of item */ , + reiser4_item_data * data /* parameters of new unit being + * inserted */ , + carry_plugin_info * info UNUSED_ARG /* todo carry queue */ ) +{ + struct cde_entry_data *e; + int result; + int i; + + CHECKME(coord); + e = (struct cde_entry_data *) data->data; + + result = 0; + for (i = 0; i < e->num_of_entries; ++i) { + int pos; + int phantom_size; + + phantom_size = data->length; + if (units(coord) == 0) + phantom_size -= sizeof(cde_item_format); + + result = + expand(coord, e->entry + i, phantom_size, &pos, data->arg); + if (result != 0) + break; + result = paste_entry(coord, e->entry + i, pos, data->arg); + if (result != 0) + break; + } + CHECKME(coord); + return result; +} + +/* amount of space occupied by all entries starting from @idx both headers and + bodies. */ +static unsigned int part_size(const coord_t * coord /* coord of item */ , + int idx /* index of unit */ ) +{ + assert("nikita-1299", coord != NULL); + assert("nikita-1300", idx < (int)units(coord)); + + return sizeof(cde_item_format) + + (idx + 1) * sizeof(cde_unit_header) + offset_of(coord, + idx + 1) - + offset_of(coord, 0); +} + +/* how many but not more than @want units of @source can be merged with + item in @target node. If pend == append - we try to append last item + of @target by first units of @source. If pend == prepend - we try to + "prepend" first item in @target by last units of @source. @target + node has @free_space bytes of free space. Total size of those units + are returned via @size */ +int can_shift_cde(unsigned free_space /* free space in item */ , + coord_t * coord /* coord of source item */ , + znode * target /* target node */ , + shift_direction pend /* shift direction */ , + unsigned *size /* resulting number of shifted bytes */ , + unsigned want /* maximal number of bytes to shift */ ) +{ + int shift; + + CHECKME(coord); + if (want == 0) { + *size = 0; + return 0; + } + + /* pend == SHIFT_LEFT <==> shifting to the left */ + if (pend == SHIFT_LEFT) { + for (shift = min((int)want - 1, units(coord)); shift >= 0; + --shift) { + *size = part_size(coord, shift); + if (target != NULL) + *size -= sizeof(cde_item_format); + if (*size <= free_space) + break; + } + shift = shift + 1; + } else { + int total_size; + + assert("nikita-1301", pend == SHIFT_RIGHT); + + total_size = item_length_by_coord(coord); + for (shift = units(coord) - want - 1; shift < units(coord) - 1; + ++shift) { + *size = total_size - part_size(coord, shift); + if (target == NULL) + *size += sizeof(cde_item_format); + if (*size <= free_space) + break; + } + shift = units(coord) - shift - 1; + } + if (shift == 0) + *size = 0; + CHECKME(coord); + return shift; +} + +/* ->copy_units() method for this item plugin. 
*/ +void copy_units_cde(coord_t * target /* coord of target item */ , + coord_t * source /* coord of source item */ , + unsigned from /* starting unit */ , + unsigned count /* how many units to copy */ , + shift_direction where_is_free_space /* shift direction */ , + unsigned free_space /* free space in item */ ) +{ + char *header_from; + char *header_to; + + char *entry_from; + char *entry_to; + + int pos_in_target; + int data_size; + int data_delta; + int i; + + assert("nikita-1303", target != NULL); + assert("nikita-1304", source != NULL); + assert("nikita-1305", (int)from < units(source)); + assert("nikita-1307", (int)(from + count) <= units(source)); + + if (where_is_free_space == SHIFT_LEFT) { + assert("nikita-1453", from == 0); + pos_in_target = units(target); + } else { + assert("nikita-1309", (int)(from + count) == units(source)); + pos_in_target = 0; + memmove(item_body_by_coord(target), + (char *)item_body_by_coord(target) + free_space, + item_length_by_coord(target) - free_space); + } + + CHECKME(target); + CHECKME(source); + + /* expand @target */ + data_size = + offset_of(source, (int)(from + count)) - offset_of(source, + (int)from); + + if (units(target) == 0) + free_space -= sizeof(cde_item_format); + + expand_item(target, pos_in_target, (int)count, + (int)(item_length_by_coord(target) - free_space), + (unsigned)data_size); + + /* copy first @count units of @source into @target */ + data_delta = + offset_of(target, pos_in_target) - offset_of(source, (int)from); + + /* copy entries */ + entry_from = (char *)entry_at(source, (int)from); + entry_to = (char *)entry_at(source, (int)(from + count)); + memmove(entry_at(target, pos_in_target), entry_from, + (unsigned)(entry_to - entry_from)); + + /* copy headers */ + header_from = (char *)header_at(source, (int)from); + header_to = (char *)header_at(source, (int)(from + count)); + memmove(header_at(target, pos_in_target), header_from, + (unsigned)(header_to - header_from)); + + /* update offsets */ + for (i = pos_in_target; i < (int)(pos_in_target + count); ++i) + adj_offset(target, i, data_delta); + CHECKME(target); + CHECKME(source); +} + +/* ->cut_units() method for this item plugin. 
*/ +int cut_units_cde(coord_t * coord /* coord of item */ , + pos_in_node_t from /* start unit pos */ , + pos_in_node_t to /* stop unit pos */ , + struct carry_cut_data *cdata UNUSED_ARG, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + char *header_from; + char *header_to; + + char *entry_from; + char *entry_to; + + int size; + int entry_delta; + int header_delta; + int i; + + unsigned count; + + CHECKME(coord); + + count = to - from + 1; + + assert("nikita-1454", coord != NULL); + assert("nikita-1455", (int)(from + count) <= units(coord)); + + if (smallest_removed) + unit_key_by_coord(coord, smallest_removed); + + if (new_first) { + coord_t next; + + /* not everything is cut from item head */ + assert("vs-1527", from == 0); + assert("vs-1528", to < units(coord) - 1); + + coord_dup(&next, coord); + next.unit_pos++; + unit_key_by_coord(&next, new_first); + } + + size = item_length_by_coord(coord); + if (count == (unsigned)units(coord)) { + return size; + } + + header_from = (char *)header_at(coord, (int)from); + header_to = (char *)header_at(coord, (int)(from + count)); + + entry_from = (char *)entry_at(coord, (int)from); + entry_to = (char *)entry_at(coord, (int)(from + count)); + + /* move headers */ + memmove(header_from, header_to, + (unsigned)(address(coord, size) - header_to)); + + header_delta = header_to - header_from; + + entry_from -= header_delta; + entry_to -= header_delta; + size -= header_delta; + + /* copy entries */ + memmove(entry_from, entry_to, + (unsigned)(address(coord, size) - entry_to)); + + entry_delta = entry_to - entry_from; + size -= entry_delta; + + /* update offsets */ + + for (i = 0; i < (int)from; ++i) + adj_offset(coord, i, -header_delta); + + for (i = from; i < units(coord) - (int)count; ++i) + adj_offset(coord, i, -header_delta - entry_delta); + + put_unaligned(cpu_to_le16((__u16) units(coord) - count), + &formatted_at(coord)->num_of_entries); + + if (from == 0) { + /* entries from head was removed - move remaining to right */ + memmove((char *)item_body_by_coord(coord) + + header_delta + entry_delta, item_body_by_coord(coord), + (unsigned)size); + if (REISER4_DEBUG) + memset(item_body_by_coord(coord), 0, + (unsigned)header_delta + entry_delta); + } else { + /* freed space is already at the end of item */ + if (REISER4_DEBUG) + memset((char *)item_body_by_coord(coord) + size, 0, + (unsigned)header_delta + entry_delta); + } + + return header_delta + entry_delta; +} + +int kill_units_cde(coord_t * coord /* coord of item */ , + pos_in_node_t from /* start unit pos */ , + pos_in_node_t to /* stop unit pos */ , + struct carry_kill_data *kdata UNUSED_ARG, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + return cut_units_cde(coord, from, to, NULL, smallest_removed, new_first); +} + +/* ->s.dir.extract_key() method for this item plugin. 
*/ +int extract_key_cde(const coord_t * coord /* coord of item */ , + reiser4_key * key /* resulting key */ ) +{ + directory_entry_format *dent; + + assert("nikita-1155", coord != NULL); + assert("nikita-1156", key != NULL); + + dent = entry_at(coord, idx_of(coord)); + return extract_key_from_id(&dent->id, key); +} + +int +update_key_cde(const coord_t * coord, const reiser4_key * key, + lock_handle * lh UNUSED_ARG) +{ + directory_entry_format *dent; + obj_key_id obj_id; + int result; + + assert("nikita-2344", coord != NULL); + assert("nikita-2345", key != NULL); + + dent = entry_at(coord, idx_of(coord)); + result = build_obj_key_id(key, &obj_id); + if (result == 0) { + dent->id = obj_id; + znode_make_dirty(coord->node); + } + return 0; +} + +/* ->s.dir.extract_name() method for this item plugin. */ +char *extract_name_cde(const coord_t * coord /* coord of item */ , char *buf) +{ + directory_entry_format *dent; + + assert("nikita-1157", coord != NULL); + + dent = entry_at(coord, idx_of(coord)); + return extract_dent_name(coord, dent, buf); +} + +static int cde_bytes(int pasting, const reiser4_item_data * data) +{ + int result; + + result = data->length; + if (!pasting) + result -= sizeof(cde_item_format); + return result; +} + +/* ->s.dir.add_entry() method for this item plugin */ +int add_entry_cde(struct inode *dir /* directory object */ , + coord_t * coord /* coord of item */ , + lock_handle * lh /* lock handle for insertion */ , + const struct dentry *name /* name to insert */ , + reiser4_dir_entry_desc * dir_entry /* parameters of new + * directory entry */ ) +{ + reiser4_item_data data; + struct cde_entry entry; + struct cde_entry_data edata; + int result; + + assert("nikita-1656", coord->node == lh->node); + assert("nikita-1657", znode_is_write_locked(coord->node)); + + edata.num_of_entries = 1; + edata.entry = &entry; + + entry.dir = dir; + entry.obj = dir_entry->obj; + entry.name = &name->d_name; + + data.data = (char *)&edata; + data.user = 0; /* &edata is not user space */ + data.iplug = item_plugin_by_id(COMPOUND_DIR_ID); + data.arg = dir_entry; + assert("nikita-1302", data.iplug != NULL); + + result = is_dot_key(&dir_entry->key); + data.length = estimate_cde(result ? coord : NULL, &data); + + inode_add_bytes(dir, cde_bytes(result, &data)); + + if (result) + result = insert_by_coord(coord, &data, &dir_entry->key, lh, 0); + else + result = reiser4_resize_item(coord, &data, &dir_entry->key, + lh, 0); + return result; +} + +/* ->s.dir.rem_entry() */ +int rem_entry_cde(struct inode *dir /* directory of item */ , + const struct qstr *name, coord_t * coord /* coord of item */ , + lock_handle * lh UNUSED_ARG /* lock handle for + * removal */ , + reiser4_dir_entry_desc * entry UNUSED_ARG /* parameters of + * directory entry + * being removed */ ) +{ + coord_t shadow; + int result; + int length; + ON_DEBUG(char buf[DE_NAME_BUF_LEN]); + + assert("nikita-2870", strlen(name->name) == name->len); + assert("nikita-2869", + !strcmp(name->name, extract_name_cde(coord, buf))); + + length = sizeof(directory_entry_format) + sizeof(cde_unit_header); + if (is_longname(name->name, name->len)) + length += name->len + 1; + + if (inode_get_bytes(dir) < length) { + warning("nikita-2628", "Dir is broke: %llu: %llu", + (unsigned long long)get_inode_oid(dir), + inode_get_bytes(dir)); + + return RETERR(-EIO); + } + + /* cut_node() is supposed to take pointers to _different_ + coords, because it will modify them without respect to + possible aliasing. To work around this, create temporary copy + of @coord. 
+ */ + coord_dup(&shadow, coord); + result = + kill_node_content(coord, &shadow, NULL, NULL, NULL, NULL, NULL, 0); + if (result == 0) { + inode_sub_bytes(dir, length); + } + return result; +} + +/* ->s.dir.max_name_len() method for this item plugin */ +int max_name_len_cde(const struct inode *dir /* directory */ ) +{ + return + reiser4_tree_by_inode(dir)->nplug->max_item_size() - + sizeof(directory_entry_format) - sizeof(cde_item_format) - + sizeof(cde_unit_header) - 2; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/cde.h b/fs/reiser4/plugin/item/cde.h new file mode 100644 index 000000000000..f599714415c8 --- /dev/null +++ b/fs/reiser4/plugin/item/cde.h @@ -0,0 +1,87 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Compound directory item. See cde.c for description. */ + +#if !defined( __FS_REISER4_PLUGIN_COMPRESSED_DE_H__ ) +#define __FS_REISER4_PLUGIN_COMPRESSED_DE_H__ + +#include "../../forward.h" +#include "../../kassign.h" +#include "../../dformat.h" + +#include <linux/fs.h> /* for struct inode */ +#include <linux/dcache.h> /* for struct dentry, etc */ + +typedef struct cde_unit_header { + de_id hash; + d16 offset; +} cde_unit_header; + +typedef struct cde_item_format { + d16 num_of_entries; + cde_unit_header entry[0]; +} cde_item_format; + +struct cde_entry { + const struct inode *dir; + const struct inode *obj; + const struct qstr *name; +}; + +struct cde_entry_data { + int num_of_entries; + struct cde_entry *entry; +}; + +/* plugin->item.b.* */ +reiser4_key *max_key_inside_cde(const coord_t * coord, reiser4_key * result); +int can_contain_key_cde(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data *); +int mergeable_cde(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_cde(const coord_t * coord); +reiser4_key *unit_key_cde(const coord_t * coord, reiser4_key * key); +int estimate_cde(const coord_t * coord, const reiser4_item_data * data); +void print_cde(const char *prefix, coord_t * coord); +int init_cde(coord_t * coord, coord_t * from, reiser4_item_data * data); +lookup_result lookup_cde(const reiser4_key * key, lookup_bias bias, + coord_t * coord); +int paste_cde(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG); +int can_shift_cde(unsigned free_space, coord_t * coord, znode * target, + shift_direction pend, unsigned *size, unsigned want); +void copy_units_cde(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction where_is_free_space, + unsigned free_space); +int cut_units_cde(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_cde(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +void print_cde(const char *prefix, coord_t * coord); +int reiser4_check_cde(const coord_t * coord, const char **error); + +/* plugin->u.item.s.dir.* */ +int extract_key_cde(const coord_t * coord, reiser4_key * key); +int update_key_cde(const coord_t * coord, const reiser4_key * key, + lock_handle * lh); +char *extract_name_cde(const coord_t * coord, char *buf); +int add_entry_cde(struct inode *dir, coord_t * coord, + lock_handle * lh, const struct dentry *name, + reiser4_dir_entry_desc * entry); +int 
rem_entry_cde(struct inode *dir, const struct qstr *name, coord_t * coord, + lock_handle * lh, reiser4_dir_entry_desc * entry); +int max_name_len_cde(const struct inode *dir); + +/* __FS_REISER4_PLUGIN_COMPRESSED_DE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/ctail.c b/fs/reiser4/plugin/item/ctail.c new file mode 100644 index 000000000000..97f7f93725f5 --- /dev/null +++ b/fs/reiser4/plugin/item/ctail.c @@ -0,0 +1,1769 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* ctails (aka "clustered tails") are items for cryptcompress objects */ + +/* DESCRIPTION: + +Each cryptcompress object is stored on disk as a set of clusters sliced +into ctails. + +Internal on-disk structure: + + HEADER (1) Here stored disk cluster shift + BODY +*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" +#include "../../coord.h" +#include "item.h" +#include "../node/node.h" +#include "../plugin.h" +#include "../object.h" +#include "../../znode.h" +#include "../../carry.h" +#include "../../tree.h" +#include "../../inode.h" +#include "../../super.h" +#include "../../context.h" +#include "../../page_cache.h" +#include "../cluster.h" +#include "../../flush.h" +#include "../../tree_walk.h" + +#include <linux/pagevec.h> +#include <linux/swap.h> +#include <linux/fs.h> + +/* return body of ctail item at @coord */ +static ctail_item_format *ctail_formatted_at(const coord_t * coord) +{ + assert("edward-60", coord != NULL); + return item_body_by_coord(coord); +} + +static int cluster_shift_by_coord(const coord_t * coord) +{ + return get_unaligned(&ctail_formatted_at(coord)->cluster_shift); +} + +static inline void dclust_set_extension_shift(hint_t * hint) +{ + assert("edward-1270", + item_id_by_coord(&hint->ext_coord.coord) == CTAIL_ID); + hint->ext_coord.extension.ctail.shift = + cluster_shift_by_coord(&hint->ext_coord.coord); +} + +static loff_t off_by_coord(const coord_t * coord) +{ + reiser4_key key; + return get_key_offset(item_key_by_coord(coord, &key)); +} + +int coord_is_unprepped_ctail(const coord_t * coord) +{ + assert("edward-1233", coord != NULL); + assert("edward-1234", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-1235", + ergo((int)cluster_shift_by_coord(coord) == (int)UCTAIL_SHIFT, + nr_units_ctail(coord) == (pos_in_node_t) UCTAIL_NR_UNITS)); + + return (int)cluster_shift_by_coord(coord) == (int)UCTAIL_SHIFT; +} + +static cloff_t clust_by_coord(const coord_t * coord, struct inode *inode) +{ + int shift; + + if (inode != NULL) { + shift = inode_cluster_shift(inode); + assert("edward-1236", + ergo(!coord_is_unprepped_ctail(coord), + shift == cluster_shift_by_coord(coord))); + } else { + assert("edward-1237", !coord_is_unprepped_ctail(coord)); + shift = cluster_shift_by_coord(coord); + } + return off_by_coord(coord) >> shift; +} + +static int disk_cluster_size(const coord_t * coord) +{ + assert("edward-1156", + item_plugin_by_coord(coord) == item_plugin_by_id(CTAIL_ID)); + /* calculation of disk cluster size + is meaninless if ctail is unprepped */ + assert("edward-1238", !coord_is_unprepped_ctail(coord)); + + return 1 << cluster_shift_by_coord(coord); +} + +/* true if the key is of first disk cluster item */ +static int is_disk_cluster_key(const reiser4_key * key, const coord_t * coord) +{ + assert("edward-1239", 
item_id_by_coord(coord) == CTAIL_ID); + + return coord_is_unprepped_ctail(coord) || + ((get_key_offset(key) & + ((loff_t) disk_cluster_size(coord) - 1)) == 0); +} + +static char *first_unit(coord_t * coord) +{ + /* FIXME: warning: pointer of type `void *' used in arithmetic */ + return (char *)item_body_by_coord(coord) + sizeof(ctail_item_format); +} + +/* plugin->u.item.b.max_key_inside : + tail_max_key_inside */ + +/* plugin->u.item.b.can_contain_key */ +int can_contain_key_ctail(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data * data) +{ + reiser4_key item_key; + + if (item_plugin_by_coord(coord) != data->iplug) + return 0; + + item_key_by_coord(coord, &item_key); + if (get_key_locality(key) != get_key_locality(&item_key) || + get_key_objectid(key) != get_key_objectid(&item_key)) + return 0; + if (get_key_offset(&item_key) + nr_units_ctail(coord) != + get_key_offset(key)) + return 0; + if (is_disk_cluster_key(key, coord)) + /* + * can not merge at the beginning + * of a logical cluster in a file + */ + return 0; + return 1; +} + +/* plugin->u.item.b.mergeable */ +int mergeable_ctail(const coord_t * p1, const coord_t * p2) +{ + reiser4_key key1, key2; + + assert("edward-62", item_id_by_coord(p1) == CTAIL_ID); + assert("edward-61", plugin_of_group(item_plugin_by_coord(p1), + UNIX_FILE_METADATA_ITEM_TYPE)); + + if (item_id_by_coord(p2) != CTAIL_ID) { + /* second item is of another type */ + return 0; + } + item_key_by_coord(p1, &key1); + item_key_by_coord(p2, &key2); + if (get_key_locality(&key1) != get_key_locality(&key2) || + get_key_objectid(&key1) != get_key_objectid(&key2) || + get_key_type(&key1) != get_key_type(&key2)) { + /* items of different objects */ + return 0; + } + if (get_key_offset(&key1) + nr_units_ctail(p1) != get_key_offset(&key2)) + /* not adjacent items */ + return 0; + if (is_disk_cluster_key(&key2, p2)) + /* + * can not merge at the beginning + * of a logical cluster in a file + */ + return 0; + return 1; +} + +/* plugin->u.item.b.nr_units */ +pos_in_node_t nr_units_ctail(const coord_t * coord) +{ + return (item_length_by_coord(coord) - + sizeof(ctail_formatted_at(coord)->cluster_shift)); +} + +/* plugin->u.item.b.estimate: + estimate how much space is needed to insert/paste @data->length bytes + into ctail at @coord */ +int estimate_ctail(const coord_t * coord /* coord of item */ , + const reiser4_item_data * + data /* parameters for new item */ ) +{ + if (coord == NULL) + /* insert */ + return (sizeof(ctail_item_format) + data->length); + else + /* paste */ + return data->length; +} + +/* ->init() method for this item plugin. 
*/ +int init_ctail(coord_t * to /* coord of item */ , + coord_t * from /* old_item */ , + reiser4_item_data * data /* structure used for insertion */ ) +{ + int cluster_shift; /* cpu value to convert */ + + if (data) { + assert("edward-463", data->length > sizeof(ctail_item_format)); + cluster_shift = *((int *)(data->arg)); + data->length -= sizeof(ctail_item_format); + } else { + assert("edward-464", from != NULL); + assert("edward-855", ctail_ok(from)); + cluster_shift = (int)(cluster_shift_by_coord(from)); + } + put_unaligned((d8)cluster_shift, &ctail_formatted_at(to)->cluster_shift); + assert("edward-856", ctail_ok(to)); + return 0; +} + +/* plugin->u.item.b.lookup: + NULL: We are looking for item keys only */ + +#if REISER4_DEBUG +int ctail_ok(const coord_t * coord) +{ + return coord_is_unprepped_ctail(coord) || + cluster_shift_ok(cluster_shift_by_coord(coord)); +} + +/* plugin->u.item.b.check */ +int check_ctail(const coord_t * coord, const char **error) +{ + if (!ctail_ok(coord)) { + if (error) + *error = "bad cluster shift in ctail"; + return 1; + } + return 0; +} +#endif + +/* plugin->u.item.b.paste */ +int +paste_ctail(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG) +{ + unsigned old_nr_units; + + assert("edward-268", data->data != NULL); + /* copy only from kernel space */ + assert("edward-66", data->user == 0); + + old_nr_units = + item_length_by_coord(coord) - sizeof(ctail_item_format) - + data->length; + + /* ctail items never get pasted in the middle */ + + if (coord->unit_pos == 0 && coord->between == AT_UNIT) { + + /* paste at the beginning when create new item */ + assert("edward-450", + item_length_by_coord(coord) == + data->length + sizeof(ctail_item_format)); + assert("edward-451", old_nr_units == 0); + } else if (coord->unit_pos == old_nr_units - 1 + && coord->between == AFTER_UNIT) { + + /* paste at the end */ + coord->unit_pos++; + } else + impossible("edward-453", "bad paste position"); + + memcpy(first_unit(coord) + coord->unit_pos, data->data, data->length); + + assert("edward-857", ctail_ok(coord)); + + return 0; +} + +/* plugin->u.item.b.fast_paste */ + +/* + * plugin->u.item.b.can_shift + * + * Return number of units that can be shifted; + * Store space (in bytes) occupied by those units in @size. 
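+ *
+ * (Editor's illustrative note, not part of the original comment; it assumes
+ *  a hypothetical caller:
+ *
+ *      unsigned size;
+ *      int units = can_shift_ctail(free_space, &src, NULL,
+ *                                  SHIFT_LEFT, &size, want);
+ *
+ *  A NULL @target means a new item will be created, so the returned unit
+ *  count is size - sizeof(ctail_item_format), or 0 when only the header
+ *  would fit; with an existing mergeable @target the function returns
+ *  units == size.)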
+ */ +int can_shift_ctail(unsigned free_space, coord_t *source, + znode * target, shift_direction direction UNUSED_ARG, + unsigned *size, unsigned want) +{ + /* make sure that that we do not want to shift more than we have */ + assert("edward-68", want > 0 && want <= nr_units_ctail(source)); + + *size = min(want, free_space); + + if (!target) { + /* + * new item will be created + */ + if (*size <= sizeof(ctail_item_format)) { + /* + * can not shift only ctail header + */ + *size = 0; + return 0; + } + return *size - sizeof(ctail_item_format); + } + else + /* + * shifting to the mergeable item + */ + return *size; +} + +/* + * plugin->u.item.b.copy_units + * cooperates with ->can_shift() + */ +void copy_units_ctail(coord_t * target, coord_t * source, + unsigned from, unsigned count /* units */ , + shift_direction where_is_free_space, + unsigned free_space /* bytes */ ) +{ + /* make sure that item @target is expanded already */ + assert("edward-69", (unsigned)item_length_by_coord(target) >= count); + assert("edward-70", free_space == count || free_space == count + 1); + + assert("edward-858", ctail_ok(source)); + + if (where_is_free_space == SHIFT_LEFT) { + /* + * append item @target with @count first bytes + * of @source: this restriction came from ordinary tails + */ + assert("edward-71", from == 0); + assert("edward-860", ctail_ok(target)); + + memcpy(first_unit(target) + nr_units_ctail(target) - count, + first_unit(source), count); + } else { + /* + * target item is moved to right already + */ + reiser4_key key; + + assert("edward-72", nr_units_ctail(source) == from + count); + + if (free_space == count) { + init_ctail(target, source, NULL); + } else { + /* + * shifting to a mergeable item + */ + assert("edward-862", ctail_ok(target)); + } + memcpy(first_unit(target), first_unit(source) + from, count); + + assert("edward-863", ctail_ok(target)); + /* + * new units are inserted before first unit + * in an item, therefore, we have to update + * item key + */ + item_key_by_coord(source, &key); + set_key_offset(&key, get_key_offset(&key) + from); + + node_plugin_by_node(target->node)->update_item_key(target, + &key, + NULL /*info */); + } +} + +/* plugin->u.item.b.create_hook */ +int create_hook_ctail(const coord_t * coord, void *arg) +{ + assert("edward-864", znode_is_loaded(coord->node)); + + znode_set_convertible(coord->node); + return 0; +} + +/* plugin->u.item.b.kill_hook */ +int kill_hook_ctail(const coord_t * coord, pos_in_node_t from, + pos_in_node_t count, carry_kill_data * kdata) +{ + struct inode *inode; + + assert("edward-1157", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-291", znode_is_write_locked(coord->node)); + + inode = kdata->inode; + if (inode) { + reiser4_key key; + struct cryptcompress_info * info; + cloff_t index; + + item_key_by_coord(coord, &key); + info = cryptcompress_inode_data(inode); + index = off_to_clust(get_key_offset(&key), inode); + + if (from == 0) { + info->trunc_index = index; + if (is_disk_cluster_key(&key, coord)) { + /* + * first item of disk cluster is to be killed + */ + truncate_complete_page_cluster( + inode, index, kdata->params.truncate); + inode_sub_bytes(inode, + inode_cluster_size(inode)); + } + } + } + return 0; +} + +/* for shift_hook_ctail(), + return true if the first disk cluster item has dirty child +*/ +static int ctail_convertible(const coord_t * coord) +{ + int result; + reiser4_key key; + jnode *child = NULL; + + assert("edward-477", coord != NULL); + assert("edward-478", item_id_by_coord(coord) == CTAIL_ID); + + if 
(coord_is_unprepped_ctail(coord)) + /* unprepped ctail should be converted */ + return 1; + + item_key_by_coord(coord, &key); + child = jlookup(current_tree, + get_key_objectid(&key), + off_to_pg(off_by_coord(coord))); + if (!child) + return 0; + result = JF_ISSET(child, JNODE_DIRTY); + jput(child); + return result; +} + +/* FIXME-EDWARD */ +/* plugin->u.item.b.shift_hook */ +int shift_hook_ctail(const coord_t * item /* coord of item */ , + unsigned from UNUSED_ARG /* start unit */ , + unsigned count UNUSED_ARG /* stop unit */ , + znode * old_node /* old parent */ ) +{ + assert("edward-479", item != NULL); + assert("edward-480", item->node != old_node); + + if (!znode_convertible(old_node) || znode_convertible(item->node)) + return 0; + if (ctail_convertible(item)) + znode_set_convertible(item->node); + return 0; +} + +static int +cut_or_kill_ctail_units(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + int cut, void *p, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + pos_in_node_t count; /* number of units to cut */ + char *item; + + count = to - from + 1; + item = item_body_by_coord(coord); + + assert("edward-74", ergo(from != 0, to == coord_last_unit_pos(coord))); + + if (smallest_removed) { + /* store smallest key removed */ + item_key_by_coord(coord, smallest_removed); + set_key_offset(smallest_removed, + get_key_offset(smallest_removed) + from); + } + + if (new_first) { + assert("vs-1531", from == 0); + + item_key_by_coord(coord, new_first); + set_key_offset(new_first, + get_key_offset(new_first) + from + count); + } + + if (!cut) + kill_hook_ctail(coord, from, 0, (struct carry_kill_data *)p); + + if (from == 0) { + if (count != nr_units_ctail(coord)) { + /* part of item is removed, so move free space at the beginning + of the item and update item key */ + reiser4_key key; + memcpy(item + to + 1, item, sizeof(ctail_item_format)); + item_key_by_coord(coord, &key); + set_key_offset(&key, get_key_offset(&key) + count); + node_plugin_by_node(coord->node)->update_item_key(coord, + &key, + NULL); + } else { + /* cut_units should not be called to cut evrything */ + assert("vs-1532", ergo(cut, 0)); + /* whole item is cut, so more then amount of space occupied + by units got freed */ + count += sizeof(ctail_item_format); + } + } + return count; +} + +/* plugin->u.item.b.cut_units */ +int +cut_units_ctail(coord_t * item, pos_in_node_t from, pos_in_node_t to, + carry_cut_data * cdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + return cut_or_kill_ctail_units(item, from, to, 1, NULL, + smallest_removed, new_first); +} + +/* plugin->u.item.b.kill_units */ +int +kill_units_ctail(coord_t * item, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *kdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + return cut_or_kill_ctail_units(item, from, to, 0, kdata, + smallest_removed, new_first); +} + +/* plugin->u.item.s.file.read */ +int read_ctail(struct file *file UNUSED_ARG, flow_t * f, hint_t * hint) +{ + uf_coord_t *uf_coord; + coord_t *coord; + + uf_coord = &hint->ext_coord; + coord = &uf_coord->coord; + assert("edward-127", f->user == 0); + assert("edward-129", coord && coord->node); + assert("edward-130", coord_is_existing_unit(coord)); + assert("edward-132", znode_is_loaded(coord->node)); + + /* start read only from the beginning of ctail */ + assert("edward-133", coord->unit_pos == 0); + /* read only whole ctails */ + assert("edward-135", nr_units_ctail(coord) <= f->length); + + assert("edward-136", reiser4_schedulable()); + 
assert("edward-886", ctail_ok(coord)); + + if (f->data) + memcpy(f->data, (char *)first_unit(coord), + (size_t) nr_units_ctail(coord)); + + dclust_set_extension_shift(hint); + mark_page_accessed(znode_page(coord->node)); + move_flow_forward(f, nr_units_ctail(coord)); + + return 0; +} + +/** + * Prepare transform stream with plain text for page + * @page taking into account synchronization issues. + */ +static int ctail_read_disk_cluster(struct cluster_handle * clust, + struct inode * inode, struct page * page, + znode_lock_mode mode) +{ + int result; + + assert("edward-1450", mode == ZNODE_READ_LOCK || ZNODE_WRITE_LOCK); + assert("edward-671", clust->hint != NULL); + assert("edward-140", clust->dstat == INVAL_DISK_CLUSTER); + assert("edward-672", cryptcompress_inode_ok(inode)); + assert("edward-1527", PageLocked(page)); + + unlock_page(page); + + /* set input stream */ + result = grab_tfm_stream(inode, &clust->tc, INPUT_STREAM); + if (result) { + lock_page(page); + return result; + } + result = find_disk_cluster(clust, inode, 1 /* read items */, mode); + lock_page(page); + if (result) + return result; + /* + * at this point we have locked position in the tree + */ + assert("edward-1528", znode_is_any_locked(clust->hint->lh.node)); + + if (page->mapping != inode->i_mapping) { + /* page was truncated */ + reiser4_unset_hint(clust->hint); + reset_cluster_params(clust); + return AOP_TRUNCATED_PAGE; + } + if (PageUptodate(page)) { + /* disk cluster can be obsolete, don't use it! */ + reiser4_unset_hint(clust->hint); + reset_cluster_params(clust); + return 0; + } + if (clust->dstat == FAKE_DISK_CLUSTER || + clust->dstat == UNPR_DISK_CLUSTER || + clust->dstat == TRNC_DISK_CLUSTER) { + /* + * this information about disk cluster will be valid + * as long as we keep the position in the tree locked + */ + tfm_cluster_set_uptodate(&clust->tc); + return 0; + } + /* now prepare output stream.. */ + result = grab_coa(&clust->tc, inode_compression_plugin(inode)); + if (result) + return result; + /* ..and fill this with plain text */ + result = reiser4_inflate_cluster(clust, inode); + if (result) + return result; + /* + * The stream is ready! It won't be obsolete as + * long as we keep last disk cluster item locked. + */ + tfm_cluster_set_uptodate(&clust->tc); + return 0; +} + +/* + * fill one page with plain text. 
+ */ +int do_readpage_ctail(struct inode * inode, struct cluster_handle * clust, + struct page *page, znode_lock_mode mode) +{ + int ret; + unsigned cloff; + char *data; + size_t to_page; + struct tfm_cluster * tc = &clust->tc; + + assert("edward-212", PageLocked(page)); + + if (unlikely(page->mapping != inode->i_mapping)) + return AOP_TRUNCATED_PAGE; + if (PageUptodate(page)) + goto exit; + to_page = pbytes(page_index(page), inode); + if (to_page == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + goto exit; + } + if (!tfm_cluster_is_uptodate(&clust->tc)) { + clust->index = pg_to_clust(page->index, inode); + + /* this will unlock/lock the page */ + ret = ctail_read_disk_cluster(clust, inode, page, mode); + + assert("edward-212", PageLocked(page)); + if (ret) + return ret; + + /* refresh bytes */ + to_page = pbytes(page_index(page), inode); + if (to_page == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + goto exit; + } + } + if (PageUptodate(page)) + /* somebody else fill it already */ + goto exit; + + assert("edward-119", tfm_cluster_is_uptodate(tc)); + assert("edward-1529", znode_is_any_locked(clust->hint->lh.node)); + + switch (clust->dstat) { + case UNPR_DISK_CLUSTER: + /* + * Page is not uptodate and item cluster is unprepped: + * this must not ever happen. + */ + warning("edward-1632", + "Bad item cluster %lu (Inode %llu). Fsck?", + clust->index, + (unsigned long long)get_inode_oid(inode)); + return RETERR(-EIO); + case TRNC_DISK_CLUSTER: + /* + * Race with truncate! + * We resolve it in favour of the last one (the only way, + * as in this case plain text is unrecoverable) + */ + case FAKE_DISK_CLUSTER: + /* fill the page by zeroes */ + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + break; + case PREP_DISK_CLUSTER: + /* fill page by transformed stream with plain text */ + assert("edward-1058", !PageUptodate(page)); + assert("edward-120", tc->len <= inode_cluster_size(inode)); + + /* page index in this logical cluster */ + cloff = pg_to_off_to_cloff(page->index, inode); + + data = kmap(page); + memcpy(data, tfm_stream_data(tc, OUTPUT_STREAM) + cloff, to_page); + memset(data + to_page, 0, (size_t) PAGE_SIZE - to_page); + flush_dcache_page(page); + kunmap(page); + SetPageUptodate(page); + break; + default: + impossible("edward-1169", "bad disk cluster state"); + } + exit: + return 0; +} + +/* plugin->u.item.s.file.readpage */ +int readpage_ctail(void *vp, struct page *page) +{ + int result; + hint_t * hint; + struct cluster_handle * clust = vp; + + assert("edward-114", clust != NULL); + assert("edward-115", PageLocked(page)); + assert("edward-116", !PageUptodate(page)); + assert("edward-118", page->mapping && page->mapping->host); + assert("edward-867", !tfm_cluster_is_uptodate(&clust->tc)); + + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) { + unlock_page(page); + return RETERR(-ENOMEM); + } + clust->hint = hint; + result = load_file_hint(clust->file, hint); + if (result) { + kfree(hint); + unlock_page(page); + return result; + } + assert("vs-25", hint->ext_coord.lh == &hint->lh); + + result = do_readpage_ctail(page->mapping->host, clust, page, + ZNODE_READ_LOCK); + assert("edward-213", PageLocked(page)); + assert("edward-1163", ergo(!result, PageUptodate(page))); + + unlock_page(page); + done_lh(&hint->lh); + hint->ext_coord.valid = 0; + save_file_hint(clust->file, hint); + kfree(hint); + tfm_cluster_clr_uptodate(&clust->tc); + + return result; +} + +/* Helper function for ->readpages() */ +static int 
ctail_read_page_cluster(struct cluster_handle * clust, + struct inode *inode) +{ + int i; + int result; + assert("edward-779", clust != NULL); + assert("edward-1059", clust->win == NULL); + assert("edward-780", inode != NULL); + + result = prepare_page_cluster(inode, clust, READ_OP); + if (result) + return result; + + assert("edward-781", !tfm_cluster_is_uptodate(&clust->tc)); + + for (i = 0; i < clust->nr_pages; i++) { + struct page *page = clust->pages[i]; + lock_page(page); + result = do_readpage_ctail(inode, clust, page, ZNODE_READ_LOCK); + unlock_page(page); + if (result) + break; + } + tfm_cluster_clr_uptodate(&clust->tc); + put_page_cluster(clust, inode, READ_OP); + return result; +} + +/* filler for read_cache_pages() */ +static int ctail_readpages_filler(void * data, struct page * page) +{ + int ret = 0; + struct cluster_handle * clust = data; + struct inode * inode = file_inode(clust->file); + + assert("edward-1525", page->mapping == inode->i_mapping); + + if (PageUptodate(page)) { + unlock_page(page); + return 0; + } + if (pbytes(page_index(page), inode) == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + move_cluster_forward(clust, inode, page->index); + unlock_page(page); + /* + * read the whole page cluster + */ + ret = ctail_read_page_cluster(clust, inode); + + assert("edward-869", !tfm_cluster_is_uptodate(&clust->tc)); + return ret; +} + +/* + * We populate a bit more then upper readahead suggests: + * with each nominated page we read the whole page cluster + * this page belongs to. + */ +int readpages_ctail(struct file *file, struct address_space *mapping, + struct list_head *pages) +{ + int ret = 0; + hint_t *hint; + struct cluster_handle clust; + struct inode *inode = mapping->host; + + assert("edward-1521", inode == file_inode(file)); + + cluster_init_read(&clust, NULL); + clust.file = file; + hint = kmalloc(sizeof(*hint), reiser4_ctx_gfp_mask_get()); + if (hint == NULL) { + warning("vs-28", "failed to allocate hint"); + ret = RETERR(-ENOMEM); + goto exit1; + } + clust.hint = hint; + ret = load_file_hint(clust.file, hint); + if (ret) { + warning("edward-1522", "failed to load hint"); + goto exit2; + } + assert("vs-26", hint->ext_coord.lh == &hint->lh); + ret = alloc_cluster_pgset(&clust, cluster_nrpages(inode)); + if (ret) { + warning("edward-1523", "failed to alloc pgset"); + goto exit3; + } + ret = read_cache_pages(mapping, pages, ctail_readpages_filler, &clust); + + assert("edward-870", !tfm_cluster_is_uptodate(&clust.tc)); + exit3: + done_lh(&hint->lh); + save_file_hint(file, hint); + hint->ext_coord.valid = 0; + exit2: + kfree(hint); + exit1: + put_cluster_handle(&clust); + return ret; +} + +/* + plugin->u.item.s.file.append_key + key of the first item of the next disk cluster +*/ +reiser4_key *append_key_ctail(const coord_t * coord, reiser4_key * key) +{ + assert("edward-1241", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-1242", cluster_shift_ok(cluster_shift_by_coord(coord))); + + item_key_by_coord(coord, key); + set_key_offset(key, ((__u64) (clust_by_coord(coord, NULL)) + 1) + << cluster_shift_by_coord(coord)); + return key; +} + +static int insert_unprepped_ctail(struct cluster_handle * clust, + struct inode *inode) +{ + int result; + char buf[UCTAIL_NR_UNITS]; + reiser4_item_data data; + reiser4_key key; + int shift = (int)UCTAIL_SHIFT; + + memset(buf, 0, (size_t) UCTAIL_NR_UNITS); + result = key_by_inode_cryptcompress(inode, + clust_to_off(clust->index, inode), + &key); + if (result) + return 
result; + data.user = 0; + data.iplug = item_plugin_by_id(CTAIL_ID); + data.arg = &shift; + data.length = sizeof(ctail_item_format) + (size_t) UCTAIL_NR_UNITS; + data.data = buf; + + result = insert_by_coord(&clust->hint->ext_coord.coord, + &data, &key, clust->hint->ext_coord.lh, 0); + return result; +} + +static int +insert_cryptcompress_flow(coord_t * coord, lock_handle * lh, flow_t * f, + int cluster_shift) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + reiser4_item_data *data; + carry_op *op; + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*data)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + data = (reiser4_item_data *) (lowest_level + 3); + + assert("edward-466", coord->between == AFTER_ITEM + || coord->between == AFTER_UNIT || coord->between == BEFORE_ITEM + || coord->between == EMPTY_NODE + || coord->between == BEFORE_UNIT); + + if (coord->between == AFTER_UNIT) { + coord->unit_pos = 0; + coord->between = AFTER_ITEM; + } + op = reiser4_post_carry(lowest_level, COP_INSERT_FLOW, coord->node, + 0 /* operate directly on coord -> node */); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? PTR_ERR(op) : -EIO); + } + data->user = 0; + data->iplug = item_plugin_by_id(CTAIL_ID); + data->arg = &cluster_shift; + + data->length = 0; + data->data = NULL; + + op->u.insert_flow.flags = + COPI_SWEEP | + COPI_DONT_SHIFT_LEFT | + COPI_DONT_SHIFT_RIGHT; + op->u.insert_flow.insert_point = coord; + op->u.insert_flow.flow = f; + op->u.insert_flow.data = data; + op->u.insert_flow.new_nodes = 0; + + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* Implementation of CRC_APPEND_ITEM mode of ctail conversion */ +static int insert_cryptcompress_flow_in_place(coord_t * coord, + lock_handle * lh, flow_t * f, + int cluster_shift) +{ + int ret; + coord_t pos; + lock_handle lock; + + assert("edward-484", + coord->between == AT_UNIT || coord->between == AFTER_ITEM); + assert("edward-485", item_id_by_coord(coord) == CTAIL_ID); + + coord_dup(&pos, coord); + pos.unit_pos = 0; + pos.between = AFTER_ITEM; + + init_lh(&lock); + copy_lh(&lock, lh); + + ret = insert_cryptcompress_flow(&pos, &lock, f, cluster_shift); + done_lh(&lock); + assert("edward-1347", znode_is_write_locked(lh->node)); + assert("edward-1228", !ret); + return ret; +} + +/* Implementation of CRC_OVERWRITE_ITEM mode of ctail conversion */ +static int overwrite_ctail(coord_t * coord, flow_t * f) +{ + unsigned count; + + assert("edward-269", f->user == 0); + assert("edward-270", f->data != NULL); + assert("edward-271", f->length > 0); + assert("edward-272", coord_is_existing_unit(coord)); + assert("edward-273", coord->unit_pos == 0); + assert("edward-274", znode_is_write_locked(coord->node)); + assert("edward-275", reiser4_schedulable()); + assert("edward-467", item_id_by_coord(coord) == CTAIL_ID); + assert("edward-1243", ctail_ok(coord)); + + count = nr_units_ctail(coord); + + if (count > f->length) + count = f->length; + memcpy(first_unit(coord), f->data, count); + move_flow_forward(f, count); + coord->unit_pos += count; + return 0; +} + +/* Implementation of CRC_CUT_ITEM mode of ctail conversion: + cut ctail (part or whole) starting from next unit position */ +static int cut_ctail(coord_t * coord) +{ + coord_t stop; + + assert("edward-435", coord->between 
== AT_UNIT && + coord->item_pos < coord_num_items(coord) && + coord->unit_pos <= coord_num_units(coord)); + + if (coord->unit_pos == coord_num_units(coord)) + /* nothing to cut */ + return 0; + coord_dup(&stop, coord); + stop.unit_pos = coord_last_unit_pos(coord); + + return cut_node_content(coord, &stop, NULL, NULL, NULL); +} + +int ctail_insert_unprepped_cluster(struct cluster_handle * clust, + struct inode * inode) +{ + int result; + assert("edward-1244", inode != NULL); + assert("edward-1245", clust->hint != NULL); + assert("edward-1246", clust->dstat == FAKE_DISK_CLUSTER); + assert("edward-1247", clust->reserved == 1); + + result = get_disk_cluster_locked(clust, inode, ZNODE_WRITE_LOCK); + if (cbk_errored(result)) + return result; + assert("edward-1249", result == CBK_COORD_NOTFOUND); + assert("edward-1250", znode_is_write_locked(clust->hint->lh.node)); + + assert("edward-1295", + clust->hint->ext_coord.lh->node == + clust->hint->ext_coord.coord.node); + + coord_set_between_clusters(&clust->hint->ext_coord.coord); + + result = insert_unprepped_ctail(clust, inode); + all_grabbed2free(); + + assert("edward-1251", !result); + assert("edward-1252", cryptcompress_inode_ok(inode)); + assert("edward-1253", znode_is_write_locked(clust->hint->lh.node)); + assert("edward-1254", + reiser4_clustered_blocks(reiser4_get_current_sb())); + assert("edward-1255", + znode_convertible(clust->hint->ext_coord.coord.node)); + + return result; +} + +/* plugin->u.item.f.scan */ +int scan_ctail(flush_scan * scan) +{ + int result = 0; + struct page *page; + struct inode *inode; + jnode *node = scan->node; + + assert("edward-227", scan->node != NULL); + assert("edward-228", jnode_is_cluster_page(scan->node)); + assert("edward-639", znode_is_write_locked(scan->parent_lock.node)); + + page = jnode_page(node); + inode = page->mapping->host; + + if (!reiser4_scanning_left(scan)) + return result; + if (!ZF_ISSET(scan->parent_lock.node, JNODE_DIRTY)) + znode_make_dirty(scan->parent_lock.node); + + if (!znode_convertible(scan->parent_lock.node)) { + if (JF_ISSET(scan->node, JNODE_DIRTY)) + znode_set_convertible(scan->parent_lock.node); + else { + warning("edward-681", + "cluster page is already processed"); + return -EAGAIN; + } + } + return result; +} + +/* If true, this function attaches children */ +static int should_attach_convert_idata(flush_pos_t * pos) +{ + int result; + assert("edward-431", pos != NULL); + assert("edward-432", pos->child == NULL); + assert("edward-619", znode_is_write_locked(pos->coord.node)); + assert("edward-470", + item_plugin_by_coord(&pos->coord) == + item_plugin_by_id(CTAIL_ID)); + + /* check for leftmost child */ + utmost_child_ctail(&pos->coord, LEFT_SIDE, &pos->child); + + if (!pos->child) + return 0; + spin_lock_jnode(pos->child); + result = (JF_ISSET(pos->child, JNODE_DIRTY) && + pos->child->atom == ZJNODE(pos->coord.node)->atom); + spin_unlock_jnode(pos->child); + if (!result && pos->child) { + /* existing child isn't to attach, clear up this one */ + jput(pos->child); + pos->child = NULL; + } + return result; +} + +/** + * Collect all needed information about the object here, + * as in-memory inode can be evicted from memory before + * disk update completion. 
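+ *
+ * (Editor's note: only the cluster shift is cached from the inode below;
+ *  the per-cluster item state machine starts at DC_FIRST_ITEM with the
+ *  next-item state unknown, i.e. DC_INVALID_STATE.)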
+ */ +static int init_convert_data_ctail(struct convert_item_info * idata, + struct inode *inode) +{ + assert("edward-813", idata != NULL); + assert("edward-814", inode != NULL); + + idata->cluster_shift = inode_cluster_shift(inode); + idata->d_cur = DC_FIRST_ITEM; + idata->d_next = DC_INVALID_STATE; + + return 0; +} + +static int alloc_item_convert_data(struct convert_info * sq) +{ + assert("edward-816", sq != NULL); + assert("edward-817", sq->itm == NULL); + + sq->itm = kmalloc(sizeof(*sq->itm), reiser4_ctx_gfp_mask_get()); + if (sq->itm == NULL) + return RETERR(-ENOMEM); + init_lh(&sq->right_lock); + sq->right_locked = 0; + return 0; +} + +static void free_item_convert_data(struct convert_info * sq) +{ + assert("edward-818", sq != NULL); + assert("edward-819", sq->itm != NULL); + assert("edward-820", sq->iplug != NULL); + + done_lh(&sq->right_lock); + sq->right_locked = 0; + kfree(sq->itm); + sq->itm = NULL; + return; +} + +static struct convert_info *alloc_convert_data(void) +{ + struct convert_info *info; + + info = kmalloc(sizeof(*info), reiser4_ctx_gfp_mask_get()); + if (info != NULL) { + memset(info, 0, sizeof(*info)); + cluster_init_write(&info->clust, NULL); + } + return info; +} + +static void reset_convert_data(struct convert_info *info) +{ + info->clust.tc.hole = 0; +} + +void free_convert_data(flush_pos_t * pos) +{ + struct convert_info *sq; + + assert("edward-823", pos != NULL); + assert("edward-824", pos->sq != NULL); + + sq = pos->sq; + if (sq->itm) + free_item_convert_data(sq); + put_cluster_handle(&sq->clust); + kfree(pos->sq); + pos->sq = NULL; + return; +} + +static int init_item_convert_data(flush_pos_t * pos, struct inode *inode) +{ + struct convert_info *sq; + + assert("edward-825", pos != NULL); + assert("edward-826", pos->sq != NULL); + assert("edward-827", item_convert_data(pos) != NULL); + assert("edward-828", inode != NULL); + + sq = pos->sq; + memset(sq->itm, 0, sizeof(*sq->itm)); + + /* iplug->init_convert_data() */ + return init_convert_data_ctail(sq->itm, inode); +} + +/* create and attach disk cluster info used by 'convert' phase of the flush + squalloc() */ +static int attach_convert_idata(flush_pos_t * pos, struct inode *inode) +{ + int ret = 0; + struct convert_item_info *info; + struct cluster_handle *clust; + file_plugin *fplug = inode_file_plugin(inode); + + assert("edward-248", pos != NULL); + assert("edward-249", pos->child != NULL); + assert("edward-251", inode != NULL); + assert("edward-682", cryptcompress_inode_ok(inode)); + assert("edward-252", + fplug == file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID)); + assert("edward-473", + item_plugin_by_coord(&pos->coord) == + item_plugin_by_id(CTAIL_ID)); + + if (!pos->sq) { + pos->sq = alloc_convert_data(); + if (!pos->sq) + return RETERR(-ENOMEM); + } + else + reset_convert_data(pos->sq); + + clust = &pos->sq->clust; + + ret = set_cluster_by_page(clust, + jnode_page(pos->child), + MAX_CLUSTER_NRPAGES); + if (ret) + goto err; + + assert("edward-829", pos->sq != NULL); + assert("edward-250", item_convert_data(pos) == NULL); + + pos->sq->iplug = item_plugin_by_id(CTAIL_ID); + + ret = alloc_item_convert_data(pos->sq); + if (ret) + goto err; + ret = init_item_convert_data(pos, inode); + if (ret) + goto err; + info = item_convert_data(pos); + + ret = checkout_logical_cluster(clust, pos->child, inode); + if (ret) + goto err; + + reiser4_deflate_cluster(clust, inode); + inc_item_convert_count(pos); + + /* prepare flow for insertion */ + fplug->flow_by_inode(inode, + (const char __user 
*)tfm_stream_data(&clust->tc, + OUTPUT_STREAM), + 0 /* kernel space */ , + clust->tc.len, + clust_to_off(clust->index, inode), + WRITE_OP, &info->flow); + if (clust->tc.hole) + info->flow.length = 0; + + jput(pos->child); + return 0; + err: + jput(pos->child); + free_convert_data(pos); + return ret; +} + +/* clear up disk cluster info */ +static void detach_convert_idata(struct convert_info * sq) +{ + struct convert_item_info *info; + + assert("edward-253", sq != NULL); + assert("edward-840", sq->itm != NULL); + + info = sq->itm; + assert("edward-1212", info->flow.length == 0); + + free_item_convert_data(sq); + return; +} + +/* plugin->u.item.f.utmost_child */ + +/* This function sets leftmost child for a first cluster item, + if the child exists, and NULL in other cases. + NOTE-EDWARD: Do not call this for RIGHT_SIDE */ + +int utmost_child_ctail(const coord_t * coord, sideof side, jnode ** child) +{ + reiser4_key key; + + item_key_by_coord(coord, &key); + + assert("edward-257", coord != NULL); + assert("edward-258", child != NULL); + assert("edward-259", side == LEFT_SIDE); + assert("edward-260", + item_plugin_by_coord(coord) == item_plugin_by_id(CTAIL_ID)); + + if (!is_disk_cluster_key(&key, coord)) + *child = NULL; + else + *child = jlookup(current_tree, + get_key_objectid(item_key_by_coord + (coord, &key)), + off_to_pg(get_key_offset(&key))); + return 0; +} + +/* + * Set status (d_next) of the first item at the right neighbor + * + * If the current position is the last item in the node, then + * look at its first item at the right neighbor (skip empty nodes). + * Note, that right neighbors may be not dirty because of races. + * If so, make it dirty and set convertible flag. + */ +static int pre_convert_ctail(flush_pos_t * pos) +{ + int ret = 0; + int stop = 0; + znode *slider; + lock_handle slider_lh; + lock_handle right_lh; + + assert("edward-1232", !node_is_empty(pos->coord.node)); + assert("edward-1014", + pos->coord.item_pos < coord_num_items(&pos->coord)); + assert("edward-1015", convert_data_attached(pos)); + assert("edward-1611", + item_convert_data(pos)->d_cur != DC_INVALID_STATE); + assert("edward-1017", + item_convert_data(pos)->d_next == DC_INVALID_STATE); + + /* + * In the following two cases we don't need + * to look at right neighbor + */ + if (item_convert_data(pos)->d_cur == DC_AFTER_CLUSTER) { + /* + * cluster is over, so the first item of the right + * neighbor doesn't belong to this cluster + */ + return 0; + } + if (pos->coord.item_pos < coord_num_items(&pos->coord) - 1) { + /* + * current position is not the last item in the node, + * so the first item of the right neighbor doesn't + * belong to this cluster + */ + return 0; + } + /* + * Look at right neighbor. + * Note that concurrent truncate is not a problem + * since we have locked the beginning of the cluster. 
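+ *
+ * (Editor's summary of the loop below: walk rightward, skipping empty
+ *  neighbors; if the first item of the first non-empty right neighbor
+ *  belongs to the same disk cluster, d_next becomes DC_CHAINED_ITEM,
+ *  otherwise DC_AFTER_CLUSTER; hitting -E_NO_NEIGHBOR also means
+ *  DC_AFTER_CLUSTER.)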
+ */ + slider = pos->coord.node; + init_lh(&slider_lh); + init_lh(&right_lh); + + while (!stop) { + coord_t coord; + + ret = reiser4_get_right_neighbor(&right_lh, + slider, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (ret) + break; + slider = right_lh.node; + ret = zload(slider); + if (ret) + break; + coord_init_before_first_item(&coord, slider); + + if (node_is_empty(slider)) { + warning("edward-1641", "Found empty right neighbor"); + znode_make_dirty(slider); + znode_set_convertible(slider); + /* + * skip this node, + * go rightward + */ + stop = 0; + } else if (same_disk_cluster(&pos->coord, &coord)) { + + item_convert_data(pos)->d_next = DC_CHAINED_ITEM; + + if (!ZF_ISSET(slider, JNODE_DIRTY)) { + /* + warning("edward-1024", + "next slum item mergeable, " + "but znode %p isn't dirty\n", + lh.node); + */ + znode_make_dirty(slider); + } + if (!znode_convertible(slider)) { + /* + warning("edward-1272", + "next slum item mergeable, " + "but znode %p isn't convertible\n", + lh.node); + */ + znode_set_convertible(slider); + } + stop = 1; + convert_data(pos)->right_locked = 1; + } else { + item_convert_data(pos)->d_next = DC_AFTER_CLUSTER; + stop = 1; + convert_data(pos)->right_locked = 1; + } + zrelse(slider); + done_lh(&slider_lh); + move_lh(&slider_lh, &right_lh); + } + if (convert_data(pos)->right_locked) + /* + * Store locked right neighbor in + * the conversion info. Otherwise, + * we won't be able to access it, + * if the current node gets deleted + * during conversion + */ + move_lh(&convert_data(pos)->right_lock, &slider_lh); + done_lh(&slider_lh); + done_lh(&right_lh); + + if (ret == -E_NO_NEIGHBOR) { + item_convert_data(pos)->d_next = DC_AFTER_CLUSTER; + ret = 0; + } + assert("edward-1610", + ergo(ret != 0, + item_convert_data(pos)->d_next == DC_INVALID_STATE)); + return ret; +} + +/* + * do some post-conversion actions; + * detach conversion data if there is nothing to convert anymore + */ +static void post_convert_ctail(flush_pos_t * pos, + ctail_convert_mode_t mode, int old_nr_items) +{ + switch (mode) { + case CTAIL_CUT_ITEM: + assert("edward-1214", item_convert_data(pos)->flow.length == 0); + assert("edward-1215", + coord_num_items(&pos->coord) == old_nr_items || + coord_num_items(&pos->coord) == old_nr_items - 1); + + if (item_convert_data(pos)->d_next == DC_CHAINED_ITEM) + /* + * the next item belongs to this cluster, + * and should be also killed + */ + break; + if (coord_num_items(&pos->coord) != old_nr_items) { + /* + * the latest item in the + * cluster has been killed, + */ + detach_convert_idata(pos->sq); + if (!node_is_empty(pos->coord.node)) + /* + * make sure the next item will be scanned + */ + coord_init_before_item(&pos->coord); + break; + } + case CTAIL_APPEND_ITEM: + /* + * in the append mode the whole flow has been inserted + * (see COP_INSERT_FLOW primitive) + */ + assert("edward-434", item_convert_data(pos)->flow.length == 0); + detach_convert_idata(pos->sq); + break; + case CTAIL_OVERWRITE_ITEM: + if (coord_is_unprepped_ctail(&pos->coord)) { + /* + * the first (unprepped) ctail has been overwritten; + * convert it to the prepped one + */ + assert("edward-1259", + cluster_shift_ok(item_convert_data(pos)-> + cluster_shift)); + put_unaligned((d8)item_convert_data(pos)->cluster_shift, + &ctail_formatted_at(&pos->coord)-> + cluster_shift); + } + break; + default: + impossible("edward-1609", "Bad ctail conversion mode"); + } +} + +static int assign_conversion_mode(flush_pos_t * pos, ctail_convert_mode_t *mode) +{ + int ret = 0; + + *mode = 
CTAIL_INVAL_CONVERT_MODE; + + if (!convert_data_attached(pos)) { + if (should_attach_convert_idata(pos)) { + struct inode *inode; + gfp_t old_mask = get_current_context()->gfp_mask; + + assert("edward-264", pos->child != NULL); + assert("edward-265", jnode_page(pos->child) != NULL); + assert("edward-266", + jnode_page(pos->child)->mapping != NULL); + + inode = jnode_page(pos->child)->mapping->host; + + assert("edward-267", inode != NULL); + /* + * attach new convert item info + */ + get_current_context()->gfp_mask |= __GFP_NOFAIL; + ret = attach_convert_idata(pos, inode); + get_current_context()->gfp_mask = old_mask; + pos->child = NULL; + if (ret == -E_REPEAT) { + /* + * jnode became clean, or there is no dirty + * pages (nothing to update in disk cluster) + */ + warning("edward-1021", + "convert_ctail: nothing to attach"); + ret = 0; + goto dont_convert; + } + if (ret) + goto dont_convert; + + if (pos->sq->clust.tc.hole) { + assert("edward-1634", + item_convert_data(pos)->flow.length == 0); + /* + * new content is filled with zeros - + * we punch a hole using cut (not kill) + * primitive, so attached pages won't + * be truncated + */ + *mode = CTAIL_CUT_ITEM; + } + else + /* + * this is the first ctail in the cluster, + * so it (may be only its head) should be + * overwritten + */ + *mode = CTAIL_OVERWRITE_ITEM; + } else + /* + * non-convertible item + */ + goto dont_convert; + } else { + /* + * use old convert info + */ + struct convert_item_info *idata; + idata = item_convert_data(pos); + + switch (idata->d_cur) { + case DC_FIRST_ITEM: + case DC_CHAINED_ITEM: + if (idata->flow.length) + *mode = CTAIL_OVERWRITE_ITEM; + else + *mode = CTAIL_CUT_ITEM; + break; + case DC_AFTER_CLUSTER: + if (idata->flow.length) + *mode = CTAIL_APPEND_ITEM; + else { + /* + * nothing to update anymore + */ + detach_convert_idata(pos->sq); + goto dont_convert; + } + break; + default: + impossible("edward-1018", + "wrong current item state"); + ret = RETERR(-EIO); + goto dont_convert; + } + } + /* + * ok, ctail will be converted + */ + assert("edward-433", convert_data_attached(pos)); + assert("edward-1022", + pos->coord.item_pos < coord_num_items(&pos->coord)); + return 0; + dont_convert: + return ret; +} + +/* + * perform an operation on the ctail item in + * accordance with assigned conversion @mode + */ +static int do_convert_ctail(flush_pos_t * pos, ctail_convert_mode_t mode) +{ + int result = 0; + struct convert_item_info * info; + + assert("edward-468", pos != NULL); + assert("edward-469", pos->sq != NULL); + assert("edward-845", item_convert_data(pos) != NULL); + + info = item_convert_data(pos); + assert("edward-679", info->flow.data != NULL); + + switch (mode) { + case CTAIL_APPEND_ITEM: + assert("edward-1229", info->flow.length != 0); + assert("edward-1256", + cluster_shift_ok(cluster_shift_by_coord(&pos->coord))); + /* + * insert flow without balancing + * (see comments to convert_node()) + */ + result = insert_cryptcompress_flow_in_place(&pos->coord, + &pos->lock, + &info->flow, + info->cluster_shift); + break; + case CTAIL_OVERWRITE_ITEM: + assert("edward-1230", info->flow.length != 0); + overwrite_ctail(&pos->coord, &info->flow); + if (info->flow.length != 0) + break; + else + /* + * fall through: + * cut the rest of item (if any) + */ + ; + case CTAIL_CUT_ITEM: + assert("edward-1231", info->flow.length == 0); + result = cut_ctail(&pos->coord); + break; + default: + result = RETERR(-EIO); + impossible("edward-244", "bad ctail conversion mode"); + } + return result; +} + +/* + * 
plugin->u.item.f.convert
+ *
+ * Convert ctail items at flush time
+ */
+int convert_ctail(flush_pos_t * pos)
+{
+ int ret;
+ int old_nr_items;
+ ctail_convert_mode_t mode;
+
+ assert("edward-1020", pos != NULL);
+ assert("edward-1213", coord_num_items(&pos->coord) != 0);
+ assert("edward-1257", item_id_by_coord(&pos->coord) == CTAIL_ID);
+ assert("edward-1258", ctail_ok(&pos->coord));
+ assert("edward-261", pos->coord.node != NULL);
+
+ old_nr_items = coord_num_items(&pos->coord);
+ /*
+ * detach old conversion data and
+ * attach a new one, if needed
+ */
+ ret = assign_conversion_mode(pos, &mode);
+ if (ret || mode == CTAIL_INVAL_CONVERT_MODE) {
+ assert("edward-1633", !convert_data_attached(pos));
+ return ret;
+ }
+ /*
+ * find out the status of the right neighbor
+ */
+ ret = pre_convert_ctail(pos);
+ if (ret) {
+ detach_convert_idata(pos->sq);
+ return ret;
+ }
+ ret = do_convert_ctail(pos, mode);
+ if (ret) {
+ detach_convert_idata(pos->sq);
+ return ret;
+ }
+ /*
+ * detach old conversion data if needed
+ */
+ post_convert_ctail(pos, mode, old_nr_items);
+ return 0;
+}
+
+/*
+ Local variables:
+ c-indentation-style: "K&R"
+ mode-name: "LC"
+ c-basic-offset: 8
+ tab-width: 8
+ fill-column: 120
+ End:
+*/
diff --git a/fs/reiser4/plugin/item/ctail.h b/fs/reiser4/plugin/item/ctail.h
new file mode 100644
index 000000000000..d18e04632ffe
--- /dev/null
+++ b/fs/reiser4/plugin/item/ctail.h
@@ -0,0 +1,102 @@
+/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
+
+/* Ctail items are fragments (or bodies) of a special type that provides
+ optimal storage of encrypted and/or compressed files. */
+
+
+#if !defined( __FS_REISER4_CTAIL_H__ )
+#define __FS_REISER4_CTAIL_H__
+
+/* Disk format of ctail item */
+typedef struct ctail_item_format {
+ /* packed shift;
+ if its value is different from UCTAIL_SHIFT (see below), then
+ size of disk cluster is calculated as (1 << cluster_shift) */
+ d8 cluster_shift;
+ /* ctail body */
+ d8 body[0];
+} __attribute__ ((packed)) ctail_item_format;
+
+/* An "unprepped" disk cluster is represented by a single ctail item
+ with the following "magic" attributes: */
+/* "magic" cluster_shift */
+#define UCTAIL_SHIFT 0xff
+/* How many units an unprepped ctail item has */
+#define UCTAIL_NR_UNITS 1
+
+/* The following is a set of various item states in a disk cluster.
+ A disk cluster is a set of items whose keys belong to the interval
+ [dc_key , dc_key + disk_cluster_size - 1] */
+typedef enum {
+ DC_INVALID_STATE = 0,
+ DC_FIRST_ITEM = 1,
+ DC_CHAINED_ITEM = 2,
+ DC_AFTER_CLUSTER = 3
+} dc_item_stat;
+
+/* ctail-specific extension.
+ In particular this describes parameters of disk cluster an item belongs to */ +struct ctail_coord_extension { + int shift; /* this contains cluster_shift extracted from + ctail_item_format (above), or UCTAIL_SHIFT + (the last one is the "magic" of unprepped disk clusters)*/ + int dsize; /* size of a prepped disk cluster */ + int ncount; /* count of nodes occupied by a disk cluster */ +}; + +struct cut_list; + +/* plugin->item.b.* */ +int can_contain_key_ctail(const coord_t *, const reiser4_key *, + const reiser4_item_data *); +int mergeable_ctail(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_ctail(const coord_t * coord); +int estimate_ctail(const coord_t * coord, const reiser4_item_data * data); +void print_ctail(const char *prefix, coord_t * coord); +lookup_result lookup_ctail(const reiser4_key *, lookup_bias, coord_t *); + +int paste_ctail(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG); +int init_ctail(coord_t *, coord_t *, reiser4_item_data *); +int can_shift_ctail(unsigned free_space, coord_t * coord, + znode * target, shift_direction pend, unsigned *size, + unsigned want); +void copy_units_ctail(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction where_is_free_space, + unsigned free_space); +int cut_units_ctail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_ctail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int ctail_ok(const coord_t * coord); +int check_ctail(const coord_t * coord, const char **error); + +/* plugin->u.item.s.* */ +int read_ctail(struct file *, flow_t *, hint_t *); +int readpage_ctail(void *, struct page *); +int readpages_ctail(struct file *, struct address_space *, struct list_head *); +reiser4_key *append_key_ctail(const coord_t *, reiser4_key *); +int create_hook_ctail(const coord_t * coord, void *arg); +int kill_hook_ctail(const coord_t *, pos_in_node_t, pos_in_node_t, + carry_kill_data *); +int shift_hook_ctail(const coord_t *, unsigned, unsigned, znode *); + +/* plugin->u.item.f */ +int utmost_child_ctail(const coord_t *, sideof, jnode **); +int scan_ctail(flush_scan *); +int convert_ctail(flush_pos_t *); +size_t inode_scaled_cluster_size(struct inode *); + +#endif /* __FS_REISER4_CTAIL_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/extent.c b/fs/reiser4/plugin/item/extent.c new file mode 100644 index 000000000000..e35a4d5b7868 --- /dev/null +++ b/fs/reiser4/plugin/item/extent.c @@ -0,0 +1,197 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../key.h" +#include "../../super.h" +#include "../../carry.h" +#include "../../inode.h" +#include "../../page_cache.h" +#include "../../flush.h" +#include "../object.h" + +/* prepare structure reiser4_item_data. 
It is used to put one extent unit into tree */ +/* Audited by: green(2002.06.13) */ +reiser4_item_data *init_new_extent(reiser4_item_data * data, void *ext_unit, + int nr_extents) +{ + data->data = ext_unit; + /* data->data is kernel space */ + data->user = 0; + data->length = sizeof(reiser4_extent) * nr_extents; + data->arg = NULL; + data->iplug = item_plugin_by_id(EXTENT_POINTER_ID); + return data; +} + +/* how many bytes are addressed by @nr first extents of the extent item */ +reiser4_block_nr reiser4_extent_size(const coord_t * coord, pos_in_node_t nr) +{ + pos_in_node_t i; + reiser4_block_nr blocks; + reiser4_extent *ext; + + ext = item_body_by_coord(coord); + assert("vs-263", nr <= nr_units_extent(coord)); + + blocks = 0; + for (i = 0; i < nr; i++, ext++) { + blocks += extent_get_width(ext); + } + + return blocks * current_blocksize; +} + +extent_state state_of_extent(reiser4_extent * ext) +{ + switch ((int)extent_get_start(ext)) { + case 0: + return HOLE_EXTENT; + case 1: + return UNALLOCATED_EXTENT; + default: + break; + } + return ALLOCATED_EXTENT; +} + +int extent_is_unallocated(const coord_t * item) +{ + assert("jmacd-5133", item_is_extent(item)); + + return state_of_extent(extent_by_coord(item)) == UNALLOCATED_EXTENT; +} + +/* set extent's start and width */ +void reiser4_set_extent(reiser4_extent * ext, reiser4_block_nr start, + reiser4_block_nr width) +{ + extent_set_start(ext, start); + extent_set_width(ext, width); +} + +/** + * reiser4_replace_extent - replace extent and paste 1 or 2 after it + * @un_extent: coordinate of extent to be overwritten + * @lh: need better comment + * @key: need better comment + * @exts_to_add: data prepared for insertion into tree + * @replace: need better comment + * @flags: need better comment + * @return_insert_position: need better comment + * + * Overwrites one extent, pastes 1 or 2 more ones after overwritten one. If + * @return_inserted_position is 1 - @un_extent and @lh are returned set to + * first of newly inserted units, if it is 0 - @un_extent and @lh are returned + * set to extent which was overwritten. + */ +int reiser4_replace_extent(struct replace_handle *h, + int return_inserted_position) +{ + int result; + znode *orig_znode; + /*ON_DEBUG(reiser4_extent orig_ext);*/ /* this is for debugging */ + + assert("vs-990", coord_is_existing_unit(h->coord)); + assert("vs-1375", znode_is_write_locked(h->coord->node)); + assert("vs-1426", extent_get_width(&h->overwrite) != 0); + assert("vs-1427", extent_get_width(&h->new_extents[0]) != 0); + assert("vs-1427", ergo(h->nr_new_extents == 2, + extent_get_width(&h->new_extents[1]) != 0)); + + /* compose structure for paste */ + init_new_extent(&h->item, &h->new_extents[0], h->nr_new_extents); + + coord_dup(&h->coord_after, h->coord); + init_lh(&h->lh_after); + copy_lh(&h->lh_after, h->lh); + reiser4_tap_init(&h->watch, &h->coord_after, &h->lh_after, ZNODE_WRITE_LOCK); + reiser4_tap_monitor(&h->watch); + + ON_DEBUG(h->orig_ext = *extent_by_coord(h->coord)); + orig_znode = h->coord->node; + +#if REISER4_DEBUG + /* make sure that key is set properly */ + unit_key_by_coord(h->coord, &h->tmp); + set_key_offset(&h->tmp, + get_key_offset(&h->tmp) + + extent_get_width(&h->overwrite) * current_blocksize); + assert("vs-1080", keyeq(&h->tmp, &h->paste_key)); +#endif + + /* set insert point after unit to be replaced */ + h->coord->between = AFTER_UNIT; + + result = insert_into_item(h->coord, return_inserted_position ? 
h->lh : NULL, + &h->paste_key, &h->item, h->flags); + if (!result) { + /* now we have to replace the unit after which new units were + inserted. Its position is tracked by @watch */ + reiser4_extent *ext; + znode *node; + + node = h->coord_after.node; + if (node != orig_znode) { + coord_clear_iplug(&h->coord_after); + result = zload(node); + } + + if (likely(!result)) { + ext = extent_by_coord(&h->coord_after); + + assert("vs-987", znode_is_loaded(node)); + assert("vs-988", !memcmp(ext, &h->orig_ext, sizeof(*ext))); + + /* overwrite extent unit */ + memcpy(ext, &h->overwrite, sizeof(reiser4_extent)); + znode_make_dirty(node); + + if (node != orig_znode) + zrelse(node); + + if (return_inserted_position == 0) { + /* coord and lh are to be set to overwritten + extent */ + assert("vs-1662", + WITH_DATA(node, !memcmp(&h->overwrite, + extent_by_coord( + &h->coord_after), + sizeof(reiser4_extent)))); + + *h->coord = h->coord_after; + done_lh(h->lh); + copy_lh(h->lh, &h->lh_after); + } else { + /* h->coord and h->lh are to be set to first of + inserted units */ + assert("vs-1663", + WITH_DATA(h->coord->node, + !memcmp(&h->new_extents[0], + extent_by_coord(h->coord), + sizeof(reiser4_extent)))); + assert("vs-1664", h->lh->node == h->coord->node); + } + } + } + reiser4_tap_done(&h->watch); + + return result; +} + +lock_handle *znode_lh(znode *node) +{ + assert("vs-1371", znode_is_write_locked(node)); + assert("vs-1372", znode_is_wlocked_once(node)); + return list_entry(node->lock.owners.next, lock_handle, owners_link); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/extent.h b/fs/reiser4/plugin/item/extent.h new file mode 100644 index 000000000000..1ea2e7bdc524 --- /dev/null +++ b/fs/reiser4/plugin/item/extent.h @@ -0,0 +1,231 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#ifndef __REISER4_EXTENT_H__ +#define __REISER4_EXTENT_H__ + +/* on disk extent */ +typedef struct { + reiser4_dblock_nr start; + reiser4_dblock_nr width; +} reiser4_extent; + +struct extent_stat { + int unallocated_units; + int unallocated_blocks; + int allocated_units; + int allocated_blocks; + int hole_units; + int hole_blocks; +}; + +/* extents in an extent item can be either holes, or unallocated or allocated + extents */ +typedef enum { + HOLE_EXTENT, + UNALLOCATED_EXTENT, + ALLOCATED_EXTENT +} extent_state; + +#define HOLE_EXTENT_START 0 +#define UNALLOCATED_EXTENT_START 1 +#define UNALLOCATED_EXTENT_START2 2 + +struct extent_coord_extension { + reiser4_block_nr pos_in_unit; + reiser4_block_nr width; /* width of current unit */ + pos_in_node_t nr_units; /* number of units */ + int ext_offset; /* offset from the beginning of zdata() */ + unsigned long expected_page; +#if REISER4_DEBUG + reiser4_extent extent; +#endif +}; + +/* macros to set/get fields of on-disk extent */ +static inline reiser4_block_nr extent_get_start(const reiser4_extent * ext) +{ + return le64_to_cpu(ext->start); +} + +static inline reiser4_block_nr extent_get_width(const reiser4_extent * ext) +{ + return le64_to_cpu(ext->width); +} + +extern __u64 reiser4_current_block_count(void); + +static inline void +extent_set_start(reiser4_extent * ext, reiser4_block_nr start) +{ + cassert(sizeof(ext->start) == 8); + assert("nikita-2510", + ergo(start > 1, start < reiser4_current_block_count())); + put_unaligned(cpu_to_le64(start), &ext->start); +} + +static 
inline void +extent_set_width(reiser4_extent * ext, reiser4_block_nr width) +{ + cassert(sizeof(ext->width) == 8); + assert("", width > 0); + put_unaligned(cpu_to_le64(width), &ext->width); + assert("nikita-2511", + ergo(extent_get_start(ext) > 1, + extent_get_start(ext) + width <= + reiser4_current_block_count())); +} + +#define extent_item(coord) \ +({ \ + assert("nikita-3143", item_is_extent(coord)); \ + ((reiser4_extent *)item_body_by_coord (coord)); \ +}) + +#define extent_by_coord(coord) \ +({ \ + assert("nikita-3144", item_is_extent(coord)); \ + (extent_item (coord) + (coord)->unit_pos); \ +}) + +#define width_by_coord(coord) \ +({ \ + assert("nikita-3145", item_is_extent(coord)); \ + extent_get_width (extent_by_coord(coord)); \ +}) + +struct carry_cut_data; +struct carry_kill_data; + +/* plugin->u.item.b.* */ +reiser4_key *max_key_inside_extent(const coord_t *, reiser4_key *); +int can_contain_key_extent(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data *); +int mergeable_extent(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_extent(const coord_t *); +lookup_result lookup_extent(const reiser4_key *, lookup_bias, coord_t *); +void init_coord_extent(coord_t *); +int init_extent(coord_t *, reiser4_item_data *); +int paste_extent(coord_t *, reiser4_item_data *, carry_plugin_info *); +int can_shift_extent(unsigned free_space, + coord_t * source, znode * target, shift_direction, + unsigned *size, unsigned want); +void copy_units_extent(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction where_is_free_space, + unsigned free_space); +int kill_hook_extent(const coord_t *, pos_in_node_t from, pos_in_node_t count, + struct carry_kill_data *); +int create_hook_extent(const coord_t * coord, void *arg); +int cut_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +reiser4_key *unit_key_extent(const coord_t *, reiser4_key *); +reiser4_key *max_unit_key_extent(const coord_t *, reiser4_key *); +void print_extent(const char *, coord_t *); +int utmost_child_extent(const coord_t * coord, sideof side, jnode ** child); +int utmost_child_real_block_extent(const coord_t * coord, sideof side, + reiser4_block_nr * block); +void item_stat_extent(const coord_t * coord, void *vp); +int reiser4_check_extent(const coord_t * coord, const char **error); + +/* plugin->u.item.s.file.* */ +ssize_t reiser4_write_extent(struct file *, struct inode * inode, + const char __user *, size_t, loff_t *); +int reiser4_read_extent(struct file *, flow_t *, hint_t *); +int reiser4_readpage_extent(void *, struct page *); +int reiser4_do_readpage_extent(reiser4_extent*, reiser4_block_nr, struct page*); +reiser4_key *append_key_extent(const coord_t *, reiser4_key *); +void init_coord_extension_extent(uf_coord_t *, loff_t offset); +int get_block_address_extent(const coord_t *, sector_t block, + sector_t * result); + +/* these are used in flush.c + FIXME-VS: should they be somewhere in item_plugin? 
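The start field of an on-disk extent does double duty: besides naming the first block of an allocated run, the reserved values 0 and 1 tag the unit as a hole or as an unallocated extent, while width always counts blocks. A minimal stand-alone sketch of that encoding follows; the model_ names and the 4 KiB block size are made up for illustration and are not part of the reiser4 sources.

// Illustrative user-space model of the extent encoding described above.
#include <stdint.h>
#include <stdio.h>

struct model_extent {
        uint64_t start;         // 0 = hole, 1 = unallocated, else first block number
        uint64_t width;         // length of the run, in blocks
};

enum model_state { MODEL_HOLE, MODEL_UNALLOCATED, MODEL_ALLOCATED };

static enum model_state model_state_of(const struct model_extent *e)
{
        if (e->start == 0)
                return MODEL_HOLE;
        if (e->start == 1)
                return MODEL_UNALLOCATED;
        return MODEL_ALLOCATED;
}

int main(void)
{
        static const char *names[] = { "hole", "unallocated", "allocated" };
        const uint64_t blocksize = 4096;
        struct model_extent sample[] = {
                { .start = 0,    .width = 16 },         // 16-block hole
                { .start = 1,    .width = 4  },         // 4 not yet allocated blocks
                { .start = 5000, .width = 8  },         // disk blocks 5000..5007
        };
        unsigned i;

        for (i = 0; i < 3; i++)
                printf("unit %u: %s, %llu bytes addressed\n", i,
                       names[model_state_of(&sample[i])],
                       (unsigned long long)(sample[i].width * blocksize));
        return 0;
}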
*/ +int allocate_extent_item_in_place(coord_t *, lock_handle *, flush_pos_t * pos); +int allocate_and_copy_extent(znode * left, coord_t * right, flush_pos_t * pos, + reiser4_key * stop_key); + +int extent_is_unallocated(const coord_t * item); /* True if this extent is unallocated (i.e., not a hole, not allocated). */ +__u64 extent_unit_index(const coord_t * item); /* Block offset of this unit. */ +__u64 extent_unit_width(const coord_t * item); /* Number of blocks in this unit. */ + +/* plugin->u.item.f. */ +int reiser4_scan_extent(flush_scan * scan); +extern int key_by_offset_extent(struct inode *, loff_t, reiser4_key *); + +reiser4_item_data *init_new_extent(reiser4_item_data * data, void *ext_unit, + int nr_extents); +reiser4_block_nr reiser4_extent_size(const coord_t * coord, pos_in_node_t nr); +extent_state state_of_extent(reiser4_extent * ext); +void reiser4_set_extent(reiser4_extent *, reiser4_block_nr start, + reiser4_block_nr width); +int reiser4_update_extent(struct inode *, jnode *, loff_t pos, + int *plugged_hole); + +#include "../../coord.h" +#include "../../lock.h" +#include "../../tap.h" + +struct replace_handle { + /* these are to be set before calling reiser4_replace_extent */ + coord_t *coord; + lock_handle *lh; + reiser4_key key; + reiser4_key *pkey; + reiser4_extent overwrite; + reiser4_extent new_extents[2]; + int nr_new_extents; + unsigned flags; + + /* these are used by reiser4_replace_extent */ + reiser4_item_data item; + coord_t coord_after; + lock_handle lh_after; + tap_t watch; + reiser4_key paste_key; +#if REISER4_DEBUG + reiser4_extent orig_ext; + reiser4_key tmp; +#endif +}; + +/* this structure is kmalloced before calling make_extent to avoid excessive + stack consumption on plug_hole->reiser4_replace_extent */ +struct make_extent_handle { + uf_coord_t *uf_coord; + reiser4_block_nr blocknr; + int created; + struct inode *inode; + union { + struct { + } append; + struct replace_handle replace; + } u; +}; + +int reiser4_replace_extent(struct replace_handle *, + int return_inserted_position); +lock_handle *znode_lh(znode *); + +/* the reiser4 repacker support */ +struct repacker_cursor; +extern int process_extent_backward_for_repacking(tap_t *, + struct repacker_cursor *); +extern int mark_extent_for_repacking(tap_t *, int); + +#define coord_by_uf_coord(uf_coord) (&((uf_coord)->coord)) +#define ext_coord_by_uf_coord(uf_coord) (&((uf_coord)->extension.extent)) + +/* __REISER4_EXTENT_H__ */ +#endif +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/extent_file_ops.c b/fs/reiser4/plugin/item/extent_file_ops.c new file mode 100644 index 000000000000..ef82745a68c9 --- /dev/null +++ b/fs/reiser4/plugin/item/extent_file_ops.c @@ -0,0 +1,1434 @@ +/* COPYRIGHT 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../inode.h" +#include "../../page_cache.h" +#include "../object.h" + +#include <linux/swap.h> + +static inline reiser4_extent *ext_by_offset(const znode *node, int offset) +{ + reiser4_extent *ext; + + ext = (reiser4_extent *) (zdata(node) + offset); + return ext; +} + +/** + * check_uf_coord - verify coord extension + * @uf_coord: + * @key: + * + * Makes sure that all fields of @uf_coord are set properly. If @key is + * specified - check whether @uf_coord is set correspondingly. 
+ */ +static void check_uf_coord(const uf_coord_t *uf_coord, const reiser4_key *key) +{ +#if REISER4_DEBUG + const coord_t *coord; + const struct extent_coord_extension *ext_coord; + reiser4_extent *ext; + + coord = &uf_coord->coord; + ext_coord = &uf_coord->extension.extent; + ext = ext_by_offset(coord->node, uf_coord->extension.extent.ext_offset); + + assert("", + WITH_DATA(coord->node, + (uf_coord->valid == 1 && + coord_is_iplug_set(coord) && + item_is_extent(coord) && + ext_coord->nr_units == nr_units_extent(coord) && + ext == extent_by_coord(coord) && + ext_coord->width == extent_get_width(ext) && + coord->unit_pos < ext_coord->nr_units && + ext_coord->pos_in_unit < ext_coord->width && + memcmp(ext, &ext_coord->extent, + sizeof(reiser4_extent)) == 0))); + if (key) { + reiser4_key coord_key; + + unit_key_by_coord(&uf_coord->coord, &coord_key); + set_key_offset(&coord_key, + get_key_offset(&coord_key) + + (uf_coord->extension.extent. + pos_in_unit << PAGE_SHIFT)); + assert("", keyeq(key, &coord_key)); + } +#endif +} + +static inline reiser4_extent *ext_by_ext_coord(const uf_coord_t *uf_coord) +{ + return ext_by_offset(uf_coord->coord.node, + uf_coord->extension.extent.ext_offset); +} + +#if REISER4_DEBUG + +/** + * offset_is_in_unit + * + * + * + */ +/* return 1 if offset @off is inside of extent unit pointed to by @coord. Set + pos_in_unit inside of unit correspondingly */ +static int offset_is_in_unit(const coord_t *coord, loff_t off) +{ + reiser4_key unit_key; + __u64 unit_off; + reiser4_extent *ext; + + ext = extent_by_coord(coord); + + unit_key_extent(coord, &unit_key); + unit_off = get_key_offset(&unit_key); + if (off < unit_off) + return 0; + if (off >= (unit_off + (current_blocksize * extent_get_width(ext)))) + return 0; + return 1; +} + +static int +coord_matches_key_extent(const coord_t * coord, const reiser4_key * key) +{ + reiser4_key item_key; + + assert("vs-771", coord_is_existing_unit(coord)); + assert("vs-1258", keylt(key, append_key_extent(coord, &item_key))); + assert("vs-1259", keyge(key, item_key_by_coord(coord, &item_key))); + + return offset_is_in_unit(coord, get_key_offset(key)); +} + +#endif + +/** + * can_append - + * @key: + * @coord: + * + * Returns 1 if @key is equal to an append key of item @coord is set to + */ +static int can_append(const reiser4_key *key, const coord_t *coord) +{ + reiser4_key append_key; + + return keyeq(key, append_key_extent(coord, &append_key)); +} + +/** + * append_hole + * @coord: + * @lh: + * @key: + * + */ +static int append_hole(coord_t *coord, lock_handle *lh, + const reiser4_key *key) +{ + reiser4_key append_key; + reiser4_block_nr hole_width; + reiser4_extent *ext, new_ext; + reiser4_item_data idata; + + /* last item of file may have to be appended with hole */ + assert("vs-708", znode_get_level(coord->node) == TWIG_LEVEL); + assert("vs-714", item_id_by_coord(coord) == EXTENT_POINTER_ID); + + /* key of first byte which is not addressed by this extent */ + append_key_extent(coord, &append_key); + + assert("", keyle(&append_key, key)); + + /* + * extent item has to be appended with hole. Calculate length of that + * hole + */ + hole_width = ((get_key_offset(key) - get_key_offset(&append_key) + + current_blocksize - 1) >> current_blocksize_bits); + assert("vs-954", hole_width > 0); + + /* set coord after last unit */ + coord_init_after_item_end(coord); + + /* get last extent in the item */ + ext = extent_by_coord(coord); + if (state_of_extent(ext) == HOLE_EXTENT) { + /* + * last extent of a file is hole extent. 
Widen that extent by + * @hole_width blocks. Note that we do not worry about + * overflowing - extent width is 64 bits + */ + reiser4_set_extent(ext, HOLE_EXTENT_START, + extent_get_width(ext) + hole_width); + znode_make_dirty(coord->node); + return 0; + } + + /* append last item of the file with hole extent unit */ + assert("vs-713", (state_of_extent(ext) == ALLOCATED_EXTENT || + state_of_extent(ext) == UNALLOCATED_EXTENT)); + + reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width); + init_new_extent(&idata, &new_ext, 1); + return insert_into_item(coord, lh, &append_key, &idata, 0); +} + +/** + * check_jnodes + * @twig: longterm locked twig node + * @key: + * + */ +static void check_jnodes(znode *twig, const reiser4_key *key, int count) +{ +#if REISER4_DEBUG + coord_t c; + reiser4_key node_key, jnode_key; + + jnode_key = *key; + + assert("", twig != NULL); + assert("", znode_get_level(twig) == TWIG_LEVEL); + assert("", znode_is_write_locked(twig)); + + zload(twig); + /* get the smallest key in twig node */ + coord_init_first_unit(&c, twig); + unit_key_by_coord(&c, &node_key); + assert("", keyle(&node_key, &jnode_key)); + + coord_init_last_unit(&c, twig); + unit_key_by_coord(&c, &node_key); + if (item_plugin_by_coord(&c)->s.file.append_key) + item_plugin_by_coord(&c)->s.file.append_key(&c, &node_key); + set_key_offset(&jnode_key, + get_key_offset(&jnode_key) + (loff_t)count * PAGE_SIZE - 1); + assert("", keylt(&jnode_key, &node_key)); + zrelse(twig); +#endif +} + +/** + * append_last_extent - append last file item + * @uf_coord: coord to start insertion from + * @jnodes: array of jnodes + * @count: number of jnodes in the array + * + * There is already at least one extent item of file @inode in the tree. Append + * the last of them with unallocated extent unit of width @count. Assign + * fake block numbers to jnodes corresponding to the inserted extent. + */ +static int append_last_extent(uf_coord_t *uf_coord, const reiser4_key *key, + jnode **jnodes, int count) +{ + int result; + reiser4_extent new_ext; + reiser4_item_data idata; + coord_t *coord; + struct extent_coord_extension *ext_coord; + reiser4_extent *ext; + reiser4_block_nr block; + jnode *node; + int i; + + coord = &uf_coord->coord; + ext_coord = &uf_coord->extension.extent; + ext = ext_by_ext_coord(uf_coord); + + /* check correctness of position in the item */ + assert("vs-228", coord->unit_pos == coord_last_unit_pos(coord)); + assert("vs-1311", coord->between == AFTER_UNIT); + assert("vs-1302", ext_coord->pos_in_unit == ext_coord->width - 1); + + if (!can_append(key, coord)) { + /* hole extent has to be inserted */ + result = append_hole(coord, uf_coord->lh, key); + uf_coord->valid = 0; + return result; + } + + if (count == 0) + return 0; + + assert("", get_key_offset(key) == (loff_t)index_jnode(jnodes[0]) * PAGE_SIZE); + + inode_add_blocks(mapping_jnode(jnodes[0])->host, count); + + switch (state_of_extent(ext)) { + case UNALLOCATED_EXTENT: + /* + * last extent unit of the file is unallocated one. Increase + * its width by @count + */ + reiser4_set_extent(ext, UNALLOCATED_EXTENT_START, + extent_get_width(ext) + count); + znode_make_dirty(coord->node); + + /* update coord extension */ + ext_coord->width += count; + ON_DEBUG(extent_set_width + (&uf_coord->extension.extent.extent, + ext_coord->width)); + break; + + case HOLE_EXTENT: + case ALLOCATED_EXTENT: + /* + * last extent unit of the file is either hole or allocated + * one. 
Append one unallocated extent of width @count + */ + reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count); + init_new_extent(&idata, &new_ext, 1); + result = insert_into_item(coord, uf_coord->lh, key, &idata, 0); + uf_coord->valid = 0; + if (result) + return result; + break; + + default: + return RETERR(-EIO); + } + + /* + * make sure that we hold long term locked twig node containing all + * jnodes we are about to capture + */ + check_jnodes(uf_coord->lh->node, key, count); + + /* + * assign fake block numbers to all jnodes. FIXME: make sure whether + * twig node containing inserted extent item is locked + */ + block = fake_blocknr_unformatted(count); + for (i = 0; i < count; i ++, block ++) { + node = jnodes[i]; + spin_lock_jnode(node); + JF_SET(node, JNODE_CREATED); + jnode_set_block(node, &block); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + } + return count; +} + +/** + * insert_first_hole - inser hole extent into tree + * @coord: + * @lh: + * @key: + * + * + */ +static int insert_first_hole(coord_t *coord, lock_handle *lh, + const reiser4_key *key) +{ + reiser4_extent new_ext; + reiser4_item_data idata; + reiser4_key item_key; + reiser4_block_nr hole_width; + + /* @coord must be set for inserting of new item */ + assert("vs-711", coord_is_between_items(coord)); + + item_key = *key; + set_key_offset(&item_key, 0ull); + + hole_width = ((get_key_offset(key) + current_blocksize - 1) >> + current_blocksize_bits); + assert("vs-710", hole_width > 0); + + /* compose body of hole extent and insert item into tree */ + reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width); + init_new_extent(&idata, &new_ext, 1); + return insert_extent_by_coord(coord, &idata, &item_key, lh); +} + + +/** + * insert_first_extent - insert first file item + * @inode: inode of file + * @uf_coord: coord to start insertion from + * @jnodes: array of jnodes + * @count: number of jnodes in the array + * @inode: + * + * There are no items of file @inode in the tree yet. Insert unallocated extent + * of width @count into tree or hole extent if writing not to the + * beginning. Assign fake block numbers to jnodes corresponding to the inserted + * unallocated extent. Returns number of jnodes or error code. 
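When the first write to a file does not start at offset zero, insert_first_hole() above has to cover every byte below the write position with a hole, and append_hole() grows the last item the same way; in both cases the width is a plain ceiling division of the byte gap by the block size. A minimal stand-alone sketch of that arithmetic, assuming a 4 KiB block size (the model_ names are illustrative only):

// Stand-alone sketch of the hole-width arithmetic used when a write starts
// beyond the bytes currently addressed by the file (illustrative only).
#include <stdint.h>
#include <stdio.h>

#define MODEL_BLOCKSIZE_BITS 12
#define MODEL_BLOCKSIZE (1ULL << MODEL_BLOCKSIZE_BITS)

// Number of blocks a hole must span so that every byte in [covered, target)
// is addressed: a plain ceiling division by the block size.
static uint64_t model_hole_width(uint64_t covered, uint64_t target)
{
        return (target - covered + MODEL_BLOCKSIZE - 1) >> MODEL_BLOCKSIZE_BITS;
}

int main(void)
{
        // First write to an empty file lands at byte offset 10000: the item
        // must start with a hole of ceil(10000 / 4096) = 3 blocks.
        printf("hole width for first write at 10000: %llu blocks\n",
               (unsigned long long)model_hole_width(0, 10000));

        // A file already addressing bytes [0, 8192) written at offset 20480:
        // the last item is appended with a ceil(12288 / 4096) = 3 block hole.
        printf("hole width when appending: %llu blocks\n",
               (unsigned long long)model_hole_width(8192, 20480));
        return 0;
}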
+ */ +static int insert_first_extent(uf_coord_t *uf_coord, const reiser4_key *key, + jnode **jnodes, int count, + struct inode *inode) +{ + int result; + int i; + reiser4_extent new_ext; + reiser4_item_data idata; + reiser4_block_nr block; + struct unix_file_info *uf_info; + jnode *node; + + /* first extent insertion starts at leaf level */ + assert("vs-719", znode_get_level(uf_coord->coord.node) == LEAF_LEVEL); + assert("vs-711", coord_is_between_items(&uf_coord->coord)); + + if (get_key_offset(key) != 0) { + result = insert_first_hole(&uf_coord->coord, uf_coord->lh, key); + uf_coord->valid = 0; + uf_info = unix_file_inode_data(inode); + + /* + * first item insertion is only possible when writing to empty + * file or performing tail conversion + */ + assert("", (uf_info->container == UF_CONTAINER_EMPTY || + (reiser4_inode_get_flag(inode, + REISER4_PART_MIXED) && + reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)))); + /* if file was empty - update its state */ + if (result == 0 && uf_info->container == UF_CONTAINER_EMPTY) + uf_info->container = UF_CONTAINER_EXTENTS; + return result; + } + + if (count == 0) + return 0; + + inode_add_blocks(mapping_jnode(jnodes[0])->host, count); + + /* + * prepare for tree modification: compose body of item and item data + * structure needed for insertion + */ + reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count); + init_new_extent(&idata, &new_ext, 1); + + /* insert extent item into the tree */ + result = insert_extent_by_coord(&uf_coord->coord, &idata, key, + uf_coord->lh); + if (result) + return result; + + /* + * make sure that we hold long term locked twig node containing all + * jnodes we are about to capture + */ + check_jnodes(uf_coord->lh->node, key, count); + /* + * assign fake block numbers to all jnodes, capture and mark them dirty + */ + block = fake_blocknr_unformatted(count); + for (i = 0; i < count; i ++, block ++) { + node = jnodes[i]; + spin_lock_jnode(node); + JF_SET(node, JNODE_CREATED); + jnode_set_block(node, &block); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + } + + /* + * invalidate coordinate, research must be performed to continue + * because write will continue on twig level + */ + uf_coord->valid = 0; + return count; +} + +/** + * plug_hole - replace hole extent with unallocated and holes + * @uf_coord: + * @key: + * @node: + * @h: structure containing coordinate, lock handle, key, etc + * + * Creates an unallocated extent of width 1 within a hole. In worst case two + * additional extents can be created. 
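As a concrete example of the worst case mentioned above: plugging block 4 of a 10-block hole, with no unallocated neighbour available to widen, leaves hole(4), unallocated(1), hole(5). The stand-alone sketch below carves a hole that way; it deliberately ignores the neighbour-merging shortcuts the real code tries first, and the model_ names are illustrative only.

// Stand-alone sketch of how a one-block write inside a hole of width W at
// position p carves the hole up, assuming no neighbouring unit can absorb
// the new block (illustrative only).
#include <stdint.h>
#include <stdio.h>

struct model_unit { const char *kind; uint64_t width; };

// Returns the number of resulting units (1, 2 or 3) written into out[].
static int model_plug_hole(uint64_t W, uint64_t p, struct model_unit out[3])
{
        int n = 0;

        if (p > 0)
                out[n++] = (struct model_unit){ "hole", p };
        out[n++] = (struct model_unit){ "unallocated", 1 };
        if (p < W - 1)
                out[n++] = (struct model_unit){ "hole", W - p - 1 };
        return n;
}

int main(void)
{
        struct model_unit out[3];
        int i, n = model_plug_hole(10, 4, out);  // block 4 of a 10-block hole

        for (i = 0; i < n; i++)
                printf("%s(%llu) ", out[i].kind,
                       (unsigned long long)out[i].width);
        printf("\n");  // prints: hole(4) unallocated(1) hole(5)
        return 0;
}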
+ */ +static int plug_hole(uf_coord_t *uf_coord, const reiser4_key *key, int *how) +{ + struct replace_handle rh; + reiser4_extent *ext; + reiser4_block_nr width, pos_in_unit; + coord_t *coord; + struct extent_coord_extension *ext_coord; + int return_inserted_position; + + check_uf_coord(uf_coord, key); + + rh.coord = coord_by_uf_coord(uf_coord); + rh.lh = uf_coord->lh; + rh.flags = 0; + + coord = coord_by_uf_coord(uf_coord); + ext_coord = ext_coord_by_uf_coord(uf_coord); + ext = ext_by_ext_coord(uf_coord); + + width = ext_coord->width; + pos_in_unit = ext_coord->pos_in_unit; + + *how = 0; + if (width == 1) { + reiser4_set_extent(ext, UNALLOCATED_EXTENT_START, 1); + znode_make_dirty(coord->node); + /* update uf_coord */ + ON_DEBUG(ext_coord->extent = *ext); + *how = 1; + return 0; + } else if (pos_in_unit == 0) { + /* we deal with first element of extent */ + if (coord->unit_pos) { + /* there is an extent to the left */ + if (state_of_extent(ext - 1) == UNALLOCATED_EXTENT) { + /* + * left neighboring unit is an unallocated + * extent. Increase its width and decrease + * width of hole + */ + extent_set_width(ext - 1, + extent_get_width(ext - 1) + 1); + extent_set_width(ext, width - 1); + znode_make_dirty(coord->node); + + /* update coord extension */ + coord->unit_pos--; + ext_coord->width = extent_get_width(ext - 1); + ext_coord->pos_in_unit = ext_coord->width - 1; + ext_coord->ext_offset -= sizeof(reiser4_extent); + ON_DEBUG(ext_coord->extent = + *extent_by_coord(coord)); + *how = 2; + return 0; + } + } + /* extent for replace */ + reiser4_set_extent(&rh.overwrite, UNALLOCATED_EXTENT_START, 1); + /* extent to be inserted */ + reiser4_set_extent(&rh.new_extents[0], HOLE_EXTENT_START, + width - 1); + rh.nr_new_extents = 1; + + /* have reiser4_replace_extent to return with @coord and + @uf_coord->lh set to unit which was replaced */ + return_inserted_position = 0; + *how = 3; + } else if (pos_in_unit == width - 1) { + /* we deal with last element of extent */ + if (coord->unit_pos < nr_units_extent(coord) - 1) { + /* there is an extent unit to the right */ + if (state_of_extent(ext + 1) == UNALLOCATED_EXTENT) { + /* + * right neighboring unit is an unallocated + * extent. 
Increase its width and decrease + * width of hole + */ + extent_set_width(ext + 1, + extent_get_width(ext + 1) + 1); + extent_set_width(ext, width - 1); + znode_make_dirty(coord->node); + + /* update coord extension */ + coord->unit_pos++; + ext_coord->width = extent_get_width(ext + 1); + ext_coord->pos_in_unit = 0; + ext_coord->ext_offset += sizeof(reiser4_extent); + ON_DEBUG(ext_coord->extent = + *extent_by_coord(coord)); + *how = 4; + return 0; + } + } + /* extent for replace */ + reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START, width - 1); + /* extent to be inserted */ + reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START, + 1); + rh.nr_new_extents = 1; + + /* have reiser4_replace_extent to return with @coord and + @uf_coord->lh set to unit which was inserted */ + return_inserted_position = 1; + *how = 5; + } else { + /* extent for replace */ + reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START, + pos_in_unit); + /* extents to be inserted */ + reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START, + 1); + reiser4_set_extent(&rh.new_extents[1], HOLE_EXTENT_START, + width - pos_in_unit - 1); + rh.nr_new_extents = 2; + + /* have reiser4_replace_extent to return with @coord and + @uf_coord->lh set to first of units which were inserted */ + return_inserted_position = 1; + *how = 6; + } + unit_key_by_coord(coord, &rh.paste_key); + set_key_offset(&rh.paste_key, get_key_offset(&rh.paste_key) + + extent_get_width(&rh.overwrite) * current_blocksize); + + uf_coord->valid = 0; + return reiser4_replace_extent(&rh, return_inserted_position); +} + +/** + * overwrite_one_block - + * @uf_coord: + * @key: + * @node: + * + * If @node corresponds to hole extent - create unallocated extent for it and + * assign fake block number. If @node corresponds to allocated extent - assign + * block number of jnode + */ +static int overwrite_one_block(uf_coord_t *uf_coord, const reiser4_key *key, + jnode *node, int *hole_plugged) +{ + int result; + struct extent_coord_extension *ext_coord; + reiser4_extent *ext; + reiser4_block_nr block; + int how; + + assert("vs-1312", uf_coord->coord.between == AT_UNIT); + + result = 0; + ext_coord = ext_coord_by_uf_coord(uf_coord); + check_uf_coord(uf_coord, NULL); + ext = ext_by_ext_coord(uf_coord); + assert("", state_of_extent(ext) != UNALLOCATED_EXTENT); + + switch (state_of_extent(ext)) { + case ALLOCATED_EXTENT: + block = extent_get_start(ext) + ext_coord->pos_in_unit; + break; + + case HOLE_EXTENT: + inode_add_blocks(mapping_jnode(node)->host, 1); + result = plug_hole(uf_coord, key, &how); + if (result) + return result; + block = fake_blocknr_unformatted(1); + if (hole_plugged) + *hole_plugged = 1; + JF_SET(node, JNODE_CREATED); + break; + + default: + return RETERR(-EIO); + } + + jnode_set_block(node, &block); + return 0; +} + +/** + * move_coord - move coordinate forward + * @uf_coord: + * + * Move coordinate one data block pointer forward. Return 1 if coord is set to + * the last one already or is invalid. + */ +static int move_coord(uf_coord_t *uf_coord) +{ + struct extent_coord_extension *ext_coord; + + if (uf_coord->valid == 0) + return 1; + ext_coord = &uf_coord->extension.extent; + ext_coord->pos_in_unit ++; + if (ext_coord->pos_in_unit < ext_coord->width) + /* coordinate moved within the unit */ + return 0; + + /* end of unit is reached. 
Try to move to next unit */ + ext_coord->pos_in_unit = 0; + uf_coord->coord.unit_pos ++; + if (uf_coord->coord.unit_pos < ext_coord->nr_units) { + /* coordinate moved to next unit */ + ext_coord->ext_offset += sizeof(reiser4_extent); + ext_coord->width = + extent_get_width(ext_by_offset + (uf_coord->coord.node, + ext_coord->ext_offset)); + ON_DEBUG(ext_coord->extent = + *ext_by_offset(uf_coord->coord.node, + ext_coord->ext_offset)); + return 0; + } + /* end of item is reached */ + uf_coord->valid = 0; + return 1; +} + +/** + * overwrite_extent - + * @inode: + * + * Returns number of handled jnodes. + */ +static int overwrite_extent(uf_coord_t *uf_coord, const reiser4_key *key, + jnode **jnodes, int count, int *plugged_hole) +{ + int result; + reiser4_key k; + int i; + jnode *node; + + k = *key; + for (i = 0; i < count; i ++) { + node = jnodes[i]; + if (*jnode_get_block(node) == 0) { + result = overwrite_one_block(uf_coord, &k, node, plugged_hole); + if (result) + return result; + } + /* + * make sure that we hold long term locked twig node containing + * all jnodes we are about to capture + */ + check_jnodes(uf_coord->lh->node, &k, 1); + /* + * assign fake block numbers to all jnodes, capture and mark + * them dirty + */ + spin_lock_jnode(node); + result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0); + BUG_ON(result != 0); + jnode_make_dirty_locked(node); + spin_unlock_jnode(node); + + if (uf_coord->valid == 0) + return i + 1; + + check_uf_coord(uf_coord, &k); + + if (move_coord(uf_coord)) { + /* + * failed to move to the next node pointer. Either end + * of file or end of twig node is reached. In the later + * case we might go to the right neighbor. + */ + uf_coord->valid = 0; + return i + 1; + } + set_key_offset(&k, get_key_offset(&k) + PAGE_SIZE); + } + + return count; +} + +/** + * reiser4_update_extent + * @file: + * @jnodes: + * @count: + * @off: + * + */ +int reiser4_update_extent(struct inode *inode, jnode *node, loff_t pos, + int *plugged_hole) +{ + int result; + znode *loaded; + uf_coord_t uf_coord; + coord_t *coord; + lock_handle lh; + reiser4_key key; + + assert("", reiser4_lock_counters()->d_refs == 0); + + key_by_inode_and_offset_common(inode, pos, &key); + + init_uf_coord(&uf_coord, &lh); + coord = &uf_coord.coord; + result = find_file_item_nohint(coord, &lh, &key, + ZNODE_WRITE_LOCK, inode); + if (IS_CBKERR(result)) { + assert("", reiser4_lock_counters()->d_refs == 0); + return result; + } + + result = zload(coord->node); + BUG_ON(result != 0); + loaded = coord->node; + + if (coord->between == AFTER_UNIT) { + /* + * append existing extent item with unallocated extent of width + * nr_jnodes + */ + init_coord_extension_extent(&uf_coord, + get_key_offset(&key)); + result = append_last_extent(&uf_coord, &key, + &node, 1); + } else if (coord->between == AT_UNIT) { + /* + * overwrite + * not optimal yet. Will be optimized if new write will show + * performance win. + */ + init_coord_extension_extent(&uf_coord, + get_key_offset(&key)); + result = overwrite_extent(&uf_coord, &key, + &node, 1, plugged_hole); + } else { + /* + * there are no items of this file in the tree yet. Create + * first item of the file inserting one unallocated extent of + * width nr_jnodes + */ + result = insert_first_extent(&uf_coord, &key, &node, 1, inode); + } + assert("", result == 1 || result < 0); + zrelse(loaded); + done_lh(&lh); + assert("", reiser4_lock_counters()->d_refs == 0); + return (result == 1) ? 
0 : result; +} + +/** + * update_extents + * @file: + * @jnodes: + * @count: + * @off: + * + */ +static int update_extents(struct file *file, struct inode *inode, + jnode **jnodes, int count, loff_t pos) +{ + struct hint hint; + reiser4_key key; + int result; + znode *loaded; + + result = load_file_hint(file, &hint); + BUG_ON(result != 0); + + if (count != 0) + /* + * count == 0 is special case: expanding truncate + */ + pos = (loff_t)index_jnode(jnodes[0]) << PAGE_SHIFT; + key_by_inode_and_offset_common(inode, pos, &key); + + assert("", reiser4_lock_counters()->d_refs == 0); + + do { + result = find_file_item(&hint, &key, ZNODE_WRITE_LOCK, inode); + if (IS_CBKERR(result)) { + assert("", reiser4_lock_counters()->d_refs == 0); + return result; + } + + result = zload(hint.ext_coord.coord.node); + BUG_ON(result != 0); + loaded = hint.ext_coord.coord.node; + + if (hint.ext_coord.coord.between == AFTER_UNIT) { + /* + * append existing extent item with unallocated extent + * of width nr_jnodes + */ + if (hint.ext_coord.valid == 0) + /* NOTE: get statistics on this */ + init_coord_extension_extent(&hint.ext_coord, + get_key_offset(&key)); + result = append_last_extent(&hint.ext_coord, &key, + jnodes, count); + } else if (hint.ext_coord.coord.between == AT_UNIT) { + /* + * overwrite + * not optimal yet. Will be optimized if new write will + * show performance win. + */ + if (hint.ext_coord.valid == 0) + /* NOTE: get statistics on this */ + init_coord_extension_extent(&hint.ext_coord, + get_key_offset(&key)); + result = overwrite_extent(&hint.ext_coord, &key, + jnodes, count, NULL); + } else { + /* + * there are no items of this file in the tree + * yet. Create first item of the file inserting one + * unallocated extent of * width nr_jnodes + */ + result = insert_first_extent(&hint.ext_coord, &key, + jnodes, count, inode); + } + zrelse(loaded); + if (result < 0) { + done_lh(hint.ext_coord.lh); + break; + } + + jnodes += result; + count -= result; + set_key_offset(&key, get_key_offset(&key) + result * PAGE_SIZE); + + /* seal and unlock znode */ + if (hint.ext_coord.valid) + reiser4_set_hint(&hint, &key, ZNODE_WRITE_LOCK); + else + reiser4_unset_hint(&hint); + + } while (count > 0); + + save_file_hint(file, &hint); + assert("", reiser4_lock_counters()->d_refs == 0); + return result; +} + +/** + * write_extent_reserve_space - reserve space for extent write operation + * @inode: + * + * Estimates and reserves space which may be required for writing + * WRITE_GRANULARITY pages of file. + */ +static int write_extent_reserve_space(struct inode *inode) +{ + __u64 count; + reiser4_tree *tree; + + /* + * to write WRITE_GRANULARITY pages to a file by extents we have to + * reserve disk space for: + + * 1. find_file_item may have to insert empty node to the tree (empty + * leaf node between two extent items). This requires 1 block and + * number of blocks which are necessary to perform insertion of an + * internal item into twig level. + + * 2. for each of written pages there might be needed 1 block and + * number of blocks which might be necessary to perform insertion of or + * paste to an extent item. + + * 3. 
stat data update + */ + tree = reiser4_tree_by_inode(inode); + count = estimate_one_insert_item(tree) + + WRITE_GRANULARITY * (1 + estimate_one_insert_into_item(tree)) + + estimate_one_insert_item(tree); + grab_space_enable(); + return reiser4_grab_space(count, 0 /* flags */); +} + +/* + * filemap_copy_from_user no longer exists in generic code, because it + * is deadlocky (copying from user while holding the page lock is bad). + * As a temporary fix for reiser4, just define it here. + */ +static inline size_t +filemap_copy_from_user(struct page *page, unsigned long offset, + const char __user *buf, unsigned bytes) +{ + char *kaddr; + int left; + + kaddr = kmap_atomic(page); + left = __copy_from_user_inatomic(kaddr + offset, buf, bytes); + kunmap_atomic(kaddr); + + if (left != 0) { + /* Do it the slow way */ + kaddr = kmap(page); + left = __copy_from_user(kaddr + offset, buf, bytes); + kunmap(page); + } + return bytes - left; +} + +/** + * reiser4_write_extent - write method of extent item plugin + * @file: file to write to + * @buf: address of user-space buffer + * @count: number of bytes to write + * @pos: position in file to write to + * + */ +ssize_t reiser4_write_extent(struct file *file, struct inode * inode, + const char __user *buf, size_t count, loff_t *pos) +{ + int have_to_update_extent; + int nr_pages, nr_dirty; + struct page *page; + jnode *jnodes[WRITE_GRANULARITY + 1]; + unsigned long index; + unsigned long end; + int i; + int to_page, page_off; + size_t left, written; + int result = 0; + + if (write_extent_reserve_space(inode)) + return RETERR(-ENOSPC); + + if (count == 0) { + /* truncate case */ + update_extents(file, inode, jnodes, 0, *pos); + return 0; + } + + BUG_ON(get_current_context()->trans->atom != NULL); + + left = count; + index = *pos >> PAGE_SHIFT; + /* calculate number of pages which are to be written */ + end = ((*pos + count - 1) >> PAGE_SHIFT); + nr_pages = end - index + 1; + nr_dirty = 0; + assert("", nr_pages <= WRITE_GRANULARITY + 1); + + /* get pages and jnodes */ + for (i = 0; i < nr_pages; i ++) { + page = find_or_create_page(inode->i_mapping, index + i, + reiser4_ctx_gfp_mask_get()); + if (page == NULL) { + nr_pages = i; + result = RETERR(-ENOMEM); + goto out; + } + + jnodes[i] = jnode_of_page(page); + if (IS_ERR(jnodes[i])) { + unlock_page(page); + put_page(page); + nr_pages = i; + result = RETERR(-ENOMEM); + goto out; + } + /* prevent jnode and page from disconnecting */ + JF_SET(jnodes[i], JNODE_WRITE_PREPARED); + unlock_page(page); + } + + BUG_ON(get_current_context()->trans->atom != NULL); + + have_to_update_extent = 0; + + page_off = (*pos & (PAGE_SIZE - 1)); + for (i = 0; i < nr_pages; i ++) { + to_page = PAGE_SIZE - page_off; + if (to_page > left) + to_page = left; + page = jnode_page(jnodes[i]); + if (page_offset(page) < inode->i_size && + !PageUptodate(page) && to_page != PAGE_SIZE) { + /* + * the above is not optimal for partial write to last + * page of file when file size is not at boundary of + * page + */ + lock_page(page); + if (!PageUptodate(page)) { + result = readpage_unix_file(NULL, page); + BUG_ON(result != 0); + /* wait for read completion */ + lock_page(page); + BUG_ON(!PageUptodate(page)); + } else + result = 0; + unlock_page(page); + } + + BUG_ON(get_current_context()->trans->atom != NULL); + fault_in_pages_readable(buf, to_page); + BUG_ON(get_current_context()->trans->atom != NULL); + + lock_page(page); + if (!PageUptodate(page) && to_page != PAGE_SIZE) + zero_user_segments(page, 0, page_off, + page_off + to_page, + 
PAGE_SIZE); + + written = filemap_copy_from_user(page, page_off, buf, to_page); + if (unlikely(written != to_page)) { + unlock_page(page); + result = RETERR(-EFAULT); + break; + } + + flush_dcache_page(page); + set_page_dirty_notag(page); + unlock_page(page); + nr_dirty++; + + mark_page_accessed(page); + SetPageUptodate(page); + + if (jnodes[i]->blocknr == 0) + have_to_update_extent ++; + + page_off = 0; + buf += to_page; + left -= to_page; + BUG_ON(get_current_context()->trans->atom != NULL); + } + + if (have_to_update_extent) { + update_extents(file, inode, jnodes, nr_dirty, *pos); + } else { + for (i = 0; i < nr_dirty; i ++) { + int ret; + spin_lock_jnode(jnodes[i]); + ret = reiser4_try_capture(jnodes[i], + ZNODE_WRITE_LOCK, 0); + BUG_ON(ret != 0); + jnode_make_dirty_locked(jnodes[i]); + spin_unlock_jnode(jnodes[i]); + } + } +out: + for (i = 0; i < nr_pages; i ++) { + put_page(jnode_page(jnodes[i])); + JF_CLR(jnodes[i], JNODE_WRITE_PREPARED); + jput(jnodes[i]); + } + + /* the only errors handled so far is ENOMEM and + EFAULT on copy_from_user */ + + return (count - left) ? (count - left) : result; +} + +int reiser4_do_readpage_extent(reiser4_extent * ext, reiser4_block_nr pos, + struct page *page) +{ + jnode *j; + struct address_space *mapping; + unsigned long index; + oid_t oid; + reiser4_block_nr block; + + mapping = page->mapping; + oid = get_inode_oid(mapping->host); + index = page->index; + + switch (state_of_extent(ext)) { + case HOLE_EXTENT: + /* + * it is possible to have hole page with jnode, if page was + * eflushed previously. + */ + j = jfind(mapping, index); + if (j == NULL) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + spin_lock_jnode(j); + if (!jnode_page(j)) { + jnode_attach_page(j, page); + } else { + BUG_ON(jnode_page(j) != page); + assert("vs-1504", jnode_page(j) == page); + } + block = *jnode_get_io_block(j); + spin_unlock_jnode(j); + if (block == 0) { + zero_user(page, 0, PAGE_SIZE); + SetPageUptodate(page); + unlock_page(page); + jput(j); + return 0; + } + break; + + case ALLOCATED_EXTENT: + j = jnode_of_page(page); + if (IS_ERR(j)) + return PTR_ERR(j); + if (*jnode_get_block(j) == 0) { + reiser4_block_nr blocknr; + + blocknr = extent_get_start(ext) + pos; + jnode_set_block(j, &blocknr); + } else + assert("vs-1403", + j->blocknr == extent_get_start(ext) + pos); + break; + + case UNALLOCATED_EXTENT: + j = jfind(mapping, index); + assert("nikita-2688", j); + assert("vs-1426", jnode_page(j) == NULL); + + spin_lock_jnode(j); + jnode_attach_page(j, page); + spin_unlock_jnode(j); + break; + + default: + warning("vs-957", "wrong extent\n"); + return RETERR(-EIO); + } + + BUG_ON(j == 0); + reiser4_page_io(page, j, READ, reiser4_ctx_gfp_mask_get()); + jput(j); + return 0; +} + +/* Implements plugin->u.item.s.file.read operation for extent items. 
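Both the write loop above and the read loop below walk a request one page at a time: only the first page may start at a non-zero in-page offset, and each chunk is capped by what is left of the request. A minimal stand-alone sketch of that chunking, assuming a 4 KiB page size (the offsets and byte counts are made up):

// Stand-alone sketch of the per-page chunking used by the extent read and
// write loops (illustrative only; a 4 KiB page size is assumed here).
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE  (1UL << MODEL_PAGE_SHIFT)

int main(void)
{
        uint64_t pos = 10000, count = 9000;      // spans pages 2, 3 and 4
        uint64_t left = count;
        unsigned long index = pos >> MODEL_PAGE_SHIFT;
        unsigned long page_off = pos & (MODEL_PAGE_SIZE - 1);

        while (left > 0) {
                uint64_t chunk = MODEL_PAGE_SIZE - page_off;

                if (chunk > left)
                        chunk = left;
                printf("page %lu: copy %llu bytes at in-page offset %lu\n",
                       index, (unsigned long long)chunk, page_off);
                left -= chunk;
                page_off = 0;   // every page after the first starts at offset 0
                index++;
        }
        return 0;
}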
*/ +int reiser4_read_extent(struct file *file, flow_t *flow, hint_t *hint) +{ + int result; + struct page *page; + unsigned long page_idx; + unsigned long page_off; /* offset within the page to start read from */ + unsigned long page_cnt; /* bytes which can be read from the page which + contains file_off */ + struct address_space *mapping; + loff_t file_off; /* offset in a file to start read from */ + uf_coord_t *uf_coord; + coord_t *coord; + struct extent_coord_extension *ext_coord; + char *kaddr; + + assert("vs-1353", current_blocksize == PAGE_SIZE); + assert("vs-572", flow->user == 1); + assert("vs-1351", flow->length > 0); + + uf_coord = &hint->ext_coord; + + check_uf_coord(uf_coord, NULL); + assert("vs-33", uf_coord->lh == &hint->lh); + + coord = &uf_coord->coord; + assert("vs-1119", znode_is_rlocked(coord->node)); + assert("vs-1120", znode_is_loaded(coord->node)); + assert("vs-1256", coord_matches_key_extent(coord, &flow->key)); + + mapping = file_inode(file)->i_mapping; + ext_coord = &uf_coord->extension.extent; + + file_off = get_key_offset(&flow->key); + page_off = (unsigned long)(file_off & (PAGE_SIZE - 1)); + page_cnt = PAGE_SIZE - page_off; + + page_idx = (unsigned long)(file_off >> PAGE_SHIFT); + + /* we start having twig node read locked. However, we do not want to + keep that lock all the time readahead works. So, set a seal and + release twig node. */ + reiser4_set_hint(hint, &flow->key, ZNODE_READ_LOCK); + /* &hint->lh is done-ed */ + + do { + reiser4_txn_restart_current(); + page = read_mapping_page(mapping, page_idx, file); + if (IS_ERR(page)) + return PTR_ERR(page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + put_page(page); + warning("jmacd-97178", + "extent_read: page is not up to date"); + return RETERR(-EIO); + } + mark_page_accessed(page); + unlock_page(page); + + /* If users can be writing to this page using arbitrary virtual + addresses, take care about potential aliasing before reading + the page on the kernel side. 
+ */ + if (mapping_writably_mapped(mapping)) + flush_dcache_page(page); + + assert("nikita-3034", reiser4_schedulable()); + + /* number of bytes which are to be read from the page */ + if (page_cnt > flow->length) + page_cnt = flow->length; + + result = fault_in_pages_writeable(flow->data, page_cnt); + if (result) { + put_page(page); + return RETERR(-EFAULT); + } + + kaddr = kmap_atomic(page); + result = __copy_to_user_inatomic(flow->data, + kaddr + page_off, page_cnt); + kunmap_atomic(kaddr); + if (result != 0) { + kaddr = kmap(page); + result = __copy_to_user(flow->data, + kaddr + page_off, page_cnt); + kunmap(page); + if (unlikely(result)) + return RETERR(-EFAULT); + } + put_page(page); + + /* increase (flow->key) offset, + * update (flow->data) user area pointer + */ + move_flow_forward(flow, page_cnt); + + page_off = 0; + page_idx++; + + } while (flow->length); + return 0; +} + +/* + * plugin->s.file.readpage + * + * reiser4_read->unix_file_read->page_cache_readahead-> + * ->reiser4_readpage_dispatch->readpage_unix_file->readpage_extent + * or + * filemap_fault->reiser4_readpage_dispatch->readpage_unix_file-> + * ->readpage_extent + * + * At the beginning: coord->node is read locked, zloaded, page is + * locked, coord is set to existing unit inside of extent item (it + * is not necessary that coord matches to page->index) + */ +int reiser4_readpage_extent(void *vp, struct page *page) +{ + uf_coord_t *uf_coord = vp; + ON_DEBUG(coord_t * coord = &uf_coord->coord); + ON_DEBUG(reiser4_key key); + + assert("vs-1040", PageLocked(page)); + assert("vs-1050", !PageUptodate(page)); + assert("vs-1039", page->mapping && page->mapping->host); + + assert("vs-1044", znode_is_loaded(coord->node)); + assert("vs-758", item_is_extent(coord)); + assert("vs-1046", coord_is_existing_unit(coord)); + assert("vs-1045", znode_is_rlocked(coord->node)); + assert("vs-1047", + page->mapping->host->i_ino == + get_key_objectid(item_key_by_coord(coord, &key))); + check_uf_coord(uf_coord, NULL); + + return reiser4_do_readpage_extent(ext_by_ext_coord(uf_coord), + uf_coord->extension.extent.pos_in_unit, + page); +} + +int get_block_address_extent(const coord_t *coord, sector_t block, + sector_t *result) +{ + reiser4_extent *ext; + + if (!coord_is_existing_unit(coord)) + return RETERR(-EINVAL); + + ext = extent_by_coord(coord); + + if (state_of_extent(ext) != ALLOCATED_EXTENT) + /* FIXME: bad things may happen if it is unallocated extent */ + *result = 0; + else { + reiser4_key key; + + unit_key_by_coord(coord, &key); + assert("vs-1645", + block >= get_key_offset(&key) >> current_blocksize_bits); + assert("vs-1646", + block < + (get_key_offset(&key) >> current_blocksize_bits) + + extent_get_width(ext)); + *result = + extent_get_start(ext) + (block - + (get_key_offset(&key) >> + current_blocksize_bits)); + } + return 0; +} + +/* + plugin->u.item.s.file.append_key + key of first byte which is the next to last byte by addressed by this extent +*/ +reiser4_key *append_key_extent(const coord_t * coord, reiser4_key * key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, + get_key_offset(key) + reiser4_extent_size(coord, + nr_units_extent + (coord))); + + assert("vs-610", get_key_offset(key) + && (get_key_offset(key) & (current_blocksize - 1)) == 0); + return key; +} + +/* plugin->u.item.s.file.init_coord_extension */ +void init_coord_extension_extent(uf_coord_t * uf_coord, loff_t lookuped) +{ + coord_t *coord; + struct extent_coord_extension *ext_coord; + reiser4_key key; + loff_t offset; + + assert("vs-1295", 
uf_coord->valid == 0); + + coord = &uf_coord->coord; + assert("vs-1288", coord_is_iplug_set(coord)); + assert("vs-1327", znode_is_loaded(coord->node)); + + if (coord->between != AFTER_UNIT && coord->between != AT_UNIT) + return; + + ext_coord = &uf_coord->extension.extent; + ext_coord->nr_units = nr_units_extent(coord); + ext_coord->ext_offset = + (char *)extent_by_coord(coord) - zdata(coord->node); + ext_coord->width = extent_get_width(extent_by_coord(coord)); + ON_DEBUG(ext_coord->extent = *extent_by_coord(coord)); + uf_coord->valid = 1; + + /* pos_in_unit is the only uninitialized field in extended coord */ + if (coord->between == AFTER_UNIT) { + assert("vs-1330", + coord->unit_pos == nr_units_extent(coord) - 1); + + ext_coord->pos_in_unit = ext_coord->width - 1; + } else { + /* AT_UNIT */ + unit_key_by_coord(coord, &key); + offset = get_key_offset(&key); + + assert("vs-1328", offset <= lookuped); + assert("vs-1329", + lookuped < + offset + ext_coord->width * current_blocksize); + ext_coord->pos_in_unit = + ((lookuped - offset) >> current_blocksize_bits); + } +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/extent_flush_ops.c b/fs/reiser4/plugin/item/extent_flush_ops.c new file mode 100644 index 000000000000..34bd946e7bc6 --- /dev/null +++ b/fs/reiser4/plugin/item/extent_flush_ops.c @@ -0,0 +1,686 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../tree.h" +#include "../../jnode.h" +#include "../../super.h" +#include "../../flush.h" +#include "../../carry.h" +#include "../object.h" + +#include <linux/pagemap.h> + +static reiser4_block_nr extent_unit_start(const coord_t * item); + +/* Return either first or last extent (depending on @side) of the item + @coord is set to. Set @pos_in_unit either to first or to last block + of extent. */ +static reiser4_extent *extent_utmost_ext(const coord_t * coord, sideof side, + reiser4_block_nr * pos_in_unit) +{ + reiser4_extent *ext; + + if (side == LEFT_SIDE) { + /* get first extent of item */ + ext = extent_item(coord); + *pos_in_unit = 0; + } else { + /* get last extent of item and last position within it */ + assert("vs-363", side == RIGHT_SIDE); + ext = extent_item(coord) + coord_last_unit_pos(coord); + *pos_in_unit = extent_get_width(ext) - 1; + } + + return ext; +} + +/* item_plugin->f.utmost_child */ +/* Return the child. Coord is set to extent item. 
Find jnode corresponding + either to first or to last unformatted node pointed by the item */ +int utmost_child_extent(const coord_t * coord, sideof side, jnode ** childp) +{ + reiser4_extent *ext; + reiser4_block_nr pos_in_unit; + + ext = extent_utmost_ext(coord, side, &pos_in_unit); + + switch (state_of_extent(ext)) { + case HOLE_EXTENT: + *childp = NULL; + return 0; + case ALLOCATED_EXTENT: + case UNALLOCATED_EXTENT: + break; + default: + /* this should never happen */ + assert("vs-1417", 0); + } + + { + reiser4_key key; + reiser4_tree *tree; + unsigned long index; + + if (side == LEFT_SIDE) { + /* get key of first byte addressed by the extent */ + item_key_by_coord(coord, &key); + } else { + /* get key of byte which next after last byte addressed by the extent */ + append_key_extent(coord, &key); + } + + assert("vs-544", + (get_key_offset(&key) >> PAGE_SHIFT) < ~0ul); + /* index of first or last (depending on @side) page addressed + by the extent */ + index = + (unsigned long)(get_key_offset(&key) >> PAGE_SHIFT); + if (side == RIGHT_SIDE) + index--; + + tree = coord->node->zjnode.tree; + *childp = jlookup(tree, get_key_objectid(&key), index); + } + + return 0; +} + +/* item_plugin->f.utmost_child_real_block */ +/* Return the child's block, if allocated. */ +int +utmost_child_real_block_extent(const coord_t * coord, sideof side, + reiser4_block_nr * block) +{ + reiser4_extent *ext; + + ext = extent_by_coord(coord); + + switch (state_of_extent(ext)) { + case ALLOCATED_EXTENT: + *block = extent_get_start(ext); + if (side == RIGHT_SIDE) + *block += extent_get_width(ext) - 1; + break; + case HOLE_EXTENT: + case UNALLOCATED_EXTENT: + *block = 0; + break; + default: + /* this should never happen */ + assert("vs-1418", 0); + } + + return 0; +} + +/* item_plugin->f.scan */ +/* Performs leftward scanning starting from an unformatted node and its parent coordinate. + This scan continues, advancing the parent coordinate, until either it encounters a + formatted child or it finishes scanning this node. + + If unallocated, the entire extent must be dirty and in the same atom. (Actually, I'm + not sure this is last property (same atom) is enforced, but it should be the case since + one atom must write the parent and the others must read the parent, thus fusing?). In + any case, the code below asserts this case for unallocated extents. Unallocated + extents are thus optimized because we can skip to the endpoint when scanning. + + It returns control to reiser4_scan_extent, handles these terminating conditions, + e.g., by loading the next twig. +*/ +int reiser4_scan_extent(flush_scan * scan) +{ + coord_t coord; + jnode *neighbor; + unsigned long scan_index, unit_index, unit_width, scan_max, scan_dist; + reiser4_block_nr unit_start; + __u64 oid; + reiser4_key key; + int ret = 0, allocated, incr; + reiser4_tree *tree; + + if (!JF_ISSET(scan->node, JNODE_DIRTY)) { + scan->stop = 1; + return 0; /* Race with truncate, this node is already + * truncated. */ + } + + coord_dup(&coord, &scan->parent_coord); + + assert("jmacd-1404", !reiser4_scan_finished(scan)); + assert("jmacd-1405", jnode_get_level(scan->node) == LEAF_LEVEL); + assert("jmacd-1406", jnode_is_unformatted(scan->node)); + + /* The scan_index variable corresponds to the current page index of the + unformatted block scan position. 
*/ + scan_index = index_jnode(scan->node); + + assert("jmacd-7889", item_is_extent(&coord)); + + repeat: + /* objectid of file */ + oid = get_key_objectid(item_key_by_coord(&coord, &key)); + + allocated = !extent_is_unallocated(&coord); + /* Get the values of this extent unit: */ + unit_index = extent_unit_index(&coord); + unit_width = extent_unit_width(&coord); + unit_start = extent_unit_start(&coord); + + assert("jmacd-7187", unit_width > 0); + assert("jmacd-7188", scan_index >= unit_index); + assert("jmacd-7189", scan_index <= unit_index + unit_width - 1); + + /* Depending on the scan direction, we set different maximum values for scan_index + (scan_max) and the number of nodes that would be passed if the scan goes the + entire way (scan_dist). Incr is an integer reflecting the incremental + direction of scan_index. */ + if (reiser4_scanning_left(scan)) { + scan_max = unit_index; + scan_dist = scan_index - unit_index; + incr = -1; + } else { + scan_max = unit_index + unit_width - 1; + scan_dist = scan_max - unit_index; + incr = +1; + } + + tree = coord.node->zjnode.tree; + + /* If the extent is allocated we have to check each of its blocks. If the extent + is unallocated we can skip to the scan_max. */ + if (allocated) { + do { + neighbor = jlookup(tree, oid, scan_index); + if (neighbor == NULL) + goto stop_same_parent; + + if (scan->node != neighbor + && !reiser4_scan_goto(scan, neighbor)) { + /* @neighbor was jput() by reiser4_scan_goto */ + goto stop_same_parent; + } + + ret = scan_set_current(scan, neighbor, 1, &coord); + if (ret != 0) { + goto exit; + } + + /* reference to @neighbor is stored in @scan, no need + to jput(). */ + scan_index += incr; + + } while (incr + scan_max != scan_index); + + } else { + /* Optimized case for unallocated extents, skip to the end. */ + neighbor = jlookup(tree, oid, scan_max /*index */ ); + if (neighbor == NULL) { + /* Race with truncate */ + scan->stop = 1; + ret = 0; + goto exit; + } + + assert("zam-1043", + reiser4_blocknr_is_fake(jnode_get_block(neighbor))); + + ret = scan_set_current(scan, neighbor, scan_dist, &coord); + if (ret != 0) { + goto exit; + } + } + + if (coord_sideof_unit(&coord, scan->direction) == 0 + && item_is_extent(&coord)) { + /* Continue as long as there are more extent units. */ + + scan_index = + extent_unit_index(&coord) + + (reiser4_scanning_left(scan) ? + extent_unit_width(&coord) - 1 : 0); + goto repeat; + } + + if (0) { + stop_same_parent: + + /* If we are scanning left and we stop in the middle of an allocated + extent, we know the preceder immediately.. */ + /* middle of extent is (scan_index - unit_index) != 0. */ + if (reiser4_scanning_left(scan) && + (scan_index - unit_index) != 0) { + /* FIXME(B): Someone should step-through and verify that this preceder + calculation is indeed correct. */ + /* @unit_start is starting block (number) of extent + unit. Flush stopped at the @scan_index block from + the beginning of the file, which is (scan_index - + unit_index) block within extent. + */ + if (unit_start) { + /* skip preceder update when we are at hole */ + scan->preceder_blk = + unit_start + scan_index - unit_index; + check_preceder(scan->preceder_blk); + } + } + + /* In this case, we leave coord set to the parent of scan->node. */ + scan->stop = 1; + + } else { + /* In this case, we are still scanning, coord is set to the next item which is + either off-the-end of the node or not an extent. 
*/ + assert("jmacd-8912", scan->stop == 0); + assert("jmacd-7812", + (coord_is_after_sideof_unit(&coord, scan->direction) + || !item_is_extent(&coord))); + } + + ret = 0; + exit: + return ret; +} + +/** + * When on flush time unallocated extent is to be replaced with allocated one + * it may happen that one unallocated extent will have to be replaced with set + * of allocated extents. In this case insert_into_item will be called which may + * have to add new nodes into tree. Space for that is taken from inviolable + * reserve (5%). + */ +static reiser4_block_nr reserve_replace(void) +{ + reiser4_block_nr grabbed, needed; + + grabbed = get_current_context()->grabbed_blocks; + needed = estimate_one_insert_into_item(current_tree); + check_me("vpf-340", !reiser4_grab_space_force(needed, BA_RESERVED)); + return grabbed; +} + +static void free_replace_reserved(reiser4_block_nr grabbed) +{ + reiser4_context *ctx; + + ctx = get_current_context(); + grabbed2free(ctx, get_super_private(ctx->super), + ctx->grabbed_blocks - grabbed); +} + +/* Block offset of first block addressed by unit */ +__u64 extent_unit_index(const coord_t * item) +{ + reiser4_key key; + + assert("vs-648", coord_is_existing_unit(item)); + unit_key_by_coord(item, &key); + return get_key_offset(&key) >> current_blocksize_bits; +} + +/* AUDIT shouldn't return value be of reiser4_block_nr type? + Josh's answer: who knows? Is a "number of blocks" the same type as "block offset"? */ +__u64 extent_unit_width(const coord_t * item) +{ + assert("vs-649", coord_is_existing_unit(item)); + return width_by_coord(item); +} + +/* Starting block location of this unit */ +static reiser4_block_nr extent_unit_start(const coord_t * item) +{ + return extent_get_start(extent_by_coord(item)); +} + +/** + * split_allocated_extent - + * @coord: + * @pos_in_unit: + * + * replace allocated extent with two allocated extents + */ +int split_allocated_extent(coord_t *coord, reiser4_block_nr pos_in_unit) +{ + int result; + struct replace_handle *h; + reiser4_extent *ext; + reiser4_block_nr grabbed; + + ext = extent_by_coord(coord); + assert("vs-1410", state_of_extent(ext) == ALLOCATED_EXTENT); + assert("vs-1411", extent_get_width(ext) > pos_in_unit); + + h = kmalloc(sizeof(*h), reiser4_ctx_gfp_mask_get()); + if (h == NULL) + return RETERR(-ENOMEM); + h->coord = coord; + h->lh = znode_lh(coord->node); + h->pkey = &h->key; + unit_key_by_coord(coord, h->pkey); + set_key_offset(h->pkey, + (get_key_offset(h->pkey) + + pos_in_unit * current_blocksize)); + reiser4_set_extent(&h->overwrite, extent_get_start(ext), + pos_in_unit); + reiser4_set_extent(&h->new_extents[0], + extent_get_start(ext) + pos_in_unit, + extent_get_width(ext) - pos_in_unit); + h->nr_new_extents = 1; + h->flags = COPI_DONT_SHIFT_LEFT; + h->paste_key = h->key; + + /* reserve space for extent unit paste, @grabbed is reserved before */ + grabbed = reserve_replace(); + result = reiser4_replace_extent(h, 0 /* leave @coord set to overwritten + extent */); + /* restore reserved */ + free_replace_reserved(grabbed); + kfree(h); + return result; +} + +/* replace extent @ext by extent @replace. Try to merge @replace with previous extent of the item (if there is + one). 
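Gluing is possible only when the unit to the left is an ordinary allocated run that ends exactly where @replace starts. A minimal stand-alone sketch of that test follows; the model_ names and block numbers are made up for illustration.

// Stand-alone sketch of the gluing test: the left neighbour can absorb the
// replacement only when it is an allocated run ending exactly where the
// replacement starts (illustrative only).
#include <stdint.h>
#include <stdio.h>

struct model_run { uint64_t start, width; };    // start > 1 means "allocated"

static int model_can_glue(const struct model_run *left,
                          const struct model_run *replace)
{
        if (left->start <= 1)   // hole (0) or unallocated (1): no gluing
                return 0;
        return left->start + left->width == replace->start;
}

int main(void)
{
        struct model_run left    = { .start = 2000, .width = 10 }; // blocks 2000..2009
        struct model_run replace = { .start = 2010, .width = 4  }; // blocks 2010..2013

        if (model_can_glue(&left, &replace))
                printf("glue: left becomes start=2000 width=%llu\n",
                       (unsigned long long)(left.width + replace.width));
        return 0;
}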
Return 1 if it succeeded, 0 - otherwise */ +static int try_to_merge_with_left(coord_t *coord, reiser4_extent *ext, + reiser4_extent *replace) +{ + assert("vs-1415", extent_by_coord(coord) == ext); + + if (coord->unit_pos == 0 + || state_of_extent(ext - 1) != ALLOCATED_EXTENT) + /* @ext either does not exist or is not allocated extent */ + return 0; + if (extent_get_start(ext - 1) + extent_get_width(ext - 1) != + extent_get_start(replace)) + return 0; + + /* we can glue, widen previous unit */ + extent_set_width(ext - 1, + extent_get_width(ext - 1) + extent_get_width(replace)); + + if (extent_get_width(ext) != extent_get_width(replace)) { + /* make current extent narrower */ + if (state_of_extent(ext) == ALLOCATED_EXTENT) + extent_set_start(ext, + extent_get_start(ext) + + extent_get_width(replace)); + extent_set_width(ext, + extent_get_width(ext) - + extent_get_width(replace)); + } else { + /* current extent completely glued with its left neighbor, remove it */ + coord_t from, to; + + coord_dup(&from, coord); + from.unit_pos = nr_units_extent(coord) - 1; + coord_dup(&to, &from); + + /* currently cut from extent can cut either from the beginning or from the end. Move place which got + freed after unit removal to end of item */ + memmove(ext, ext + 1, + (from.unit_pos - + coord->unit_pos) * sizeof(reiser4_extent)); + /* wipe part of item which is going to be cut, so that node_check will not be confused */ + cut_node_content(&from, &to, NULL, NULL, NULL); + } + znode_make_dirty(coord->node); + /* move coord back */ + coord->unit_pos--; + return 1; +} + +/** + * convert_extent - replace extent with 2 ones + * @coord: coordinate of extent to be replaced + * @replace: extent to overwrite the one @coord is set to + * + * Overwrites extent @coord is set to and paste one extent unit after + * overwritten one if @replace is shorter than initial extent + */ +int convert_extent(coord_t *coord, reiser4_extent *replace) +{ + int result; + struct replace_handle *h; + reiser4_extent *ext; + reiser4_block_nr start, width, new_width; + reiser4_block_nr grabbed; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + start = extent_get_start(ext); + width = extent_get_width(ext); + new_width = extent_get_width(replace); + + assert("vs-1458", (state == UNALLOCATED_EXTENT || + state == ALLOCATED_EXTENT)); + assert("vs-1459", width >= new_width); + + if (try_to_merge_with_left(coord, ext, replace)) { + /* merged @replace with left neighbor. Current unit is either + removed or narrowed */ + return 0; + } + + if (width == new_width) { + /* replace current extent with @replace */ + *ext = *replace; + znode_make_dirty(coord->node); + return 0; + } + + h = kmalloc(sizeof(*h), reiser4_ctx_gfp_mask_get()); + if (h == NULL) + return RETERR(-ENOMEM); + h->coord = coord; + h->lh = znode_lh(coord->node); + h->pkey = &h->key; + unit_key_by_coord(coord, h->pkey); + set_key_offset(h->pkey, + (get_key_offset(h->pkey) + new_width * current_blocksize)); + h->overwrite = *replace; + + /* replace @ext with @replace and padding extent */ + reiser4_set_extent(&h->new_extents[0], + (state == ALLOCATED_EXTENT) ? 
+ (start + new_width) : + UNALLOCATED_EXTENT_START, + width - new_width); + h->nr_new_extents = 1; + h->flags = COPI_DONT_SHIFT_LEFT; + h->paste_key = h->key; + + /* reserve space for extent unit paste, @grabbed is reserved before */ + grabbed = reserve_replace(); + result = reiser4_replace_extent(h, 0 /* leave @coord set to overwritten + extent */); + + /* restore reserved */ + free_replace_reserved(grabbed); + kfree(h); + return result; +} + +/** + * assign_real_blocknrs + * @flush_pos: + * @oid: objectid of file jnodes to assign block number to belongs to + * @index: first jnode on the range + * @count: number of jnodes to assign block numbers to + * @first: start of allocated block range + * + * Assigns block numbers to each of @count jnodes. Index of first jnode is + * @index. Jnodes get lookuped with jlookup. + */ +void assign_real_blocknrs(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, reiser4_block_nr count, + reiser4_block_nr first) +{ + unsigned long i; + reiser4_tree *tree; + txn_atom *atom; + int nr; + + atom = atom_locked_by_fq(flush_pos->fq); + assert("vs-1468", atom); + BUG_ON(atom == NULL); + + nr = 0; + tree = current_tree; + for (i = 0; i < count; ++i, ++index) { + jnode *node; + + node = jlookup(tree, oid, index); + assert("", node != NULL); + BUG_ON(node == NULL); + + spin_lock_jnode(node); + assert("", !jnode_is_flushprepped(node)); + assert("vs-1475", node->atom == atom); + assert("vs-1476", atomic_read(&node->x_count) > 0); + + JF_CLR(node, JNODE_FLUSH_RESERVED); + jnode_set_block(node, &first); + unformatted_make_reloc(node, flush_pos->fq); + ON_DEBUG(count_jnode(node->atom, node, NODE_LIST(node), + FQ_LIST, 0)); + spin_unlock_jnode(node); + first++; + + atomic_dec(&node->x_count); + nr ++; + } + + spin_unlock_atom(atom); + return; +} + +/** + * allocated_extent_slum_size + * @flush_pos: + * @oid: + * @index: + * @count: + * + * + */ +int allocated_extent_slum_size(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, unsigned long count) +{ + unsigned long i; + reiser4_tree *tree; + txn_atom *atom; + int nr; + + atom = atom_locked_by_fq(reiser4_pos_fq(flush_pos)); + assert("vs-1468", atom); + + nr = 0; + tree = current_tree; + for (i = 0; i < count; ++i, ++index) { + jnode *node; + + node = jlookup(tree, oid, index); + if (!node) + break; + + if (jnode_check_flushprepped(node)) { + atomic_dec(&node->x_count); + break; + } + + if (node->atom != atom) { + /* + * this is possible on overwrite: extent_write may + * capture several unformatted nodes without capturing + * any formatted nodes. + */ + atomic_dec(&node->x_count); + break; + } + + assert("vs-1476", atomic_read(&node->x_count) > 1); + atomic_dec(&node->x_count); + nr ++; + } + + spin_unlock_atom(atom); + return nr; +} + +/* if @key is glueable to the item @coord is set to */ +static int must_insert(const coord_t *coord, const reiser4_key *key) +{ + reiser4_key last; + + if (item_id_by_coord(coord) == EXTENT_POINTER_ID + && keyeq(append_key_extent(coord, &last), key)) + return 0; + return 1; +} + +/** + * copy extent @copy to the end of @node. 
+ * It may have to either insert new item after the last one, + * or append last item, or modify last unit of last item to have + * greater width + */ +int put_unit_to_end(znode *node, + const reiser4_key *key, reiser4_extent *copy_ext) +{ + int result; + coord_t coord; + cop_insert_flag flags; + reiser4_extent *last_ext; + reiser4_item_data data; + + /* set coord after last unit in an item */ + coord_init_last_unit(&coord, node); + coord.between = AFTER_UNIT; + + flags = + COPI_DONT_SHIFT_LEFT | COPI_DONT_SHIFT_RIGHT | COPI_DONT_ALLOCATE; + if (must_insert(&coord, key)) { + result = + insert_by_coord(&coord, init_new_extent(&data, copy_ext, 1), + key, NULL /*lh */ , flags); + + } else { + /* try to glue with last unit */ + last_ext = extent_by_coord(&coord); + if (state_of_extent(last_ext) && + extent_get_start(last_ext) + extent_get_width(last_ext) == + extent_get_start(copy_ext)) { + /* widen last unit of node */ + extent_set_width(last_ext, + extent_get_width(last_ext) + + extent_get_width(copy_ext)); + znode_make_dirty(node); + return 0; + } + + /* FIXME: put an assertion here that we can not merge last unit in @node and new unit */ + result = + insert_into_item(&coord, NULL /*lh */ , key, + init_new_extent(&data, copy_ext, 1), + flags); + } + + assert("vs-438", result == 0 || result == -E_NODE_FULL); + return result; +} + +int key_by_offset_extent(struct inode *inode, loff_t off, reiser4_key * key) +{ + return key_by_inode_and_offset_common(inode, off, key); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/extent_item_ops.c b/fs/reiser4/plugin/item/extent_item_ops.c new file mode 100644 index 000000000000..f04f4af5d3f8 --- /dev/null +++ b/fs/reiser4/plugin/item/extent_item_ops.c @@ -0,0 +1,887 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../inode.h" +#include "../../tree_walk.h" /* check_sibling_list() */ +#include "../../page_cache.h" +#include "../../carry.h" + +/* item_plugin->b.max_key_inside */ +reiser4_key *max_key_inside_extent(const coord_t * coord, reiser4_key * key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, get_key_offset(reiser4_max_key())); + return key; +} + +/* item_plugin->b.can_contain_key + this checks whether @key of @data is matching to position set by @coord */ +int +can_contain_key_extent(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data * data) +{ + reiser4_key item_key; + + if (item_plugin_by_coord(coord) != data->iplug) + return 0; + + item_key_by_coord(coord, &item_key); + if (get_key_locality(key) != get_key_locality(&item_key) || + get_key_objectid(key) != get_key_objectid(&item_key) || + get_key_ordering(key) != get_key_ordering(&item_key)) + return 0; + + return 1; +} + +/* item_plugin->b.mergeable + first item is of extent type */ +/* Audited by: green(2002.06.13) */ +int mergeable_extent(const coord_t * p1, const coord_t * p2) +{ + reiser4_key key1, key2; + + assert("vs-299", item_id_by_coord(p1) == EXTENT_POINTER_ID); + /* FIXME-VS: Which is it? 
Assert or return 0 */ + if (item_id_by_coord(p2) != EXTENT_POINTER_ID) { + return 0; + } + + item_key_by_coord(p1, &key1); + item_key_by_coord(p2, &key2); + if (get_key_locality(&key1) != get_key_locality(&key2) || + get_key_objectid(&key1) != get_key_objectid(&key2) || + get_key_ordering(&key1) != get_key_ordering(&key2) || + get_key_type(&key1) != get_key_type(&key2)) + return 0; + if (get_key_offset(&key1) + + reiser4_extent_size(p1, nr_units_extent(p1)) != + get_key_offset(&key2)) + return 0; + return 1; +} + +/* item_plugin->b.nr_units */ +pos_in_node_t nr_units_extent(const coord_t * coord) +{ + /* length of extent item has to be multiple of extent size */ + assert("vs-1424", + (item_length_by_coord(coord) % sizeof(reiser4_extent)) == 0); + return item_length_by_coord(coord) / sizeof(reiser4_extent); +} + +/* item_plugin->b.lookup */ +lookup_result +lookup_extent(const reiser4_key * key, lookup_bias bias UNUSED_ARG, + coord_t * coord) +{ /* znode and item_pos are + set to an extent item to + look through */ + reiser4_key item_key; + reiser4_block_nr lookuped, offset; + unsigned i, nr_units; + reiser4_extent *ext; + unsigned blocksize; + unsigned char blocksize_bits; + + item_key_by_coord(coord, &item_key); + offset = get_key_offset(&item_key); + + /* key we are looking for must be greater than key of item @coord */ + assert("vs-414", keygt(key, &item_key)); + + assert("umka-99945", + !keygt(key, max_key_inside_extent(coord, &item_key))); + + ext = extent_item(coord); + assert("vs-1350", (char *)ext == (zdata(coord->node) + coord->offset)); + + blocksize = current_blocksize; + blocksize_bits = current_blocksize_bits; + + /* offset we are looking for */ + lookuped = get_key_offset(key); + + nr_units = nr_units_extent(coord); + /* go through all extents until the one which address given offset */ + for (i = 0; i < nr_units; i++, ext++) { + offset += (extent_get_width(ext) << blocksize_bits); + if (offset > lookuped) { + /* desired byte is somewhere in this extent */ + coord->unit_pos = i; + coord->between = AT_UNIT; + return CBK_COORD_FOUND; + } + } + + /* set coord after last unit */ + coord->unit_pos = nr_units - 1; + coord->between = AFTER_UNIT; + return CBK_COORD_FOUND; +} + +/* item_plugin->b.paste + item @coord is set to has been appended with @data->length of free + space. data->data contains data to be pasted into the item in position + @coord->in_item.unit_pos. It must fit into that free space. + @coord must be set between units. 
+*/ +int +paste_extent(coord_t * coord, reiser4_item_data * data, + carry_plugin_info * info UNUSED_ARG) +{ + unsigned old_nr_units; + reiser4_extent *ext; + int item_length; + + ext = extent_item(coord); + item_length = item_length_by_coord(coord); + old_nr_units = (item_length - data->length) / sizeof(reiser4_extent); + + /* this is also used to copy extent into newly created item, so + old_nr_units could be 0 */ + assert("vs-260", item_length >= data->length); + + /* make sure that coord is set properly */ + assert("vs-35", + ((!coord_is_existing_unit(coord)) + || (!old_nr_units && !coord->unit_pos))); + + /* first unit to be moved */ + switch (coord->between) { + case AFTER_UNIT: + coord->unit_pos++; + case BEFORE_UNIT: + coord->between = AT_UNIT; + break; + case AT_UNIT: + assert("vs-331", !old_nr_units && !coord->unit_pos); + break; + default: + impossible("vs-330", "coord is set improperly"); + } + + /* prepare space for new units */ + memmove(ext + coord->unit_pos + data->length / sizeof(reiser4_extent), + ext + coord->unit_pos, + (old_nr_units - coord->unit_pos) * sizeof(reiser4_extent)); + + /* copy new data from kernel space */ + assert("vs-556", data->user == 0); + memcpy(ext + coord->unit_pos, data->data, (unsigned)data->length); + + /* after paste @coord is set to first of pasted units */ + assert("vs-332", coord_is_existing_unit(coord)); + assert("vs-333", + !memcmp(data->data, extent_by_coord(coord), + (unsigned)data->length)); + return 0; +} + +/* item_plugin->b.can_shift */ +int +can_shift_extent(unsigned free_space, coord_t * source, + znode * target UNUSED_ARG, shift_direction pend UNUSED_ARG, + unsigned *size, unsigned want) +{ + *size = item_length_by_coord(source); + if (*size > free_space) + /* never split a unit of extent item */ + *size = free_space - free_space % sizeof(reiser4_extent); + + /* we can shift *size bytes, calculate how many do we want to shift */ + if (*size > want * sizeof(reiser4_extent)) + *size = want * sizeof(reiser4_extent); + + if (*size % sizeof(reiser4_extent) != 0) + impossible("vs-119", "Wrong extent size: %i %zd", *size, + sizeof(reiser4_extent)); + return *size / sizeof(reiser4_extent); + +} + +/* item_plugin->b.copy_units */ +void +copy_units_extent(coord_t * target, coord_t * source, + unsigned from, unsigned count, + shift_direction where_is_free_space, unsigned free_space) +{ + char *from_ext, *to_ext; + + assert("vs-217", free_space == count * sizeof(reiser4_extent)); + + from_ext = item_body_by_coord(source); + to_ext = item_body_by_coord(target); + + if (where_is_free_space == SHIFT_LEFT) { + assert("vs-215", from == 0); + + /* At this moment, item length was already updated in the item + header by shifting code, hence nr_units_extent() will + return "new" number of units---one we obtain after copying + units. 
+ */ + to_ext += + (nr_units_extent(target) - count) * sizeof(reiser4_extent); + } else { + reiser4_key key; + coord_t coord; + + assert("vs-216", + from + count == coord_last_unit_pos(source) + 1); + + from_ext += item_length_by_coord(source) - free_space; + + /* new units are inserted before first unit in an item, + therefore, we have to update item key */ + coord = *source; + coord.unit_pos = from; + unit_key_extent(&coord, &key); + + node_plugin_by_node(target->node)->update_item_key(target, &key, + NULL /*info */); + } + + memcpy(to_ext, from_ext, free_space); +} + +/* item_plugin->b.create_hook + @arg is znode of leaf node for which we need to update right delimiting key */ +int create_hook_extent(const coord_t * coord, void *arg) +{ + coord_t *child_coord; + znode *node; + reiser4_key key; + reiser4_tree *tree; + + if (!arg) + return 0; + + child_coord = arg; + tree = znode_get_tree(coord->node); + + assert("nikita-3246", znode_get_level(child_coord->node) == LEAF_LEVEL); + + write_lock_tree(tree); + write_lock_dk(tree); + /* find a node on the left level for which right delimiting key has to + be updated */ + if (coord_wrt(child_coord) == COORD_ON_THE_LEFT) { + assert("vs-411", znode_is_left_connected(child_coord->node)); + node = child_coord->node->left; + } else { + assert("vs-412", coord_wrt(child_coord) == COORD_ON_THE_RIGHT); + node = child_coord->node; + assert("nikita-3314", node != NULL); + } + + if (node != NULL) { + znode_set_rd_key(node, item_key_by_coord(coord, &key)); + + assert("nikita-3282", check_sibling_list(node)); + /* break sibling links */ + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && node->right) { + ON_DEBUG(node->right->left_version = + atomic_inc_return(&delim_key_version); + node->right_version = + atomic_inc_return(&delim_key_version);); + + node->right->left = NULL; + node->right = NULL; + } + } + write_unlock_dk(tree); + write_unlock_tree(tree); + return 0; +} + +#define ITEM_TAIL_KILLED 0 +#define ITEM_HEAD_KILLED 1 +#define ITEM_KILLED 2 + +/* item_plugin->b.kill_hook + this is called when @count units starting from @from-th one are going to be removed + */ +int +kill_hook_extent(const coord_t * coord, pos_in_node_t from, pos_in_node_t count, + struct carry_kill_data *kdata) +{ + reiser4_extent *ext; + reiser4_block_nr start, length; + const reiser4_key *pfrom_key, *pto_key; + struct inode *inode; + reiser4_tree *tree; + pgoff_t from_off, to_off, offset, skip; + int retval; + + /* these are located in memory kmalloc-ed by kill_node_content */ + reiser4_key *min_item_key, *max_item_key, *from_key, *to_key, *key; + coord_t *dup, *next; + + assert("zam-811", znode_is_write_locked(coord->node)); + assert("nikita-3315", kdata != NULL); + assert("vs-34", kdata->buf != NULL); + + /* map structures to kdata->buf */ + min_item_key = (reiser4_key *) (kdata->buf); + max_item_key = min_item_key + 1; + from_key = max_item_key + 1; + to_key = from_key + 1; + key = to_key + 1; + dup = (coord_t *) (key + 1); + next = dup + 1; + + item_key_by_coord(coord, min_item_key); + max_item_key_by_coord(coord, max_item_key); + + if (kdata->params.from_key) { + pfrom_key = kdata->params.from_key; + pto_key = kdata->params.to_key; + } else { + assert("vs-1549", from == coord->unit_pos); + unit_key_by_coord(coord, from_key); + pfrom_key = from_key; + + coord_dup(dup, coord); + dup->unit_pos = from + count - 1; + max_unit_key_by_coord(dup, to_key); + pto_key = to_key; + } + + if (!keylt(pto_key, max_item_key)) { + if (!keygt(pfrom_key, min_item_key)) { + znode *left, *right; + + 
/* item is to be removed completely */ + assert("nikita-3316", kdata->left != NULL + && kdata->right != NULL); + + left = kdata->left->node; + right = kdata->right->node; + + tree = current_tree; + /* we have to do two things: + * + * 1. link left and right formatted neighbors of + * extent being removed, and + * + * 2. update their delimiting keys. + * + * atomicity of these operations is protected by + * taking dk-lock and tree-lock. + */ + /* if neighbors of item being removed are znodes - + * link them */ + write_lock_tree(tree); + write_lock_dk(tree); + link_left_and_right(left, right); + if (left) { + /* update right delimiting key of left + * neighbor of extent item */ + /*coord_t next; + reiser4_key key; */ + + coord_dup(next, coord); + + if (coord_next_item(next)) + *key = *znode_get_rd_key(coord->node); + else + item_key_by_coord(next, key); + znode_set_rd_key(left, key); + } + write_unlock_dk(tree); + write_unlock_tree(tree); + + from_off = + get_key_offset(min_item_key) >> PAGE_SHIFT; + to_off = + (get_key_offset(max_item_key) + + 1) >> PAGE_SHIFT; + retval = ITEM_KILLED; + } else { + /* tail of item is to be removed */ + from_off = + (get_key_offset(pfrom_key) + PAGE_SIZE - + 1) >> PAGE_SHIFT; + to_off = + (get_key_offset(max_item_key) + + 1) >> PAGE_SHIFT; + retval = ITEM_TAIL_KILLED; + } + } else { + /* head of item is to be removed */ + assert("vs-1571", keyeq(pfrom_key, min_item_key)); + assert("vs-1572", + (get_key_offset(pfrom_key) & (PAGE_SIZE - 1)) == + 0); + assert("vs-1573", + ((get_key_offset(pto_key) + 1) & (PAGE_SIZE - + 1)) == 0); + + if (kdata->left->node) { + /* update right delimiting key of left neighbor of extent item */ + /*reiser4_key key; */ + + *key = *pto_key; + set_key_offset(key, get_key_offset(pto_key) + 1); + + write_lock_dk(current_tree); + znode_set_rd_key(kdata->left->node, key); + write_unlock_dk(current_tree); + } + + from_off = get_key_offset(pfrom_key) >> PAGE_SHIFT; + to_off = (get_key_offset(pto_key) + 1) >> PAGE_SHIFT; + retval = ITEM_HEAD_KILLED; + } + + inode = kdata->inode; + assert("vs-1545", inode != NULL); + if (inode != NULL) + /* take care of pages and jnodes corresponding to part of item being killed */ + reiser4_invalidate_pages(inode->i_mapping, from_off, + to_off - from_off, + kdata->params.truncate); + + ext = extent_item(coord) + from; + offset = + (get_key_offset(min_item_key) + + reiser4_extent_size(coord, from)) >> PAGE_SHIFT; + + assert("vs-1551", from_off >= offset); + assert("vs-1552", from_off - offset <= extent_get_width(ext)); + skip = from_off - offset; + offset = from_off; + + while (offset < to_off) { + length = extent_get_width(ext) - skip; + if (state_of_extent(ext) == HOLE_EXTENT) { + skip = 0; + offset += length; + ext++; + continue; + } + + if (offset + length > to_off) { + length = to_off - offset; + } + + inode_sub_blocks(inode, length); + + if (state_of_extent(ext) == UNALLOCATED_EXTENT) { + /* some jnodes corresponding to this unallocated extent */ + fake_allocated2free(length, 0 /* unformatted */ ); + + skip = 0; + offset += length; + ext++; + continue; + } + + assert("vs-1218", state_of_extent(ext) == ALLOCATED_EXTENT); + + if (length != 0) { + start = extent_get_start(ext) + skip; + + /* BA_DEFER bit parameter is turned on because blocks which get freed are not safe to be freed + immediately */ + reiser4_dealloc_blocks(&start, &length, + 0 /* not used */ , + BA_DEFER + /* unformatted with defer */ ); + } + skip = 0; + offset += length; + ext++; + } + return retval; +} + +/* item_plugin->b.kill_units */ 
+int +kill_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *kdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + reiser4_extent *ext; + reiser4_key item_key; + pos_in_node_t count; + reiser4_key from_key, to_key; + const reiser4_key *pfrom_key, *pto_key; + loff_t off; + int result; + + assert("vs-1541", + ((kdata->params.from_key == NULL && kdata->params.to_key == NULL) + || (kdata->params.from_key != NULL + && kdata->params.to_key != NULL))); + + if (kdata->params.from_key) { + pfrom_key = kdata->params.from_key; + pto_key = kdata->params.to_key; + } else { + coord_t dup; + + /* calculate key range of kill */ + assert("vs-1549", from == coord->unit_pos); + unit_key_by_coord(coord, &from_key); + pfrom_key = &from_key; + + coord_dup(&dup, coord); + dup.unit_pos = to; + max_unit_key_by_coord(&dup, &to_key); + pto_key = &to_key; + } + + item_key_by_coord(coord, &item_key); + +#if REISER4_DEBUG + { + reiser4_key max_item_key; + + max_item_key_by_coord(coord, &max_item_key); + + if (new_first) { + /* head of item is to be cut */ + assert("vs-1542", keyeq(pfrom_key, &item_key)); + assert("vs-1538", keylt(pto_key, &max_item_key)); + } else { + /* tail of item is to be cut */ + assert("vs-1540", keygt(pfrom_key, &item_key)); + assert("vs-1543", !keylt(pto_key, &max_item_key)); + } + } +#endif + + if (smallest_removed) + *smallest_removed = *pfrom_key; + + if (new_first) { + /* item head is cut. Item key will change. This new key is calculated here */ + assert("vs-1556", + (get_key_offset(pto_key) & (PAGE_SIZE - 1)) == + (PAGE_SIZE - 1)); + *new_first = *pto_key; + set_key_offset(new_first, get_key_offset(new_first) + 1); + } + + count = to - from + 1; + result = kill_hook_extent(coord, from, count, kdata); + if (result == ITEM_TAIL_KILLED) { + assert("vs-1553", + get_key_offset(pfrom_key) >= + get_key_offset(&item_key) + + reiser4_extent_size(coord, from)); + off = + get_key_offset(pfrom_key) - + (get_key_offset(&item_key) + + reiser4_extent_size(coord, from)); + if (off) { + /* unit @from is to be cut partially. 
Its width decreases */ + ext = extent_item(coord) + from; + extent_set_width(ext, + (off + PAGE_SIZE - + 1) >> PAGE_SHIFT); + count--; + } + } else { + __u64 max_to_offset; + __u64 rest; + + assert("vs-1575", result == ITEM_HEAD_KILLED); + assert("", from == 0); + assert("", + ((get_key_offset(pto_key) + 1) & (PAGE_SIZE - + 1)) == 0); + assert("", + get_key_offset(pto_key) + 1 > + get_key_offset(&item_key) + + reiser4_extent_size(coord, to)); + max_to_offset = + get_key_offset(&item_key) + + reiser4_extent_size(coord, to + 1) - 1; + assert("", get_key_offset(pto_key) <= max_to_offset); + + rest = + (max_to_offset - + get_key_offset(pto_key)) >> PAGE_SHIFT; + if (rest) { + /* unit @to is to be cut partially */ + ext = extent_item(coord) + to; + + assert("", extent_get_width(ext) > rest); + + if (state_of_extent(ext) == ALLOCATED_EXTENT) + extent_set_start(ext, + extent_get_start(ext) + + (extent_get_width(ext) - + rest)); + + extent_set_width(ext, rest); + count--; + } + } + return count * sizeof(reiser4_extent); +} + +/* item_plugin->b.cut_units + this is too similar to kill_units_extent */ +int +cut_units_extent(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *cdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + reiser4_extent *ext; + reiser4_key item_key; + pos_in_node_t count; + reiser4_key from_key, to_key; + const reiser4_key *pfrom_key, *pto_key; + loff_t off; + + assert("vs-1541", + ((cdata->params.from_key == NULL && cdata->params.to_key == NULL) + || (cdata->params.from_key != NULL + && cdata->params.to_key != NULL))); + + if (cdata->params.from_key) { + pfrom_key = cdata->params.from_key; + pto_key = cdata->params.to_key; + } else { + coord_t dup; + + /* calculate key range of kill */ + coord_dup(&dup, coord); + dup.unit_pos = from; + unit_key_by_coord(&dup, &from_key); + + dup.unit_pos = to; + max_unit_key_by_coord(&dup, &to_key); + + pfrom_key = &from_key; + pto_key = &to_key; + } + + assert("vs-1555", + (get_key_offset(pfrom_key) & (PAGE_SIZE - 1)) == 0); + assert("vs-1556", + (get_key_offset(pto_key) & (PAGE_SIZE - 1)) == + (PAGE_SIZE - 1)); + + item_key_by_coord(coord, &item_key); + +#if REISER4_DEBUG + { + reiser4_key max_item_key; + + assert("vs-1584", + get_key_locality(pfrom_key) == + get_key_locality(&item_key)); + assert("vs-1585", + get_key_type(pfrom_key) == get_key_type(&item_key)); + assert("vs-1586", + get_key_objectid(pfrom_key) == + get_key_objectid(&item_key)); + assert("vs-1587", + get_key_ordering(pfrom_key) == + get_key_ordering(&item_key)); + + max_item_key_by_coord(coord, &max_item_key); + + if (new_first != NULL) { + /* head of item is to be cut */ + assert("vs-1542", keyeq(pfrom_key, &item_key)); + assert("vs-1538", keylt(pto_key, &max_item_key)); + } else { + /* tail of item is to be cut */ + assert("vs-1540", keygt(pfrom_key, &item_key)); + assert("vs-1543", keyeq(pto_key, &max_item_key)); + } + } +#endif + + if (smallest_removed) + *smallest_removed = *pfrom_key; + + if (new_first) { + /* item head is cut. Item key will change. This new key is calculated here */ + *new_first = *pto_key; + set_key_offset(new_first, get_key_offset(new_first) + 1); + } + + count = to - from + 1; + + assert("vs-1553", + get_key_offset(pfrom_key) >= + get_key_offset(&item_key) + reiser4_extent_size(coord, from)); + off = + get_key_offset(pfrom_key) - (get_key_offset(&item_key) + + reiser4_extent_size(coord, from)); + if (off) { + /* tail of unit @from is to be cut partially. 
Its width decreases */ + assert("vs-1582", new_first == NULL); + ext = extent_item(coord) + from; + extent_set_width(ext, off >> PAGE_SHIFT); + count--; + } + + assert("vs-1554", + get_key_offset(pto_key) <= + get_key_offset(&item_key) + + reiser4_extent_size(coord, to + 1) - 1); + off = + (get_key_offset(&item_key) + + reiser4_extent_size(coord, to + 1) - 1) - + get_key_offset(pto_key); + if (off) { + /* @to_key is smaller than max key of unit @to. Unit @to will not be removed. It gets start increased + and width decreased. */ + assert("vs-1583", (off & (PAGE_SIZE - 1)) == 0); + ext = extent_item(coord) + to; + if (state_of_extent(ext) == ALLOCATED_EXTENT) + extent_set_start(ext, + extent_get_start(ext) + + (extent_get_width(ext) - + (off >> PAGE_SHIFT))); + + extent_set_width(ext, (off >> PAGE_SHIFT)); + count--; + } + return count * sizeof(reiser4_extent); +} + +/* item_plugin->b.unit_key */ +reiser4_key *unit_key_extent(const coord_t * coord, reiser4_key * key) +{ + assert("vs-300", coord_is_existing_unit(coord)); + + item_key_by_coord(coord, key); + set_key_offset(key, + (get_key_offset(key) + + reiser4_extent_size(coord, coord->unit_pos))); + + return key; +} + +/* item_plugin->b.max_unit_key */ +reiser4_key *max_unit_key_extent(const coord_t * coord, reiser4_key * key) +{ + assert("vs-300", coord_is_existing_unit(coord)); + + item_key_by_coord(coord, key); + set_key_offset(key, + (get_key_offset(key) + + reiser4_extent_size(coord, coord->unit_pos + 1) - 1)); + return key; +} + +/* item_plugin->b.estimate + item_plugin->b.item_data_by_flow */ + +#if REISER4_DEBUG + +/* item_plugin->b.check + used for debugging, every item should have here the most complete + possible check of the consistency of the item that the inventor can + construct +*/ +int reiser4_check_extent(const coord_t * coord /* coord of item to check */, + const char **error /* where to store error message */) +{ + reiser4_extent *ext, *first; + unsigned i, j; + reiser4_block_nr start, width, blk_cnt; + unsigned num_units; + reiser4_tree *tree; + oid_t oid; + reiser4_key key; + coord_t scan; + + assert("vs-933", REISER4_DEBUG); + + if (znode_get_level(coord->node) != TWIG_LEVEL) { + *error = "Extent on the wrong level"; + return -1; + } + if (item_length_by_coord(coord) % sizeof(reiser4_extent) != 0) { + *error = "Wrong item size"; + return -1; + } + ext = first = extent_item(coord); + blk_cnt = reiser4_block_count(reiser4_get_current_sb()); + num_units = coord_num_units(coord); + tree = znode_get_tree(coord->node); + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + coord_dup(&scan, coord); + + for (i = 0; i < num_units; ++i, ++ext) { + __u64 index; + + scan.unit_pos = i; + index = extent_unit_index(&scan); + +#if 0 + /* check that all jnodes are present for the unallocated + * extent */ + if (state_of_extent(ext) == UNALLOCATED_EXTENT) { + for (j = 0; j < extent_get_width(ext); j++) { + jnode *node; + + node = jlookup(tree, oid, index + j); + if (node == NULL) { + print_coord("scan", &scan, 0); + *error = "Jnode missing"; + return -1; + } + jput(node); + } + } +#endif + + start = extent_get_start(ext); + if (start < 2) + continue; + /* extent is allocated one */ + width = extent_get_width(ext); + if (start >= blk_cnt) { + *error = "Start too large"; + return -1; + } + if (start + width > blk_cnt) { + *error = "End too large"; + return -1; + } + /* make sure that this extent does not overlap with other + allocated extents extents */ + for (j = 0; j < i; j++) { + if (state_of_extent(first + j) != 
ALLOCATED_EXTENT) + continue; + if (! + ((extent_get_start(ext) >= + extent_get_start(first + j) + + extent_get_width(first + j)) + || (extent_get_start(ext) + + extent_get_width(ext) <= + extent_get_start(first + j)))) { + *error = "Extent overlaps with others"; + return -1; + } + } + + } + + return 0; +} + +#endif /* REISER4_DEBUG */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/item/internal.c b/fs/reiser4/plugin/item/internal.c new file mode 100644 index 000000000000..24cebb67eacf --- /dev/null +++ b/fs/reiser4/plugin/item/internal.c @@ -0,0 +1,404 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Implementation of internal-item plugin methods. */ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../key.h" +#include "../../coord.h" +#include "internal.h" +#include "item.h" +#include "../node/node.h" +#include "../plugin.h" +#include "../../jnode.h" +#include "../../znode.h" +#include "../../tree_walk.h" +#include "../../tree_mod.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../block_alloc.h" + +/* see internal.h for explanation */ + +/* plugin->u.item.b.mergeable */ +int mergeable_internal(const coord_t * p1 UNUSED_ARG /* first item */ , + const coord_t * p2 UNUSED_ARG /* second item */ ) +{ + /* internal items are not mergeable */ + return 0; +} + +/* ->lookup() method for internal items */ +lookup_result lookup_internal(const reiser4_key * key /* key to look up */ , + lookup_bias bias UNUSED_ARG /* lookup bias */ , + coord_t * coord /* coord of item */ ) +{ + reiser4_key ukey; + + switch (keycmp(unit_key_by_coord(coord, &ukey), key)) { + default: + impossible("", "keycmp()?!"); + case LESS_THAN: + /* FIXME-VS: AFTER_ITEM used to be here. But with new coord + item plugin can not be taken using coord set this way */ + assert("vs-681", coord->unit_pos == 0); + coord->between = AFTER_UNIT; + case EQUAL_TO: + return CBK_COORD_FOUND; + case GREATER_THAN: + return CBK_COORD_NOTFOUND; + } +} + +/* return body of internal item at @coord */ +static internal_item_layout *internal_at(const coord_t * coord /* coord of + * item */ ) +{ + assert("nikita-607", coord != NULL); + assert("nikita-1650", + item_plugin_by_coord(coord) == + item_plugin_by_id(NODE_POINTER_ID)); + return (internal_item_layout *) item_body_by_coord(coord); +} + +void reiser4_update_internal(const coord_t * coord, + const reiser4_block_nr * blocknr) +{ + internal_item_layout *item = internal_at(coord); + assert("nikita-2959", reiser4_blocknr_is_sane(blocknr)); + + put_unaligned(cpu_to_le64(*blocknr), &item->pointer); +} + +/* return child block number stored in the internal item at @coord */ +static reiser4_block_nr pointer_at(const coord_t * coord /* coord of item */ ) +{ + assert("nikita-608", coord != NULL); + return le64_to_cpu(get_unaligned(&internal_at(coord)->pointer)); +} + +/* get znode pointed to by internal @item */ +static znode *znode_at(const coord_t * item /* coord of item */ , + znode * parent /* parent node */ ) +{ + return child_znode(item, parent, 1, 0); +} + +/* store pointer from internal item into "block". 
Implementation of + ->down_link() method */ +void down_link_internal(const coord_t * coord /* coord of item */ , + const reiser4_key * key UNUSED_ARG /* key to get + * pointer for */ , + reiser4_block_nr * block /* resulting block number */ ) +{ + ON_DEBUG(reiser4_key item_key); + + assert("nikita-609", coord != NULL); + assert("nikita-611", block != NULL); + assert("nikita-612", (key == NULL) || + /* twig horrors */ + (znode_get_level(coord->node) == TWIG_LEVEL) + || keyle(item_key_by_coord(coord, &item_key), key)); + + *block = pointer_at(coord); + assert("nikita-2960", reiser4_blocknr_is_sane(block)); +} + +/* Get the child's block number, or 0 if the block is unallocated. */ +int +utmost_child_real_block_internal(const coord_t * coord, sideof side UNUSED_ARG, + reiser4_block_nr * block) +{ + assert("jmacd-2059", coord != NULL); + + *block = pointer_at(coord); + assert("nikita-2961", reiser4_blocknr_is_sane(block)); + + if (reiser4_blocknr_is_fake(block)) { + *block = 0; + } + + return 0; +} + +/* Return the child. */ +int +utmost_child_internal(const coord_t * coord, sideof side UNUSED_ARG, + jnode ** childp) +{ + reiser4_block_nr block = pointer_at(coord); + znode *child; + + assert("jmacd-2059", childp != NULL); + assert("nikita-2962", reiser4_blocknr_is_sane(&block)); + + child = zlook(znode_get_tree(coord->node), &block); + + if (IS_ERR(child)) { + return PTR_ERR(child); + } + + *childp = ZJNODE(child); + + return 0; +} + +#if REISER4_DEBUG + +static void check_link(znode * left, znode * right) +{ + znode *scan; + + for (scan = left; scan != right; scan = scan->right) { + if (ZF_ISSET(scan, JNODE_RIP)) + break; + if (znode_is_right_connected(scan) && scan->right != NULL) { + if (ZF_ISSET(scan->right, JNODE_RIP)) + break; + assert("nikita-3285", + znode_is_left_connected(scan->right)); + assert("nikita-3265", + ergo(scan != left, + ZF_ISSET(scan, JNODE_HEARD_BANSHEE))); + assert("nikita-3284", scan->right->left == scan); + } else + break; + } +} + +int check__internal(const coord_t * coord, const char **error) +{ + reiser4_block_nr blk; + znode *child; + coord_t cpy; + + blk = pointer_at(coord); + if (!reiser4_blocknr_is_sane(&blk)) { + *error = "Invalid pointer"; + return -1; + } + coord_dup(&cpy, coord); + child = znode_at(&cpy, cpy.node); + if (child != NULL) { + znode *left_child; + znode *right_child; + + left_child = right_child = NULL; + + assert("nikita-3256", znode_invariant(child)); + if (coord_prev_item(&cpy) == 0 && item_is_internal(&cpy)) { + left_child = znode_at(&cpy, cpy.node); + if (left_child != NULL) { + read_lock_tree(znode_get_tree(child)); + check_link(left_child, child); + read_unlock_tree(znode_get_tree(child)); + zput(left_child); + } + } + coord_dup(&cpy, coord); + if (coord_next_item(&cpy) == 0 && item_is_internal(&cpy)) { + right_child = znode_at(&cpy, cpy.node); + if (right_child != NULL) { + read_lock_tree(znode_get_tree(child)); + check_link(child, right_child); + read_unlock_tree(znode_get_tree(child)); + zput(right_child); + } + } + zput(child); + } + return 0; +} + +#endif /* REISER4_DEBUG */ + +/* return true only if this item really points to "block" */ +/* Audited by: green(2002.06.14) */ +int has_pointer_to_internal(const coord_t * coord /* coord of item */ , + const reiser4_block_nr * block /* block number to + * check */ ) +{ + assert("nikita-613", coord != NULL); + assert("nikita-614", block != NULL); + + return pointer_at(coord) == *block; +} + +/* hook called by ->create_item() method of node plugin after new internal + item was just 
created. + + This is point where pointer to new node is inserted into tree. Initialize + parent pointer in child znode, insert child into sibling list and slum. + +*/ +int create_hook_internal(const coord_t * item /* coord of item */ , + void *arg /* child's left neighbor, if any */ ) +{ + znode *child; + __u64 child_ptr; + + assert("nikita-1252", item != NULL); + assert("nikita-1253", item->node != NULL); + assert("nikita-1181", znode_get_level(item->node) > LEAF_LEVEL); + assert("nikita-1450", item->unit_pos == 0); + + /* + * preparing to item insertion build_child_ptr_data sets pointer to + * data to be inserted to jnode's blocknr which is in cpu byte + * order. Node's create_item simply copied those data. As result we + * have child pointer in cpu's byte order. Convert content of internal + * item to little endian byte order. + */ + child_ptr = get_unaligned((__u64 *)item_body_by_coord(item)); + reiser4_update_internal(item, &child_ptr); + + child = znode_at(item, item->node); + if (child != NULL && !IS_ERR(child)) { + znode *left; + int result = 0; + reiser4_tree *tree; + + left = arg; + tree = znode_get_tree(item->node); + write_lock_tree(tree); + write_lock_dk(tree); + assert("nikita-1400", (child->in_parent.node == NULL) + || (znode_above_root(child->in_parent.node))); + ++item->node->c_count; + coord_to_parent_coord(item, &child->in_parent); + sibling_list_insert_nolock(child, left); + + assert("nikita-3297", ZF_ISSET(child, JNODE_ORPHAN)); + ZF_CLR(child, JNODE_ORPHAN); + + if ((left != NULL) && !keyeq(znode_get_rd_key(left), + znode_get_rd_key(child))) { + znode_set_rd_key(child, znode_get_rd_key(left)); + } + write_unlock_dk(tree); + write_unlock_tree(tree); + zput(child); + return result; + } else { + if (child == NULL) + child = ERR_PTR(-EIO); + return PTR_ERR(child); + } +} + +/* hook called by ->cut_and_kill() method of node plugin just before internal + item is removed. + + This is point where empty node is removed from the tree. Clear parent + pointer in child, and mark node for pending deletion. + + Node will be actually deleted later and in several installations: + + . when last lock on this node will be released, node will be removed from + the sibling list and its lock will be invalidated + + . when last reference to this node will be dropped, bitmap will be updated + and node will be actually removed from the memory. 
+ +*/ +int kill_hook_internal(const coord_t * item /* coord of item */ , + pos_in_node_t from UNUSED_ARG /* start unit */ , + pos_in_node_t count UNUSED_ARG /* stop unit */ , + struct carry_kill_data *p UNUSED_ARG) +{ + znode *child; + int result = 0; + + assert("nikita-1222", item != NULL); + assert("nikita-1224", from == 0); + assert("nikita-1225", count == 1); + + child = znode_at(item, item->node); + if (child == NULL) + return 0; + if (IS_ERR(child)) + return PTR_ERR(child); + result = zload(child); + if (result) { + zput(child); + return result; + } + if (node_is_empty(child)) { + reiser4_tree *tree; + + assert("nikita-1397", znode_is_write_locked(child)); + assert("nikita-1398", child->c_count == 0); + assert("nikita-2546", ZF_ISSET(child, JNODE_HEARD_BANSHEE)); + + tree = znode_get_tree(item->node); + write_lock_tree(tree); + init_parent_coord(&child->in_parent, NULL); + --item->node->c_count; + write_unlock_tree(tree); + } else { + warning("nikita-1223", + "Cowardly refuse to remove link to non-empty node"); + result = RETERR(-EIO); + } + zrelse(child); + zput(child); + return result; +} + +/* hook called by ->shift() node plugin method when iternal item was just + moved from one node to another. + + Update parent pointer in child and c_counts in old and new parent + +*/ +int shift_hook_internal(const coord_t * item /* coord of item */ , + unsigned from UNUSED_ARG /* start unit */ , + unsigned count UNUSED_ARG /* stop unit */ , + znode * old_node /* old parent */ ) +{ + znode *child; + znode *new_node; + reiser4_tree *tree; + + assert("nikita-1276", item != NULL); + assert("nikita-1277", from == 0); + assert("nikita-1278", count == 1); + assert("nikita-1451", item->unit_pos == 0); + + new_node = item->node; + assert("nikita-2132", new_node != old_node); + tree = znode_get_tree(item->node); + child = child_znode(item, old_node, 1, 0); + if (child == NULL) + return 0; + if (!IS_ERR(child)) { + write_lock_tree(tree); + ++new_node->c_count; + assert("nikita-1395", znode_parent(child) == old_node); + assert("nikita-1396", old_node->c_count > 0); + coord_to_parent_coord(item, &child->in_parent); + assert("nikita-1781", znode_parent(child) == new_node); + assert("nikita-1782", + check_tree_pointer(item, child) == NS_FOUND); + --old_node->c_count; + write_unlock_tree(tree); + zput(child); + return 0; + } else + return PTR_ERR(child); +} + +/* plugin->u.item.b.max_key_inside - not defined */ + +/* plugin->u.item.b.nr_units - item.c:single_unit */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/internal.h b/fs/reiser4/plugin/item/internal.h new file mode 100644 index 000000000000..27aa27d7fb08 --- /dev/null +++ b/fs/reiser4/plugin/item/internal.h @@ -0,0 +1,57 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* Internal item contains down-link to the child of the internal/twig + node in a tree. It is internal items that are actually used during + tree traversal. 
*/ + +#if !defined( __FS_REISER4_PLUGIN_ITEM_INTERNAL_H__ ) +#define __FS_REISER4_PLUGIN_ITEM_INTERNAL_H__ + +#include "../../forward.h" +#include "../../dformat.h" + +/* on-disk layout of internal item */ +typedef struct internal_item_layout { + /* 0 */ reiser4_dblock_nr pointer; + /* 4 */ +} internal_item_layout; + +struct cut_list; + +int mergeable_internal(const coord_t * p1, const coord_t * p2); +lookup_result lookup_internal(const reiser4_key * key, lookup_bias bias, + coord_t * coord); +/* store pointer from internal item into "block". Implementation of + ->down_link() method */ +extern void down_link_internal(const coord_t * coord, const reiser4_key * key, + reiser4_block_nr * block); +extern int has_pointer_to_internal(const coord_t * coord, + const reiser4_block_nr * block); +extern int create_hook_internal(const coord_t * item, void *arg); +extern int kill_hook_internal(const coord_t * item, pos_in_node_t from, + pos_in_node_t count, struct carry_kill_data *); +extern int shift_hook_internal(const coord_t * item, unsigned from, + unsigned count, znode * old_node); +extern void reiser4_print_internal(const char *prefix, coord_t * coord); + +extern int utmost_child_internal(const coord_t * coord, sideof side, + jnode ** child); +int utmost_child_real_block_internal(const coord_t * coord, sideof side, + reiser4_block_nr * block); + +extern void reiser4_update_internal(const coord_t * coord, + const reiser4_block_nr * blocknr); +/* FIXME: reiserfs has check_internal */ +extern int check__internal(const coord_t * coord, const char **error); + +/* __FS_REISER4_PLUGIN_ITEM_INTERNAL_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/item.c b/fs/reiser4/plugin/item/item.c new file mode 100644 index 000000000000..e226f045f03d --- /dev/null +++ b/fs/reiser4/plugin/item/item.c @@ -0,0 +1,719 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* definition of item plugins. 
*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "sde.h" +#include "internal.h" +#include "item.h" +#include "static_stat.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../tree.h" +#include "../../context.h" +#include "ctail.h" + +/* return pointer to item body */ +void item_body_by_coord_hard(coord_t * coord /* coord to query */ ) +{ + assert("nikita-324", coord != NULL); + assert("nikita-325", coord->node != NULL); + assert("nikita-326", znode_is_loaded(coord->node)); + assert("nikita-3200", coord->offset == INVALID_OFFSET); + + coord->offset = + node_plugin_by_node(coord->node)->item_by_coord(coord) - + zdata(coord->node); + ON_DEBUG(coord->body_v = coord->node->times_locked); +} + +void *item_body_by_coord_easy(const coord_t * coord /* coord to query */ ) +{ + return zdata(coord->node) + coord->offset; +} + +#if REISER4_DEBUG + +int item_body_is_valid(const coord_t * coord) +{ + return + coord->offset == + node_plugin_by_node(coord->node)->item_by_coord(coord) - + zdata(coord->node); +} + +#endif + +/* return length of item at @coord */ +pos_in_node_t item_length_by_coord(const coord_t * coord /* coord to query */ ) +{ + int len; + + assert("nikita-327", coord != NULL); + assert("nikita-328", coord->node != NULL); + assert("nikita-329", znode_is_loaded(coord->node)); + + len = node_plugin_by_node(coord->node)->length_by_coord(coord); + return len; +} + +void obtain_item_plugin(const coord_t * coord) +{ + assert("nikita-330", coord != NULL); + assert("nikita-331", coord->node != NULL); + assert("nikita-332", znode_is_loaded(coord->node)); + + coord_set_iplug((coord_t *) coord, + node_plugin_by_node(coord->node)-> + plugin_by_coord(coord)); + assert("nikita-2479", + coord_iplug(coord) == + node_plugin_by_node(coord->node)->plugin_by_coord(coord)); +} + +/* return id of item */ +/* Audited by: green(2002.06.15) */ +item_id item_id_by_coord(const coord_t * coord /* coord to query */ ) +{ + assert("vs-539", coord != NULL); + assert("vs-538", coord->node != NULL); + assert("vs-537", znode_is_loaded(coord->node)); + assert("vs-536", item_plugin_by_coord(coord) != NULL); + assert("vs-540", + item_id_by_plugin(item_plugin_by_coord(coord)) < LAST_ITEM_ID); + + return item_id_by_plugin(item_plugin_by_coord(coord)); +} + +/* return key of item at @coord */ +/* Audited by: green(2002.06.15) */ +reiser4_key *item_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + assert("nikita-338", coord != NULL); + assert("nikita-339", coord->node != NULL); + assert("nikita-340", znode_is_loaded(coord->node)); + + return node_plugin_by_node(coord->node)->key_at(coord, key); +} + +/* this returns max key in the item */ +reiser4_key *max_item_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + coord_t last; + + assert("nikita-338", coord != NULL); + assert("nikita-339", coord->node != NULL); + assert("nikita-340", znode_is_loaded(coord->node)); + + /* make coord pointing to last item's unit */ + coord_dup(&last, coord); + last.unit_pos = coord_num_units(&last) - 1; + assert("vs-1560", coord_is_existing_unit(&last)); + + max_unit_key_by_coord(&last, key); + return key; +} + +/* return key of unit at @coord */ +reiser4_key *unit_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + assert("nikita-772", coord != NULL); + assert("nikita-774", coord->node != NULL); + 
assert("nikita-775", znode_is_loaded(coord->node)); + + if (item_plugin_by_coord(coord)->b.unit_key != NULL) + return item_plugin_by_coord(coord)->b.unit_key(coord, key); + else + return item_key_by_coord(coord, key); +} + +/* return the biggest key contained the unit @coord */ +reiser4_key *max_unit_key_by_coord(const coord_t * coord /* coord to query */ , + reiser4_key * key /* result */ ) +{ + assert("nikita-772", coord != NULL); + assert("nikita-774", coord->node != NULL); + assert("nikita-775", znode_is_loaded(coord->node)); + + if (item_plugin_by_coord(coord)->b.max_unit_key != NULL) + return item_plugin_by_coord(coord)->b.max_unit_key(coord, key); + else + return unit_key_by_coord(coord, key); +} + +/* ->max_key_inside() method for items consisting of exactly one key (like + stat-data) */ +static reiser4_key *max_key_inside_single_key(const coord_t * + coord /* coord of item */ , + reiser4_key * + result /* resulting key */ ) +{ + assert("nikita-604", coord != NULL); + + /* coord -> key is starting key of this item and it has to be already + filled in */ + return unit_key_by_coord(coord, result); +} + +/* ->nr_units() method for items consisting of exactly one unit always */ +pos_in_node_t +nr_units_single_unit(const coord_t * coord UNUSED_ARG /* coord of item */ ) +{ + return 1; +} + +static int +paste_no_paste(coord_t * coord UNUSED_ARG, + reiser4_item_data * data UNUSED_ARG, + carry_plugin_info * info UNUSED_ARG) +{ + return 0; +} + +/* default ->fast_paste() method */ +static int +agree_to_fast_op(const coord_t * coord UNUSED_ARG /* coord of item */ ) +{ + return 1; +} + +int item_can_contain_key(const coord_t * item /* coord of item */ , + const reiser4_key * key /* key to check */ , + const reiser4_item_data * data /* parameters of item + * being created */ ) +{ + item_plugin *iplug; + reiser4_key min_key_in_item; + reiser4_key max_key_in_item; + + assert("nikita-1658", item != NULL); + assert("nikita-1659", key != NULL); + + iplug = item_plugin_by_coord(item); + if (iplug->b.can_contain_key != NULL) + return iplug->b.can_contain_key(item, key, data); + else { + assert("nikita-1681", iplug->b.max_key_inside != NULL); + item_key_by_coord(item, &min_key_in_item); + iplug->b.max_key_inside(item, &max_key_in_item); + + /* can contain key if + min_key_in_item <= key && + key <= max_key_in_item + */ + return keyle(&min_key_in_item, key) + && keyle(key, &max_key_in_item); + } +} + +/* mergeable method for non mergeable items */ +static int +not_mergeable(const coord_t * i1 UNUSED_ARG, const coord_t * i2 UNUSED_ARG) +{ + return 0; +} + +/* return 0 if @item1 and @item2 are not mergeable, !0 - otherwise */ +int are_items_mergeable(const coord_t * i1 /* coord of first item */ , + const coord_t * i2 /* coord of second item */ ) +{ + item_plugin *iplug; + reiser4_key k1; + reiser4_key k2; + + assert("nikita-1336", i1 != NULL); + assert("nikita-1337", i2 != NULL); + + iplug = item_plugin_by_coord(i1); + assert("nikita-1338", iplug != NULL); + + /* NOTE-NIKITA are_items_mergeable() is also called by assertions in + shifting code when nodes are in "suspended" state. 
*/ + assert("nikita-1663", + keyle(item_key_by_coord(i1, &k1), item_key_by_coord(i2, &k2))); + + if (iplug->b.mergeable != NULL) { + return iplug->b.mergeable(i1, i2); + } else if (iplug->b.max_key_inside != NULL) { + iplug->b.max_key_inside(i1, &k1); + item_key_by_coord(i2, &k2); + + /* mergeable if ->max_key_inside() >= key of i2; */ + return keyge(iplug->b.max_key_inside(i1, &k1), + item_key_by_coord(i2, &k2)); + } else { + item_key_by_coord(i1, &k1); + item_key_by_coord(i2, &k2); + + return + (get_key_locality(&k1) == get_key_locality(&k2)) && + (get_key_objectid(&k1) == get_key_objectid(&k2)) + && (iplug == item_plugin_by_coord(i2)); + } +} + +int item_is_extent(const coord_t * item) +{ + assert("vs-482", coord_is_existing_item(item)); + return item_id_by_coord(item) == EXTENT_POINTER_ID; +} + +int item_is_tail(const coord_t * item) +{ + assert("vs-482", coord_is_existing_item(item)); + return item_id_by_coord(item) == FORMATTING_ID; +} + +#if REISER4_DEBUG + +int item_is_statdata(const coord_t * item) +{ + assert("vs-516", coord_is_existing_item(item)); + return plugin_of_group(item_plugin_by_coord(item), STAT_DATA_ITEM_TYPE); +} + +int item_is_ctail(const coord_t * item) +{ + assert("edward-xx", coord_is_existing_item(item)); + return item_id_by_coord(item) == CTAIL_ID; +} + +#endif /* REISER4_DEBUG */ + +static int change_item(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + /* cannot change constituent item (sd, or dir_item) */ + return RETERR(-EINVAL); +} + +static reiser4_plugin_ops item_plugin_ops = { + .init = NULL, + .load = NULL, + .save_len = NULL, + .save = NULL, + .change = change_item +}; + +item_plugin item_plugins[LAST_ITEM_ID] = { + [STATIC_STAT_DATA_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = STATIC_STAT_DATA_ID, + .groups = (1 << STAT_DATA_ITEM_TYPE), + .pops = &item_plugin_ops, + .label = "sd", + .desc = "stat-data", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_single_key, + .can_contain_key = NULL, + .mergeable = not_mergeable, + .nr_units = nr_units_single_unit, + .lookup = NULL, + .init = NULL, + .paste = paste_no_paste, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .sd = { + .init_inode = init_inode_static_sd, + .save_len = save_len_static_sd, + .save = save_static_sd + } + } + }, + [SIMPLE_DIR_ENTRY_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = SIMPLE_DIR_ENTRY_ID, + .groups = (1 << DIR_ENTRY_ITEM_TYPE), + .pops = &item_plugin_ops, + .label = "de", + .desc = "directory entry", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_single_key, + .can_contain_key = NULL, + .mergeable = NULL, + .nr_units = nr_units_single_unit, + .lookup = NULL, + .init = NULL, + .paste = NULL, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + 
.update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .dir = { + .extract_key = extract_key_de, + .update_key = update_key_de, + .extract_name = extract_name_de, + .extract_file_type = extract_file_type_de, + .add_entry = add_entry_de, + .rem_entry = rem_entry_de, + .max_name_len = max_name_len_de + } + } + }, + [COMPOUND_DIR_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = COMPOUND_DIR_ID, + .groups = (1 << DIR_ENTRY_ITEM_TYPE), + .pops = &item_plugin_ops, + .label = "cde", + .desc = "compressed directory entry", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_cde, + .can_contain_key = can_contain_key_cde, + .mergeable = mergeable_cde, + .nr_units = nr_units_cde, + .lookup = lookup_cde, + .init = init_cde, + .paste = paste_cde, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_cde, + .copy_units = copy_units_cde, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = cut_units_cde, + .kill_units = kill_units_cde, + .unit_key = unit_key_cde, + .max_unit_key = unit_key_cde, + .estimate = estimate_cde, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = reiser4_check_cde +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .dir = { + .extract_key = extract_key_cde, + .update_key = update_key_cde, + .extract_name = extract_name_cde, + .extract_file_type = extract_file_type_de, + .add_entry = add_entry_cde, + .rem_entry = rem_entry_cde, + .max_name_len = max_name_len_cde + } + } + }, + [NODE_POINTER_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = NODE_POINTER_ID, + .groups = (1 << INTERNAL_ITEM_TYPE), + .pops = NULL, + .label = "internal", + .desc = "internal item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = NULL, + .can_contain_key = NULL, + .mergeable = mergeable_internal, + .nr_units = nr_units_single_unit, + .lookup = lookup_internal, + .init = NULL, + .paste = NULL, + .fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = create_hook_internal, + .kill_hook = kill_hook_internal, + .shift_hook = shift_hook_internal, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = check__internal +#endif + }, + .f = { + .utmost_child = utmost_child_internal, + .utmost_child_real_block = + utmost_child_real_block_internal, + .update = reiser4_update_internal, + .scan = NULL, + .convert = NULL + }, + .s = { + .internal = { + .down_link = down_link_internal, + .has_pointer_to = has_pointer_to_internal + } + } + }, + [EXTENT_POINTER_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = EXTENT_POINTER_ID, + .groups = (1 << UNIX_FILE_METADATA_ITEM_TYPE), + .pops = NULL, + .label = "extent", + .desc = "extent item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_extent, + .can_contain_key = can_contain_key_extent, + .mergeable = mergeable_extent, + .nr_units = nr_units_extent, + .lookup = lookup_extent, + .init = NULL, + .paste = paste_extent, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_extent, + .create_hook = create_hook_extent, + .copy_units = copy_units_extent, + .kill_hook = kill_hook_extent, + .shift_hook = NULL, + .cut_units = cut_units_extent, + .kill_units = kill_units_extent, + .unit_key = unit_key_extent, + .max_unit_key = max_unit_key_extent, + .estimate = NULL, + .item_data_by_flow = NULL, 
+#if REISER4_DEBUG + .check = reiser4_check_extent +#endif + }, + .f = { + .utmost_child = utmost_child_extent, + .utmost_child_real_block = + utmost_child_real_block_extent, + .update = NULL, + .scan = reiser4_scan_extent, + .convert = NULL, + .key_by_offset = key_by_offset_extent + }, + .s = { + .file = { + .write = reiser4_write_extent, + .read = reiser4_read_extent, + .readpage = reiser4_readpage_extent, + .get_block = get_block_address_extent, + .append_key = append_key_extent, + .init_coord_extension = + init_coord_extension_extent + } + } + }, + [FORMATTING_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = FORMATTING_ID, + .groups = (1 << UNIX_FILE_METADATA_ITEM_TYPE), + .pops = NULL, + .label = "body", + .desc = "body (or tail?) item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_tail, + .can_contain_key = can_contain_key_tail, + .mergeable = mergeable_tail, + .nr_units = nr_units_tail, + .lookup = lookup_tail, + .init = NULL, + .paste = paste_tail, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_tail, + .create_hook = NULL, + .copy_units = copy_units_tail, + .kill_hook = kill_hook_tail, + .shift_hook = NULL, + .cut_units = cut_units_tail, + .kill_units = kill_units_tail, + .unit_key = unit_key_tail, + .max_unit_key = unit_key_tail, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + }, + .f = { + .utmost_child = NULL, + .utmost_child_real_block = NULL, + .update = NULL, + .scan = NULL, + .convert = NULL + }, + .s = { + .file = { + .write = reiser4_write_tail, + .read = reiser4_read_tail, + .readpage = readpage_tail, + .get_block = get_block_address_tail, + .append_key = append_key_tail, + .init_coord_extension = + init_coord_extension_tail + } + } + }, + [CTAIL_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = CTAIL_ID, + .groups = (1 << UNIX_FILE_METADATA_ITEM_TYPE), + .pops = NULL, + .label = "ctail", + .desc = "cryptcompress tail item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = max_key_inside_tail, + .can_contain_key = can_contain_key_ctail, + .mergeable = mergeable_ctail, + .nr_units = nr_units_ctail, + .lookup = NULL, + .init = init_ctail, + .paste = paste_ctail, + .fast_paste = agree_to_fast_op, + .can_shift = can_shift_ctail, + .create_hook = create_hook_ctail, + .copy_units = copy_units_ctail, + .kill_hook = kill_hook_ctail, + .shift_hook = shift_hook_ctail, + .cut_units = cut_units_ctail, + .kill_units = kill_units_ctail, + .unit_key = unit_key_tail, + .max_unit_key = unit_key_tail, + .estimate = estimate_ctail, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = check_ctail +#endif + }, + .f = { + .utmost_child = utmost_child_ctail, + /* FIXME-EDWARD: write this */ + .utmost_child_real_block = NULL, + .update = NULL, + .scan = scan_ctail, + .convert = convert_ctail + }, + .s = { + .file = { + .write = NULL, + .read = read_ctail, + .readpage = readpage_ctail, + .get_block = get_block_address_tail, + .append_key = append_key_ctail, + .init_coord_extension = + init_coord_extension_tail + } + } + }, + [BLACK_BOX_ID] = { + .h = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .id = BLACK_BOX_ID, + .groups = (1 << OTHER_ITEM_TYPE), + .pops = NULL, + .label = "blackbox", + .desc = "black box item", + .linkage = {NULL, NULL} + }, + .b = { + .max_key_inside = NULL, + .can_contain_key = NULL, + .mergeable = not_mergeable, + .nr_units = nr_units_single_unit, + /* to need for ->lookup method */ + .lookup = NULL, + .init = NULL, + .paste = NULL, + 
.fast_paste = NULL, + .can_shift = NULL, + .copy_units = NULL, + .create_hook = NULL, + .kill_hook = NULL, + .shift_hook = NULL, + .cut_units = NULL, + .kill_units = NULL, + .unit_key = NULL, + .max_unit_key = NULL, + .estimate = NULL, + .item_data_by_flow = NULL, +#if REISER4_DEBUG + .check = NULL +#endif + } + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/item.h b/fs/reiser4/plugin/item/item.h new file mode 100644 index 000000000000..5998701f5745 --- /dev/null +++ b/fs/reiser4/plugin/item/item.h @@ -0,0 +1,398 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* first read balance.c comments before reading this */ + +/* An item_plugin implements all of the operations required for + balancing that are item specific. */ + +/* an item plugin also implements other operations that are specific to that + item. These go into the item specific operations portion of the item + handler, and all of the item specific portions of the item handler are put + into a union. */ + +#if !defined( __REISER4_ITEM_H__ ) +#define __REISER4_ITEM_H__ + +#include "../../forward.h" +#include "../plugin_header.h" +#include "../../dformat.h" +#include "../../seal.h" +#include "../../plugin/file/file.h" + +#include <linux/fs.h> /* for struct file, struct inode */ +#include <linux/mm.h> /* for struct page */ +#include <linux/dcache.h> /* for struct dentry */ + +typedef enum { + STAT_DATA_ITEM_TYPE, + DIR_ENTRY_ITEM_TYPE, + INTERNAL_ITEM_TYPE, + UNIX_FILE_METADATA_ITEM_TYPE, + OTHER_ITEM_TYPE +} item_type_id; + +/* this is the part of each item plugin that all items are expected to + support or at least explicitly fail to support by setting the + pointer to null. */ +struct balance_ops { + /* operations called by balancing + + It is interesting to consider that some of these item + operations could be given sources or targets that are not + really items in nodes. This could be ok/useful. + + */ + /* maximal key that can _possibly_ be occupied by this item + + When inserting, and node ->lookup() method (called by + coord_by_key()) reaches an item after binary search, + the ->max_key_inside() item plugin method is used to determine + whether new item should pasted into existing item + (new_key<=max_key_inside()) or new item has to be created + (new_key>max_key_inside()). + + For items that occupy exactly one key (like stat-data) + this method should return this key. For items that can + grow indefinitely (extent, directory item) this should + return reiser4_max_key(). + + For example extent with the key + + (LOCALITY,4,OBJID,STARTING-OFFSET), and length BLK blocks, + + ->max_key_inside is (LOCALITY,4,OBJID,0xffffffffffffffff), and + */ + reiser4_key *(*max_key_inside) (const coord_t *, reiser4_key *); + + /* true if item @coord can merge data at @key. */ + int (*can_contain_key) (const coord_t *, const reiser4_key *, + const reiser4_item_data *); + /* mergeable() - check items for mergeability + + Optional method. Returns true if two items can be merged. + + */ + int (*mergeable) (const coord_t *, const coord_t *); + + /* number of atomic things in an item. 
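The ->max_key_inside() contract described above is what decides between pasting into an existing item and creating a new one. A minimal userspace sketch of that decision, using a hypothetical 64-bit demo_key_t and demo_* names rather than the real reiser4 key layout or plugin API:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in: a key is a single 64-bit value here */
typedef uint64_t demo_key_t;

struct demo_item_ops {
	/* largest key that could possibly live inside this item */
	demo_key_t (*max_key_inside)(demo_key_t item_key);
};

/* a stat-data-like item occupies exactly one key */
static demo_key_t demo_sd_max_key_inside(demo_key_t item_key)
{
	return item_key;
}

/* an extent-like item can grow indefinitely in its offset part */
static demo_key_t demo_extent_max_key_inside(demo_key_t item_key)
{
	return item_key | 0xffffffffULL;	/* offset bits maxed out */
}

/* paste into the found item when new_key <= max_key_inside(), otherwise a
 * new item has to be created */
static int demo_should_paste(const struct demo_item_ops *ops,
			     demo_key_t item_key, demo_key_t new_key)
{
	return new_key <= ops->max_key_inside(item_key);
}

int main(void)
{
	struct demo_item_ops sd = { demo_sd_max_key_inside };
	struct demo_item_ops ext = { demo_extent_max_key_inside };
	demo_key_t item_key = (demo_key_t)0x1000 << 32;	/* object id in high bits */

	printf("stat-data: paste? %d\n", demo_should_paste(&sd, item_key, item_key + 8));
	printf("extent:    paste? %d\n", demo_should_paste(&ext, item_key, item_key + 8));
	return 0;
}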
+	   NOTE FOR CONTRIBUTORS: use a generic method
+	   nr_units_single_unit() for solid (atomic) items, as
+	   tree operations use it as a criterion of solidness
+	   (see is_solid_item macro) */
+	pos_in_node_t(*nr_units) (const coord_t *);
+
+	/* search within item for a unit within the item, and return a
+	   pointer to it. This can be used to calculate how many
+	   bytes to shrink an item if you use pointer arithmetic and
+	   compare to the start of the item body, provided the item's
+	   data are continuous in the node; if the item's data are not
+	   continuous in the node, all sorts of other things are probably
+	   going to break as well. */
+	lookup_result(*lookup) (const reiser4_key *, lookup_bias, coord_t *);
+	/* method called by node_plugin->create_item() to initialise a new
+	   item */
+	int (*init) (coord_t * target, coord_t * from,
+		     reiser4_item_data * data);
+	/* method called (e.g., by reiser4_resize_item()) to place new data
+	   into item when it grows */
+	int (*paste) (coord_t *, reiser4_item_data *, carry_plugin_info *);
+	/* return true if paste into @coord is allowed to skip
+	   carry. That is, if such a paste would not require any changes
+	   at the parent level
+	 */
+	int (*fast_paste) (const coord_t *);
+	/* how many but not more than @want units of @source can be
+	   shifted into @target node. If pend == append - we try to
+	   append the last item of @target with the first units of @source. If
+	   pend == prepend - we try to "prepend" the first item in @target
+	   with the last units of @source. @target node has @free_space
+	   bytes of free space. Total size of those units is returned
+	   via @size.
+
+	   @target is not NULL if shifting to a mergeable item, and
+	   NULL if a new item will be created during shifting.
+	 */
+	int (*can_shift) (unsigned free_space, coord_t *,
+			  znode *, shift_direction, unsigned *size,
+			  unsigned want);
+
+	/* starting off @from-th unit of item @source append or
+	   prepend @count units to @target. @target has been already
+	   expanded by @free_space bytes. That must be exactly what is
+	   needed for those items in @target. If @where_is_free_space
+	   == SHIFT_LEFT - free space is at the end of @target item,
+	   otherwise - it is in the beginning of it. */
+	void (*copy_units) (coord_t *, coord_t *,
+			    unsigned from, unsigned count,
+			    shift_direction where_is_free_space,
+			    unsigned free_space);
+
+	int (*create_hook) (const coord_t *, void *);
+	/* do whatever is necessary to do when @count units starting
+	   from @from-th one are removed from the tree */
+	/* FIXME-VS: this used to be here for, in particular,
+	   extents and items of internal type to free blocks they point
+	   to at the same time with removing items from a
+	   tree. Problems start, however, when dealloc_block fails due
+	   to some reason. Item gets removed, but blocks it pointed to
+	   are not freed. It is not clear how to fix this for items of
+	   internal type because a need to remove an internal item may
+	   appear in the middle of balancing, and there is no way to
+	   undo changes made. OTOH, if space allocator involves
+	   balancing to perform dealloc_block - this will probably
+	   break balancing due to deadlock issues
+	 */
+	int (*kill_hook) (const coord_t *, pos_in_node_t from,
+			  pos_in_node_t count, struct carry_kill_data *);
+	int (*shift_hook) (const coord_t *, unsigned from, unsigned count,
+			   znode * _node);
+
+	/* unit @*from contains @from_key. unit @*to contains @to_key. Cut all keys between @from_key and @to_key
+	   including boundaries. When units are cut from item beginning - move space which gets freed to head of
+	   item.
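For an item whose units all have the same size, the ->can_shift()/->copy_units() contract above boils down to a capacity computation. A small standalone sketch of that arithmetic, with hypothetical demo_* names and signatures rather than the kernel ones:

#include <stdio.h>

/* For a hypothetical item whose units all have the same size, compute how
 * many of the @want units fit into @free_space bytes and report the total
 * size actually consumed, mirroring the ->can_shift() contract. */
static unsigned demo_can_shift(unsigned free_space, unsigned unit_size,
			       unsigned want, unsigned *size)
{
	unsigned fit = free_space / unit_size;

	if (fit > want)
		fit = want;
	*size = fit * unit_size;	/* target must be grown by this much */
	return fit;
}

int main(void)
{
	unsigned size;
	unsigned moved = demo_can_shift(100 /* free bytes */, 24 /* unit size */,
					10 /* wanted */, &size);

	/* a copy_units()-style step would then copy 'moved' units into the
	 * 'size' bytes of space already made available in the target item */
	printf("can shift %u units, %u bytes\n", moved, size);
	return 0;
}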
When units are cut from item end - move freed space to item end. When units are cut from the middle of + item - move freed space to item head. Return amount of space which got freed. Save smallest removed key in + @smallest_removed if it is not 0. Save new first item key in @new_first_key if it is not 0 + */ + int (*cut_units) (coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, + reiser4_key * smallest_removed, + reiser4_key * new_first_key); + + /* like cut_units, except that these units are removed from the + tree, not only from a node */ + int (*kill_units) (coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, + reiser4_key * smallest_removed, + reiser4_key * new_first); + + /* if @key_of_coord == 1 - returned key of coord, otherwise - + key of unit is returned. If @coord is not set to certain + unit - ERR_PTR(-ENOENT) is returned */ + reiser4_key *(*unit_key) (const coord_t *, reiser4_key *); + reiser4_key *(*max_unit_key) (const coord_t *, reiser4_key *); + /* estimate how much space is needed for paste @data into item at + @coord. if @coord==0 - estimate insertion, otherwise - estimate + pasting + */ + int (*estimate) (const coord_t *, const reiser4_item_data *); + + /* converts flow @f to item data. @coord == 0 on insert */ + int (*item_data_by_flow) (const coord_t *, const flow_t *, + reiser4_item_data *); + + /*void (*show) (struct seq_file *, coord_t *); */ + +#if REISER4_DEBUG + /* used for debugging, every item should have here the most + complete possible check of the consistency of the item that + the inventor can construct */ + int (*check) (const coord_t *, const char **error); +#endif + +}; + +struct flush_ops { + /* return the right or left child of @coord, only if it is in memory */ + int (*utmost_child) (const coord_t *, sideof side, jnode ** child); + + /* return whether the right or left child of @coord has a non-fake + block number. */ + int (*utmost_child_real_block) (const coord_t *, sideof side, + reiser4_block_nr *); + /* relocate child at @coord to the @block */ + void (*update) (const coord_t *, const reiser4_block_nr *); + /* count unformatted nodes per item for leave relocation policy, etc.. */ + int (*scan) (flush_scan * scan); + /* convert item by flush */ + int (*convert) (flush_pos_t * pos); + /* backward mapping from jnode offset to a key. */ + int (*key_by_offset) (struct inode *, loff_t, reiser4_key *); +}; + +/* operations specific to the directory item */ +struct dir_entry_iops { + /* extract stat-data key from directory entry at @coord and place it + into @key. */ + int (*extract_key) (const coord_t *, reiser4_key * key); + /* update object key in item. 
*/ + int (*update_key) (const coord_t *, const reiser4_key *, lock_handle *); + /* extract name from directory entry at @coord and return it */ + char *(*extract_name) (const coord_t *, char *buf); + /* extract file type (DT_* stuff) from directory entry at @coord and + return it */ + unsigned (*extract_file_type) (const coord_t *); + int (*add_entry) (struct inode * dir, + coord_t *, lock_handle *, + const struct dentry * name, + reiser4_dir_entry_desc * entry); + int (*rem_entry) (struct inode * dir, const struct qstr * name, + coord_t *, lock_handle *, + reiser4_dir_entry_desc * entry); + int (*max_name_len) (const struct inode * dir); +}; + +/* operations specific to items regular (unix) file metadata are built of */ +struct file_iops{ + ssize_t (*write) (struct file *, struct inode *, + const char __user *, size_t, loff_t *pos); + int (*read) (struct file *, flow_t *, hint_t *); + int (*readpage) (void *, struct page *); + int (*get_block) (const coord_t *, sector_t, sector_t *); + /* + * key of first byte which is not addressed by the item @coord is set + * to. + * For example, for extent item with the key + * + * (LOCALITY,4,OBJID,STARTING-OFFSET), and length BLK blocks, + * + * ->append_key is + * + * (LOCALITY,4,OBJID,STARTING-OFFSET + BLK * block_size) + */ + reiser4_key *(*append_key) (const coord_t *, reiser4_key *); + + void (*init_coord_extension) (uf_coord_t *, loff_t); +}; + +/* operations specific to items of stat data type */ +struct sd_iops { + int (*init_inode) (struct inode * inode, char *sd, int len); + int (*save_len) (struct inode * inode); + int (*save) (struct inode * inode, char **area); +}; + +/* operations specific to internal item */ +struct internal_iops{ + /* all tree traversal want to know from internal item is where + to go next. */ + void (*down_link) (const coord_t * coord, + const reiser4_key * key, reiser4_block_nr * block); + /* check that given internal item contains given pointer. 
*/ + int (*has_pointer_to) (const coord_t * coord, + const reiser4_block_nr * block); +}; + +struct item_plugin { + /* generic fields */ + plugin_header h; + /* methods common for all item types */ + struct balance_ops b; /* balance operations */ + struct flush_ops f; /* flush operates with items via this methods */ + + /* methods specific to particular type of item */ + union { + struct dir_entry_iops dir; + struct file_iops file; + struct sd_iops sd; + struct internal_iops internal; + } s; +}; + +#define is_solid_item(iplug) ((iplug)->b.nr_units == nr_units_single_unit) + +static inline item_id item_id_by_plugin(item_plugin * plugin) +{ + return plugin->h.id; +} + +static inline char get_iplugid(item_plugin * iplug) +{ + assert("nikita-2838", iplug != NULL); + assert("nikita-2839", iplug->h.id < 0xff); + return (char)item_id_by_plugin(iplug); +} + +extern unsigned long znode_times_locked(const znode * z); + +static inline void coord_set_iplug(coord_t * coord, item_plugin * iplug) +{ + assert("nikita-2837", coord != NULL); + assert("nikita-2838", iplug != NULL); + coord->iplugid = get_iplugid(iplug); + ON_DEBUG(coord->plug_v = znode_times_locked(coord->node)); +} + +static inline item_plugin *coord_iplug(const coord_t * coord) +{ + assert("nikita-2833", coord != NULL); + assert("nikita-2834", coord->iplugid != INVALID_PLUGID); + assert("nikita-3549", coord->plug_v == znode_times_locked(coord->node)); + return (item_plugin *) plugin_by_id(REISER4_ITEM_PLUGIN_TYPE, + coord->iplugid); +} + +extern int item_can_contain_key(const coord_t * item, const reiser4_key * key, + const reiser4_item_data *); +extern int are_items_mergeable(const coord_t * i1, const coord_t * i2); +extern int item_is_extent(const coord_t *); +extern int item_is_tail(const coord_t *); +extern int item_is_statdata(const coord_t * item); +extern int item_is_ctail(const coord_t *); + +extern pos_in_node_t item_length_by_coord(const coord_t * coord); +extern pos_in_node_t nr_units_single_unit(const coord_t * coord); +extern item_id item_id_by_coord(const coord_t * coord /* coord to query */ ); +extern reiser4_key *item_key_by_coord(const coord_t * coord, reiser4_key * key); +extern reiser4_key *max_item_key_by_coord(const coord_t *, reiser4_key *); +extern reiser4_key *unit_key_by_coord(const coord_t * coord, reiser4_key * key); +extern reiser4_key *max_unit_key_by_coord(const coord_t * coord, + reiser4_key * key); +extern void obtain_item_plugin(const coord_t * coord); + +#if defined(REISER4_DEBUG) +extern int znode_is_loaded(const znode * node); +#endif + +/* return plugin of item at @coord */ +static inline item_plugin *item_plugin_by_coord(const coord_t * + coord /* coord to query */ ) +{ + assert("nikita-330", coord != NULL); + assert("nikita-331", coord->node != NULL); + assert("nikita-332", znode_is_loaded(coord->node)); + + if (unlikely(!coord_is_iplug_set(coord))) + obtain_item_plugin(coord); + return coord_iplug(coord); +} + +/* this returns true if item is of internal type */ +static inline int item_is_internal(const coord_t * item) +{ + assert("vs-483", coord_is_existing_item(item)); + return plugin_of_group(item_plugin_by_coord(item), INTERNAL_ITEM_TYPE); +} + +extern void item_body_by_coord_hard(coord_t * coord); +extern void *item_body_by_coord_easy(const coord_t * coord); +#if REISER4_DEBUG +extern int item_body_is_valid(const coord_t * coord); +#endif + +/* return pointer to item body */ +static inline void *item_body_by_coord(const coord_t * + coord /* coord to query */ ) +{ + assert("nikita-324", coord != 
NULL); + assert("nikita-325", coord->node != NULL); + assert("nikita-326", znode_is_loaded(coord->node)); + + if (coord->offset == INVALID_OFFSET) + item_body_by_coord_hard((coord_t *) coord); + assert("nikita-3201", item_body_is_valid(coord)); + assert("nikita-3550", coord->body_v == znode_times_locked(coord->node)); + return item_body_by_coord_easy(coord); +} + +/* __REISER4_ITEM_H__ */ +#endif +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/item/sde.c b/fs/reiser4/plugin/item/sde.c new file mode 100644 index 000000000000..c15abe3f0ce6 --- /dev/null +++ b/fs/reiser4/plugin/item/sde.c @@ -0,0 +1,186 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry implementation */ +#include "../../forward.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../coord.h" +#include "sde.h" +#include "item.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../carry.h" +#include "../../tree.h" +#include "../../inode.h" + +#include <linux/fs.h> /* for struct inode */ +#include <linux/dcache.h> /* for struct dentry */ + +/* ->extract_key() method of simple directory item plugin. */ +int extract_key_de(const coord_t * coord /* coord of item */ , + reiser4_key * key /* resulting key */ ) +{ + directory_entry_format *dent; + + assert("nikita-1458", coord != NULL); + assert("nikita-1459", key != NULL); + + dent = (directory_entry_format *) item_body_by_coord(coord); + assert("nikita-1158", item_length_by_coord(coord) >= (int)sizeof *dent); + return extract_key_from_id(&dent->id, key); +} + +int +update_key_de(const coord_t * coord, const reiser4_key * key, + lock_handle * lh UNUSED_ARG) +{ + directory_entry_format *dent; + obj_key_id obj_id; + int result; + + assert("nikita-2342", coord != NULL); + assert("nikita-2343", key != NULL); + + dent = (directory_entry_format *) item_body_by_coord(coord); + result = build_obj_key_id(key, &obj_id); + if (result == 0) { + dent->id = obj_id; + znode_make_dirty(coord->node); + } + return 0; +} + +char *extract_dent_name(const coord_t * coord, directory_entry_format * dent, + char *buf) +{ + reiser4_key key; + + unit_key_by_coord(coord, &key); + if (get_key_type(&key) != KEY_FILE_NAME_MINOR) + reiser4_print_address("oops", znode_get_block(coord->node)); + if (!is_longname_key(&key)) { + if (is_dot_key(&key)) + return (char *)"."; + else + return extract_name_from_key(&key, buf); + } else + return (char *)dent->name; +} + +/* ->extract_name() method of simple directory item plugin. */ +char *extract_name_de(const coord_t * coord /* coord of item */ , char *buf) +{ + directory_entry_format *dent; + + assert("nikita-1460", coord != NULL); + + dent = (directory_entry_format *) item_body_by_coord(coord); + return extract_dent_name(coord, dent, buf); +} + +/* ->extract_file_type() method of simple directory item plugin. */ +unsigned extract_file_type_de(const coord_t * coord UNUSED_ARG /* coord of + * item */ ) +{ + assert("nikita-1764", coord != NULL); + /* we don't store file type in the directory entry yet. 
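extract_dent_name() above recovers short names from the key itself and only falls back to the name stored in the entry body for long names. A simplified userspace model of that split follows; the 15-byte threshold, the demo_dirent layout and the demo_* helpers are illustrative assumptions, not the reiser4 kassign encoding:

#include <stdio.h>
#include <string.h>

#define DEMO_SHORT_NAME_MAX 15	/* hypothetical: short names fit into the key */

struct demo_dirent {
	char key_name[DEMO_SHORT_NAME_MAX + 1];	/* stands in for name bits packed into the key */
	char body_name[256];			/* stands in for dent->name in the item body */
};

/* store a name either in the "key" or, for long names, in the entry body */
static void demo_store(struct demo_dirent *de, const char *name)
{
	memset(de, 0, sizeof(*de));
	if (strlen(name) <= DEMO_SHORT_NAME_MAX)
		strcpy(de->key_name, name);
	else
		strcpy(de->body_name, name);
}

/* mirrors the shape of extract_dent_name(): prefer the key, fall back to
 * the name stored in the entry body */
static const char *demo_extract(const struct demo_dirent *de)
{
	return de->key_name[0] != '\0' ? de->key_name : de->body_name;
}

int main(void)
{
	struct demo_dirent de;

	demo_store(&de, "short.txt");
	printf("%s\n", demo_extract(&de));
	demo_store(&de, "a-rather-long-file-name-that-does-not-fit-into-a-key.txt");
	printf("%s\n", demo_extract(&de));
	return 0;
}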
+ + But see comments at kassign.h:obj_key_id + */ + return DT_UNKNOWN; +} + +int add_entry_de(struct inode *dir /* directory of item */ , + coord_t * coord /* coord of item */ , + lock_handle * lh /* insertion lock handle */ , + const struct dentry *de /* name to add */ , + reiser4_dir_entry_desc * entry /* parameters of new directory + * entry */ ) +{ + reiser4_item_data data; + directory_entry_format *dent; + int result; + const char *name; + int len; + int longname; + + name = de->d_name.name; + len = de->d_name.len; + assert("nikita-1163", strlen(name) == len); + + longname = is_longname(name, len); + + data.length = sizeof *dent; + if (longname) + data.length += len + 1; + data.data = NULL; + data.user = 0; + data.iplug = item_plugin_by_id(SIMPLE_DIR_ENTRY_ID); + + inode_add_bytes(dir, data.length); + + result = insert_by_coord(coord, &data, &entry->key, lh, 0 /*flags */ ); + if (result != 0) + return result; + + dent = (directory_entry_format *) item_body_by_coord(coord); + build_inode_key_id(entry->obj, &dent->id); + if (longname) { + memcpy(dent->name, name, len); + put_unaligned(0, &dent->name[len]); + } + return 0; +} + +int rem_entry_de(struct inode *dir /* directory of item */ , + const struct qstr *name UNUSED_ARG, + coord_t * coord /* coord of item */ , + lock_handle * lh UNUSED_ARG /* lock handle for + * removal */ , + reiser4_dir_entry_desc * entry UNUSED_ARG /* parameters of + * directory entry + * being removed */ ) +{ + coord_t shadow; + int result; + int length; + + length = item_length_by_coord(coord); + if (inode_get_bytes(dir) < length) { + warning("nikita-2627", "Dir is broke: %llu: %llu", + (unsigned long long)get_inode_oid(dir), + inode_get_bytes(dir)); + + return RETERR(-EIO); + } + + /* cut_node() is supposed to take pointers to _different_ + coords, because it will modify them without respect to + possible aliasing. To work around this, create temporary copy + of @coord. + */ + coord_dup(&shadow, coord); + result = + kill_node_content(coord, &shadow, NULL, NULL, NULL, NULL, NULL, 0); + if (result == 0) { + inode_sub_bytes(dir, length); + } + return result; +} + +int max_name_len_de(const struct inode *dir) +{ + return reiser4_tree_by_inode(dir)->nplug->max_item_size() - + sizeof(directory_entry_format) - 2; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/sde.h b/fs/reiser4/plugin/item/sde.h new file mode 100644 index 000000000000..f26762a1c287 --- /dev/null +++ b/fs/reiser4/plugin/item/sde.h @@ -0,0 +1,66 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Directory entry. */ + +#if !defined( __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ ) +#define __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "../../kassign.h" +#include "../../key.h" + +#include <linux/fs.h> +#include <linux/dcache.h> /* for struct dentry */ + +typedef struct directory_entry_format { + /* key of object stat-data. It's not necessary to store whole + key here, because it's always key of stat-data, so minor + packing locality and offset can be omitted here. But this + relies on particular key allocation scheme for stat-data, so, + for extensibility sake, whole key can be stored here. + + We store key as array of bytes, because we don't want 8-byte + alignment of dir entries. + */ + obj_key_id id; + /* file name. Null terminated string. 
*/ + d8 name[0]; +} directory_entry_format; + +void print_de(const char *prefix, coord_t * coord); +int extract_key_de(const coord_t * coord, reiser4_key * key); +int update_key_de(const coord_t * coord, const reiser4_key * key, + lock_handle * lh); +char *extract_name_de(const coord_t * coord, char *buf); +unsigned extract_file_type_de(const coord_t * coord); +int add_entry_de(struct inode *dir, coord_t * coord, + lock_handle * lh, const struct dentry *name, + reiser4_dir_entry_desc * entry); +int rem_entry_de(struct inode *dir, const struct qstr *name, coord_t * coord, + lock_handle * lh, reiser4_dir_entry_desc * entry); +int max_name_len_de(const struct inode *dir); + +int de_rem_and_shrink(struct inode *dir, coord_t * coord, int length); + +char *extract_dent_name(const coord_t * coord, + directory_entry_format * dent, char *buf); + +#if REISER4_LARGE_KEY +#define DE_NAME_BUF_LEN (24) +#else +#define DE_NAME_BUF_LEN (16) +#endif + +/* __FS_REISER4_PLUGIN_DIRECTORY_ENTRY_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/static_stat.c b/fs/reiser4/plugin/item/static_stat.c new file mode 100644 index 000000000000..d75d3530ac57 --- /dev/null +++ b/fs/reiser4/plugin/item/static_stat.c @@ -0,0 +1,1114 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* stat data manipulation. */ + +#include "../../forward.h" +#include "../../super.h" +#include "../../vfs_ops.h" +#include "../../inode.h" +#include "../../debug.h" +#include "../../dformat.h" +#include "../object.h" +#include "../plugin.h" +#include "../plugin_header.h" +#include "static_stat.h" +#include "item.h" + +#include <linux/types.h> +#include <linux/fs.h> + +/* see static_stat.h for explanation */ + +/* helper function used while we are dumping/loading inode/plugin state + to/from the stat-data. */ + +static void move_on(int *length /* space remaining in stat-data */ , + char **area /* current coord in stat data */ , + int size_of /* how many bytes to move forward */ ) +{ + assert("nikita-615", length != NULL); + assert("nikita-616", area != NULL); + + *length -= size_of; + *area += size_of; + + assert("nikita-617", *length >= 0); +} + +/* helper function used while loading inode/plugin state from stat-data. + Complain if there is less space in stat-data than was expected. + Can only happen on disk corruption. */ +static int not_enough_space(struct inode *inode /* object being processed */ , + const char *where /* error message */ ) +{ + assert("nikita-618", inode != NULL); + + warning("nikita-619", "Not enough space in %llu while loading %s", + (unsigned long long)get_inode_oid(inode), where); + + return RETERR(-EINVAL); +} + +/* helper function used while loading inode/plugin state from + stat-data. Call it if invalid plugin id was found. */ +static int unknown_plugin(reiser4_plugin_id id /* invalid id */ , + struct inode *inode /* object being processed */ ) +{ + warning("nikita-620", "Unknown plugin %i in %llu", + id, (unsigned long long)get_inode_oid(inode)); + + return RETERR(-EINVAL); +} + +/* this is installed as ->init_inode() method of + item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). + Copies data from on-disk stat-data format into inode. + Handles stat-data extensions. 
*/ +/* was sd_load */ +int init_inode_static_sd(struct inode *inode /* object being processed */ , + char *sd /* stat-data body */ , + int len /* length of stat-data */ ) +{ + int result; + int bit; + int chunk; + __u16 mask; + __u64 bigmask; + reiser4_stat_data_base *sd_base; + reiser4_inode *state; + + assert("nikita-625", inode != NULL); + assert("nikita-626", sd != NULL); + + result = 0; + sd_base = (reiser4_stat_data_base *) sd; + state = reiser4_inode_data(inode); + mask = le16_to_cpu(get_unaligned(&sd_base->extmask)); + bigmask = mask; + reiser4_inode_set_flag(inode, REISER4_SDLEN_KNOWN); + + move_on(&len, &sd, sizeof *sd_base); + for (bit = 0, chunk = 0; + mask != 0 || bit <= LAST_IMPORTANT_SD_EXTENSION; + ++bit, mask >>= 1) { + if (((bit + 1) % 16) != 0) { + /* handle extension */ + sd_ext_plugin *sdplug; + + if (bit >= LAST_SD_EXTENSION) { + warning("vpf-1904", + "No such extension %i in inode %llu", + bit, + (unsigned long long) + get_inode_oid(inode)); + + result = RETERR(-EINVAL); + break; + } + + sdplug = sd_ext_plugin_by_id(bit); + if (sdplug == NULL) { + warning("nikita-627", + "No such extension %i in inode %llu", + bit, + (unsigned long long) + get_inode_oid(inode)); + + result = RETERR(-EINVAL); + break; + } + if (mask & 1) { + assert("nikita-628", sdplug->present); + /* alignment is not supported in node layout + plugin yet. + result = align( inode, &len, &sd, + sdplug -> alignment ); + if( result != 0 ) + return result; */ + result = sdplug->present(inode, &sd, &len); + } else if (sdplug->absent != NULL) + result = sdplug->absent(inode); + if (result) + break; + /* else, we are looking at the last bit in 16-bit + portion of bitmask */ + } else if (mask & 1) { + /* next portion of bitmask */ + if (len < (int)sizeof(d16)) { + warning("nikita-629", + "No space for bitmap in inode %llu", + (unsigned long long) + get_inode_oid(inode)); + + result = RETERR(-EINVAL); + break; + } + mask = le16_to_cpu(get_unaligned((d16 *)sd)); + bigmask <<= 16; + bigmask |= mask; + move_on(&len, &sd, sizeof(d16)); + ++chunk; + if (chunk == 3) { + if (!(mask & 0x8000)) { + /* clear last bit */ + mask &= ~0x8000; + continue; + } + /* too much */ + warning("nikita-630", + "Too many extensions in %llu", + (unsigned long long) + get_inode_oid(inode)); + + result = RETERR(-EINVAL); + break; + } + } else + /* bitmask exhausted */ + break; + } + state->extmask = bigmask; + /* common initialisations */ + if (len - (bit / 16 * sizeof(d16)) > 0) { + /* alignment in save_len_static_sd() is taken into account + -edward */ + warning("nikita-631", "unused space in inode %llu", + (unsigned long long)get_inode_oid(inode)); + } + + return result; +} + +/* estimates size of stat-data required to store inode. + Installed as ->save_len() method of + item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). */ +/* was sd_len */ +int save_len_static_sd(struct inode *inode /* object being processed */ ) +{ + unsigned int result; + __u64 mask; + int bit; + + assert("nikita-632", inode != NULL); + + result = sizeof(reiser4_stat_data_base); + mask = reiser4_inode_data(inode)->extmask; + for (bit = 0; mask != 0; ++bit, mask >>= 1) { + if (mask & 1) { + sd_ext_plugin *sdplug; + + sdplug = sd_ext_plugin_by_id(bit); + assert("nikita-633", sdplug != NULL); + /* + no aligment support + result += + reiser4_round_up(result, sdplug -> alignment) - + result; + */ + result += sdplug->save_len(inode); + } + } + result += bit / 16 * sizeof(d16); + return result; +} + +/* saves inode into stat-data. 
+ Installed as ->save() method of + item_plugins[ STATIC_STAT_DATA_IT ] (fs/reiser4/plugin/item/item.c). */ +/* was sd_save */ +int save_static_sd(struct inode *inode /* object being processed */ , + char **area /* where to save stat-data */ ) +{ + int result; + __u64 emask; + int bit; + unsigned int len; + reiser4_stat_data_base *sd_base; + + assert("nikita-634", inode != NULL); + assert("nikita-635", area != NULL); + + result = 0; + emask = reiser4_inode_data(inode)->extmask; + sd_base = (reiser4_stat_data_base *) * area; + put_unaligned(cpu_to_le16((__u16)(emask & 0xffff)), &sd_base->extmask); + /*cputod16((unsigned)(emask & 0xffff), &sd_base->extmask);*/ + + *area += sizeof *sd_base; + len = 0xffffffffu; + for (bit = 0; emask != 0; ++bit, emask >>= 1) { + if (emask & 1) { + if ((bit + 1) % 16 != 0) { + sd_ext_plugin *sdplug; + sdplug = sd_ext_plugin_by_id(bit); + assert("nikita-636", sdplug != NULL); + /* no alignment support yet + align( inode, &len, area, + sdplug -> alignment ); */ + result = sdplug->save(inode, area); + if (result) + break; + } else { + put_unaligned(cpu_to_le16((__u16)(emask & 0xffff)), + (d16 *)(*area)); + /*cputod16((unsigned)(emask & 0xffff), + (d16 *) * area);*/ + *area += sizeof(d16); + } + } + } + return result; +} + +/* stat-data extension handling functions. */ + +static int present_lw_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + if (*len >= (int)sizeof(reiser4_light_weight_stat)) { + reiser4_light_weight_stat *sd_lw; + + sd_lw = (reiser4_light_weight_stat *) * area; + + inode->i_mode = le16_to_cpu(get_unaligned(&sd_lw->mode)); + set_nlink(inode, le32_to_cpu(get_unaligned(&sd_lw->nlink))); + inode->i_size = le64_to_cpu(get_unaligned(&sd_lw->size)); + if ((inode->i_mode & S_IFMT) == (S_IFREG | S_IFIFO)) { + inode->i_mode &= ~S_IFIFO; + warning("", "partially converted file is encountered"); + reiser4_inode_set_flag(inode, REISER4_PART_MIXED); + } + move_on(len, area, sizeof *sd_lw); + return 0; + } else + return not_enough_space(inode, "lw sd"); +} + +static int save_len_lw_sd(struct inode *inode UNUSED_ARG /* object being + * processed */ ) +{ + return sizeof(reiser4_light_weight_stat); +} + +static int save_lw_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_light_weight_stat *sd; + mode_t delta; + + assert("nikita-2705", inode != NULL); + assert("nikita-2706", area != NULL); + assert("nikita-2707", *area != NULL); + + sd = (reiser4_light_weight_stat *) * area; + + delta = (reiser4_inode_get_flag(inode, + REISER4_PART_MIXED) ? 
S_IFIFO : 0); + put_unaligned(cpu_to_le16(inode->i_mode | delta), &sd->mode); + put_unaligned(cpu_to_le32(inode->i_nlink), &sd->nlink); + put_unaligned(cpu_to_le64((__u64) inode->i_size), &sd->size); + *area += sizeof *sd; + return 0; +} + +static int present_unix_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + assert("nikita-637", inode != NULL); + assert("nikita-638", area != NULL); + assert("nikita-639", *area != NULL); + assert("nikita-640", len != NULL); + assert("nikita-641", *len > 0); + + if (*len >= (int)sizeof(reiser4_unix_stat)) { + reiser4_unix_stat *sd; + + sd = (reiser4_unix_stat *) * area; + + i_uid_write(inode, le32_to_cpu(get_unaligned(&sd->uid))); + i_gid_write(inode, le32_to_cpu(get_unaligned(&sd->gid))); + inode->i_atime.tv_sec = le32_to_cpu(get_unaligned(&sd->atime)); + inode->i_mtime.tv_sec = le32_to_cpu(get_unaligned(&sd->mtime)); + inode->i_ctime.tv_sec = le32_to_cpu(get_unaligned(&sd->ctime)); + if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) + inode->i_rdev = le64_to_cpu(get_unaligned(&sd->u.rdev)); + else + inode_set_bytes(inode, (loff_t) le64_to_cpu(get_unaligned(&sd->u.bytes))); + move_on(len, area, sizeof *sd); + return 0; + } else + return not_enough_space(inode, "unix sd"); +} + +static int absent_unix_sd(struct inode *inode /* object being processed */ ) +{ + i_uid_write(inode, get_super_private(inode->i_sb)->default_uid); + i_gid_write(inode, get_super_private(inode->i_sb)->default_gid); + inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); + inode_set_bytes(inode, inode->i_size); + /* mark inode as lightweight, so that caller (lookup_common) will + complete initialisation by copying [ug]id from a parent. 
*/ + reiser4_inode_set_flag(inode, REISER4_LIGHT_WEIGHT); + return 0; +} + +/* Audited by: green(2002.06.14) */ +static int save_len_unix_sd(struct inode *inode UNUSED_ARG /* object being + * processed */ ) +{ + return sizeof(reiser4_unix_stat); +} + +static int save_unix_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_unix_stat *sd; + + assert("nikita-642", inode != NULL); + assert("nikita-643", area != NULL); + assert("nikita-644", *area != NULL); + + sd = (reiser4_unix_stat *) * area; + put_unaligned(cpu_to_le32(i_uid_read(inode)), &sd->uid); + put_unaligned(cpu_to_le32(i_gid_read(inode)), &sd->gid); + put_unaligned(cpu_to_le32((__u32) inode->i_atime.tv_sec), &sd->atime); + put_unaligned(cpu_to_le32((__u32) inode->i_ctime.tv_sec), &sd->ctime); + put_unaligned(cpu_to_le32((__u32) inode->i_mtime.tv_sec), &sd->mtime); + if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) + put_unaligned(cpu_to_le64(inode->i_rdev), &sd->u.rdev); + else + put_unaligned(cpu_to_le64((__u64) inode_get_bytes(inode)), &sd->u.bytes); + *area += sizeof *sd; + return 0; +} + +static int +present_large_times_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + if (*len >= (int)sizeof(reiser4_large_times_stat)) { + reiser4_large_times_stat *sd_lt; + + sd_lt = (reiser4_large_times_stat *) * area; + + inode->i_atime.tv_nsec = le32_to_cpu(get_unaligned(&sd_lt->atime)); + inode->i_mtime.tv_nsec = le32_to_cpu(get_unaligned(&sd_lt->mtime)); + inode->i_ctime.tv_nsec = le32_to_cpu(get_unaligned(&sd_lt->ctime)); + + move_on(len, area, sizeof *sd_lt); + return 0; + } else + return not_enough_space(inode, "large times sd"); +} + +static int +save_len_large_times_sd(struct inode *inode UNUSED_ARG + /* object being processed */ ) +{ + return sizeof(reiser4_large_times_stat); +} + +static int +save_large_times_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_large_times_stat *sd; + + assert("nikita-2817", inode != NULL); + assert("nikita-2818", area != NULL); + assert("nikita-2819", *area != NULL); + + sd = (reiser4_large_times_stat *) * area; + + put_unaligned(cpu_to_le32((__u32) inode->i_atime.tv_nsec), &sd->atime); + put_unaligned(cpu_to_le32((__u32) inode->i_ctime.tv_nsec), &sd->ctime); + put_unaligned(cpu_to_le32((__u32) inode->i_mtime.tv_nsec), &sd->mtime); + + *area += sizeof *sd; + return 0; +} + +/* symlink stat data extension */ + +/* allocate memory for symlink target and attach it to inode->i_private */ +static int +symlink_target_to_inode(struct inode *inode, const char *target, int len) +{ + assert("vs-845", inode->i_private == NULL); + assert("vs-846", !reiser4_inode_get_flag(inode, + REISER4_GENERIC_PTR_USED)); + /* FIXME-VS: this is prone to deadlock. Not more than other similar + places, though */ + inode->i_private = kmalloc((size_t) len + 1, + reiser4_ctx_gfp_mask_get()); + if (!inode->i_private) + return RETERR(-ENOMEM); + + memcpy((char *)(inode->i_private), target, (size_t) len); + ((char *)(inode->i_private))[len] = 0; + reiser4_inode_set_flag(inode, REISER4_GENERIC_PTR_USED); + return 0; +} + +/* this is called on read_inode. 
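The save_len_*_sd()/save_*_sd() pairs above follow a two-pass pattern: first sum the sizes of the extensions that are present, then serialize each one while advancing a cursor through the reserved area (the job move_on() does in the kernel code). A minimal userspace sketch of that pattern, with hypothetical demo_* extensions and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical extension descriptor, loosely modelled on sd_ext_plugin */
struct demo_ext {
	unsigned bit;			/* position in the extension mask */
	int (*save_len)(void);		/* pass 1: how many bytes are needed */
	void (*save)(char **area);	/* pass 2: write and advance the cursor */
};

static int lw_len(void) { return 14; }
static void lw_save(char **area) { memset(*area, 0xaa, 14); *area += 14; }

static int unix_len(void) { return 28; }
static void unix_save(char **area) { memset(*area, 0xbb, 28); *area += 28; }

static const struct demo_ext exts[] = {
	{ 0, lw_len, lw_save },
	{ 1, unix_len, unix_save },
};

int main(void)
{
	uint16_t mask = (1 << 0) | (1 << 1);	/* both demo extensions present */
	size_t i, total = sizeof(mask);
	char *buf, *area;

	for (i = 0; i < sizeof(exts) / sizeof(exts[0]); i++)	/* pass 1: length */
		if (mask & (1 << exts[i].bit))
			total += exts[i].save_len();

	buf = area = malloc(total);
	if (!buf)
		return 1;
	memcpy(area, &mask, sizeof(mask));	/* host order; a sketch only */
	area += sizeof(mask);
	for (i = 0; i < sizeof(exts) / sizeof(exts[0]); i++)	/* pass 2: save */
		if (mask & (1 << exts[i].bit))
			exts[i].save(&area);

	printf("wrote %zu bytes, cursor advanced by %td\n", total, area - buf);
	free(buf);
	return 0;
}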
There is nothing to do actually, but some + sanity checks */ +static int present_symlink_sd(struct inode *inode, char **area, int *len) +{ + int result; + int length; + reiser4_symlink_stat *sd; + + length = (int)inode->i_size; + /* + * *len is number of bytes in stat data item from *area to the end of + * item. It must be not less than size of symlink + 1 for ending 0 + */ + if (length > *len) + return not_enough_space(inode, "symlink"); + + if (*(*area + length) != 0) { + warning("vs-840", "Symlink is not zero terminated"); + return RETERR(-EIO); + } + + sd = (reiser4_symlink_stat *) * area; + result = symlink_target_to_inode(inode, sd->body, length); + + move_on(len, area, length + 1); + return result; +} + +static int save_len_symlink_sd(struct inode *inode) +{ + return inode->i_size + 1; +} + +/* this is called on create and update stat data. Do nothing on update but + update @area */ +static int save_symlink_sd(struct inode *inode, char **area) +{ + int result; + int length; + reiser4_symlink_stat *sd; + + length = (int)inode->i_size; + /* inode->i_size must be set already */ + assert("vs-841", length); + + result = 0; + sd = (reiser4_symlink_stat *) * area; + if (!reiser4_inode_get_flag(inode, REISER4_GENERIC_PTR_USED)) { + const char *target; + + target = (const char *)(inode->i_private); + inode->i_private = NULL; + + result = symlink_target_to_inode(inode, target, length); + + /* copy symlink to stat data */ + memcpy(sd->body, target, (size_t) length); + (*area)[length] = 0; + } else { + /* there is nothing to do in update but move area */ + assert("vs-844", + !memcmp(inode->i_private, sd->body, + (size_t) length + 1)); + } + + *area += (length + 1); + return result; +} + +static int present_flags_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */ ) +{ + assert("nikita-645", inode != NULL); + assert("nikita-646", area != NULL); + assert("nikita-647", *area != NULL); + assert("nikita-648", len != NULL); + assert("nikita-649", *len > 0); + + if (*len >= (int)sizeof(reiser4_flags_stat)) { + reiser4_flags_stat *sd; + + sd = (reiser4_flags_stat *) * area; + inode->i_flags = le32_to_cpu(get_unaligned(&sd->flags)); + move_on(len, area, sizeof *sd); + return 0; + } else + return not_enough_space(inode, "generation and attrs"); +} + +/* Audited by: green(2002.06.14) */ +static int save_len_flags_sd(struct inode *inode UNUSED_ARG /* object being + * processed */ ) +{ + return sizeof(reiser4_flags_stat); +} + +static int save_flags_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ ) +{ + reiser4_flags_stat *sd; + + assert("nikita-650", inode != NULL); + assert("nikita-651", area != NULL); + assert("nikita-652", *area != NULL); + + sd = (reiser4_flags_stat *) * area; + put_unaligned(cpu_to_le32(inode->i_flags), &sd->flags); + *area += sizeof *sd; + return 0; +} + +static int absent_plugin_sd(struct inode *inode); +static int present_plugin_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */ , + int *len /* remaining length */, + int is_pset /* 1 if plugin set, 0 if heir set. 
*/) +{ + reiser4_plugin_stat *sd; + reiser4_plugin *plugin; + reiser4_inode *info; + int i; + __u16 mask; + int result; + int num_of_plugins; + + assert("nikita-653", inode != NULL); + assert("nikita-654", area != NULL); + assert("nikita-655", *area != NULL); + assert("nikita-656", len != NULL); + assert("nikita-657", *len > 0); + + if (*len < (int)sizeof(reiser4_plugin_stat)) + return not_enough_space(inode, "plugin"); + + sd = (reiser4_plugin_stat *) * area; + info = reiser4_inode_data(inode); + + mask = 0; + num_of_plugins = le16_to_cpu(get_unaligned(&sd->plugins_no)); + move_on(len, area, sizeof *sd); + result = 0; + for (i = 0; i < num_of_plugins; ++i) { + reiser4_plugin_slot *slot; + reiser4_plugin_type type; + pset_member memb; + + slot = (reiser4_plugin_slot *) * area; + if (*len < (int)sizeof *slot) + return not_enough_space(inode, "additional plugin"); + + memb = le16_to_cpu(get_unaligned(&slot->pset_memb)); + type = aset_member_to_type_unsafe(memb); + + if (type == REISER4_PLUGIN_TYPES) { + warning("nikita-3502", + "wrong %s member (%i) for %llu", is_pset ? + "pset" : "hset", memb, + (unsigned long long)get_inode_oid(inode)); + return RETERR(-EINVAL); + } + plugin = plugin_by_disk_id(reiser4_tree_by_inode(inode), + type, &slot->id); + if (plugin == NULL) + return unknown_plugin(le16_to_cpu(get_unaligned(&slot->id)), inode); + + /* plugin is loaded into inode, mark this into inode's + bitmask of loaded non-standard plugins */ + if (!(mask & (1 << memb))) { + mask |= (1 << memb); + } else { + warning("nikita-658", "duplicate plugin for %llu", + (unsigned long long)get_inode_oid(inode)); + return RETERR(-EINVAL); + } + move_on(len, area, sizeof *slot); + /* load plugin data, if any */ + if (plugin->h.pops != NULL && plugin->h.pops->load) + result = plugin->h.pops->load(inode, plugin, area, len); + else + result = aset_set_unsafe(is_pset ? &info->pset : + &info->hset, memb, plugin); + if (result) + return result; + } + if (is_pset) { + /* if object plugin wasn't loaded from stat-data, guess it by + mode bits */ + plugin = file_plugin_to_plugin(inode_file_plugin(inode)); + if (plugin == NULL) + result = absent_plugin_sd(inode); + info->plugin_mask = mask; + } else + info->heir_mask = mask; + + return result; +} + +static int present_pset_sd(struct inode *inode, char **area, int *len) { + return present_plugin_sd(inode, area, len, 1 /* pset */); +} + +/* Determine object plugin for @inode based on i_mode. + + Many objects in reiser4 file system are controlled by standard object + plugins that emulate traditional unix objects: unix file, directory, symlink, fifo, and so on. + + For such files we don't explicitly store plugin id in object stat + data. Rather required plugin is guessed from mode bits, where file "type" + is encoded (see stat(2)). 
+*/ +static int +guess_plugin_by_mode(struct inode *inode /* object to guess plugins for */ ) +{ + int fplug_id; + int dplug_id; + reiser4_inode *info; + + assert("nikita-736", inode != NULL); + + dplug_id = fplug_id = -1; + + switch (inode->i_mode & S_IFMT) { + case S_IFSOCK: + case S_IFBLK: + case S_IFCHR: + case S_IFIFO: + fplug_id = SPECIAL_FILE_PLUGIN_ID; + break; + case S_IFLNK: + fplug_id = SYMLINK_FILE_PLUGIN_ID; + break; + case S_IFDIR: + fplug_id = DIRECTORY_FILE_PLUGIN_ID; + dplug_id = HASHED_DIR_PLUGIN_ID; + break; + default: + warning("nikita-737", "wrong file mode: %o", inode->i_mode); + return RETERR(-EIO); + case S_IFREG: + fplug_id = UNIX_FILE_PLUGIN_ID; + break; + } + info = reiser4_inode_data(inode); + set_plugin(&info->pset, PSET_FILE, (fplug_id >= 0) ? + plugin_by_id(REISER4_FILE_PLUGIN_TYPE, fplug_id) : NULL); + set_plugin(&info->pset, PSET_DIR, (dplug_id >= 0) ? + plugin_by_id(REISER4_DIR_PLUGIN_TYPE, dplug_id) : NULL); + return 0; +} + +/* Audited by: green(2002.06.14) */ +static int absent_plugin_sd(struct inode *inode /* object being processed */ ) +{ + int result; + + assert("nikita-659", inode != NULL); + + result = guess_plugin_by_mode(inode); + /* if mode was wrong, guess_plugin_by_mode() returns "regular file", + but setup_inode_ops() will call make_bad_inode(). + Another, more logical but bit more complex solution is to add + "bad-file plugin". */ + /* FIXME-VS: activate was called here */ + return result; +} + +/* helper function for plugin_sd_save_len(): calculate how much space + required to save state of given plugin */ +/* Audited by: green(2002.06.14) */ +static int len_for(reiser4_plugin * plugin /* plugin to save */ , + struct inode *inode /* object being processed */ , + pset_member memb, + int len, int is_pset) +{ + reiser4_inode *info; + assert("nikita-661", inode != NULL); + + if (plugin == NULL) + return len; + + info = reiser4_inode_data(inode); + if (is_pset ? + info->plugin_mask & (1 << memb) : + info->heir_mask & (1 << memb)) { + len += sizeof(reiser4_plugin_slot); + if (plugin->h.pops && plugin->h.pops->save_len != NULL) { + /* + * non-standard plugin, call method + * commented as it is incompatible with alignment + * policy in save_plug() -edward + * + * len = reiser4_round_up(len, + * plugin->h.pops->alignment); + */ + len += plugin->h.pops->save_len(inode, plugin); + } + } + return len; +} + +/* calculate how much space is required to save state of all plugins, + associated with inode */ +static int save_len_plugin_sd(struct inode *inode /* object being processed */, + int is_pset) +{ + int len; + int last; + reiser4_inode *state; + pset_member memb; + + assert("nikita-663", inode != NULL); + + state = reiser4_inode_data(inode); + + /* common case: no non-standard plugins */ + if (is_pset ? state->plugin_mask == 0 : state->heir_mask == 0) + return 0; + len = sizeof(reiser4_plugin_stat); + last = PSET_LAST; + + for (memb = 0; memb < last; ++memb) { + len = len_for(aset_get(is_pset ? state->pset : state->hset, memb), + inode, memb, len, is_pset); + } + assert("nikita-664", len > (int)sizeof(reiser4_plugin_stat)); + return len; +} + +static int save_len_pset_sd(struct inode *inode) { + return save_len_plugin_sd(inode, 1 /* pset */); +} + +/* helper function for plugin_sd_save(): save plugin, associated with + inode. 
*/ +static int save_plug(reiser4_plugin * plugin /* plugin to save */ , + struct inode *inode /* object being processed */ , + int memb /* what element of pset is saved */ , + char **area /* position in stat-data */ , + int *count /* incremented if plugin were actually saved. */, + int is_pset /* 1 for plugin set, 0 for heir set */) +{ + reiser4_plugin_slot *slot; + int fake_len; + int result; + + assert("nikita-665", inode != NULL); + assert("nikita-666", area != NULL); + assert("nikita-667", *area != NULL); + + if (plugin == NULL) + return 0; + + if (is_pset ? + !(reiser4_inode_data(inode)->plugin_mask & (1 << memb)) : + !(reiser4_inode_data(inode)->heir_mask & (1 << memb))) + return 0; + slot = (reiser4_plugin_slot *) * area; + put_unaligned(cpu_to_le16(memb), &slot->pset_memb); + put_unaligned(cpu_to_le16(plugin->h.id), &slot->id); + fake_len = (int)0xffff; + move_on(&fake_len, area, sizeof *slot); + ++*count; + result = 0; + if (plugin->h.pops != NULL) { + if (plugin->h.pops->save != NULL) + result = plugin->h.pops->save(inode, plugin, area); + } + return result; +} + +/* save state of all non-standard plugins associated with inode */ +static int save_plugin_sd(struct inode *inode /* object being processed */ , + char **area /* position in stat-data */, + int is_pset /* 1 for pset, 0 for hset */) +{ + int fake_len; + int result = 0; + int num_of_plugins; + reiser4_plugin_stat *sd; + reiser4_inode *state; + pset_member memb; + + assert("nikita-669", inode != NULL); + assert("nikita-670", area != NULL); + assert("nikita-671", *area != NULL); + + state = reiser4_inode_data(inode); + if (is_pset ? state->plugin_mask == 0 : state->heir_mask == 0) + return 0; + sd = (reiser4_plugin_stat *) * area; + fake_len = (int)0xffff; + move_on(&fake_len, area, sizeof *sd); + + num_of_plugins = 0; + for (memb = 0; memb < PSET_LAST; ++memb) { + result = save_plug(aset_get(is_pset ? state->pset : state->hset, + memb), + inode, memb, area, &num_of_plugins, is_pset); + if (result != 0) + break; + } + + put_unaligned(cpu_to_le16((__u16)num_of_plugins), &sd->plugins_no); + return result; +} + +static int save_pset_sd(struct inode *inode, char **area) { + return save_plugin_sd(inode, area, 1 /* pset */); +} + +static int present_hset_sd(struct inode *inode, char **area, int *len) { + return present_plugin_sd(inode, area, len, 0 /* hset */); +} + +static int save_len_hset_sd(struct inode *inode) { + return save_len_plugin_sd(inode, 0 /* pset */); +} + +static int save_hset_sd(struct inode *inode, char **area) { + return save_plugin_sd(inode, area, 0 /* hset */); +} + +/* helper function for crypto_sd_present(), crypto_sd_save. 
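save_plugin_sd()/save_plug() above emit a (member, plugin id) slot only for pset members whose bit is set in plugin_mask, and record the number of slots written in the extension header. A small userspace sketch of that selective serialization; the demo_* types and ids are assumptions, not the on-disk reiser4 layout:

#include <stdint.h>
#include <stdio.h>

/* hypothetical on-disk slot: which pset member, and which plugin id */
struct demo_slot {
	uint16_t memb;
	uint16_t id;
};

struct demo_pset_entry {
	uint16_t plugin_id;	/* plugin currently installed for this member */
};

/* write one slot per member whose bit is set in @mask; return the slot
 * count that would be stored in the header as plugins_no */
static unsigned demo_save_pset(const struct demo_pset_entry *pset,
			       unsigned members, uint16_t mask,
			       struct demo_slot *out)
{
	unsigned memb, n = 0;

	for (memb = 0; memb < members; memb++) {
		if (!(mask & (1u << memb)))
			continue;	/* standard plugin: nothing stored on disk */
		out[n].memb = (uint16_t)memb;
		out[n].id = pset[memb].plugin_id;
		n++;
	}
	return n;
}

int main(void)
{
	struct demo_pset_entry pset[4] = { {10}, {11}, {12}, {13} };
	struct demo_slot slots[4];
	/* only members 1 and 3 deviate from the defaults */
	unsigned i, n = demo_save_pset(pset, 4, (1u << 1) | (1u << 3), slots);

	for (i = 0; i < n; i++)
		printf("slot: memb=%u id=%u\n",
		       (unsigned)slots[i].memb, (unsigned)slots[i].id);
	return 0;
}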
+ Extract crypto info from stat-data and attach it to inode */ +static int extract_crypto_info (struct inode * inode, + reiser4_crypto_stat * sd) +{ + struct reiser4_crypto_info * info; + assert("edward-11", !inode_crypto_info(inode)); + assert("edward-1413", + !reiser4_inode_get_flag(inode, REISER4_CRYPTO_STAT_LOADED)); + /* create and attach a crypto-stat without secret key loaded */ + info = reiser4_alloc_crypto_info(inode); + if (IS_ERR(info)) + return PTR_ERR(info); + info->keysize = le16_to_cpu(get_unaligned(&sd->keysize)); + memcpy(info->keyid, sd->keyid, inode_digest_plugin(inode)->fipsize); + reiser4_attach_crypto_info(inode, info); + reiser4_inode_set_flag(inode, REISER4_CRYPTO_STAT_LOADED); + return 0; +} + +/* crypto stat-data extension */ + +static int present_crypto_sd(struct inode *inode, char **area, int *len) +{ + int result; + reiser4_crypto_stat *sd; + digest_plugin *dplug = inode_digest_plugin(inode); + + assert("edward-06", dplug != NULL); + assert("edward-684", dplug->fipsize); + assert("edward-07", area != NULL); + assert("edward-08", *area != NULL); + assert("edward-09", len != NULL); + assert("edward-10", *len > 0); + + if (*len < (int)sizeof(reiser4_crypto_stat)) { + return not_enough_space(inode, "crypto-sd"); + } + /* *len is number of bytes in stat data item from *area to the end of + item. It must be not less than size of this extension */ + assert("edward-75", sizeof(*sd) + dplug->fipsize <= *len); + + sd = (reiser4_crypto_stat *) * area; + result = extract_crypto_info(inode, sd); + move_on(len, area, sizeof(*sd) + dplug->fipsize); + + return result; +} + +static int save_len_crypto_sd(struct inode *inode) +{ + return sizeof(reiser4_crypto_stat) + + inode_digest_plugin(inode)->fipsize; +} + +static int save_crypto_sd(struct inode *inode, char **area) +{ + int result = 0; + reiser4_crypto_stat *sd; + struct reiser4_crypto_info * info = inode_crypto_info(inode); + digest_plugin *dplug = inode_digest_plugin(inode); + + assert("edward-12", dplug != NULL); + assert("edward-13", area != NULL); + assert("edward-14", *area != NULL); + assert("edward-15", info != NULL); + assert("edward-1414", info->keyid != NULL); + assert("edward-1415", info->keysize != 0); + assert("edward-76", reiser4_inode_data(inode) != NULL); + + if (!reiser4_inode_get_flag(inode, REISER4_CRYPTO_STAT_LOADED)) { + /* file is just created */ + sd = (reiser4_crypto_stat *) *area; + /* copy everything but private key to the disk stat-data */ + put_unaligned(cpu_to_le16(info->keysize), &sd->keysize); + memcpy(sd->keyid, info->keyid, (size_t) dplug->fipsize); + reiser4_inode_set_flag(inode, REISER4_CRYPTO_STAT_LOADED); + } + *area += (sizeof(*sd) + dplug->fipsize); + return result; +} + +static int eio(struct inode *inode, char **area, int *len) +{ + return RETERR(-EIO); +} + +sd_ext_plugin sd_ext_plugins[LAST_SD_EXTENSION] = { + [LIGHT_WEIGHT_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = LIGHT_WEIGHT_STAT, + .pops = NULL, + .label = "light-weight sd", + .desc = "sd for light-weight files", + .linkage = {NULL,NULL} + }, + .present = present_lw_sd, + .absent = NULL, + .save_len = save_len_lw_sd, + .save = save_lw_sd, + .alignment = 8 + }, + [UNIX_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = UNIX_STAT, + .pops = NULL, + .label = "unix-sd", + .desc = "unix stat-data fields", + .linkage = {NULL,NULL} + }, + .present = present_unix_sd, + .absent = absent_unix_sd, + .save_len = save_len_unix_sd, + .save = save_unix_sd, + .alignment = 8 + }, + [LARGE_TIMES_STAT] 
= { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = LARGE_TIMES_STAT, + .pops = NULL, + .label = "64time-sd", + .desc = "nanosecond resolution for times", + .linkage = {NULL,NULL} + }, + .present = present_large_times_sd, + .absent = NULL, + .save_len = save_len_large_times_sd, + .save = save_large_times_sd, + .alignment = 8 + }, + [SYMLINK_STAT] = { + /* stat data of symlink has this extension */ + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = SYMLINK_STAT, + .pops = NULL, + .label = "symlink-sd", + .desc = + "stat data is appended with symlink name", + .linkage = {NULL,NULL} + }, + .present = present_symlink_sd, + .absent = NULL, + .save_len = save_len_symlink_sd, + .save = save_symlink_sd, + .alignment = 8 + }, + [PLUGIN_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = PLUGIN_STAT, + .pops = NULL, + .label = "plugin-sd", + .desc = "plugin stat-data fields", + .linkage = {NULL,NULL} + }, + .present = present_pset_sd, + .absent = absent_plugin_sd, + .save_len = save_len_pset_sd, + .save = save_pset_sd, + .alignment = 8 + }, + [HEIR_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = HEIR_STAT, + .pops = NULL, + .label = "heir-plugin-sd", + .desc = "heir plugin stat-data fields", + .linkage = {NULL,NULL} + }, + .present = present_hset_sd, + .absent = NULL, + .save_len = save_len_hset_sd, + .save = save_hset_sd, + .alignment = 8 + }, + [FLAGS_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = FLAGS_STAT, + .pops = NULL, + .label = "flags-sd", + .desc = "inode bit flags", + .linkage = {NULL, NULL} + }, + .present = present_flags_sd, + .absent = NULL, + .save_len = save_len_flags_sd, + .save = save_flags_sd, + .alignment = 8 + }, + [CAPABILITIES_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = CAPABILITIES_STAT, + .pops = NULL, + .label = "capabilities-sd", + .desc = "capabilities", + .linkage = {NULL, NULL} + }, + .present = eio, + .absent = NULL, + .save_len = save_len_flags_sd, + .save = save_flags_sd, + .alignment = 8 + }, + [CRYPTO_STAT] = { + .h = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .id = CRYPTO_STAT, + .pops = NULL, + .label = "crypto-sd", + .desc = "secret key size and id", + .linkage = {NULL, NULL} + }, + .present = present_crypto_sd, + .absent = NULL, + .save_len = save_len_crypto_sd, + .save = save_crypto_sd, + .alignment = 8 + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/static_stat.h b/fs/reiser4/plugin/item/static_stat.h new file mode 100644 index 000000000000..dd20eb3f2d7d --- /dev/null +++ b/fs/reiser4/plugin/item/static_stat.h @@ -0,0 +1,224 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* This describes the static_stat item, used to hold all information needed by the stat() syscall. + +In the case where each file has not less than the fields needed by the +stat() syscall, it is more compact to store those fields in this +struct. + +If this item does not exist, then all stats are dynamically resolved. +At the moment, we either resolve all stats dynamically or all of them +statically. 
If you think this is not fully optimal, and the rest of +reiser4 is working, then fix it...:-) + +*/ + +#if !defined( __FS_REISER4_PLUGIN_ITEM_STATIC_STAT_H__ ) +#define __FS_REISER4_PLUGIN_ITEM_STATIC_STAT_H__ + +#include "../../forward.h" +#include "../../dformat.h" + +#include <linux/fs.h> /* for struct inode */ + +/* Stat data layout: goals and implementation. + + We want to be able to have lightweight files which have complete flexibility in what semantic metadata is attached to + them, including not having semantic metadata attached to them. + + There is one problem with doing that, which is that if in fact you have exactly the same metadata for most files you + want to store, then it takes more space to store that metadata in a dynamically sized structure than in a statically + sized structure because the statically sized structure knows without recording it what the names and lengths of the + attributes are. + + This leads to a natural compromise, which is to special case those files which have simply the standard unix file + attributes, and only employ the full dynamic stat data mechanism for those files that differ from the standard unix + file in their use of file attributes. + + Yet this compromise deserves to be compromised a little. + + We accommodate the case where you have no more than the standard unix file attributes by using an "extension + bitmask": each bit in it indicates presence or absence of or particular stat data extension (see sd_ext_bits enum). + + If the first bit of the extension bitmask bit is 0, we have light-weight file whose attributes are either inherited + from parent directory (as uid, gid) or initialised to some sane values. + + To capitalize on existing code infrastructure, extensions are + implemented as plugins of type REISER4_SD_EXT_PLUGIN_TYPE. + Each stat-data extension plugin implements four methods: + + ->present() called by sd_load() when this extension is found in stat-data + ->absent() called by sd_load() when this extension is not found in stat-data + ->save_len() called by sd_len() to calculate total length of stat-data + ->save() called by sd_save() to store extension data into stat-data + + Implementation is in fs/reiser4/plugin/item/static_stat.c +*/ + +/* stat-data extension. Please order this by presumed frequency of use */ +typedef enum { + /* support for light-weight files */ + LIGHT_WEIGHT_STAT, + /* data required to implement unix stat(2) call. Layout is in + reiser4_unix_stat. If this is not present, file is light-weight */ + UNIX_STAT, + /* this contains additional set of 32bit [anc]time fields to implement + nanosecond resolution. Layout is in reiser4_large_times_stat. Usage + if this extension is governed by 32bittimes mount option. */ + LARGE_TIMES_STAT, + /* stat data has link name included */ + SYMLINK_STAT, + /* on-disk slots of non-standard plugins for main plugin table + (@reiser4_inode->pset), that is, plugins that cannot be deduced + from file mode bits), for example, aggregation, interpolation etc. */ + PLUGIN_STAT, + /* this extension contains persistent inode flags. These flags are + single bits: immutable, append, only, etc. Layout is in + reiser4_flags_stat. */ + FLAGS_STAT, + /* this extension contains capabilities sets, associated with this + file. Layout is in reiser4_capabilities_stat */ + CAPABILITIES_STAT, + /* this extension contains size and public id of the secret key. 
+ Layout is in reiser4_crypto_stat */ + CRYPTO_STAT, + /* on-disk slots of non-default plugins for inheritance, which + are extracted to special plugin table (@reiser4_inode->hset). + By default, children of the object will inherit plugins from + its main plugin table (pset). */ + HEIR_STAT, + LAST_SD_EXTENSION, + /* + * init_inode_static_sd() iterates over extension mask until all + * non-zero bits are processed. This means, that neither ->present(), + * nor ->absent() methods will be called for stat-data extensions that + * go after last present extension. But some basic extensions, we want + * either ->absent() or ->present() method to be called, because these + * extensions set up something in inode even when they are not + * present. This is what LAST_IMPORTANT_SD_EXTENSION is for: for all + * extensions before and including LAST_IMPORTANT_SD_EXTENSION either + * ->present(), or ->absent() method will be called, independently of + * what other extensions are present. + */ + LAST_IMPORTANT_SD_EXTENSION = PLUGIN_STAT +} sd_ext_bits; + +/* minimal stat-data. This allows to support light-weight files. */ +typedef struct reiser4_stat_data_base { + /* 0 */ __le16 extmask; + /* 2 */ +} PACKED reiser4_stat_data_base; + +typedef struct reiser4_light_weight_stat { + /* 0 */ __le16 mode; + /* 2 */ __le32 nlink; + /* 6 */ __le64 size; + /* size in bytes */ + /* 14 */ +} PACKED reiser4_light_weight_stat; + +typedef struct reiser4_unix_stat { + /* owner id */ + /* 0 */ __le32 uid; + /* group id */ + /* 4 */ __le32 gid; + /* access time */ + /* 8 */ __le32 atime; + /* modification time */ + /* 12 */ __le32 mtime; + /* change time */ + /* 16 */ __le32 ctime; + union { + /* minor:major for device files */ + /* 20 */ __le64 rdev; + /* bytes used by file */ + /* 20 */ __le64 bytes; + } u; + /* 28 */ +} PACKED reiser4_unix_stat; + +/* symlink stored as part of inode */ +typedef struct reiser4_symlink_stat { + char body[0]; +} PACKED reiser4_symlink_stat; + +typedef struct reiser4_plugin_slot { + /* 0 */ __le16 pset_memb; + /* 2 */ __le16 id; + /* 4 *//* here plugin stores its persistent state */ +} PACKED reiser4_plugin_slot; + +/* stat-data extension for files with non-standard plugin. */ +typedef struct reiser4_plugin_stat { + /* number of additional plugins, associated with this object */ + /* 0 */ __le16 plugins_no; + /* 2 */ reiser4_plugin_slot slot[0]; + /* 2 */ +} PACKED reiser4_plugin_stat; + +/* stat-data extension for inode flags. Currently it is just fixed-width 32 + * bit mask. If need arise, this can be replaced with variable width + * bitmask. 
*/ +typedef struct reiser4_flags_stat { + /* 0 */ __le32 flags; + /* 4 */ +} PACKED reiser4_flags_stat; + +typedef struct reiser4_capabilities_stat { + /* 0 */ __le32 effective; + /* 8 */ __le32 permitted; + /* 16 */ +} PACKED reiser4_capabilities_stat; + +typedef struct reiser4_cluster_stat { +/* this defines cluster size (an attribute of cryptcompress objects) as PAGE_SIZE << cluster shift */ + /* 0 */ d8 cluster_shift; + /* 1 */ +} PACKED reiser4_cluster_stat; + +typedef struct reiser4_crypto_stat { + /* secret key size, bits */ + /* 0 */ d16 keysize; + /* secret key id */ + /* 2 */ d8 keyid[0]; + /* 2 */ +} PACKED reiser4_crypto_stat; + +typedef struct reiser4_large_times_stat { + /* access time */ + /* 0 */ d32 atime; + /* modification time */ + /* 4 */ d32 mtime; + /* change time */ + /* 8 */ d32 ctime; + /* 12 */ +} PACKED reiser4_large_times_stat; + +/* this structure is filled by sd_item_stat */ +typedef struct sd_stat { + int dirs; + int files; + int others; +} sd_stat; + +/* plugin->item.common.* */ +extern void print_sd(const char *prefix, coord_t * coord); +extern void item_stat_static_sd(const coord_t * coord, void *vp); + +/* plugin->item.s.sd.* */ +extern int init_inode_static_sd(struct inode *inode, char *sd, int len); +extern int save_len_static_sd(struct inode *inode); +extern int save_static_sd(struct inode *inode, char **area); + +/* __FS_REISER4_PLUGIN_ITEM_STATIC_STAT_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/item/tail.c b/fs/reiser4/plugin/item/tail.c new file mode 100644 index 000000000000..436a67ae1c61 --- /dev/null +++ b/fs/reiser4/plugin/item/tail.c @@ -0,0 +1,810 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "item.h" +#include "../../inode.h" +#include "../../page_cache.h" +#include "../../carry.h" +#include "../../vfs_ops.h" + +#include <asm/uaccess.h> +#include <linux/swap.h> +#include <linux/writeback.h> + +/* plugin->u.item.b.max_key_inside */ +reiser4_key *max_key_inside_tail(const coord_t *coord, reiser4_key *key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, get_key_offset(reiser4_max_key())); + return key; +} + +/* plugin->u.item.b.can_contain_key */ +int can_contain_key_tail(const coord_t *coord, const reiser4_key *key, + const reiser4_item_data *data) +{ + reiser4_key item_key; + + if (item_plugin_by_coord(coord) != data->iplug) + return 0; + + item_key_by_coord(coord, &item_key); + if (get_key_locality(key) != get_key_locality(&item_key) || + get_key_objectid(key) != get_key_objectid(&item_key)) + return 0; + + return 1; +} + +/* plugin->u.item.b.mergeable + first item is of tail type */ +/* Audited by: green(2002.06.14) */ +int mergeable_tail(const coord_t *p1, const coord_t *p2) +{ + reiser4_key key1, key2; + + assert("vs-535", plugin_of_group(item_plugin_by_coord(p1), + UNIX_FILE_METADATA_ITEM_TYPE)); + assert("vs-365", item_id_by_coord(p1) == FORMATTING_ID); + + if (item_id_by_coord(p2) != FORMATTING_ID) { + /* second item is of another type */ + return 0; + } + + item_key_by_coord(p1, &key1); + item_key_by_coord(p2, &key2); + if (get_key_locality(&key1) != get_key_locality(&key2) || + get_key_objectid(&key1) != get_key_objectid(&key2) + || get_key_type(&key1) != get_key_type(&key2)) { + /* items of different objects */ + return 0; + } + if (get_key_offset(&key1) + nr_units_tail(p1) != get_key_offset(&key2)) { + /* not 
adjacent items */ + return 0; + } + return 1; +} + +/* plugin->u.item.b.print + plugin->u.item.b.check */ + +/* plugin->u.item.b.nr_units */ +pos_in_node_t nr_units_tail(const coord_t * coord) +{ + return item_length_by_coord(coord); +} + +/* plugin->u.item.b.lookup */ +lookup_result +lookup_tail(const reiser4_key * key, lookup_bias bias, coord_t * coord) +{ + reiser4_key item_key; + __u64 lookuped, offset; + unsigned nr_units; + + item_key_by_coord(coord, &item_key); + offset = get_key_offset(item_key_by_coord(coord, &item_key)); + nr_units = nr_units_tail(coord); + + /* key we are looking for must be greater than key of item @coord */ + assert("vs-416", keygt(key, &item_key)); + + /* offset we are looking for */ + lookuped = get_key_offset(key); + + if (lookuped >= offset && lookuped < offset + nr_units) { + /* byte we are looking for is in this item */ + coord->unit_pos = lookuped - offset; + coord->between = AT_UNIT; + return CBK_COORD_FOUND; + } + + /* set coord after last unit */ + coord->unit_pos = nr_units - 1; + coord->between = AFTER_UNIT; + return bias == + FIND_MAX_NOT_MORE_THAN ? CBK_COORD_FOUND : CBK_COORD_NOTFOUND; +} + +/* plugin->u.item.b.paste */ +int +paste_tail(coord_t *coord, reiser4_item_data *data, + carry_plugin_info *info UNUSED_ARG) +{ + unsigned old_item_length; + char *item; + + /* length the item had before resizing has been performed */ + old_item_length = item_length_by_coord(coord) - data->length; + + /* tail items never get pasted in the middle */ + assert("vs-363", + (coord->unit_pos == 0 && coord->between == BEFORE_UNIT) || + (coord->unit_pos == old_item_length - 1 && + coord->between == AFTER_UNIT) || + (coord->unit_pos == 0 && old_item_length == 0 + && coord->between == AT_UNIT)); + + item = item_body_by_coord(coord); + if (coord->unit_pos == 0) + /* make space for pasted data when pasting at the beginning of + the item */ + memmove(item + data->length, item, old_item_length); + + if (coord->between == AFTER_UNIT) + coord->unit_pos++; + + if (data->data) { + assert("vs-554", data->user == 0 || data->user == 1); + if (data->user) { + assert("nikita-3035", reiser4_schedulable()); + /* copy from user space */ + if (__copy_from_user(item + coord->unit_pos, + (const char __user *)data->data, + (unsigned)data->length)) + return RETERR(-EFAULT); + } else + /* copy from kernel space */ + memcpy(item + coord->unit_pos, data->data, + (unsigned)data->length); + } else { + memset(item + coord->unit_pos, 0, (unsigned)data->length); + } + return 0; +} + +/* plugin->u.item.b.fast_paste */ + +/* plugin->u.item.b.can_shift + number of units is returned via return value, number of bytes via @size. 
For + tail items they coincide */ +int +can_shift_tail(unsigned free_space, coord_t * source UNUSED_ARG, + znode * target UNUSED_ARG, shift_direction direction UNUSED_ARG, + unsigned *size, unsigned want) +{ + /* make sure that that we do not want to shift more than we have */ + assert("vs-364", want > 0 + && want <= (unsigned)item_length_by_coord(source)); + + *size = min(want, free_space); + return *size; +} + +/* plugin->u.item.b.copy_units */ +void +copy_units_tail(coord_t * target, coord_t * source, + unsigned from, unsigned count, + shift_direction where_is_free_space, + unsigned free_space UNUSED_ARG) +{ + /* make sure that item @target is expanded already */ + assert("vs-366", (unsigned)item_length_by_coord(target) >= count); + assert("vs-370", free_space >= count); + + if (where_is_free_space == SHIFT_LEFT) { + /* append item @target with @count first bytes of @source */ + assert("vs-365", from == 0); + + memcpy((char *)item_body_by_coord(target) + + item_length_by_coord(target) - count, + (char *)item_body_by_coord(source), count); + } else { + /* target item is moved to right already */ + reiser4_key key; + + assert("vs-367", + (unsigned)item_length_by_coord(source) == from + count); + + memcpy((char *)item_body_by_coord(target), + (char *)item_body_by_coord(source) + from, count); + + /* new units are inserted before first unit in an item, + therefore, we have to update item key */ + item_key_by_coord(source, &key); + set_key_offset(&key, get_key_offset(&key) + from); + + node_plugin_by_node(target->node)->update_item_key(target, &key, + NULL /*info */); + } +} + +/* plugin->u.item.b.create_hook */ + +/* item_plugin->b.kill_hook + this is called when @count units starting from @from-th one are going to be removed + */ +int +kill_hook_tail(const coord_t * coord, pos_in_node_t from, + pos_in_node_t count, struct carry_kill_data *kdata) +{ + reiser4_key key; + loff_t start, end; + + assert("vs-1577", kdata); + assert("vs-1579", kdata->inode); + + item_key_by_coord(coord, &key); + start = get_key_offset(&key) + from; + end = start + count; + fake_kill_hook_tail(kdata->inode, start, end, kdata->params.truncate); + return 0; +} + +/* plugin->u.item.b.shift_hook */ + +/* helper for kill_units_tail and cut_units_tail */ +static int +do_cut_or_kill(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + pos_in_node_t count; + + /* this method is only called to remove part of item */ + assert("vs-374", (to - from + 1) < item_length_by_coord(coord)); + /* tails items are never cut from the middle of an item */ + assert("vs-396", ergo(from != 0, to == coord_last_unit_pos(coord))); + assert("vs-1558", ergo(from == 0, to < coord_last_unit_pos(coord))); + + count = to - from + 1; + + if (smallest_removed) { + /* store smallest key removed */ + item_key_by_coord(coord, smallest_removed); + set_key_offset(smallest_removed, + get_key_offset(smallest_removed) + from); + } + if (new_first) { + /* head of item is cut */ + assert("vs-1529", from == 0); + + item_key_by_coord(coord, new_first); + set_key_offset(new_first, + get_key_offset(new_first) + from + count); + } + + if (REISER4_DEBUG) + memset((char *)item_body_by_coord(coord) + from, 0, count); + return count; +} + +/* plugin->u.item.b.cut_units */ +int +cut_units_tail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *cdata UNUSED_ARG, + reiser4_key * smallest_removed, reiser4_key * new_first) +{ + return do_cut_or_kill(coord, from, to, 
smallest_removed, new_first); +} + +/* plugin->u.item.b.kill_units */ +int +kill_units_tail(coord_t * coord, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *kdata, reiser4_key * smallest_removed, + reiser4_key * new_first) +{ + kill_hook_tail(coord, from, to - from + 1, kdata); + return do_cut_or_kill(coord, from, to, smallest_removed, new_first); +} + +/* plugin->u.item.b.unit_key */ +reiser4_key *unit_key_tail(const coord_t * coord, reiser4_key * key) +{ + assert("vs-375", coord_is_existing_unit(coord)); + + item_key_by_coord(coord, key); + set_key_offset(key, (get_key_offset(key) + coord->unit_pos)); + + return key; +} + +/* plugin->u.item.b.estimate + plugin->u.item.b.item_data_by_flow */ + +/* tail redpage function. It is called from readpage_tail(). */ +static int do_readpage_tail(uf_coord_t *uf_coord, struct page *page) +{ + tap_t tap; + int result; + coord_t coord; + lock_handle lh; + int count, mapped; + struct inode *inode; + char *pagedata; + + /* saving passed coord in order to do not move it by tap. */ + init_lh(&lh); + copy_lh(&lh, uf_coord->lh); + inode = page->mapping->host; + coord_dup(&coord, &uf_coord->coord); + + reiser4_tap_init(&tap, &coord, &lh, ZNODE_READ_LOCK); + + if ((result = reiser4_tap_load(&tap))) + goto out_tap_done; + + /* lookup until page is filled up. */ + for (mapped = 0; mapped < PAGE_SIZE; ) { + /* number of bytes to be copied to page */ + count = item_length_by_coord(&coord) - coord.unit_pos; + if (count > PAGE_SIZE - mapped) + count = PAGE_SIZE - mapped; + + /* attach @page to address space and get data address */ + pagedata = kmap_atomic(page); + + /* copy tail item to page */ + memcpy(pagedata + mapped, + ((char *)item_body_by_coord(&coord) + coord.unit_pos), + count); + mapped += count; + + flush_dcache_page(page); + + /* dettach page from address space */ + kunmap_atomic(pagedata); + + /* Getting next tail item. */ + if (mapped < PAGE_SIZE) { + /* + * unlock page in order to avoid keep it locked + * during tree lookup, which takes long term locks + */ + unlock_page(page); + + /* getting right neighbour. */ + result = go_dir_el(&tap, RIGHT_SIDE, 0); + + /* lock page back */ + lock_page(page); + if (PageUptodate(page)) { + /* + * another thread read the page, we have + * nothing to do + */ + result = 0; + goto out_unlock_page; + } + + if (result) { + if (result == -E_NO_NEIGHBOR) { + /* + * rigth neighbor is not a formatted + * node + */ + result = 0; + goto done; + } else { + goto out_tap_relse; + } + } else { + if (!inode_file_plugin(inode)-> + owns_item(inode, &coord)) { + /* item of another file is found */ + result = 0; + goto done; + } + } + } + } + + done: + if (mapped != PAGE_SIZE) + zero_user_segment(page, mapped, PAGE_SIZE); + SetPageUptodate(page); + out_unlock_page: + unlock_page(page); + out_tap_relse: + reiser4_tap_relse(&tap); + out_tap_done: + reiser4_tap_done(&tap); + return result; +} + +/* + * plugin->s.file.readpage + * + * reiser4_read_dispatch->read_unix_file->page_cache_readahead-> + * ->reiser4_readpage_dispatch->readpage_unix_file->readpage_tail + * or + * filemap_fault->reiser4_readpage_dispatch->readpage_unix_file->readpage_tail + * + * At the beginning: coord->node is read locked, zloaded, page is locked, + * coord is set to existing unit inside of tail item. 
+ */ +int readpage_tail(void *vp, struct page *page) +{ + uf_coord_t *uf_coord = vp; + ON_DEBUG(coord_t * coord = &uf_coord->coord); + ON_DEBUG(reiser4_key key); + + assert("umka-2515", PageLocked(page)); + assert("umka-2516", !PageUptodate(page)); + assert("umka-2517", !jprivate(page) && !PagePrivate(page)); + assert("umka-2518", page->mapping && page->mapping->host); + + assert("umka-2519", znode_is_loaded(coord->node)); + assert("umka-2520", item_is_tail(coord)); + assert("umka-2521", coord_is_existing_unit(coord)); + assert("umka-2522", znode_is_rlocked(coord->node)); + assert("umka-2523", + page->mapping->host->i_ino == + get_key_objectid(item_key_by_coord(coord, &key))); + + return do_readpage_tail(uf_coord, page); +} + +/** + * overwrite_tail + * @flow: + * @coord: + * + * Overwrites tail item or its part by user data. Returns number of bytes + * written or error code. + */ +static int overwrite_tail(flow_t *flow, coord_t *coord) +{ + unsigned count; + + assert("vs-570", flow->user == 1); + assert("vs-946", flow->data); + assert("vs-947", coord_is_existing_unit(coord)); + assert("vs-948", znode_is_write_locked(coord->node)); + assert("nikita-3036", reiser4_schedulable()); + + count = item_length_by_coord(coord) - coord->unit_pos; + if (count > flow->length) + count = flow->length; + + if (__copy_from_user((char *)item_body_by_coord(coord) + coord->unit_pos, + (const char __user *)flow->data, count)) + return RETERR(-EFAULT); + + znode_make_dirty(coord->node); + return count; +} + +/** + * insert_first_tail + * @inode: + * @flow: + * @coord: + * @lh: + * + * Returns number of bytes written or error code. + */ +static ssize_t insert_first_tail(struct inode *inode, flow_t *flow, + coord_t *coord, lock_handle *lh) +{ + int result; + loff_t to_write; + struct unix_file_info *uf_info; + + if (get_key_offset(&flow->key) != 0) { + /* + * file is empty and we have to write not to the beginning of + * file. Create a hole at the beginning of file. On success + * insert_flow returns 0 as number of written bytes which is + * what we have to return on padding a file with holes + */ + flow->data = NULL; + flow->length = get_key_offset(&flow->key); + set_key_offset(&flow->key, 0); + /* + * holes in files built of tails are stored just like if there + * were real data which are all zeros. + */ + inode_add_bytes(inode, flow->length); + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + + uf_info = unix_file_inode_data(inode); + + /* + * first item insertion is only possible when writing to empty + * file or performing tail conversion + */ + assert("", (uf_info->container == UF_CONTAINER_EMPTY || + (reiser4_inode_get_flag(inode, + REISER4_PART_MIXED) && + reiser4_inode_get_flag(inode, + REISER4_PART_IN_CONV)))); + /* if file was empty - update its state */ + if (result == 0 && uf_info->container == UF_CONTAINER_EMPTY) + uf_info->container = UF_CONTAINER_TAILS; + return result; + } + + inode_add_bytes(inode, flow->length); + + to_write = flow->length; + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + return (to_write - flow->length) ? (to_write - flow->length) : result; +} + +/** + * append_tail + * @inode: + * @flow: + * @coord: + * @lh: + * + * Returns number of bytes written or error code. 
+ */ +static ssize_t append_tail(struct inode *inode, + flow_t *flow, coord_t *coord, lock_handle *lh) +{ + int result; + reiser4_key append_key; + loff_t to_write; + + if (!keyeq(&flow->key, append_key_tail(coord, &append_key))) { + flow->data = NULL; + flow->length = get_key_offset(&flow->key) - get_key_offset(&append_key); + set_key_offset(&flow->key, get_key_offset(&append_key)); + /* + * holes in files built of tails are stored just like if there + * were real data which are all zeros. + */ + inode_add_bytes(inode, flow->length); + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + return result; + } + + inode_add_bytes(inode, flow->length); + + to_write = flow->length; + result = reiser4_insert_flow(coord, lh, flow); + if (flow->length) + inode_sub_bytes(inode, flow->length); + return (to_write - flow->length) ? (to_write - flow->length) : result; +} + +/** + * write_tail_reserve_space - reserve space for tail write operation + * @inode: + * + * Estimates and reserves space which may be required for writing one flow to a + * file + */ +static int write_extent_reserve_space(struct inode *inode) +{ + __u64 count; + reiser4_tree *tree; + + /* + * to write one flow to a file by tails we have to reserve disk space for: + + * 1. find_file_item may have to insert empty node to the tree (empty + * leaf node between two extent items). This requires 1 block and + * number of blocks which are necessary to perform insertion of an + * internal item into twig level. + * + * 2. flow insertion + * + * 3. stat data update + */ + tree = reiser4_tree_by_inode(inode); + count = estimate_one_insert_item(tree) + + estimate_insert_flow(tree->height) + + estimate_one_insert_item(tree); + grab_space_enable(); + return reiser4_grab_space(count, 0 /* flags */); +} + +#define PAGE_PER_FLOW 4 + +static loff_t faultin_user_pages(const char __user *buf, size_t count) +{ + loff_t faulted; + int to_fault; + + if (count > PAGE_PER_FLOW * PAGE_SIZE) + count = PAGE_PER_FLOW * PAGE_SIZE; + faulted = 0; + while (count > 0) { + to_fault = PAGE_SIZE; + if (count < to_fault) + to_fault = count; + fault_in_pages_readable(buf + faulted, to_fault); + count -= to_fault; + faulted += to_fault; + } + return faulted; +} + +ssize_t reiser4_write_tail_noreserve(struct file *file, + struct inode * inode, + const char __user *buf, + size_t count, loff_t *pos) +{ + struct hint hint; + int result; + flow_t flow; + coord_t *coord; + lock_handle *lh; + znode *loaded; + + assert("edward-1548", inode != NULL); + + result = load_file_hint(file, &hint); + BUG_ON(result != 0); + + flow.length = faultin_user_pages(buf, count); + flow.user = 1; + memcpy(&flow.data, &buf, sizeof(buf)); + flow.op = WRITE_OP; + key_by_inode_and_offset_common(inode, *pos, &flow.key); + + result = find_file_item(&hint, &flow.key, ZNODE_WRITE_LOCK, inode); + if (IS_CBKERR(result)) + return result; + + coord = &hint.ext_coord.coord; + lh = hint.ext_coord.lh; + + result = zload(coord->node); + BUG_ON(result != 0); + loaded = coord->node; + + if (coord->between == AFTER_UNIT) { + /* append with data or hole */ + result = append_tail(inode, &flow, coord, lh); + } else if (coord->between == AT_UNIT) { + /* overwrite */ + result = overwrite_tail(&flow, coord); + } else { + /* no items of this file yet. 
insert data or hole */ + result = insert_first_tail(inode, &flow, coord, lh); + } + zrelse(loaded); + if (result < 0) { + done_lh(lh); + return result; + } + + /* seal and unlock znode */ + hint.ext_coord.valid = 0; + if (hint.ext_coord.valid) + reiser4_set_hint(&hint, &flow.key, ZNODE_WRITE_LOCK); + else + reiser4_unset_hint(&hint); + + save_file_hint(file, &hint); + return result; +} + +/** + * reiser4_write_tail - write method of tail item plugin + * @file: file to write to + * @buf: address of user-space buffer + * @count: number of bytes to write + * @pos: position in file to write to + * + * Returns number of written bytes or error code. + */ +ssize_t reiser4_write_tail(struct file *file, + struct inode * inode, + const char __user *buf, + size_t count, loff_t *pos) +{ + if (write_extent_reserve_space(inode)) + return RETERR(-ENOSPC); + return reiser4_write_tail_noreserve(file, inode, buf, count, pos); +} + +#if REISER4_DEBUG + +static int +coord_matches_key_tail(const coord_t * coord, const reiser4_key * key) +{ + reiser4_key item_key; + + assert("vs-1356", coord_is_existing_unit(coord)); + assert("vs-1354", keylt(key, append_key_tail(coord, &item_key))); + assert("vs-1355", keyge(key, item_key_by_coord(coord, &item_key))); + return get_key_offset(key) == + get_key_offset(&item_key) + coord->unit_pos; + +} + +#endif + +/* plugin->u.item.s.file.read */ +int reiser4_read_tail(struct file *file UNUSED_ARG, flow_t *f, hint_t *hint) +{ + unsigned count; + int item_length; + coord_t *coord; + uf_coord_t *uf_coord; + + uf_coord = &hint->ext_coord; + coord = &uf_coord->coord; + + assert("vs-571", f->user == 1); + assert("vs-571", f->data); + assert("vs-967", coord && coord->node); + assert("vs-1117", znode_is_rlocked(coord->node)); + assert("vs-1118", znode_is_loaded(coord->node)); + + assert("nikita-3037", reiser4_schedulable()); + assert("vs-1357", coord_matches_key_tail(coord, &f->key)); + + /* calculate number of bytes to read off the item */ + item_length = item_length_by_coord(coord); + count = item_length_by_coord(coord) - coord->unit_pos; + if (count > f->length) + count = f->length; + + /* user page has to be brought in so that major page fault does not + * occur here when longtem lock is held */ + if (__copy_to_user((char __user *)f->data, + ((char *)item_body_by_coord(coord) + coord->unit_pos), + count)) + return RETERR(-EFAULT); + + /* probably mark_page_accessed() should only be called if + * coord->unit_pos is zero. */ + mark_page_accessed(znode_page(coord->node)); + move_flow_forward(f, count); + + coord->unit_pos += count; + if (item_length == coord->unit_pos) { + coord->unit_pos--; + coord->between = AFTER_UNIT; + } + reiser4_set_hint(hint, &f->key, ZNODE_READ_LOCK); + return 0; +} + +/* + plugin->u.item.s.file.append_key + key of first byte which is the next to last byte by addressed by this item +*/ +reiser4_key *append_key_tail(const coord_t * coord, reiser4_key * key) +{ + item_key_by_coord(coord, key); + set_key_offset(key, get_key_offset(key) + item_length_by_coord(coord)); + return key; +} + +/* plugin->u.item.s.file.init_coord_extension */ +void init_coord_extension_tail(uf_coord_t * uf_coord, loff_t lookuped) +{ + uf_coord->valid = 1; +} + +/* + plugin->u.item.s.file.get_block +*/ +int +get_block_address_tail(const coord_t * coord, sector_t lblock, sector_t * block) +{ + assert("nikita-3252", znode_get_level(coord->node) == LEAF_LEVEL); + + if (reiser4_blocknr_is_fake(znode_get_block(coord->node))) + /* if node has'nt obtainet its block number yet, return 0. 
+ * Lets avoid upsetting users with some cosmic numbers beyond + * the device capacity.*/ + *block = 0; + else + *block = *znode_get_block(coord->node); + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/item/tail.h b/fs/reiser4/plugin/item/tail.h new file mode 100644 index 000000000000..d0eacbd27126 --- /dev/null +++ b/fs/reiser4/plugin/item/tail.h @@ -0,0 +1,59 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined( __REISER4_TAIL_H__ ) +#define __REISER4_TAIL_H__ + +struct tail_coord_extension { + int not_used; +}; + +struct cut_list; + +/* plugin->u.item.b.* */ +reiser4_key *max_key_inside_tail(const coord_t *, reiser4_key *); +int can_contain_key_tail(const coord_t * coord, const reiser4_key * key, + const reiser4_item_data *); +int mergeable_tail(const coord_t * p1, const coord_t * p2); +pos_in_node_t nr_units_tail(const coord_t *); +lookup_result lookup_tail(const reiser4_key *, lookup_bias, coord_t *); +int paste_tail(coord_t *, reiser4_item_data *, carry_plugin_info *); +int can_shift_tail(unsigned free_space, coord_t * source, + znode * target, shift_direction, unsigned *size, + unsigned want); +void copy_units_tail(coord_t * target, coord_t * source, unsigned from, + unsigned count, shift_direction, unsigned free_space); +int kill_hook_tail(const coord_t *, pos_in_node_t from, pos_in_node_t count, + struct carry_kill_data *); +int cut_units_tail(coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_cut_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +int kill_units_tail(coord_t *, pos_in_node_t from, pos_in_node_t to, + struct carry_kill_data *, reiser4_key * smallest_removed, + reiser4_key * new_first); +reiser4_key *unit_key_tail(const coord_t *, reiser4_key *); + +/* plugin->u.item.s.* */ +ssize_t reiser4_write_tail_noreserve(struct file *file, struct inode * inode, + const char __user *buf, size_t count, + loff_t *pos); +ssize_t reiser4_write_tail(struct file *file, struct inode * inode, + const char __user *buf, size_t count, loff_t *pos); +int reiser4_read_tail(struct file *, flow_t *, hint_t *); +int readpage_tail(void *vp, struct page *page); +reiser4_key *append_key_tail(const coord_t *, reiser4_key *); +void init_coord_extension_tail(uf_coord_t *, loff_t offset); +int get_block_address_tail(const coord_t *, sector_t, sector_t *); + +/* __REISER4_TAIL_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/Makefile b/fs/reiser4/plugin/node/Makefile new file mode 100644 index 000000000000..36e87ff49210 --- /dev/null +++ b/fs/reiser4/plugin/node/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_REISER4_FS) += node_plugins.o + +node_plugins-objs := \ + node.o \ + node40.o \ + node41.o diff --git a/fs/reiser4/plugin/node/node.c b/fs/reiser4/plugin/node/node.c new file mode 100644 index 000000000000..aca83732bcd5 --- /dev/null +++ b/fs/reiser4/plugin/node/node.c @@ -0,0 +1,170 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Node plugin interface. + + Description: The tree provides the abstraction of flows, which it + internally fragments into items which it stores in nodes. + + A key_atom is a piece of data bound to a single key. 
+ + For reasonable space efficiency to be achieved it is often + necessary to store key_atoms in the nodes in the form of items, where + an item is a sequence of key_atoms of the same or similar type. It is + more space-efficient, because the item can implement (very) + efficient compression of key_atom's bodies using internal knowledge + about their semantics, and it can often avoid having a key for each + key_atom. Each type of item has specific operations implemented by its + item handler (see balance.c). + + Rationale: the rest of the code (specifically balancing routines) + accesses leaf level nodes through this interface. This way we can + implement various block layouts and even combine various layouts + within the same tree. Balancing/allocating algorithms should not + care about peculiarities of splitting/merging specific item types, + but rather should leave that to the item's item handler. + + Items, including those that provide the abstraction of flows, have + the property that if you move them in part or in whole to another + node, the balancing code invokes their is_left_mergeable() + item_operation to determine if they are mergeable with their new + neighbor in the node you have moved them to. For some items the + is_left_mergeable() function always returns null. + + When moving the bodies of items from one node to another: + + if a partial item is shifted to another node the balancing code invokes + an item handler method to handle the item splitting. + + if the balancing code needs to merge with an item in the node it + is shifting to, it will invoke an item handler method to handle + the item merging. + + if it needs to move whole item bodies unchanged, the balancing code uses xmemcpy() + adjusting the item headers after the move is done using the node handler. +*/ + +#include "../../forward.h" +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "../item/item.h" +#include "node.h" +#include "../plugin.h" +#include "../../znode.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../reiser4.h" + +/** + * leftmost_key_in_node - get the smallest key in node + * @node: + * @key: store result here + * + * Stores the leftmost key of @node in @key. 
+ */ +reiser4_key *leftmost_key_in_node(const znode *node, reiser4_key *key) +{ + assert("nikita-1634", node != NULL); + assert("nikita-1635", key != NULL); + + if (!node_is_empty(node)) { + coord_t first_item; + + coord_init_first_unit(&first_item, (znode *) node); + item_key_by_coord(&first_item, key); + } else + *key = *reiser4_max_key(); + return key; +} + +node_plugin node_plugins[LAST_NODE_ID] = { + [NODE40_ID] = { + .h = { + .type_id = REISER4_NODE_PLUGIN_TYPE, + .id = NODE40_ID, + .pops = NULL, + .label = "unified", + .desc = "unified node layout", + .linkage = {NULL, NULL} + }, + .item_overhead = item_overhead_node40, + .free_space = free_space_node40, + .lookup = lookup_node40, + .num_of_items = num_of_items_node40, + .item_by_coord = item_by_coord_node40, + .length_by_coord = length_by_coord_node40, + .plugin_by_coord = plugin_by_coord_node40, + .key_at = key_at_node40, + .estimate = estimate_node40, + .check = check_node40, + .parse = parse_node40, + .init = init_node40, +#ifdef GUESS_EXISTS + .guess = guess_node40, +#endif + .change_item_size = change_item_size_node40, + .create_item = create_item_node40, + .update_item_key = update_item_key_node40, + .cut_and_kill = kill_node40, + .cut = cut_node40, + .shift = shift_node40, + .shrink_item = shrink_item_node40, + .fast_insert = fast_insert_node40, + .fast_paste = fast_paste_node40, + .fast_cut = fast_cut_node40, + .max_item_size = max_item_size_node40, + .prepare_removal = prepare_removal_node40, + .set_item_plugin = set_item_plugin_node40 + }, + [NODE41_ID] = { + .h = { + .type_id = REISER4_NODE_PLUGIN_TYPE, + .id = NODE41_ID, + .pops = NULL, + .label = "node41", + .desc = "node41 layout", + .linkage = {NULL, NULL} + }, + .item_overhead = item_overhead_node40, + .free_space = free_space_node40, + .lookup = lookup_node40, + .num_of_items = num_of_items_node40, + .item_by_coord = item_by_coord_node40, + .length_by_coord = length_by_coord_node40, + .plugin_by_coord = plugin_by_coord_node40, + .key_at = key_at_node40, + .estimate = estimate_node40, + .check = NULL, + .parse = parse_node41, + .init = init_node41, +#ifdef GUESS_EXISTS + .guess = guess_node41, +#endif + .change_item_size = change_item_size_node40, + .create_item = create_item_node40, + .update_item_key = update_item_key_node40, + .cut_and_kill = kill_node40, + .cut = cut_node40, + .shift = shift_node41, + .shrink_item = shrink_item_node40, + .fast_insert = fast_insert_node40, + .fast_paste = fast_paste_node40, + .fast_cut = fast_cut_node40, + .max_item_size = max_item_size_node41, + .prepare_removal = prepare_removal_node40, + .set_item_plugin = set_item_plugin_node40, + .csum = csum_node41 + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node.h b/fs/reiser4/plugin/node/node.h new file mode 100644 index 000000000000..a4cda2c8ab61 --- /dev/null +++ b/fs/reiser4/plugin/node/node.h @@ -0,0 +1,275 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* We need a definition of the default node layout here. */ + +/* Generally speaking, it is best to have free space in the middle of the + node so that two sets of things can grow towards it, and to have the + item bodies on the left so that the last one of them grows into free + space. 
We optimize for the case where we append new items to the end + of the node, or grow the last item, because it hurts nothing to so + optimize and it is a common special case to do massive insertions in + increasing key order (and one of cases more likely to have a real user + notice the delay time for). + + formatted leaf default layout: (leaf1) + + |node header:item bodies:free space:key + pluginid + item offset| + + We grow towards the middle, optimizing layout for the case where we + append new items to the end of the node. The node header is fixed + length. Keys, and item offsets plus pluginids for the items + corresponding to them are in increasing key order, and are fixed + length. Item offsets are relative to start of node (16 bits creating + a node size limit of 64k, 12 bits might be a better choice....). Item + bodies are in decreasing key order. Item bodies have a variable size. + There is a one to one to one mapping of keys to item offsets to item + bodies. Item offsets consist of pointers to the zeroth byte of the + item body. Item length equals the start of the next item minus the + start of this item, except the zeroth item whose length equals the end + of the node minus the start of that item (plus a byte). In other + words, the item length is not recorded anywhere, and it does not need + to be since it is computable. + + Leaf variable length items and keys layout : (lvar) + + |node header:key offset + item offset + pluginid triplets:free space:key bodies:item bodies| + + We grow towards the middle, optimizing layout for the case where we + append new items to the end of the node. The node header is fixed + length. Keys and item offsets for the items corresponding to them are + in increasing key order, and keys are variable length. Item offsets + are relative to start of node (16 bits). Item bodies are in + decreasing key order. Item bodies have a variable size. There is a + one to one to one mapping of keys to item offsets to item bodies. + Item offsets consist of pointers to the zeroth byte of the item body. + Item length equals the start of the next item's key minus the start of + this item, except the zeroth item whose length equals the end of the + node minus the start of that item (plus a byte). + + leaf compressed keys layout: (lcomp) + + |node header:key offset + key inherit + item offset pairs:free space:key bodies:item bodies| + + We grow towards the middle, optimizing layout for the case where we + append new items to the end of the node. The node header is fixed + length. Keys and item offsets for the items corresponding to them are + in increasing key order, and keys are variable length. The "key + inherit" field indicates how much of the key prefix is identical to + the previous key (stem compression as described in "Managing + Gigabytes" is used). key_inherit is a one byte integer. The + intra-node searches performed through this layout are linear searches, + and this is theorized to not hurt performance much due to the high + cost of processor stalls on modern CPUs, and the small number of keys + in a single node. Item offsets are relative to start of node (16 + bits). Item bodies are in decreasing key order. Item bodies have a + variable size. There is a one to one to one mapping of keys to item + offsets to item bodies. Item offsets consist of pointers to the + zeroth byte of the item body. 
Item length equals the start of the + next item minus the start of this item, except the zeroth item whose + length equals the end of the node minus the start of that item (plus a + byte). In other words, item length and key length is not recorded + anywhere, and it does not need to be since it is computable. + + internal node default layout: (idef1) + + just like ldef1 except that item bodies are either blocknrs of + children or extents, and moving them may require updating parent + pointers in the nodes that they point to. +*/ + +/* There is an inherent 3-way tradeoff between optimizing and + exchanging disks between different architectures and code + complexity. This is optimal and simple and inexchangeable. + Someone else can do the code for exchanging disks and make it + complex. It would not be that hard. Using other than the PAGE_SIZE + might be suboptimal. +*/ + +#if !defined( __REISER4_NODE_H__ ) +#define __REISER4_NODE_H__ + +#define LEAF40_NODE_SIZE PAGE_CACHE_SIZE + +#include "../../dformat.h" +#include "../plugin_header.h" + +#include <linux/types.h> + +typedef enum { + NS_FOUND = 0, + NS_NOT_FOUND = -ENOENT +} node_search_result; + +/* Maximal possible space overhead for creation of new item in a node */ +#define REISER4_NODE_MAX_OVERHEAD ( sizeof( reiser4_key ) + 32 ) + +typedef enum { + REISER4_NODE_DKEYS = (1 << 0), + REISER4_NODE_TREE_STABLE = (1 << 1) +} reiser4_node_check_flag; + +/* cut and cut_and_kill have too long list of parameters. This structure is just to safe some space on stack */ +struct cut_list { + coord_t *from; + coord_t *to; + const reiser4_key *from_key; + const reiser4_key *to_key; + reiser4_key *smallest_removed; + carry_plugin_info *info; + __u32 flags; + struct inode *inode; /* this is to pass list of eflushed jnodes down to extent_kill_hook */ + lock_handle *left; + lock_handle *right; +}; + +struct carry_cut_data; +struct carry_kill_data; + +/* The responsibility of the node plugin is to store and give access + to the sequence of items within the node. */ +typedef struct node_plugin { + /* generic plugin fields */ + plugin_header h; + + /* calculates the amount of space that will be required to store an + item which is in addition to the space consumed by the item body. + (the space consumed by the item body can be gotten by calling + item->estimate) */ + size_t(*item_overhead) (const znode * node, flow_t * f); + + /* returns free space by looking into node (i.e., without using + znode->free_space). */ + size_t(*free_space) (znode * node); + /* search within the node for the one item which might + contain the key, invoking item->search_within to search within + that item to see if it is in there */ + node_search_result(*lookup) (znode * node, const reiser4_key * key, + lookup_bias bias, coord_t * coord); + /* number of items in node */ + int (*num_of_items) (const znode * node); + + /* store information about item in @coord in @data */ + /* break into several node ops, don't add any more uses of this before doing so */ + /*int ( *item_at )( const coord_t *coord, reiser4_item_data *data ); */ + char *(*item_by_coord) (const coord_t * coord); + int (*length_by_coord) (const coord_t * coord); + item_plugin *(*plugin_by_coord) (const coord_t * coord); + + /* store item key in @key */ + reiser4_key *(*key_at) (const coord_t * coord, reiser4_key * key); + /* conservatively estimate whether unit of what size can fit + into node. This estimation should be performed without + actually looking into the node's content (free space is saved in + znode). 
*/ + size_t(*estimate) (znode * node); + + /* performs every consistency check the node plugin author could + imagine. Optional. */ + int (*check) (const znode * node, __u32 flags, const char **error); + + /* Called when node is read into memory and node plugin is + already detected. This should read some data into znode (like free + space counter) and, optionally, check data consistency. + */ + int (*parse) (znode * node); + /* This method is called on a new node to initialise plugin specific + data (header, etc.) */ + int (*init) (znode * node); + /* Check whether @node content conforms to this plugin format. + Probably only useful after support for old V3.x formats is added. + Uncomment after 4.0 only. + */ + /* int ( *guess )( const znode *node ); */ +#if REISER4_DEBUG + void (*print) (const char *prefix, const znode * node, __u32 flags); +#endif + /* change size of @item by @by bytes. @item->node has enough free + space. When @by > 0 - free space is appended to end of item. When + @by < 0 - item is truncated - it is assumed that last @by bytes if + the item are freed already */ + void (*change_item_size) (coord_t * item, int by); + + /* create new item @length bytes long in coord @target */ + int (*create_item) (coord_t * target, const reiser4_key * key, + reiser4_item_data * data, carry_plugin_info * info); + + /* update key of item. */ + void (*update_item_key) (coord_t * target, const reiser4_key * key, + carry_plugin_info * info); + + int (*cut_and_kill) (struct carry_kill_data *, carry_plugin_info *); + int (*cut) (struct carry_cut_data *, carry_plugin_info *); + + /* + * shrink item pointed to by @coord by @delta bytes. + */ + int (*shrink_item) (coord_t * coord, int delta); + + /* copy as much as possible but not more than up to @stop from + @stop->node to @target. If (pend == append) then data from beginning of + @stop->node are copied to the end of @target. If (pend == prepend) then + data from the end of @stop->node are copied to the beginning of + @target. Copied data are removed from @stop->node. Information + about what to do on upper level is stored in @todo */ + int (*shift) (coord_t * stop, znode * target, shift_direction pend, + int delete_node, int including_insert_coord, + carry_plugin_info * info); + /* return true if this node allows skip carry() in some situations + (see fs/reiser4/tree.c:insert_by_coord()). Reiser3.x format + emulation doesn't. + + This will speedup insertions that doesn't require updates to the + parent, by bypassing initialisation of carry() structures. It's + believed that majority of insertions will fit there. + + */ + int (*fast_insert) (const coord_t * coord); + int (*fast_paste) (const coord_t * coord); + int (*fast_cut) (const coord_t * coord); + /* this limits max size of item which can be inserted into a node and + number of bytes item in a node may be appended with */ + int (*max_item_size) (void); + int (*prepare_removal) (znode * empty, carry_plugin_info * info); + /* change plugin id of items which are in a node already. 
Currently it is Used in tail conversion for regular + * files */ + int (*set_item_plugin) (coord_t * coord, item_id); + /* calculate and check/update znode's checksum + (if @check is true, then check, otherwise update) */ + int (*csum)(znode *node, int check); +} node_plugin; + +typedef enum { + NODE40_ID, /* standard unified node layout used for both, + leaf and internal nodes */ + NODE41_ID, /* node layout with a checksum */ + LAST_NODE_ID +} reiser4_node_id; + +extern reiser4_key *leftmost_key_in_node(const znode * node, reiser4_key * key); +#if REISER4_DEBUG +extern void print_node_content(const char *prefix, const znode * node, + __u32 flags); +#endif + +extern void indent_znode(const znode * node); + +typedef struct common_node_header { + /* + * identifier of node plugin. Must be located at the very beginning of + * a node. + */ + __le16 plugin_id; +} common_node_header; + +/* __REISER4_NODE_H__ */ +#endif +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/node/node40.c b/fs/reiser4/plugin/node/node40.c new file mode 100644 index 000000000000..47c83091b687 --- /dev/null +++ b/fs/reiser4/plugin/node/node40.c @@ -0,0 +1,3073 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "../item/item.h" +#include "node.h" +#include "node40.h" +#include "../plugin.h" +#include "../../jnode.h" +#include "../../znode.h" +#include "../../pool.h" +#include "../../carry.h" +#include "../../tap.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../reiser4.h" + +#include <asm/uaccess.h> +#include <linux/types.h> +#include <linux/prefetch.h> + +/* leaf 40 format: + + [node header | item 0, item 1, .., item N-1 | free space | item_head N-1, .. item_head 1, item head 0 ] + plugin_id (16) key + free_space (16) pluginid (16) + free_space_start (16) offset (16) + level (8) + num_items (16) + magic (32) + flush_time (32) +*/ +/* NIKITA-FIXME-HANS: I told you guys not less than 10 times to not call it r4fs. Change to "ReIs". 
*/ +/* magic number that is stored in ->magic field of node header */ +static const __u32 REISER4_NODE40_MAGIC = 0x52344653; /* (*(__u32 *)"R4FS"); */ + +static int prepare_for_update(znode * left, znode * right, + carry_plugin_info * info); + +/* header of node of reiser40 format is at the beginning of node */ +static inline node40_header *node40_node_header(const znode * node /* node to + * query */ ) +{ + assert("nikita-567", node != NULL); + assert("nikita-568", znode_page(node) != NULL); + assert("nikita-569", zdata(node) != NULL); + return (node40_header *) zdata(node); +} + +/* functions to get/set fields of node40_header */ +#define nh40_get_magic(nh) le32_to_cpu(get_unaligned(&(nh)->magic)) +#define nh40_get_free_space(nh) le16_to_cpu(get_unaligned(&(nh)->free_space)) +#define nh40_get_free_space_start(nh) le16_to_cpu(get_unaligned(&(nh)->free_space_start)) +#define nh40_get_level(nh) get_unaligned(&(nh)->level) +#define nh40_get_num_items(nh) le16_to_cpu(get_unaligned(&(nh)->nr_items)) +#define nh40_get_flush_id(nh) le64_to_cpu(get_unaligned(&(nh)->flush_id)) + +#define nh40_set_magic(nh, value) put_unaligned(cpu_to_le32(value), &(nh)->magic) +#define nh40_set_free_space(nh, value) put_unaligned(cpu_to_le16(value), &(nh)->free_space) +#define nh40_set_free_space_start(nh, value) put_unaligned(cpu_to_le16(value), &(nh)->free_space_start) +#define nh40_set_level(nh, value) put_unaligned(value, &(nh)->level) +#define nh40_set_num_items(nh, value) put_unaligned(cpu_to_le16(value), &(nh)->nr_items) +#define nh40_set_mkfs_id(nh, value) put_unaligned(cpu_to_le32(value), &(nh)->mkfs_id) + +/* plugin field of node header should be read/set by + plugin_by_disk_id/save_disk_plugin */ + +/* array of item headers is at the end of node */ +static inline item_header40 *node40_ih_at(const znode * node, unsigned pos) +{ + return (item_header40 *) (zdata(node) + znode_size(node)) - pos - 1; +} + +/* ( page_address( node -> pg ) + PAGE_CACHE_SIZE ) - pos - 1 + */ +static inline item_header40 *node40_ih_at_coord(const coord_t * coord) +{ + return (item_header40 *) (zdata(coord->node) + + znode_size(coord->node)) - (coord->item_pos) - + 1; +} + +/* functions to get/set fields of item_header40 */ +#define ih40_get_offset(ih) le16_to_cpu(get_unaligned(&(ih)->offset)) + +#define ih40_set_offset(ih, value) put_unaligned(cpu_to_le16(value), &(ih)->offset) + +/* plugin field of item header should be read/set by + plugin_by_disk_id/save_disk_plugin */ + +/* plugin methods */ + +/* plugin->u.node.item_overhead + look for description of this method in plugin/node/node.h */ +size_t +item_overhead_node40(const znode * node UNUSED_ARG, flow_t * f UNUSED_ARG) +{ + return sizeof(item_header40); +} + +/* plugin->u.node.free_space + look for description of this method in plugin/node/node.h */ +size_t free_space_node40(znode * node) +{ + assert("nikita-577", node != NULL); + assert("nikita-578", znode_is_loaded(node)); + assert("nikita-579", zdata(node) != NULL); + + return nh40_get_free_space(node40_node_header(node)); +} + +/* private inline version of node40_num_of_items() for use in this file. This + is necessary, because address of node40_num_of_items() is taken and it is + never inlined as a result. 
*/ +static inline short node40_num_of_items_internal(const znode * node) +{ + return nh40_get_num_items(node40_node_header(node)); +} + +#if REISER4_DEBUG +static inline void check_num_items(const znode * node) +{ + assert("nikita-2749", + node40_num_of_items_internal(node) == node->nr_items); + assert("nikita-2746", znode_is_write_locked(node)); +} +#else +#define check_num_items(node) noop +#endif + +/* plugin->u.node.num_of_items + look for description of this method in plugin/node/node.h */ +int num_of_items_node40(const znode * node) +{ + return node40_num_of_items_internal(node); +} + +static void +node40_set_num_items(znode * node, node40_header * nh, unsigned value) +{ + assert("nikita-2751", node != NULL); + assert("nikita-2750", nh == node40_node_header(node)); + + check_num_items(node); + nh40_set_num_items(nh, value); + node->nr_items = value; + check_num_items(node); +} + +/* plugin->u.node.item_by_coord + look for description of this method in plugin/node/node.h */ +char *item_by_coord_node40(const coord_t * coord) +{ + item_header40 *ih; + char *p; + + /* @coord is set to existing item */ + assert("nikita-596", coord != NULL); + assert("vs-255", coord_is_existing_item(coord)); + + ih = node40_ih_at_coord(coord); + p = zdata(coord->node) + ih40_get_offset(ih); + return p; +} + +/* plugin->u.node.length_by_coord + look for description of this method in plugin/node/node.h */ +int length_by_coord_node40(const coord_t * coord) +{ + item_header40 *ih; + int result; + + /* @coord is set to existing item */ + assert("vs-256", coord != NULL); + assert("vs-257", coord_is_existing_item(coord)); + + ih = node40_ih_at_coord(coord); + if ((int)coord->item_pos == + node40_num_of_items_internal(coord->node) - 1) + result = + nh40_get_free_space_start(node40_node_header(coord->node)) - + ih40_get_offset(ih); + else + result = ih40_get_offset(ih - 1) - ih40_get_offset(ih); + + return result; +} + +static pos_in_node_t +node40_item_length(const znode * node, pos_in_node_t item_pos) +{ + item_header40 *ih; + pos_in_node_t result; + + /* @coord is set to existing item */ + assert("vs-256", node != NULL); + assert("vs-257", node40_num_of_items_internal(node) > item_pos); + + ih = node40_ih_at(node, item_pos); + if (item_pos == node40_num_of_items_internal(node) - 1) + result = + nh40_get_free_space_start(node40_node_header(node)) - + ih40_get_offset(ih); + else + result = ih40_get_offset(ih - 1) - ih40_get_offset(ih); + + return result; +} + +/* plugin->u.node.plugin_by_coord + look for description of this method in plugin/node/node.h */ +item_plugin *plugin_by_coord_node40(const coord_t * coord) +{ + item_header40 *ih; + item_plugin *result; + + /* @coord is set to existing item */ + assert("vs-258", coord != NULL); + assert("vs-259", coord_is_existing_item(coord)); + + ih = node40_ih_at_coord(coord); + /* pass NULL in stead of current tree. This is time critical call. 
*/ + result = item_plugin_by_disk_id(NULL, &ih->plugin_id); + return result; +} + +/* plugin->u.node.key_at + look for description of this method in plugin/node/node.h */ +reiser4_key *key_at_node40(const coord_t * coord, reiser4_key * key) +{ + item_header40 *ih; + + assert("nikita-1765", coord_is_existing_item(coord)); + + /* @coord is set to existing item */ + ih = node40_ih_at_coord(coord); + memcpy(key, &ih->key, sizeof(reiser4_key)); + return key; +} + +/* VS-FIXME-HANS: please review whether the below are properly disabled when debugging is disabled */ + +#define NODE_INCSTAT(n, counter) \ + reiser4_stat_inc_at_level(znode_get_level(n), node.lookup.counter) + +#define NODE_ADDSTAT(n, counter, val) \ + reiser4_stat_add_at_level(znode_get_level(n), node.lookup.counter, val) + +/* plugin->u.node.lookup + look for description of this method in plugin/node/node.h */ +node_search_result lookup_node40(znode * node /* node to query */ , + const reiser4_key * key /* key to look for */ , + lookup_bias bias /* search bias */ , + coord_t * coord /* resulting coord */ ) +{ + int left; + int right; + int found; + int items; + + item_header40 *lefth; + item_header40 *righth; + + item_plugin *iplug; + item_header40 *bstop; + item_header40 *ih; + cmp_t order; + + assert("nikita-583", node != NULL); + assert("nikita-584", key != NULL); + assert("nikita-585", coord != NULL); + assert("nikita-2693", znode_is_any_locked(node)); + cassert(REISER4_SEQ_SEARCH_BREAK > 2); + + items = node_num_items(node); + + if (unlikely(items == 0)) { + coord_init_first_unit(coord, node); + return NS_NOT_FOUND; + } + + /* binary search for item that can contain given key */ + left = 0; + right = items - 1; + coord->node = node; + coord_clear_iplug(coord); + found = 0; + + lefth = node40_ih_at(node, left); + righth = node40_ih_at(node, right); + + /* It is known that for small arrays sequential search is on average + more efficient than binary. This is because sequential search is + coded as tight loop that can be better optimized by compilers and + for small array size gain from this optimization makes sequential + search the winner. Another, maybe more important, reason for this, + is that sequential array is more CPU cache friendly, whereas binary + search effectively destroys CPU caching. + + Critical here is the notion of "smallness". Reasonable value of + REISER4_SEQ_SEARCH_BREAK can be found by playing with code in + fs/reiser4/ulevel/ulevel.c:test_search(). + + Don't try to further optimize sequential search by scanning from + right to left in attempt to use more efficient loop termination + condition (comparison with 0). This doesn't work. + + */ + + while (right - left >= REISER4_SEQ_SEARCH_BREAK) { + int median; + item_header40 *medianh; + + median = (left + right) / 2; + medianh = node40_ih_at(node, median); + + assert("nikita-1084", median >= 0); + assert("nikita-1085", median < items); + switch (keycmp(key, &medianh->key)) { + case LESS_THAN: + right = median; + righth = medianh; + break; + default: + wrong_return_value("nikita-586", "keycmp"); + case GREATER_THAN: + left = median; + lefth = medianh; + break; + case EQUAL_TO: + do { + --median; + /* headers are ordered from right to left */ + ++medianh; + } while (median >= 0 && keyeq(key, &medianh->key)); + right = left = median + 1; + ih = lefth = righth = medianh - 1; + found = 1; + break; + } + } + /* sequential scan. Item headers, and, therefore, keys are stored at + the rightmost part of a node from right to left. 
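+ (node40_ih_at(node, pos) resolves to zdata(node) + znode_size(node)
+ - (pos + 1) * sizeof(item_header40), so the header of item 0 is the
+ one closest to the end of the node, and stepping the header pointer
+ forward in memory moves to smaller item numbers.)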
We are trying to + access memory from left to right, and hence, scan in _descending_ + order of item numbers. + */ + if (!found) { + for (left = right, ih = righth; left >= 0; ++ih, --left) { + cmp_t comparison; + + prefetchkey(&(ih + 1)->key); + comparison = keycmp(&ih->key, key); + if (comparison == GREATER_THAN) + continue; + if (comparison == EQUAL_TO) { + found = 1; + do { + --left; + ++ih; + } while (left >= 0 && keyeq(&ih->key, key)); + ++left; + --ih; + } else { + assert("nikita-1256", comparison == LESS_THAN); + } + break; + } + if (unlikely(left < 0)) + left = 0; + } + + assert("nikita-3212", right >= left); + assert("nikita-3214", + equi(found, keyeq(&node40_ih_at(node, left)->key, key))); + + coord_set_item_pos(coord, left); + coord->unit_pos = 0; + coord->between = AT_UNIT; + + /* key < leftmost key in a mode or node is corrupted and keys + are not sorted */ + bstop = node40_ih_at(node, (unsigned)left); + order = keycmp(&bstop->key, key); + if (unlikely(order == GREATER_THAN)) { + if (unlikely(left != 0)) { + /* screw up */ + warning("nikita-587", "Key less than %i key in a node", + left); + reiser4_print_key("key", key); + reiser4_print_key("min", &bstop->key); + print_coord_content("coord", coord); + return RETERR(-EIO); + } else { + coord->between = BEFORE_UNIT; + return NS_NOT_FOUND; + } + } + /* left <= key, ok */ + iplug = item_plugin_by_disk_id(znode_get_tree(node), &bstop->plugin_id); + + if (unlikely(iplug == NULL)) { + warning("nikita-588", "Unknown plugin %i", + le16_to_cpu(get_unaligned(&bstop->plugin_id))); + reiser4_print_key("key", key); + print_coord_content("coord", coord); + return RETERR(-EIO); + } + + coord_set_iplug(coord, iplug); + + /* if exact key from item header was found by binary search, no + further checks are necessary. */ + if (found) { + assert("nikita-1259", order == EQUAL_TO); + return NS_FOUND; + } + if (iplug->b.max_key_inside != NULL) { + reiser4_key max_item_key; + + /* key > max_item_key --- outside of an item */ + if (keygt(key, iplug->b.max_key_inside(coord, &max_item_key))) { + coord->unit_pos = 0; + coord->between = AFTER_ITEM; + /* FIXME-VS: key we are looking for does not fit into + found item. Return NS_NOT_FOUND then. Without that + the following case does not work: there is extent of + file 10000, 10001. File 10000, 10002 has been just + created. When writing to position 0 in that file - + traverse_tree will stop here on twig level. When we + want it to go down to leaf level + */ + return NS_NOT_FOUND; + } + } + + if (iplug->b.lookup != NULL) { + return iplug->b.lookup(key, bias, coord); + } else { + assert("nikita-1260", order == LESS_THAN); + coord->between = AFTER_UNIT; + return (bias == FIND_EXACT) ? NS_NOT_FOUND : NS_FOUND; + } +} + +#undef NODE_ADDSTAT +#undef NODE_INCSTAT + +/* plugin->u.node.estimate + look for description of this method in plugin/node/node.h */ +size_t estimate_node40(znode * node) +{ + size_t result; + + assert("nikita-597", node != NULL); + + result = free_space_node40(node) - sizeof(item_header40); + + return (result > 0) ? 
result : 0; +} + +/* plugin->u.node.check + look for description of this method in plugin/node/node.h */ +int check_node40(const znode * node /* node to check */ , + __u32 flags /* check flags */ , + const char **error /* where to store error message */ ) +{ + int nr_items; + int i; + reiser4_key prev; + unsigned old_offset; + tree_level level; + coord_t coord; + int result; + + assert("nikita-580", node != NULL); + assert("nikita-581", error != NULL); + assert("nikita-2948", znode_is_loaded(node)); + + if (ZF_ISSET(node, JNODE_HEARD_BANSHEE)) + return 0; + + assert("nikita-582", zdata(node) != NULL); + + nr_items = node40_num_of_items_internal(node); + if (nr_items < 0) { + *error = "Negative number of items"; + return -1; + } + + if (flags & REISER4_NODE_DKEYS) + prev = *znode_get_ld_key((znode *) node); + else + prev = *reiser4_min_key(); + + old_offset = 0; + coord_init_zero(&coord); + coord.node = (znode *) node; + coord.unit_pos = 0; + coord.between = AT_UNIT; + level = znode_get_level(node); + for (i = 0; i < nr_items; i++) { + item_header40 *ih; + reiser4_key unit_key; + unsigned j; + + ih = node40_ih_at(node, (unsigned)i); + coord_set_item_pos(&coord, i); + if ((ih40_get_offset(ih) >= + znode_size(node) - nr_items * sizeof(item_header40)) || + (ih40_get_offset(ih) < sizeof(node40_header))) { + *error = "Offset is out of bounds"; + return -1; + } + if (ih40_get_offset(ih) <= old_offset) { + *error = "Offsets are in wrong order"; + return -1; + } + if ((i == 0) && (ih40_get_offset(ih) != sizeof(node40_header))) { + *error = "Wrong offset of first item"; + return -1; + } + old_offset = ih40_get_offset(ih); + + if (keygt(&prev, &ih->key)) { + *error = "Keys are in wrong order"; + return -1; + } + if (!keyeq(&ih->key, unit_key_by_coord(&coord, &unit_key))) { + *error = "Wrong key of first unit"; + return -1; + } + prev = ih->key; + for (j = 0; j < coord_num_units(&coord); ++j) { + coord.unit_pos = j; + unit_key_by_coord(&coord, &unit_key); + if (keygt(&prev, &unit_key)) { + *error = "Unit keys are in wrong order"; + return -1; + } + prev = unit_key; + } + coord.unit_pos = 0; + if (level != TWIG_LEVEL && item_is_extent(&coord)) { + *error = "extent on the wrong level"; + return -1; + } + if (level == LEAF_LEVEL && item_is_internal(&coord)) { + *error = "internal item on the wrong level"; + return -1; + } + if (level != LEAF_LEVEL && + !item_is_internal(&coord) && !item_is_extent(&coord)) { + *error = "wrong item on the internal level"; + return -1; + } + if (level > TWIG_LEVEL && !item_is_internal(&coord)) { + *error = "non-internal item on the internal level"; + return -1; + } +#if REISER4_DEBUG + if (item_plugin_by_coord(&coord)->b.check + && item_plugin_by_coord(&coord)->b.check(&coord, error)) + return -1; +#endif + if (i) { + coord_t prev_coord; + /* two neighboring items can not be mergeable */ + coord_dup(&prev_coord, &coord); + coord_prev_item(&prev_coord); + if (are_items_mergeable(&prev_coord, &coord)) { + *error = "mergeable items in one node"; + return -1; + } + + } + } + + if ((flags & REISER4_NODE_DKEYS) && !node_is_empty(node)) { + coord_t coord; + item_plugin *iplug; + + coord_init_last_unit(&coord, node); + iplug = item_plugin_by_coord(&coord); + if ((item_is_extent(&coord) || item_is_tail(&coord)) && + iplug->s.file.append_key != NULL) { + reiser4_key mkey; + + iplug->s.file.append_key(&coord, &mkey); + set_key_offset(&mkey, get_key_offset(&mkey) - 1); + read_lock_dk(current_tree); + result = keygt(&mkey, znode_get_rd_key((znode *) node)); + 
read_unlock_dk(current_tree); + if (result) { + *error = "key of rightmost item is too large"; + return -1; + } + } + } + if (flags & REISER4_NODE_DKEYS) { + read_lock_tree(current_tree); + read_lock_dk(current_tree); + + flags |= REISER4_NODE_TREE_STABLE; + + if (keygt(&prev, znode_get_rd_key((znode *) node))) { + if (flags & REISER4_NODE_TREE_STABLE) { + *error = "Last key is greater than rdkey"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + } + if (keygt + (znode_get_ld_key((znode *) node), + znode_get_rd_key((znode *) node))) { + *error = "ldkey is greater than rdkey"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + if (ZF_ISSET(node, JNODE_LEFT_CONNECTED) && + (node->left != NULL) && + !ZF_ISSET(node->left, JNODE_HEARD_BANSHEE) && + ergo(flags & REISER4_NODE_TREE_STABLE, + !keyeq(znode_get_rd_key(node->left), + znode_get_ld_key((znode *) node))) + && ergo(!(flags & REISER4_NODE_TREE_STABLE), + keygt(znode_get_rd_key(node->left), + znode_get_ld_key((znode *) node)))) { + *error = "left rdkey or ldkey is wrong"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && + (node->right != NULL) && + !ZF_ISSET(node->right, JNODE_HEARD_BANSHEE) && + ergo(flags & REISER4_NODE_TREE_STABLE, + !keyeq(znode_get_rd_key((znode *) node), + znode_get_ld_key(node->right))) + && ergo(!(flags & REISER4_NODE_TREE_STABLE), + keygt(znode_get_rd_key((znode *) node), + znode_get_ld_key(node->right)))) { + *error = "rdkey or right ldkey is wrong"; + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + return -1; + } + + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); + } + + return 0; +} + +int parse_node40_common(znode *node, const __u32 magic) +{ + node40_header *header; + int result; + d8 level; + + header = node40_node_header((znode *) node); + result = -EIO; + level = nh40_get_level(header); + if (unlikely(((__u8) znode_get_level(node)) != level)) + warning("nikita-494", "Wrong level found in node: %i != %i", + znode_get_level(node), level); + else if (unlikely(nh40_get_magic(header) != magic)) + warning("nikita-495", + "Wrong magic in tree node: want %x, got %x", + magic, nh40_get_magic(header)); + else { + node->nr_items = node40_num_of_items_internal(node); + result = 0; + } + return RETERR(result); +} + +/* + * plugin->u.node.parse + * look for description of this method in plugin/node/node.h + */ +int parse_node40(znode *node /* node to parse */) +{ + return parse_node40_common(node, REISER4_NODE40_MAGIC); +} + +/* + * common part of ->init_node() for all nodes, + * which contain node40_header at the beginning + */ +int init_node40_common(znode *node, node_plugin *nplug, + size_t node_header_size, const __u32 magic) +{ + node40_header *header40; + + assert("nikita-570", node != NULL); + assert("nikita-572", zdata(node) != NULL); + + header40 = node40_node_header(node); + memset(header40, 0, sizeof(node40_header)); + + nh40_set_free_space(header40, znode_size(node) - node_header_size); + nh40_set_free_space_start(header40, node_header_size); + /* + * sane hypothesis: 0 in CPU format is 0 in disk format + */ + save_plugin_id(node_plugin_to_plugin(nplug), + &header40->common_header.plugin_id); + nh40_set_level(header40, znode_get_level(node)); + nh40_set_magic(header40, magic); + nh40_set_mkfs_id(header40, reiser4_mkfs_id(reiser4_get_current_sb())); + /* + * nr_items: 0 + * flags: 0 + */ + return 0; +} + +/* + * 
plugin->u.node.init
+ * look for description of this method in plugin/node/node.h
+ */
+int init_node40(znode *node /* node to initialise */)
+{
+ return init_node40_common(node, node_plugin_by_id(NODE40_ID),
+ sizeof(node40_header), REISER4_NODE40_MAGIC);
+}
+
+#ifdef GUESS_EXISTS
+int guess_node40_common(const znode *node, reiser4_node_id id,
+ const __u32 magic)
+{
+ node40_header *header;
+
+ assert("nikita-1058", node != NULL);
+ header = node40_node_header(node);
+ return (nh40_get_magic(header) == magic) &&
+ (id == plugin_by_disk_id(znode_get_tree(node),
+ REISER4_NODE_PLUGIN_TYPE,
+ &header->common_header.plugin_id)->h.id);
+}
+
+int guess_node40(const znode *node /* node to guess plugin of */)
+{
+ return guess_node40_common(node, NODE40_ID, REISER4_NODE40_MAGIC);
+}
+#endif
+
+/* plugin->u.node.change_item_size
+ look for description of this method in plugin/node/node.h */
+void change_item_size_node40(coord_t * coord, int by)
+{
+ node40_header *nh;
+ item_header40 *ih;
+ char *item_data;
+ int item_length;
+ unsigned i;
+
+ /* make sure that @item is coord of existing item */
+ assert("vs-210", coord_is_existing_item(coord));
+
+ nh = node40_node_header(coord->node);
+
+ item_data = item_by_coord_node40(coord);
+ item_length = length_by_coord_node40(coord);
+
+ /* move item bodies */
+ ih = node40_ih_at_coord(coord);
+ memmove(item_data + item_length + by, item_data + item_length,
+ nh40_get_free_space_start(node40_node_header(coord->node)) -
+ (ih40_get_offset(ih) + item_length));
+
+ /* update offsets of moved items */
+ for (i = coord->item_pos + 1; i < nh40_get_num_items(nh); i++) {
+ ih = node40_ih_at(coord->node, i);
+ ih40_set_offset(ih, ih40_get_offset(ih) + by);
+ }
+
+ /* update node header */
+ nh40_set_free_space(nh, nh40_get_free_space(nh) - by);
+ nh40_set_free_space_start(nh, nh40_get_free_space_start(nh) + by);
+}
+
+static int should_notify_parent(const znode * node)
+{
+ /* FIXME_JMACD This looks equivalent to znode_is_root(), right?

-josh */ + return !disk_addr_eq(znode_get_block(node), + &znode_get_tree(node)->root_block); +} + +/* plugin->u.node.create_item + look for description of this method in plugin/node/node.h */ +int +create_item_node40(coord_t *target, const reiser4_key *key, + reiser4_item_data *data, carry_plugin_info *info) +{ + node40_header *nh; + item_header40 *ih; + unsigned offset; + unsigned i; + + nh = node40_node_header(target->node); + + assert("vs-212", coord_is_between_items(target)); + /* node must have enough free space */ + assert("vs-254", + free_space_node40(target->node) >= + data->length + sizeof(item_header40)); + assert("vs-1410", data->length >= 0); + + if (coord_set_to_right(target)) + /* there are not items to the right of @target, so, new item + will be inserted after last one */ + coord_set_item_pos(target, nh40_get_num_items(nh)); + + if (target->item_pos < nh40_get_num_items(nh)) { + /* there are items to be moved to prepare space for new + item */ + ih = node40_ih_at_coord(target); + /* new item will start at this offset */ + offset = ih40_get_offset(ih); + + memmove(zdata(target->node) + offset + data->length, + zdata(target->node) + offset, + nh40_get_free_space_start(nh) - offset); + /* update headers of moved items */ + for (i = target->item_pos; i < nh40_get_num_items(nh); i++) { + ih = node40_ih_at(target->node, i); + ih40_set_offset(ih, ih40_get_offset(ih) + data->length); + } + + /* @ih is set to item header of the last item, move item headers */ + memmove(ih - 1, ih, + sizeof(item_header40) * (nh40_get_num_items(nh) - + target->item_pos)); + } else { + /* new item will start at this offset */ + offset = nh40_get_free_space_start(nh); + } + + /* make item header for the new item */ + ih = node40_ih_at_coord(target); + memcpy(&ih->key, key, sizeof(reiser4_key)); + ih40_set_offset(ih, offset); + save_plugin_id(item_plugin_to_plugin(data->iplug), &ih->plugin_id); + + /* update node header */ + nh40_set_free_space(nh, + nh40_get_free_space(nh) - data->length - + sizeof(item_header40)); + nh40_set_free_space_start(nh, + nh40_get_free_space_start(nh) + data->length); + node40_set_num_items(target->node, nh, nh40_get_num_items(nh) + 1); + + /* FIXME: check how does create_item work when between is set to BEFORE_UNIT */ + target->unit_pos = 0; + target->between = AT_UNIT; + coord_clear_iplug(target); + + /* initialize item */ + if (data->iplug->b.init != NULL) { + data->iplug->b.init(target, NULL, data); + } + /* copy item body */ + if (data->iplug->b.paste != NULL) { + data->iplug->b.paste(target, data, info); + } else if (data->data != NULL) { + if (data->user) { + /* AUDIT: Are we really should not check that pointer + from userspace was valid and data bytes were + available? How will we return -EFAULT of some kind + without this check? 
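+ (As the code stands, __copy_from_user() below skips the access_ok()
+ check that copy_from_user() would perform, but its non-zero return
+ value is still translated into RETERR(-EFAULT).)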
*/ + assert("nikita-3038", reiser4_schedulable()); + /* copy data from user space */ + if (__copy_from_user(zdata(target->node) + offset, + (const char __user *)data->data, + (unsigned)data->length)) + return RETERR(-EFAULT); + } else + /* copy from kernel space */ + memcpy(zdata(target->node) + offset, data->data, + (unsigned)data->length); + } + + if (target->item_pos == 0) { + /* left delimiting key has to be updated */ + prepare_for_update(NULL, target->node, info); + } + + if (item_plugin_by_coord(target)->b.create_hook != NULL) { + item_plugin_by_coord(target)->b.create_hook(target, data->arg); + } + + return 0; +} + +/* plugin->u.node.update_item_key + look for description of this method in plugin/node/node.h */ +void +update_item_key_node40(coord_t * target, const reiser4_key * key, + carry_plugin_info * info) +{ + item_header40 *ih; + + ih = node40_ih_at_coord(target); + memcpy(&ih->key, key, sizeof(reiser4_key)); + + if (target->item_pos == 0) { + prepare_for_update(NULL, target->node, info); + } +} + +/* this bits encode cut mode */ +#define CMODE_TAIL 1 +#define CMODE_WHOLE 2 +#define CMODE_HEAD 4 + +struct cut40_info { + int mode; + pos_in_node_t tail_removed; /* position of item which gets tail removed */ + pos_in_node_t first_removed; /* position of first the leftmost item among items removed completely */ + pos_in_node_t removed_count; /* number of items removed completely */ + pos_in_node_t head_removed; /* position of item which gets head removed */ + + pos_in_node_t freed_space_start; + pos_in_node_t freed_space_end; + pos_in_node_t first_moved; + pos_in_node_t head_removed_location; +}; + +static void init_cinfo(struct cut40_info *cinfo) +{ + cinfo->mode = 0; + cinfo->tail_removed = MAX_POS_IN_NODE; + cinfo->first_removed = MAX_POS_IN_NODE; + cinfo->removed_count = MAX_POS_IN_NODE; + cinfo->head_removed = MAX_POS_IN_NODE; + cinfo->freed_space_start = MAX_POS_IN_NODE; + cinfo->freed_space_end = MAX_POS_IN_NODE; + cinfo->first_moved = MAX_POS_IN_NODE; + cinfo->head_removed_location = MAX_POS_IN_NODE; +} + +/* complete cut_node40/kill_node40 content by removing the gap created by */ +static void compact(znode * node, struct cut40_info *cinfo) +{ + node40_header *nh; + item_header40 *ih; + pos_in_node_t freed; + pos_in_node_t pos, nr_items; + + assert("vs-1526", (cinfo->freed_space_start != MAX_POS_IN_NODE && + cinfo->freed_space_end != MAX_POS_IN_NODE && + cinfo->first_moved != MAX_POS_IN_NODE)); + assert("vs-1523", cinfo->freed_space_end >= cinfo->freed_space_start); + + nh = node40_node_header(node); + nr_items = nh40_get_num_items(nh); + + /* remove gap made up by removal */ + memmove(zdata(node) + cinfo->freed_space_start, + zdata(node) + cinfo->freed_space_end, + nh40_get_free_space_start(nh) - cinfo->freed_space_end); + + /* update item headers of moved items - change their locations */ + pos = cinfo->first_moved; + ih = node40_ih_at(node, pos); + if (cinfo->head_removed_location != MAX_POS_IN_NODE) { + assert("vs-1580", pos == cinfo->head_removed); + ih40_set_offset(ih, cinfo->head_removed_location); + pos++; + ih--; + } + + freed = cinfo->freed_space_end - cinfo->freed_space_start; + for (; pos < nr_items; pos++, ih--) { + assert("vs-1581", ih == node40_ih_at(node, pos)); + ih40_set_offset(ih, ih40_get_offset(ih) - freed); + } + + /* free space start moved to right */ + nh40_set_free_space_start(nh, nh40_get_free_space_start(nh) - freed); + + if (cinfo->removed_count != MAX_POS_IN_NODE) { + /* number of items changed. 
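+ (Item headers are packed at the end of the node, so the memmove()
+ below slides the headers of the surviving items that follow the
+ removed range by removed_count slots toward the node end, over the
+ header slots freed by the removed items.)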
Remove item headers of those items */ + ih = node40_ih_at(node, nr_items - 1); + memmove(ih + cinfo->removed_count, ih, + sizeof(item_header40) * (nr_items - + cinfo->removed_count - + cinfo->first_removed)); + freed += sizeof(item_header40) * cinfo->removed_count; + node40_set_num_items(node, nh, nr_items - cinfo->removed_count); + } + + /* total amount of free space increased */ + nh40_set_free_space(nh, nh40_get_free_space(nh) + freed); +} + +int shrink_item_node40(coord_t * coord, int delta) +{ + node40_header *nh; + item_header40 *ih; + pos_in_node_t pos; + pos_in_node_t nr_items; + char *end; + znode *node; + int off; + + assert("nikita-3487", coord != NULL); + assert("nikita-3488", delta >= 0); + + node = coord->node; + nh = node40_node_header(node); + nr_items = nh40_get_num_items(nh); + + ih = node40_ih_at_coord(coord); + assert("nikita-3489", delta <= length_by_coord_node40(coord)); + off = ih40_get_offset(ih) + length_by_coord_node40(coord); + end = zdata(node) + off; + + /* remove gap made up by removal */ + memmove(end - delta, end, nh40_get_free_space_start(nh) - off); + + /* update item headers of moved items - change their locations */ + pos = coord->item_pos + 1; + ih = node40_ih_at(node, pos); + for (; pos < nr_items; pos++, ih--) { + assert("nikita-3490", ih == node40_ih_at(node, pos)); + ih40_set_offset(ih, ih40_get_offset(ih) - delta); + } + + /* free space start moved to left */ + nh40_set_free_space_start(nh, nh40_get_free_space_start(nh) - delta); + /* total amount of free space increased */ + nh40_set_free_space(nh, nh40_get_free_space(nh) + delta); + /* + * This method does _not_ changes number of items. Hence, it cannot + * make node empty. Also it doesn't remove items at all, which means + * that no keys have to be updated either. + */ + return 0; +} + +/* + * Evaluate cut mode, if key range has been specified. + * + * This is for the case when units are not minimal objects + * addressed by keys. + * + * This doesn't work when range contains objects with + * non-unique keys (e.g. directory items). 
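+ * For example, several directory entries may share one key, so
+ * comparing the range boundaries with the minimal and maximal keys
+ * of the boundary items cannot tell a partial cut from a whole-item
+ * cut in that case.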
+ */
+static int parse_cut_by_key_range(struct cut40_info *cinfo,
+ const struct cut_kill_params *params)
+{
+ reiser4_key min_from_key, max_to_key;
+ const reiser4_key *from_key = params->from_key;
+ const reiser4_key *to_key = params->to_key;
+ /*
+ * calculate minimal key stored in first item
+ * of items to be cut (params->from)
+ */
+ item_key_by_coord(params->from, &min_from_key);
+ /*
+ * calculate maximal key stored in last item
+ * of items to be cut (params->to)
+ */
+ max_item_key_by_coord(params->to, &max_to_key);
+
+ if (params->from->item_pos == params->to->item_pos) {
+ if (keylt(&min_from_key, from_key)
+ && keylt(to_key, &max_to_key))
+ return 1;
+
+ if (keygt(from_key, &min_from_key)) {
+ /* tail of item is to be cut */
+ cinfo->tail_removed = params->from->item_pos;
+ cinfo->mode |= CMODE_TAIL;
+ } else if (keylt(to_key, &max_to_key)) {
+ /* head of item is to be cut */
+ cinfo->head_removed = params->from->item_pos;
+ cinfo->mode |= CMODE_HEAD;
+ } else {
+ /* item is removed completely */
+ cinfo->first_removed = params->from->item_pos;
+ cinfo->removed_count = 1;
+ cinfo->mode |= CMODE_WHOLE;
+ }
+ } else {
+ cinfo->first_removed = params->from->item_pos + 1;
+ cinfo->removed_count =
+ params->to->item_pos - params->from->item_pos - 1;
+
+ if (keygt(from_key, &min_from_key)) {
+ /* first item is not cut completely */
+ cinfo->tail_removed = params->from->item_pos;
+ cinfo->mode |= CMODE_TAIL;
+ } else {
+ cinfo->first_removed--;
+ cinfo->removed_count++;
+ }
+ if (keylt(to_key, &max_to_key)) {
+ /* last item is not cut completely */
+ cinfo->head_removed = params->to->item_pos;
+ cinfo->mode |= CMODE_HEAD;
+ } else {
+ cinfo->removed_count++;
+ }
+ if (cinfo->removed_count)
+ cinfo->mode |= CMODE_WHOLE;
+ }
+ return 0;
+}
+
+/*
+ * Evaluate cut mode, if the key range hasn't been specified.
+ * In this case the range can include objects with non-unique
+ * keys (e.g. directory entries).
+ *
+ * This doesn't work when units are not the minimal objects
+ * addressed by keys (e.g. bytes in file's body stored in
+ * unformatted nodes).
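+ * (An extent unit, for instance, covers a whole range of file bytes
+ * under a single key, so a byte-granular cut of such an item has to
+ * be described by keys, i.e. by parse_cut_by_key_range() above.)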
+ */ +static int parse_cut_by_coord_range(struct cut40_info *cinfo, + const struct cut_kill_params *params) +{ + coord_t *from = params->from; + coord_t *to = params->to; + + if (from->item_pos == to->item_pos) { + /* + * cut is performed on only one item + */ + if (from->unit_pos > 0 && + to->unit_pos < coord_last_unit_pos(to)) + /* + * cut from the middle of item + */ + return 1; + if (from->unit_pos > 0) { + /* + * tail of item is to be cut + */ + cinfo->tail_removed = params->from->item_pos; + cinfo->mode |= CMODE_TAIL; + } else if (to->unit_pos < coord_last_unit_pos(to)) { + /* + * head of item is to be cut + */ + cinfo->head_removed = params->from->item_pos; + cinfo->mode |= CMODE_HEAD; + } else { + /* + * item is removed completely + */ + assert("edward-1631", + from->unit_pos == 0 && + to->unit_pos == coord_last_unit_pos(to)); + + cinfo->first_removed = params->from->item_pos; + cinfo->removed_count = 1; + cinfo->mode |= CMODE_WHOLE; + } + } else { + cinfo->first_removed = from->item_pos + 1; + cinfo->removed_count = + to->item_pos - from->item_pos - 1; + + if (from->unit_pos > 0) { + /* + * first item is not cut completely + */ + cinfo->tail_removed = from->item_pos; + cinfo->mode |= CMODE_TAIL; + } else { + cinfo->first_removed--; + cinfo->removed_count++; + } + if (to->unit_pos < coord_last_unit_pos(to)) { + /* + * last item is not cut completely + */ + cinfo->head_removed = to->item_pos; + cinfo->mode |= CMODE_HEAD; + } else { + cinfo->removed_count++; + } + if (cinfo->removed_count) + cinfo->mode |= CMODE_WHOLE; + } + return 0; +} + +/* + * this is used by cut_node40 and kill_node40. It analyses input parameters + * and calculates cut mode. There are 2 types of cut. First is when a unit is + * removed from the middle of an item. In this case this function returns 1. + * All the rest fits into second case: 0 or 1 of items getting tail cut, 0 or + * more items removed completely and 0 or 1 item getting head cut. 
Function + * returns 0 in this case + */ +static int parse_cut(struct cut40_info *cinfo, + const struct cut_kill_params *params) +{ + init_cinfo(cinfo); + if (params->from_key == NULL) { + /* + * cut key range is not defined in input parameters + */ + assert("vs-1513", params->to_key == NULL); + return parse_cut_by_coord_range(cinfo, params); + } else + return parse_cut_by_key_range(cinfo, params); +} + +static void +call_kill_hooks(znode * node, pos_in_node_t from, pos_in_node_t count, + carry_kill_data * kdata) +{ + coord_t coord; + item_plugin *iplug; + pos_in_node_t pos; + + coord.node = node; + coord.unit_pos = 0; + coord.between = AT_UNIT; + for (pos = 0; pos < count; pos++) { + coord_set_item_pos(&coord, from + pos); + coord.unit_pos = 0; + coord.between = AT_UNIT; + iplug = item_plugin_by_coord(&coord); + if (iplug->b.kill_hook) { + iplug->b.kill_hook(&coord, 0, coord_num_units(&coord), + kdata); + } + } +} + +/* this is used to kill item partially */ +static pos_in_node_t +kill_units(coord_t * coord, pos_in_node_t from, pos_in_node_t to, void *data, + reiser4_key * smallest_removed, reiser4_key * new_first_key) +{ + struct carry_kill_data *kdata; + item_plugin *iplug; + + kdata = data; + iplug = item_plugin_by_coord(coord); + + assert("vs-1524", iplug->b.kill_units); + return iplug->b.kill_units(coord, from, to, kdata, smallest_removed, + new_first_key); +} + +/* call item plugin to cut tail of file */ +static pos_in_node_t +kill_tail(coord_t * coord, void *data, reiser4_key * smallest_removed) +{ + struct carry_kill_data *kdata; + pos_in_node_t to; + + kdata = data; + to = coord_last_unit_pos(coord); + return kill_units(coord, coord->unit_pos, to, kdata, smallest_removed, + NULL); +} + +/* call item plugin to cut head of item */ +static pos_in_node_t +kill_head(coord_t * coord, void *data, reiser4_key * smallest_removed, + reiser4_key * new_first_key) +{ + return kill_units(coord, 0, coord->unit_pos, data, smallest_removed, + new_first_key); +} + +/* this is used to cut item partially */ +static pos_in_node_t +cut_units(coord_t * coord, pos_in_node_t from, pos_in_node_t to, void *data, + reiser4_key * smallest_removed, reiser4_key * new_first_key) +{ + carry_cut_data *cdata; + item_plugin *iplug; + + cdata = data; + iplug = item_plugin_by_coord(coord); + assert("vs-302", iplug->b.cut_units); + return iplug->b.cut_units(coord, from, to, cdata, smallest_removed, + new_first_key); +} + +/* call item plugin to cut tail of file */ +static pos_in_node_t +cut_tail(coord_t * coord, void *data, reiser4_key * smallest_removed) +{ + carry_cut_data *cdata; + pos_in_node_t to; + + cdata = data; + to = coord_last_unit_pos(cdata->params.from); + return cut_units(coord, coord->unit_pos, to, data, smallest_removed, NULL); +} + +/* call item plugin to cut head of item */ +static pos_in_node_t +cut_head(coord_t * coord, void *data, reiser4_key * smallest_removed, + reiser4_key * new_first_key) +{ + return cut_units(coord, 0, coord->unit_pos, data, smallest_removed, + new_first_key); +} + +/* this returns 1 of key of first item changed, 0 - if it did not */ +static int +prepare_for_compact(struct cut40_info *cinfo, + const struct cut_kill_params *params, int is_cut, + void *data, carry_plugin_info * info) +{ + znode *node; + item_header40 *ih; + pos_in_node_t freed; + pos_in_node_t item_pos; + coord_t coord; + reiser4_key new_first_key; + pos_in_node_t(*kill_units_f) (coord_t *, pos_in_node_t, pos_in_node_t, + void *, reiser4_key *, reiser4_key *); + pos_in_node_t(*kill_tail_f) (coord_t *, void *, 
reiser4_key *); + pos_in_node_t(*kill_head_f) (coord_t *, void *, reiser4_key *, + reiser4_key *); + int retval; + + retval = 0; + + node = params->from->node; + + assert("vs-184", node == params->to->node); + assert("vs-312", !node_is_empty(node)); + assert("vs-297", + coord_compare(params->from, params->to) != COORD_CMP_ON_RIGHT); + + if (is_cut) { + kill_units_f = cut_units; + kill_tail_f = cut_tail; + kill_head_f = cut_head; + } else { + kill_units_f = kill_units; + kill_tail_f = kill_tail; + kill_head_f = kill_head; + } + + if (parse_cut(cinfo, params) == 1) { + /* cut from the middle of item */ + freed = + kill_units_f(params->from, params->from->unit_pos, + params->to->unit_pos, data, + params->smallest_removed, NULL); + + item_pos = params->from->item_pos; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = + ih40_get_offset(ih) + node40_item_length(node, + item_pos) - freed; + cinfo->freed_space_end = cinfo->freed_space_start + freed; + cinfo->first_moved = item_pos + 1; + } else { + assert("vs-1521", (cinfo->tail_removed != MAX_POS_IN_NODE || + cinfo->first_removed != MAX_POS_IN_NODE || + cinfo->head_removed != MAX_POS_IN_NODE)); + + switch (cinfo->mode) { + case CMODE_TAIL: + /* one item gets cut partially from its end */ + assert("vs-1562", + cinfo->tail_removed == params->from->item_pos); + + freed = + kill_tail_f(params->from, data, + params->smallest_removed); + + item_pos = cinfo->tail_removed; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = + ih40_get_offset(ih) + node40_item_length(node, + item_pos) - + freed; + cinfo->freed_space_end = + cinfo->freed_space_start + freed; + cinfo->first_moved = cinfo->tail_removed + 1; + break; + + case CMODE_WHOLE: + /* one or more items get removed completely */ + assert("vs-1563", + cinfo->first_removed == params->from->item_pos); + assert("vs-1564", cinfo->removed_count > 0 + && cinfo->removed_count != MAX_POS_IN_NODE); + + /* call kill hook for all items removed completely */ + if (is_cut == 0) + call_kill_hooks(node, cinfo->first_removed, + cinfo->removed_count, data); + + item_pos = cinfo->first_removed; + ih = node40_ih_at(node, item_pos); + + if (params->smallest_removed) + memcpy(params->smallest_removed, &ih->key, + sizeof(reiser4_key)); + + cinfo->freed_space_start = ih40_get_offset(ih); + + item_pos += (cinfo->removed_count - 1); + ih -= (cinfo->removed_count - 1); + cinfo->freed_space_end = + ih40_get_offset(ih) + node40_item_length(node, + item_pos); + cinfo->first_moved = item_pos + 1; + if (cinfo->first_removed == 0) + /* key of first item of the node changes */ + retval = 1; + break; + + case CMODE_HEAD: + /* one item gets cut partially from its head */ + assert("vs-1565", + cinfo->head_removed == params->from->item_pos); + + freed = + kill_head_f(params->to, data, + params->smallest_removed, + &new_first_key); + + item_pos = cinfo->head_removed; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = ih40_get_offset(ih); + cinfo->freed_space_end = ih40_get_offset(ih) + freed; + cinfo->first_moved = cinfo->head_removed + 1; + + /* item head is removed, therefore, item key changed */ + coord.node = node; + coord_set_item_pos(&coord, item_pos); + coord.unit_pos = 0; + coord.between = AT_UNIT; + update_item_key_node40(&coord, &new_first_key, NULL); + if (item_pos == 0) + /* key of first item of the node changes */ + retval = 1; + break; + + case CMODE_TAIL | CMODE_WHOLE: + /* one item gets cut from its end and one or more items get removed completely */ + assert("vs-1566", + 
cinfo->tail_removed == params->from->item_pos); + assert("vs-1567", + cinfo->first_removed == cinfo->tail_removed + 1); + assert("vs-1564", cinfo->removed_count > 0 + && cinfo->removed_count != MAX_POS_IN_NODE); + + freed = + kill_tail_f(params->from, data, + params->smallest_removed); + + item_pos = cinfo->tail_removed; + ih = node40_ih_at(node, item_pos); + cinfo->freed_space_start = + ih40_get_offset(ih) + node40_item_length(node, + item_pos) - + freed; + + /* call kill hook for all items removed completely */ + if (is_cut == 0) + call_kill_hooks(node, cinfo->first_removed, + cinfo->removed_count, data); + + item_pos += cinfo->removed_count; + ih -= cinfo->removed_count; + cinfo->freed_space_end = + ih40_get_offset(ih) + node40_item_length(node, + item_pos); + cinfo->first_moved = item_pos + 1; + break; + + case CMODE_WHOLE | CMODE_HEAD: + /* one or more items get removed completely and one item gets cut partially from its head */ + assert("vs-1568", + cinfo->first_removed == params->from->item_pos); + assert("vs-1564", cinfo->removed_count > 0 + && cinfo->removed_count != MAX_POS_IN_NODE); + assert("vs-1569", + cinfo->head_removed == + cinfo->first_removed + cinfo->removed_count); + + /* call kill hook for all items removed completely */ + if (is_cut == 0) + call_kill_hooks(node, cinfo->first_removed, + cinfo->removed_count, data); + + item_pos = cinfo->first_removed; + ih = node40_ih_at(node, item_pos); + + if (params->smallest_removed) + memcpy(params->smallest_removed, &ih->key, + sizeof(reiser4_key)); + + freed = + kill_head_f(params->to, data, NULL, &new_first_key); + + cinfo->freed_space_start = ih40_get_offset(ih); + + ih = node40_ih_at(node, cinfo->head_removed); + /* this is the most complex case. Item which got head removed and items which are to be moved + intact change their location differently. 
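+ After compact() the surviving tail of the head-removed item starts
+ right at freed_space_start (remembered in head_removed_location),
+ while the items following it simply have their offsets decreased by
+ the full freed amount.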
*/
+ cinfo->freed_space_end = ih40_get_offset(ih) + freed;
+ cinfo->first_moved = cinfo->head_removed;
+ cinfo->head_removed_location = cinfo->freed_space_start;
+
+ /* item head is removed, therefore, item key changed */
+ coord.node = node;
+ coord_set_item_pos(&coord, cinfo->head_removed);
+ coord.unit_pos = 0;
+ coord.between = AT_UNIT;
+ update_item_key_node40(&coord, &new_first_key, NULL);
+
+ assert("vs-1579", cinfo->first_removed == 0);
+ /* key of first item of the node changes */
+ retval = 1;
+ break;
+
+ case CMODE_TAIL | CMODE_HEAD:
+ /* one item gets cut from its end and its neighbor gets cut from its head */
+ impossible("vs-1576", "this can not happen currently");
+ break;
+
+ case CMODE_TAIL | CMODE_WHOLE | CMODE_HEAD:
+ impossible("vs-1577", "this can not happen currently");
+ break;
+ default:
+ impossible("vs-1578", "unexpected cut mode");
+ break;
+ }
+ }
+ return retval;
+}
+
+/* plugin->u.node.kill
+ return value is number of items removed completely */
+int kill_node40(struct carry_kill_data *kdata, carry_plugin_info * info)
+{
+ znode *node;
+ struct cut40_info cinfo;
+ int first_key_changed;
+
+ node = kdata->params.from->node;
+
+ first_key_changed =
+ prepare_for_compact(&cinfo, &kdata->params, 0 /* not cut */ , kdata,
+ info);
+ compact(node, &cinfo);
+
+ if (info) {
+ /* it is not called by node40_shift, so we have to take care
+ of changes on upper levels */
+ if (node_is_empty(node)
+ && !(kdata->flags & DELETE_RETAIN_EMPTY))
+ /* all contents of the node are deleted */
+ prepare_removal_node40(node, info);
+ else if (first_key_changed) {
+ prepare_for_update(NULL, node, info);
+ }
+ }
+
+ coord_clear_iplug(kdata->params.from);
+ coord_clear_iplug(kdata->params.to);
+
+ znode_make_dirty(node);
+ return cinfo.removed_count == MAX_POS_IN_NODE ? 0 : cinfo.removed_count;
+}
+
+/* plugin->u.node.cut
+ return value is number of items removed completely */
+int cut_node40(struct carry_cut_data *cdata, carry_plugin_info * info)
+{
+ znode *node;
+ struct cut40_info cinfo;
+ int first_key_changed;
+
+ node = cdata->params.from->node;
+
+ first_key_changed =
+ prepare_for_compact(&cinfo, &cdata->params, 1 /* cut */ , cdata,
+ info);
+ compact(node, &cinfo);
+
+ if (info) {
+ /* it is not called by node40_shift, so we have to take care
+ of changes on upper levels */
+ if (node_is_empty(node))
+ /* all contents of the node are deleted */
+ prepare_removal_node40(node, info);
+ else if (first_key_changed) {
+ prepare_for_update(NULL, node, info);
+ }
+ }
+
+ coord_clear_iplug(cdata->params.from);
+ coord_clear_iplug(cdata->params.to);
+
+ znode_make_dirty(node);
+ return cinfo.removed_count == MAX_POS_IN_NODE ?
0 : cinfo.removed_count; +} + +/* this structure is used by shift method of node40 plugin */ +struct shift_params { + shift_direction pend; /* when @pend == append - we are shifting to + left, when @pend == prepend - to right */ + coord_t wish_stop; /* when shifting to left this is last unit we + want shifted, when shifting to right - this + is set to unit we want to start shifting + from */ + znode *target; + int everything; /* it is set to 1 if everything we have to shift is + shifted, 0 - otherwise */ + + /* FIXME-VS: get rid of read_stop */ + + /* these are set by estimate_shift */ + coord_t real_stop; /* this will be set to last unit which will be + really shifted */ + + /* coordinate in source node before operation of unit which becomes + first after shift to left of last after shift to right */ + union { + coord_t future_first; + coord_t future_last; + } u; + + unsigned merging_units; /* number of units of first item which have to + be merged with last item of target node */ + unsigned merging_bytes; /* number of bytes in those units */ + + unsigned entire; /* items shifted in their entirety */ + unsigned entire_bytes; /* number of bytes in those items */ + + unsigned part_units; /* number of units of partially copied item */ + unsigned part_bytes; /* number of bytes in those units */ + + unsigned shift_bytes; /* total number of bytes in items shifted (item + headers not included) */ + +}; + +static int item_creation_overhead(coord_t *item) +{ + return node_plugin_by_coord(item)->item_overhead(item->node, NULL); +} + +/* how many units are there in @source starting from source->unit_pos + but not further than @stop_coord */ +static int +wanted_units(coord_t *source, coord_t *stop_coord, shift_direction pend) +{ + if (pend == SHIFT_LEFT) { + assert("vs-181", source->unit_pos == 0); + } else { + assert("vs-182", + source->unit_pos == coord_last_unit_pos(source)); + } + + if (source->item_pos != stop_coord->item_pos) { + /* @source and @stop_coord are different items */ + return coord_last_unit_pos(source) + 1; + } + + if (pend == SHIFT_LEFT) { + return stop_coord->unit_pos + 1; + } else { + return source->unit_pos - stop_coord->unit_pos + 1; + } +} + +/* this calculates what can be copied from @shift->wish_stop.node to + @shift->target */ +static void +estimate_shift(struct shift_params *shift, const reiser4_context * ctx) +{ + unsigned target_free_space, size; + pos_in_node_t stop_item; /* item which estimating should not consider */ + unsigned want; /* number of units of item we want shifted */ + coord_t source; /* item being estimated */ + item_plugin *iplug; + + /* shifting to left/right starts from first/last units of + @shift->wish_stop.node */ + if (shift->pend == SHIFT_LEFT) { + coord_init_first_unit(&source, shift->wish_stop.node); + } else { + coord_init_last_unit(&source, shift->wish_stop.node); + } + shift->real_stop = source; + + /* free space in target node and number of items in source */ + target_free_space = znode_free_space(shift->target); + + shift->everything = 0; + if (!node_is_empty(shift->target)) { + /* target node is not empty, check for boundary items + mergeability */ + coord_t to; + + /* item we try to merge @source with */ + if (shift->pend == SHIFT_LEFT) { + coord_init_last_unit(&to, shift->target); + } else { + coord_init_first_unit(&to, shift->target); + } + + if ((shift->pend == SHIFT_LEFT) ? 
are_items_mergeable(&to, + &source) : + are_items_mergeable(&source, &to)) { + /* how many units of @source do we want to merge to + item @to */ + want = + wanted_units(&source, &shift->wish_stop, + shift->pend); + + /* how many units of @source we can merge to item + @to */ + iplug = item_plugin_by_coord(&source); + if (iplug->b.can_shift != NULL) + shift->merging_units = + iplug->b.can_shift(target_free_space, + &source, shift->target, + shift->pend, &size, + want); + else { + shift->merging_units = 0; + size = 0; + } + shift->merging_bytes = size; + shift->shift_bytes += size; + /* update stop coord to be set to last unit of @source + we can merge to @target */ + if (shift->merging_units) + /* at least one unit can be shifted */ + shift->real_stop.unit_pos = + (shift->merging_units - source.unit_pos - + 1) * shift->pend; + else { + /* nothing can be shifted */ + if (shift->pend == SHIFT_LEFT) + coord_init_before_first_item(&shift-> + real_stop, + source. + node); + else + coord_init_after_last_item(&shift-> + real_stop, + source.node); + } + assert("nikita-2081", shift->real_stop.unit_pos + 1); + + if (shift->merging_units != want) { + /* we could not copy as many as we want, so, + there is no reason for estimating any + longer */ + return; + } + + target_free_space -= size; + coord_add_item_pos(&source, shift->pend); + } + } + + /* number of item nothing of which we want to shift */ + stop_item = shift->wish_stop.item_pos + shift->pend; + + /* calculate how many items can be copied into given free + space as whole */ + for (; source.item_pos != stop_item; + coord_add_item_pos(&source, shift->pend)) { + if (shift->pend == SHIFT_RIGHT) + source.unit_pos = coord_last_unit_pos(&source); + + /* how many units of @source do we want to copy */ + want = wanted_units(&source, &shift->wish_stop, shift->pend); + + if (want == coord_last_unit_pos(&source) + 1) { + /* we want this item to be copied entirely */ + size = + item_length_by_coord(&source) + + item_creation_overhead(&source); + if (size <= target_free_space) { + /* item fits into target node as whole */ + target_free_space -= size; + shift->shift_bytes += + size - item_creation_overhead(&source); + shift->entire_bytes += + size - item_creation_overhead(&source); + shift->entire++; + + /* update shift->real_stop coord to be set to + last unit of @source we can merge to + @target */ + shift->real_stop = source; + if (shift->pend == SHIFT_LEFT) + shift->real_stop.unit_pos = + coord_last_unit_pos(&shift-> + real_stop); + else + shift->real_stop.unit_pos = 0; + continue; + } + } + + /* we reach here only for an item which does not fit into + target node in its entirety. This item may be either + partially shifted, or not shifted at all. We will have to + create new item in target node, so decrease amout of free + space by an item creation overhead. 
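+ (For node40 this overhead is just sizeof(item_header40), see
+ item_overhead_node40() above.)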
We can reach here also + if stop coord is in this item */ + if (target_free_space >= + (unsigned)item_creation_overhead(&source)) { + target_free_space -= item_creation_overhead(&source); + iplug = item_plugin_by_coord(&source); + if (iplug->b.can_shift) { + shift->part_units = iplug->b.can_shift(target_free_space, + &source, + NULL, /* target */ + shift->pend, + &size, + want); + } else { + target_free_space = 0; + shift->part_units = 0; + size = 0; + } + } else { + target_free_space = 0; + shift->part_units = 0; + size = 0; + } + shift->part_bytes = size; + shift->shift_bytes += size; + + /* set @shift->real_stop to last unit of @source we can merge + to @shift->target */ + if (shift->part_units) { + shift->real_stop = source; + shift->real_stop.unit_pos = + (shift->part_units - source.unit_pos - + 1) * shift->pend; + assert("nikita-2082", shift->real_stop.unit_pos + 1); + } + + if (want != shift->part_units) + /* not everything wanted were shifted */ + return; + break; + } + + shift->everything = 1; +} + +static void +copy_units(coord_t * target, coord_t * source, unsigned from, unsigned count, + shift_direction dir, unsigned free_space) +{ + item_plugin *iplug; + + assert("nikita-1463", target != NULL); + assert("nikita-1464", source != NULL); + assert("nikita-1465", from + count <= coord_num_units(source)); + + iplug = item_plugin_by_coord(source); + assert("nikita-1468", iplug == item_plugin_by_coord(target)); + iplug->b.copy_units(target, source, from, count, dir, free_space); + + if (dir == SHIFT_RIGHT) { + /* FIXME-VS: this looks not necessary. update_item_key was + called already by copy_units method */ + reiser4_key split_key; + + assert("nikita-1469", target->unit_pos == 0); + + unit_key_by_coord(target, &split_key); + node_plugin_by_coord(target)->update_item_key(target, + &split_key, NULL); + } +} + +/* copy part of @shift->real_stop.node starting either from its beginning or + from its end and ending at @shift->real_stop to either the end or the + beginning of @shift->target */ +static void copy(struct shift_params *shift, size_t node_header_size) +{ + node40_header *nh; + coord_t from; + coord_t to; + item_header40 *from_ih, *to_ih; + int free_space_start; + int new_items; + unsigned old_items; + int old_offset; + unsigned i; + + nh = node40_node_header(shift->target); + free_space_start = nh40_get_free_space_start(nh); + old_items = nh40_get_num_items(nh); + new_items = shift->entire + (shift->part_units ? 1 : 0); + assert("vs-185", + shift->shift_bytes == + shift->merging_bytes + shift->entire_bytes + shift->part_bytes); + + from = shift->wish_stop; + + coord_init_first_unit(&to, shift->target); + + /* NOTE:NIKITA->VS not sure what I am doing: shift->target is empty, + hence to.between is set to EMPTY_NODE above. Looks like we want it + to be AT_UNIT. + + Oh, wonders of ->betweeness... 
+ + */ + to.between = AT_UNIT; + + if (shift->pend == SHIFT_LEFT) { + /* copying to left */ + + coord_set_item_pos(&from, 0); + from_ih = node40_ih_at(from.node, 0); + + coord_set_item_pos(&to, + node40_num_of_items_internal(to.node) - 1); + if (shift->merging_units) { + /* expand last item, so that plugin methods will see + correct data */ + free_space_start += shift->merging_bytes; + nh40_set_free_space_start(nh, + (unsigned)free_space_start); + nh40_set_free_space(nh, + nh40_get_free_space(nh) - + shift->merging_bytes); + + /* appending last item of @target */ + copy_units(&to, &from, 0, /* starting from 0-th unit */ + shift->merging_units, SHIFT_LEFT, + shift->merging_bytes); + coord_inc_item_pos(&from); + from_ih--; + coord_inc_item_pos(&to); + } + + to_ih = node40_ih_at(shift->target, old_items); + if (shift->entire) { + /* copy @entire items entirely */ + + /* copy item headers */ + memcpy(to_ih - shift->entire + 1, + from_ih - shift->entire + 1, + shift->entire * sizeof(item_header40)); + /* update item header offset */ + old_offset = ih40_get_offset(from_ih); + /* AUDIT: Looks like if we calculate old_offset + free_space_start here instead of just old_offset, we can perform one "add" operation less per each iteration */ + for (i = 0; i < shift->entire; i++, to_ih--, from_ih--) + ih40_set_offset(to_ih, + ih40_get_offset(from_ih) - + old_offset + free_space_start); + + /* copy item bodies */ + memcpy(zdata(shift->target) + free_space_start, zdata(from.node) + old_offset, /*ih40_get_offset (from_ih), */ + shift->entire_bytes); + + coord_add_item_pos(&from, (int)shift->entire); + coord_add_item_pos(&to, (int)shift->entire); + } + + nh40_set_free_space_start(nh, + free_space_start + + shift->shift_bytes - + shift->merging_bytes); + nh40_set_free_space(nh, + nh40_get_free_space(nh) - + (shift->shift_bytes - shift->merging_bytes + + sizeof(item_header40) * new_items)); + + /* update node header */ + node40_set_num_items(shift->target, nh, old_items + new_items); + assert("vs-170", + nh40_get_free_space(nh) < znode_size(shift->target)); + + if (shift->part_units) { + /* copy heading part (@part units) of @source item as + a new item into @target->node */ + + /* copy item header of partially copied item */ + coord_set_item_pos(&to, + node40_num_of_items_internal(to.node) + - 1); + memcpy(to_ih, from_ih, sizeof(item_header40)); + ih40_set_offset(to_ih, + nh40_get_free_space_start(nh) - + shift->part_bytes); + if (item_plugin_by_coord(&to)->b.init) + item_plugin_by_coord(&to)->b.init(&to, &from, + NULL); + copy_units(&to, &from, 0, shift->part_units, SHIFT_LEFT, + shift->part_bytes); + } + + } else { + /* copying to right */ + + coord_set_item_pos(&from, + node40_num_of_items_internal(from.node) - 1); + from_ih = node40_ih_at_coord(&from); + + coord_set_item_pos(&to, 0); + + /* prepare space for new items */ + memmove(zdata(to.node) + node_header_size + + shift->shift_bytes, + zdata(to.node) + node_header_size, + free_space_start - node_header_size); + /* update item headers of moved items */ + to_ih = node40_ih_at(to.node, 0); + /* first item gets @merging_bytes longer. 
free space appears + at its beginning */ + if (!node_is_empty(to.node)) + ih40_set_offset(to_ih, + ih40_get_offset(to_ih) + + shift->shift_bytes - + shift->merging_bytes); + + for (i = 1; i < old_items; i++) + ih40_set_offset(to_ih - i, + ih40_get_offset(to_ih - i) + + shift->shift_bytes); + + /* move item headers to make space for new items */ + memmove(to_ih - old_items + 1 - new_items, + to_ih - old_items + 1, + sizeof(item_header40) * old_items); + to_ih -= (new_items - 1); + + nh40_set_free_space_start(nh, + free_space_start + + shift->shift_bytes); + nh40_set_free_space(nh, + nh40_get_free_space(nh) - + (shift->shift_bytes + + sizeof(item_header40) * new_items)); + + /* update node header */ + node40_set_num_items(shift->target, nh, old_items + new_items); + assert("vs-170", + nh40_get_free_space(nh) < znode_size(shift->target)); + + if (shift->merging_units) { + coord_add_item_pos(&to, new_items); + to.unit_pos = 0; + to.between = AT_UNIT; + /* prepend first item of @to */ + copy_units(&to, &from, + coord_last_unit_pos(&from) - + shift->merging_units + 1, + shift->merging_units, SHIFT_RIGHT, + shift->merging_bytes); + coord_dec_item_pos(&from); + from_ih++; + } + + if (shift->entire) { + /* copy @entire items entirely */ + + /* copy item headers */ + memcpy(to_ih, from_ih, + shift->entire * sizeof(item_header40)); + + /* update item header offset */ + old_offset = + ih40_get_offset(from_ih + shift->entire - 1); + /* AUDIT: old_offset + sizeof (node40_header) + shift->part_bytes calculation can be taken off the loop. */ + for (i = 0; i < shift->entire; i++, to_ih++, from_ih++) + ih40_set_offset(to_ih, + ih40_get_offset(from_ih) - + old_offset + + node_header_size + + shift->part_bytes); + /* copy item bodies */ + coord_add_item_pos(&from, -(int)(shift->entire - 1)); + memcpy(zdata(to.node) + node_header_size + + shift->part_bytes, item_by_coord_node40(&from), + shift->entire_bytes); + coord_dec_item_pos(&from); + } + + if (shift->part_units) { + coord_set_item_pos(&to, 0); + to.unit_pos = 0; + to.between = AT_UNIT; + /* copy heading part (@part units) of @source item as + a new item into @target->node */ + + /* copy item header of partially copied item */ + memcpy(to_ih, from_ih, sizeof(item_header40)); + ih40_set_offset(to_ih, node_header_size); + if (item_plugin_by_coord(&to)->b.init) + item_plugin_by_coord(&to)->b.init(&to, &from, + NULL); + copy_units(&to, &from, + coord_last_unit_pos(&from) - + shift->part_units + 1, shift->part_units, + SHIFT_RIGHT, shift->part_bytes); + } + } +} + +/* remove everything either before or after @fact_stop. 
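+ (that is, everything that copy() has already duplicated into
+ @shift->target; the boundary is shift->real_stop.)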
Number of items + removed completely is returned */ +static int delete_copied(struct shift_params *shift) +{ + coord_t from; + coord_t to; + struct carry_cut_data cdata; + + if (shift->pend == SHIFT_LEFT) { + /* we were shifting to left, remove everything from the + beginning of @shift->wish_stop->node upto + @shift->wish_stop */ + coord_init_first_unit(&from, shift->real_stop.node); + to = shift->real_stop; + + /* store old coordinate of unit which will be first after + shift to left */ + shift->u.future_first = to; + coord_next_unit(&shift->u.future_first); + } else { + /* we were shifting to right, remove everything from + @shift->stop_coord upto to end of + @shift->stop_coord->node */ + from = shift->real_stop; + coord_init_last_unit(&to, from.node); + + /* store old coordinate of unit which will be last after + shift to right */ + shift->u.future_last = from; + coord_prev_unit(&shift->u.future_last); + } + + cdata.params.from = &from; + cdata.params.to = &to; + cdata.params.from_key = NULL; + cdata.params.to_key = NULL; + cdata.params.smallest_removed = NULL; + return cut_node40(&cdata, NULL); +} + +/* something was moved between @left and @right. Add carry operation to @info + list to have carry to update delimiting key between them */ +static int +prepare_for_update(znode * left, znode * right, carry_plugin_info * info) +{ + carry_op *op; + carry_node *cn; + + if (info == NULL) + /* nowhere to send operation to. */ + return 0; + + if (!should_notify_parent(right)) + return 0; + + op = node_post_carry(info, COP_UPDATE, right, 1); + if (IS_ERR(op) || op == NULL) + return op ? PTR_ERR(op) : -EIO; + + if (left != NULL) { + carry_node *reference; + + if (info->doing) + reference = insert_carry_node(info->doing, + info->todo, left); + else + reference = op->node; + assert("nikita-2992", reference != NULL); + cn = reiser4_add_carry(info->todo, POOLO_BEFORE, reference); + if (IS_ERR(cn)) + return PTR_ERR(cn); + cn->parent = 1; + cn->node = left; + if (ZF_ISSET(left, JNODE_ORPHAN)) + cn->left_before = 1; + op->u.update.left = cn; + } else + op->u.update.left = NULL; + return 0; +} + +/* plugin->u.node.prepare_removal + to delete a pointer to @empty from the tree add corresponding carry + operation (delete) to @info list */ +int prepare_removal_node40(znode * empty, carry_plugin_info * info) +{ + carry_op *op; + reiser4_tree *tree; + + if (!should_notify_parent(empty)) + return 0; + /* already on a road to Styx */ + if (ZF_ISSET(empty, JNODE_HEARD_BANSHEE)) + return 0; + op = node_post_carry(info, COP_DELETE, empty, 1); + if (IS_ERR(op) || op == NULL) + return RETERR(op ? 
PTR_ERR(op) : -EIO); + + op->u.delete.child = NULL; + op->u.delete.flags = 0; + + /* fare thee well */ + tree = znode_get_tree(empty); + read_lock_tree(tree); + write_lock_dk(tree); + znode_set_ld_key(empty, znode_get_rd_key(empty)); + if (znode_is_left_connected(empty) && empty->left) + znode_set_rd_key(empty->left, znode_get_rd_key(empty)); + write_unlock_dk(tree); + read_unlock_tree(tree); + + ZF_SET(empty, JNODE_HEARD_BANSHEE); + return 0; +} + +/* something were shifted from @insert_coord->node to @shift->target, update + @insert_coord correspondingly */ +static void +adjust_coord(coord_t * insert_coord, struct shift_params *shift, int removed, + int including_insert_coord) +{ + /* item plugin was invalidated by shifting */ + coord_clear_iplug(insert_coord); + + if (node_is_empty(shift->wish_stop.node)) { + assert("vs-242", shift->everything); + if (including_insert_coord) { + if (shift->pend == SHIFT_RIGHT) { + /* set @insert_coord before first unit of + @shift->target node */ + coord_init_before_first_item(insert_coord, + shift->target); + } else { + /* set @insert_coord after last in target node */ + coord_init_after_last_item(insert_coord, + shift->target); + } + } else { + /* set @insert_coord inside of empty node. There is + only one possible coord within an empty + node. init_first_unit will set that coord */ + coord_init_first_unit(insert_coord, + shift->wish_stop.node); + } + return; + } + + if (shift->pend == SHIFT_RIGHT) { + /* there was shifting to right */ + if (shift->everything) { + /* everything wanted was shifted */ + if (including_insert_coord) { + /* @insert_coord is set before first unit of + @to node */ + coord_init_before_first_item(insert_coord, + shift->target); + insert_coord->between = BEFORE_UNIT; + } else { + /* @insert_coord is set after last unit of + @insert->node */ + coord_init_last_unit(insert_coord, + shift->wish_stop.node); + insert_coord->between = AFTER_UNIT; + } + } + return; + } + + /* there was shifting to left */ + if (shift->everything) { + /* everything wanted was shifted */ + if (including_insert_coord) { + /* @insert_coord is set after last unit in @to node */ + coord_init_after_last_item(insert_coord, shift->target); + } else { + /* @insert_coord is set before first unit in the same + node */ + coord_init_before_first_item(insert_coord, + shift->wish_stop.node); + } + return; + } + + /* FIXME-VS: the code below is complicated because with between == + AFTER_ITEM unit_pos is set to 0 */ + + if (!removed) { + /* no items were shifted entirely */ + assert("vs-195", shift->merging_units == 0 + || shift->part_units == 0); + + if (shift->real_stop.item_pos == insert_coord->item_pos) { + if (shift->merging_units) { + if (insert_coord->between == AFTER_UNIT) { + assert("nikita-1441", + insert_coord->unit_pos >= + shift->merging_units); + insert_coord->unit_pos -= + shift->merging_units; + } else if (insert_coord->between == BEFORE_UNIT) { + assert("nikita-2090", + insert_coord->unit_pos > + shift->merging_units); + insert_coord->unit_pos -= + shift->merging_units; + } + + assert("nikita-2083", + insert_coord->unit_pos + 1); + } else { + if (insert_coord->between == AFTER_UNIT) { + assert("nikita-1442", + insert_coord->unit_pos >= + shift->part_units); + insert_coord->unit_pos -= + shift->part_units; + } else if (insert_coord->between == BEFORE_UNIT) { + assert("nikita-2089", + insert_coord->unit_pos > + shift->part_units); + insert_coord->unit_pos -= + shift->part_units; + } + + assert("nikita-2084", + insert_coord->unit_pos + 1); + } + } + 
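+			/*
+			 * only the boundary item can lose units without being
+			 * removed entirely, so fixing up unit_pos above is all
+			 * the correction @insert_coord needs in this case
+			 */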
return; + } + + /* we shifted to left and there was no enough space for everything */ + switch (insert_coord->between) { + case AFTER_UNIT: + case BEFORE_UNIT: + if (shift->real_stop.item_pos == insert_coord->item_pos) + insert_coord->unit_pos -= shift->part_units; + case AFTER_ITEM: + coord_add_item_pos(insert_coord, -removed); + break; + default: + impossible("nikita-2087", "not ready"); + } + assert("nikita-2085", insert_coord->unit_pos + 1); +} + +static int call_shift_hooks(struct shift_params *shift) +{ + unsigned i, shifted; + coord_t coord; + item_plugin *iplug; + + assert("vs-275", !node_is_empty(shift->target)); + + /* number of items shift touches */ + shifted = + shift->entire + (shift->merging_units ? 1 : 0) + + (shift->part_units ? 1 : 0); + + if (shift->pend == SHIFT_LEFT) { + /* moved items are at the end */ + coord_init_last_unit(&coord, shift->target); + coord.unit_pos = 0; + + assert("vs-279", shift->pend == 1); + for (i = 0; i < shifted; i++) { + unsigned from, count; + + iplug = item_plugin_by_coord(&coord); + if (i == 0 && shift->part_units) { + assert("vs-277", + coord_num_units(&coord) == + shift->part_units); + count = shift->part_units; + from = 0; + } else if (i == shifted - 1 && shift->merging_units) { + count = shift->merging_units; + from = coord_num_units(&coord) - count; + } else { + count = coord_num_units(&coord); + from = 0; + } + + if (iplug->b.shift_hook) { + iplug->b.shift_hook(&coord, from, count, + shift->wish_stop.node); + } + coord_add_item_pos(&coord, -shift->pend); + } + } else { + /* moved items are at the beginning */ + coord_init_first_unit(&coord, shift->target); + + assert("vs-278", shift->pend == -1); + for (i = 0; i < shifted; i++) { + unsigned from, count; + + iplug = item_plugin_by_coord(&coord); + if (i == 0 && shift->part_units) { + assert("vs-277", + coord_num_units(&coord) == + shift->part_units); + count = coord_num_units(&coord); + from = 0; + } else if (i == shifted - 1 && shift->merging_units) { + count = shift->merging_units; + from = 0; + } else { + count = coord_num_units(&coord); + from = 0; + } + + if (iplug->b.shift_hook) { + iplug->b.shift_hook(&coord, from, count, + shift->wish_stop.node); + } + coord_add_item_pos(&coord, -shift->pend); + } + } + + return 0; +} + +/* shift to left is completed. Return 1 if unit @old was moved to left neighbor */ +static int +unit_moved_left(const struct shift_params *shift, const coord_t * old) +{ + assert("vs-944", shift->real_stop.node == old->node); + + if (shift->real_stop.item_pos < old->item_pos) + return 0; + if (shift->real_stop.item_pos == old->item_pos) { + if (shift->real_stop.unit_pos < old->unit_pos) + return 0; + } + return 1; +} + +/* shift to right is completed. Return 1 if unit @old was moved to right + neighbor */ +static int +unit_moved_right(const struct shift_params *shift, const coord_t * old) +{ + assert("vs-944", shift->real_stop.node == old->node); + + if (shift->real_stop.item_pos > old->item_pos) + return 0; + if (shift->real_stop.item_pos == old->item_pos) { + if (shift->real_stop.unit_pos > old->unit_pos) + return 0; + } + return 1; +} + +/* coord @old was set in node from which shift was performed. What was shifted + is stored in @shift. 
Update @old correspondingly to performed shift */ +static coord_t *adjust_coord2(const struct shift_params *shift, + const coord_t * old, coord_t * new) +{ + coord_clear_iplug(new); + new->between = old->between; + + coord_clear_iplug(new); + if (old->node == shift->target) { + if (shift->pend == SHIFT_LEFT) { + /* coord which is set inside of left neighbor does not + change during shift to left */ + coord_dup(new, old); + return new; + } + new->node = old->node; + coord_set_item_pos(new, + old->item_pos + shift->entire + + (shift->part_units ? 1 : 0)); + new->unit_pos = old->unit_pos; + if (old->item_pos == 0 && shift->merging_units) + new->unit_pos += shift->merging_units; + return new; + } + + assert("vs-977", old->node == shift->wish_stop.node); + if (shift->pend == SHIFT_LEFT) { + if (unit_moved_left(shift, old)) { + /* unit @old moved to left neighbor. Calculate its + coordinate there */ + new->node = shift->target; + coord_set_item_pos(new, + node_num_items(shift->target) - + shift->entire - + (shift->part_units ? 1 : 0) + + old->item_pos); + + new->unit_pos = old->unit_pos; + if (shift->merging_units) { + coord_dec_item_pos(new); + if (old->item_pos == 0) { + /* unit_pos only changes if item got + merged */ + new->unit_pos = + coord_num_units(new) - + (shift->merging_units - + old->unit_pos); + } + } + } else { + /* unit @old did not move to left neighbor. + + Use _nocheck, because @old is outside of its node. + */ + coord_dup_nocheck(new, old); + coord_add_item_pos(new, + -shift->u.future_first.item_pos); + if (new->item_pos == 0) + new->unit_pos -= shift->u.future_first.unit_pos; + } + } else { + if (unit_moved_right(shift, old)) { + /* unit @old moved to right neighbor */ + new->node = shift->target; + coord_set_item_pos(new, + old->item_pos - + shift->real_stop.item_pos); + if (new->item_pos == 0) { + /* unit @old might change unit pos */ + coord_set_item_pos(new, + old->unit_pos - + shift->real_stop.unit_pos); + } + } else { + /* unit @old did not move to right neighbor, therefore + it did not change */ + coord_dup(new, old); + } + } + coord_set_iplug(new, item_plugin_by_coord(new)); + return new; +} + +/* this is called when shift is completed (something of source node is copied + to target and deleted in source) to update all taps set in current + context */ +static void update_taps(const struct shift_params *shift) +{ + tap_t *tap; + coord_t new; + + for_all_taps(tap) { + /* update only taps set to nodes participating in shift */ + if (tap->coord->node == shift->wish_stop.node + || tap->coord->node == shift->target) + tap_to_coord(tap, + adjust_coord2(shift, tap->coord, &new)); + } +} + +#if REISER4_DEBUG + +struct shift_check { + reiser4_key key; + __u16 plugin_id; + union { + __u64 bytes; + __u64 entries; + void *unused; + } u; +}; + +void *shift_check_prepare(const znode * left, const znode * right) +{ + pos_in_node_t i, nr_items; + int mergeable; + struct shift_check *data; + item_header40 *ih; + + if (node_is_empty(left) || node_is_empty(right)) + mergeable = 0; + else { + coord_t l, r; + + coord_init_last_unit(&l, left); + coord_init_first_unit(&r, right); + mergeable = are_items_mergeable(&l, &r); + } + nr_items = + node40_num_of_items_internal(left) + + node40_num_of_items_internal(right) - (mergeable ? 
1 : 0); + data = + kmalloc(sizeof(struct shift_check) * nr_items, + reiser4_ctx_gfp_mask_get()); + if (data != NULL) { + coord_t coord; + pos_in_node_t item_pos; + + coord_init_first_unit(&coord, left); + i = 0; + + for (item_pos = 0; + item_pos < node40_num_of_items_internal(left); + item_pos++) { + + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + data[i].key = ih->key; + data[i].plugin_id = le16_to_cpu(get_unaligned(&ih->plugin_id)); + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + data[i].u.bytes = coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + data[i].u.bytes = + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + data[i].u.entries = coord_num_units(&coord); + break; + default: + data[i].u.unused = NULL; + break; + } + i++; + } + + coord_init_first_unit(&coord, right); + + if (mergeable) { + assert("vs-1609", i != 0); + + ih = node40_ih_at_coord(&coord); + + assert("vs-1589", + data[i - 1].plugin_id == + le16_to_cpu(get_unaligned(&ih->plugin_id))); + switch (data[i - 1].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + data[i - 1].u.bytes += coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + data[i - 1].u.bytes += + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + data[i - 1].u.entries += + coord_num_units(&coord); + break; + default: + impossible("vs-1605", "wrong mergeable item"); + break; + } + item_pos = 1; + } else + item_pos = 0; + for (; item_pos < node40_num_of_items_internal(right); + item_pos++) { + + assert("vs-1604", i < nr_items); + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + data[i].key = ih->key; + data[i].plugin_id = le16_to_cpu(get_unaligned(&ih->plugin_id)); + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + data[i].u.bytes = coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + data[i].u.bytes = + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + data[i].u.entries = coord_num_units(&coord); + break; + default: + data[i].u.unused = NULL; + break; + } + i++; + } + assert("vs-1606", i == nr_items); + } + return data; +} + +void shift_check(void *vp, const znode * left, const znode * right) +{ + pos_in_node_t i, nr_items; + coord_t coord; + __u64 last_bytes; + int mergeable; + item_header40 *ih; + pos_in_node_t item_pos; + struct shift_check *data; + + data = (struct shift_check *)vp; + + if (data == NULL) + return; + + if (node_is_empty(left) || node_is_empty(right)) + mergeable = 0; + else { + coord_t l, r; + + coord_init_last_unit(&l, left); + coord_init_first_unit(&r, right); + mergeable = are_items_mergeable(&l, &r); + } + + nr_items = + node40_num_of_items_internal(left) + + node40_num_of_items_internal(right) - (mergeable ? 
1 : 0); + + i = 0; + last_bytes = 0; + + coord_init_first_unit(&coord, left); + + for (item_pos = 0; item_pos < node40_num_of_items_internal(left); + item_pos++) { + + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + assert("vs-1611", i == item_pos); + assert("vs-1590", keyeq(&ih->key, &data[i].key)); + assert("vs-1591", + le16_to_cpu(get_unaligned(&ih->plugin_id)) == data[i].plugin_id); + if ((i < (node40_num_of_items_internal(left) - 1)) + || !mergeable) { + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + assert("vs-1592", + data[i].u.bytes == + coord_num_units(&coord)); + break; + case EXTENT_POINTER_ID: + assert("vs-1593", + data[i].u.bytes == + reiser4_extent_size(&coord, + coord_num_units + (&coord))); + break; + case COMPOUND_DIR_ID: + assert("vs-1594", + data[i].u.entries == + coord_num_units(&coord)); + break; + default: + break; + } + } + if (item_pos == (node40_num_of_items_internal(left) - 1) + && mergeable) { + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + last_bytes = coord_num_units(&coord); + break; + case EXTENT_POINTER_ID: + last_bytes = + reiser4_extent_size(&coord, + coord_num_units(&coord)); + break; + case COMPOUND_DIR_ID: + last_bytes = coord_num_units(&coord); + break; + default: + impossible("vs-1595", "wrong mergeable item"); + break; + } + } + i++; + } + + coord_init_first_unit(&coord, right); + if (mergeable) { + ih = node40_ih_at_coord(&coord); + + assert("vs-1589", + data[i - 1].plugin_id == le16_to_cpu(get_unaligned(&ih->plugin_id))); + assert("vs-1608", last_bytes != 0); + switch (data[i - 1].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + assert("vs-1596", + data[i - 1].u.bytes == + last_bytes + coord_num_units(&coord)); + break; + + case EXTENT_POINTER_ID: + assert("vs-1597", + data[i - 1].u.bytes == + last_bytes + reiser4_extent_size(&coord, + coord_num_units + (&coord))); + break; + + case COMPOUND_DIR_ID: + assert("vs-1598", + data[i - 1].u.bytes == + last_bytes + coord_num_units(&coord)); + break; + default: + impossible("vs-1599", "wrong mergeable item"); + break; + } + item_pos = 1; + } else + item_pos = 0; + + for (; item_pos < node40_num_of_items_internal(right); item_pos++) { + + coord_set_item_pos(&coord, item_pos); + ih = node40_ih_at_coord(&coord); + + assert("vs-1612", keyeq(&ih->key, &data[i].key)); + assert("vs-1613", + le16_to_cpu(get_unaligned(&ih->plugin_id)) == data[i].plugin_id); + switch (data[i].plugin_id) { + case CTAIL_ID: + case FORMATTING_ID: + assert("vs-1600", + data[i].u.bytes == coord_num_units(&coord)); + break; + case EXTENT_POINTER_ID: + assert("vs-1601", + data[i].u.bytes == + reiser4_extent_size(&coord, + coord_num_units + (&coord))); + break; + case COMPOUND_DIR_ID: + assert("vs-1602", + data[i].u.entries == coord_num_units(&coord)); + break; + default: + break; + } + i++; + } + + assert("vs-1603", i == nr_items); + kfree(data); +} + +#endif + +/* + * common part of ->shift() for all nodes, + * which contain node40_header at the beginning and + * the table of item headers at the end + */ +int shift_node40_common(coord_t *from, znode *to, + shift_direction pend, + int delete_child, /* if @from->node becomes empty, + * it will be deleted from the + * tree if this is set to 1 */ + int including_stop_coord, + carry_plugin_info *info, + size_t node_header_size) +{ + struct shift_params shift; + int result; + znode *left, *right; + znode *source; + int target_empty; + + assert("nikita-2161", coord_check(from)); + + memset(&shift, 0, sizeof(shift)); 
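+	/*
+	 * record the parameters of the requested shift: the direction, the
+	 * last unit we would like to move (wish_stop) and the node that will
+	 * receive the data (target)
+	 */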
+ shift.pend = pend; + shift.wish_stop = *from; + shift.target = to; + + assert("nikita-1473", znode_is_write_locked(from->node)); + assert("nikita-1474", znode_is_write_locked(to)); + + source = from->node; + + /* set @shift.wish_stop to rightmost/leftmost unit among units we want + shifted */ + if (pend == SHIFT_LEFT) { + result = coord_set_to_left(&shift.wish_stop); + left = to; + right = from->node; + } else { + result = coord_set_to_right(&shift.wish_stop); + left = from->node; + right = to; + } + + if (result) { + /* move insertion coord even if there is nothing to move */ + if (including_stop_coord) { + /* move insertion coord (@from) */ + if (pend == SHIFT_LEFT) { + /* after last item in target node */ + coord_init_after_last_item(from, to); + } else { + /* before first item in target node */ + coord_init_before_first_item(from, to); + } + } + + if (delete_child && node_is_empty(shift.wish_stop.node)) + result = + prepare_removal_node40(shift.wish_stop.node, info); + else + result = 0; + /* there is nothing to shift */ + assert("nikita-2078", coord_check(from)); + return result; + } + + target_empty = node_is_empty(to); + + /* when first node plugin with item body compression is implemented, + this must be changed to call node specific plugin */ + + /* shift->stop_coord is updated to last unit which really will be + shifted */ + estimate_shift(&shift, get_current_context()); + if (!shift.shift_bytes) { + /* we could not shift anything */ + assert("nikita-2079", coord_check(from)); + return 0; + } + + copy(&shift, node_header_size); + + /* result value of this is important. It is used by adjust_coord below */ + result = delete_copied(&shift); + + assert("vs-1610", result >= 0); + assert("vs-1471", + ((reiser4_context *) current->journal_info)->magic == + context_magic); + + /* item which has been moved from one node to another might want to do + something on that event. This can be done by item's shift_hook + method, which will be now called for every moved items */ + call_shift_hooks(&shift); + + assert("vs-1472", + ((reiser4_context *) current->journal_info)->magic == + context_magic); + + update_taps(&shift); + + assert("vs-1473", + ((reiser4_context *) current->journal_info)->magic == + context_magic); + + /* adjust @from pointer in accordance with @including_stop_coord flag + and amount of data which was really shifted */ + adjust_coord(from, &shift, result, including_stop_coord); + + if (target_empty) + /* + * items were shifted into empty node. Update delimiting key. + */ + result = prepare_for_update(NULL, left, info); + + /* add update operation to @info, which is the list of operations to + be performed on a higher level */ + result = prepare_for_update(left, right, info); + if (!result && node_is_empty(source) && delete_child) { + /* all contents of @from->node is moved to @to and @from->node + has to be removed from the tree, so, on higher level we + will be removing the pointer to node @from->node */ + result = prepare_removal_node40(source, info); + } + assert("nikita-2080", coord_check(from)); + return result ? 
result : (int)shift.shift_bytes; +} + +/* + * plugin->u.node.shift + * look for description of this method in plugin/node/node.h + */ +int shift_node40(coord_t *from, znode *to, + shift_direction pend, + int delete_child, /* if @from->node becomes empty, + * it will be deleted from the + * tree if this is set to 1 */ + int including_stop_coord, + carry_plugin_info *info) +{ + return shift_node40_common(from, to, pend, delete_child, + including_stop_coord, info, + sizeof(node40_header)); +} + +/* plugin->u.node.fast_insert() + look for description of this method in plugin/node/node.h */ +int fast_insert_node40(const coord_t * coord UNUSED_ARG /* node to query */ ) +{ + return 1; +} + +/* plugin->u.node.fast_paste() + look for description of this method in plugin/node/node.h */ +int fast_paste_node40(const coord_t * coord UNUSED_ARG /* node to query */ ) +{ + return 1; +} + +/* plugin->u.node.fast_cut() + look for description of this method in plugin/node/node.h */ +int fast_cut_node40(const coord_t * coord UNUSED_ARG /* node to query */ ) +{ + return 1; +} + +/* plugin->u.node.modify - not defined */ + +/* plugin->u.node.max_item_size */ +int max_item_size_node40(void) +{ + return reiser4_get_current_sb()->s_blocksize - sizeof(node40_header) - + sizeof(item_header40); +} + +/* plugin->u.node.set_item_plugin */ +int set_item_plugin_node40(coord_t *coord, item_id id) +{ + item_header40 *ih; + + ih = node40_ih_at_coord(coord); + put_unaligned(cpu_to_le16(id), &ih->plugin_id); + coord->iplugid = id; + return 0; +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node40.h b/fs/reiser4/plugin/node/node40.h new file mode 100644 index 000000000000..5a0864d9ad46 --- /dev/null +++ b/fs/reiser4/plugin/node/node40.h @@ -0,0 +1,130 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined( __REISER4_NODE40_H__ ) +#define __REISER4_NODE40_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "node.h" + +#include <linux/types.h> + +/* format of node header for 40 node layouts. Keep bloat out of this struct. */ +typedef struct node40_header { + /* identifier of node plugin. Must be located at the very beginning + of a node. */ + common_node_header common_header; /* this is 16 bits */ + /* number of items. Should be first element in the node header, + because we haven't yet finally decided whether it shouldn't go into + common_header. + */ +/* NIKITA-FIXME-HANS: Create a macro such that if there is only one + * node format at compile time, and it is this one, accesses do not function dereference when + * accessing these fields (and otherwise they do). Probably 80% of users will only have one node format at a time throughout the life of reiser4. */ + d16 nr_items; + /* free space in node measured in bytes */ + d16 free_space; + /* offset to start of free space in node */ + d16 free_space_start; + /* for reiser4_fsck. When information about what is a free + block is corrupted, and we try to recover everything even + if marked as freed, then old versions of data may + duplicate newer versions, and this field allows us to + restore the newer version. Also useful for when users + who don't have the new trashcan installed on their linux distro + delete the wrong files and send us desperate emails + offering $25 for them back. 
*/ + + /* magic field we need to tell formatted nodes NIKITA-FIXME-HANS: improve this comment */ + d32 magic; + /* flushstamp is made of mk_id and write_counter. mk_id is an + id generated randomly at mkreiserfs time. So we can just + skip all nodes with different mk_id. write_counter is d64 + incrementing counter of writes on disk. It is used for + choosing the newest data at fsck time. NIKITA-FIXME-HANS: why was field name changed but not comment? */ + + d32 mkfs_id; + d64 flush_id; + /* node flags to be used by fsck (reiser4ck or reiser4fsck?) + and repacker NIKITA-FIXME-HANS: say more or reference elsewhere that says more */ + d16 flags; + + /* 1 is leaf level, 2 is twig level, root is the numerically + largest level */ + d8 level; + + d8 pad; +} PACKED node40_header; + +/* item headers are not standard across all node layouts, pass + pos_in_node to functions instead */ +typedef struct item_header40 { + /* key of item */ + /* 0 */ reiser4_key key; + /* offset from start of a node measured in 8-byte chunks */ + /* 24 */ d16 offset; + /* 26 */ d16 flags; + /* 28 */ d16 plugin_id; +} PACKED item_header40; + +size_t item_overhead_node40(const znode * node, flow_t * aflow); +size_t free_space_node40(znode * node); +node_search_result lookup_node40(znode * node, const reiser4_key * key, + lookup_bias bias, coord_t * coord); +int num_of_items_node40(const znode * node); +char *item_by_coord_node40(const coord_t * coord); +int length_by_coord_node40(const coord_t * coord); +item_plugin *plugin_by_coord_node40(const coord_t * coord); +reiser4_key *key_at_node40(const coord_t * coord, reiser4_key * key); +size_t estimate_node40(znode * node); +int check_node40(const znode * node, __u32 flags, const char **error); +int parse_node40_common(znode *node, const __u32 magic); +int parse_node40(znode * node); +int init_node40_common(znode *node, node_plugin *nplug, + size_t node_header_size, const __u32 magic); +int init_node40(znode *node); + +#ifdef GUESS_EXISTS +int guess_node40_common(const znode *node, reiser4_node_id id, + const __u32 magic); +int guess_node40(const znode *node); +#endif + +void change_item_size_node40(coord_t * coord, int by); +int create_item_node40(coord_t * target, const reiser4_key * key, + reiser4_item_data * data, carry_plugin_info * info); +void update_item_key_node40(coord_t * target, const reiser4_key * key, + carry_plugin_info * info); +int kill_node40(struct carry_kill_data *, carry_plugin_info *); +int cut_node40(struct carry_cut_data *, carry_plugin_info *); +int shift_node40_common(coord_t *from, znode *to, shift_direction pend, + int delete_child, int including_stop_coord, + carry_plugin_info *info, size_t nh_size); +int shift_node40(coord_t *from, znode *to, shift_direction pend, + int delete_child, int including_stop_coord, + carry_plugin_info *info); +int fast_insert_node40(const coord_t * coord); +int fast_paste_node40(const coord_t * coord); +int fast_cut_node40(const coord_t * coord); +int max_item_size_node40(void); +int prepare_removal_node40(znode * empty, carry_plugin_info * info); +int set_item_plugin_node40(coord_t * coord, item_id id); +int shrink_item_node40(coord_t * coord, int delta); + +#if REISER4_DEBUG +void *shift_check_prepare(const znode *left, const znode *right); +void shift_check(void *vp, const znode *left, const znode *right); +#endif + +/* __REISER4_NODE40_H__ */ +#endif +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git 
a/fs/reiser4/plugin/node/node41.c b/fs/reiser4/plugin/node/node41.c new file mode 100644 index 000000000000..b5c2cb537dc9 --- /dev/null +++ b/fs/reiser4/plugin/node/node41.c @@ -0,0 +1,137 @@ +/* + * Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README + */ + +#include "../../debug.h" +#include "../../key.h" +#include "../../coord.h" +#include "../plugin_header.h" +#include "../item/item.h" +#include "node.h" +#include "node41.h" +#include "../plugin.h" +#include "../../jnode.h" +#include "../../znode.h" +#include "../../pool.h" +#include "../../carry.h" +#include "../../tap.h" +#include "../../tree.h" +#include "../../super.h" +#include "../../checksum.h" +#include "../../reiser4.h" + +#include <asm/uaccess.h> +#include <linux/types.h> +#include <linux/prefetch.h> + +/* + * node41 layout it almost the same as node40: + * node41_header is at the beginning and a table of item headers + * is at the end. Ther difference is that node41_header contains + * a 32-bit checksum (see node41.h) + */ + +static const __u32 REISER4_NODE41_MAGIC = 0x19051966; + +static inline node41_header *node41_node_header(const znode *node) +{ + assert("edward-1634", node != NULL); + assert("edward-1635", znode_page(node) != NULL); + assert("edward-1636", zdata(node) != NULL); + + return (node41_header *)zdata(node); +} + +int csum_node41(znode *node, int check) +{ + __u32 cpu_csum; + + cpu_csum = reiser4_crc32c(get_current_super_private()->csum_tfm, + ~0, + zdata(node), + sizeof(struct node40_header)); + cpu_csum = reiser4_crc32c(get_current_super_private()->csum_tfm, + cpu_csum, + zdata(node) + sizeof(struct node41_header), + reiser4_get_current_sb()->s_blocksize - + sizeof(node41_header)); + if (check) + return cpu_csum == nh41_get_csum(node41_node_header(node)); + else { + nh41_set_csum(node41_node_header(node), cpu_csum); + return 1; + } +} + +/* + * plugin->u.node.parse + * look for description of this method in plugin/node/node.h + */ +int parse_node41(znode *node /* node to parse */) +{ + int ret; + + ret = csum_node41(node, 1/* check */); + if (!ret) { + warning("edward-1645", + "block %llu: bad checksum. 
FSCK?", + *jnode_get_block(ZJNODE(node))); + reiser4_handle_error(); + return RETERR(-EIO); + } + return parse_node40_common(node, REISER4_NODE41_MAGIC); +} + +/* + * plugin->u.node.init + * look for description of this method in plugin/node/node.h + */ +int init_node41(znode *node /* node to initialise */) +{ + return init_node40_common(node, node_plugin_by_id(NODE41_ID), + sizeof(node41_header), REISER4_NODE41_MAGIC); +} + +/* + * plugin->u.node.shift + * look for description of this method in plugin/node/node.h + */ +int shift_node41(coord_t *from, znode *to, + shift_direction pend, + int delete_child, /* if @from->node becomes empty, + * it will be deleted from the + * tree if this is set to 1 */ + int including_stop_coord, + carry_plugin_info *info) +{ + return shift_node40_common(from, to, pend, delete_child, + including_stop_coord, info, + sizeof(node41_header)); +} + +#ifdef GUESS_EXISTS +int guess_node41(const znode *node /* node to guess plugin of */) +{ + return guess_node40_common(node, NODE41_ID, REISER4_NODE41_MAGIC); +} +#endif + +/* + * plugin->u.node.max_item_size + */ +int max_item_size_node41(void) +{ + return reiser4_get_current_sb()->s_blocksize - sizeof(node41_header) - + sizeof(item_header40); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/node/node41.h b/fs/reiser4/plugin/node/node41.h new file mode 100644 index 000000000000..dfe9a97485fb --- /dev/null +++ b/fs/reiser4/plugin/node/node41.h @@ -0,0 +1,50 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined( __REISER4_NODE41_H__ ) +#define __REISER4_NODE41_H__ + +#include "../../forward.h" +#include "../../dformat.h" +#include "node40.h" +#include <linux/types.h> + +/* + * node41 layout: the same as node40, but with 32-bit checksum + */ + +typedef struct node41_header { + node40_header head; + d32 csum; +} PACKED node41_header; + +/* + * functions to get/set fields of node41_header + */ +#define nh41_get_csum(nh) le32_to_cpu(get_unaligned(&(nh)->csum)) +#define nh41_set_csum(nh, value) put_unaligned(cpu_to_le32(value), &(nh)->csum) + +int init_node41(znode * node); +int parse_node41(znode *node); +int max_item_size_node41(void); +int shift_node41(coord_t *from, znode *to, shift_direction pend, + int delete_child, int including_stop_coord, + carry_plugin_info *info); +int csum_node41(znode *node, int check); + +#ifdef GUESS_EXISTS +int guess_node41(const znode * node); +#endif +extern void reiser4_handle_error(void); + +/* __REISER4_NODE41_H__ */ +#endif +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/object.c b/fs/reiser4/plugin/object.c new file mode 100644 index 000000000000..c039455abb03 --- /dev/null +++ b/fs/reiser4/plugin/object.c @@ -0,0 +1,553 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * Examples of object plugins: file, directory, symlink, special file. + * + * Plugins associated with inode: + * + * Plugin of inode is plugin referenced by plugin-id field of on-disk + * stat-data. How we store this plugin in in-core inode is not + * important. Currently pointers are used, another variant is to store offsets + * and do array lookup on each access. 
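+ *
+ * For instance, with the pointer scheme the object plugin is reached
+ * directly through the plugin set ("pset") kept in the reiser4-private
+ * part of the inode (see reiser4_inode_data() and PSET_FILE below),
+ * while the offset variant would store only the 16-bit plugin id from
+ * the stat-data and index the file_plugins[] array on every access.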
+ * + * Now, each inode has one selected plugin: object plugin that + * determines what type of file this object is: directory, regular etc. + * + * This main plugin can use other plugins that are thus subordinated to + * it. Directory instance of object plugin uses hash; regular file + * instance uses tail policy plugin. + * + * Object plugin is either taken from id in stat-data or guessed from + * i_mode bits. Once it is established we ask it to install its + * subordinate plugins, by looking again in stat-data or inheriting them + * from parent. + * + * How new inode is initialized during ->read_inode(): + * 1 read stat-data and initialize inode fields: i_size, i_mode, + * i_generation, capabilities etc. + * 2 read plugin id from stat data or try to guess plugin id + * from inode->i_mode bits if plugin id is missing. + * 3 Call ->init_inode() method of stat-data plugin to initialise inode fields. + * + * NIKITA-FIXME-HANS: can you say a little about 1 being done before 3? What + * if stat data does contain i_size, etc., due to it being an unusual plugin? + * + * 4 Call ->activate() method of object's plugin. Plugin is either read from + * from stat-data or guessed from mode bits + * 5 Call ->inherit() method of object plugin to inherit as yet un initialized + * plugins from parent. + * + * Easy induction proves that on last step all plugins of inode would be + * initialized. + * + * When creating new object: + * 1 obtain object plugin id (see next period) + * NIKITA-FIXME-HANS: period? + * 2 ->install() this plugin + * 3 ->inherit() the rest from the parent + * + * We need some examples of creating an object with default and non-default + * plugin ids. Nikita, please create them. + */ + +#include "../inode.h" + +int _bugop(void) +{ + BUG_ON(1); + return 0; +} + +#define bugop ((void *)_bugop) + +static int flow_by_inode_bugop(struct inode *inode, const char __user *buf, + int user, loff_t size, + loff_t off, rw_op op, flow_t *f) +{ + BUG_ON(1); + return 0; +} + +static int key_by_inode_bugop(struct inode *inode, loff_t off, reiser4_key *key) +{ + BUG_ON(1); + return 0; +} + +static int _dummyop(void) +{ + return 0; +} + +#define dummyop ((void *)_dummyop) + +static int change_file(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + /* cannot change object plugin of already existing object */ + if (memb == PSET_FILE) + return RETERR(-EINVAL); + + /* Change PSET_CREATE */ + return aset_set_unsafe(&reiser4_inode_data(inode)->pset, memb, plugin); +} + +static reiser4_plugin_ops file_plugin_ops = { + .change = change_file +}; + +static struct inode_operations null_i_ops = {.create = NULL}; +static struct file_operations null_f_ops = {.owner = NULL}; +static struct address_space_operations null_a_ops = {.writepage = NULL}; + +/* + * Reiser4 provides for VFS either dispatcher, or common (fop, + * iop, aop) method. + * + * Dispatchers (suffixed with "dispatch") pass management to + * proper plugin in accordance with plugin table (pset) located + * in the private part of inode. + * + * Common methods are NOT prefixed with "dispatch". They are + * the same for all plugins of FILE interface, and, hence, no + * dispatching is needed. 
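+ *
+ * For example, in regular_file_i_ops below ->permission points straight
+ * to reiser4_permission_common, while ->setattr points to
+ * reiser4_setattr_dispatch, which consults the pset of the inode and
+ * hands the request to the ->setattr method of the installed file
+ * plugin (setattr_unix_file or setattr_cryptcompress in the tables
+ * further down).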
+ */ + +/* + * VFS methods for regular files + */ +static struct inode_operations regular_file_i_ops = { + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_dispatch, + .getattr = reiser4_getattr_common +}; +static struct file_operations regular_file_f_ops = { + .llseek = generic_file_llseek, + .read = reiser4_read_dispatch, + .write = reiser4_write_dispatch, + .read_iter = generic_file_read_iter, + .unlocked_ioctl = reiser4_ioctl_dispatch, +#ifdef CONFIG_COMPAT + .compat_ioctl = reiser4_ioctl_dispatch, +#endif + .mmap = reiser4_mmap_dispatch, + .open = reiser4_open_dispatch, + .release = reiser4_release_dispatch, + .fsync = reiser4_sync_file_common, + .splice_read = generic_file_splice_read, +}; +static struct address_space_operations regular_file_a_ops = { + .writepage = reiser4_writepage, + .readpage = reiser4_readpage_dispatch, + //.sync_page = block_sync_page, + .writepages = reiser4_writepages_dispatch, + .set_page_dirty = reiser4_set_page_dirty, + .readpages = reiser4_readpages_dispatch, + .write_begin = reiser4_write_begin_dispatch, + .write_end = reiser4_write_end_dispatch, + .bmap = reiser4_bmap_dispatch, + .invalidatepage = reiser4_invalidatepage, + .releasepage = reiser4_releasepage, + .migratepage = reiser4_migratepage +}; + +/* VFS methods for symlink files */ +static struct inode_operations symlink_file_i_ops = { + .get_link = reiser4_get_link_common, + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_common, + .getattr = reiser4_getattr_common +}; + +/* VFS methods for special files */ +static struct inode_operations special_file_i_ops = { + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_common, + .getattr = reiser4_getattr_common +}; + +/* VFS methods for directories */ +static struct inode_operations directory_i_ops = { + .create = reiser4_create_common, + .lookup = reiser4_lookup_common, + .link = reiser4_link_common, + .unlink = reiser4_unlink_common, + .symlink = reiser4_symlink_common, + .mkdir = reiser4_mkdir_common, + .rmdir = reiser4_unlink_common, + .mknod = reiser4_mknod_common, + .rename = reiser4_rename2_common, + .permission = reiser4_permission_common, + .setattr = reiser4_setattr_common, + .getattr = reiser4_getattr_common +}; +static struct file_operations directory_f_ops = { + .llseek = reiser4_llseek_dir_common, + .read = generic_read_dir, + .iterate = reiser4_iterate_common, + .release = reiser4_release_dir_common, + .fsync = reiser4_sync_common +}; +static struct address_space_operations directory_a_ops = { + .writepages = dummyop, +}; + +/* + * Definitions of object plugins. 
+ */ + +file_plugin file_plugins[LAST_FILE_PLUGIN_ID] = { + [UNIX_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = UNIX_FILE_PLUGIN_ID, + .groups = (1 << REISER4_REGULAR_FILE), + .pops = &file_plugin_ops, + .label = "reg", + .desc = "regular file", + .linkage = {NULL, NULL}, + }, + /* + * invariant vfs ops + */ + .inode_ops = ®ular_file_i_ops, + .file_ops = ®ular_file_f_ops, + .as_ops = ®ular_file_a_ops, + /* + * private i_ops + */ + .setattr = setattr_unix_file, + .open = open_unix_file, + .read = read_unix_file, + .write = write_unix_file, + .ioctl = ioctl_unix_file, + .mmap = mmap_unix_file, + .release = release_unix_file, + /* + * private f_ops + */ + .readpage = readpage_unix_file, + .readpages = readpages_unix_file, + .writepages = writepages_unix_file, + .write_begin = write_begin_unix_file, + .write_end = write_end_unix_file, + /* + * private a_ops + */ + .bmap = bmap_unix_file, + /* + * other private methods + */ + .write_sd_by_inode = write_sd_by_inode_common, + .flow_by_inode = flow_by_inode_unix_file, + .key_by_inode = key_by_inode_and_offset_common, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common, + .create_object = reiser4_create_object_common, + .delete_object = delete_object_unix_file, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + .owns_item = owns_item_unix_file, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_data_unix_file, + .cut_tree_worker = cut_tree_worker_common, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + }, + [DIRECTORY_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = DIRECTORY_FILE_PLUGIN_ID, + .groups = (1 << REISER4_DIRECTORY_FILE), + .pops = &file_plugin_ops, + .label = "dir", + .desc = "directory", + .linkage = {NULL, NULL} + }, + .inode_ops = &null_i_ops, + .file_ops = &null_f_ops, + .as_ops = &null_a_ops, + + .write_sd_by_inode = write_sd_by_inode_common, + .flow_by_inode = flow_by_inode_bugop, + .key_by_inode = key_by_inode_bugop, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common_dir, + .create_object = reiser4_create_object_common, + .delete_object = reiser4_delete_dir_common, + .add_link = reiser4_add_link_common, + .rem_link = rem_link_common_dir, + .owns_item = owns_item_common_dir, + .can_add_link = can_add_link_common, + .can_rem_link = can_rem_link_common_dir, + .detach = reiser4_detach_common_dir, + .bind = reiser4_bind_common_dir, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common_dir, + .update = estimate_update_common, + .unlink = estimate_unlink_common_dir + }, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + }, + .init_inode_data = init_inode_ordering, + .cut_tree_worker = cut_tree_worker_common, + }, + [SYMLINK_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = SYMLINK_FILE_PLUGIN_ID, + .groups = (1 << REISER4_SYMLINK_FILE), + .pops = &file_plugin_ops, + .label = "symlink", + .desc = "symbolic link", + .linkage = {NULL,NULL} + }, + .inode_ops = &symlink_file_i_ops, + /* 
inode->i_fop of symlink is initialized + by NULL in setup_inode_ops */ + .file_ops = &null_f_ops, + .as_ops = &null_a_ops, + + .write_sd_by_inode = write_sd_by_inode_common, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common, + .create_object = reiser4_create_symlink, + .delete_object = reiser4_delete_object_common, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_ordering, + .cut_tree_worker = cut_tree_worker_common, + .destroy_inode = destroy_inode_symlink, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + }, + [SPECIAL_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = SPECIAL_FILE_PLUGIN_ID, + .groups = (1 << REISER4_SPECIAL_FILE), + .pops = &file_plugin_ops, + .label = "special", + .desc = + "special: fifo, device or socket", + .linkage = {NULL, NULL} + }, + .inode_ops = &special_file_i_ops, + /* file_ops of special files (sockets, block, char, fifo) are + initialized by init_special_inode. */ + .file_ops = &null_f_ops, + .as_ops = &null_a_ops, + + .write_sd_by_inode = write_sd_by_inode_common, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_common, + .create_object = reiser4_create_object_common, + .delete_object = reiser4_delete_object_common, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + .owns_item = owns_item_common, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_ordering, + .cut_tree_worker = cut_tree_worker_common, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + }, + [CRYPTCOMPRESS_FILE_PLUGIN_ID] = { + .h = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .id = CRYPTCOMPRESS_FILE_PLUGIN_ID, + .groups = (1 << REISER4_REGULAR_FILE), + .pops = &file_plugin_ops, + .label = "cryptcompress", + .desc = "cryptcompress file", + .linkage = {NULL, NULL} + }, + .inode_ops = ®ular_file_i_ops, + .file_ops = ®ular_file_f_ops, + .as_ops = ®ular_file_a_ops, + + .setattr = setattr_cryptcompress, + .open = open_cryptcompress, + .read = read_cryptcompress, + .write = write_cryptcompress, + .ioctl = ioctl_cryptcompress, + .mmap = mmap_cryptcompress, + .release = release_cryptcompress, + + .readpage = readpage_cryptcompress, + .readpages = readpages_cryptcompress, + .writepages = writepages_cryptcompress, + .write_begin = write_begin_cryptcompress, + .write_end = write_end_cryptcompress, + + .bmap = bmap_cryptcompress, + + .write_sd_by_inode = write_sd_by_inode_common, + .flow_by_inode = flow_by_inode_cryptcompress, + .key_by_inode = key_by_inode_cryptcompress, + .set_plug_in_inode = set_plug_in_inode_common, + .adjust_to_parent = adjust_to_parent_cryptcompress, + .create_object = create_object_cryptcompress, + .delete_object = delete_object_cryptcompress, + .add_link = reiser4_add_link_common, + .rem_link = reiser4_rem_link_common, + 
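+		/*
+		 * hard-link accounting above is shared with the other file
+		 * plugins (reiser4_add_link_common/reiser4_rem_link_common),
+		 * while object creation, deletion and all I/O methods use
+		 * cryptcompress-specific implementations
+		 */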
.owns_item = owns_item_common, + .can_add_link = can_add_link_common, + .detach = dummyop, + .bind = dummyop, + .safelink = safelink_common, + .estimate = { + .create = estimate_create_common, + .update = estimate_update_common, + .unlink = estimate_unlink_common + }, + .init_inode_data = init_inode_data_cryptcompress, + .cut_tree_worker = cut_tree_worker_cryptcompress, + .destroy_inode = destroy_inode_cryptcompress, + .wire = { + .write = wire_write_common, + .read = wire_read_common, + .get = wire_get_common, + .size = wire_size_common, + .done = wire_done_common + } + } +}; + +static int change_dir(struct inode *inode, + reiser4_plugin * plugin, + pset_member memb) +{ + /* cannot change dir plugin of already existing object */ + return RETERR(-EINVAL); +} + +static reiser4_plugin_ops dir_plugin_ops = { + .change = change_dir +}; + +/* + * definition of directory plugins + */ + +dir_plugin dir_plugins[LAST_DIR_ID] = { + /* standard hashed directory plugin */ + [HASHED_DIR_PLUGIN_ID] = { + .h = { + .type_id = REISER4_DIR_PLUGIN_TYPE, + .id = HASHED_DIR_PLUGIN_ID, + .pops = &dir_plugin_ops, + .label = "dir", + .desc = "hashed directory", + .linkage = {NULL, NULL} + }, + .inode_ops = &directory_i_ops, + .file_ops = &directory_f_ops, + .as_ops = &directory_a_ops, + + .get_parent = get_parent_common, + .is_name_acceptable = is_name_acceptable_common, + .build_entry_key = build_entry_key_hashed, + .build_readdir_key = build_readdir_key_common, + .add_entry = reiser4_add_entry_common, + .rem_entry = reiser4_rem_entry_common, + .init = reiser4_dir_init_common, + .done = reiser4_dir_done_common, + .attach = reiser4_attach_common, + .detach = reiser4_detach_common, + .estimate = { + .add_entry = estimate_add_entry_common, + .rem_entry = estimate_rem_entry_common, + .unlink = dir_estimate_unlink_common + } + }, + /* hashed directory for which seekdir/telldir are guaranteed to + * work. Brain-damage. */ + [SEEKABLE_HASHED_DIR_PLUGIN_ID] = { + .h = { + .type_id = REISER4_DIR_PLUGIN_TYPE, + .id = SEEKABLE_HASHED_DIR_PLUGIN_ID, + .pops = &dir_plugin_ops, + .label = "dir32", + .desc = "directory hashed with 31 bit hash", + .linkage = {NULL, NULL} + }, + .inode_ops = &directory_i_ops, + .file_ops = &directory_f_ops, + .as_ops = &directory_a_ops, + + .get_parent = get_parent_common, + .is_name_acceptable = is_name_acceptable_common, + .build_entry_key = build_entry_key_seekable, + .build_readdir_key = build_readdir_key_common, + .add_entry = reiser4_add_entry_common, + .rem_entry = reiser4_rem_entry_common, + .init = reiser4_dir_init_common, + .done = reiser4_dir_done_common, + .attach = reiser4_attach_common, + .detach = reiser4_detach_common, + .estimate = { + .add_entry = estimate_add_entry_common, + .rem_entry = estimate_rem_entry_common, + .unlink = dir_estimate_unlink_common + } + } +}; + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/object.h b/fs/reiser4/plugin/object.h new file mode 100644 index 000000000000..60ca4b9a9a25 --- /dev/null +++ b/fs/reiser4/plugin/object.h @@ -0,0 +1,117 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declaration of object plugin functions. 
*/ + +#if !defined(__FS_REISER4_PLUGIN_OBJECT_H__) +#define __FS_REISER4_PLUGIN_OBJECT_H__ + +#include "../type_safe_hash.h" + +/* common implementations of inode operations */ +int reiser4_create_common(struct inode *parent, struct dentry *dentry, + umode_t mode, bool); +struct dentry *reiser4_lookup_common(struct inode *parent, + struct dentry *dentry, + unsigned int); +int reiser4_link_common(struct dentry *existing, struct inode *parent, + struct dentry *newname); +int reiser4_unlink_common(struct inode *parent, struct dentry *victim); +int reiser4_mkdir_common(struct inode *parent, struct dentry *dentry, umode_t mode); +int reiser4_symlink_common(struct inode *parent, struct dentry *dentry, + const char *linkname); +int reiser4_mknod_common(struct inode *parent, struct dentry *dentry, + umode_t mode, dev_t rdev); +int reiser4_rename2_common(struct inode *old_dir, struct dentry *old_name, + struct inode *new_dir, struct dentry *new_name, + unsigned flags); +const char *reiser4_get_link_common(struct dentry *, struct inode *inode, + struct delayed_call *done); +int reiser4_permission_common(struct inode *, int mask); +int reiser4_setattr_common(struct dentry *, struct iattr *); +int reiser4_getattr_common(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int flags); + +/* common implementations of file operations */ +loff_t reiser4_llseek_dir_common(struct file *, loff_t off, int origin); +int reiser4_iterate_common(struct file *, struct dir_context *context); +int reiser4_release_dir_common(struct inode *, struct file *); +int reiser4_sync_common(struct file *, loff_t, loff_t, int datasync); + +/* file plugin operations: common implementations */ +int write_sd_by_inode_common(struct inode *); +int key_by_inode_and_offset_common(struct inode *, loff_t, reiser4_key *); +int set_plug_in_inode_common(struct inode *object, struct inode *parent, + reiser4_object_create_data *); +int adjust_to_parent_common(struct inode *object, struct inode *parent, + struct inode *root); +int adjust_to_parent_common_dir(struct inode *object, struct inode *parent, + struct inode *root); +int adjust_to_parent_cryptcompress(struct inode *object, struct inode *parent, + struct inode *root); +int reiser4_create_object_common(struct inode *object, struct inode *parent, + reiser4_object_create_data *); +int reiser4_delete_object_common(struct inode *); +int reiser4_delete_dir_common(struct inode *); +int reiser4_add_link_common(struct inode *object, struct inode *parent); +int reiser4_rem_link_common(struct inode *object, struct inode *parent); +int rem_link_common_dir(struct inode *object, struct inode *parent); +int owns_item_common(const struct inode *, const coord_t *); +int owns_item_common_dir(const struct inode *, const coord_t *); +int can_add_link_common(const struct inode *); +int can_rem_link_common_dir(const struct inode *); +int reiser4_detach_common_dir(struct inode *child, struct inode *parent); +int reiser4_bind_common_dir(struct inode *child, struct inode *parent); +int safelink_common(struct inode *, reiser4_safe_link_t, __u64 value); +reiser4_block_nr estimate_create_common(const struct inode *); +reiser4_block_nr estimate_create_common_dir(const struct inode *); +reiser4_block_nr estimate_update_common(const struct inode *); +reiser4_block_nr estimate_unlink_common(const struct inode *, + const struct inode *); +reiser4_block_nr estimate_unlink_common_dir(const struct inode *, + const struct inode *); +char *wire_write_common(struct inode *, char *start); +char 
*wire_read_common(char *addr, reiser4_object_on_wire *); +struct dentry *wire_get_common(struct super_block *, reiser4_object_on_wire *); +int wire_size_common(struct inode *); +void wire_done_common(reiser4_object_on_wire *); + +/* dir plugin operations: common implementations */ +struct dentry *get_parent_common(struct inode *child); +int is_name_acceptable_common(const struct inode *, const char *name, int len); +void build_entry_key_common(const struct inode *, + const struct qstr *qname, reiser4_key *); +int build_readdir_key_common(struct file *dir, reiser4_key *); +int reiser4_add_entry_common(struct inode *object, struct dentry *where, + reiser4_object_create_data * , reiser4_dir_entry_desc *); +int reiser4_rem_entry_common(struct inode *object, struct dentry *where, + reiser4_dir_entry_desc *); +int reiser4_dir_init_common(struct inode *object, struct inode *parent, + reiser4_object_create_data *); +int reiser4_dir_done_common(struct inode *); +int reiser4_attach_common(struct inode *child, struct inode *parent); +int reiser4_detach_common(struct inode *object, struct inode *parent); +reiser4_block_nr estimate_add_entry_common(const struct inode *); +reiser4_block_nr estimate_rem_entry_common(const struct inode *); +reiser4_block_nr dir_estimate_unlink_common(const struct inode *, + const struct inode *); + +/* these are essential parts of common implementations, they are to make + customized implementations easier */ + +/* merely useful functions */ +int lookup_sd(struct inode *, znode_lock_mode, coord_t *, lock_handle * , + const reiser4_key * , int silent); + +/* __FS_REISER4_PLUGIN_OBJECT_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/plugin.c b/fs/reiser4/plugin/plugin.c new file mode 100644 index 000000000000..4af0b88ac88b --- /dev/null +++ b/fs/reiser4/plugin/plugin.c @@ -0,0 +1,569 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Basic plugin infrastructure, lookup etc. */ + +/* PLUGINS: + + Plugins are internal Reiser4 "modules" or "objects" used to increase + extensibility and allow external users to easily adapt reiser4 to + their needs. + + Plugins are classified into several disjoint "types". Plugins + belonging to the particular plugin type are termed "instances" of + this type. Existing types are listed by enum reiser4_plugin_type + (see plugin/plugin_header.h) + +NIKITA-FIXME-HANS: update this list, and review this entire comment for currency + + Object (file) plugin determines how given file-system object serves + standard VFS requests for read, write, seek, mmap etc. Instances of + file plugins are: regular file, directory, symlink. Another example + of file plugin is audit plugin, that optionally records accesses to + underlying object and forwards requests to it. + + Hash plugins compute hashes used by reiser4 to store and locate + files within directories. Instances of hash plugin type are: r5, + tea, rupasov. + + Tail plugins (or, more precisely, tail policy plugins) determine + when last part of the file should be stored in a formatted item. + + Scope and lookup: + + label such that pair ( type_label, plugin_label ) is unique. This + pair is a globally persistent and user-visible plugin + identifier. Internally kernel maintains plugins and plugin types in + arrays using an index into those arrays as plugin and plugin type + identifiers. 
File-system in turn, also maintains persistent + "dictionary" which is mapping from plugin label to numerical + identifier which is stored in file-system objects. That is, we + store the offset into the plugin array for that plugin type as the + plugin id in the stat data of the filesystem object. + + Internal kernel plugin type identifier (index in plugins[] array) is + of type reiser4_plugin_type. Set of available plugin types is + currently static, but dynamic loading doesn't seem to pose + insurmountable problems. + + Within each type plugins are addressed by the identifiers of type + reiser4_plugin_id (indices in reiser4_plugin_type_data.builtin[]). + Such identifiers are only required to be unique within one type, + not globally. + + Thus, plugin in memory is uniquely identified by the pair (type_id, + id). + + Usage: + + There exists only one instance of each plugin instance, but this + single instance can be associated with many entities (file-system + objects, items, nodes, transactions, file-descriptors etc.). Entity + to which plugin of given type is termed (due to the lack of + imagination) "subject" of this plugin type and, by abuse of + terminology, subject of particular instance of this type to which + it's attached currently. For example, inode is subject of object + plugin type. Inode representing directory is subject of directory + plugin, hash plugin type and some particular instance of hash plugin + type. Inode, representing regular file is subject of "regular file" + plugin, tail-policy plugin type etc. + + With each subject the plugin possibly stores some state. For example, + the state of a directory plugin (instance of object plugin type) is pointer + to hash plugin (if directories always use hashing that is). + + Interface: + + In addition to a scalar identifier, each plugin type and plugin + proper has a "label": short string and a "description"---longer + descriptive string. Labels and descriptions of plugin types are + hard-coded into plugins[] array, declared and defined in + plugin.c. Label and description of plugin are stored in .label and + .desc fields of reiser4_plugin_header respectively. It's possible to + locate plugin by the pair of labels. + + Features (not implemented): + + . user-level plugin manipulations: + + reiser4("filename/..file_plugin<='audit'"); + + write(open("filename/..file_plugin"), "audit", 8); + + . user level utilities lsplug and chplug to manipulate plugins. + Utilities are not of primary priority. Possibly they will be not + working on v4.0 + + NIKITA-FIXME-HANS: this should be a mkreiserfs option not a mount + option, do you agree? I don't think that specifying it at mount time, + and then changing it with each mount, is a good model for usage. + + . mount option "plug" to set-up plugins of root-directory. + "plug=foo:bar" will set "bar" as default plugin of type "foo". + + Limitations: + + . each plugin type has to provide at least one builtin + plugin. This is technical limitation and it can be lifted in the + future. + + TODO: + + New plugin types/plugings: + Things we should be able to separately choose to inherit: + + security plugins + + stat data + + file bodies + + file plugins + + dir plugins + + . perm:acl + + . audi---audit plugin intercepting and possibly logging all + accesses to object. Requires to put stub functions in file_operations + in stead of generic_file_*. + +NIKITA-FIXME-HANS: why make overflows a plugin? + . over---handle hash overflows + + . 
sqnt---handle different access patterns and instruments read-ahead + +NIKITA-FIXME-HANS: describe the line below in more detail. + + . hier---handle inheritance of plugins along file-system hierarchy + + Different kinds of inheritance: on creation vs. on access. + Compatible/incompatible plugins. + Inheritance for multi-linked files. + Layered plugins. + Notion of plugin context is abandoned. + +Each file is associated + with one plugin and dependant plugins (hash, etc.) are stored as + main plugin state. Now, if we have plugins used for regular files + but not for directories, how such plugins would be inherited? + . always store them with directories also + +NIKTIA-FIXME-HANS: Do the line above. It is not exclusive of doing +the line below which is also useful. + + . use inheritance hierarchy, independent of file-system namespace +*/ + +#include "../debug.h" +#include "../dformat.h" +#include "plugin_header.h" +#include "item/static_stat.h" +#include "node/node.h" +#include "security/perm.h" +#include "space/space_allocator.h" +#include "disk_format/disk_format.h" +#include "plugin.h" +#include "../reiser4.h" +#include "../jnode.h" +#include "../inode.h" + +#include <linux/fs.h> /* for struct super_block */ + +/* + * init_plugins - initialize plugin sub-system. + * Just call this once on reiser4 startup. + * + * Initializes plugin sub-system. It is part of reiser4 module + * initialization. For each plugin of each type init method is called and each + * plugin is put into list of plugins. + */ +int init_plugins(void) +{ + reiser4_plugin_type type_id; + + for (type_id = 0; type_id < REISER4_PLUGIN_TYPES; ++type_id) { + struct reiser4_plugin_type_data *ptype; + int i; + + ptype = &plugins[type_id]; + assert("nikita-3508", ptype->label != NULL); + assert("nikita-3509", ptype->type_id == type_id); + + INIT_LIST_HEAD(&ptype->plugins_list); +/* NIKITA-FIXME-HANS: change builtin_num to some other name lacking the term + * builtin. */ + for (i = 0; i < ptype->builtin_num; ++i) { + reiser4_plugin *plugin; + + plugin = plugin_at(ptype, i); + + if (plugin->h.label == NULL) + /* uninitialized slot encountered */ + continue; + assert("nikita-3445", plugin->h.type_id == type_id); + plugin->h.id = i; + if (plugin->h.pops != NULL && + plugin->h.pops->init != NULL) { + int result; + + result = plugin->h.pops->init(plugin); + if (result != 0) + return result; + } + INIT_LIST_HEAD(&plugin->h.linkage); + list_add_tail(&plugin->h.linkage, &ptype->plugins_list); + } + } + return 0; +} + +/* true if plugin type id is valid */ +int is_plugin_type_valid(reiser4_plugin_type type) +{ + /* "type" is unsigned, so no comparison with 0 is + necessary */ + return (type < REISER4_PLUGIN_TYPES); +} + +/* true if plugin id is valid */ +int is_plugin_id_valid(reiser4_plugin_type type, reiser4_plugin_id id) +{ + assert("nikita-1653", is_plugin_type_valid(type)); + return id < plugins[type].builtin_num; +} + +/* return plugin by its @type and @id. + + Both arguments are checked for validness: this is supposed to be called + from user-level. + +NIKITA-FIXME-HANS: Do you instead mean that this checks ids created in +user space, and passed to the filesystem by use of method files? Your +comment really confused me on the first reading.... 
+ +*/ +reiser4_plugin *plugin_by_unsafe_id(reiser4_plugin_type type /* plugin type + * unchecked */, + reiser4_plugin_id id /* plugin id, + * unchecked */) +{ + if (is_plugin_type_valid(type)) { + if (is_plugin_id_valid(type, id)) + return plugin_at(&plugins[type], id); + else + /* id out of bounds */ + warning("nikita-2913", + "Invalid plugin id: [%i:%i]", type, id); + } else + /* type_id out of bounds */ + warning("nikita-2914", "Invalid type_id: %i", type); + return NULL; +} + +/** + * save_plugin_id - store plugin id in disk format + * @plugin: plugin to convert + * @area: where to store result + * + * Puts id of @plugin in little endian format to address @area. + */ +int save_plugin_id(reiser4_plugin *plugin /* plugin to convert */ , + d16 * area/* where to store result */) +{ + assert("nikita-1261", plugin != NULL); + assert("nikita-1262", area != NULL); + + put_unaligned(cpu_to_le16(plugin->h.id), area); + return 0; +} + +/* list of all plugins of given type */ +struct list_head *get_plugin_list(reiser4_plugin_type type) +{ + assert("nikita-1056", is_plugin_type_valid(type)); + return &plugins[type].plugins_list; +} + +static void update_pset_mask(reiser4_inode * info, pset_member memb) +{ + struct dentry *rootdir; + reiser4_inode *root; + + assert("edward-1443", memb != PSET_FILE); + + rootdir = inode_by_reiser4_inode(info)->i_sb->s_root; + if (rootdir != NULL) { + root = reiser4_inode_data(rootdir->d_inode); + /* + * if inode is different from the default one, or we are + * changing plugin of root directory, update plugin_mask + */ + if (aset_get(info->pset, memb) != + aset_get(root->pset, memb) || + info == root) + info->plugin_mask |= (1 << memb); + else + info->plugin_mask &= ~(1 << memb); + } +} + +/* Get specified plugin set member from parent, + or from fs-defaults (if no parent is given) and + install the result to pset of @self */ +int grab_plugin_pset(struct inode *self, + struct inode *ancestor, + pset_member memb) +{ + reiser4_plugin *plug; + reiser4_inode *info; + int result = 0; + + /* Do not grab if initialised already. */ + info = reiser4_inode_data(self); + if (aset_get(info->pset, memb) != NULL) + return 0; + if (ancestor) { + reiser4_inode *parent; + + parent = reiser4_inode_data(ancestor); + plug = aset_get(parent->hset, memb) ? : + aset_get(parent->pset, memb); + } else + plug = get_default_plugin(memb); + + result = set_plugin(&info->pset, memb, plug); + if (result == 0) { + if (!ancestor || self->i_sb->s_root->d_inode != self) + update_pset_mask(info, memb); + } + return result; +} + +/* Take missing pset members from root inode */ +int finish_pset(struct inode *inode) +{ + reiser4_plugin *plug; + reiser4_inode *root; + reiser4_inode *info; + pset_member memb; + int result = 0; + + root = reiser4_inode_data(inode->i_sb->s_root->d_inode); + info = reiser4_inode_data(inode); + + assert("edward-1455", root != NULL); + assert("edward-1456", info != NULL); + + /* file and directory plugins are already initialized. */ + for (memb = PSET_DIR + 1; memb < PSET_LAST; ++memb) { + + /* Do not grab if initialised already. 
*/ + if (aset_get(info->pset, memb) != NULL) + continue; + + plug = aset_get(root->pset, memb); + result = set_plugin(&info->pset, memb, plug); + if (result != 0) + break; + } + if (result != 0) { + warning("nikita-3447", + "Cannot set up plugins for %lli", + (unsigned long long) + get_inode_oid(inode)); + } + return result; +} + +int force_plugin_pset(struct inode *self, pset_member memb, + reiser4_plugin * plug) +{ + reiser4_inode *info; + int result = 0; + + if (!self->i_sb->s_root || self->i_sb->s_root->d_inode == self) { + /* Changing pset in the root object. */ + return RETERR(-EINVAL); + } + + info = reiser4_inode_data(self); + if (plug->h.pops != NULL && plug->h.pops->change != NULL) + result = plug->h.pops->change(self, plug, memb); + else + result = aset_set_unsafe(&info->pset, memb, plug); + if (result == 0) { + __u16 oldmask = info->plugin_mask; + + update_pset_mask(info, memb); + if (oldmask != info->plugin_mask) + reiser4_inode_clr_flag(self, REISER4_SDLEN_KNOWN); + } + return result; +} + +struct reiser4_plugin_type_data plugins[REISER4_PLUGIN_TYPES] = { + /* C90 initializers */ + [REISER4_FILE_PLUGIN_TYPE] = { + .type_id = REISER4_FILE_PLUGIN_TYPE, + .label = "file", + .desc = "Object plugins", + .builtin_num = sizeof_array(file_plugins), + .builtin = file_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(file_plugin) + }, + [REISER4_DIR_PLUGIN_TYPE] = { + .type_id = REISER4_DIR_PLUGIN_TYPE, + .label = "dir", + .desc = "Directory plugins", + .builtin_num = sizeof_array(dir_plugins), + .builtin = dir_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(dir_plugin) + }, + [REISER4_HASH_PLUGIN_TYPE] = { + .type_id = REISER4_HASH_PLUGIN_TYPE, + .label = "hash", + .desc = "Directory hashes", + .builtin_num = sizeof_array(hash_plugins), + .builtin = hash_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(hash_plugin) + }, + [REISER4_FIBRATION_PLUGIN_TYPE] = { + .type_id = + REISER4_FIBRATION_PLUGIN_TYPE, + .label = "fibration", + .desc = "Directory fibrations", + .builtin_num = sizeof_array(fibration_plugins), + .builtin = fibration_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(fibration_plugin) + }, + [REISER4_CIPHER_PLUGIN_TYPE] = { + .type_id = REISER4_CIPHER_PLUGIN_TYPE, + .label = "cipher", + .desc = "Cipher plugins", + .builtin_num = sizeof_array(cipher_plugins), + .builtin = cipher_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(cipher_plugin) + }, + [REISER4_DIGEST_PLUGIN_TYPE] = { + .type_id = REISER4_DIGEST_PLUGIN_TYPE, + .label = "digest", + .desc = "Digest plugins", + .builtin_num = sizeof_array(digest_plugins), + .builtin = digest_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(digest_plugin) + }, + [REISER4_COMPRESSION_PLUGIN_TYPE] = { + .type_id = REISER4_COMPRESSION_PLUGIN_TYPE, + .label = "compression", + .desc = "Compression plugins", + .builtin_num = sizeof_array(compression_plugins), + .builtin = compression_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(compression_plugin) + }, + [REISER4_FORMATTING_PLUGIN_TYPE] = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .label = "formatting", + .desc = "Tail inlining policies", + .builtin_num = sizeof_array(formatting_plugins), + .builtin = formatting_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(formatting_plugin) + }, + [REISER4_PERM_PLUGIN_TYPE] = { + .type_id = REISER4_PERM_PLUGIN_TYPE, + .label = "perm", + .desc = "Permission checks", + .builtin_num = sizeof_array(perm_plugins), + .builtin = perm_plugins, + .plugins_list = {NULL, NULL}, + 
.size = sizeof(perm_plugin) + }, + [REISER4_ITEM_PLUGIN_TYPE] = { + .type_id = REISER4_ITEM_PLUGIN_TYPE, + .label = "item", + .desc = "Item handlers", + .builtin_num = sizeof_array(item_plugins), + .builtin = item_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(item_plugin) + }, + [REISER4_NODE_PLUGIN_TYPE] = { + .type_id = REISER4_NODE_PLUGIN_TYPE, + .label = "node", + .desc = "node layout handlers", + .builtin_num = sizeof_array(node_plugins), + .builtin = node_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(node_plugin) + }, + [REISER4_SD_EXT_PLUGIN_TYPE] = { + .type_id = REISER4_SD_EXT_PLUGIN_TYPE, + .label = "sd_ext", + .desc = "Parts of stat-data", + .builtin_num = sizeof_array(sd_ext_plugins), + .builtin = sd_ext_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(sd_ext_plugin) + }, + [REISER4_FORMAT_PLUGIN_TYPE] = { + .type_id = REISER4_FORMAT_PLUGIN_TYPE, + .label = "disk_layout", + .desc = "defines filesystem on disk layout", + .builtin_num = sizeof_array(format_plugins), + .builtin = format_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(disk_format_plugin) + }, + [REISER4_JNODE_PLUGIN_TYPE] = { + .type_id = REISER4_JNODE_PLUGIN_TYPE, + .label = "jnode", + .desc = "defines kind of jnode", + .builtin_num = sizeof_array(jnode_plugins), + .builtin = jnode_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(jnode_plugin) + }, + [REISER4_COMPRESSION_MODE_PLUGIN_TYPE] = { + .type_id = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .label = "compression_mode", + .desc = "Defines compression mode", + .builtin_num = sizeof_array(compression_mode_plugins), + .builtin = compression_mode_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(compression_mode_plugin) + }, + [REISER4_CLUSTER_PLUGIN_TYPE] = { + .type_id = REISER4_CLUSTER_PLUGIN_TYPE, + .label = "cluster", + .desc = "Defines cluster size", + .builtin_num = sizeof_array(cluster_plugins), + .builtin = cluster_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(cluster_plugin) + }, + [REISER4_TXMOD_PLUGIN_TYPE] = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .label = "txmod", + .desc = "Defines transaction model", + .builtin_num = sizeof_array(txmod_plugins), + .builtin = txmod_plugins, + .plugins_list = {NULL, NULL}, + .size = sizeof(txmod_plugin) + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/plugin/plugin.h b/fs/reiser4/plugin/plugin.h new file mode 100644 index 000000000000..c7d75d50de4c --- /dev/null +++ b/fs/reiser4/plugin/plugin.h @@ -0,0 +1,999 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Basic plugin data-types. 
+ see fs/reiser4/plugin/plugin.c for details */ + +#if !defined(__FS_REISER4_PLUGIN_TYPES_H__) +#define __FS_REISER4_PLUGIN_TYPES_H__ + +#include "../forward.h" +#include "../debug.h" +#include "../dformat.h" +#include "../key.h" +#include "compress/compress.h" +#include "crypto/cipher.h" +#include "plugin_header.h" +#include "item/static_stat.h" +#include "item/internal.h" +#include "item/sde.h" +#include "item/cde.h" +#include "item/item.h" +#include "node/node.h" +#include "node/node41.h" +#include "security/perm.h" +#include "fibration.h" + +#include "space/bitmap.h" +#include "space/space_allocator.h" + +#include "disk_format/disk_format40.h" +#include "disk_format/disk_format.h" + +#include <linux/fs.h> /* for struct super_block, address_space */ +#include <linux/mm.h> /* for struct page */ +#include <linux/buffer_head.h> /* for struct buffer_head */ +#include <linux/dcache.h> /* for struct dentry */ +#include <linux/types.h> +#include <linux/crypto.h> + +typedef struct reiser4_object_on_wire reiser4_object_on_wire; + +/* + * File plugin. Defines the set of methods that file plugins implement, some + * of which are optional. + * + * A file plugin offers to the caller an interface for IO ( writing to and/or + * reading from) to what the caller sees as one sequence of bytes. An IO to it + * may affect more than one physical sequence of bytes, or no physical sequence + * of bytes, it may affect sequences of bytes offered by other file plugins to + * the semantic layer, and the file plugin may invoke other plugins and + * delegate work to them, but its interface is structured for offering the + * caller the ability to read and/or write what the caller sees as being a + * single sequence of bytes. + * + * The file plugin must present a sequence of bytes to the caller, but it does + * not necessarily have to store a sequence of bytes, it does not necessarily + * have to support efficient tree traversal to any offset in the sequence of + * bytes (tail and extent items, whose keys contain offsets, do however provide + * efficient non-sequential lookup of any offset in the sequence of bytes). + * + * Directory plugins provide methods for selecting file plugins by resolving a + * name for them. + * + * The functionality other filesystems call an attribute, and rigidly tie + * together, we decompose into orthogonal selectable features of files. Using + * the terminology we will define next, an attribute is a perhaps constrained, + * perhaps static length, file whose parent has a uni-count-intra-link to it, + * which might be grandparent-major-packed, and whose parent has a deletion + * method that deletes it. + * + * File plugins can implement constraints. + * + * Files can be of variable length (e.g. regular unix files), or of static + * length (e.g. static sized attributes). + * + * An object may have many sequences of bytes, and many file plugins, but, it + * has exactly one objectid. It is usually desirable that an object has a + * deletion method which deletes every item with that objectid. Items cannot + * in general be found by just their objectids. This means that an object must + * have either a method built into its deletion plugin method for knowing what + * items need to be deleted, or links stored with the object that provide the + * plugin with a method for finding those items. Deleting a file within an + * object may or may not have the effect of deleting the entire object, + * depending on the file plugin's deletion method. 
+ * + * LINK TAXONOMY: + * + * Many objects have a reference count, and when the reference count reaches 0 + * the object's deletion method is invoked. Some links embody a reference + * count increase ("countlinks"), and others do not ("nocountlinks"). + * + * Some links are bi-directional links ("bilinks"), and some are + * uni-directional("unilinks"). + * + * Some links are between parts of the same object ("intralinks"), and some are + * between different objects ("interlinks"). + * + * PACKING TAXONOMY: + * + * Some items of an object are stored with a major packing locality based on + * their object's objectid (e.g. unix directory items in plan A), and these are + * called "self-major-packed". + * + * Some items of an object are stored with a major packing locality based on + * their semantic parent object's objectid (e.g. unix file bodies in plan A), + * and these are called "parent-major-packed". + * + * Some items of an object are stored with a major packing locality based on + * their semantic grandparent, and these are called "grandparent-major-packed". + * Now carefully notice that we run into trouble with key length if we have to + * store a 8 byte major+minor grandparent based packing locality, an 8 byte + * parent objectid, an 8 byte attribute objectid, and an 8 byte offset, all in + * a 24 byte key. One of these fields must be sacrificed if an item is to be + * grandparent-major-packed, and which to sacrifice is left to the item author + * choosing to make the item grandparent-major-packed. You cannot make tail + * items and extent items grandparent-major-packed, though you could make them + * self-major-packed (usually they are parent-major-packed). + * + * In the case of ACLs (which are composed of fixed length ACEs which consist + * of {subject-type, subject, and permission bitmask} triples), it makes sense + * to not have an offset field in the ACE item key, and to allow duplicate keys + * for ACEs. Thus, the set of ACES for a given file is found by looking for a + * key consisting of the objectid of the grandparent (thus grouping all ACLs in + * a directory together), the minor packing locality of ACE, the objectid of + * the file, and 0. + * + * IO involves moving data from one location to another, which means that two + * locations must be specified, source and destination. + * + * This source and destination can be in the filesystem, or they can be a + * pointer in the user process address space plus a byte count. + * + * If both source and destination are in the filesystem, then at least one of + * them must be representable as a pure stream of bytes (which we call a flow, + * and define as a struct containing a key, a data pointer, and a length). + * This may mean converting one of them into a flow. We provide a generic + * cast_into_flow() method, which will work for any plugin supporting + * read_flow(), though it is inefficiently implemented in that it temporarily + * stores the flow in a buffer (Question: what to do with huge flows that + * cannot fit into memory? Answer: we must not convert them all at once. ) + * + * Performing a write requires resolving the write request into a flow defining + * the source, and a method that performs the write, and a key that defines + * where in the tree the write is to go. + * + * Performing a read requires resolving the read request into a flow defining + * the target, and a method that performs the read, and a key that defines + * where in the tree the read is to come from. 
+ * + * There will exist file plugins which have no pluginid stored on the disk for + * them, and which are only invoked by other plugins. + */ + +/* + * This should be incremented in every release which adds one + * or more new plugins. + * NOTE: Make sure that respective marco is also incremented in + * the new release of reiser4progs. + */ +#define PLUGIN_LIBRARY_VERSION 2 + + /* enumeration of fields within plugin_set */ +typedef enum { + PSET_FILE, + PSET_DIR, /* PSET_FILE and PSET_DIR should be first + * elements: inode.c:read_inode() depends on + * this. */ + PSET_PERM, + PSET_FORMATTING, + PSET_HASH, + PSET_FIBRATION, + PSET_SD, + PSET_DIR_ITEM, + PSET_CIPHER, + PSET_DIGEST, + PSET_COMPRESSION, + PSET_COMPRESSION_MODE, + PSET_CLUSTER, + PSET_CREATE, + PSET_LAST +} pset_member; + +/* builtin file-plugins */ +typedef enum { + /* regular file */ + UNIX_FILE_PLUGIN_ID, + /* directory */ + DIRECTORY_FILE_PLUGIN_ID, + /* symlink */ + SYMLINK_FILE_PLUGIN_ID, + /* for objects completely handled by the VFS: fifos, devices, + sockets */ + SPECIAL_FILE_PLUGIN_ID, + /* regular cryptcompress file */ + CRYPTCOMPRESS_FILE_PLUGIN_ID, + /* number of file plugins. Used as size of arrays to hold + file plugins. */ + LAST_FILE_PLUGIN_ID +} reiser4_file_id; + +typedef struct file_plugin { + + /* generic fields */ + plugin_header h; + + /* VFS methods */ + struct inode_operations * inode_ops; + struct file_operations * file_ops; + struct address_space_operations * as_ops; + /** + * Private methods. These are optional. If used they will allow you + * to minimize the amount of code needed to implement a deviation + * from some other method that also uses them. + */ + /* + * private inode_ops + */ + int (*setattr)(struct dentry *, struct iattr *); + /* + * private file_ops + */ + /* do whatever is necessary to do when object is opened */ + int (*open) (struct inode *inode, struct file *file); + ssize_t (*read) (struct file *, char __user *buf, size_t read_amount, + loff_t *off); + /* write as much as possible bytes from nominated @write_amount + * before plugin scheduling is occurred. Save scheduling state + * in @cont */ + ssize_t (*write) (struct file *, const char __user *buf, + size_t write_amount, loff_t * off, + struct dispatch_context * cont); + int (*ioctl) (struct file *filp, unsigned int cmd, unsigned long arg); + int (*mmap) (struct file *, struct vm_area_struct *); + int (*release) (struct inode *, struct file *); + /* + * private a_ops + */ + int (*readpage) (struct file *file, struct page *page); + int (*readpages)(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + int (*writepages)(struct address_space *mapping, + struct writeback_control *wbc); + int (*write_begin)(struct file *file, struct page *page, + loff_t pos, unsigned len, void **fsdata); + int (*write_end)(struct file *file, struct page *page, + loff_t pos, unsigned copied, void *fsdata); + sector_t (*bmap) (struct address_space * mapping, sector_t lblock); + /* other private methods */ + /* save inode cached stat-data onto disk. It was called + reiserfs_update_sd() in 3.x */ + int (*write_sd_by_inode) (struct inode *); + /* + * Construct flow into @flow according to user-supplied data. + * + * This is used by read/write methods to construct a flow to + * write/read. ->flow_by_inode() is plugin method, rather than single + * global implementation, because key in a flow used by plugin may + * depend on data in a @buf. 
+ * + * NIKITA-FIXME-HANS: please create statistics on what functions are + * dereferenced how often for the mongo benchmark. You can supervise + * Elena doing this for you if that helps. Email me the list of the + * top 10, with their counts, and an estimate of the total number of + * CPU cycles spent dereferencing as a percentage of CPU cycles spent + * processing (non-idle processing). If the total percent is, say, + * less than 1%, it will make our coding discussions much easier, and + * keep me from questioning whether functions like the below are too + * frequently called to be dereferenced. If the total percent is more + * than 1%, perhaps private methods should be listed in a "required" + * comment at the top of each plugin (with stern language about how if + * the comment is missing it will not be accepted by the maintainer), + * and implemented using macros not dereferenced functions. How about + * replacing this whole private methods part of the struct with a + * thorough documentation of what the standard helper functions are for + * use in constructing plugins? I think users have been asking for + * that, though not in so many words. + */ + int (*flow_by_inode) (struct inode *, const char __user *buf, + int user, loff_t size, + loff_t off, rw_op op, flow_t *); + /* + * Return the key used to retrieve an offset of a file. It is used by + * default implementation of ->flow_by_inode() method + * (common_build_flow()) and, among other things, to get to the extent + * from jnode of unformatted node. + */ + int (*key_by_inode) (struct inode *, loff_t off, reiser4_key *); + + /* NIKITA-FIXME-HANS: this comment is not as clear to others as you + * think.... */ + /* + * set the plugin for a file. Called during file creation in creat() + * but not reiser4() unless an inode already exists for the file. + */ + int (*set_plug_in_inode) (struct inode *inode, struct inode *parent, + reiser4_object_create_data *); + + /* NIKITA-FIXME-HANS: comment and name seem to say different things, + * are you setting up the object itself also or just adjusting the + * parent?.... */ + /* set up plugins for new @object created in @parent. @root is root + directory. */ + int (*adjust_to_parent) (struct inode *object, struct inode *parent, + struct inode *root); + /* + * this does whatever is necessary to do when object is created. For + * instance, for unix files stat data is inserted. It is supposed to be + * called by create of struct inode_operations. + */ + int (*create_object) (struct inode *object, struct inode *parent, + reiser4_object_create_data *); + /* + * this method should check REISER4_NO_SD and set REISER4_NO_SD on + * success. Deletion of an object usually includes removal of items + * building file body (for directories this is removal of "." and "..") + * and removal of stat-data item. + */ + int (*delete_object) (struct inode *); + + /* add link from @parent to @object */ + int (*add_link) (struct inode *object, struct inode *parent); + + /* remove link from @parent to @object */ + int (*rem_link) (struct inode *object, struct inode *parent); + + /* + * return true if item addressed by @coord belongs to @inode. This is + * used by read/write to properly slice flow into items in presence of + * multiple key assignment policies, because items of a file are not + * necessarily contiguous in a key space, for example, in a plan-b. 
+ */ + int (*owns_item) (const struct inode *, const coord_t *); + + /* checks whether yet another hard links to this object can be + added */ + int (*can_add_link) (const struct inode *); + + /* checks whether hard links to this object can be removed */ + int (*can_rem_link) (const struct inode *); + + /* not empty for DIRECTORY_FILE_PLUGIN_ID only currently. It calls + detach of directory plugin to remove ".." */ + int (*detach) (struct inode *child, struct inode *parent); + + /* called when @child was just looked up in the @parent. It is not + empty for DIRECTORY_FILE_PLUGIN_ID only where it calls attach of + directory plugin */ + int (*bind) (struct inode *child, struct inode *parent); + + /* process safe-link during mount */ + int (*safelink) (struct inode *object, reiser4_safe_link_t link, + __u64 value); + + /* The couple of estimate methods for all file operations */ + struct { + reiser4_block_nr(*create) (const struct inode *); + reiser4_block_nr(*update) (const struct inode *); + reiser4_block_nr(*unlink) (const struct inode *, + const struct inode *); + } estimate; + + /* + * reiser4 specific part of inode has a union of structures which are + * specific to a plugin. This method is called when inode is read + * (read_inode) and when file is created (common_create_child) so that + * file plugin could initialize its inode data + */ + void (*init_inode_data) (struct inode *, reiser4_object_create_data * , + int); + + /* + * This method performs progressive deletion of items and whole nodes + * from right to left. + * + * @tap: the point deletion process begins from, + * @from_key: the beginning of the deleted key range, + * @to_key: the end of the deleted key range, + * @smallest_removed: the smallest removed key, + * + * @return: 0 if success, error code otherwise, -E_REPEAT means that + * long cut_tree operation was interrupted for allowing atom commit . + */ + int (*cut_tree_worker) (tap_t *, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, struct inode *, + int, int *); + + /* called from ->destroy_inode() */ + void (*destroy_inode) (struct inode *); + + /* + * methods to serialize object identify. This is used, for example, by + * reiser4_{en,de}code_fh(). + */ + struct { + /* store object's identity at @area */ + char *(*write) (struct inode *inode, char *area); + /* parse object from wire to the @obj */ + char *(*read) (char *area, reiser4_object_on_wire * obj); + /* given object identity in @obj, find or create its dentry */ + struct dentry *(*get) (struct super_block *s, + reiser4_object_on_wire * obj); + /* how many bytes ->wire.write() consumes */ + int (*size) (struct inode *inode); + /* finish with object identify */ + void (*done) (reiser4_object_on_wire * obj); + } wire; +} file_plugin; + +extern file_plugin file_plugins[LAST_FILE_PLUGIN_ID]; + +struct reiser4_object_on_wire { + file_plugin *plugin; + union { + struct { + obj_key_id key_id; + } std; + void *generic; + } u; +}; + +/* builtin dir-plugins */ +typedef enum { + HASHED_DIR_PLUGIN_ID, + SEEKABLE_HASHED_DIR_PLUGIN_ID, + LAST_DIR_ID +} reiser4_dir_id; + +typedef struct dir_plugin { + /* generic fields */ + plugin_header h; + + struct inode_operations * inode_ops; + struct file_operations * file_ops; + struct address_space_operations * as_ops; + + /* + * private methods: These are optional. If used they will allow you to + * minimize the amount of code needed to implement a deviation from + * some other method that uses them. 
You could logically argue that + * they should be a separate type of plugin. + */ + + struct dentry *(*get_parent) (struct inode *childdir); + + /* + * check whether "name" is acceptable name to be inserted into this + * object. Optionally implemented by directory-like objects. Can check + * for maximal length, reserved symbols etc + */ + int (*is_name_acceptable) (const struct inode *inode, const char *name, + int len); + + void (*build_entry_key) (const struct inode *dir /* directory where + * entry is (or will + * be) in.*/ , + const struct qstr *name /* name of file + * referenced by this + * entry */ , + reiser4_key * result /* resulting key of + * directory entry */ ); + int (*build_readdir_key) (struct file *dir, reiser4_key * result); + int (*add_entry) (struct inode *object, struct dentry *where, + reiser4_object_create_data * data, + reiser4_dir_entry_desc * entry); + int (*rem_entry) (struct inode *object, struct dentry *where, + reiser4_dir_entry_desc * entry); + + /* + * initialize directory structure for newly created object. For normal + * unix directories, insert dot and dotdot. + */ + int (*init) (struct inode *object, struct inode *parent, + reiser4_object_create_data * data); + + /* destroy directory */ + int (*done) (struct inode *child); + + /* called when @subdir was just looked up in the @dir */ + int (*attach) (struct inode *subdir, struct inode *dir); + int (*detach) (struct inode *subdir, struct inode *dir); + + struct { + reiser4_block_nr(*add_entry) (const struct inode *); + reiser4_block_nr(*rem_entry) (const struct inode *); + reiser4_block_nr(*unlink) (const struct inode *, + const struct inode *); + } estimate; +} dir_plugin; + +extern dir_plugin dir_plugins[LAST_DIR_ID]; + +typedef struct formatting_plugin { + /* generic fields */ + plugin_header h; + /* returns non-zero iff file's tail has to be stored + in a direct item. */ + int (*have_tail) (const struct inode *inode, loff_t size); +} formatting_plugin; + +/** + * Plugins of this interface implement different transaction models. + * Transaction model is a high-level block allocator, which assigns block + * numbers to dirty nodes, and, thereby, decides, how individual dirty + * nodes of an atom will be committed. + */ +typedef struct txmod_plugin { + /* generic fields */ + plugin_header h; + /** + * allocate blocks in the FORWARD PARENT-FIRST context + * for formatted nodes + */ + int (*forward_alloc_formatted)(znode *node, const coord_t *parent_coord, + flush_pos_t *pos); //was allocate_znode_loaded + /** + * allocate blocks in the REVERSE PARENT-FIRST context + * for formatted nodes + */ + int (*reverse_alloc_formatted)(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos); // was reverse_relocate_test + /** + * allocate blocks in the FORWARD PARENT-FIRST context + * for unformatted nodes. + * + * This is called by handle_pos_on_twig to proceed extent unit + * flush_pos->coord is set to. It is to prepare for flushing + * sequence of not flushprepped nodes (slum). It supposes that + * slum starts at flush_pos->pos_in_unit position within the extent + */ + int (*forward_alloc_unformatted)(flush_pos_t *flush_pos); //was reiser4_alloc_extent + /** + * allocale blocks for unformatted nodes in squeeze_right_twig(). 
+ * @coord is set to extent unit + */ + squeeze_result (*squeeze_alloc_unformatted)(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key); // was_squalloc_extent +} txmod_plugin; + +typedef struct hash_plugin { + /* generic fields */ + plugin_header h; + /* computes hash of the given name */ + __u64(*hash) (const unsigned char *name, int len); +} hash_plugin; + +typedef struct cipher_plugin { + /* generic fields */ + plugin_header h; + struct crypto_blkcipher * (*alloc) (void); + void (*free) (struct crypto_blkcipher *tfm); + /* Offset translator. For each offset this returns (k * offset), where + k (k >= 1) is an expansion factor of the cipher algorithm. + For all symmetric algorithms k == 1. For asymmetric algorithms (which + inflate data) offset translation guarantees that all disk cluster's + units will have keys smaller then next cluster's one. + */ + loff_t(*scale) (struct inode *inode, size_t blocksize, loff_t src); + /* Cipher algorithms can accept data only by chunks of cipher block + size. This method is to align any flow up to cipher block size when + we pass it to cipher algorithm. To align means to append padding of + special format specific to the cipher algorithm */ + int (*align_stream) (__u8 *tail, int clust_size, int blocksize); + /* low-level key manager (check, install, etc..) */ + int (*setkey) (struct crypto_tfm *tfm, const __u8 *key, + unsigned int keylen); + /* main text processing procedures */ + void (*encrypt) (__u32 *expkey, __u8 *dst, const __u8 *src); + void (*decrypt) (__u32 *expkey, __u8 *dst, const __u8 *src); +} cipher_plugin; + +typedef struct digest_plugin { + /* generic fields */ + plugin_header h; + /* fingerprint size in bytes */ + int fipsize; + struct crypto_hash * (*alloc) (void); + void (*free) (struct crypto_hash *tfm); +} digest_plugin; + +typedef struct compression_plugin { + /* generic fields */ + plugin_header h; + int (*init) (void); + /* the maximum number of bytes the size of the "compressed" data can + * exceed the uncompressed data. 
*/ + int (*overrun) (unsigned src_len); + coa_t(*alloc) (tfm_action act); + void (*free) (coa_t coa, tfm_action act); + /* minimal size of the flow we still try to compress */ + int (*min_size_deflate) (void); + __u32(*checksum) (char *data, __u32 length); + /* main transform procedures */ + void (*compress) (coa_t coa, __u8 *src_first, size_t src_len, + __u8 *dst_first, size_t *dst_len); + void (*decompress) (coa_t coa, __u8 *src_first, size_t src_len, + __u8 *dst_first, size_t *dst_len); +} compression_plugin; + +typedef struct compression_mode_plugin { + /* generic fields */ + plugin_header h; + /* this is called when estimating compressibility + of a logical cluster by its content */ + int (*should_deflate) (struct inode *inode, cloff_t index); + /* this is called when results of compression should be saved */ + int (*accept_hook) (struct inode *inode, cloff_t index); + /* this is called when results of compression should be discarded */ + int (*discard_hook) (struct inode *inode, cloff_t index); +} compression_mode_plugin; + +typedef struct cluster_plugin { + /* generic fields */ + plugin_header h; + int shift; +} cluster_plugin; + +typedef struct sd_ext_plugin { + /* generic fields */ + plugin_header h; + int (*present) (struct inode *inode, char **area, int *len); + int (*absent) (struct inode *inode); + int (*save_len) (struct inode *inode); + int (*save) (struct inode *inode, char **area); + /* alignment requirement for this stat-data part */ + int alignment; +} sd_ext_plugin; + +/* this plugin contains methods to allocate objectid for newly created files, + to deallocate objectid when file gets removed, to report number of used and + free objectids */ +typedef struct oid_allocator_plugin { + /* generic fields */ + plugin_header h; + int (*init_oid_allocator) (reiser4_oid_allocator * map, __u64 nr_files, + __u64 oids); + /* used to report statfs->f_files */ + __u64(*oids_used) (reiser4_oid_allocator * map); + /* get next oid to use */ + __u64(*next_oid) (reiser4_oid_allocator * map); + /* used to report statfs->f_ffree */ + __u64(*oids_free) (reiser4_oid_allocator * map); + /* allocate new objectid */ + int (*allocate_oid) (reiser4_oid_allocator * map, oid_t *); + /* release objectid */ + int (*release_oid) (reiser4_oid_allocator * map, oid_t); + /* how many pages to reserve in transaction for allocation of new + objectid */ + int (*oid_reserve_allocate) (reiser4_oid_allocator * map); + /* how many pages to reserve in transaction for freeing of an + objectid */ + int (*oid_reserve_release) (reiser4_oid_allocator * map); + void (*print_info) (const char *, reiser4_oid_allocator *); +} oid_allocator_plugin; + +/* disk layout plugin: this specifies super block, journal, bitmap (if there + are any) locations, etc */ +typedef struct disk_format_plugin { + /* generic fields */ + plugin_header h; + /* replay journal, initialize super_info_data, etc */ + int (*init_format) (struct super_block *, void *data); + + /* key of root directory stat data */ + const reiser4_key * (*root_dir_key) (const struct super_block *); + + int (*release) (struct super_block *); + jnode * (*log_super) (struct super_block *); + int (*check_open) (const struct inode *object); + int (*version_update) (struct super_block *); +} disk_format_plugin; + +struct jnode_plugin { + /* generic fields */ + plugin_header h; + int (*init) (jnode * node); + int (*parse) (jnode * node); + struct address_space *(*mapping) (const jnode * node); + unsigned long (*index) (const jnode * node); + jnode * (*clone) (jnode * node); +}; 
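[Editor's aside — illustrative sketch, not part of this patch.] The structs above define only the per-type method tables; the builtin instances live in per-type arrays (cluster_plugins[], hash_plugins[], and so on, declared further down in plugin.h) which init_plugins() in plugin.c walks, assigning each entry's h.id and threading it onto that type's plugins_list. As a sketch under those assumptions, an entry of the cluster_plugins[] table (defined in fs/reiser4/plugin/cluster.c, which is not shown in this hunk) and a lookup through both the generic helper and the PLUGIN_BY_ID()-generated wrapper could look like the following; the "4K" label and shift value are example assumptions, not the actual contents of cluster.c.

	/* Sketch only: a hypothetical builtin cluster plugin table entry. */
	cluster_plugin cluster_plugins[LAST_CLUSTER_ID] = {
		[CLUSTER_4K_ID] = {
			.h = {
				.type_id = REISER4_CLUSTER_PLUGIN_TYPE,
				.id = CLUSTER_4K_ID,
				.pops = NULL,
				.label = "4K",
				.desc = "4096-byte logical clusters",
			},
			.shift = 12,	/* cluster size == 1 << shift == 4096 */
		},
		/* other cluster sizes omitted in this sketch */
	};

	/* Sketch only: resolving the entry after init_plugins() has run. */
	static int example_cluster_lookup(void)
	{
		/* generic lookup by (type, id); returns the wrapper union */
		reiser4_plugin *plug =
			plugin_by_id(REISER4_CLUSTER_PLUGIN_TYPE, CLUSTER_4K_ID);
		/* type-safe wrapper generated by PLUGIN_BY_ID(cluster_plugin, ...) */
		cluster_plugin *cplug = cluster_plugin_by_id(CLUSTER_4K_ID);

		return plug != NULL && cplug != NULL && (1 << cplug->shift) == 4096;
	}

The same pattern applies to every other plugin type: init_plugins() only requires that each populated slot carries a non-NULL h.label and a h.type_id matching its table, and optionally an init hook in h.pops.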
+ +/* plugin instance. */ +/* */ +/* This is "wrapper" union for all types of plugins. Most of the code uses */ +/* plugins of particular type (file_plugin, dir_plugin, etc.) rather than */ +/* operates with pointers to reiser4_plugin. This union is only used in */ +/* some generic code in plugin/plugin.c that operates on all */ +/* plugins. Technically speaking purpose of this union is to add type */ +/* safety to said generic code: each plugin type (file_plugin, for */ +/* example), contains plugin_header as its first memeber. This first member */ +/* is located at the same place in memory as .h member of */ +/* reiser4_plugin. Generic code, obtains pointer to reiser4_plugin and */ +/* looks in the .h which is header of plugin type located in union. This */ +/* allows to avoid type-casts. */ +union reiser4_plugin { + /* generic fields */ + plugin_header h; + /* file plugin */ + file_plugin file; + /* directory plugin */ + dir_plugin dir; + /* hash plugin, used by directory plugin */ + hash_plugin hash; + /* fibration plugin used by directory plugin */ + fibration_plugin fibration; + /* cipher transform plugin, used by file plugin */ + cipher_plugin cipher; + /* digest transform plugin, used by file plugin */ + digest_plugin digest; + /* compression transform plugin, used by file plugin */ + compression_plugin compression; + /* tail plugin, used by file plugin */ + formatting_plugin formatting; + /* permission plugin */ + perm_plugin perm; + /* node plugin */ + node_plugin node; + /* item plugin */ + item_plugin item; + /* stat-data extension plugin */ + sd_ext_plugin sd_ext; + /* disk layout plugin */ + disk_format_plugin format; + /* object id allocator plugin */ + oid_allocator_plugin oid_allocator; + /* plugin for different jnode types */ + jnode_plugin jnode; + /* compression mode plugin, used by object plugin */ + compression_mode_plugin compression_mode; + /* cluster plugin, used by object plugin */ + cluster_plugin clust; + /* transaction mode plugin */ + txmod_plugin txmod; + /* place-holder for new plugin types that can be registered + dynamically, and used by other dynamically loaded plugins. */ + void *generic; +}; + +struct reiser4_plugin_ops { + /* called when plugin is initialized */ + int (*init) (reiser4_plugin * plugin); + /* called when plugin is unloaded */ + int (*done) (reiser4_plugin * plugin); + /* load given plugin from disk */ + int (*load) (struct inode *inode, + reiser4_plugin * plugin, char **area, int *len); + /* how many space is required to store this plugin's state + in stat-data */ + int (*save_len) (struct inode *inode, reiser4_plugin * plugin); + /* save persistent plugin-data to disk */ + int (*save) (struct inode *inode, reiser4_plugin * plugin, + char **area); + /* alignment requirement for on-disk state of this plugin + in number of bytes */ + int alignment; + /* install itself into given inode. This can return error + (e.g., you cannot change hash of non-empty directory). */ + int (*change) (struct inode *inode, reiser4_plugin * plugin, + pset_member memb); + /* install itself into given inode. This can return error + (e.g., you cannot change hash of non-empty directory). 
*/ + int (*inherit) (struct inode *inode, struct inode *parent, + reiser4_plugin * plugin); +}; + +/* functions implemented in fs/reiser4/plugin/plugin.c */ + +/* stores plugin reference in reiser4-specific part of inode */ +extern int set_object_plugin(struct inode *inode, reiser4_plugin_id id); +extern int init_plugins(void); + +/* builtin plugins */ + +/* builtin hash-plugins */ + +typedef enum { + RUPASOV_HASH_ID, + R5_HASH_ID, + TEA_HASH_ID, + FNV1_HASH_ID, + DEGENERATE_HASH_ID, + LAST_HASH_ID +} reiser4_hash_id; + +/* builtin cipher plugins */ + +typedef enum { + NONE_CIPHER_ID, + LAST_CIPHER_ID +} reiser4_cipher_id; + +/* builtin digest plugins */ + +typedef enum { + SHA256_32_DIGEST_ID, + LAST_DIGEST_ID +} reiser4_digest_id; + +/* builtin compression mode plugins */ +typedef enum { + NONE_COMPRESSION_MODE_ID, + LATTD_COMPRESSION_MODE_ID, + ULTIM_COMPRESSION_MODE_ID, + FORCE_COMPRESSION_MODE_ID, + CONVX_COMPRESSION_MODE_ID, + LAST_COMPRESSION_MODE_ID +} reiser4_compression_mode_id; + +/* builtin cluster plugins */ +typedef enum { + CLUSTER_64K_ID, + CLUSTER_32K_ID, + CLUSTER_16K_ID, + CLUSTER_8K_ID, + CLUSTER_4K_ID, + LAST_CLUSTER_ID +} reiser4_cluster_id; + +/* builtin tail packing policies */ +typedef enum { + NEVER_TAILS_FORMATTING_ID, + ALWAYS_TAILS_FORMATTING_ID, + SMALL_FILE_FORMATTING_ID, + LAST_TAIL_FORMATTING_ID +} reiser4_formatting_id; + +/* builtin transaction models */ +typedef enum { + HYBRID_TXMOD_ID, + JOURNAL_TXMOD_ID, + WA_TXMOD_ID, + LAST_TXMOD_ID +} reiser4_txmod_id; + + +/* data type used to pack parameters that we pass to vfs object creation + function create_object() */ +struct reiser4_object_create_data { + /* plugin to control created object */ + reiser4_file_id id; + /* mode of regular file, directory or special file */ +/* what happens if some other sort of perm plugin is in use? */ + umode_t mode; + /* rdev of special file */ + dev_t rdev; + /* symlink target */ + const char *name; + /* add here something for non-standard objects you invent, like + query for interpolation file etc. */ + + struct reiser4_crypto_info *crypto; + + struct inode *parent; + struct dentry *dentry; +}; + +/* description of directory entry being created/destroyed/sought for + + It is passed down to the directory plugin and farther to the + directory item plugin methods. Creation of new directory is done in + several stages: first we search for an entry with the same name, then + create new one. reiser4_dir_entry_desc is used to store some information + collected at some stage of this process and required later: key of + item that we want to insert/delete and pointer to an object that will + be bound by the new directory entry. Probably some more fields will + be added there. + +*/ +struct reiser4_dir_entry_desc { + /* key of directory entry */ + reiser4_key key; + /* object bound by this entry. */ + struct inode *obj; +}; + +#define MAX_PLUGIN_TYPE_LABEL_LEN 32 +#define MAX_PLUGIN_PLUG_LABEL_LEN 32 + +#define PLUGIN_BY_ID(TYPE, ID, FIELD) \ +static inline TYPE *TYPE ## _by_id(reiser4_plugin_id id) \ +{ \ + reiser4_plugin *plugin = plugin_by_id(ID, id); \ + return plugin ? &plugin->FIELD : NULL; \ +} \ +static inline TYPE *TYPE ## _by_disk_id(reiser4_tree * tree, d16 *id) \ +{ \ + reiser4_plugin *plugin = plugin_by_disk_id(tree, ID, id); \ + return plugin ? &plugin->FIELD : NULL; \ +} \ +static inline TYPE *TYPE ## _by_unsafe_id(reiser4_plugin_id id) \ +{ \ + reiser4_plugin *plugin = plugin_by_unsafe_id(ID, id); \ + return plugin ? 
&plugin->FIELD : NULL; \ +} \ +static inline reiser4_plugin* TYPE ## _to_plugin(TYPE* plugin) \ +{ \ + return (reiser4_plugin *) plugin; \ +} \ +static inline reiser4_plugin_id TYPE ## _id(TYPE* plugin) \ +{ \ + return TYPE ## _to_plugin(plugin)->h.id; \ +} \ +typedef struct { int foo; } TYPE ## _plugin_dummy + +static inline int get_release_number_major(void) +{ + return LAST_FORMAT_ID - 1; +} + +static inline int get_release_number_minor(void) +{ + return PLUGIN_LIBRARY_VERSION; +} + +PLUGIN_BY_ID(item_plugin, REISER4_ITEM_PLUGIN_TYPE, item); +PLUGIN_BY_ID(file_plugin, REISER4_FILE_PLUGIN_TYPE, file); +PLUGIN_BY_ID(dir_plugin, REISER4_DIR_PLUGIN_TYPE, dir); +PLUGIN_BY_ID(node_plugin, REISER4_NODE_PLUGIN_TYPE, node); +PLUGIN_BY_ID(sd_ext_plugin, REISER4_SD_EXT_PLUGIN_TYPE, sd_ext); +PLUGIN_BY_ID(perm_plugin, REISER4_PERM_PLUGIN_TYPE, perm); +PLUGIN_BY_ID(hash_plugin, REISER4_HASH_PLUGIN_TYPE, hash); +PLUGIN_BY_ID(fibration_plugin, REISER4_FIBRATION_PLUGIN_TYPE, fibration); +PLUGIN_BY_ID(cipher_plugin, REISER4_CIPHER_PLUGIN_TYPE, cipher); +PLUGIN_BY_ID(digest_plugin, REISER4_DIGEST_PLUGIN_TYPE, digest); +PLUGIN_BY_ID(compression_plugin, REISER4_COMPRESSION_PLUGIN_TYPE, compression); +PLUGIN_BY_ID(formatting_plugin, REISER4_FORMATTING_PLUGIN_TYPE, formatting); +PLUGIN_BY_ID(disk_format_plugin, REISER4_FORMAT_PLUGIN_TYPE, format); +PLUGIN_BY_ID(jnode_plugin, REISER4_JNODE_PLUGIN_TYPE, jnode); +PLUGIN_BY_ID(compression_mode_plugin, REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + compression_mode); +PLUGIN_BY_ID(cluster_plugin, REISER4_CLUSTER_PLUGIN_TYPE, clust); +PLUGIN_BY_ID(txmod_plugin, REISER4_TXMOD_PLUGIN_TYPE, txmod); + +extern int save_plugin_id(reiser4_plugin * plugin, d16 * area); + +extern struct list_head *get_plugin_list(reiser4_plugin_type type_id); + +#define for_all_plugins(ptype, plugin) \ +for (plugin = list_entry(get_plugin_list(ptype)->next, reiser4_plugin, h.linkage); \ + get_plugin_list(ptype) != &plugin->h.linkage; \ + plugin = list_entry(plugin->h.linkage.next, reiser4_plugin, h.linkage)) + + +extern int grab_plugin_pset(struct inode *self, struct inode *ancestor, + pset_member memb); +extern int force_plugin_pset(struct inode *self, pset_member memb, + reiser4_plugin *plug); +extern int finish_pset(struct inode *inode); + +/* defined in fs/reiser4/plugin/object.c */ +extern file_plugin file_plugins[LAST_FILE_PLUGIN_ID]; +/* defined in fs/reiser4/plugin/object.c */ +extern dir_plugin dir_plugins[LAST_DIR_ID]; +/* defined in fs/reiser4/plugin/item/static_stat.c */ +extern sd_ext_plugin sd_ext_plugins[LAST_SD_EXTENSION]; +/* defined in fs/reiser4/plugin/hash.c */ +extern hash_plugin hash_plugins[LAST_HASH_ID]; +/* defined in fs/reiser4/plugin/fibration.c */ +extern fibration_plugin fibration_plugins[LAST_FIBRATION_ID]; +/* defined in fs/reiser4/plugin/txmod.c */ +extern txmod_plugin txmod_plugins[LAST_TXMOD_ID]; +/* defined in fs/reiser4/plugin/crypt.c */ +extern cipher_plugin cipher_plugins[LAST_CIPHER_ID]; +/* defined in fs/reiser4/plugin/digest.c */ +extern digest_plugin digest_plugins[LAST_DIGEST_ID]; +/* defined in fs/reiser4/plugin/compress/compress.c */ +extern compression_plugin compression_plugins[LAST_COMPRESSION_ID]; +/* defined in fs/reiser4/plugin/compress/compression_mode.c */ +extern compression_mode_plugin +compression_mode_plugins[LAST_COMPRESSION_MODE_ID]; +/* defined in fs/reiser4/plugin/cluster.c */ +extern cluster_plugin cluster_plugins[LAST_CLUSTER_ID]; +/* defined in fs/reiser4/plugin/tail.c */ +extern formatting_plugin 
formatting_plugins[LAST_TAIL_FORMATTING_ID]; +/* defined in fs/reiser4/plugin/security/security.c */ +extern perm_plugin perm_plugins[LAST_PERM_ID]; +/* defined in fs/reiser4/plugin/item/item.c */ +extern item_plugin item_plugins[LAST_ITEM_ID]; +/* defined in fs/reiser4/plugin/node/node.c */ +extern node_plugin node_plugins[LAST_NODE_ID]; +/* defined in fs/reiser4/plugin/disk_format/disk_format.c */ +extern disk_format_plugin format_plugins[LAST_FORMAT_ID]; + +/* __FS_REISER4_PLUGIN_TYPES_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/plugin_header.h b/fs/reiser4/plugin/plugin_header.h new file mode 100644 index 000000000000..5ee74af48bff --- /dev/null +++ b/fs/reiser4/plugin/plugin_header.h @@ -0,0 +1,150 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* plugin header. Data structures required by all plugin types. */ + +#if !defined(__PLUGIN_HEADER_H__) +#define __PLUGIN_HEADER_H__ + +/* plugin data-types and constants */ + +#include "../debug.h" +#include "../dformat.h" + +/* The list of Reiser4 interfaces */ +typedef enum { + REISER4_FILE_PLUGIN_TYPE, /* manage VFS objects */ + REISER4_DIR_PLUGIN_TYPE, /* manage directories */ + REISER4_ITEM_PLUGIN_TYPE, /* manage items */ + REISER4_NODE_PLUGIN_TYPE, /* manage formatted nodes */ + REISER4_HASH_PLUGIN_TYPE, /* hash methods */ + REISER4_FIBRATION_PLUGIN_TYPE, /* directory fibrations */ + REISER4_FORMATTING_PLUGIN_TYPE, /* dispatching policy */ + REISER4_PERM_PLUGIN_TYPE, /* stub (vacancy) */ + REISER4_SD_EXT_PLUGIN_TYPE, /* manage stat-data extensions */ + REISER4_FORMAT_PLUGIN_TYPE, /* disk format specifications */ + REISER4_JNODE_PLUGIN_TYPE, /* manage in-memory headers */ + REISER4_CIPHER_PLUGIN_TYPE, /* cipher transform methods */ + REISER4_DIGEST_PLUGIN_TYPE, /* digest transform methods */ + REISER4_COMPRESSION_PLUGIN_TYPE, /* compression methods */ + REISER4_COMPRESSION_MODE_PLUGIN_TYPE, /* dispatching policies */ + REISER4_CLUSTER_PLUGIN_TYPE, /* manage logical clusters */ + REISER4_TXMOD_PLUGIN_TYPE, /* transaction models */ + REISER4_PLUGIN_TYPES +} reiser4_plugin_type; + +/* Supported plugin groups */ +typedef enum { + REISER4_DIRECTORY_FILE, + REISER4_REGULAR_FILE, + REISER4_SYMLINK_FILE, + REISER4_SPECIAL_FILE, +} file_plugin_group; + +struct reiser4_plugin_ops; +/* generic plugin operations, supported by each + plugin type. */ +typedef struct reiser4_plugin_ops reiser4_plugin_ops; + +/* the common part of all plugin instances. */ +typedef struct plugin_header { + /* plugin type */ + reiser4_plugin_type type_id; + /* id of this plugin */ + reiser4_plugin_id id; + /* bitmask of groups the plugin belongs to. */ + reiser4_plugin_groups groups; + /* plugin operations */ + reiser4_plugin_ops *pops; +/* NIKITA-FIXME-HANS: usage of and access to label and desc is not commented and + * defined. */ + /* short label of this plugin */ + const char *label; + /* descriptive string.. */ + const char *desc; + /* list linkage */ + struct list_head linkage; +} plugin_header; + +#define plugin_of_group(plug, group) (plug->h.groups & (1 << group)) + +/* PRIVATE INTERFACES */ +/* NIKITA-FIXME-HANS: what is this for and why does it duplicate what is in + * plugin_header? */ +/* plugin type representation. */ +struct reiser4_plugin_type_data { + /* internal plugin type identifier. Should coincide with + index of this item in plugins[] array. 
*/ + reiser4_plugin_type type_id; + /* short symbolic label of this plugin type. Should be no longer + than MAX_PLUGIN_TYPE_LABEL_LEN characters including '\0'. */ + const char *label; + /* plugin type description longer than .label */ + const char *desc; + +/* NIKITA-FIXME-HANS: define built-in */ + /* number of built-in plugin instances of this type */ + int builtin_num; + /* array of built-in plugins */ + void *builtin; + struct list_head plugins_list; + size_t size; +}; + +extern struct reiser4_plugin_type_data plugins[REISER4_PLUGIN_TYPES]; + +int is_plugin_type_valid(reiser4_plugin_type type); +int is_plugin_id_valid(reiser4_plugin_type type, reiser4_plugin_id id); + +static inline reiser4_plugin *plugin_at(struct reiser4_plugin_type_data *ptype, + int i) +{ + char *builtin; + + builtin = ptype->builtin; + return (reiser4_plugin *) (builtin + i * ptype->size); +} + +/* return plugin by its @type_id and @id */ +static inline reiser4_plugin *plugin_by_id(reiser4_plugin_type type, + reiser4_plugin_id id) +{ + assert("nikita-1651", is_plugin_type_valid(type)); + assert("nikita-1652", is_plugin_id_valid(type, id)); + return plugin_at(&plugins[type], id); +} + +extern reiser4_plugin *plugin_by_unsafe_id(reiser4_plugin_type type_id, + reiser4_plugin_id id); + +/** + * plugin_by_disk_id - get reiser4_plugin + * @type_id: plugin type id + * @did: plugin id in disk format + * + * Returns reiser4_plugin by plugin type id an dplugin_id. + */ +static inline reiser4_plugin *plugin_by_disk_id(reiser4_tree * tree UNUSED_ARG, + reiser4_plugin_type type_id, + __le16 *plugin_id) +{ + /* + * what we should do properly is to maintain within each file-system a + * dictionary that maps on-disk plugin ids to "universal" ids. This + * dictionary will be resolved on mount time, so that this function + * will perform just one additional array lookup. + */ + return plugin_by_unsafe_id(type_id, le16_to_cpu(*plugin_id)); +} + +/* __PLUGIN_HEADER_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/plugin_set.c b/fs/reiser4/plugin/plugin_set.c new file mode 100644 index 000000000000..cae7a295515d --- /dev/null +++ b/fs/reiser4/plugin/plugin_set.c @@ -0,0 +1,387 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ +/* This file contains Reiser4 plugin set operations */ + +/* plugin sets + * + * Each file in reiser4 is controlled by a whole set of plugins (file plugin, + * directory plugin, hash plugin, tail policy plugin, security plugin, etc.) + * assigned (inherited, deduced from mode bits, etc.) at creation time. This + * set of plugins (so called pset) is described by structure plugin_set (see + * plugin/plugin_set.h), which contains pointers to all required plugins. + * + * Children can inherit some pset members from their parent, however sometimes + * it is useful to specify members different from parent ones. Since object's + * pset can not be easily changed without fatal consequences, we use for this + * purpose another special plugin table (so called hset, or heir set) described + * by the same structure. + * + * Inode only stores a pointers to pset and hset. Different inodes with the + * same set of pset (hset) members point to the same pset (hset). This is + * archived by storing psets and hsets in global hash table. 
Races are avoided + * by simple (and efficient so far) solution of never recycling psets, even + * when last inode pointing to it is destroyed. + */ + +#include "../debug.h" +#include "../super.h" +#include "plugin_set.h" + +#include <linux/slab.h> +#include <linux/stddef.h> + +/* slab for plugin sets */ +static struct kmem_cache *plugin_set_slab; + +static spinlock_t plugin_set_lock[8] __cacheline_aligned_in_smp = { + __SPIN_LOCK_UNLOCKED(plugin_set_lock[0]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[1]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[2]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[3]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[4]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[5]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[6]), + __SPIN_LOCK_UNLOCKED(plugin_set_lock[7]) +}; + +/* hash table support */ + +#define PS_TABLE_SIZE (32) + +static inline plugin_set *cast_to(const unsigned long *a) +{ + return container_of(a, plugin_set, hashval); +} + +static inline int pseq(const unsigned long *a1, const unsigned long *a2) +{ + plugin_set *set1; + plugin_set *set2; + + /* make sure fields are not missed in the code below */ + cassert(sizeof *set1 == + sizeof set1->hashval + + sizeof set1->link + + sizeof set1->file + + sizeof set1->dir + + sizeof set1->perm + + sizeof set1->formatting + + sizeof set1->hash + + sizeof set1->fibration + + sizeof set1->sd + + sizeof set1->dir_item + + sizeof set1->cipher + + sizeof set1->digest + + sizeof set1->compression + + sizeof set1->compression_mode + + sizeof set1->cluster + + sizeof set1->create); + + set1 = cast_to(a1); + set2 = cast_to(a2); + return + set1->hashval == set2->hashval && + set1->file == set2->file && + set1->dir == set2->dir && + set1->perm == set2->perm && + set1->formatting == set2->formatting && + set1->hash == set2->hash && + set1->fibration == set2->fibration && + set1->sd == set2->sd && + set1->dir_item == set2->dir_item && + set1->cipher == set2->cipher && + set1->digest == set2->digest && + set1->compression == set2->compression && + set1->compression_mode == set2->compression_mode && + set1->cluster == set2->cluster && + set1->create == set2->create; +} + +#define HASH_FIELD(hash, set, field) \ +({ \ + (hash) += (unsigned long)(set)->field >> 2; \ +}) + +static inline unsigned long calculate_hash(const plugin_set * set) +{ + unsigned long result; + + result = 0; + HASH_FIELD(result, set, file); + HASH_FIELD(result, set, dir); + HASH_FIELD(result, set, perm); + HASH_FIELD(result, set, formatting); + HASH_FIELD(result, set, hash); + HASH_FIELD(result, set, fibration); + HASH_FIELD(result, set, sd); + HASH_FIELD(result, set, dir_item); + HASH_FIELD(result, set, cipher); + HASH_FIELD(result, set, digest); + HASH_FIELD(result, set, compression); + HASH_FIELD(result, set, compression_mode); + HASH_FIELD(result, set, cluster); + HASH_FIELD(result, set, create); + return result & (PS_TABLE_SIZE - 1); +} + +static inline unsigned long +pshash(ps_hash_table * table, const unsigned long *a) +{ + return *a; +} + +/* The hash table definition */ +#define KMALLOC(size) kmalloc((size), reiser4_ctx_gfp_mask_get()) +#define KFREE(ptr, size) kfree(ptr) +TYPE_SAFE_HASH_DEFINE(ps, plugin_set, unsigned long, hashval, link, pshash, + pseq); +#undef KFREE +#undef KMALLOC + +static ps_hash_table ps_table; +static plugin_set empty_set = { + .hashval = 0, + .file = NULL, + .dir = NULL, + .perm = NULL, + .formatting = NULL, + .hash = NULL, + .fibration = NULL, + .sd = NULL, + .dir_item = NULL, + .cipher = NULL, + .digest = NULL, + .compression = NULL, + 
.compression_mode = NULL, + .cluster = NULL, + .create = NULL, + .link = {NULL} +}; + +plugin_set *plugin_set_get_empty(void) +{ + return &empty_set; +} + +void plugin_set_put(plugin_set * set) +{ +} + +static inline unsigned long *pset_field(plugin_set * set, int offset) +{ + return (unsigned long *)(((char *)set) + offset); +} + +static int plugin_set_field(plugin_set ** set, const unsigned long val, + const int offset) +{ + unsigned long *spot; + spinlock_t *lock; + plugin_set replica; + plugin_set *twin; + plugin_set *psal; + plugin_set *orig; + + assert("nikita-2902", set != NULL); + assert("nikita-2904", *set != NULL); + + spot = pset_field(*set, offset); + if (unlikely(*spot == val)) + return 0; + + replica = *(orig = *set); + *pset_field(&replica, offset) = val; + replica.hashval = calculate_hash(&replica); + rcu_read_lock(); + twin = ps_hash_find(&ps_table, &replica.hashval); + if (unlikely(twin == NULL)) { + rcu_read_unlock(); + psal = kmem_cache_alloc(plugin_set_slab, + reiser4_ctx_gfp_mask_get()); + if (psal == NULL) + return RETERR(-ENOMEM); + *psal = replica; + lock = &plugin_set_lock[replica.hashval & 7]; + spin_lock(lock); + twin = ps_hash_find(&ps_table, &replica.hashval); + if (likely(twin == NULL)) { + *set = psal; + ps_hash_insert_rcu(&ps_table, psal); + } else { + *set = twin; + kmem_cache_free(plugin_set_slab, psal); + } + spin_unlock(lock); + } else { + rcu_read_unlock(); + *set = twin; + } + return 0; +} + +static struct { + int offset; + reiser4_plugin_groups groups; + reiser4_plugin_type type; +} pset_descr[PSET_LAST] = { + [PSET_FILE] = { + .offset = offsetof(plugin_set, file), + .type = REISER4_FILE_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_DIR] = { + .offset = offsetof(plugin_set, dir), + .type = REISER4_DIR_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_PERM] = { + .offset = offsetof(plugin_set, perm), + .type = REISER4_PERM_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_FORMATTING] = { + .offset = offsetof(plugin_set, formatting), + .type = REISER4_FORMATTING_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_HASH] = { + .offset = offsetof(plugin_set, hash), + .type = REISER4_HASH_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_FIBRATION] = { + .offset = offsetof(plugin_set, fibration), + .type = REISER4_FIBRATION_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_SD] = { + .offset = offsetof(plugin_set, sd), + .type = REISER4_ITEM_PLUGIN_TYPE, + .groups = (1 << STAT_DATA_ITEM_TYPE) + }, + [PSET_DIR_ITEM] = { + .offset = offsetof(plugin_set, dir_item), + .type = REISER4_ITEM_PLUGIN_TYPE, + .groups = (1 << DIR_ENTRY_ITEM_TYPE) + }, + [PSET_CIPHER] = { + .offset = offsetof(plugin_set, cipher), + .type = REISER4_CIPHER_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_DIGEST] = { + .offset = offsetof(plugin_set, digest), + .type = REISER4_DIGEST_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_COMPRESSION] = { + .offset = offsetof(plugin_set, compression), + .type = REISER4_COMPRESSION_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_COMPRESSION_MODE] = { + .offset = offsetof(plugin_set, compression_mode), + .type = REISER4_COMPRESSION_MODE_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_CLUSTER] = { + .offset = offsetof(plugin_set, cluster), + .type = REISER4_CLUSTER_PLUGIN_TYPE, + .groups = 0 + }, + [PSET_CREATE] = { + .offset = offsetof(plugin_set, create), + .type = REISER4_FILE_PLUGIN_TYPE, + .groups = (1 << REISER4_REGULAR_FILE) + } +}; + +#define DEFINE_PSET_OPS(PREFIX) \ + reiser4_plugin_type PREFIX##_member_to_type_unsafe(pset_member memb) \ +{ \ + if (memb > PSET_LAST) \ + return REISER4_PLUGIN_TYPES; \ + return 
pset_descr[memb].type; \ +} \ + \ +int PREFIX##_set_unsafe(plugin_set ** set, pset_member memb, \ + reiser4_plugin * plugin) \ +{ \ + assert("nikita-3492", set != NULL); \ + assert("nikita-3493", *set != NULL); \ + assert("nikita-3494", plugin != NULL); \ + assert("nikita-3495", 0 <= memb && memb < PSET_LAST); \ + assert("nikita-3496", plugin->h.type_id == pset_descr[memb].type); \ + \ + if (pset_descr[memb].groups) \ + if (!(pset_descr[memb].groups & plugin->h.groups)) \ + return -EINVAL; \ + \ + return plugin_set_field(set, \ + (unsigned long)plugin, pset_descr[memb].offset); \ +} \ + \ +reiser4_plugin *PREFIX##_get(plugin_set * set, pset_member memb) \ +{ \ + assert("nikita-3497", set != NULL); \ + assert("nikita-3498", 0 <= memb && memb < PSET_LAST); \ + \ + return *(reiser4_plugin **) (((char *)set) + pset_descr[memb].offset); \ +} + +DEFINE_PSET_OPS(aset); + +int set_plugin(plugin_set ** set, pset_member memb, reiser4_plugin * plugin) +{ + return plugin_set_field(set, + (unsigned long)plugin, pset_descr[memb].offset); +} + +/** + * init_plugin_set - create plugin set cache and hash table + * + * Initializes slab cache of plugin_set-s and their hash table. It is part of + * reiser4 module initialization. + */ +int init_plugin_set(void) +{ + int result; + + result = ps_hash_init(&ps_table, PS_TABLE_SIZE); + if (result == 0) { + plugin_set_slab = kmem_cache_create("plugin_set", + sizeof(plugin_set), 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (plugin_set_slab == NULL) + result = RETERR(-ENOMEM); + } + return result; +} + +/** + * done_plugin_set - delete plugin_set cache and plugin_set hash table + * + * This is called on reiser4 module unloading or system shutdown. + */ +void done_plugin_set(void) +{ + plugin_set *cur, *next; + + for_all_in_htable(&ps_table, ps, cur, next) { + ps_hash_remove(&ps_table, cur); + kmem_cache_free(plugin_set_slab, cur); + } + destroy_reiser4_cache(&plugin_set_slab); + ps_hash_done(&ps_table); +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/plugin/plugin_set.h b/fs/reiser4/plugin/plugin_set.h new file mode 100644 index 000000000000..5afb61a3d9ab --- /dev/null +++ b/fs/reiser4/plugin/plugin_set.h @@ -0,0 +1,78 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Reiser4 plugin set definition. + See fs/reiser4/plugin/plugin_set.c for details */ + +#if !defined(__PLUGIN_SET_H__) +#define __PLUGIN_SET_H__ + +#include "../type_safe_hash.h" +#include "plugin.h" + +#include <linux/rcupdate.h> + +struct plugin_set; +typedef struct plugin_set plugin_set; + +TYPE_SAFE_HASH_DECLARE(ps, plugin_set); + +struct plugin_set { + unsigned long hashval; + /* plugin of file */ + file_plugin *file; + /* plugin of dir */ + dir_plugin *dir; + /* perm plugin for this file */ + perm_plugin *perm; + /* tail policy plugin. Only meaningful for regular files */ + formatting_plugin *formatting; + /* hash plugin. Only meaningful for directories. */ + hash_plugin *hash; + /* fibration plugin. Only meaningful for directories. 
*/ + fibration_plugin *fibration; + /* plugin of stat-data */ + item_plugin *sd; + /* plugin of items a directory is built of */ + item_plugin *dir_item; + /* cipher plugin */ + cipher_plugin *cipher; + /* digest plugin */ + digest_plugin *digest; + /* compression plugin */ + compression_plugin *compression; + /* compression mode plugin */ + compression_mode_plugin *compression_mode; + /* cluster plugin */ + cluster_plugin *cluster; + /* this specifies file plugin of regular children. + only meaningful for directories */ + file_plugin *create; + ps_hash_link link; +}; + +extern plugin_set *plugin_set_get_empty(void); +extern void plugin_set_put(plugin_set * set); + +extern int init_plugin_set(void); +extern void done_plugin_set(void); + +extern reiser4_plugin *aset_get(plugin_set * set, pset_member memb); +extern int set_plugin(plugin_set ** set, pset_member memb, + reiser4_plugin * plugin); +extern int aset_set_unsafe(plugin_set ** set, pset_member memb, + reiser4_plugin * plugin); +extern reiser4_plugin_type aset_member_to_type_unsafe(pset_member memb); + +/* __PLUGIN_SET_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/regular.c b/fs/reiser4/plugin/regular.c new file mode 100644 index 000000000000..9918e9563e3f --- /dev/null +++ b/fs/reiser4/plugin/regular.c @@ -0,0 +1,44 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Contains Reiser4 regular plugins which: + . specify a set of reiser4 regular object plugins, + . used by directory plugin to create entries powered by specified + regular plugins */ + +#include "plugin.h" + +regular_plugin regular_plugins[LAST_REGULAR_ID] = { + [UF_REGULAR_ID] = { + .h = { + .type_id = REISER4_REGULAR_PLUGIN_TYPE, + .id = UF_REGULAR_ID, + .pops = NULL, + .label = "unixfile", + .desc = "Unix file regular plugin", + .linkage = {NULL, NULL} + }, + .id = UNIX_FILE_PLUGIN_ID + }, + [CRC_REGULAR_ID] = { + .h = { + .type_id = REISER4_REGULAR_PLUGIN_TYPE, + .id = CRC_REGULAR_ID, + .pops = NULL, + .label = "cryptcompress", + .desc = "Cryptcompress regular plugin", + .linkage = {NULL, NULL} + }, + .id = CRC_FILE_PLUGIN_ID + } +}; + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/security/Makefile b/fs/reiser4/plugin/security/Makefile new file mode 100644 index 000000000000..645dbb550d84 --- /dev/null +++ b/fs/reiser4/plugin/security/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_REISER4_FS) += security_plugins.o + +security_plugins-objs := \ + perm.o diff --git a/fs/reiser4/plugin/security/perm.c b/fs/reiser4/plugin/security/perm.c new file mode 100644 index 000000000000..64c285611273 --- /dev/null +++ b/fs/reiser4/plugin/security/perm.c @@ -0,0 +1,33 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* + * This file contains implementation of permission plugins. 
+ * See the comments in perm.h + */ + +#include "../plugin.h" +#include "../plugin_header.h" +#include "../../debug.h" + +perm_plugin perm_plugins[LAST_PERM_ID] = { + [NULL_PERM_ID] = { + .h = { + .type_id = REISER4_PERM_PLUGIN_TYPE, + .id = NULL_PERM_ID, + .pops = NULL, + .label = "null", + .desc = "stub permission plugin", + .linkage = {NULL, NULL} + } + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/security/perm.h b/fs/reiser4/plugin/security/perm.h new file mode 100644 index 000000000000..caa27498dd69 --- /dev/null +++ b/fs/reiser4/plugin/security/perm.h @@ -0,0 +1,38 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Perm (short for "permissions") plugins common stuff. */ + +#if !defined( __REISER4_PERM_H__ ) +#define __REISER4_PERM_H__ + +#include "../../forward.h" +#include "../plugin_header.h" + +#include <linux/types.h> + +/* Definition of permission plugin */ +/* NIKITA-FIXME-HANS: define what this is targeted for. + It does not seem to be intended for use with sys_reiser4. Explain. */ + +/* NOTE-EDWARD: This seems to be intended for deprecated sys_reiser4. + Consider it like a temporary "seam" and reserved pset member. + If you have something usefull to add, then rename this plugin and add here */ +typedef struct perm_plugin { + /* generic plugin fields */ + plugin_header h; +} perm_plugin; + +typedef enum { NULL_PERM_ID, LAST_PERM_ID } reiser4_perm_id; + +/* __REISER4_PERM_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/plugin/space/Makefile b/fs/reiser4/plugin/space/Makefile new file mode 100644 index 000000000000..5a0c94fbef74 --- /dev/null +++ b/fs/reiser4/plugin/space/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_REISER4_FS) += space_plugins.o + +space_plugins-objs := \ + bitmap.o diff --git a/fs/reiser4/plugin/space/bitmap.c b/fs/reiser4/plugin/space/bitmap.c new file mode 100644 index 000000000000..898c3920cc3a --- /dev/null +++ b/fs/reiser4/plugin/space/bitmap.c @@ -0,0 +1,1609 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#include "../../debug.h" +#include "../../dformat.h" +#include "../../txnmgr.h" +#include "../../jnode.h" +#include "../../block_alloc.h" +#include "../../tree.h" +#include "../../super.h" +#include "../plugin.h" +#include "space_allocator.h" +#include "bitmap.h" + +#include <linux/types.h> +#include <linux/fs.h> /* for struct super_block */ +#include <linux/mutex.h> +#include <asm/div64.h> + +/* Proposed (but discarded) optimization: dynamic loading/unloading of bitmap + * blocks + + A useful optimization of reiser4 bitmap handling would be dynamic bitmap + blocks loading/unloading which is different from v3.x where all bitmap + blocks are loaded at mount time. + + To implement bitmap blocks unloading we need to count bitmap block usage + and detect currently unused blocks allowing them to be unloaded. It is not + a simple task since we allow several threads to modify one bitmap block + simultaneously. + + Briefly speaking, the following schema is proposed: we count in special + variable associated with each bitmap block. That is for counting of block + alloc/dealloc operations on that bitmap block. 
With a deferred block + deallocation feature of reiser4 all those operation will be represented in + atom dirty/deleted lists as jnodes for freshly allocated or deleted + nodes. + + So, we increment usage counter for each new node allocated or deleted, and + decrement it at atom commit one time for each node from the dirty/deleted + atom's list. Of course, freshly allocated node deletion and node reusing + from atom deleted (if we do so) list should decrement bitmap usage counter + also. + + This schema seems to be working but that reference counting is + not easy to debug. I think we should agree with Hans and do not implement + it in v4.0. Current code implements "on-demand" bitmap blocks loading only. + + For simplicity all bitmap nodes (both commit and working bitmap blocks) are + loaded into memory on fs mount time or each bitmap nodes are loaded at the + first access to it, the "dont_load_bitmap" mount option controls whether + bimtap nodes should be loaded at mount time. Dynamic unloading of bitmap + nodes currently is not supported. */ + +#define CHECKSUM_SIZE 4 + +#define BYTES_PER_LONG (sizeof(long)) + +#if BITS_PER_LONG == 64 +# define LONG_INT_SHIFT (6) +#else +# define LONG_INT_SHIFT (5) +#endif + +#define LONG_INT_MASK (BITS_PER_LONG - 1UL) + +typedef unsigned long ulong_t; + +#define bmap_size(blocksize) ((blocksize) - CHECKSUM_SIZE) +#define bmap_bit_count(blocksize) (bmap_size(blocksize) << 3) + +/* Block allocation/deallocation are done through special bitmap objects which + are allocated in an array at fs mount. */ +struct bitmap_node { + struct mutex mutex; /* long term lock object */ + + jnode *wjnode; /* j-nodes for WORKING ... */ + jnode *cjnode; /* ... and COMMIT bitmap blocks */ + + bmap_off_t first_zero_bit; /* for skip_busy option implementation */ + + atomic_t loaded; /* a flag which shows that bnode is loaded + * already */ +}; + +static inline char *bnode_working_data(struct bitmap_node *bnode) +{ + char *data; + + data = jdata(bnode->wjnode); + assert("zam-429", data != NULL); + + return data + CHECKSUM_SIZE; +} + +static inline char *bnode_commit_data(const struct bitmap_node *bnode) +{ + char *data; + + data = jdata(bnode->cjnode); + assert("zam-430", data != NULL); + + return data + CHECKSUM_SIZE; +} + +static inline __u32 bnode_commit_crc(const struct bitmap_node *bnode) +{ + char *data; + + data = jdata(bnode->cjnode); + assert("vpf-261", data != NULL); + + return le32_to_cpu(get_unaligned((d32 *)data)); +} + +static inline void bnode_set_commit_crc(struct bitmap_node *bnode, __u32 crc) +{ + char *data; + + data = jdata(bnode->cjnode); + assert("vpf-261", data != NULL); + + put_unaligned(cpu_to_le32(crc), (d32 *)data); +} + +/* ZAM-FIXME-HANS: is the idea that this might be a union someday? having + * written the code, does this added abstraction still have */ +/* ANSWER(Zam): No, the abstractions is in the level above (exact place is the + * reiser4_space_allocator structure) */ +/* ZAM-FIXME-HANS: I don't understand your english in comment above. */ +/* FIXME-HANS(Zam): I don't understand the questions like "might be a union + * someday?". What they about? If there is a reason to have a union, it should + * be a union, if not, it should not be a union. "..might be someday" means no + * reason. 
*/ +struct bitmap_allocator_data { + /* an array for bitmap blocks direct access */ + struct bitmap_node *bitmap; +}; + +#define get_barray(super) \ +(((struct bitmap_allocator_data *)(get_super_private(super)->space_allocator.u.generic)) -> bitmap) + +#define get_bnode(super, i) (get_barray(super) + i) + +/* allocate and initialize jnode with JNODE_BITMAP type */ +static jnode *bnew(void) +{ + jnode *jal = jalloc(); + + if (jal) + jnode_init(jal, current_tree, JNODE_BITMAP); + + return jal; +} + +/* this file contains: + - bitmap based implementation of space allocation plugin + - all the helper functions like set bit, find_first_zero_bit, etc */ + +/* Audited by: green(2002.06.12) */ +static int find_next_zero_bit_in_word(ulong_t word, int start_bit) +{ + ulong_t mask = 1UL << start_bit; + int i = start_bit; + + while ((word & mask) != 0) { + mask <<= 1; + if (++i >= BITS_PER_LONG) + break; + } + + return i; +} + +#include <linux/bitops.h> + +#if BITS_PER_LONG == 64 + +#define OFF(addr) (((ulong_t)(addr) & (BYTES_PER_LONG - 1)) << 3) +#define BASE(addr) ((ulong_t*) ((ulong_t)(addr) & ~(BYTES_PER_LONG - 1))) + +static inline void reiser4_set_bit(int nr, void *addr) +{ + __test_and_set_bit_le(nr + OFF(addr), BASE(addr)); +} + +static inline void reiser4_clear_bit(int nr, void *addr) +{ + __test_and_clear_bit_le(nr + OFF(addr), BASE(addr)); +} + +static inline int reiser4_test_bit(int nr, void *addr) +{ + return test_bit_le(nr + OFF(addr), BASE(addr)); +} +static inline int reiser4_find_next_zero_bit(void *addr, int maxoffset, + int offset) +{ + int off = OFF(addr); + + return find_next_zero_bit_le(BASE(addr), maxoffset + off, + offset + off) - off; +} + +#else + +#define reiser4_set_bit(nr, addr) __test_and_set_bit_le(nr, addr) +#define reiser4_clear_bit(nr, addr) __test_and_clear_bit_le(nr, addr) +#define reiser4_test_bit(nr, addr) test_bit_le(nr, addr) + +#define reiser4_find_next_zero_bit(addr, maxoffset, offset) \ +find_next_zero_bit_le(addr, maxoffset, offset) +#endif + +/* Search for a set bit in the bit array [@start_offset, @max_offset[, offsets + * are counted from @addr, return the offset of the first bit if it is found, + * @maxoffset otherwise. */ +static bmap_off_t __reiser4_find_next_set_bit(void *addr, bmap_off_t max_offset, + bmap_off_t start_offset) +{ + ulong_t *base = addr; + /* start_offset is in bits, convert it to byte offset within bitmap. */ + int word_nr = start_offset >> LONG_INT_SHIFT; + /* bit number within the byte. */ + int bit_nr = start_offset & LONG_INT_MASK; + int max_word_nr = (max_offset - 1) >> LONG_INT_SHIFT; + + assert("zam-387", max_offset != 0); + + /* Unaligned @start_offset case. */ + if (bit_nr != 0) { + bmap_nr_t nr; + + nr = find_next_zero_bit_in_word(~(base[word_nr]), bit_nr); + + if (nr < BITS_PER_LONG) + return (word_nr << LONG_INT_SHIFT) + nr; + + ++word_nr; + } + + /* Fast scan trough aligned words. 
*/ + while (word_nr <= max_word_nr) { + if (base[word_nr] != 0) { + return (word_nr << LONG_INT_SHIFT) + + find_next_zero_bit_in_word(~(base[word_nr]), 0); + } + + ++word_nr; + } + + return max_offset; +} + +#if BITS_PER_LONG == 64 + +static bmap_off_t reiser4_find_next_set_bit(void *addr, bmap_off_t max_offset, + bmap_off_t start_offset) +{ + bmap_off_t off = OFF(addr); + + return __reiser4_find_next_set_bit(BASE(addr), max_offset + off, + start_offset + off) - off; +} + +#else +#define reiser4_find_next_set_bit(addr, max_offset, start_offset) \ + __reiser4_find_next_set_bit(addr, max_offset, start_offset) +#endif + +/* search for the first set bit in single word. */ +static int find_last_set_bit_in_word(ulong_t word, int start_bit) +{ + ulong_t bit_mask; + int nr = start_bit; + + assert("zam-965", start_bit < BITS_PER_LONG); + assert("zam-966", start_bit >= 0); + + bit_mask = (1UL << nr); + + while (bit_mask != 0) { + if (bit_mask & word) + return nr; + bit_mask >>= 1; + nr--; + } + return BITS_PER_LONG; +} + +/* Search bitmap for a set bit in backward direction from the end to the + * beginning of given region + * + * @result: result offset of the last set bit + * @addr: base memory address, + * @low_off: low end of the search region, edge bit included into the region, + * @high_off: high end of the search region, edge bit included into the region, + * + * @return: 0 - set bit was found, -1 otherwise. + */ +static int +reiser4_find_last_set_bit(bmap_off_t * result, void *addr, bmap_off_t low_off, + bmap_off_t high_off) +{ + ulong_t *base = addr; + int last_word; + int first_word; + int last_bit; + int nr; + + assert("zam-962", high_off >= low_off); + + last_word = high_off >> LONG_INT_SHIFT; + last_bit = high_off & LONG_INT_MASK; + first_word = low_off >> LONG_INT_SHIFT; + + if (last_bit < BITS_PER_LONG) { + nr = find_last_set_bit_in_word(base[last_word], last_bit); + if (nr < BITS_PER_LONG) { + *result = (last_word << LONG_INT_SHIFT) + nr; + return 0; + } + --last_word; + } + while (last_word >= first_word) { + if (base[last_word] != 0x0) { + last_bit = + find_last_set_bit_in_word(base[last_word], + BITS_PER_LONG - 1); + assert("zam-972", last_bit < BITS_PER_LONG); + *result = (last_word << LONG_INT_SHIFT) + last_bit; + return 0; + } + --last_word; + } + + return -1; /* set bit not found */ +} + +/* Search bitmap for a clear bit in backward direction from the end to the + * beginning of given region */ +static int +reiser4_find_last_zero_bit(bmap_off_t * result, void *addr, bmap_off_t low_off, + bmap_off_t high_off) +{ + ulong_t *base = addr; + int last_word; + int first_word; + int last_bit; + int nr; + + last_word = high_off >> LONG_INT_SHIFT; + last_bit = high_off & LONG_INT_MASK; + first_word = low_off >> LONG_INT_SHIFT; + + if (last_bit < BITS_PER_LONG) { + nr = find_last_set_bit_in_word(~base[last_word], last_bit); + if (nr < BITS_PER_LONG) { + *result = (last_word << LONG_INT_SHIFT) + nr; + return 0; + } + --last_word; + } + while (last_word >= first_word) { + if (base[last_word] != (ulong_t) (-1)) { + *result = (last_word << LONG_INT_SHIFT) + + find_last_set_bit_in_word(~base[last_word], + BITS_PER_LONG - 1); + return 0; + } + --last_word; + } + + return -1; /* zero bit not found */ +} + +/* Audited by: green(2002.06.12) */ +static void reiser4_clear_bits(char *addr, bmap_off_t start, bmap_off_t end) +{ + int first_byte; + int last_byte; + + unsigned char first_byte_mask = 0xFF; + unsigned char last_byte_mask = 0xFF; + + assert("zam-410", start < end); + + first_byte = start >> 
3; + last_byte = (end - 1) >> 3; + + if (last_byte > first_byte + 1) + memset(addr + first_byte + 1, 0, + (size_t) (last_byte - first_byte - 1)); + + first_byte_mask >>= 8 - (start & 0x7); + last_byte_mask <<= ((end - 1) & 0x7) + 1; + + if (first_byte == last_byte) { + addr[first_byte] &= (first_byte_mask | last_byte_mask); + } else { + addr[first_byte] &= first_byte_mask; + addr[last_byte] &= last_byte_mask; + } +} + +/* Audited by: green(2002.06.12) */ +/* ZAM-FIXME-HANS: comment this */ +static void reiser4_set_bits(char *addr, bmap_off_t start, bmap_off_t end) +{ + int first_byte; + int last_byte; + + unsigned char first_byte_mask = 0xFF; + unsigned char last_byte_mask = 0xFF; + + assert("zam-386", start < end); + + first_byte = start >> 3; + last_byte = (end - 1) >> 3; + + if (last_byte > first_byte + 1) + memset(addr + first_byte + 1, 0xFF, + (size_t) (last_byte - first_byte - 1)); + + first_byte_mask <<= start & 0x7; + last_byte_mask >>= 7 - ((end - 1) & 0x7); + + if (first_byte == last_byte) { + addr[first_byte] |= (first_byte_mask & last_byte_mask); + } else { + addr[first_byte] |= first_byte_mask; + addr[last_byte] |= last_byte_mask; + } +} + +#define ADLER_BASE 65521 +#define ADLER_NMAX 5552 + +/* Calculates the adler32 checksum for the data pointed by `data` of the + length `len`. This function was originally taken from zlib, version 1.1.3, + July 9th, 1998. + + Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + The above comment applies only to the reiser4_adler32 function. +*/ + +__u32 reiser4_adler32(char *data, __u32 len) +{ + unsigned char *t = data; + __u32 s1 = 1; + __u32 s2 = 0; + int k; + + while (len > 0) { + k = len < ADLER_NMAX ? 
len : ADLER_NMAX; + len -= k; + + while (k--) { + s1 += *t++; + s2 += s1; + } + + s1 %= ADLER_BASE; + s2 %= ADLER_BASE; + } + return (s2 << 16) | s1; +} + +#define sb_by_bnode(bnode) \ + ((struct super_block *)jnode_get_tree(bnode->wjnode)->super) + +static __u32 bnode_calc_crc(const struct bitmap_node *bnode, unsigned long size) +{ + return reiser4_adler32(bnode_commit_data(bnode), bmap_size(size)); +} + +static int +bnode_check_adler32(const struct bitmap_node *bnode, unsigned long size) +{ + if (bnode_calc_crc(bnode, size) != bnode_commit_crc(bnode)) { + bmap_nr_t bmap; + + bmap = bnode - get_bnode(sb_by_bnode(bnode), 0); + + warning("vpf-263", + "Checksum for the bitmap block %llu is incorrect", + bmap); + + return RETERR(-EIO); + } + + return 0; +} + +#define REISER4_CHECK_BMAP_CRC (0) + +#if REISER4_CHECK_BMAP_CRC +static int bnode_check_crc(const struct bitmap_node *bnode) +{ + return bnode_check_adler32(bnode, + bmap_size(sb_by_bnode(bnode)->s_blocksize)); +} + +/* REISER4_CHECK_BMAP_CRC */ +#else + +#define bnode_check_crc(bnode) (0) + +/* REISER4_CHECK_BMAP_CRC */ +#endif + +/* Recalculates the adler32 checksum for only 1 byte change. + adler - previous adler checksum + old_data, data - old, new byte values. + tail == (chunk - offset) : length, checksum was calculated for, - offset of + the changed byte within this chunk. + This function can be used for checksum calculation optimisation. +*/ + +static __u32 +adler32_recalc(__u32 adler, unsigned char old_data, unsigned char data, + __u32 tail) +{ + __u32 delta = data - old_data + 2 * ADLER_BASE; + __u32 s1 = adler & 0xffff; + __u32 s2 = (adler >> 16) & 0xffff; + + s1 = (delta + s1) % ADLER_BASE; + s2 = (delta * tail + s2) % ADLER_BASE; + + return (s2 << 16) | s1; +} + +#define LIMIT(val, boundary) ((val) > (boundary) ? (boundary) : (val)) + +/** + * get_nr_bitmap - calculate number of bitmap blocks + * @super: super block with initialized blocksize and block count + * + * Calculates number of bitmap blocks of a filesystem which uses bitmaps to + * maintain free disk space. It assumes that each bitmap addresses the same + * number of blocks which is calculated by bmap_block_count macro defined in + * above. Number of blocks in the filesystem has to be initialized in reiser4 + * private data of super block already so that it can be obtained via + * reiser4_block_count(). Unfortunately, number of blocks addressed by a bitmap + * is not power of 2 because 4 bytes are used for checksum. Therefore, we have + * to use special function to divide and modulo 64bits filesystem block + * counters. + * + * Example: suppose filesystem have 32768 blocks. Blocksize is 4096. Each bitmap + * block addresses (4096 - 4) * 8 = 32736 blocks. Number of bitmaps to address + * all 32768 blocks is calculated as (32768 - 1) / 32736 + 1 = 2. + */ +static bmap_nr_t get_nr_bmap(const struct super_block *super) +{ + u64 quotient; + + assert("zam-393", reiser4_block_count(super) != 0); + + quotient = reiser4_block_count(super) - 1; + do_div(quotient, bmap_bit_count(super->s_blocksize)); + return quotient + 1; +} + +/** + * parse_blocknr - calculate bitmap number and offset in it by block number + * @block: pointer to block number to calculate location in bitmap of + * @bmap: pointer where to store bitmap block number + * @offset: pointer where to store offset within bitmap block + * + * Calculates location of bit which is responsible for allocation/freeing of + * block @*block. 
That location is represented by bitmap block number and offset + * within that bitmap block. + */ +static void +parse_blocknr(const reiser4_block_nr *block, bmap_nr_t *bmap, + bmap_off_t *offset) +{ + struct super_block *super = get_current_context()->super; + u64 quotient = *block; + + *offset = do_div(quotient, bmap_bit_count(super->s_blocksize)); + *bmap = quotient; + + assert("zam-433", *bmap < get_nr_bmap(super)); + assert("", *offset < bmap_bit_count(super->s_blocksize)); +} + +#if REISER4_DEBUG +/* Audited by: green(2002.06.12) */ +static void +check_block_range(const reiser4_block_nr * start, const reiser4_block_nr * len) +{ + struct super_block *sb = reiser4_get_current_sb(); + + assert("zam-436", sb != NULL); + + assert("zam-455", start != NULL); + assert("zam-437", *start != 0); + assert("zam-541", !reiser4_blocknr_is_fake(start)); + assert("zam-441", *start < reiser4_block_count(sb)); + + if (len != NULL) { + assert("zam-438", *len != 0); + assert("zam-442", *start + *len <= reiser4_block_count(sb)); + } +} + +static void check_bnode_loaded(const struct bitmap_node *bnode) +{ + assert("zam-485", bnode != NULL); + assert("zam-483", jnode_page(bnode->wjnode) != NULL); + assert("zam-484", jnode_page(bnode->cjnode) != NULL); + assert("nikita-2820", jnode_is_loaded(bnode->wjnode)); + assert("nikita-2821", jnode_is_loaded(bnode->cjnode)); +} + +#else + +# define check_block_range(start, len) do { /* nothing */} while(0) +# define check_bnode_loaded(bnode) do { /* nothing */} while(0) + +#endif + +/* modify bnode->first_zero_bit (if we free bits before); bnode should be + spin-locked */ +static inline void +adjust_first_zero_bit(struct bitmap_node *bnode, bmap_off_t offset) +{ + if (offset < bnode->first_zero_bit) + bnode->first_zero_bit = offset; +} + +/* return a physical disk address for logical bitmap number @bmap */ +/* FIXME-VS: this is somehow related to disk layout? */ +/* ZAM-FIXME-HANS: your answer is? Use not more than one function dereference + * per block allocation so that performance is not affected. Probably this + * whole file should be considered part of the disk layout plugin, and other + * disk layouts can use other defines and efficiency will not be significantly + * affected. */ + +#define REISER4_FIRST_BITMAP_BLOCK \ + ((REISER4_MASTER_OFFSET / PAGE_SIZE) + 2) + +/* Audited by: green(2002.06.12) */ +static void +get_bitmap_blocknr(struct super_block *super, bmap_nr_t bmap, + reiser4_block_nr * bnr) +{ + + assert("zam-390", bmap < get_nr_bmap(super)); + +#ifdef CONFIG_REISER4_BADBLOCKS +#define BITMAP_PLUGIN_DISKMAP_ID ((0xc0e1<<16) | (0xe0ff)) + /* Check if the diskmap have this already, first. 
*/ + if (reiser4_get_diskmap_value(BITMAP_PLUGIN_DISKMAP_ID, bmap, bnr) == 0) + return; /* Found it in diskmap */ +#endif + /* FIXME_ZAM: before discussing of disk layouts and disk format + plugins I implement bitmap location scheme which is close to scheme + used in reiser 3.6 */ + if (bmap == 0) { + *bnr = REISER4_FIRST_BITMAP_BLOCK; + } else { + *bnr = bmap * bmap_bit_count(super->s_blocksize); + } +} + +/* construct a fake block number for shadow bitmap (WORKING BITMAP) block */ +/* Audited by: green(2002.06.12) */ +static void get_working_bitmap_blocknr(bmap_nr_t bmap, reiser4_block_nr * bnr) +{ + *bnr = + (reiser4_block_nr) ((bmap & ~REISER4_BLOCKNR_STATUS_BIT_MASK) | + REISER4_BITMAP_BLOCKS_STATUS_VALUE); +} + +/* bnode structure initialization */ +static void +init_bnode(struct bitmap_node *bnode, + struct super_block *super UNUSED_ARG, bmap_nr_t bmap UNUSED_ARG) +{ + memset(bnode, 0, sizeof(struct bitmap_node)); + + mutex_init(&bnode->mutex); + atomic_set(&bnode->loaded, 0); +} + +static void release(jnode * node) +{ + jrelse(node); + JF_SET(node, JNODE_HEARD_BANSHEE); + jput(node); +} + +/* This function is for internal bitmap.c use because it assumes that jnode is + in under full control of this thread */ +static void done_bnode(struct bitmap_node *bnode) +{ + if (bnode) { + atomic_set(&bnode->loaded, 0); + if (bnode->wjnode != NULL) + release(bnode->wjnode); + if (bnode->cjnode != NULL) + release(bnode->cjnode); + bnode->wjnode = bnode->cjnode = NULL; + } +} + +/* ZAM-FIXME-HANS: comment this. Called only by load_and_lock_bnode()*/ +static int prepare_bnode(struct bitmap_node *bnode, jnode **cjnode_ret, + jnode **wjnode_ret) +{ + struct super_block *super; + jnode *cjnode; + jnode *wjnode; + bmap_nr_t bmap; + int ret; + + super = reiser4_get_current_sb(); + + *wjnode_ret = wjnode = bnew(); + if (wjnode == NULL) { + *cjnode_ret = NULL; + return RETERR(-ENOMEM); + } + + *cjnode_ret = cjnode = bnew(); + if (cjnode == NULL) + return RETERR(-ENOMEM); + + bmap = bnode - get_bnode(super, 0); + + get_working_bitmap_blocknr(bmap, &wjnode->blocknr); + get_bitmap_blocknr(super, bmap, &cjnode->blocknr); + + jref(cjnode); + jref(wjnode); + + /* load commit bitmap */ + ret = jload_gfp(cjnode, GFP_NOFS, 1); + + if (ret) + goto error; + + /* allocate memory for working bitmap block. Note that for + * bitmaps jinit_new() doesn't actually modifies node content, + * so parallel calls to this are ok. */ + ret = jinit_new(wjnode, GFP_NOFS); + + if (ret != 0) { + jrelse(cjnode); + goto error; + } + + return 0; + + error: + jput(cjnode); + jput(wjnode); + *wjnode_ret = *cjnode_ret = NULL; + return ret; + +} + +/* Check the bnode data on read. */ +static int check_struct_bnode(struct bitmap_node *bnode, __u32 blksize) +{ + void *data; + int ret; + + /* Check CRC */ + ret = bnode_check_adler32(bnode, blksize); + + if (ret) { + return ret; + } + + data = jdata(bnode->cjnode) + CHECKSUM_SIZE; + + /* Check the very first bit -- it must be busy. */ + if (!reiser4_test_bit(0, data)) { + warning("vpf-1362", "The allocator block %llu is not marked " + "as used.", (unsigned long long)bnode->cjnode->blocknr); + + return -EINVAL; + } + + return 0; +} + +/* load bitmap blocks "on-demand" */ +static int load_and_lock_bnode(struct bitmap_node *bnode) +{ + int ret; + + jnode *cjnode; + jnode *wjnode; + + assert("nikita-3040", reiser4_schedulable()); + +/* ZAM-FIXME-HANS: since bitmaps are never unloaded, this does not + * need to be atomic, right? 
Just leave a comment that if bitmaps were + * unloadable, this would need to be atomic. */ + if (atomic_read(&bnode->loaded)) { + /* bitmap is already loaded, nothing to do */ + check_bnode_loaded(bnode); + mutex_lock(&bnode->mutex); + assert("nikita-2827", atomic_read(&bnode->loaded)); + return 0; + } + + ret = prepare_bnode(bnode, &cjnode, &wjnode); + if (ret) + return ret; + + mutex_lock(&bnode->mutex); + + if (!atomic_read(&bnode->loaded)) { + assert("nikita-2822", cjnode != NULL); + assert("nikita-2823", wjnode != NULL); + assert("nikita-2824", jnode_is_loaded(cjnode)); + assert("nikita-2825", jnode_is_loaded(wjnode)); + + bnode->wjnode = wjnode; + bnode->cjnode = cjnode; + + ret = check_struct_bnode(bnode, current_blocksize); + if (unlikely(ret != 0)) + goto error; + + atomic_set(&bnode->loaded, 1); + /* working bitmap is initialized by on-disk + * commit bitmap. This should be performed + * under mutex. */ + memcpy(bnode_working_data(bnode), + bnode_commit_data(bnode), + bmap_size(current_blocksize)); + } else + /* race: someone already loaded bitmap + * while we were busy initializing data. */ + check_bnode_loaded(bnode); + return 0; + + error: + release(wjnode); + release(cjnode); + bnode->wjnode = NULL; + bnode->cjnode = NULL; + mutex_unlock(&bnode->mutex); + return ret; +} + +static void release_and_unlock_bnode(struct bitmap_node *bnode) +{ + check_bnode_loaded(bnode); + mutex_unlock(&bnode->mutex); +} + +/* This function does all block allocation work but only for one bitmap + block.*/ +/* FIXME_ZAM: It does not allow us to allocate block ranges across bitmap + block responsibility zone boundaries. This had no sense in v3.6 but may + have it in v4.x */ +/* ZAM-FIXME-HANS: do you mean search one bitmap block forward? */ +static int +search_one_bitmap_forward(bmap_nr_t bmap, bmap_off_t * offset, + bmap_off_t max_offset, int min_len, int max_len) +{ + struct super_block *super = get_current_context()->super; + struct bitmap_node *bnode = get_bnode(super, bmap); + + char *data; + + bmap_off_t search_end; + bmap_off_t start; + bmap_off_t end; + + int set_first_zero_bit = 0; + + int ret; + + assert("zam-364", min_len > 0); + assert("zam-365", max_len >= min_len); + assert("zam-366", *offset <= max_offset); + + ret = load_and_lock_bnode(bnode); + + if (ret) + return ret; + + data = bnode_working_data(bnode); + + start = *offset; + + if (bnode->first_zero_bit >= start) { + start = bnode->first_zero_bit; + set_first_zero_bit = 1; + } + + while (start + min_len < max_offset) { + + start = + reiser4_find_next_zero_bit((long *)data, max_offset, start); + if (set_first_zero_bit) { + bnode->first_zero_bit = start; + set_first_zero_bit = 0; + } + if (start >= max_offset) + break; + + search_end = LIMIT(start + max_len, max_offset); + end = + reiser4_find_next_set_bit((long *)data, search_end, start); + if (end >= start + min_len) { + /* we can't trust find_next_set_bit result if set bit + was not fount, result may be bigger than + max_offset */ + if (end > search_end) + end = search_end; + + ret = end - start; + *offset = start; + + reiser4_set_bits(data, start, end); + + /* FIXME: we may advance first_zero_bit if [start, + end] region overlaps the first_zero_bit point */ + + break; + } + + start = end + 1; + } + + release_and_unlock_bnode(bnode); + + return ret; +} + +static int +search_one_bitmap_backward(bmap_nr_t bmap, bmap_off_t * start_offset, + bmap_off_t end_offset, int min_len, int max_len) +{ + struct super_block *super = get_current_context()->super; + struct bitmap_node *bnode = 
get_bnode(super, bmap); + char *data; + bmap_off_t start; + int ret; + + assert("zam-958", min_len > 0); + assert("zam-959", max_len >= min_len); + assert("zam-960", *start_offset >= end_offset); + + ret = load_and_lock_bnode(bnode); + if (ret) + return ret; + + data = bnode_working_data(bnode); + start = *start_offset; + + while (1) { + bmap_off_t end, search_end; + + /* Find the beginning of the zero filled region */ + if (reiser4_find_last_zero_bit(&start, data, end_offset, start)) + break; + /* Is there more than `min_len' bits from `start' to + * `end_offset'? */ + if (start < end_offset + min_len - 1) + break; + + /* Do not search to `end_offset' if we need to find less than + * `max_len' zero bits. */ + if (end_offset + max_len - 1 < start) + search_end = start - max_len + 1; + else + search_end = end_offset; + + if (reiser4_find_last_set_bit(&end, data, search_end, start)) + end = search_end; + else + end++; + + if (end + min_len <= start + 1) { + if (end < search_end) + end = search_end; + ret = start - end + 1; + *start_offset = end; /* `end' is lowest offset */ + assert("zam-987", + reiser4_find_next_set_bit(data, start + 1, + end) >= start + 1); + reiser4_set_bits(data, end, start + 1); + break; + } + + if (end <= end_offset) + /* left search boundary reached. */ + break; + start = end - 1; + } + + release_and_unlock_bnode(bnode); + return ret; +} + +/* allocate contiguous range of blocks in bitmap */ +static int bitmap_alloc_forward(reiser4_block_nr * start, + const reiser4_block_nr * end, int min_len, + int max_len) +{ + bmap_nr_t bmap, end_bmap; + bmap_off_t offset, end_offset; + int len; + + reiser4_block_nr tmp; + + struct super_block *super = get_current_context()->super; + const bmap_off_t max_offset = bmap_bit_count(super->s_blocksize); + + parse_blocknr(start, &bmap, &offset); + + tmp = *end - 1; + parse_blocknr(&tmp, &end_bmap, &end_offset); + ++end_offset; + + assert("zam-358", end_bmap >= bmap); + assert("zam-359", ergo(end_bmap == bmap, end_offset >= offset)); + + for (; bmap < end_bmap; bmap++, offset = 0) { + len = + search_one_bitmap_forward(bmap, &offset, max_offset, + min_len, max_len); + if (len != 0) + goto out; + } + + len = + search_one_bitmap_forward(bmap, &offset, end_offset, min_len, + max_len); + out: + *start = bmap * max_offset + offset; + return len; +} + +/* allocate contiguous range of blocks in bitmap (from @start to @end in + * backward direction) */ +static int bitmap_alloc_backward(reiser4_block_nr * start, + const reiser4_block_nr * end, int min_len, + int max_len) +{ + bmap_nr_t bmap, end_bmap; + bmap_off_t offset, end_offset; + int len; + struct super_block *super = get_current_context()->super; + const bmap_off_t max_offset = bmap_bit_count(super->s_blocksize); + + parse_blocknr(start, &bmap, &offset); + parse_blocknr(end, &end_bmap, &end_offset); + + assert("zam-961", end_bmap <= bmap); + assert("zam-962", ergo(end_bmap == bmap, end_offset <= offset)); + + for (; bmap > end_bmap; bmap--, offset = max_offset - 1) { + len = + search_one_bitmap_backward(bmap, &offset, 0, min_len, + max_len); + if (len != 0) + goto out; + } + + len = + search_one_bitmap_backward(bmap, &offset, end_offset, min_len, + max_len); + out: + *start = bmap * max_offset + offset; + return len; +} + +/* plugin->u.space_allocator.alloc_blocks() */ +static int alloc_blocks_forward(reiser4_blocknr_hint *hint, int needed, + reiser4_block_nr *start, reiser4_block_nr *len) +{ + struct super_block *super = get_current_context()->super; + int actual_len; + + reiser4_block_nr 
search_start; + reiser4_block_nr search_end; + + assert("zam-398", super != NULL); + assert("zam-412", hint != NULL); + assert("zam-397", hint->blk <= reiser4_block_count(super)); + + if (hint->max_dist == 0) + search_end = reiser4_block_count(super); + else + search_end = + LIMIT(hint->blk + hint->max_dist, + reiser4_block_count(super)); + + /* We use @hint -> blk as a search start and search from it to the end + of the disk or in given region if @hint -> max_dist is not zero */ + search_start = hint->blk; + + actual_len = + bitmap_alloc_forward(&search_start, &search_end, 1, needed); + + /* There is only one bitmap search if max_dist was specified or first + pass was from the beginning of the bitmap. We also do one pass for + scanning bitmap in backward direction. */ + if (!(actual_len != 0 || hint->max_dist != 0 || search_start == 0)) { + /* next step is a scanning from 0 to search_start */ + search_end = search_start; + search_start = 0; + actual_len = + bitmap_alloc_forward(&search_start, &search_end, 1, needed); + } + if (actual_len == 0) + return RETERR(-ENOSPC); + if (actual_len < 0) + return RETERR(actual_len); + *len = actual_len; + *start = search_start; + return 0; +} + +static int alloc_blocks_backward(reiser4_blocknr_hint * hint, int needed, + reiser4_block_nr * start, + reiser4_block_nr * len) +{ + reiser4_block_nr search_start; + reiser4_block_nr search_end; + int actual_len; + + ON_DEBUG(struct super_block *super = reiser4_get_current_sb()); + + assert("zam-969", super != NULL); + assert("zam-970", hint != NULL); + assert("zam-971", hint->blk <= reiser4_block_count(super)); + + search_start = hint->blk; + if (hint->max_dist == 0 || search_start <= hint->max_dist) + search_end = 0; + else + search_end = search_start - hint->max_dist; + + actual_len = + bitmap_alloc_backward(&search_start, &search_end, 1, needed); + if (actual_len == 0) + return RETERR(-ENOSPC); + if (actual_len < 0) + return RETERR(actual_len); + *len = actual_len; + *start = search_start; + return 0; +} + +/* plugin->u.space_allocator.alloc_blocks() */ +int reiser4_alloc_blocks_bitmap(reiser4_space_allocator * allocator, + reiser4_blocknr_hint * hint, int needed, + reiser4_block_nr * start, reiser4_block_nr * len) +{ + if (hint->backward) + return alloc_blocks_backward(hint, needed, start, len); + return alloc_blocks_forward(hint, needed, start, len); +} + +/* plugin->u.space_allocator.dealloc_blocks(). */ +/* It just frees blocks in WORKING BITMAP. Usually formatted an unformatted + nodes deletion is deferred until transaction commit. 
However, deallocation + of temporary objects like wandered blocks and transaction commit records + requires immediate node deletion from WORKING BITMAP.*/ +void reiser4_dealloc_blocks_bitmap(reiser4_space_allocator * allocator, + reiser4_block_nr start, reiser4_block_nr len) +{ + struct super_block *super = reiser4_get_current_sb(); + + bmap_nr_t bmap; + bmap_off_t offset; + + struct bitmap_node *bnode; + int ret; + + assert("zam-468", len != 0); + check_block_range(&start, &len); + + parse_blocknr(&start, &bmap, &offset); + + assert("zam-469", offset + len <= bmap_bit_count(super->s_blocksize)); + + bnode = get_bnode(super, bmap); + + assert("zam-470", bnode != NULL); + + ret = load_and_lock_bnode(bnode); + assert("zam-481", ret == 0); + + reiser4_clear_bits(bnode_working_data(bnode), offset, + (bmap_off_t) (offset + len)); + + adjust_first_zero_bit(bnode, offset); + + release_and_unlock_bnode(bnode); +} + +static int check_blocks_one_bitmap(bmap_nr_t bmap, bmap_off_t start_offset, + bmap_off_t end_offset, int desired) +{ + struct super_block *super = reiser4_get_current_sb(); + struct bitmap_node *bnode = get_bnode(super, bmap); + int ret; + + assert("nikita-2215", bnode != NULL); + + ret = load_and_lock_bnode(bnode); + assert("zam-626", ret == 0); + + assert("nikita-2216", jnode_is_loaded(bnode->wjnode)); + + if (desired) { + ret = reiser4_find_next_zero_bit(bnode_working_data(bnode), + end_offset, start_offset) + >= end_offset; + } else { + ret = reiser4_find_next_set_bit(bnode_working_data(bnode), + end_offset, start_offset) + >= end_offset; + } + + release_and_unlock_bnode(bnode); + + return ret; +} + +/* plugin->u.space_allocator.check_blocks(). */ +int reiser4_check_blocks_bitmap(const reiser4_block_nr * start, + const reiser4_block_nr * len, int desired) +{ + struct super_block *super = reiser4_get_current_sb(); + + reiser4_block_nr end; + bmap_nr_t bmap, end_bmap; + bmap_off_t offset, end_offset; + const bmap_off_t max_offset = bmap_bit_count(super->s_blocksize); + + assert("intelfx-9", start != NULL); + assert("intelfx-10", ergo(len != NULL, *len > 0)); + + if (len != NULL) { + check_block_range(start, len); + end = *start + *len - 1; + } else { + /* on next line, end is used as temporary len for check_block_range() */ + end = 1; check_block_range(start, &end); + end = *start; + } + + parse_blocknr(start, &bmap, &offset); + + if (end == *start) { + end_bmap = bmap; + end_offset = offset; + } else { + parse_blocknr(&end, &end_bmap, &end_offset); + } + ++end_offset; + + assert("intelfx-4", end_bmap >= bmap); + assert("intelfx-5", ergo(end_bmap == bmap, end_offset >= offset)); + + for (; bmap < end_bmap; bmap++, offset = 0) { + if (!check_blocks_one_bitmap(bmap, offset, max_offset, desired)) { + return 0; + } + } + return check_blocks_one_bitmap(bmap, offset, end_offset, desired); +} + +/* conditional insertion of @node into atom's overwrite set if it was not there */ +static void cond_add_to_overwrite_set(txn_atom * atom, jnode * node) +{ + assert("zam-546", atom != NULL); + assert("zam-547", atom->stage == ASTAGE_PRE_COMMIT); + assert("zam-548", node != NULL); + + spin_lock_atom(atom); + spin_lock_jnode(node); + + if (node->atom == NULL) { + JF_SET(node, JNODE_OVRWR); + insert_into_atom_ovrwr_list(atom, node); + } else { + assert("zam-549", node->atom == atom); + } + + spin_unlock_jnode(node); + spin_unlock_atom(atom); +} + +/* an actor which applies delete set to COMMIT bitmap pages and link modified + pages in a single-linked list */ +static int 
+apply_dset_to_commit_bmap(txn_atom * atom, const reiser4_block_nr * start, + const reiser4_block_nr * len, void *data) +{ + + bmap_nr_t bmap; + bmap_off_t offset; + int ret; + + long long *blocks_freed_p = data; + + struct bitmap_node *bnode; + + struct super_block *sb = reiser4_get_current_sb(); + + check_block_range(start, len); + + parse_blocknr(start, &bmap, &offset); + + /* FIXME-ZAM: we assume that all block ranges are allocated by this + bitmap-based allocator and each block range can't go over a zone of + responsibility of one bitmap block; same assumption is used in + other journal hooks in bitmap code. */ + bnode = get_bnode(sb, bmap); + assert("zam-448", bnode != NULL); + + /* it is safe to unlock atom with is in ASTAGE_PRE_COMMIT */ + assert("zam-767", atom->stage == ASTAGE_PRE_COMMIT); + ret = load_and_lock_bnode(bnode); + if (ret) + return ret; + + /* put bnode into atom's overwrite set */ + cond_add_to_overwrite_set(atom, bnode->cjnode); + + data = bnode_commit_data(bnode); + + ret = bnode_check_crc(bnode); + if (ret != 0) + return ret; + + if (len != NULL) { + /* FIXME-ZAM: a check that all bits are set should be there */ + assert("zam-443", + offset + *len <= bmap_bit_count(sb->s_blocksize)); + reiser4_clear_bits(data, offset, (bmap_off_t) (offset + *len)); + + (*blocks_freed_p) += *len; + } else { + reiser4_clear_bit(offset, data); + (*blocks_freed_p)++; + } + + bnode_set_commit_crc(bnode, bnode_calc_crc(bnode, sb->s_blocksize)); + + release_and_unlock_bnode(bnode); + + return 0; +} + +/* plugin->u.space_allocator.pre_commit_hook(). */ +/* It just applies transaction changes to fs-wide COMMIT BITMAP, hoping the + rest is done by transaction manager (allocate wandered locations for COMMIT + BITMAP blocks, copy COMMIT BITMAP blocks data). 
*/ +/* Only one instance of this function can be running at one given time, because + only one transaction can be committed a time, therefore it is safe to access + some global variables without any locking */ + +int reiser4_pre_commit_hook_bitmap(void) +{ + struct super_block *super = reiser4_get_current_sb(); + txn_atom *atom; + + long long blocks_freed = 0; + + atom = get_current_atom_locked(); + assert("zam-876", atom->stage == ASTAGE_PRE_COMMIT); + spin_unlock_atom(atom); + + { /* scan atom's captured list and find all freshly allocated nodes, + * mark corresponded bits in COMMIT BITMAP as used */ + struct list_head *head = ATOM_CLEAN_LIST(atom); + jnode *node = list_entry(head->next, jnode, capture_link); + + while (head != &node->capture_link) { + /* we detect freshly allocated jnodes */ + if (JF_ISSET(node, JNODE_RELOC)) { + int ret; + bmap_nr_t bmap; + + bmap_off_t offset; + bmap_off_t index; + struct bitmap_node *bn; + __u32 size = bmap_size(super->s_blocksize); + __u32 crc; + char byte; + + assert("zam-559", !JF_ISSET(node, JNODE_OVRWR)); + assert("zam-460", + !reiser4_blocknr_is_fake(&node->blocknr)); + + parse_blocknr(&node->blocknr, &bmap, &offset); + bn = get_bnode(super, bmap); + + index = offset >> 3; + assert("vpf-276", index < size); + + ret = bnode_check_crc(bnode); + if (ret != 0) + return ret; + + check_bnode_loaded(bn); + load_and_lock_bnode(bn); + + byte = *(bnode_commit_data(bn) + index); + reiser4_set_bit(offset, bnode_commit_data(bn)); + + crc = adler32_recalc(bnode_commit_crc(bn), byte, + *(bnode_commit_data(bn) + + index), + size - index), + bnode_set_commit_crc(bn, crc); + + release_and_unlock_bnode(bn); + + ret = bnode_check_crc(bn); + if (ret != 0) + return ret; + + /* working of this depends on how it inserts + new j-node into clean list, because we are + scanning the same list now. It is OK, if + insertion is done to the list front */ + cond_add_to_overwrite_set(atom, bn->cjnode); + } + + node = list_entry(node->capture_link.next, jnode, capture_link); + } + } + + atom_dset_deferred_apply(atom, apply_dset_to_commit_bmap, &blocks_freed, 0); + + blocks_freed -= atom->nr_blocks_allocated; + + { + reiser4_super_info_data *sbinfo; + + sbinfo = get_super_private(super); + + spin_lock_reiser4_super(sbinfo); + sbinfo->blocks_free_committed += blocks_freed; + spin_unlock_reiser4_super(sbinfo); + } + + return 0; +} + +/* plugin->u.space_allocator.init_allocator + constructor of reiser4_space_allocator object. It is called on fs mount */ +int reiser4_init_allocator_bitmap(reiser4_space_allocator * allocator, + struct super_block *super, void *arg) +{ + struct bitmap_allocator_data *data = NULL; + bmap_nr_t bitmap_blocks_nr; + bmap_nr_t i; + + assert("nikita-3039", reiser4_schedulable()); + + /* getting memory for bitmap allocator private data holder */ + data = + kmalloc(sizeof(struct bitmap_allocator_data), + reiser4_ctx_gfp_mask_get()); + + if (data == NULL) + return RETERR(-ENOMEM); + + /* allocation and initialization for the array of bnodes */ + bitmap_blocks_nr = get_nr_bmap(super); + + /* FIXME-ZAM: it is not clear what to do with huge number of bitmaps + which is bigger than 2^32 (= 8 * 4096 * 4096 * 2^32 bytes = 5.76e+17, + may I never meet someone who still uses the ia32 architecture when + storage devices of that size enter the market, and wants to use ia32 + with that storage device, much less reiser4. ;-) -Hans). Kmalloc is not possible and, + probably, another dynamic data structure should replace a static + array of bnodes. 
*/ + /*data->bitmap = reiser4_kmalloc((size_t) (sizeof (struct bitmap_node) * bitmap_blocks_nr), GFP_KERNEL); */ + data->bitmap = reiser4_vmalloc(sizeof(struct bitmap_node) * bitmap_blocks_nr); + if (data->bitmap == NULL) { + kfree(data); + return RETERR(-ENOMEM); + } + + for (i = 0; i < bitmap_blocks_nr; i++) + init_bnode(data->bitmap + i, super, i); + + allocator->u.generic = data; + +#if REISER4_DEBUG + get_super_private(super)->min_blocks_used += bitmap_blocks_nr; +#endif + + /* Load all bitmap blocks at mount time. */ + if (!test_bit + (REISER4_DONT_LOAD_BITMAP, &get_super_private(super)->fs_flags)) { + __u64 start_time, elapsed_time; + struct bitmap_node *bnode; + int ret; + + if (REISER4_DEBUG) + printk(KERN_INFO "loading reiser4 bitmap..."); + start_time = jiffies; + + for (i = 0; i < bitmap_blocks_nr; i++) { + bnode = data->bitmap + i; + ret = load_and_lock_bnode(bnode); + if (ret) { + reiser4_destroy_allocator_bitmap(allocator, + super); + return ret; + } + release_and_unlock_bnode(bnode); + } + + elapsed_time = jiffies - start_time; + if (REISER4_DEBUG) + printk("...done (%llu jiffies)\n", + (unsigned long long)elapsed_time); + } + + return 0; +} + +/* plugin->u.space_allocator.destroy_allocator + destructor. It is called on fs unmount */ +int reiser4_destroy_allocator_bitmap(reiser4_space_allocator * allocator, + struct super_block *super) +{ + bmap_nr_t bitmap_blocks_nr; + bmap_nr_t i; + + struct bitmap_allocator_data *data = allocator->u.generic; + + assert("zam-414", data != NULL); + assert("zam-376", data->bitmap != NULL); + + bitmap_blocks_nr = get_nr_bmap(super); + + for (i = 0; i < bitmap_blocks_nr; i++) { + struct bitmap_node *bnode = data->bitmap + i; + + mutex_lock(&bnode->mutex); + +#if REISER4_DEBUG + if (atomic_read(&bnode->loaded)) { + jnode *wj = bnode->wjnode; + jnode *cj = bnode->cjnode; + + assert("zam-480", jnode_page(cj) != NULL); + assert("zam-633", jnode_page(wj) != NULL); + + assert("zam-634", + memcmp(jdata(wj), jdata(wj), + bmap_size(super->s_blocksize)) == 0); + + } +#endif + done_bnode(bnode); + mutex_unlock(&bnode->mutex); + } + + vfree(data->bitmap); + kfree(data); + + allocator->u.generic = NULL; + + return 0; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * scroll-step: 1 + * End: + */ diff --git a/fs/reiser4/plugin/space/bitmap.h b/fs/reiser4/plugin/space/bitmap.h new file mode 100644 index 000000000000..4590498adb45 --- /dev/null +++ b/fs/reiser4/plugin/space/bitmap.h @@ -0,0 +1,47 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined (__REISER4_PLUGIN_SPACE_BITMAP_H__) +#define __REISER4_PLUGIN_SPACE_BITMAP_H__ + +#include "../../dformat.h" +#include "../../block_alloc.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block */ +/* EDWARD-FIXME-HANS: write something as informative as the below for every .h file lacking it. */ +/* declarations of functions implementing methods of space allocator plugin for + bitmap based allocator. 
The functions themselves are in bitmap.c */ +extern int reiser4_init_allocator_bitmap(reiser4_space_allocator *, + struct super_block *, void *); +extern int reiser4_destroy_allocator_bitmap(reiser4_space_allocator *, + struct super_block *); +extern int reiser4_alloc_blocks_bitmap(reiser4_space_allocator *, + reiser4_blocknr_hint *, int needed, + reiser4_block_nr * start, + reiser4_block_nr * len); +extern int reiser4_check_blocks_bitmap(const reiser4_block_nr *, + const reiser4_block_nr *, int); +extern void reiser4_dealloc_blocks_bitmap(reiser4_space_allocator *, + reiser4_block_nr, + reiser4_block_nr); +extern int reiser4_pre_commit_hook_bitmap(void); + +#define reiser4_post_commit_hook_bitmap() do{}while(0) +#define reiser4_post_write_back_hook_bitmap() do{}while(0) +#define reiser4_print_info_bitmap(pref, al) do{}while(0) + +typedef __u64 bmap_nr_t; +typedef __u32 bmap_off_t; + +#endif /* __REISER4_PLUGIN_SPACE_BITMAP_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/space/space_allocator.h b/fs/reiser4/plugin/space/space_allocator.h new file mode 100644 index 000000000000..71bfd11016d7 --- /dev/null +++ b/fs/reiser4/plugin/space/space_allocator.h @@ -0,0 +1,80 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#ifndef __SPACE_ALLOCATOR_H__ +#define __SPACE_ALLOCATOR_H__ + +#include "../../forward.h" +#include "bitmap.h" +/* NIKITA-FIXME-HANS: surely this could use a comment. Something about how bitmap is the only space allocator for now, + * but... */ +#define DEF_SPACE_ALLOCATOR(allocator) \ + \ +static inline int sa_init_allocator (reiser4_space_allocator * al, struct super_block *s, void * opaque) \ +{ \ + return reiser4_init_allocator_##allocator (al, s, opaque); \ +} \ + \ +static inline void sa_destroy_allocator (reiser4_space_allocator *al, struct super_block *s) \ +{ \ + reiser4_destroy_allocator_##allocator (al, s); \ +} \ + \ +static inline int sa_alloc_blocks (reiser4_space_allocator *al, reiser4_blocknr_hint * hint, \ + int needed, reiser4_block_nr * start, reiser4_block_nr * len) \ +{ \ + return reiser4_alloc_blocks_##allocator (al, hint, needed, start, len); \ +} \ +static inline void sa_dealloc_blocks (reiser4_space_allocator * al, reiser4_block_nr start, reiser4_block_nr len) \ +{ \ + reiser4_dealloc_blocks_##allocator (al, start, len); \ +} \ + \ +static inline int sa_check_blocks (const reiser4_block_nr * start, const reiser4_block_nr * end, int desired) \ +{ \ + return reiser4_check_blocks_##allocator (start, end, desired); \ +} \ + \ +static inline void sa_pre_commit_hook (void) \ +{ \ + reiser4_pre_commit_hook_##allocator (); \ +} \ + \ +static inline void sa_post_commit_hook (void) \ +{ \ + reiser4_post_commit_hook_##allocator (); \ +} \ + \ +static inline void sa_post_write_back_hook (void) \ +{ \ + reiser4_post_write_back_hook_##allocator(); \ +} \ + \ +static inline void sa_print_info(const char * prefix, reiser4_space_allocator * al) \ +{ \ + reiser4_print_info_##allocator (prefix, al); \ +} + +DEF_SPACE_ALLOCATOR(bitmap) + +/* this object is part of reiser4 private in-core super block */ +struct reiser4_space_allocator { + union { + /* space allocators might use this pointer to reference their + * data. */ + void *generic; + } u; +}; + +/* __SPACE_ALLOCATOR_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/plugin/tail_policy.c b/fs/reiser4/plugin/tail_policy.c new file mode 100644 index 000000000000..1e0eb1d29e42 --- /dev/null +++ b/fs/reiser4/plugin/tail_policy.c @@ -0,0 +1,113 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Formatting policy plugins */ + +/* + * Formatting policy plugin is used by object plugin (of regular file) to + * convert file between two representations. + * + * Currently following policies are implemented: + * never store file in formatted nodes + * always store file in formatted nodes + * store file in formatted nodes if file is smaller than 4 blocks (default) + */ + +#include "../tree.h" +#include "../inode.h" +#include "../super.h" +#include "object.h" +#include "plugin.h" +#include "node/node.h" +#include "plugin_header.h" + +#include <linux/pagemap.h> +#include <linux/fs.h> /* For struct inode */ + +/** + * have_formatting_never - + * @inode: + * @size: + * + * + */ +/* Never store file's tail as direct item */ +/* Audited by: green(2002.06.12) */ +static int have_formatting_never(const struct inode *inode UNUSED_ARG + /* inode to operate on */ , + loff_t size UNUSED_ARG/* new object size */) +{ + return 0; +} + +/* Always store file's tail as direct item */ +/* Audited by: green(2002.06.12) */ +static int +have_formatting_always(const struct inode *inode UNUSED_ARG + /* inode to operate on */ , + loff_t size UNUSED_ARG/* new object size */) +{ + return 1; +} + +/* This function makes test if we should store file denoted @inode as tails only + or as extents only. */ +static int +have_formatting_default(const struct inode *inode UNUSED_ARG + /* inode to operate on */ , + loff_t size/* new object size */) +{ + assert("umka-1253", inode != NULL); + + if (size > inode->i_sb->s_blocksize * 4) + return 0; + + return 1; +} + +/* tail plugins */ +formatting_plugin formatting_plugins[LAST_TAIL_FORMATTING_ID] = { + [NEVER_TAILS_FORMATTING_ID] = { + .h = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .id = NEVER_TAILS_FORMATTING_ID, + .pops = NULL, + .label = "never", + .desc = "Never store file's tail", + .linkage = {NULL, NULL} + }, + .have_tail = have_formatting_never + }, + [ALWAYS_TAILS_FORMATTING_ID] = { + .h = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .id = ALWAYS_TAILS_FORMATTING_ID, + .pops = NULL, + .label = "always", + .desc = "Always store file's tail", + .linkage = {NULL, NULL} + }, + .have_tail = have_formatting_always + }, + [SMALL_FILE_FORMATTING_ID] = { + .h = { + .type_id = REISER4_FORMATTING_PLUGIN_TYPE, + .id = SMALL_FILE_FORMATTING_ID, + .pops = NULL, + .label = "4blocks", + .desc = "store files shorter than 4 blocks in tail items", + .linkage = {NULL, NULL} + }, + .have_tail = have_formatting_default + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/plugin/txmod.c b/fs/reiser4/plugin/txmod.c new file mode 100644 index 000000000000..8e489af23caf --- /dev/null +++ b/fs/reiser4/plugin/txmod.c @@ -0,0 +1,1238 @@ +#include "../forward.h" +#include "../debug.h" +#include "../coord.h" +#include "../plugin/plugin.h" +#include "../jnode.h" +#include "../znode.h" +#include "../block_alloc.h" +#include "../reiser4.h" +#include "../flush.h" + +/* + * This file contains implementation of different 
transaction models. + * + * Transaction model is a high-level block allocator, which assigns block + * numbers to dirty nodes, and, thereby, decides, how those nodes will be + * committed. + * + * Every dirty node of reiser4 atom can be committed by either of the + * following two ways: + * 1) via journal; + * 2) using "write-anywhere" technique. + * + * If the allocator doesn't change on-disk location of a node, then + * this node will be committed using journalling technique (overwrite). + * Otherwise, it will be comitted via write-anywhere technique (relocate): + * + * relocate <---- allocate --- > overwrite + * + * So, in our interpretation the 2 traditional "classic" strategies in + * committing transactions (journalling and "write-anywhere") are just two + * boundary cases: 1) when all nodes are overwritten, and 2) when all nodes + * are relocated. + * + * Besides those 2 boundary cases we can implement in reiser4 the infinite + * set of their various combinations, so that user can choose what is really + * suitable for his needs. + */ + +/* jnode_make_wander_nolock <- find_flush_start_jnode (special case for znode-above-root) + <- jnode_make_wander */ +void jnode_make_wander_nolock(jnode * node); + +/* jnode_make_wander <- txmod.forward_alloc_formatted */ +void jnode_make_wander(jnode * node); + +/* jnode_make_reloc_nolock <- znode_make_reloc + <- unformatted_make_reloc */ +static void jnode_make_reloc_nolock(flush_queue_t * fq, jnode * node); + + + + /* Handle formatted nodes in forward context */ + + +/** + * txmod.forward_alloc_formatted <- allocate_znode <- alloc_pos_and_ancestors <- jnode_flush + * <- alloc_one_ancestor <- alloc_pos_and_ancestors <- jnode_flush + * <- alloc_one_ancestor (recursive) + * <- lock_parent_and_allocate_znode <- squalloc_upper_levels <- check_parents_and_squalloc_upper_levels <- squalloc_upper_levels (recursive) + * <- handle_pos_on_formatted + * <- handle_pos_on_formatted + * <- handle_pos_end_of_twig + * <- handle_pos_to_leaf + */ +void znode_make_reloc(znode * z, flush_queue_t * fq); + + + /* Handle unformatted nodes */ + + +/* unformatted_make_reloc <- assign_real_blocknrs <- txmod.forward_alloc_unformatted + <- txmod.squeeze_alloc_unformatted +*/ +void unformatted_make_reloc(jnode *node, flush_queue_t *fq); + +static void forward_overwrite_unformatted(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, reiser4_block_nr width); + +/* mark_jnode_overwrite <- forward_overwrite_unformatted <- txmod.forward_alloc_unformatted + squeeze_overwrite_unformatted <- txmod.squeeze_alloc_unformatted +*/ +static void mark_jnode_overwrite(struct list_head *jnodes, jnode *node); + +int split_allocated_extent(coord_t *coord, reiser4_block_nr pos_in_unit); +int allocated_extent_slum_size(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, unsigned long count); +void allocate_blocks_unformatted(reiser4_blocknr_hint *preceder, + reiser4_block_nr wanted_count, + reiser4_block_nr *first_allocated, + reiser4_block_nr *allocated, + block_stage_t block_stage); +void assign_real_blocknrs(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, reiser4_block_nr count, + reiser4_block_nr first); +int convert_extent(coord_t *coord, reiser4_extent *replace); +int put_unit_to_end(znode *node, + const reiser4_key *key, reiser4_extent *copy_ext); + +/* + * txmod.forward_alloc_unformatted <- handle_pos_on_twig + * txmod.squeeze_alloc_unformatted <- squeeze_right_twig + */ + +/* Common functions */ + +/** + * Mark node JNODE_OVRWR and put it on atom->overwrite_nodes 
list. + * Atom lock and jnode lock should be taken before calling this + * function. + */ +void jnode_make_wander_nolock(jnode * node) +{ + txn_atom *atom; + + assert("nikita-2432", !JF_ISSET(node, JNODE_RELOC)); + assert("nikita-3153", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-897", !JF_ISSET(node, JNODE_FLUSH_QUEUED)); + assert("nikita-3367", !reiser4_blocknr_is_fake(jnode_get_block(node))); + + atom = node->atom; + + assert("zam-895", atom != NULL); + assert("zam-894", atom_is_protected(atom)); + + JF_SET(node, JNODE_OVRWR); + /* move node to atom's overwrite list */ + list_move_tail(&node->capture_link, ATOM_OVRWR_LIST(atom)); + ON_DEBUG(count_jnode(atom, node, DIRTY_LIST, OVRWR_LIST, 1)); +} + +/* + * Same as jnode_make_wander_nolock, but all necessary locks + * are taken inside this function. + */ +void jnode_make_wander(jnode * node) +{ + txn_atom *atom; + + spin_lock_jnode(node); + atom = jnode_get_atom(node); + assert("zam-913", atom != NULL); + assert("zam-914", !JF_ISSET(node, JNODE_RELOC)); + + jnode_make_wander_nolock(node); + spin_unlock_atom(atom); + spin_unlock_jnode(node); +} + +/* this just sets RELOC bit */ +static void jnode_make_reloc_nolock(flush_queue_t * fq, jnode * node) +{ + assert_spin_locked(&(node->guard)); + assert("zam-916", JF_ISSET(node, JNODE_DIRTY)); + assert("zam-917", !JF_ISSET(node, JNODE_RELOC)); + assert("zam-918", !JF_ISSET(node, JNODE_OVRWR)); + assert("zam-920", !JF_ISSET(node, JNODE_FLUSH_QUEUED)); + assert("nikita-3367", !reiser4_blocknr_is_fake(jnode_get_block(node))); + jnode_set_reloc(node); +} + +/* + * Mark znode RELOC and put it on flush queue + */ +void znode_make_reloc(znode * z, flush_queue_t * fq) +{ + jnode *node; + txn_atom *atom; + + node = ZJNODE(z); + spin_lock_jnode(node); + + atom = jnode_get_atom(node); + assert("zam-919", atom != NULL); + + jnode_make_reloc_nolock(fq, node); + queue_jnode(fq, node); + + spin_unlock_atom(atom); + spin_unlock_jnode(node); +} + +/* Mark unformatted node RELOC and put it on flush queue */ +void unformatted_make_reloc(jnode *node, flush_queue_t *fq) +{ + assert("vs-1479", jnode_is_unformatted(node)); + + jnode_make_reloc_nolock(fq, node); + queue_jnode(fq, node); +} + +/** + * mark_jnode_overwrite - assign node to overwrite set + * @jnodes: overwrite set list head + * @node: jnode to belong to overwrite set + * + * Sets OVRWR jnode state bit and puts @node to the end of list head @jnodes + * which is an accumulator for nodes before they get to overwrite set list of + * atom. 
+ */ +static void mark_jnode_overwrite(struct list_head *jnodes, jnode *node) +{ + spin_lock_jnode(node); + + assert("zam-917", !JF_ISSET(node, JNODE_RELOC)); + assert("zam-918", !JF_ISSET(node, JNODE_OVRWR)); + + JF_SET(node, JNODE_OVRWR); + list_move_tail(&node->capture_link, jnodes); + ON_DEBUG(count_jnode(node->atom, node, DIRTY_LIST, OVRWR_LIST, 0)); + + spin_unlock_jnode(node); +} + +static int forward_relocate_unformatted(flush_pos_t *flush_pos, + reiser4_extent *ext, + extent_state state, + oid_t oid, __u64 index, + __u64 width, int *exit) +{ + int result; + coord_t *coord; + reiser4_extent replace_ext; + reiser4_block_nr protected; + reiser4_block_nr start; + reiser4_block_nr first_allocated; + __u64 allocated; + block_stage_t block_stage; + + *exit = 0; + coord = &flush_pos->coord; + start = extent_get_start(ext); + + if (flush_pos->pos_in_unit) { + /* + * split extent unit into two ones + */ + result = split_allocated_extent(coord, + flush_pos->pos_in_unit); + flush_pos->pos_in_unit = 0; + *exit = 1; + return result; + } + /* + * limit number of nodes to allocate + */ + if (flush_pos->nr_to_write < width) + width = flush_pos->nr_to_write; + + if (state == ALLOCATED_EXTENT) { + /* + * all protected nodes are not flushprepped, therefore + * they are counted as flush_reserved + */ + block_stage = BLOCK_FLUSH_RESERVED; + protected = allocated_extent_slum_size(flush_pos, oid, + index, width); + if (protected == 0) { + flush_pos->state = POS_INVALID; + flush_pos->pos_in_unit = 0; + *exit = 1; + return 0; + } + } else { + block_stage = BLOCK_UNALLOCATED; + protected = width; + } + /* + * look at previous unit if possible. If it is allocated, make + * preceder more precise + */ + if (coord->unit_pos && + (state_of_extent(ext - 1) == ALLOCATED_EXTENT)) + reiser4_pos_hint(flush_pos)->blk = + extent_get_start(ext - 1) + + extent_get_width(ext - 1); + /* + * allocate new block numbers for protected nodes + */ + allocate_blocks_unformatted(reiser4_pos_hint(flush_pos), + protected, + &first_allocated, &allocated, + block_stage); + + if (state == ALLOCATED_EXTENT) + /* + * on relocating - free nodes which are going to be + * relocated + */ + reiser4_dealloc_blocks(&start, &allocated, 0, BA_DEFER); + + /* assign new block numbers to protected nodes */ + assign_real_blocknrs(flush_pos, oid, index, allocated, first_allocated); + + /* prepare extent which will replace current one */ + reiser4_set_extent(&replace_ext, first_allocated, allocated); + + /* adjust extent item */ + result = convert_extent(coord, &replace_ext); + if (result != 0 && result != -ENOMEM) { + warning("vs-1461", + "Failed to allocate extent. 
Should not happen\n"); + *exit = 1; + return result; + } + /* + * break flush: we prepared for flushing as many blocks as we + * were asked for + */ + if (flush_pos->nr_to_write == allocated) + flush_pos->state = POS_INVALID; + return 0; +} + +static squeeze_result squeeze_relocate_unformatted(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *key, + reiser4_key *stop_key) +{ + int result; + reiser4_extent *ext; + __u64 index; + __u64 width; + reiser4_block_nr start; + extent_state state; + oid_t oid; + reiser4_block_nr first_allocated; + __u64 allocated; + __u64 protected; + reiser4_extent copy_extent; + block_stage_t block_stage; + + assert("edward-1610", flush_pos->pos_in_unit == 0); + assert("edward-1611", coord_is_leftmost_unit(coord)); + assert("edward-1612", item_is_extent(coord)); + + ext = extent_by_coord(coord); + index = extent_unit_index(coord); + start = extent_get_start(ext); + width = extent_get_width(ext); + state = state_of_extent(ext); + unit_key_by_coord(coord, key); + oid = get_key_objectid(key); + + assert("edward-1613", state != HOLE_EXTENT); + + if (state == ALLOCATED_EXTENT) { + /* + * all protected nodes are not flushprepped, + * therefore they are counted as flush_reserved + */ + block_stage = BLOCK_FLUSH_RESERVED; + protected = allocated_extent_slum_size(flush_pos, oid, + index, width); + if (protected == 0) { + flush_pos->state = POS_INVALID; + flush_pos->pos_in_unit = 0; + return 0; + } + } else { + block_stage = BLOCK_UNALLOCATED; + protected = width; + } + /* + * look at previous unit if possible. If it is allocated, make + * preceder more precise + */ + if (coord->unit_pos && + (state_of_extent(ext - 1) == ALLOCATED_EXTENT)) + reiser4_pos_hint(flush_pos)->blk = + extent_get_start(ext - 1) + + extent_get_width(ext - 1); + /* + * allocate new block numbers for protected nodes + */ + allocate_blocks_unformatted(reiser4_pos_hint(flush_pos), + protected, + &first_allocated, &allocated, + block_stage); + /* + * prepare extent which will be copied to left + */ + reiser4_set_extent(©_extent, first_allocated, allocated); + result = put_unit_to_end(left, key, ©_extent); + + if (result == -E_NODE_FULL) { + /* + * free blocks which were just allocated + */ + reiser4_dealloc_blocks(&first_allocated, &allocated, + (state == ALLOCATED_EXTENT) + ? BLOCK_FLUSH_RESERVED + : BLOCK_UNALLOCATED, + BA_PERMANENT); + /* + * rewind the preceder + */ + flush_pos->preceder.blk = first_allocated; + check_preceder(flush_pos->preceder.blk); + return SQUEEZE_TARGET_FULL; + } + if (state == ALLOCATED_EXTENT) { + /* + * free nodes which were relocated + */ + reiser4_dealloc_blocks(&start, &allocated, 0, BA_DEFER); + } + /* + * assign new block numbers to protected nodes + */ + assign_real_blocknrs(flush_pos, oid, index, allocated, + first_allocated); + set_key_offset(key, + get_key_offset(key) + + (allocated << current_blocksize_bits)); + return SQUEEZE_CONTINUE; +} + +/** + * forward_overwrite_unformatted - put bunch of jnodes to overwrite set + * @flush_pos: flush position + * @oid: objectid of file jnodes belong to + * @index: starting index + * @width: extent width + * + * Puts nodes of one extent (file objectid @oid, extent width @width) to atom's + * overwrite set. Starting from the one with index @index. If end of slum is + * detected (node is not found or flushprepped) - stop iterating and set flush + * position's state to POS_INVALID. 
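+ *
+ * For illustration (schematic, hypothetical numbers): for an extent of
+ * width 4 whose jnode at @index + 2 is already flushprepped, only the
+ * jnodes at @index and @index + 1 are put on the overwrite list and the
+ * flush position is invalidated, so the flush stops at that point.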
+ */ +static void forward_overwrite_unformatted(flush_pos_t *flush_pos, oid_t oid, + unsigned long index, + reiser4_block_nr width) +{ + unsigned long i; + reiser4_tree *tree; + jnode *node; + txn_atom *atom; + LIST_HEAD(jnodes); + + tree = current_tree; + + atom = atom_locked_by_fq(reiser4_pos_fq(flush_pos)); + assert("vs-1478", atom); + + for (i = flush_pos->pos_in_unit; i < width; i++, index++) { + node = jlookup(tree, oid, index); + if (!node) { + flush_pos->state = POS_INVALID; + break; + } + if (jnode_check_flushprepped(node)) { + flush_pos->state = POS_INVALID; + atomic_dec(&node->x_count); + break; + } + if (node->atom != atom) { + flush_pos->state = POS_INVALID; + atomic_dec(&node->x_count); + break; + } + mark_jnode_overwrite(&jnodes, node); + atomic_dec(&node->x_count); + } + + list_splice_init(&jnodes, ATOM_OVRWR_LIST(atom)->prev); + spin_unlock_atom(atom); +} + +static squeeze_result squeeze_overwrite_unformatted(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *key, + reiser4_key *stop_key) +{ + int result; + reiser4_extent *ext; + __u64 index; + __u64 width; + reiser4_block_nr start; + extent_state state; + oid_t oid; + reiser4_extent copy_extent; + + assert("vs-1457", flush_pos->pos_in_unit == 0); + assert("vs-1467", coord_is_leftmost_unit(coord)); + assert("vs-1467", item_is_extent(coord)); + + ext = extent_by_coord(coord); + index = extent_unit_index(coord); + start = extent_get_start(ext); + width = extent_get_width(ext); + state = state_of_extent(ext); + unit_key_by_coord(coord, key); + oid = get_key_objectid(key); + /* + * try to copy unit as it is to left neighbor + * and make all first not flushprepped nodes + * overwrite nodes + */ + reiser4_set_extent(©_extent, start, width); + + result = put_unit_to_end(left, key, ©_extent); + if (result == -E_NODE_FULL) + return SQUEEZE_TARGET_FULL; + + if (state != HOLE_EXTENT) + forward_overwrite_unformatted(flush_pos, oid, index, width); + + set_key_offset(key, + get_key_offset(key) + (width << current_blocksize_bits)); + return SQUEEZE_CONTINUE; +} + +/************************ HYBRID TRANSACTION MODEL ****************************/ + +/** + * This is the default transaction model suggested by Josh MacDonald and + * Hans Reiser. This was the single hardcoded transaction mode till Feb 2014 + * when Edward introduced pure Journalling and pure Write-Anywhere. + * + * In this mode all relocate-overwrite decisions are result of attempts to + * defragment atom's locality. + */ + +/* REVERSE PARENT-FIRST RELOCATION POLICIES */ + +/* This implements the is-it-close-enough-to-its-preceder? test for relocation + in the reverse parent-first relocate context. Here all we know is the + preceder and the block number. Since we are going in reverse, the preceder + may still be relocated as well, so we can't ask the block allocator "is there + a closer block available to relocate?" here. In the _forward_ parent-first + relocate context (not here) we actually call the block allocator to try and + find a closer location. +*/ +static int reverse_try_defragment_if_close(const reiser4_block_nr * pblk, + const reiser4_block_nr * nblk) +{ + reiser4_block_nr dist; + + assert("jmacd-7710", *pblk != 0 && *nblk != 0); + assert("jmacd-7711", !reiser4_blocknr_is_fake(pblk)); + assert("jmacd-7712", !reiser4_blocknr_is_fake(nblk)); + + /* Distance is the absolute value. */ + dist = (*pblk > *nblk) ? 
(*pblk - *nblk) : (*nblk - *pblk); + + /* If the block is less than FLUSH_RELOCATE_DISTANCE blocks away from + its preceder block, do not relocate. */ + if (dist <= get_current_super_private()->flush.relocate_distance) + return 0; + + return 1; +} + +/** + * This function is a predicate that tests for relocation. Always called in the + * reverse-parent-first context, when we are asking whether the current node + * should be relocated in order to expand the flush by dirtying the parent level + * (and thus proceeding to flush that level). When traversing in the forward + * parent-first direction (not here), relocation decisions are handled in two + * places: allocate_znode() and extent_needs_allocation(). + */ +static int reverse_alloc_formatted_hybrid(jnode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + reiser4_block_nr pblk = 0; + reiser4_block_nr nblk = 0; + + assert("jmacd-8989", !jnode_is_root(node)); + /* + * This function is called only from the + * reverse_relocate_check_dirty_parent() and only if the parent + * node is clean. This implies that the parent has the real (i.e., not + * fake) block number, and, so does the child, because otherwise the + * parent would be dirty. + */ + + /* New nodes are treated as if they are being relocated. */ + if (JF_ISSET(node, JNODE_CREATED) || + (pos->leaf_relocate && jnode_get_level(node) == LEAF_LEVEL)) + return 1; + + /* Find the preceder. FIXME(B): When the child is an unformatted, + previously existing node, the coord may be leftmost even though the + child is not the parent-first preceder of the parent. If the first + dirty node appears somewhere in the middle of the first extent unit, + this preceder calculation is wrong. + Needs more logic in here. */ + if (coord_is_leftmost_unit(parent_coord)) { + pblk = *znode_get_block(parent_coord->node); + } else { + pblk = pos->preceder.blk; + } + check_preceder(pblk); + + /* If (pblk == 0) then the preceder isn't allocated or isn't known: + relocate. */ + if (pblk == 0) + return 1; + + nblk = *jnode_get_block(node); + + if (reiser4_blocknr_is_fake(&nblk)) + /* child is unallocated, mark parent dirty */ + return 1; + + return reverse_try_defragment_if_close(&pblk, &nblk); +} + +/** + * A subroutine of forward_alloc_formatted_hybrid(), this is called first to see + * if there is a close position to relocate to. It may return ENOSPC if there is + * no close position. If there is no close position it may not relocate. This + * takes care of updating the parent node with the relocated block address. + * + * was allocate_znode_update() + */ +static int forward_try_defragment_locality(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + reiser4_block_nr blk; + lock_handle uber_lock; + int flush_reserved_used = 0; + int grabbed; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + init_lh(&uber_lock); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + grabbed = ctx->grabbed_blocks; + + ret = zload(node); + if (ret) + return ret; + + if (ZF_ISSET(node, JNODE_CREATED)) { + assert("zam-816", reiser4_blocknr_is_fake(znode_get_block(node))); + pos->preceder.block_stage = BLOCK_UNALLOCATED; + } else { + pos->preceder.block_stage = BLOCK_GRABBED; + + /* The disk space for relocating the @node is already reserved + * in "flush reserved" counter if @node is leaf, otherwise we + * grab space using BA_RESERVED (means grab space from whole + * disk not from only 95%). 
*/ + if (znode_get_level(node) == LEAF_LEVEL) { + /* + * earlier (during do_jnode_make_dirty()) we decided + * that @node can possibly go into overwrite set and + * reserved block for its wandering location. + */ + txn_atom *atom = get_current_atom_locked(); + assert("nikita-3449", + ZF_ISSET(node, JNODE_FLUSH_RESERVED)); + flush_reserved2grabbed(atom, (__u64) 1); + spin_unlock_atom(atom); + /* + * we are trying to move node into relocate + * set. Allocation of relocated position "uses" + * reserved block. + */ + ZF_CLR(node, JNODE_FLUSH_RESERVED); + flush_reserved_used = 1; + } else { + ret = reiser4_grab_space_force((__u64) 1, BA_RESERVED); + if (ret != 0) + goto exit; + } + } + + /* We may do not use 5% of reserved disk space here and flush will not + pack tightly. */ + ret = reiser4_alloc_block(&pos->preceder, &blk, + BA_FORMATTED | BA_PERMANENT); + if (ret) + goto exit; + + if (!ZF_ISSET(node, JNODE_CREATED) && + (ret = reiser4_dealloc_block(znode_get_block(node), 0, + BA_DEFER | BA_FORMATTED))) + goto exit; + + if (likely(!znode_is_root(node))) { + item_plugin *iplug; + + iplug = item_plugin_by_coord(parent_coord); + assert("nikita-2954", iplug->f.update != NULL); + iplug->f.update(parent_coord, &blk); + + znode_make_dirty(parent_coord->node); + + } else { + reiser4_tree *tree = znode_get_tree(node); + znode *uber; + + /* We take a longterm lock on the fake node in order to change + the root block number. This may cause atom fusion. */ + ret = get_uber_znode(tree, ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI, + &uber_lock); + /* The fake node cannot be deleted, and we must have priority + here, and may not be confused with ENOSPC. */ + assert("jmacd-74412", + ret != -EINVAL && ret != -E_DEADLOCK && ret != -ENOSPC); + + if (ret) + goto exit; + + uber = uber_lock.node; + + write_lock_tree(tree); + tree->root_block = blk; + write_unlock_tree(tree); + + znode_make_dirty(uber); + } + ret = znode_rehash(node, &blk); +exit: + if (ret) { + /* Get flush reserved block back if something fails, because + * callers assume that on error block wasn't relocated and its + * flush reserved block wasn't used. */ + if (flush_reserved_used) { + /* + * ok, we failed to move node into relocate + * set. Restore status quo. + */ + grabbed2flush_reserved((__u64) 1); + ZF_SET(node, JNODE_FLUSH_RESERVED); + } + } + zrelse(node); + done_lh(&uber_lock); + grabbed2free_mark(grabbed); + return ret; +} + +/* + * Make the final relocate/wander decision during + * forward parent-first squalloc for a formatted node + */ +static int forward_alloc_formatted_hybrid(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + reiser4_super_info_data *sbinfo = get_current_super_private(); + /** + * FIXME(D): We have the node write-locked and should have checked for ! + * allocated() somewhere before reaching this point, but there can be a + * race, so this assertion is bogus. + */ + assert("edward-1614", znode_is_loaded(node)); + assert("jmacd-7987", !jnode_check_flushprepped(ZJNODE(node))); + assert("jmacd-7988", znode_is_write_locked(node)); + assert("jmacd-7989", coord_is_invalid(parent_coord) + || znode_is_write_locked(parent_coord->node)); + + if (ZF_ISSET(node, JNODE_REPACK) || ZF_ISSET(node, JNODE_CREATED) || + znode_is_root(node) || + /* + * We have enough nodes to relocate no matter what. + */ + (pos->leaf_relocate != 0 && znode_get_level(node) == LEAF_LEVEL)) { + /* + * No need to decide with new nodes, they are treated the same + * as relocate. If the root node is dirty, relocate. 
+ */ + if (pos->preceder.blk == 0) { + /* + * preceder is unknown and we have decided to relocate + * node -- using of default value for search start is + * better than search from block #0. + */ + get_blocknr_hint_default(&pos->preceder.blk); + check_preceder(pos->preceder.blk); + } + goto best_reloc; + + } else if (pos->preceder.blk == 0) { + /* If we don't know the preceder, leave it where it is. */ + jnode_make_wander(ZJNODE(node)); + } else { + /* Make a decision based on block distance. */ + reiser4_block_nr dist; + reiser4_block_nr nblk = *znode_get_block(node); + + assert("jmacd-6172", !reiser4_blocknr_is_fake(&nblk)); + assert("jmacd-6173", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + assert("jmacd-6174", pos->preceder.blk != 0); + + if (pos->preceder.blk == nblk - 1) { + /* Ideal. */ + jnode_make_wander(ZJNODE(node)); + } else { + + dist = + (nblk < + pos->preceder.blk) ? (pos->preceder.blk - + nblk) : (nblk - + pos->preceder.blk); + + /* See if we can find a closer block + (forward direction only). */ + pos->preceder.max_dist = + min((reiser4_block_nr) sbinfo->flush. + relocate_distance, dist); + pos->preceder.level = znode_get_level(node); + + ret = forward_try_defragment_locality(node, + parent_coord, + pos); + pos->preceder.max_dist = 0; + + if (ret && (ret != -ENOSPC)) + return ret; + + if (ret == 0) { + /* Got a better allocation. */ + znode_make_reloc(node, pos->fq); + } else if (dist < sbinfo->flush.relocate_distance) { + /* The present allocation is good enough. */ + jnode_make_wander(ZJNODE(node)); + } else { + /* + * Otherwise, try to relocate to the best + * position. + */ + best_reloc: + ret = forward_try_defragment_locality(node, + parent_coord, + pos); + if (ret != 0) + return ret; + /* + * set JNODE_RELOC bit _after_ node gets + * allocated + */ + znode_make_reloc(node, pos->fq); + } + } + } + /* + * This is the new preceder + */ + pos->preceder.blk = *znode_get_block(node); + check_preceder(pos->preceder.blk); + pos->alloc_cnt += 1; + + assert("jmacd-4277", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + + return 0; +} + +static int forward_alloc_unformatted_hybrid(flush_pos_t *flush_pos) +{ + coord_t *coord; + reiser4_extent *ext; + oid_t oid; + __u64 index; + __u64 width; + extent_state state; + reiser4_key key; + + assert("vs-1468", flush_pos->state == POS_ON_EPOINT); + assert("vs-1469", coord_is_existing_unit(&flush_pos->coord) + && item_is_extent(&flush_pos->coord)); + + coord = &flush_pos->coord; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + if (state == HOLE_EXTENT) { + flush_pos->state = POS_INVALID; + return 0; + } + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + index = extent_unit_index(coord) + flush_pos->pos_in_unit; + width = extent_get_width(ext); + + assert("vs-1457", width > flush_pos->pos_in_unit); + + if (flush_pos->leaf_relocate || state == UNALLOCATED_EXTENT) { + int exit; + int result; + result = forward_relocate_unformatted(flush_pos, ext, state, + oid, + index, width, &exit); + if (exit) + return result; + } else + forward_overwrite_unformatted(flush_pos, oid, index, width); + + flush_pos->pos_in_unit = 0; + return 0; +} + +static squeeze_result squeeze_alloc_unformatted_hybrid(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key) +{ + squeeze_result ret; + reiser4_key key; + reiser4_extent *ext; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + + if ((flush_pos->leaf_relocate && state == ALLOCATED_EXTENT) || + (state == 
UNALLOCATED_EXTENT)) + /* + * relocate + */ + ret = squeeze_relocate_unformatted(left, coord, + flush_pos, &key, stop_key); + else + /* + * (state == ALLOCATED_EXTENT && !flush_pos->leaf_relocate) || + * state == HOLE_EXTENT - overwrite + */ + ret = squeeze_overwrite_unformatted(left, coord, + flush_pos, &key, stop_key); + if (ret == SQUEEZE_CONTINUE) + *stop_key = key; + return ret; +} + +/*********************** JOURNAL TRANSACTION MODEL ****************************/ + +static int forward_alloc_formatted_journal(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + + if (ZF_ISSET(node, JNODE_CREATED)) { + if (pos->preceder.blk == 0) { + /* + * preceder is unknown and we have decided to relocate + * node -- using of default value for search start is + * better than search from block #0. + */ + get_blocknr_hint_default(&pos->preceder.blk); + check_preceder(pos->preceder.blk); + } + ret = forward_try_defragment_locality(node, + parent_coord, + pos); + if (ret != 0) { + warning("edward-1615", + "forward defrag failed (%d)", ret); + return ret; + } + /* + * set JNODE_RELOC bit _after_ node gets + * allocated + */ + znode_make_reloc(node, pos->fq); + } + else + jnode_make_wander(ZJNODE(node)); + /* + * This is the new preceder + */ + pos->preceder.blk = *znode_get_block(node); + check_preceder(pos->preceder.blk); + pos->alloc_cnt += 1; + + assert("edward-1616", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + return 0; +} + +static int forward_alloc_unformatted_journal(flush_pos_t *flush_pos) +{ + + coord_t *coord; + reiser4_extent *ext; + oid_t oid; + __u64 index; + __u64 width; + extent_state state; + reiser4_key key; + + assert("edward-1617", flush_pos->state == POS_ON_EPOINT); + assert("edward-1618", coord_is_existing_unit(&flush_pos->coord) + && item_is_extent(&flush_pos->coord)); + + coord = &flush_pos->coord; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + if (state == HOLE_EXTENT) { + flush_pos->state = POS_INVALID; + return 0; + } + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + index = extent_unit_index(coord) + flush_pos->pos_in_unit; + width = extent_get_width(ext); + + assert("edward-1619", width > flush_pos->pos_in_unit); + + if (state == UNALLOCATED_EXTENT) { + int exit; + int result; + result = forward_relocate_unformatted(flush_pos, ext, state, + oid, + index, width, &exit); + if (exit) + return result; + } + else + /* + * state == ALLOCATED_EXTENT + * keep old allocation + */ + forward_overwrite_unformatted(flush_pos, oid, index, width); + + flush_pos->pos_in_unit = 0; + return 0; +} + +static squeeze_result squeeze_alloc_unformatted_journal(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key) +{ + squeeze_result ret; + reiser4_key key; + reiser4_extent *ext; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + + if (state == UNALLOCATED_EXTENT) + ret = squeeze_relocate_unformatted(left, coord, + flush_pos, &key, stop_key); + else + /* + * state == ALLOCATED_EXTENT || state == HOLE_EXTENT + */ + ret = squeeze_overwrite_unformatted(left, coord, + flush_pos, &key, stop_key); + if (ret == SQUEEZE_CONTINUE) + *stop_key = key; + return ret; +} + +/********************** WA (Write-Anywhere) TRANSACTION MODEL ***************/ + +static int forward_alloc_formatted_wa(znode * node, + const coord_t *parent_coord, + flush_pos_t *pos) +{ + int ret; + + assert("edward-1620", znode_is_loaded(node)); + assert("edward-1621", 
!jnode_check_flushprepped(ZJNODE(node))); + assert("edward-1622", znode_is_write_locked(node)); + assert("edward-1623", coord_is_invalid(parent_coord) + || znode_is_write_locked(parent_coord->node)); + + if (pos->preceder.blk == 0) { + /* + * preceder is unknown and we have decided to relocate + * node -- using of default value for search start is + * better than search from block #0. + */ + get_blocknr_hint_default(&pos->preceder.blk); + check_preceder(pos->preceder.blk); + } + ret = forward_try_defragment_locality(node, parent_coord, pos); + if (ret && (ret != -ENOSPC)) { + warning("edward-1624", + "forward defrag failed (%d)", ret); + return ret; + } + if (ret == 0) + znode_make_reloc(node, pos->fq); + else { + ret = forward_try_defragment_locality(node, parent_coord, pos); + if (ret) { + warning("edward-1625", + "forward defrag failed (%d)", ret); + return ret; + } + /* set JNODE_RELOC bit _after_ node gets allocated */ + znode_make_reloc(node, pos->fq); + } + /* + * This is the new preceder + */ + pos->preceder.blk = *znode_get_block(node); + check_preceder(pos->preceder.blk); + pos->alloc_cnt += 1; + + assert("edward-1626", !reiser4_blocknr_is_fake(&pos->preceder.blk)); + return 0; +} + +static int forward_alloc_unformatted_wa(flush_pos_t *flush_pos) +{ + int exit; + int result; + + coord_t *coord; + reiser4_extent *ext; + oid_t oid; + __u64 index; + __u64 width; + extent_state state; + reiser4_key key; + + assert("edward-1627", flush_pos->state == POS_ON_EPOINT); + assert("edward-1628", coord_is_existing_unit(&flush_pos->coord) + && item_is_extent(&flush_pos->coord)); + + coord = &flush_pos->coord; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + if (state == HOLE_EXTENT) { + flush_pos->state = POS_INVALID; + return 0; + } + + item_key_by_coord(coord, &key); + oid = get_key_objectid(&key); + index = extent_unit_index(coord) + flush_pos->pos_in_unit; + width = extent_get_width(ext); + + assert("edward-1629", width > flush_pos->pos_in_unit); + assert("edward-1630", + state == ALLOCATED_EXTENT || state == UNALLOCATED_EXTENT); + /* + * always relocate + */ + result = forward_relocate_unformatted(flush_pos, ext, state, oid, + index, width, &exit); + if (exit) + return result; + flush_pos->pos_in_unit = 0; + return 0; +} + +static squeeze_result squeeze_alloc_unformatted_wa(znode *left, + const coord_t *coord, + flush_pos_t *flush_pos, + reiser4_key *stop_key) +{ + squeeze_result ret; + reiser4_key key; + reiser4_extent *ext; + extent_state state; + + ext = extent_by_coord(coord); + state = state_of_extent(ext); + + if (state == HOLE_EXTENT) + /* + * hole extents are handled in squeeze_overwrite + */ + ret = squeeze_overwrite_unformatted(left, coord, + flush_pos, &key, stop_key); + else + ret = squeeze_relocate_unformatted(left, coord, + flush_pos, &key, stop_key); + if (ret == SQUEEZE_CONTINUE) + *stop_key = key; + return ret; +} + +/******************************************************************************/ + +txmod_plugin txmod_plugins[LAST_TXMOD_ID] = { + [HYBRID_TXMOD_ID] = { + .h = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .id = HYBRID_TXMOD_ID, + .pops = NULL, + .label = "hybrid", + .desc = "Hybrid Transaction Model", + .linkage = {NULL, NULL} + }, + .forward_alloc_formatted = forward_alloc_formatted_hybrid, + .reverse_alloc_formatted = reverse_alloc_formatted_hybrid, + .forward_alloc_unformatted = forward_alloc_unformatted_hybrid, + .squeeze_alloc_unformatted = squeeze_alloc_unformatted_hybrid + }, + [JOURNAL_TXMOD_ID] = { + .h = { + .type_id = 
REISER4_TXMOD_PLUGIN_TYPE, + .id = JOURNAL_TXMOD_ID, + .pops = NULL, + .label = "journal", + .desc = "Journalling Transaction Model", + .linkage = {NULL, NULL} + }, + .forward_alloc_formatted = forward_alloc_formatted_journal, + .reverse_alloc_formatted = NULL, + .forward_alloc_unformatted = forward_alloc_unformatted_journal, + .squeeze_alloc_unformatted = squeeze_alloc_unformatted_journal + }, + [WA_TXMOD_ID] = { + .h = { + .type_id = REISER4_TXMOD_PLUGIN_TYPE, + .id = WA_TXMOD_ID, + .pops = NULL, + .label = "wa", + .desc = "Write-Anywhere Transaction Model", + .linkage = {NULL, NULL} + }, + .forward_alloc_formatted = forward_alloc_formatted_wa, + .reverse_alloc_formatted = NULL, + .forward_alloc_unformatted = forward_alloc_unformatted_wa, + .squeeze_alloc_unformatted = squeeze_alloc_unformatted_wa + } +}; + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/pool.c b/fs/reiser4/pool.c new file mode 100644 index 000000000000..56636381eb2a --- /dev/null +++ b/fs/reiser4/pool.c @@ -0,0 +1,231 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Fast pool allocation. + + There are situations when some sub-system normally asks memory allocator + for only few objects, but under some circumstances could require much + more. Typical and actually motivating example is tree balancing. It needs + to keep track of nodes that were involved into it, and it is well-known + that in reasonable packed balanced tree most (92.938121%) percent of all + balancings end up after working with only few nodes (3.141592 on + average). But in rare cases balancing can involve much more nodes + (3*tree_height+1 in extremal situation). + + On the one hand, we don't want to resort to dynamic allocation (slab, + malloc(), etc.) to allocate data structures required to keep track of + nodes during balancing. On the other hand, we cannot statically allocate + required amount of space on the stack, because first: it is useless wastage + of precious resource, and second: this amount is unknown in advance (tree + height can change). + + Pools, implemented in this file are solution for this problem: + + - some configurable amount of objects is statically preallocated on the + stack + + - if this preallocated pool is exhausted and more objects is requested + they are allocated dynamically. + + Pools encapsulate distinction between statically and dynamically allocated + objects. Both allocation and recycling look exactly the same. + + To keep track of dynamically allocated objects, pool adds its own linkage + to each object. + + NOTE-NIKITA This linkage also contains some balancing-specific data. This + is not perfect. On the other hand, balancing is currently the only client + of pool code. + + NOTE-NIKITA Another desirable feature is to rewrite all pool manipulation + functions in the style of tslist/tshash, i.e., make them unreadable, but + type-safe. 
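+
+   A minimal usage sketch (illustrative only; the names below are made up,
+   the real client is the tree-balancing code): the caller reserves an
+   area for N objects whose first member is a struct reiser4_pool_header
+   and hands it to reiser4_init_pool():
+
+       struct example_obj {
+               struct reiser4_pool_header header;   /* must come first */
+               int payload;
+       };
+
+       struct reiser4_pool pool;
+       char area[4 * sizeof(struct example_obj)];
+
+       reiser4_init_pool(&pool, sizeof(struct example_obj), 4, area);
+
+   Objects are then obtained with reiser4_add_obj() and returned with
+   reiser4_pool_free(); the first 4 requests are served from "area", any
+   further ones fall back to kmalloc() and are kfree()'d on release.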
+ +*/ + +#include "debug.h" +#include "pool.h" +#include "super.h" + +#include <linux/types.h> +#include <linux/err.h> + +/* initialize new pool object @h */ +static void reiser4_init_pool_obj(struct reiser4_pool_header *h) +{ + INIT_LIST_HEAD(&h->usage_linkage); + INIT_LIST_HEAD(&h->level_linkage); + INIT_LIST_HEAD(&h->extra_linkage); +} + +/* initialize new pool */ +void reiser4_init_pool(struct reiser4_pool *pool /* pool to initialize */ , + size_t obj_size /* size of objects in @pool */ , + int num_of_objs /* number of preallocated objects */ , + char *data/* area for preallocated objects */) +{ + struct reiser4_pool_header *h; + int i; + + assert("nikita-955", pool != NULL); + assert("nikita-1044", obj_size > 0); + assert("nikita-956", num_of_objs >= 0); + assert("nikita-957", data != NULL); + + memset(pool, 0, sizeof *pool); + pool->obj_size = obj_size; + pool->data = data; + INIT_LIST_HEAD(&pool->free); + INIT_LIST_HEAD(&pool->used); + INIT_LIST_HEAD(&pool->extra); + memset(data, 0, obj_size * num_of_objs); + for (i = 0; i < num_of_objs; ++i) { + h = (struct reiser4_pool_header *) (data + i * obj_size); + reiser4_init_pool_obj(h); + /* add pool header to the end of pool's free list */ + list_add_tail(&h->usage_linkage, &pool->free); + } +} + +/* release pool resources + + Release all resources acquired by this pool, specifically, dynamically + allocated objects. + +*/ +void reiser4_done_pool(struct reiser4_pool *pool UNUSED_ARG) +{ +} + +/* allocate carry object from @pool + + First, try to get preallocated object. If this fails, resort to dynamic + allocation. + +*/ +static void *reiser4_pool_alloc(struct reiser4_pool *pool) +{ + struct reiser4_pool_header *result; + + assert("nikita-959", pool != NULL); + + if (!list_empty(&pool->free)) { + struct list_head *linkage; + + linkage = pool->free.next; + list_del(linkage); + INIT_LIST_HEAD(linkage); + result = list_entry(linkage, struct reiser4_pool_header, + usage_linkage); + BUG_ON(!list_empty(&result->level_linkage) || + !list_empty(&result->extra_linkage)); + } else { + /* pool is empty. Extra allocations don't deserve dedicated + slab to be served from, as they are expected to be rare. */ + result = kmalloc(pool->obj_size, reiser4_ctx_gfp_mask_get()); + if (result != 0) { + reiser4_init_pool_obj(result); + list_add(&result->extra_linkage, &pool->extra); + } else + return ERR_PTR(RETERR(-ENOMEM)); + BUG_ON(!list_empty(&result->usage_linkage) || + !list_empty(&result->level_linkage)); + } + ++pool->objs; + list_add(&result->usage_linkage, &pool->used); + memset(result + 1, 0, pool->obj_size - sizeof *result); + return result; +} + +/* return object back to the pool */ +void reiser4_pool_free(struct reiser4_pool *pool, + struct reiser4_pool_header *h) +{ + assert("nikita-961", h != NULL); + assert("nikita-962", pool != NULL); + + --pool->objs; + assert("nikita-963", pool->objs >= 0); + + list_del_init(&h->usage_linkage); + list_del_init(&h->level_linkage); + + if (list_empty(&h->extra_linkage)) + /* + * pool header is not an extra one. Push it onto free list + * using usage_linkage + */ + list_add(&h->usage_linkage, &pool->free); + else { + /* remove pool header from pool's extra list and kfree it */ + list_del(&h->extra_linkage); + kfree(h); + } +} + +/* add new object to the carry level list + + Carry level is FIFO most of the time, but not always. 
Complications arise + when make_space() function tries to go to the left neighbor and thus adds + carry node before existing nodes, and also, when updating delimiting keys + after moving data between two nodes, we want left node to be locked before + right node. + + Latter case is confusing at the first glance. Problem is that COP_UPDATE + opration that updates delimiting keys is sometimes called with two nodes + (when data are moved between two nodes) and sometimes with only one node + (when leftmost item is deleted in a node). In any case operation is + supplied with at least node whose left delimiting key is to be updated + (that is "right" node). + + @pool - from which to allocate new object; + @list - where to add object; + @reference - after (or before) which existing object to add +*/ +struct reiser4_pool_header *reiser4_add_obj(struct reiser4_pool *pool, + struct list_head *list, + pool_ordering order, + struct reiser4_pool_header *reference) +{ + struct reiser4_pool_header *result; + + assert("nikita-972", pool != NULL); + + result = reiser4_pool_alloc(pool); + if (IS_ERR(result)) + return result; + + assert("nikita-973", result != NULL); + + switch (order) { + case POOLO_BEFORE: + __list_add(&result->level_linkage, + reference->level_linkage.prev, + &reference->level_linkage); + break; + case POOLO_AFTER: + __list_add(&result->level_linkage, + &reference->level_linkage, + reference->level_linkage.next); + break; + case POOLO_LAST: + list_add_tail(&result->level_linkage, list); + break; + case POOLO_FIRST: + list_add(&result->level_linkage, list); + break; + default: + wrong_return_value("nikita-927", "order"); + } + return result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/pool.h b/fs/reiser4/pool.h new file mode 100644 index 000000000000..d0f91fe09ff5 --- /dev/null +++ b/fs/reiser4/pool.h @@ -0,0 +1,57 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Fast pool allocation */ + +#ifndef __REISER4_POOL_H__ +#define __REISER4_POOL_H__ + +#include <linux/types.h> + +struct reiser4_pool { + size_t obj_size; + int objs; + char *data; + struct list_head free; + struct list_head used; + struct list_head extra; +}; + +struct reiser4_pool_header { + /* object is either on free or "used" lists */ + struct list_head usage_linkage; + struct list_head level_linkage; + struct list_head extra_linkage; +}; + +typedef enum { + POOLO_BEFORE, + POOLO_AFTER, + POOLO_LAST, + POOLO_FIRST +} pool_ordering; + +/* pool manipulation functions */ + +extern void reiser4_init_pool(struct reiser4_pool *pool, size_t obj_size, + int num_of_objs, char *data); +extern void reiser4_done_pool(struct reiser4_pool *pool); +extern void reiser4_pool_free(struct reiser4_pool *pool, + struct reiser4_pool_header *h); +struct reiser4_pool_header *reiser4_add_obj(struct reiser4_pool *pool, + struct list_head *list, + pool_ordering order, + struct reiser4_pool_header *reference); + +/* __REISER4_POOL_H__ */ +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/readahead.c b/fs/reiser4/readahead.c new file mode 100644 index 000000000000..0be94b646640 --- /dev/null +++ b/fs/reiser4/readahead.c @@ -0,0 +1,140 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "forward.h" +#include "tree.h" +#include "tree_walk.h" +#include "super.h" +#include "inode.h" +#include "key.h" +#include "znode.h" + +#include <linux/swap.h> /* for totalram_pages */ + +void reiser4_init_ra_info(ra_info_t *rai) +{ + rai->key_to_stop = *reiser4_min_key(); +} + +/* global formatted node readahead parameter. It can be set by mount option + * -o readahead:NUM:1 */ +static inline int ra_adjacent_only(int flags) +{ + return flags & RA_ADJACENT_ONLY; +} + +/* this is used by formatted_readahead to decide whether read for right neighbor + * of node is to be issued. It returns 1 if right neighbor's first key is less + * or equal to readahead's stop key */ +static int should_readahead_neighbor(znode * node, ra_info_t *info) +{ + int result; + + read_lock_dk(znode_get_tree(node)); + result = keyle(znode_get_rd_key(node), &info->key_to_stop); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +#define LOW_MEM_PERCENTAGE (5) + +static int low_on_memory(void) +{ + unsigned int freepages; + + freepages = nr_free_pages(); + return freepages < (totalram_pages * LOW_MEM_PERCENTAGE / 100); +} + +/* start read for @node and for a few of its right neighbors */ +void formatted_readahead(znode * node, ra_info_t *info) +{ + struct formatted_ra_params *ra_params; + znode *cur; + int i; + int grn_flags; + lock_handle next_lh; + + /* do nothing if node block number has not been assigned to node (which + * means it is still in cache). */ + if (reiser4_blocknr_is_fake(znode_get_block(node))) + return; + + ra_params = get_current_super_ra_params(); + + if (znode_page(node) == NULL) + jstartio(ZJNODE(node)); + + if (znode_get_level(node) != LEAF_LEVEL) + return; + + /* don't waste memory for read-ahead when low on memory */ + if (low_on_memory()) + return; + + /* We can have locked nodes on upper tree levels, in this situation lock + priorities do not help to resolve deadlocks, we have to use TRY_LOCK + here. */ + grn_flags = (GN_CAN_USE_UPPER_LEVELS | GN_TRY_LOCK); + + i = 0; + cur = zref(node); + init_lh(&next_lh); + while (i < ra_params->max) { + const reiser4_block_nr * nextblk; + + if (!should_readahead_neighbor(cur, info)) + break; + + if (reiser4_get_right_neighbor + (&next_lh, cur, ZNODE_READ_LOCK, grn_flags)) + break; + + nextblk = znode_get_block(next_lh.node); + if (reiser4_blocknr_is_fake(nextblk) || + (ra_adjacent_only(ra_params->flags) + && *nextblk != *znode_get_block(cur) + 1)) + break; + + zput(cur); + cur = zref(next_lh.node); + done_lh(&next_lh); + if (znode_page(cur) == NULL) + jstartio(ZJNODE(cur)); + else + /* Do not scan read-ahead window if pages already + * allocated (and i/o already started). 
*/ + break; + + i++; + } + zput(cur); + done_lh(&next_lh); +} + +void reiser4_readdir_readahead_init(struct inode *dir, tap_t *tap) +{ + reiser4_key *stop_key; + + assert("nikita-3542", dir != NULL); + assert("nikita-3543", tap != NULL); + + stop_key = &tap->ra_info.key_to_stop; + /* initialize readdir readahead information: include into readahead + * stat data of all files of the directory */ + set_key_locality(stop_key, get_inode_oid(dir)); + set_key_type(stop_key, KEY_SD_MINOR); + set_key_ordering(stop_key, get_key_ordering(reiser4_max_key())); + set_key_objectid(stop_key, get_key_objectid(reiser4_max_key())); + set_key_offset(stop_key, get_key_offset(reiser4_max_key())); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/readahead.h b/fs/reiser4/readahead.h new file mode 100644 index 000000000000..de42234b4af4 --- /dev/null +++ b/fs/reiser4/readahead.h @@ -0,0 +1,42 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#ifndef __READAHEAD_H__ +#define __READAHEAD_H__ + +#include "key.h" + +typedef enum { + RA_ADJACENT_ONLY = 1, /* only requests nodes which are adjacent. + Default is NO (not only adjacent) */ +} ra_global_flags; + +/* reiser4 super block has a field of this type. + It controls readahead during tree traversals */ +struct formatted_ra_params { + unsigned long max; /* request not more than this amount of nodes. + Default is totalram_pages / 4 */ + int flags; +}; + +typedef struct { + reiser4_key key_to_stop; +} ra_info_t; + +void formatted_readahead(znode * , ra_info_t *); +void reiser4_init_ra_info(ra_info_t *rai); + +extern void reiser4_readdir_readahead_init(struct inode *dir, tap_t *tap); + +/* __READAHEAD_H__ */ +#endif + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/reiser4.h b/fs/reiser4/reiser4.h new file mode 100644 index 000000000000..e244656398be --- /dev/null +++ b/fs/reiser4/reiser4.h @@ -0,0 +1,260 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + + +/* definitions of common constants used by reiser4 */ + +#if !defined( __REISER4_H__ ) +#define __REISER4_H__ + +#include <asm/param.h> /* for HZ */ +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/hardirq.h> +#include <linux/sched.h> + +/* + * reiser4 compilation options. + */ + +#if defined(CONFIG_REISER4_DEBUG) +/* turn on assertion checks */ +#define REISER4_DEBUG (1) +#else +#define REISER4_DEBUG (0) +#endif + +#define REISER4_SHA256 (0) + +/* + * Turn on large keys mode. In his mode (which is default), reiser4 key has 4 + * 8-byte components. In the old "small key" mode, it's 3 8-byte + * components. Additional component, referred to as "ordering" is used to + * order items from which given object is composed of. As such, ordering is + * placed between locality and objectid. For directory item ordering contains + * initial prefix of the file name this item is for. This sorts all directory + * items within given directory lexicographically (but see + * fibration.[ch]). For file body and stat-data, ordering contains initial + * prefix of the name file was initially created with. 
In the common case + * (files with single name) this allows to order file bodies and stat-datas in + * the same order as their respective directory entries, thus speeding up + * readdir. + * + * Note, that kernel can only mount file system with the same key size as one + * it is compiled for, so flipping this option may render your data + * inaccessible. + */ +#define REISER4_LARGE_KEY (1) +/*#define REISER4_LARGE_KEY (0)*/ + +/*#define GUESS_EXISTS 1*/ + +/* + * PLEASE update fs/reiser4/kattr.c:show_options() when adding new compilation + * option + */ + +#define REISER4_SUPER_MAGIC_STRING "ReIsEr4" +extern const int REISER4_MAGIC_OFFSET; /* offset to magic string from the + * beginning of device */ + +/* here go tunable parameters that are not worth special entry in kernel + configuration */ + +/* default number of slots in coord-by-key caches */ +#define CBK_CACHE_SLOTS (16) +/* how many elementary tree operation to carry on the next level */ +#define CARRIES_POOL_SIZE (5) +/* size of pool of preallocated nodes for carry process. */ +#define NODES_LOCKED_POOL_SIZE (5) + +#define REISER4_NEW_NODE_FLAGS (COPI_LOAD_LEFT | COPI_LOAD_RIGHT | COPI_GO_LEFT) +#define REISER4_NEW_EXTENT_FLAGS (COPI_LOAD_LEFT | COPI_LOAD_RIGHT | COPI_GO_LEFT) +#define REISER4_PASTE_FLAGS (COPI_GO_LEFT) +#define REISER4_INSERT_FLAGS (COPI_GO_LEFT) + +/* we are supporting reservation of disk space on uid basis */ +#define REISER4_SUPPORT_UID_SPACE_RESERVATION (0) +/* we are supporting reservation of disk space for groups */ +#define REISER4_SUPPORT_GID_SPACE_RESERVATION (0) +/* we are supporting reservation of disk space for root */ +#define REISER4_SUPPORT_ROOT_SPACE_RESERVATION (0) +/* we use rapid flush mode, see flush.c for comments. */ +#define REISER4_USE_RAPID_FLUSH (1) + +/* + * set this to 0 if you don't want to use wait-for-flush in ->writepage(). + */ +#define REISER4_USE_ENTD (1) + +/* key allocation is Plan-A */ +#define REISER4_PLANA_KEY_ALLOCATION (1) +/* key allocation follows good old 3.x scheme */ +#define REISER4_3_5_KEY_ALLOCATION (0) + +/* size of hash-table for znodes */ +#define REISER4_ZNODE_HASH_TABLE_SIZE (1 << 13) + +/* number of buckets in lnode hash-table */ +#define LNODE_HTABLE_BUCKETS (1024) + +/* some ridiculously high maximal limit on height of znode tree. This + is used in declaration of various per level arrays and + to allocate stattistics gathering array for per-level stats. */ +#define REISER4_MAX_ZTREE_HEIGHT (8) + +#define REISER4_PANIC_MSG_BUFFER_SIZE (1024) + +/* If array contains less than REISER4_SEQ_SEARCH_BREAK elements then, + sequential search is on average faster than binary. This is because + of better optimization and because sequential search is more CPU + cache friendly. This number (25) was found by experiments on dual AMD + Athlon(tm), 1400MHz. + + NOTE: testing in kernel has shown that binary search is more effective than + implied by results of the user level benchmarking. Probably because in the + node keys are separated by other data. So value was adjusted after few + tests. More thorough tuning is needed. +*/ +#define REISER4_SEQ_SEARCH_BREAK (3) + +/* don't allow tree to be lower than this */ +#define REISER4_MIN_TREE_HEIGHT (TWIG_LEVEL) + +/* NOTE NIKITA this is no longer used: maximal atom size is auto-adjusted to + * available memory. */ +/* Default value of maximal atom size. Can be ovewritten by + tmgr.atom_max_size mount option. By default infinity. 
*/ +#define REISER4_ATOM_MAX_SIZE ((unsigned)(~0)) + +/* Default value of maximal atom age (in jiffies). After reaching this age + atom will be forced to commit, either synchronously or asynchronously. Can + be overwritten by tmgr.atom_max_age mount option. */ +#define REISER4_ATOM_MAX_AGE (600 * HZ) + +/* sleeping period for ktxnmrgd */ +#define REISER4_TXNMGR_TIMEOUT (5 * HZ) + +/* timeout to wait for ent thread in writepage. Default: 3 milliseconds. */ +#define REISER4_ENTD_TIMEOUT (3 * HZ / 1000) + +/* start complaining after that many restarts in coord_by_key(). + + This either means incredibly heavy contention for this part of a tree, or + some corruption or bug. +*/ +#define REISER4_CBK_ITERATIONS_LIMIT (100) + +/* return -EIO after that many iterations in coord_by_key(). + + I have witnessed more than 800 iterations (in 30 thread test) before cbk + finished. --nikita +*/ +#define REISER4_MAX_CBK_ITERATIONS 500000 + +/* put a per-inode limit on maximal number of directory entries with identical + keys in hashed directory. + + Disable this until inheritance interfaces stabilize: we need some way to + set per directory limit. +*/ +#define REISER4_USE_COLLISION_LIMIT (0) + +/* If flush finds more than FLUSH_RELOCATE_THRESHOLD adjacent dirty leaf-level + blocks it will force them to be relocated. */ +#define FLUSH_RELOCATE_THRESHOLD 64 +/* If flush finds can find a block allocation closer than at most + FLUSH_RELOCATE_DISTANCE from the preceder it will relocate to that position. + */ +#define FLUSH_RELOCATE_DISTANCE 64 + +/* If we have written this much or more blocks before encountering busy jnode + in flush list - abort flushing hoping that next time we get called + this jnode will be clean already, and we will save some seeks. */ +#define FLUSH_WRITTEN_THRESHOLD 50 + +/* The maximum number of nodes to scan left on a level during flush. */ +#define FLUSH_SCAN_MAXNODES 10000 + +/* per-atom limit of flushers */ +#define ATOM_MAX_FLUSHERS (1) + +/* default tracing buffer size */ +#define REISER4_TRACE_BUF_SIZE (1 << 15) + +/* what size units of IO we would like cp, etc., to use, in writing to + reiser4. In bytes. + + Can be overwritten by optimal_io_size mount option. +*/ +#define REISER4_OPTIMAL_IO_SIZE (64 * 1024) + +/* see comments in inode.c:oid_to_uino() */ +#define REISER4_UINO_SHIFT (1 << 30) + +/* Mark function argument as unused to avoid compiler warnings. 
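+   (used as, for example, "loff_t size UNUSED_ARG" in plugin/tail_policy.c)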
*/
+#define UNUSED_ARG __attribute__((unused))
+
+#if ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)
+#define NONNULL __attribute__((nonnull))
+#else
+#define NONNULL
+#endif
+
+/* master super block offset in bytes. */
+#define REISER4_MASTER_OFFSET 65536
+
+/* size of VFS block */
+#define VFS_BLKSIZE 512
+/* number of bits in size of VFS block (512==2^9) */
+#define VFS_BLKSIZE_BITS 9
+
+#define REISER4_I reiser4_inode_data
+
+/* implication */
+#define ergo(antecedent, consequent) (!(antecedent) || (consequent))
+/* logical equivalence */
+#define equi(p1, p2) (ergo((p1), (p2)) && ergo((p2), (p1)))
+
+#define sizeof_array(x) ((int) (sizeof(x) / sizeof(x[0])))
+
+#define NOT_YET (0)
+
+/** Reiser4 specific error codes **/
+
+#define REISER4_ERROR_CODE_BASE 10000
+
+/* Neighbor is not available (side neighbor or parent) */
+#define E_NO_NEIGHBOR (REISER4_ERROR_CODE_BASE)
+
+/* Node was not found in cache */
+#define E_NOT_IN_CACHE (REISER4_ERROR_CODE_BASE + 1)
+
+/* node does not have enough free space to complete the balancing operation */
+#define E_NODE_FULL (REISER4_ERROR_CODE_BASE + 2)
+
+/* repeat operation */
+#define E_REPEAT (REISER4_ERROR_CODE_BASE + 3)
+
+/* deadlock happened */
+#define E_DEADLOCK (REISER4_ERROR_CODE_BASE + 4)
+
+/* operation cannot be performed, because it would block and non-blocking mode
+ * was requested. */
+#define E_BLOCK (REISER4_ERROR_CODE_BASE + 5)
+
+/* wait for some event (depends on context), then repeat */
+#define E_WAIT (REISER4_ERROR_CODE_BASE + 6)
+
+#endif /* __REISER4_H__ */
+
+/* Make Linus happy.
+   Local variables:
+   c-indentation-style: "K&R"
+   mode-name: "LC"
+   c-basic-offset: 8
+   tab-width: 8
+   fill-column: 120
+   End:
+*/
diff --git a/fs/reiser4/safe_link.c b/fs/reiser4/safe_link.c
new file mode 100644
index 000000000000..d59f6f0f129e
--- /dev/null
+++ b/fs/reiser4/safe_link.c
@@ -0,0 +1,354 @@
+/* Copyright 2003, 2004 by Hans Reiser, licensing governed by
+ * reiser4/README */
+
+/* Safe-links. */
+
+/*
+ * Safe-links are used to maintain file system consistency during operations
+ * that spawn multiple transactions. For example:
+ *
+ * 1. Unlink. UNIX supports "open-but-unlinked" files, that is, files
+ * without user-visible names in the file system, but still opened by some
+ * active process. What happens here is that unlink proper (i.e., removal
+ * of the last file name) and file deletion (truncate of the file body to zero
+ * and deletion of stat-data, which happens when the last file descriptor is
+ * closed), may belong to different transactions T1 and T2. If a crash
+ * happens after T1 commit, but before T2 commit, the on-disk file system has
+ * a file without a name, that is, a disk space leak.
+ *
+ * 2. Truncate. Truncate of a large file may spawn multiple transactions. If
+ * the system crashes while truncate is in progress, the file is left partially
+ * truncated, which violates the "atomicity guarantees" of reiser4, viz. that
+ * every system call is atomic.
+ *
+ * Safe-links address both cases above. Basically, a safe-link is a way to post
+ * some operation to be executed during commit of some other transaction than
+ * the current one. (Another way to look at the safe-link is to interpret it as
+ * logical logging.)
+ *
+ * Specifically, at the beginning of unlink a safe-link is inserted into the
+ * tree. This safe-link is normally removed by the file deletion code (during
+ * transaction T2 in the above terms). Truncate also inserts a safe-link that is
+ * normally removed when the truncate operation is finished.
+ *
+ * This means that in the case of a "clean umount" there are no safe-links in
+ * the tree. If safe-links are observed during mount, it means that (a) the system
+ * was terminated abnormally, and (b) the safe-links correspond to the "pending"
+ * (i.e., not finished) operations that were in progress during system
+ * termination. Each safe-link records enough information to complete the
+ * corresponding operation, and mount simply "replays" them (hence, the
+ * analogy with logical logging).
+ *
+ * Safe-links are implemented as blackbox items (see
+ * plugin/item/blackbox.[ch]).
+ *
+ * For reference: ext3 also has a similar mechanism, it's called "an orphan
+ * list" there.
+ */
+
+#include "safe_link.h"
+#include "debug.h"
+#include "inode.h"
+
+#include "plugin/item/blackbox.h"
+
+#include <linux/fs.h>
+
+/*
+ * On-disk format of safe-link.
+ */
+typedef struct safelink {
+	reiser4_key sdkey;	/* key of stat-data for the file safe-link is
+				 * for */
+	d64 size;		/* size to which file should be truncated */
+} safelink_t;
+
+/*
+ * locality where safe-link items are stored. Next to the objectid of the root
+ * directory.
+ */
+static oid_t safe_link_locality(reiser4_tree * tree)
+{
+	return get_key_objectid(get_super_private(tree->super)->df_plug->
+				root_dir_key(tree->super)) + 1;
+}
+
+/*
+  Construct a key for the safe-link. The key has the following format:
+
+|       60      | 4 |        64        | 4 |      60       |        64        |
++---------------+---+------------------+---+---------------+------------------+
+|   locality    | 0 |        0         | 0 |   objectid    |    link type     |
++---------------+---+------------------+---+---------------+------------------+
+|                   |                  |                   |                  |
+|      8 bytes      |      8 bytes     |      8 bytes      |      8 bytes     |
+
+  This is the large keys format. In the small keys format the second 8-byte
+  chunk is omitted. Locality is a constant returned by safe_link_locality().
+  objectid is the oid of the file on which the operation protected by this
+  safe-link is performed. link-type is used to distinguish safe-links for
+  different operations.
+
+ */
+static reiser4_key *build_link_key(reiser4_tree * tree, oid_t oid,
+				   reiser4_safe_link_t link, reiser4_key * key)
+{
+	reiser4_key_init(key);
+	set_key_locality(key, safe_link_locality(tree));
+	set_key_objectid(key, oid);
+	set_key_offset(key, link);
+	return key;
+}
+
+/*
+ * how much disk space is necessary to insert and remove (in the
+ * error-handling path) a safe-link.
+ */
+static __u64 safe_link_tograb(reiser4_tree * tree)
+{
+	return
+	    /* insert safe link */
+	    estimate_one_insert_item(tree) +
+	    /* remove safe link */
+	    estimate_one_item_removal(tree) +
+	    /* drill to the leaf level during insertion */
+	    1 + estimate_one_insert_item(tree) +
+	    /*
+	     * possible update of an existing safe-link. Actually, if the
+	     * safe-link existed already (we failed to remove it), then no
+	     * insertion is necessary, so this term is already "covered",
+	     * but for simplicity let's leave it.
+	     */
+	    1;
+}
+
+/*
+ * grab enough disk space to insert and remove (in the error-handling path)
+ * a safe-link.
+ */
+int safe_link_grab(reiser4_tree * tree, reiser4_ba_flags_t flags)
+{
+	int result;
+
+	grab_space_enable();
+	/* The sbinfo->delete_mutex can be taken here.
+	 * safe_link_release() should be called before leaving reiser4
+	 * context. */
+	result =
+	    reiser4_grab_reserved(tree->super, safe_link_tograb(tree), flags);
+	grab_space_enable();
+	return result;
+}
+
+/*
+ * release unused disk space reserved by safe_link_grab().
+ */ +void safe_link_release(reiser4_tree * tree) +{ + reiser4_release_reserved(tree->super); +} + +/* + * insert into tree safe-link for operation @link on inode @inode. + */ +int safe_link_add(struct inode *inode, reiser4_safe_link_t link) +{ + reiser4_key key; + safelink_t sl; + int length; + int result; + reiser4_tree *tree; + + build_sd_key(inode, &sl.sdkey); + length = sizeof sl.sdkey; + + if (link == SAFE_TRUNCATE) { + /* + * for truncate we have to store final file length also, + * expand item. + */ + length += sizeof(sl.size); + put_unaligned(cpu_to_le64(inode->i_size), &sl.size); + } + tree = reiser4_tree_by_inode(inode); + build_link_key(tree, get_inode_oid(inode), link, &key); + + result = store_black_box(tree, &key, &sl, length); + if (result == -EEXIST) + result = update_black_box(tree, &key, &sl, length); + return result; +} + +/* + * remove safe-link corresponding to the operation @link on inode @inode from + * the tree. + */ +int safe_link_del(reiser4_tree * tree, oid_t oid, reiser4_safe_link_t link) +{ + reiser4_key key; + + return kill_black_box(tree, build_link_key(tree, oid, link, &key)); +} + +/* + * in-memory structure to keep information extracted from safe-link. This is + * used to iterate over all safe-links. + */ +struct safe_link_context { + reiser4_tree *tree; /* internal tree */ + reiser4_key key; /* safe-link key */ + reiser4_key sdkey; /* key of object stat-data */ + reiser4_safe_link_t link; /* safe-link type */ + oid_t oid; /* object oid */ + __u64 size; /* final size for truncate */ +}; + +/* + * start iterating over all safe-links. + */ +static void safe_link_iter_begin(reiser4_tree * tree, + struct safe_link_context *ctx) +{ + ctx->tree = tree; + reiser4_key_init(&ctx->key); + set_key_locality(&ctx->key, safe_link_locality(tree)); + set_key_objectid(&ctx->key, get_key_objectid(reiser4_max_key())); + set_key_offset(&ctx->key, get_key_offset(reiser4_max_key())); +} + +/* + * return next safe-link. + */ +static int safe_link_iter_next(struct safe_link_context *ctx) +{ + int result; + safelink_t sl; + + result = load_black_box(ctx->tree, &ctx->key, &sl, sizeof sl, 0); + if (result == 0) { + ctx->oid = get_key_objectid(&ctx->key); + ctx->link = get_key_offset(&ctx->key); + ctx->sdkey = sl.sdkey; + if (ctx->link == SAFE_TRUNCATE) + ctx->size = le64_to_cpu(get_unaligned(&sl.size)); + } + return result; +} + +/* + * check are there any more safe-links left in the tree. + */ +static int safe_link_iter_finished(struct safe_link_context *ctx) +{ + return get_key_locality(&ctx->key) != safe_link_locality(ctx->tree); +} + +/* + * finish safe-link iteration. + */ +static void safe_link_iter_end(struct safe_link_context *ctx) +{ + /* nothing special */ +} + +/* + * process single safe-link. + */ +static int process_safelink(struct super_block *super, reiser4_safe_link_t link, + reiser4_key * sdkey, oid_t oid, __u64 size) +{ + struct inode *inode; + int result; + + /* + * obtain object inode by reiser4_iget(), then call object plugin + * ->safelink() method to do actual work, then delete safe-link on + * success. + */ + inode = reiser4_iget(super, sdkey, 1); + if (!IS_ERR(inode)) { + file_plugin *fplug; + + fplug = inode_file_plugin(inode); + assert("nikita-3428", fplug != NULL); + assert("", oid == get_inode_oid(inode)); + if (fplug->safelink != NULL) { + /* reiser4_txn_restart_current is not necessary because + * mounting is signle thread. However, without it + * deadlock detection code will complain (see + * nikita-3361). 
*/ + reiser4_txn_restart_current(); + result = fplug->safelink(inode, link, size); + } else { + warning("nikita-3430", + "Cannot handle safelink for %lli", + (unsigned long long)oid); + reiser4_print_key("key", sdkey); + result = 0; + } + if (result != 0) { + warning("nikita-3431", + "Error processing safelink for %lli: %i", + (unsigned long long)oid, result); + } + reiser4_iget_complete(inode); + iput(inode); + if (result == 0) { + result = safe_link_grab(reiser4_get_tree(super), + BA_CAN_COMMIT); + if (result == 0) + result = + safe_link_del(reiser4_get_tree(super), oid, + link); + safe_link_release(reiser4_get_tree(super)); + /* + * restart transaction: if there was large number of + * safe-links, their processing may fail to fit into + * single transaction. + */ + if (result == 0) + reiser4_txn_restart_current(); + } + } else + result = PTR_ERR(inode); + return result; +} + +/* + * iterate over all safe-links in the file-system processing them one by one. + */ +int process_safelinks(struct super_block *super) +{ + struct safe_link_context ctx; + int result; + + if (rofs_super(super)) + /* do nothing on the read-only file system */ + return 0; + safe_link_iter_begin(&get_super_private(super)->tree, &ctx); + result = 0; + do { + result = safe_link_iter_next(&ctx); + if (safe_link_iter_finished(&ctx) || result == -ENOENT) { + result = 0; + break; + } + if (result == 0) + result = process_safelink(super, ctx.link, + &ctx.sdkey, ctx.oid, + ctx.size); + } while (result == 0); + safe_link_iter_end(&ctx); + return result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/safe_link.h b/fs/reiser4/safe_link.h new file mode 100644 index 000000000000..65252b624972 --- /dev/null +++ b/fs/reiser4/safe_link.h @@ -0,0 +1,29 @@ +/* Copyright 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Safe-links. See safe_link.c for details. */ + +#if !defined(__FS_SAFE_LINK_H__) +#define __FS_SAFE_LINK_H__ + +#include "tree.h" + +int safe_link_grab(reiser4_tree * tree, reiser4_ba_flags_t flags); +void safe_link_release(reiser4_tree * tree); +int safe_link_add(struct inode *inode, reiser4_safe_link_t link); +int safe_link_del(reiser4_tree *, oid_t oid, reiser4_safe_link_t link); + +int process_safelinks(struct super_block *super); + +/* __FS_SAFE_LINK_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/seal.c b/fs/reiser4/seal.c new file mode 100644 index 000000000000..daeef6f3f5ac --- /dev/null +++ b/fs/reiser4/seal.c @@ -0,0 +1,219 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ +/* Seals implementation. */ +/* Seals are "weak" tree pointers. They are analogous to tree coords in + allowing to bypass tree traversal. But normal usage of coords implies that + node pointed to by coord is locked, whereas seals don't keep a lock (or + even a reference) to znode. In stead, each znode contains a version number, + increased on each znode modification. This version number is copied into a + seal when seal is created. Later, one can "validate" seal by calling + reiser4_seal_validate(). If znode is in cache and its version number is + still the same, seal is "pristine" and coord associated with it can be + re-used immediately. 
+ + If, on the other hand, znode is out of cache, or it is obviously different + one from the znode seal was initially attached to (for example, it is on + the different level, or is being removed from the tree), seal is + irreparably invalid ("burned") and tree traversal has to be repeated. + + Otherwise, there is some hope, that while znode was modified (and seal was + "broken" as a result), key attached to the seal is still in the node. This + is checked by first comparing this key with delimiting keys of node and, if + key is ok, doing intra-node lookup. + + Znode version is maintained in the following way: + + there is reiser4_tree.znode_epoch counter. Whenever new znode is created, + znode_epoch is incremented and its new value is stored in ->version field + of new znode. Whenever znode is dirtied (which means it was probably + modified), znode_epoch is also incremented and its new value is stored in + znode->version. This is done so, because just incrementing znode->version + on each update is not enough: it may so happen, that znode get deleted, new + znode is allocated for the same disk block and gets the same version + counter, tricking seal code into false positive. +*/ + +#include "forward.h" +#include "debug.h" +#include "key.h" +#include "coord.h" +#include "seal.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "znode.h" +#include "super.h" + +static znode *seal_node(const seal_t *seal); +static int seal_matches(const seal_t *seal, znode * node); + +/* initialise seal. This can be called several times on the same seal. @coord + and @key can be NULL. */ +void reiser4_seal_init(seal_t *seal /* seal to initialise */ , + const coord_t *coord /* coord @seal will be + * attached to */ , + const reiser4_key * key UNUSED_ARG /* key @seal will be + * attached to */ ) +{ + assert("nikita-1886", seal != NULL); + memset(seal, 0, sizeof *seal); + if (coord != NULL) { + znode *node; + + node = coord->node; + assert("nikita-1987", node != NULL); + spin_lock_znode(node); + seal->version = node->version; + assert("nikita-1988", seal->version != 0); + seal->block = *znode_get_block(node); +#if REISER4_DEBUG + seal->coord1 = *coord; + if (key != NULL) + seal->key = *key; +#endif + spin_unlock_znode(node); + } +} + +/* finish with seal */ +void reiser4_seal_done(seal_t *seal/* seal to clear */) +{ + assert("nikita-1887", seal != NULL); + seal->version = 0; +} + +/* true if seal was initialised */ +int reiser4_seal_is_set(const seal_t *seal/* seal to query */) +{ + assert("nikita-1890", seal != NULL); + return seal->version != 0; +} + +#if REISER4_DEBUG +/* helper function for reiser4_seal_validate(). It checks that item at @coord + * has expected key. This is to detect cases where node was modified but wasn't + * marked dirty. */ +static inline int check_seal_match(const coord_t *coord /* coord to check */ , + const reiser4_key *k__/* expected key */) +{ + reiser4_key ukey; + + /* FIXME-VS: we only can compare keys for items whose units + represent exactly one key */ + if (coord->between != AT_UNIT) + return 1; + if (!coord_is_existing_unit(coord)) + return 0; + if (item_is_extent(coord)) + return 1; + if (item_is_ctail(coord)) + return keyge(k__, unit_key_by_coord(coord, &ukey)); + return keyeq(k__, unit_key_by_coord(coord, &ukey)); +} +#endif + +/* this is used by reiser4_seal_validate. It accepts return value of + * longterm_lock_znode and returns 1 if it can be interpreted as seal + * validation failure. 
For instance, when longterm_lock_znode returns -EINVAL, + * reiser4_seal_validate returns -E_REPEAT and caller will call tre search. + * We cannot do this in longterm_lock_znode(), because sometimes we want to + * distinguish between -EINVAL and -E_REPEAT. */ +static int should_repeat(int return_code) +{ + return return_code == -EINVAL; +} + +/* (re-)validate seal. + + Checks whether seal is pristine, and try to revalidate it if possible. + + If seal was burned, or broken irreparably, return -E_REPEAT. + + NOTE-NIKITA currently reiser4_seal_validate() returns -E_REPEAT if key we are + looking for is in range of keys covered by the sealed node, but item wasn't + found by node ->lookup() method. Alternative is to return -ENOENT in this + case, but this would complicate callers logic. + +*/ +int reiser4_seal_validate(seal_t *seal /* seal to validate */, + coord_t *coord /* coord to validate against */, + const reiser4_key * key /* key to validate against */, + lock_handle * lh /* resulting lock handle */, + znode_lock_mode mode /* lock node */, + znode_lock_request request/* locking priority */) +{ + znode *node; + int result; + + assert("nikita-1889", seal != NULL); + assert("nikita-1881", reiser4_seal_is_set(seal)); + assert("nikita-1882", key != NULL); + assert("nikita-1883", coord != NULL); + assert("nikita-1884", lh != NULL); + assert("nikita-1885", keyeq(&seal->key, key)); + assert("nikita-1989", coords_equal(&seal->coord1, coord)); + + /* obtain znode by block number */ + node = seal_node(seal); + if (!node) + /* znode wasn't in cache */ + return RETERR(-E_REPEAT); + /* znode was in cache, lock it */ + result = longterm_lock_znode(lh, node, mode, request); + zput(node); + if (result == 0) { + if (seal_matches(seal, node)) { + /* if seal version and znode version + coincide */ + ON_DEBUG(coord_update_v(coord)); + assert("nikita-1990", + node == seal->coord1.node); + assert("nikita-1898", + WITH_DATA_RET(coord->node, 1, + check_seal_match(coord, + key))); + } else + result = RETERR(-E_REPEAT); + } + if (result != 0) { + if (should_repeat(result)) + result = RETERR(-E_REPEAT); + /* unlock node on failure */ + done_lh(lh); + } + return result; +} + +/* helpers functions */ + +/* obtain reference to znode seal points to, if in cache */ +static znode *seal_node(const seal_t *seal/* seal to query */) +{ + assert("nikita-1891", seal != NULL); + return zlook(current_tree, &seal->block); +} + +/* true if @seal version and @node version coincide */ +static int seal_matches(const seal_t *seal /* seal to check */ , + znode * node/* node to check */) +{ + int result; + + assert("nikita-1991", seal != NULL); + assert("nikita-1993", node != NULL); + + spin_lock_znode(node); + result = (seal->version == node->version); + spin_unlock_znode(node); + return result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/seal.h b/fs/reiser4/seal.h new file mode 100644 index 000000000000..19d5d521f75c --- /dev/null +++ b/fs/reiser4/seal.h @@ -0,0 +1,49 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Declaration of seals: "weak" tree pointers. See seal.c for comments. */ + +#ifndef __SEAL_H__ +#define __SEAL_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" + +/* for __u?? types */ +/*#include <linux/types.h>*/ + +/* seal. 
See comment at the top of seal.c */ +typedef struct seal_s { + /* version of znode recorder at the time of seal creation */ + __u64 version; + /* block number of znode attached to this seal */ + reiser4_block_nr block; +#if REISER4_DEBUG + /* coord this seal is attached to. For debugging. */ + coord_t coord1; + /* key this seal is attached to. For debugging. */ + reiser4_key key; +#endif +} seal_t; + +extern void reiser4_seal_init(seal_t *, const coord_t *, const reiser4_key *); +extern void reiser4_seal_done(seal_t *); +extern int reiser4_seal_is_set(const seal_t *); +extern int reiser4_seal_validate(seal_t *, coord_t *, + const reiser4_key *, lock_handle * , + znode_lock_mode mode, znode_lock_request request); + +/* __SEAL_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/search.c b/fs/reiser4/search.c new file mode 100644 index 000000000000..0fa6bdb3645e --- /dev/null +++ b/fs/reiser4/search.c @@ -0,0 +1,1612 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "seal.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "tree.h" +#include "reiser4.h" +#include "super.h" +#include "inode.h" + +#include <linux/slab.h> + +static const char *bias_name(lookup_bias bias); + +/* tree searching algorithm, intranode searching algorithms are in + plugin/node/ */ + +/* tree lookup cache + * + * The coord by key cache consists of small list of recently accessed nodes + * maintained according to the LRU discipline. Before doing real top-to-down + * tree traversal this cache is scanned for nodes that can contain key + * requested. + * + * The efficiency of coord cache depends heavily on locality of reference for + * tree accesses. Our user level simulations show reasonably good hit ratios + * for coord cache under most loads so far. 
+ */ + +/* Initialise coord cache slot */ +static void cbk_cache_init_slot(cbk_cache_slot *slot) +{ + assert("nikita-345", slot != NULL); + + INIT_LIST_HEAD(&slot->lru); + slot->node = NULL; +} + +/* Initialize coord cache */ +int cbk_cache_init(cbk_cache * cache/* cache to init */) +{ + int i; + + assert("nikita-346", cache != NULL); + + cache->slot = + kmalloc(sizeof(cbk_cache_slot) * cache->nr_slots, + reiser4_ctx_gfp_mask_get()); + if (cache->slot == NULL) + return RETERR(-ENOMEM); + + INIT_LIST_HEAD(&cache->lru); + for (i = 0; i < cache->nr_slots; ++i) { + cbk_cache_init_slot(cache->slot + i); + list_add_tail(&((cache->slot + i)->lru), &cache->lru); + } + rwlock_init(&cache->guard); + return 0; +} + +/* free cbk cache data */ +void cbk_cache_done(cbk_cache * cache/* cache to release */) +{ + assert("nikita-2493", cache != NULL); + if (cache->slot != NULL) { + kfree(cache->slot); + cache->slot = NULL; + } +} + +/* macro to iterate over all cbk cache slots */ +#define for_all_slots(cache, slot) \ + for ((slot) = list_entry((cache)->lru.next, cbk_cache_slot, lru); \ + &(cache)->lru != &(slot)->lru; \ + (slot) = list_entry(slot->lru.next, cbk_cache_slot, lru)) + +#if REISER4_DEBUG +/* this function assures that [cbk-cache-invariant] invariant holds */ +static int cbk_cache_invariant(const cbk_cache * cache) +{ + cbk_cache_slot *slot; + int result; + int unused; + + if (cache->nr_slots == 0) + return 1; + + assert("nikita-2469", cache != NULL); + unused = 0; + result = 1; + read_lock(&((cbk_cache *)cache)->guard); + for_all_slots(cache, slot) { + /* in LRU first go all `used' slots followed by `unused' */ + if (unused && (slot->node != NULL)) + result = 0; + if (slot->node == NULL) + unused = 1; + else { + cbk_cache_slot *scan; + + /* all cached nodes are different */ + scan = slot; + while (result) { + scan = list_entry(scan->lru.next, + cbk_cache_slot, lru); + if (&cache->lru == &scan->lru) + break; + if (slot->node == scan->node) + result = 0; + } + } + if (!result) + break; + } + read_unlock(&((cbk_cache *)cache)->guard); + return result; +} + +#endif + +/* Remove references, if any, to @node from coord cache */ +void cbk_cache_invalidate(const znode * node /* node to remove from cache */ , + reiser4_tree * tree/* tree to remove node from */) +{ + cbk_cache_slot *slot; + cbk_cache *cache; + int i; + + assert("nikita-350", node != NULL); + assert("nikita-1479", LOCK_CNT_GTZ(rw_locked_tree)); + + cache = &tree->cbk_cache; + assert("nikita-2470", cbk_cache_invariant(cache)); + + write_lock(&(cache->guard)); + for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) { + if (slot->node == node) { + list_move_tail(&slot->lru, &cache->lru); + slot->node = NULL; + break; + } + } + write_unlock(&(cache->guard)); + assert("nikita-2471", cbk_cache_invariant(cache)); +} + +/* add to the cbk-cache in the "tree" information about "node". This + can actually be update of existing slot in a cache. 
*/ +static void cbk_cache_add(const znode * node/* node to add to the cache */) +{ + cbk_cache *cache; + + cbk_cache_slot *slot; + int i; + + assert("nikita-352", node != NULL); + + cache = &znode_get_tree(node)->cbk_cache; + assert("nikita-2472", cbk_cache_invariant(cache)); + + if (cache->nr_slots == 0) + return; + + write_lock(&(cache->guard)); + /* find slot to update/add */ + for (i = 0, slot = cache->slot; i < cache->nr_slots; ++i, ++slot) { + /* oops, this node is already in a cache */ + if (slot->node == node) + break; + } + /* if all slots are used, reuse least recently used one */ + if (i == cache->nr_slots) { + slot = list_entry(cache->lru.prev, cbk_cache_slot, lru); + slot->node = (znode *) node; + } + list_move(&slot->lru, &cache->lru); + write_unlock(&(cache->guard)); + assert("nikita-2473", cbk_cache_invariant(cache)); +} + +static int setup_delimiting_keys(cbk_handle * h); +static lookup_result coord_by_handle(cbk_handle * handle); +static lookup_result traverse_tree(cbk_handle * h); +static int cbk_cache_search(cbk_handle * h); + +static level_lookup_result cbk_level_lookup(cbk_handle * h); +static level_lookup_result cbk_node_lookup(cbk_handle * h); + +/* helper functions */ + +static void update_stale_dk(reiser4_tree * tree, znode * node); + +/* release parent node during traversal */ +static void put_parent(cbk_handle * h); +/* check consistency of fields */ +static int sanity_check(cbk_handle * h); +/* release resources in handle */ +static void hput(cbk_handle * h); + +static level_lookup_result search_to_left(cbk_handle * h); + +/* pack numerous (numberous I should say) arguments of coord_by_key() into + * cbk_handle */ +static cbk_handle *cbk_pack(cbk_handle * handle, + reiser4_tree * tree, + const reiser4_key * key, + coord_t *coord, + lock_handle * active_lh, + lock_handle * parent_lh, + znode_lock_mode lock_mode, + lookup_bias bias, + tree_level lock_level, + tree_level stop_level, + __u32 flags, ra_info_t *info) +{ + memset(handle, 0, sizeof *handle); + + handle->tree = tree; + handle->key = key; + handle->lock_mode = lock_mode; + handle->bias = bias; + handle->lock_level = lock_level; + handle->stop_level = stop_level; + handle->coord = coord; + /* set flags. See comment in tree.h:cbk_flags */ + handle->flags = flags | CBK_TRUST_DK | CBK_USE_CRABLOCK; + + handle->active_lh = active_lh; + handle->parent_lh = parent_lh; + handle->ra_info = info; + return handle; +} + +/* main tree lookup procedure + + Check coord cache. If key we are looking for is not found there, call cbk() + to do real tree traversal. + + As we have extents on the twig level, @lock_level and @stop_level can + be different from LEAF_LEVEL and each other. + + Thread cannot keep any reiser4 locks (tree, znode, dk spin-locks, or znode + long term locks) while calling this. +*/ +lookup_result coord_by_key(reiser4_tree * tree /* tree to perform search + * in. Usually this tree is + * part of file-system + * super-block */ , + const reiser4_key * key /* key to look for */ , + coord_t *coord /* where to store found + * position in a tree. Fields + * in "coord" are only valid if + * coord_by_key() returned + * "CBK_COORD_FOUND" */ , + lock_handle * lh, /* resulting lock handle */ + znode_lock_mode lock_mode /* type of lookup we + * want on node. 
Pass + * ZNODE_READ_LOCK here + * if you only want to + * read item found and + * ZNODE_WRITE_LOCK if + * you want to modify + * it */ , + lookup_bias bias /* what to return if coord + * with exactly the @key is + * not in the tree */ , + tree_level lock_level/* tree level where to start + * taking @lock type of + * locks */ , + tree_level stop_level/* tree level to stop. Pass + * LEAF_LEVEL or TWIG_LEVEL + * here Item being looked + * for has to be between + * @lock_level and + * @stop_level, inclusive */ , + __u32 flags /* search flags */ , + ra_info_t * + info + /* information about desired tree traversal + * readahead */ + ) +{ + cbk_handle handle; + lock_handle parent_lh; + lookup_result result; + + init_lh(lh); + init_lh(&parent_lh); + + assert("nikita-3023", reiser4_schedulable()); + + assert("nikita-353", tree != NULL); + assert("nikita-354", key != NULL); + assert("nikita-355", coord != NULL); + assert("nikita-356", (bias == FIND_EXACT) + || (bias == FIND_MAX_NOT_MORE_THAN)); + assert("nikita-357", stop_level >= LEAF_LEVEL); + /* no locks can be held during tree traversal */ + assert("nikita-2104", lock_stack_isclean(get_current_lock_stack())); + + cbk_pack(&handle, + tree, + key, + coord, + lh, + &parent_lh, + lock_mode, bias, lock_level, stop_level, flags, info); + + result = coord_by_handle(&handle); + assert("nikita-3247", + ergo(!IS_CBKERR(result), coord->node == lh->node)); + return result; +} + +/* like coord_by_key(), but starts traversal from vroot of @object rather than + * from tree root. */ +lookup_result reiser4_object_lookup(struct inode *object, + const reiser4_key * key, + coord_t *coord, + lock_handle * lh, + znode_lock_mode lock_mode, + lookup_bias bias, + tree_level lock_level, + tree_level stop_level, __u32 flags, + ra_info_t *info) +{ + cbk_handle handle; + lock_handle parent_lh; + lookup_result result; + + init_lh(lh); + init_lh(&parent_lh); + + assert("nikita-3023", reiser4_schedulable()); + + assert("nikita-354", key != NULL); + assert("nikita-355", coord != NULL); + assert("nikita-356", (bias == FIND_EXACT) + || (bias == FIND_MAX_NOT_MORE_THAN)); + assert("nikita-357", stop_level >= LEAF_LEVEL); + /* no locks can be held during tree search by key */ + assert("nikita-2104", lock_stack_isclean(get_current_lock_stack())); + + cbk_pack(&handle, + object != NULL ? reiser4_tree_by_inode(object) : current_tree, + key, + coord, + lh, + &parent_lh, + lock_mode, bias, lock_level, stop_level, flags, info); + handle.object = object; + + result = coord_by_handle(&handle); + assert("nikita-3247", + ergo(!IS_CBKERR(result), coord->node == lh->node)); + return result; +} + +/* lookup by cbk_handle. Common part of coord_by_key() and + reiser4_object_lookup(). */ +static lookup_result coord_by_handle(cbk_handle * handle) +{ + /* + * first check cbk_cache (which is look-aside cache for our tree) and + * of this fails, start traversal. + */ + /* first check whether "key" is in cache of recent lookups. */ + if (cbk_cache_search(handle) == 0) + return handle->result; + else + return traverse_tree(handle); +} + +/* Execute actor for each item (or unit, depending on @through_units_p), + starting from @coord, right-ward, until either: + + - end of the tree is reached + - unformatted node is met + - error occurred + - @actor returns 0 or less + + Error code, or last actor return value is returned. + + This is used by plugin/dir/hashe_dir.c:reiser4_find_entry() to move through + sequence of entries with identical keys and alikes. 
+*/ +int reiser4_iterate_tree(reiser4_tree * tree /* tree to scan */ , + coord_t *coord /* coord to start from */ , + lock_handle * lh /* lock handle to start with and to + * update along the way */ , + tree_iterate_actor_t actor /* function to call on each + * item/unit */ , + void *arg /* argument to pass to @actor */ , + znode_lock_mode mode /* lock mode on scanned nodes */ , + int through_units_p /* call @actor on each item or on + * each unit */ ) +{ + int result; + + assert("nikita-1143", tree != NULL); + assert("nikita-1145", coord != NULL); + assert("nikita-1146", lh != NULL); + assert("nikita-1147", actor != NULL); + + result = zload(coord->node); + coord_clear_iplug(coord); + if (result != 0) + return result; + if (!coord_is_existing_unit(coord)) { + zrelse(coord->node); + return -ENOENT; + } + while ((result = actor(tree, coord, lh, arg)) > 0) { + /* move further */ + if ((through_units_p && coord_next_unit(coord)) || + (!through_units_p && coord_next_item(coord))) { + do { + lock_handle couple; + + /* move to the next node */ + init_lh(&couple); + result = + reiser4_get_right_neighbor(&couple, + coord->node, + (int)mode, + GN_CAN_USE_UPPER_LEVELS); + zrelse(coord->node); + if (result == 0) { + + result = zload(couple.node); + if (result != 0) { + done_lh(&couple); + return result; + } + + coord_init_first_unit(coord, + couple.node); + done_lh(lh); + move_lh(lh, &couple); + } else + return result; + } while (node_is_empty(coord->node)); + } + + assert("nikita-1149", coord_is_existing_unit(coord)); + } + zrelse(coord->node); + return result; +} + +/* return locked uber znode for @tree */ +int get_uber_znode(reiser4_tree * tree, znode_lock_mode mode, + znode_lock_request pri, lock_handle * lh) +{ + int result; + + result = longterm_lock_znode(lh, tree->uber, mode, pri); + return result; +} + +/* true if @key is strictly within @node + + we are looking for possibly non-unique key and it is item is at the edge of + @node. May be it is in the neighbor. +*/ +static int znode_contains_key_strict(znode * node /* node to check key + * against */ , + const reiser4_key * + key /* key to check */ , + int isunique) +{ + int answer; + + assert("nikita-1760", node != NULL); + assert("nikita-1722", key != NULL); + + if (keyge(key, &node->rd_key)) + return 0; + + answer = keycmp(&node->ld_key, key); + + if (isunique) + return answer != GREATER_THAN; + else + return answer == LESS_THAN; +} + +/* + * Virtual Root (vroot) code. + * + * For given file system object (e.g., regular file or directory) let's + * define its "virtual root" as lowest in the tree (that is, furtherest + * from the tree root) node such that all body items of said object are + * located in a tree rooted at this node. + * + * Once vroot of object is found all tree lookups for items within body of + * this object ("object lookups") can be started from its vroot rather + * than from real root. This has following advantages: + * + * 1. amount of nodes traversed during lookup (and, hence, amount of + * key comparisons made) decreases, and + * + * 2. contention on tree root is decreased. This latter was actually + * motivating reason behind vroot, because spin lock of root node, + * which is taken when acquiring long-term lock on root node is the + * hottest lock in the reiser4. + * + * How to find vroot. + * + * When vroot of object F is not yet determined, all object lookups start + * from the root of the tree. 
At each tree level during traversal we have + * a node N such that a key we are looking for (which is the key inside + * object's body) is located within N. In function handle_vroot() called + * from cbk_level_lookup() we check whether N is possible vroot for + * F. Check is trivial---if neither leftmost nor rightmost item of N + * belongs to F (and we already have helpful ->owns_item() method of + * object plugin for this), then N is possible vroot of F. This, of + * course, relies on the assumption that each object occupies contiguous + * range of keys in the tree. + * + * Thus, traversing tree downward and checking each node as we go, we can + * find lowest such node, which, by definition, is vroot. + * + * How to track vroot. + * + * Nohow. If actual vroot changes, next object lookup will just restart + * from the actual tree root, refreshing object's vroot along the way. + * + */ + +/* + * Check whether @node is possible vroot of @object. + */ +static void handle_vroot(struct inode *object, znode * node) +{ + file_plugin *fplug; + coord_t coord; + + fplug = inode_file_plugin(object); + assert("nikita-3353", fplug != NULL); + assert("nikita-3354", fplug->owns_item != NULL); + + if (unlikely(node_is_empty(node))) + return; + + coord_init_first_unit(&coord, node); + /* + * if leftmost item of @node belongs to @object, we cannot be sure + * that @node is vroot of @object, because, some items of @object are + * probably in the sub-tree rooted at the left neighbor of @node. + */ + if (fplug->owns_item(object, &coord)) + return; + coord_init_last_unit(&coord, node); + /* mutatis mutandis for the rightmost item */ + if (fplug->owns_item(object, &coord)) + return; + /* otherwise, @node is possible vroot of @object */ + inode_set_vroot(object, node); +} + +/* + * helper function used by traverse tree to start tree traversal not from the + * tree root, but from @h->object's vroot, if possible. + */ +static int prepare_object_lookup(cbk_handle * h) +{ + znode *vroot; + int result; + + vroot = inode_get_vroot(h->object); + if (vroot == NULL) { + /* + * object doesn't have known vroot, start from real tree root. + */ + return LOOKUP_CONT; + } + + h->level = znode_get_level(vroot); + /* take a long-term lock on vroot */ + h->result = longterm_lock_znode(h->active_lh, vroot, + cbk_lock_mode(h->level, h), + ZNODE_LOCK_LOPRI); + result = LOOKUP_REST; + if (h->result == 0) { + int isunique; + int inside; + + isunique = h->flags & CBK_UNIQUE; + /* check that key is inside vroot */ + read_lock_dk(h->tree); + inside = (znode_contains_key_strict(vroot, h->key, isunique) && + !ZF_ISSET(vroot, JNODE_HEARD_BANSHEE)); + read_unlock_dk(h->tree); + if (inside) { + h->result = zload(vroot); + if (h->result == 0) { + /* search for key in vroot. 
*/ + result = cbk_node_lookup(h); + zrelse(vroot); /*h->active_lh->node); */ + if (h->active_lh->node != vroot) { + result = LOOKUP_REST; + } else if (result == LOOKUP_CONT) { + move_lh(h->parent_lh, h->active_lh); + h->flags &= ~CBK_DKSET; + } + } + } + } + + zput(vroot); + + if (IS_CBKERR(h->result) || result == LOOKUP_REST) + hput(h); + return result; +} + +/* main function that handles common parts of tree traversal: starting + (fake znode handling), restarts, error handling, completion */ +static lookup_result traverse_tree(cbk_handle * h/* search handle */) +{ + int done; + int iterations; + int vroot_used; + + assert("nikita-365", h != NULL); + assert("nikita-366", h->tree != NULL); + assert("nikita-367", h->key != NULL); + assert("nikita-368", h->coord != NULL); + assert("nikita-369", (h->bias == FIND_EXACT) + || (h->bias == FIND_MAX_NOT_MORE_THAN)); + assert("nikita-370", h->stop_level >= LEAF_LEVEL); + assert("nikita-2949", !(h->flags & CBK_DKSET)); + assert("zam-355", lock_stack_isclean(get_current_lock_stack())); + + done = 0; + iterations = 0; + vroot_used = 0; + + /* loop for restarts */ +restart: + + assert("nikita-3024", reiser4_schedulable()); + + h->result = CBK_COORD_FOUND; + /* connect_znode() needs it */ + h->ld_key = *reiser4_min_key(); + h->rd_key = *reiser4_max_key(); + h->flags |= CBK_DKSET; + h->error = NULL; + + if (!vroot_used && h->object != NULL) { + vroot_used = 1; + done = prepare_object_lookup(h); + if (done == LOOKUP_REST) + goto restart; + else if (done == LOOKUP_DONE) + return h->result; + } + if (h->parent_lh->node == NULL) { + done = + get_uber_znode(h->tree, ZNODE_READ_LOCK, ZNODE_LOCK_LOPRI, + h->parent_lh); + + assert("nikita-1637", done != -E_DEADLOCK); + + h->block = h->tree->root_block; + h->level = h->tree->height; + h->coord->node = h->parent_lh->node; + + if (done != 0) + return done; + } + + /* loop descending a tree */ + while (!done) { + + if (unlikely((iterations > REISER4_CBK_ITERATIONS_LIMIT) && + IS_POW(iterations))) { + warning("nikita-1481", "Too many iterations: %i", + iterations); + reiser4_print_key("key", h->key); + ++iterations; + } else if (unlikely(iterations > REISER4_MAX_CBK_ITERATIONS)) { + h->error = + "reiser-2018: Too many iterations. Tree corrupted, or (less likely) starvation occurring."; + h->result = RETERR(-EIO); + break; + } + switch (cbk_level_lookup(h)) { + case LOOKUP_CONT: + move_lh(h->parent_lh, h->active_lh); + continue; + default: + wrong_return_value("nikita-372", "cbk_level"); + case LOOKUP_DONE: + done = 1; + break; + case LOOKUP_REST: + hput(h); + /* deadlock avoidance is normal case. */ + if (h->result != -E_DEADLOCK) + ++iterations; + reiser4_preempt_point(); + goto restart; + } + } + /* that's all. The rest is error handling */ + if (unlikely(h->error != NULL)) { + warning("nikita-373", "%s: level: %i, " + "lock_level: %i, stop_level: %i " + "lock_mode: %s, bias: %s", + h->error, h->level, h->lock_level, h->stop_level, + lock_mode_name(h->lock_mode), bias_name(h->bias)); + reiser4_print_address("block", &h->block); + reiser4_print_key("key", h->key); + print_coord_content("coord", h->coord); + } + /* `unlikely' error case */ + if (unlikely(IS_CBKERR(h->result))) { + /* failure. 
do cleanup */ + hput(h); + } else { + assert("nikita-1605", WITH_DATA_RET + (h->coord->node, 1, + ergo((h->result == CBK_COORD_FOUND) && + (h->bias == FIND_EXACT) && + (!node_is_empty(h->coord->node)), + coord_is_existing_item(h->coord)))); + } + return h->result; +} + +/* find delimiting keys of child + + Determine left and right delimiting keys for child pointed to by + @parent_coord. + +*/ +static void find_child_delimiting_keys(znode * parent /* parent znode, passed + * locked */ , + const coord_t *parent_coord + /* coord where pointer + * to child is stored + */ , + reiser4_key * ld /* where to store left + * delimiting key */ , + reiser4_key * rd /* where to store right + * delimiting key */ ) +{ + coord_t neighbor; + + assert("nikita-1484", parent != NULL); + assert_rw_locked(&(znode_get_tree(parent)->dk_lock)); + + coord_dup(&neighbor, parent_coord); + + if (neighbor.between == AT_UNIT) + /* imitate item ->lookup() behavior. */ + neighbor.between = AFTER_UNIT; + + if (coord_set_to_left(&neighbor) == 0) + unit_key_by_coord(&neighbor, ld); + else { + assert("nikita-14851", 0); + *ld = *znode_get_ld_key(parent); + } + + coord_dup(&neighbor, parent_coord); + if (neighbor.between == AT_UNIT) + neighbor.between = AFTER_UNIT; + if (coord_set_to_right(&neighbor) == 0) + unit_key_by_coord(&neighbor, rd); + else + *rd = *znode_get_rd_key(parent); +} + +/* + * setup delimiting keys for a child + * + * @parent parent node + * + * @coord location in @parent where pointer to @child is + * + * @child child node + */ +int +set_child_delimiting_keys(znode * parent, const coord_t *coord, znode * child) +{ + reiser4_tree *tree; + + assert("nikita-2952", + znode_get_level(parent) == znode_get_level(coord->node)); + + /* fast check without taking dk lock. This is safe, because + * JNODE_DKSET is never cleared once set. */ + if (!ZF_ISSET(child, JNODE_DKSET)) { + tree = znode_get_tree(parent); + write_lock_dk(tree); + if (likely(!ZF_ISSET(child, JNODE_DKSET))) { + find_child_delimiting_keys(parent, coord, + &child->ld_key, + &child->rd_key); + ON_DEBUG(child->ld_key_version = + atomic_inc_return(&delim_key_version); + child->rd_key_version = + atomic_inc_return(&delim_key_version);); + ZF_SET(child, JNODE_DKSET); + } + write_unlock_dk(tree); + return 1; + } + return 0; +} + +/* Perform tree lookup at one level. This is called from cbk_traverse() + function that drives lookup through tree and calls cbk_node_lookup() to + perform lookup within one node. + + See comments in a code. +*/ +static level_lookup_result cbk_level_lookup(cbk_handle * h/* search handle */) +{ + int ret; + int setdk; + int ldkeyset = 0; + reiser4_key ldkey; + reiser4_key key; + znode *active; + + assert("nikita-3025", reiser4_schedulable()); + + /* acquire reference to @active node */ + active = + zget(h->tree, &h->block, h->parent_lh->node, h->level, + reiser4_ctx_gfp_mask_get()); + + if (IS_ERR(active)) { + h->result = PTR_ERR(active); + return LOOKUP_DONE; + } + + /* lock @active */ + h->result = longterm_lock_znode(h->active_lh, + active, + cbk_lock_mode(h->level, h), + ZNODE_LOCK_LOPRI); + /* longterm_lock_znode() acquires additional reference to znode (which + will be later released by longterm_unlock_znode()). Release + reference acquired by zget(). + */ + zput(active); + if (unlikely(h->result != 0)) + goto fail_or_restart; + + setdk = 0; + /* if @active is accessed for the first time, setup delimiting keys on + it. Delimiting keys are taken from the parent node. See + setup_delimiting_keys() for details. 
+ */ + if (h->flags & CBK_DKSET) { + setdk = setup_delimiting_keys(h); + h->flags &= ~CBK_DKSET; + } else { + znode *parent; + + parent = h->parent_lh->node; + h->result = zload(parent); + if (unlikely(h->result != 0)) + goto fail_or_restart; + + if (!ZF_ISSET(active, JNODE_DKSET)) + setdk = set_child_delimiting_keys(parent, + h->coord, active); + else { + read_lock_dk(h->tree); + find_child_delimiting_keys(parent, h->coord, &ldkey, + &key); + read_unlock_dk(h->tree); + ldkeyset = 1; + } + zrelse(parent); + } + + /* this is ugly kludge. Reminder: this is necessary, because + ->lookup() method returns coord with ->between field probably set + to something different from AT_UNIT. + */ + h->coord->between = AT_UNIT; + + if (znode_just_created(active) && (h->coord->node != NULL)) { + write_lock_tree(h->tree); + /* if we are going to load znode right now, setup + ->in_parent: coord where pointer to this node is stored in + parent. + */ + coord_to_parent_coord(h->coord, &active->in_parent); + write_unlock_tree(h->tree); + } + + /* check connectedness without holding tree lock---false negatives + * will be re-checked by connect_znode(), and false positives are + * impossible---@active cannot suddenly turn into unconnected + * state. */ + if (!znode_is_connected(active)) { + h->result = connect_znode(h->coord, active); + if (unlikely(h->result != 0)) { + put_parent(h); + goto fail_or_restart; + } + } + + jload_prefetch(ZJNODE(active)); + + if (setdk) + update_stale_dk(h->tree, active); + + /* put_parent() cannot be called earlier, because connect_znode() + assumes parent node is referenced; */ + put_parent(h); + + if ((!znode_contains_key_lock(active, h->key) && + (h->flags & CBK_TRUST_DK)) + || ZF_ISSET(active, JNODE_HEARD_BANSHEE)) { + /* 1. key was moved out of this node while this thread was + waiting for the lock. Restart. More elaborate solution is + to determine where key moved (to the left, or to the right) + and try to follow it through sibling pointers. + + 2. or, node itself is going to be removed from the + tree. Release lock and restart. + */ + h->result = -E_REPEAT; + } + if (h->result == -E_REPEAT) + return LOOKUP_REST; + + h->result = zload_ra(active, h->ra_info); + if (h->result) + return LOOKUP_DONE; + + /* sanity checks */ + if (sanity_check(h)) { + zrelse(active); + return LOOKUP_DONE; + } + + /* check that key of leftmost item in the @active is the same as in + * its parent */ + if (ldkeyset && !node_is_empty(active) && + !keyeq(leftmost_key_in_node(active, &key), &ldkey)) { + warning("vs-3533", "Keys are inconsistent. 
Fsck?"); + reiser4_print_key("inparent", &ldkey); + reiser4_print_key("inchild", &key); + h->result = RETERR(-EIO); + zrelse(active); + return LOOKUP_DONE; + } + + if (h->object != NULL) + handle_vroot(h->object, active); + + ret = cbk_node_lookup(h); + + /* h->active_lh->node might change, but active is yet to be zrelsed */ + zrelse(active); + + return ret; + +fail_or_restart: + if (h->result == -E_DEADLOCK) + return LOOKUP_REST; + return LOOKUP_DONE; +} + +#if REISER4_DEBUG +/* check left and right delimiting keys of a znode */ +void check_dkeys(znode * node) +{ + znode *left; + znode *right; + + read_lock_tree(current_tree); + read_lock_dk(current_tree); + + assert("vs-1710", znode_is_any_locked(node)); + assert("vs-1197", + !keygt(znode_get_ld_key(node), znode_get_rd_key(node))); + + left = node->left; + right = node->right; + + if (ZF_ISSET(node, JNODE_LEFT_CONNECTED) && ZF_ISSET(node, JNODE_DKSET) + && left != NULL && ZF_ISSET(left, JNODE_DKSET)) + /* check left neighbor. Note that left neighbor is not locked, + so it might get wrong delimiting keys therefore */ + assert("vs-1198", + (keyeq(znode_get_rd_key(left), znode_get_ld_key(node)) + || ZF_ISSET(left, JNODE_HEARD_BANSHEE))); + + if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) && ZF_ISSET(node, JNODE_DKSET) + && right != NULL && ZF_ISSET(right, JNODE_DKSET)) + /* check right neighbor. Note that right neighbor is not + locked, so it might get wrong delimiting keys therefore */ + assert("vs-1199", + (keyeq(znode_get_rd_key(node), znode_get_ld_key(right)) + || ZF_ISSET(right, JNODE_HEARD_BANSHEE))); + + read_unlock_dk(current_tree); + read_unlock_tree(current_tree); +} +#endif + +/* true if @key is left delimiting key of @node */ +static int key_is_ld(znode * node, const reiser4_key * key) +{ + int ld; + + assert("nikita-1716", node != NULL); + assert("nikita-1758", key != NULL); + + read_lock_dk(znode_get_tree(node)); + assert("nikita-1759", znode_contains_key(node, key)); + ld = keyeq(znode_get_ld_key(node), key); + read_unlock_dk(znode_get_tree(node)); + return ld; +} + +/* Process one node during tree traversal. + + This is called by cbk_level_lookup(). 
*/ +static level_lookup_result cbk_node_lookup(cbk_handle * h/* search handle */) +{ + /* node plugin of @active */ + node_plugin *nplug; + /* item plugin of item that was found */ + item_plugin *iplug; + /* search bias */ + lookup_bias node_bias; + /* node we are operating upon */ + znode *active; + /* tree we are searching in */ + reiser4_tree *tree; + /* result */ + int result; + + assert("nikita-379", h != NULL); + + active = h->active_lh->node; + tree = h->tree; + + nplug = active->nplug; + assert("nikita-380", nplug != NULL); + + ON_DEBUG(check_dkeys(active)); + + /* return item from "active" node with maximal key not greater than + "key" */ + node_bias = h->bias; + result = nplug->lookup(active, h->key, node_bias, h->coord); + if (unlikely(result != NS_FOUND && result != NS_NOT_FOUND)) { + /* error occurred */ + h->result = result; + return LOOKUP_DONE; + } + if (h->level == h->stop_level) { + /* welcome to the stop level */ + assert("nikita-381", h->coord->node == active); + if (result == NS_FOUND) { + /* success of tree lookup */ + if (!(h->flags & CBK_UNIQUE) + && key_is_ld(active, h->key)) + return search_to_left(h); + else + h->result = CBK_COORD_FOUND; + } else { + h->result = CBK_COORD_NOTFOUND; + } + if (!(h->flags & CBK_IN_CACHE)) + cbk_cache_add(active); + return LOOKUP_DONE; + } + + if (h->level > TWIG_LEVEL && result == NS_NOT_FOUND) { + h->error = "not found on internal node"; + h->result = result; + return LOOKUP_DONE; + } + + assert("vs-361", h->level > h->stop_level); + + if (handle_eottl(h, &result)) { + assert("vs-1674", (result == LOOKUP_DONE || + result == LOOKUP_REST)); + return result; + } + + /* go down to next level */ + check_me("vs-12", zload(h->coord->node) == 0); + assert("nikita-2116", item_is_internal(h->coord)); + iplug = item_plugin_by_coord(h->coord); + iplug->s.internal.down_link(h->coord, h->key, &h->block); + zrelse(h->coord->node); + --h->level; + return LOOKUP_CONT; /* continue */ +} + +/* scan cbk_cache slots looking for a match for @h */ +static int cbk_cache_scan_slots(cbk_handle * h/* cbk handle */) +{ + level_lookup_result llr; + znode *node; + reiser4_tree *tree; + cbk_cache_slot *slot; + cbk_cache *cache; + tree_level level; + int isunique; + const reiser4_key *key; + int result; + + assert("nikita-1317", h != NULL); + assert("nikita-1315", h->tree != NULL); + assert("nikita-1316", h->key != NULL); + + tree = h->tree; + cache = &tree->cbk_cache; + if (cache->nr_slots == 0) + /* size of cbk cache was set to 0 by mount time option. */ + return RETERR(-ENOENT); + + assert("nikita-2474", cbk_cache_invariant(cache)); + node = NULL; /* to keep gcc happy */ + level = h->level; + key = h->key; + isunique = h->flags & CBK_UNIQUE; + result = RETERR(-ENOENT); + + /* + * this is time-critical function and dragons had, hence, been settled + * here. + * + * Loop below scans cbk cache slots trying to find matching node with + * suitable range of delimiting keys and located at the h->level. + * + * Scan is done under cbk cache spin lock that protects slot->node + * pointers. If suitable node is found we want to pin it in + * memory. But slot->node can point to the node with x_count 0 + * (unreferenced). Such node can be recycled at any moment, or can + * already be in the process of being recycled (within jput()). + * + * As we found node in the cbk cache, it means that jput() hasn't yet + * called cbk_cache_invalidate(). + * + * We acquire reference to the node without holding tree lock, and + * later, check node's RIP bit. 
This avoids races with jput(). + */ + + rcu_read_lock(); + read_lock(&((cbk_cache *)cache)->guard); + + slot = list_entry(cache->lru.next, cbk_cache_slot, lru); + slot = list_entry(slot->lru.prev, cbk_cache_slot, lru); + BUG_ON(&slot->lru != &cache->lru);/*????*/ + while (1) { + + slot = list_entry(slot->lru.next, cbk_cache_slot, lru); + + if (&cache->lru != &slot->lru) + node = slot->node; + else + node = NULL; + + if (unlikely(node == NULL)) + break; + + /* + * this is (hopefully) the only place in the code where we are + * working with delimiting keys without holding dk lock. This + * is fine here, because this is only "guess" anyway---keys + * are rechecked under dk lock below. + */ + if (znode_get_level(node) == level && + /* reiser4_min_key < key < reiser4_max_key */ + znode_contains_key_strict(node, key, isunique)) { + zref(node); + result = 0; + spin_lock_prefetch(&tree->tree_lock); + break; + } + } + read_unlock(&((cbk_cache *)cache)->guard); + + assert("nikita-2475", cbk_cache_invariant(cache)); + + if (unlikely(result == 0 && ZF_ISSET(node, JNODE_RIP))) + result = -ENOENT; + + rcu_read_unlock(); + + if (result != 0) { + h->result = CBK_COORD_NOTFOUND; + return RETERR(-ENOENT); + } + + result = + longterm_lock_znode(h->active_lh, node, cbk_lock_mode(level, h), + ZNODE_LOCK_LOPRI); + zput(node); + if (result != 0) + return result; + result = zload(node); + if (result != 0) + return result; + + /* recheck keys */ + read_lock_dk(tree); + result = (znode_contains_key_strict(node, key, isunique) && + !ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + read_unlock_dk(tree); + if (result) { + /* do lookup inside node */ + llr = cbk_node_lookup(h); + /* if cbk_node_lookup() wandered to another node (due to eottl + or non-unique keys), adjust @node */ + /*node = h->active_lh->node; */ + + if (llr != LOOKUP_DONE) { + /* restart or continue on the next level */ + result = RETERR(-ENOENT); + } else if (IS_CBKERR(h->result)) + /* io or oom */ + result = RETERR(-ENOENT); + else { + /* good. Either item found or definitely not found. */ + result = 0; + + write_lock(&(cache->guard)); + if (slot->node == h->active_lh->node) { + /* if this node is still in cbk cache---move + its slot to the head of the LRU list. */ + list_move(&slot->lru, &cache->lru); + } + write_unlock(&(cache->guard)); + } + } else { + /* race. While this thread was waiting for the lock, node was + rebalanced and item we are looking for, shifted out of it + (if it ever was here). + + Continuing scanning is almost hopeless: node key range was + moved to, is almost certainly at the beginning of the LRU + list at this time, because it's hot, but restarting + scanning from the very beginning is complex. Just return, + so that cbk() will be performed. This is not that + important, because such races should be rare. Are they? + */ + result = RETERR(-ENOENT); /* -ERAUGHT */ + } + zrelse(node); + assert("nikita-2476", cbk_cache_invariant(cache)); + return result; +} + +/* look for item with given key in the coord cache + + This function, called by coord_by_key(), scans "coord cache" (&cbk_cache) + which is a small LRU list of znodes accessed lately. For each znode in + znode in this list, it checks whether key we are looking for fits into key + range covered by this node. If so, and in addition, node lies at allowed + level (this is to handle extents on a twig level), node is locked, and + lookup inside it is performed. + + we need a measurement of the cost of this cache search compared to the cost + of coord_by_key. 
+
+*/
+static int cbk_cache_search(cbk_handle * h/* cbk handle */)
+{
+	int result = 0;
+	tree_level level;
+
+	/* add CBK_IN_CACHE to the handle flags. This means that
+	 * cbk_node_lookup() assumes that cbk_cache is scanned and would add
+	 * found node to the cache. */
+	h->flags |= CBK_IN_CACHE;
+	for (level = h->stop_level; level <= h->lock_level; ++level) {
+		h->level = level;
+		result = cbk_cache_scan_slots(h);
+		if (result != 0) {
+			done_lh(h->active_lh);
+			done_lh(h->parent_lh);
+		} else {
+			assert("nikita-1319", !IS_CBKERR(h->result));
+			break;
+		}
+	}
+	h->flags &= ~CBK_IN_CACHE;
+	return result;
+}
+
+/* type of lock we want to obtain during tree traversal. On the stop level
+   we want the type of lock the user asked for, on upper levels: a read lock. */
+znode_lock_mode cbk_lock_mode(tree_level level, cbk_handle * h)
+{
+	assert("nikita-382", h != NULL);
+
+	return (level <= h->lock_level) ? h->lock_mode : ZNODE_READ_LOCK;
+}
+
+/* update outdated delimiting keys */
+static void stale_dk(reiser4_tree * tree, znode * node)
+{
+	znode *right;
+
+	read_lock_tree(tree);
+	write_lock_dk(tree);
+	right = node->right;
+
+	if (ZF_ISSET(node, JNODE_RIGHT_CONNECTED) &&
+	    right && ZF_ISSET(right, JNODE_DKSET) &&
+	    !keyeq(znode_get_rd_key(node), znode_get_ld_key(right)))
+		znode_set_rd_key(node, znode_get_ld_key(right));
+
+	write_unlock_dk(tree);
+	read_unlock_tree(tree);
+}
+
+/* check for possibly outdated delimiting keys, and update them if
+ * necessary. */
+static void update_stale_dk(reiser4_tree * tree, znode * node)
+{
+	znode *right;
+	reiser4_key rd;
+
+	read_lock_tree(tree);
+	read_lock_dk(tree);
+	rd = *znode_get_rd_key(node);
+	right = node->right;
+	if (unlikely(ZF_ISSET(node, JNODE_RIGHT_CONNECTED) &&
+		     right && ZF_ISSET(right, JNODE_DKSET) &&
+		     !keyeq(&rd, znode_get_ld_key(right)))) {
+		assert("nikita-38211", ZF_ISSET(node, JNODE_DKSET));
+		read_unlock_dk(tree);
+		read_unlock_tree(tree);
+		stale_dk(tree, node);
+		return;
+	}
+	read_unlock_dk(tree);
+	read_unlock_tree(tree);
+}
+
+/*
+ * handling of searches for a non-unique key.
+ *
+ * Suppose that we are looking for an item with possibly non-unique key 100.
+ *
+ * Root node contains two pointers: one to a node with left delimiting key 0,
+ * and another to a node with left delimiting key 100. The item we are
+ * interested in may well be in the sub-tree rooted at the first pointer.
+ *
+ * To handle this, search_to_left() is called when the search reaches the stop
+ * level. This function checks whether it is _possible_ that the item we are
+ * looking for is in the left neighbor (this can be done by comparing
+ * delimiting keys) and, if so, tries to lock the left neighbor (this is a low
+ * priority lock, so it can deadlock; tree traversal is simply restarted if it
+ * does) and then checks whether the left neighbor actually contains items
+ * with our key.
+ *
+ * Note that this is done on the stop level only. It is possible to try such
+ * a left-check on each level, but as duplicate keys are supposed to be rare
+ * (very unlikely that more than one node is completely filled with items with
+ * duplicate keys), it is cheaper to scan to the left on the stop level once.
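+ *
+ * Editor's note (illustrative sketch, not part of the original comment):
+ *
+ *	root:      [ ld = 0 ]   [ ld = 100 ]
+ *	                |             |
+ *	             leaf A        leaf B
+ *	       ... (100) (100)     (100) (101) ...
+ *
+ * A lookup for key 100 descends into leaf B (the child whose left
+ * delimiting key is 100), but items with the same key may also sit at the
+ * end of leaf A; that is exactly the case search_to_left() handles.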
+ * + */ +static level_lookup_result search_to_left(cbk_handle * h/* search handle */) +{ + level_lookup_result result; + coord_t *coord; + znode *node; + znode *neighbor; + + lock_handle lh; + + assert("nikita-1761", h != NULL); + assert("nikita-1762", h->level == h->stop_level); + + init_lh(&lh); + coord = h->coord; + node = h->active_lh->node; + assert("nikita-1763", coord_is_leftmost_unit(coord)); + + h->result = + reiser4_get_left_neighbor(&lh, node, (int)h->lock_mode, + GN_CAN_USE_UPPER_LEVELS); + neighbor = NULL; + switch (h->result) { + case -E_DEADLOCK: + result = LOOKUP_REST; + break; + case 0:{ + node_plugin *nplug; + coord_t crd; + lookup_bias bias; + + neighbor = lh.node; + h->result = zload(neighbor); + if (h->result != 0) { + result = LOOKUP_DONE; + break; + } + + nplug = neighbor->nplug; + + coord_init_zero(&crd); + bias = h->bias; + h->bias = FIND_EXACT; + h->result = + nplug->lookup(neighbor, h->key, h->bias, &crd); + h->bias = bias; + + if (h->result == NS_NOT_FOUND) { + case -E_NO_NEIGHBOR: + h->result = CBK_COORD_FOUND; + if (!(h->flags & CBK_IN_CACHE)) + cbk_cache_add(node); + default: /* some other error */ + result = LOOKUP_DONE; + } else if (h->result == NS_FOUND) { + read_lock_dk(znode_get_tree(neighbor)); + h->rd_key = *znode_get_ld_key(node); + leftmost_key_in_node(neighbor, &h->ld_key); + read_unlock_dk(znode_get_tree(neighbor)); + h->flags |= CBK_DKSET; + + h->block = *znode_get_block(neighbor); + /* clear coord->node so that cbk_level_lookup() + wouldn't overwrite parent hint in neighbor. + + Parent hint was set up by + reiser4_get_left_neighbor() + */ + /* FIXME: why do we have to spinlock here? */ + write_lock_tree(znode_get_tree(neighbor)); + h->coord->node = NULL; + write_unlock_tree(znode_get_tree(neighbor)); + result = LOOKUP_CONT; + } else { + result = LOOKUP_DONE; + } + if (neighbor != NULL) + zrelse(neighbor); + } + } + done_lh(&lh); + return result; +} + +/* debugging aid: return symbolic name of search bias */ +static const char *bias_name(lookup_bias bias/* bias to get name of */) +{ + if (bias == FIND_EXACT) + return "exact"; + else if (bias == FIND_MAX_NOT_MORE_THAN) + return "left-slant"; +/* else if( bias == RIGHT_SLANT_BIAS ) */ +/* return "right-bias"; */ + else { + static char buf[30]; + + sprintf(buf, "unknown: %i", bias); + return buf; + } +} + +#if REISER4_DEBUG +/* debugging aid: print human readable information about @p */ +void print_coord_content(const char *prefix /* prefix to print */ , + coord_t *p/* coord to print */) +{ + reiser4_key key; + + if (p == NULL) { + printk("%s: null\n", prefix); + return; + } + if ((p->node != NULL) && znode_is_loaded(p->node) + && coord_is_existing_item(p)) + printk("%s: data: %p, length: %i\n", prefix, + item_body_by_coord(p), item_length_by_coord(p)); + if (znode_is_loaded(p->node)) { + item_key_by_coord(p, &key); + reiser4_print_key(prefix, &key); + } +} + +/* debugging aid: print human readable information about @block */ +void reiser4_print_address(const char *prefix /* prefix to print */ , + const reiser4_block_nr * block/* block number to print */) +{ + printk("%s: %s\n", prefix, sprint_address(block)); +} +#endif + +/* return string containing human readable representation of @block */ +char *sprint_address(const reiser4_block_nr * + block/* block number to print */) +{ + static char address[30]; + + if (block == NULL) + sprintf(address, "null"); + else if (reiser4_blocknr_is_fake(block)) + sprintf(address, "%llx", (unsigned long long)(*block)); + else + sprintf(address, "%llu", (unsigned 
long long)(*block)); + return address; +} + +/* release parent node during traversal */ +static void put_parent(cbk_handle * h/* search handle */) +{ + assert("nikita-383", h != NULL); + if (h->parent_lh->node != NULL) + longterm_unlock_znode(h->parent_lh); +} + +/* helper function used by coord_by_key(): release reference to parent znode + stored in handle before processing its child. */ +static void hput(cbk_handle * h/* search handle */) +{ + assert("nikita-385", h != NULL); + done_lh(h->parent_lh); + done_lh(h->active_lh); +} + +/* Helper function used by cbk(): update delimiting keys of child node (stored + in h->active_lh->node) using key taken from parent on the parent level. */ +static int setup_delimiting_keys(cbk_handle * h/* search handle */) +{ + znode *active; + reiser4_tree *tree; + + assert("nikita-1088", h != NULL); + + active = h->active_lh->node; + + /* fast check without taking dk lock. This is safe, because + * JNODE_DKSET is never cleared once set. */ + if (!ZF_ISSET(active, JNODE_DKSET)) { + tree = znode_get_tree(active); + write_lock_dk(tree); + if (!ZF_ISSET(active, JNODE_DKSET)) { + znode_set_ld_key(active, &h->ld_key); + znode_set_rd_key(active, &h->rd_key); + ZF_SET(active, JNODE_DKSET); + } + write_unlock_dk(tree); + return 1; + } + return 0; +} + +/* true if @block makes sense for the @tree. Used to detect corrupted node + * pointers */ +static int +block_nr_is_correct(reiser4_block_nr * block /* block number to check */ , + reiser4_tree * tree/* tree to check against */) +{ + assert("nikita-757", block != NULL); + assert("nikita-758", tree != NULL); + + /* check to see if it exceeds the size of the device. */ + return reiser4_blocknr_is_sane_for(tree->super, block); +} + +/* check consistency of fields */ +static int sanity_check(cbk_handle * h/* search handle */) +{ + assert("nikita-384", h != NULL); + + if (h->level < h->stop_level) { + h->error = "Buried under leaves"; + h->result = RETERR(-EIO); + return LOOKUP_DONE; + } else if (!block_nr_is_correct(&h->block, h->tree)) { + h->error = "bad block number"; + h->result = RETERR(-EIO); + return LOOKUP_DONE; + } else + return 0; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/status_flags.c b/fs/reiser4/status_flags.c new file mode 100644 index 000000000000..574005e0677a --- /dev/null +++ b/fs/reiser4/status_flags.c @@ -0,0 +1,180 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Functions that deal with reiser4 status block, query status and update it, + * if needed */ + +#include <linux/bio.h> +#include <linux/highmem.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include "debug.h" +#include "dformat.h" +#include "status_flags.h" +#include "super.h" + +/* This is our end I/O handler that marks page uptodate if IO was successful. + It also unconditionally unlocks the page, so we can see that io was done. + We do not free bio, because we hope to reuse that. */ +static void reiser4_status_endio(struct bio *bio) +{ + if (!bio->bi_status) + SetPageUptodate(bio->bi_io_vec->bv_page); + else { + ClearPageUptodate(bio->bi_io_vec->bv_page); + SetPageError(bio->bi_io_vec->bv_page); + } + unlock_page(bio->bi_io_vec->bv_page); +} + +/* Initialise status code. This is expected to be called from the disk format + code. block paremeter is where status block lives. 
*/ +int reiser4_status_init(reiser4_block_nr block) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct reiser4_status *statuspage; + struct bio *bio; + struct page *page; + + get_super_private(sb)->status_page = NULL; + get_super_private(sb)->status_bio = NULL; + + page = alloc_pages(reiser4_ctx_gfp_mask_get(), 0); + if (!page) + return -ENOMEM; + + bio = bio_alloc(reiser4_ctx_gfp_mask_get(), 1); + if (bio != NULL) { + bio->bi_iter.bi_sector = block * (sb->s_blocksize >> 9); + bio_set_dev(bio, sb->s_bdev); + bio->bi_io_vec[0].bv_page = page; + bio->bi_io_vec[0].bv_len = sb->s_blocksize; + bio->bi_io_vec[0].bv_offset = 0; + bio->bi_vcnt = 1; + bio->bi_iter.bi_size = sb->s_blocksize; + bio->bi_end_io = reiser4_status_endio; + } else { + __free_pages(page, 0); + return -ENOMEM; + } + lock_page(page); + bio_set_op_attrs(bio, READ, 0); + submit_bio(bio); + wait_on_page_locked(page); + if (!PageUptodate(page)) { + warning("green-2007", + "I/O error while tried to read status page\n"); + return -EIO; + } + + statuspage = (struct reiser4_status *)kmap_atomic(page); + if (memcmp + (statuspage->magic, REISER4_STATUS_MAGIC, + sizeof(REISER4_STATUS_MAGIC))) { + /* Magic does not match. */ + kunmap_atomic((char *)statuspage); + warning("green-2008", "Wrong magic in status block\n"); + __free_pages(page, 0); + bio_put(bio); + return -EINVAL; + } + kunmap_atomic((char *)statuspage); + + get_super_private(sb)->status_page = page; + get_super_private(sb)->status_bio = bio; + return 0; +} + +/* Query the status of fs. Returns if the FS can be safely mounted. + Also if "status" and "extended" parameters are given, it will fill + actual parts of status from disk there. */ +int reiser4_status_query(u64 *status, u64 *extended) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct reiser4_status *statuspage; + int retval; + + if (!get_super_private(sb)->status_page) + /* No status page? */ + return REISER4_STATUS_MOUNT_UNKNOWN; + statuspage = (struct reiser4_status *) + kmap_atomic(get_super_private(sb)->status_page); + switch ((long)le64_to_cpu(get_unaligned(&statuspage->status))) { + /* FIXME: this cast is a hack for 32 bit arches to work. */ + case REISER4_STATUS_OK: + retval = REISER4_STATUS_MOUNT_OK; + break; + case REISER4_STATUS_CORRUPTED: + retval = REISER4_STATUS_MOUNT_WARN; + break; + case REISER4_STATUS_DAMAGED: + case REISER4_STATUS_DESTROYED: + case REISER4_STATUS_IOERROR: + retval = REISER4_STATUS_MOUNT_RO; + break; + default: + retval = REISER4_STATUS_MOUNT_UNKNOWN; + break; + } + + if (status) + *status = le64_to_cpu(get_unaligned(&statuspage->status)); + if (extended) + *extended = le64_to_cpu(get_unaligned(&statuspage->extended_status)); + + kunmap_atomic((char *)statuspage); + return retval; +} + +/* This function should be called when something bad happens (e.g. from + reiser4_panic). It fills the status structure and tries to push it to disk.*/ +int reiser4_status_write(__u64 status, __u64 extended_status, char *message) +{ + struct super_block *sb = reiser4_get_current_sb(); + struct reiser4_status *statuspage; + struct bio *bio = get_super_private(sb)->status_bio; + + if (!get_super_private(sb)->status_page) + /* No status page? 
*/ + return -1; + statuspage = (struct reiser4_status *) + kmap_atomic(get_super_private(sb)->status_page); + + put_unaligned(cpu_to_le64(status), &statuspage->status); + put_unaligned(cpu_to_le64(extended_status), &statuspage->extended_status); + strncpy(statuspage->texterror, message, REISER4_TEXTERROR_LEN); + + kunmap_atomic((char *)statuspage); + bio_reset(bio); + bio_set_dev(bio, sb->s_bdev); + bio->bi_io_vec[0].bv_page = get_super_private(sb)->status_page; + bio->bi_io_vec[0].bv_len = sb->s_blocksize; + bio->bi_io_vec[0].bv_offset = 0; + bio->bi_vcnt = 1; + bio->bi_iter.bi_size = sb->s_blocksize; + bio->bi_end_io = reiser4_status_endio; + lock_page(get_super_private(sb)->status_page); /* Safe as nobody should + * touch our page. */ + /* + * We can block now, but we have no other choice anyway + */ + bio_set_op_attrs(bio, WRITE, 0); + submit_bio(bio); + /* + * We do not wait for IO completon + */ + return 0; +} + +/* Frees the page with status and bio structure. Should be called by disk format + * at umount time */ +int reiser4_status_finish(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + + __free_pages(get_super_private(sb)->status_page, 0); + get_super_private(sb)->status_page = NULL; + bio_put(get_super_private(sb)->status_bio); + get_super_private(sb)->status_bio = NULL; + return 0; +} diff --git a/fs/reiser4/status_flags.h b/fs/reiser4/status_flags.h new file mode 100644 index 000000000000..bee9d2ee22ca --- /dev/null +++ b/fs/reiser4/status_flags.h @@ -0,0 +1,47 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Here we declare structures and flags that store reiser4 status on disk. + The status that helps us to find out if the filesystem is valid or if it + contains some critical, or not so critical errors */ + +#if !defined(__REISER4_STATUS_FLAGS_H__) +#define __REISER4_STATUS_FLAGS_H__ + +#include "dformat.h" +/* These are major status flags */ +#define REISER4_STATUS_OK 0 +#define REISER4_STATUS_CORRUPTED 0x1 +#define REISER4_STATUS_DAMAGED 0x2 +#define REISER4_STATUS_DESTROYED 0x4 +#define REISER4_STATUS_IOERROR 0x8 + +/* Return values for reiser4_status_query() */ +#define REISER4_STATUS_MOUNT_OK 0 +#define REISER4_STATUS_MOUNT_WARN 1 +#define REISER4_STATUS_MOUNT_RO 2 +#define REISER4_STATUS_MOUNT_UNKNOWN -1 + +#define REISER4_TEXTERROR_LEN 256 + +#define REISER4_STATUS_MAGIC "ReiSeR4StATusBl" +/* We probably need to keep its size under sector size which is 512 bytes */ +struct reiser4_status { + char magic[16]; + d64 status; /* Current FS state */ + d64 extended_status; /* Any additional info that might have sense in + * addition to "status". E.g. last sector where + * io error happened if status is + * "io error encountered" */ + d64 stacktrace[10]; /* Last ten functional calls made (addresses) */ + char texterror[REISER4_TEXTERROR_LEN]; /* Any error message if + * appropriate, otherwise filled + * with zeroes */ +}; + +int reiser4_status_init(reiser4_block_nr block); +int reiser4_status_query(u64 *status, u64 *extended); +int reiser4_status_write(u64 status, u64 extended_status, char *message); +int reiser4_status_finish(void); + +#endif diff --git a/fs/reiser4/super.c b/fs/reiser4/super.c new file mode 100644 index 000000000000..511b74e8c263 --- /dev/null +++ b/fs/reiser4/super.c @@ -0,0 +1,306 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Super-block manipulations. 
*/ + +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "plugin/security/perm.h" +#include "plugin/space/space_allocator.h" +#include "plugin/plugin.h" +#include "tree.h" +#include "vfs_ops.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block */ + +static __u64 reserved_for_gid(const struct super_block *super, gid_t gid); +static __u64 reserved_for_uid(const struct super_block *super, uid_t uid); +static __u64 reserved_for_root(const struct super_block *super); + +/* Return reiser4-specific part of super block */ +reiser4_super_info_data *get_super_private_nocheck(const struct super_block *super) +{ + return (reiser4_super_info_data *) super->s_fs_info; +} + +/* Return reiser4 fstype: value that is returned in ->f_type field by statfs() + */ +long reiser4_statfs_type(const struct super_block *super UNUSED_ARG) +{ + assert("nikita-448", super != NULL); + assert("nikita-449", is_reiser4_super(super)); + return (long)REISER4_SUPER_MAGIC; +} + +/* functions to read/modify fields of reiser4_super_info_data */ + +/* get number of blocks in file system */ +__u64 reiser4_block_count(const struct super_block *super /* super block + queried */ ) +{ + assert("vs-494", super != NULL); + assert("vs-495", is_reiser4_super(super)); + return get_super_private(super)->block_count; +} + +#if REISER4_DEBUG +/* + * number of blocks in the current file system + */ +__u64 reiser4_current_block_count(void) +{ + return get_current_super_private()->block_count; +} +#endif /* REISER4_DEBUG */ + +/* set number of block in filesystem */ +void reiser4_set_block_count(const struct super_block *super, __u64 nr) +{ + assert("vs-501", super != NULL); + assert("vs-502", is_reiser4_super(super)); + get_super_private(super)->block_count = nr; + /* + * The proper calculation of the reserved space counter (%5 of device + * block counter) we need a 64 bit division which is missing in Linux + * on i386 platform. Because we do not need a precise calculation here + * we can replace a div64 operation by this combination of + * multiplication and shift: 51. / (2^10) == .0498 . + * FIXME: this is a bug. It comes up only for very small filesystems + * which probably are never used. Nevertheless, it is a bug. Number of + * reserved blocks must be not less than maximal number of blocks which + * get grabbed with BA_RESERVED. 
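+	 *
+	 * Editor's note (worked example): for a file system of 1,000,000
+	 * blocks this yields (1000000 * 51) >> 10 = 49804 reserved blocks,
+	 * i.e. about 4.98% of the device, slightly below the intended 5%.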
+ */ + get_super_private(super)->blocks_reserved = ((nr * 51) >> 10); +} + +/* amount of blocks used (allocated for data) in file system */ +__u64 reiser4_data_blocks(const struct super_block *super /* super block + queried */ ) +{ + assert("nikita-452", super != NULL); + assert("nikita-453", is_reiser4_super(super)); + return get_super_private(super)->blocks_used; +} + +/* set number of block used in filesystem */ +void reiser4_set_data_blocks(const struct super_block *super, __u64 nr) +{ + assert("vs-503", super != NULL); + assert("vs-504", is_reiser4_super(super)); + get_super_private(super)->blocks_used = nr; +} + +/* amount of free blocks in file system */ +__u64 reiser4_free_blocks(const struct super_block *super /* super block + queried */ ) +{ + assert("nikita-454", super != NULL); + assert("nikita-455", is_reiser4_super(super)); + return get_super_private(super)->blocks_free; +} + +/* set number of blocks free in filesystem */ +void reiser4_set_free_blocks(const struct super_block *super, __u64 nr) +{ + assert("vs-505", super != NULL); + assert("vs-506", is_reiser4_super(super)); + get_super_private(super)->blocks_free = nr; +} + +/* get mkfs unique identifier */ +__u32 reiser4_mkfs_id(const struct super_block *super /* super block + queried */ ) +{ + assert("vpf-221", super != NULL); + assert("vpf-222", is_reiser4_super(super)); + return get_super_private(super)->mkfs_id; +} + +/* amount of free blocks in file system */ +__u64 reiser4_free_committed_blocks(const struct super_block *super) +{ + assert("vs-497", super != NULL); + assert("vs-498", is_reiser4_super(super)); + return get_super_private(super)->blocks_free_committed; +} + +/* amount of blocks in the file system reserved for @uid and @gid */ +long reiser4_reserved_blocks(const struct super_block *super /* super block + queried */ , + uid_t uid /* user id */ , + gid_t gid/* group id */) +{ + long reserved; + + assert("nikita-456", super != NULL); + assert("nikita-457", is_reiser4_super(super)); + + reserved = 0; + if (REISER4_SUPPORT_GID_SPACE_RESERVATION) + reserved += reserved_for_gid(super, gid); + if (REISER4_SUPPORT_UID_SPACE_RESERVATION) + reserved += reserved_for_uid(super, uid); + if (REISER4_SUPPORT_ROOT_SPACE_RESERVATION && (uid == 0)) + reserved += reserved_for_root(super); + return reserved; +} + +/* get/set value of/to grabbed blocks counter */ +__u64 reiser4_grabbed_blocks(const struct super_block * super) +{ + assert("zam-512", super != NULL); + assert("zam-513", is_reiser4_super(super)); + + return get_super_private(super)->blocks_grabbed; +} + +__u64 reiser4_flush_reserved(const struct super_block *super) +{ + assert("vpf-285", super != NULL); + assert("vpf-286", is_reiser4_super(super)); + + return get_super_private(super)->blocks_flush_reserved; +} + +/* get/set value of/to counter of fake allocated formatted blocks */ +__u64 reiser4_fake_allocated(const struct super_block *super) +{ + assert("zam-516", super != NULL); + assert("zam-517", is_reiser4_super(super)); + + return get_super_private(super)->blocks_fake_allocated; +} + +/* get/set value of/to counter of fake allocated unformatted blocks */ +__u64 reiser4_fake_allocated_unformatted(const struct super_block *super) +{ + assert("zam-516", super != NULL); + assert("zam-517", is_reiser4_super(super)); + + return get_super_private(super)->blocks_fake_allocated_unformatted; +} + +/* get/set value of/to counter of clustered blocks */ +__u64 reiser4_clustered_blocks(const struct super_block *super) +{ + assert("edward-601", super != NULL); + 
assert("edward-602", is_reiser4_super(super)); + + return get_super_private(super)->blocks_clustered; +} + +/* space allocator used by this file system */ +reiser4_space_allocator * reiser4_get_space_allocator(const struct super_block + *super) +{ + assert("nikita-1965", super != NULL); + assert("nikita-1966", is_reiser4_super(super)); + return &get_super_private(super)->space_allocator; +} + +/* return fake inode used to bind formatted nodes in the page cache */ +struct inode *reiser4_get_super_fake(const struct super_block *super) +{ + assert("nikita-1757", super != NULL); + return get_super_private(super)->fake; +} + +/* return fake inode used to bind copied on capture nodes in the page cache */ +struct inode *reiser4_get_cc_fake(const struct super_block *super) +{ + assert("nikita-1757", super != NULL); + return get_super_private(super)->cc; +} + +/* return fake inode used to bind bitmaps and journlal heads */ +struct inode *reiser4_get_bitmap_fake(const struct super_block *super) +{ + assert("nikita-17571", super != NULL); + return get_super_private(super)->bitmap; +} + +/* tree used by this file system */ +reiser4_tree *reiser4_get_tree(const struct super_block *super) +{ + assert("nikita-460", super != NULL); + assert("nikita-461", is_reiser4_super(super)); + return &get_super_private(super)->tree; +} + +/* Check that @super is (looks like) reiser4 super block. This is mainly for + use in assertions. */ +int is_reiser4_super(const struct super_block *super) +{ + return + super != NULL && + get_super_private(super) != NULL && + super->s_op == &(get_super_private(super)->ops.super); +} + +int reiser4_is_set(const struct super_block *super, reiser4_fs_flag f) +{ + return test_bit((int)f, &get_super_private(super)->fs_flags); +} + +/* amount of blocks reserved for given group in file system */ +static __u64 reserved_for_gid(const struct super_block *super UNUSED_ARG, + gid_t gid UNUSED_ARG/* group id */) +{ + return 0; +} + +/* amount of blocks reserved for given user in file system */ +static __u64 reserved_for_uid(const struct super_block *super UNUSED_ARG, + uid_t uid UNUSED_ARG/* user id */) +{ + return 0; +} + +/* amount of blocks reserved for super user in file system */ +static __u64 reserved_for_root(const struct super_block *super UNUSED_ARG) +{ + return 0; +} + +/* + * true if block number @blk makes sense for the file system at @super. + */ +int +reiser4_blocknr_is_sane_for(const struct super_block *super, + const reiser4_block_nr * blk) +{ + reiser4_super_info_data *sbinfo; + + assert("nikita-2957", super != NULL); + assert("nikita-2958", blk != NULL); + + if (reiser4_blocknr_is_fake(blk)) + return 1; + + sbinfo = get_super_private(super); + return *blk < sbinfo->block_count; +} + +#if REISER4_DEBUG +/* + * true, if block number @blk makes sense for the current file system + */ +int reiser4_blocknr_is_sane(const reiser4_block_nr * blk) +{ + return reiser4_blocknr_is_sane_for(reiser4_get_current_sb(), blk); +} +#endif /* REISER4_DEBUG */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/super.h b/fs/reiser4/super.h new file mode 100644 index 000000000000..ecc8973175ea --- /dev/null +++ b/fs/reiser4/super.h @@ -0,0 +1,472 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Super-block functions. See super.c for details. 
*/ + +#if !defined(__REISER4_SUPER_H__) +#define __REISER4_SUPER_H__ + +#include <linux/exportfs.h> + +#include "tree.h" +#include "entd.h" +#include "wander.h" +#include "fsdata.h" +#include "plugin/object.h" +#include "plugin/space/space_allocator.h" + +/* + * Flush algorithms parameters. + */ +struct flush_params { + unsigned relocate_threshold; + unsigned relocate_distance; + unsigned written_threshold; + unsigned scan_maxnodes; +}; + +typedef enum { + /* + * True if this file system doesn't support hard-links (multiple names) + * for directories: this is default UNIX behavior. + * + * If hard-links on directoires are not allowed, file system is Acyclic + * Directed Graph (modulo dot, and dotdot, of course). + * + * This is used by reiser4_link(). + */ + REISER4_ADG = 0, + /* + * set if all nodes in internal tree have the same node layout plugin. + * If so, znode_guess_plugin() will return tree->node_plugin in stead + * of guessing plugin by plugin id stored in the node. + */ + REISER4_ONE_NODE_PLUGIN = 1, + /* if set, bsd gid assignment is supported. */ + REISER4_BSD_GID = 2, + /* [mac]_time are 32 bit in inode */ + REISER4_32_BIT_TIMES = 3, + /* load all bitmap blocks at mount time */ + REISER4_DONT_LOAD_BITMAP = 5, + /* enforce atomicity during write(2) */ + REISER4_ATOMIC_WRITE = 6, + /* enable issuing of discard requests */ + REISER4_DISCARD = 8, + /* disable hole punching at flush time */ + REISER4_DONT_PUNCH_HOLES = 9 +} reiser4_fs_flag; + +/* + * VFS related operation vectors. + */ +struct object_ops { + struct super_operations super; + struct dentry_operations dentry; + struct export_operations export; +}; + +/* reiser4-specific part of super block + + Locking + + Fields immutable after mount: + + ->oid* + ->space* + ->default_[ug]id + ->mkfs_id + ->trace_flags + ->debug_flags + ->fs_flags + ->df_plug + ->optimal_io_size + ->plug + ->flush + ->u (bad name) + ->txnmgr + ->ra_params + ->fsuid + ->journal_header + ->journal_footer + + Fields protected by ->lnode_guard + + ->lnode_htable + + Fields protected by per-super block spin lock + + ->block_count + ->blocks_used + ->blocks_free + ->blocks_free_committed + ->blocks_grabbed + ->blocks_fake_allocated_unformatted + ->blocks_fake_allocated + ->blocks_flush_reserved + ->eflushed + ->blocknr_hint_default + + After journal replaying during mount, + + ->last_committed_tx + + is protected by ->tmgr.commit_mutex + + Invariants involving this data-type: + + [sb-block-counts] + [sb-grabbed] + [sb-fake-allocated] +*/ +struct reiser4_super_info_data { + /* + * guard spinlock which protects reiser4 super block fields (currently + * blocks_free, blocks_free_committed) + */ + spinlock_t guard; + + /* next oid that will be returned by oid_allocate() */ + oid_t next_to_use; + /* total number of used oids */ + oid_t oids_in_use; + + /* space manager plugin */ + reiser4_space_allocator space_allocator; + + /* transaction model */ + reiser4_txmod_id txmod; + + /* reiser4 internal tree */ + reiser4_tree tree; + + /* + * default user id used for light-weight files without their own + * stat-data. + */ + __u32 default_uid; + + /* + * default group id used for light-weight files without their own + * stat-data. + */ + __u32 default_gid; + + /* mkfs identifier generated at mkfs time. */ + __u32 mkfs_id; + /* amount of blocks in a file system */ + __u64 block_count; + + /* inviolable reserve */ + __u64 blocks_reserved; + + /* amount of blocks used by file system data and meta-data. */ + __u64 blocks_used; + + /* + * amount of free blocks. 
This is "working" free blocks counter. It is + * like "working" bitmap, please see block_alloc.c for description. + */ + __u64 blocks_free; + + /* + * free block count for fs committed state. This is "commit" version of + * free block counter. + */ + __u64 blocks_free_committed; + + /* + * number of blocks reserved for further allocation, for all + * threads. + */ + __u64 blocks_grabbed; + + /* number of fake allocated unformatted blocks in tree. */ + __u64 blocks_fake_allocated_unformatted; + + /* number of fake allocated formatted blocks in tree. */ + __u64 blocks_fake_allocated; + + /* number of blocks reserved for flush operations. */ + __u64 blocks_flush_reserved; + + /* number of blocks reserved for cluster operations. */ + __u64 blocks_clustered; + + /* unique file-system identifier */ + __u32 fsuid; + + /* On-disk format version. If does not equal to the disk_format + plugin version, some format updates (e.g. enlarging plugin + set, etc) may have place on mount. */ + int version; + + /* file-system wide flags. See reiser4_fs_flag enum */ + unsigned long fs_flags; + + /* transaction manager */ + txn_mgr tmgr; + + /* ent thread */ + entd_context entd; + + /* fake inode used to bind formatted nodes */ + struct inode *fake; + /* inode used to bind bitmaps (and journal heads) */ + struct inode *bitmap; + /* inode used to bind copied on capture nodes */ + struct inode *cc; + + /* disk layout plugin */ + disk_format_plugin *df_plug; + + /* disk layout specific part of reiser4 super info data */ + union { + format40_super_info format40; + } u; + + /* value we return in st_blksize on stat(2) */ + unsigned long optimal_io_size; + + /* parameters for the flush algorithm */ + struct flush_params flush; + + /* pointers to jnodes for journal header and footer */ + jnode *journal_header; + jnode *journal_footer; + + journal_location jloc; + + /* head block number of last committed transaction */ + __u64 last_committed_tx; + + /* + * we remember last written location for using as a hint for new block + * allocation + */ + __u64 blocknr_hint_default; + + /* committed number of files (oid allocator state variable ) */ + __u64 nr_files_committed; + + struct formatted_ra_params ra_params; + + /* + * A mutex for serializing cut tree operation if out-of-free-space: + * the only one cut_tree thread is allowed to grab space from reserved + * area (it is 5% of disk space) + */ + struct mutex delete_mutex; + /* task owning ->delete_mutex */ + struct task_struct *delete_mutex_owner; + + /* Diskmap's blocknumber */ + __u64 diskmap_block; + + /* What to do in case of error */ + int onerror; + + /* operations for objects on this file system */ + struct object_ops ops; + + /* + * structure to maintain d_cursors. See plugin/file_ops_readdir.c for + * more details + */ + struct d_cursor_info d_info; + struct crypto_shash *csum_tfm; + +#ifdef CONFIG_REISER4_BADBLOCKS + /* Alternative master superblock offset (in bytes) */ + unsigned long altsuper; +#endif + struct repacker *repacker; + struct page *status_page; + struct bio *status_bio; + +#if REISER4_DEBUG + /* + * minimum used blocks value (includes super blocks, bitmap blocks and + * other fs reserved areas), depends on fs format and fs size. + */ + __u64 min_blocks_used; + + /* + * when debugging is on, all jnodes (including znodes, bitmaps, etc.) + * are kept on a list anchored at sbinfo->all_jnodes. This list is + * protected by sbinfo->all_guard spin lock. 
This lock should be taken + * with _irq modifier, because it is also modified from interrupt + * contexts (by RCU). + */ + spinlock_t all_guard; + /* list of all jnodes */ + struct list_head all_jnodes; +#endif + struct dentry *debugfs_root; +}; + +extern reiser4_super_info_data *get_super_private_nocheck(const struct + super_block * super); + +/* Return reiser4-specific part of super block */ +static inline reiser4_super_info_data *get_super_private(const struct + super_block * super) +{ + assert("nikita-447", super != NULL); + + return (reiser4_super_info_data *) super->s_fs_info; +} + +/* get ent context for the @super */ +static inline entd_context *get_entd_context(struct super_block *super) +{ + return &get_super_private(super)->entd; +} + +/* "Current" super-block: main super block used during current system + call. Reference to this super block is stored in reiser4_context. */ +static inline struct super_block *reiser4_get_current_sb(void) +{ + return get_current_context()->super; +} + +/* Reiser4-specific part of "current" super-block: main super block used + during current system call. Reference to this super block is stored in + reiser4_context. */ +static inline reiser4_super_info_data *get_current_super_private(void) +{ + return get_super_private(reiser4_get_current_sb()); +} + +static inline struct formatted_ra_params *get_current_super_ra_params(void) +{ + return &(get_current_super_private()->ra_params); +} + +/* + * true, if file system on @super is read-only + */ +static inline int rofs_super(struct super_block *super) +{ + return super->s_flags & MS_RDONLY; +} + +/* + * true, if @tree represents read-only file system + */ +static inline int rofs_tree(reiser4_tree * tree) +{ + return rofs_super(tree->super); +} + +/* + * true, if file system where @inode lives on, is read-only + */ +static inline int rofs_inode(struct inode *inode) +{ + return rofs_super(inode->i_sb); +} + +/* + * true, if file system where @node lives on, is read-only + */ +static inline int rofs_jnode(jnode * node) +{ + return rofs_tree(jnode_get_tree(node)); +} + +extern __u64 reiser4_current_block_count(void); + +extern void build_object_ops(struct super_block *super, struct object_ops *ops); + +#define REISER4_SUPER_MAGIC 0x52345362 /* (*(__u32 *)"R4Sb"); */ + +static inline void spin_lock_reiser4_super(reiser4_super_info_data *sbinfo) +{ + spin_lock(&(sbinfo->guard)); +} + +static inline void spin_unlock_reiser4_super(reiser4_super_info_data *sbinfo) +{ + assert_spin_locked(&(sbinfo->guard)); + spin_unlock(&(sbinfo->guard)); +} + +extern __u64 reiser4_flush_reserved(const struct super_block *); +extern int reiser4_is_set(const struct super_block *super, reiser4_fs_flag f); +extern long reiser4_statfs_type(const struct super_block *super); +extern __u64 reiser4_block_count(const struct super_block *super); +extern void reiser4_set_block_count(const struct super_block *super, __u64 nr); +extern __u64 reiser4_data_blocks(const struct super_block *super); +extern void reiser4_set_data_blocks(const struct super_block *super, __u64 nr); +extern __u64 reiser4_free_blocks(const struct super_block *super); +extern void reiser4_set_free_blocks(const struct super_block *super, __u64 nr); +extern __u32 reiser4_mkfs_id(const struct super_block *super); + +extern __u64 reiser4_free_committed_blocks(const struct super_block *super); + +extern __u64 reiser4_grabbed_blocks(const struct super_block *); +extern __u64 reiser4_fake_allocated(const struct super_block *); +extern __u64 
reiser4_fake_allocated_unformatted(const struct super_block *); +extern __u64 reiser4_clustered_blocks(const struct super_block *); + +extern long reiser4_reserved_blocks(const struct super_block *super, uid_t uid, + gid_t gid); + +extern reiser4_space_allocator * +reiser4_get_space_allocator(const struct super_block *super); +extern reiser4_oid_allocator * +reiser4_get_oid_allocator(const struct super_block *super); +extern struct inode *reiser4_get_super_fake(const struct super_block *super); +extern struct inode *reiser4_get_cc_fake(const struct super_block *super); +extern struct inode *reiser4_get_bitmap_fake(const struct super_block *super); +extern reiser4_tree *reiser4_get_tree(const struct super_block *super); +extern int is_reiser4_super(const struct super_block *super); + +extern int reiser4_blocknr_is_sane(const reiser4_block_nr * blk); +extern int reiser4_blocknr_is_sane_for(const struct super_block *super, + const reiser4_block_nr * blk); +extern int reiser4_fill_super(struct super_block *s, void *data, int silent); +extern int reiser4_done_super(struct super_block *s); + +/* step of fill super */ +extern int reiser4_init_fs_info(struct super_block *); +extern void reiser4_done_fs_info(struct super_block *); +extern int reiser4_init_super_data(struct super_block *, char *opt_string); +extern int reiser4_init_read_super(struct super_block *, int silent); +extern int reiser4_init_root_inode(struct super_block *); +extern reiser4_plugin *get_default_plugin(pset_member memb); + +/* Maximal possible object id. */ +#define ABSOLUTE_MAX_OID ((oid_t)~0) + +#define OIDS_RESERVED (1 << 16) +int oid_init_allocator(struct super_block *, oid_t nr_files, oid_t next); +oid_t oid_allocate(struct super_block *); +int oid_release(struct super_block *, oid_t); +oid_t oid_next(const struct super_block *); +void oid_count_allocated(void); +void oid_count_released(void); +long oids_used(const struct super_block *); + +#if REISER4_DEBUG +void print_fs_info(const char *prefix, const struct super_block *); +#endif + +extern void destroy_reiser4_cache(struct kmem_cache **); + +extern struct super_operations reiser4_super_operations; +extern struct export_operations reiser4_export_operations; +extern struct dentry_operations reiser4_dentry_operations; + +/* __REISER4_SUPER_H__ */ +#endif + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 120 + * End: + */ diff --git a/fs/reiser4/super_ops.c b/fs/reiser4/super_ops.c new file mode 100644 index 000000000000..80ae510aed91 --- /dev/null +++ b/fs/reiser4/super_ops.c @@ -0,0 +1,783 @@ +/* Copyright 2005 by Hans Reiser, licensing governed by + * reiser4/README */ + +#include "inode.h" +#include "page_cache.h" +#include "ktxnmgrd.h" +#include "flush.h" +#include "safe_link.h" +#include "checksum.h" + +#include <linux/vfs.h> +#include <linux/writeback.h> +#include <linux/mount.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/backing-dev.h> +#include <linux/module.h> + +/* slab cache for inodes */ +static struct kmem_cache *inode_cache; + +static struct dentry *reiser4_debugfs_root = NULL; + +/** + * init_once - constructor for reiser4 inodes + * @cache: cache @obj belongs to + * @obj: inode to be initialized + * + * Initialization function to be called when new page is allocated by reiser4 + * inode cache. It is set on inode cache creation. 
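+ *
+ * Editor's note: as with any slab constructor, init_once() runs only when
+ * an object is first set up in the cache, not on every allocation; fields
+ * that must be fresh for every new inode are (re)initialized in
+ * reiser4_alloc_inode() below instead.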
+ */
+static void init_once(void *obj)
+{
+	struct reiser4_inode_object *info;
+
+	info = obj;
+
+	/* initialize vfs inode */
+	inode_init_once(&info->vfs_inode);
+
+	/*
+	 * initialize reiser4 specific part of the inode.
+	 * NOTE-NIKITA add here initializations for locks, list heads,
+	 * etc. that will be added to our private inode part.
+	 */
+	INIT_LIST_HEAD(get_readdir_list(&info->vfs_inode));
+	init_rwsem(&info->p.conv_sem);
+	/* init semaphore which is used during inode loading */
+	loading_init_once(&info->p);
+	INIT_RADIX_TREE(jnode_tree_by_reiser4_inode(&info->p),
+			GFP_ATOMIC);
+#if REISER4_DEBUG
+	info->p.nr_jnodes = 0;
+#endif
+}
+
+/**
+ * init_inodes - create inode cache
+ *
+ * Initializes slab cache of inodes. It is part of reiser4 module initialization.
+ */
+static int init_inodes(void)
+{
+	inode_cache = kmem_cache_create("reiser4_inode",
+					sizeof(struct reiser4_inode_object),
+					0,
+					SLAB_HWCACHE_ALIGN |
+					SLAB_RECLAIM_ACCOUNT, init_once);
+	if (inode_cache == NULL)
+		return RETERR(-ENOMEM);
+	return 0;
+}
+
+/**
+ * done_inodes - delete inode cache
+ *
+ * This is called on reiser4 module unloading or system shutdown.
+ */
+static void done_inodes(void)
+{
+	destroy_reiser4_cache(&inode_cache);
+}
+
+/**
+ * reiser4_alloc_inode - alloc_inode of super operations
+ * @super: super block new inode is allocated for
+ *
+ * Allocates new inode, initializes reiser4 specific part of it.
+ */
+static struct inode *reiser4_alloc_inode(struct super_block *super)
+{
+	struct reiser4_inode_object *obj;
+
+	assert("nikita-1696", super != NULL);
+	obj = kmem_cache_alloc(inode_cache, reiser4_ctx_gfp_mask_get());
+	if (obj != NULL) {
+		reiser4_inode *info;
+
+		info = &obj->p;
+
+		info->pset = plugin_set_get_empty();
+		info->hset = plugin_set_get_empty();
+		info->extmask = 0;
+		info->locality_id = 0ull;
+		info->plugin_mask = 0;
+		info->heir_mask = 0;
+#if !REISER4_INO_IS_OID
+		info->oid_hi = 0;
+#endif
+		reiser4_seal_init(&info->sd_seal, NULL, NULL);
+		coord_init_invalid(&info->sd_coord, NULL);
+		info->flags = 0;
+		spin_lock_init(&info->guard);
+		/* this deals with info's loading semaphore */
+		loading_alloc(info);
+		info->vroot = UBER_TREE_ADDR;
+		return &obj->vfs_inode;
+	} else
+		return NULL;
+}
+
+/**
+ * reiser4_destroy_inode - destroy_inode of super operations
+ * @inode: inode being destroyed
+ *
+ * Puts reiser4 specific portion of inode, frees memory occupied by inode.
+ */
+static void reiser4_destroy_inode(struct inode *inode)
+{
+	reiser4_inode *info;
+
+	info = reiser4_inode_data(inode);
+
+	assert("vs-1220", inode_has_no_jnodes(info));
+
+	if (!is_bad_inode(inode) && is_inode_loaded(inode)) {
+		file_plugin *fplug = inode_file_plugin(inode);
+		if (fplug->destroy_inode != NULL)
+			fplug->destroy_inode(inode);
+	}
+	reiser4_dispose_cursors(inode);
+	if (info->pset)
+		plugin_set_put(info->pset);
+	if (info->hset)
+		plugin_set_put(info->hset);
+
+	/*
+	 * cannot add a similar assertion about ->i_list as prune_icache
+	 * returns inodes into the slab with dangling ->list.{next,prev}.
+	 * This is safe, because they are re-initialized in the new_inode().
+ */ + assert("nikita-2895", hlist_empty(&inode->i_dentry)); + assert("nikita-2896", hlist_unhashed(&inode->i_hash)); + assert("nikita-2898", list_empty_careful(get_readdir_list(inode))); + + /* this deals with info's loading semaphore */ + loading_destroy(info); + + kmem_cache_free(inode_cache, + container_of(info, struct reiser4_inode_object, p)); +} + +/** + * reiser4_dirty_inode - dirty_inode of super operations + * @inode: inode being dirtied + * + * Updates stat data. + */ +static void reiser4_dirty_inode(struct inode *inode, int flags) +{ + int result; + reiser4_context *ctx; + + if (!is_in_reiser4_context()) + return; + assert("edward-1606", !IS_RDONLY(inode)); + assert("edward-1607", + (inode_file_plugin(inode)->estimate.update(inode) <= + get_current_context()->grabbed_blocks)); + + ctx = get_current_context(); + if (ctx->locked_page) + unlock_page(ctx->locked_page); + + result = reiser4_update_sd(inode); + + if (ctx->locked_page) + lock_page(ctx->locked_page); + if (result) + warning("edward-1605", "failed to dirty inode for %llu: %d", + get_inode_oid(inode), result); +} + +/** + * ->evict_inode() of super operations + * @inode: inode to delete + * + * Calls file plugin's delete_object method to delete object items from + * filesystem tree and calls clear_inode(). + */ +static void reiser4_evict_inode(struct inode *inode) +{ + reiser4_context *ctx; + file_plugin *fplug; + + ctx = reiser4_init_context(inode->i_sb); + if (IS_ERR(ctx)) { + warning("vs-15", "failed to init context"); + return; + } + + if (inode->i_nlink == 0 && is_inode_loaded(inode)) { + fplug = inode_file_plugin(inode); + if (fplug != NULL && fplug->delete_object != NULL) + fplug->delete_object(inode); + } + + truncate_inode_pages_final(&inode->i_data); + inode->i_blocks = 0; + clear_inode(inode); + reiser4_exit_context(ctx); +} + +/** + * reiser4_put_super - put_super of super operations + * @super: super block to free + * + * Stops daemons, release resources, umounts in short. + */ +static void reiser4_put_super(struct super_block *super) +{ + reiser4_super_info_data *sbinfo; + reiser4_context *ctx; + + sbinfo = get_super_private(super); + assert("vs-1699", sbinfo); + + debugfs_remove(sbinfo->tmgr.debugfs_atom_count); + debugfs_remove(sbinfo->tmgr.debugfs_id_count); + debugfs_remove(sbinfo->debugfs_root); + + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) { + warning("vs-17", "failed to init context"); + return; + } + + /* have disk format plugin to free its resources */ + if (get_super_private(super)->df_plug->release) + get_super_private(super)->df_plug->release(super); + + reiser4_done_formatted_fake(super); + reiser4_done_csum_tfm(sbinfo->csum_tfm); + + /* stop daemons: ktxnmgr and entd */ + reiser4_done_entd(super); + reiser4_done_ktxnmgrd(super); + reiser4_done_txnmgr(&sbinfo->tmgr); + + assert("edward-1890", list_empty(&get_super_private(super)->all_jnodes)); + assert("edward-1891", get_current_context()->trans->atom == NULL); + reiser4_check_block_counters(super); + + reiser4_exit_context(ctx); + reiser4_done_fs_info(super); +} + +/** + * reiser4_statfs - statfs of super operations + * @super: super block of file system in queried + * @stafs: buffer to fill with statistics + * + * Returns information about filesystem. 
+ */ +static int reiser4_statfs(struct dentry *dentry, struct kstatfs *statfs) +{ + sector_t total; + sector_t reserved; + sector_t free; + sector_t forroot; + sector_t deleted; + reiser4_context *ctx; + struct super_block *super = dentry->d_sb; + + assert("nikita-408", super != NULL); + assert("nikita-409", statfs != NULL); + + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + statfs->f_type = reiser4_statfs_type(super); + statfs->f_bsize = super->s_blocksize; + + /* + * 5% of total block space is reserved. This is needed for flush and + * for truncates (so that we are able to perform truncate/unlink even + * on the otherwise completely full file system). If this reservation + * is hidden from statfs(2), users will mistakenly guess that they + * have enough free space to complete some operation, which is + * frustrating. + * + * Another possible solution is to subtract ->blocks_reserved from + * ->f_bfree, but changing available space seems less intrusive than + * letting user to see 5% of disk space to be used directly after + * mkfs. + */ + total = reiser4_block_count(super); + reserved = get_super_private(super)->blocks_reserved; + deleted = txnmgr_count_deleted_blocks(); + free = reiser4_free_blocks(super) + deleted; + forroot = reiser4_reserved_blocks(super, 0, 0); + + /* + * These counters may be in inconsistent state because we take the + * values without keeping any global spinlock. Here we do a sanity + * check that free block counter does not exceed the number of all + * blocks. + */ + if (free > total) + free = total; + statfs->f_blocks = total - reserved; + /* make sure statfs->f_bfree is never larger than statfs->f_blocks */ + if (free > reserved) + free -= reserved; + else + free = 0; + statfs->f_bfree = free; + + if (free > forroot) + free -= forroot; + else + free = 0; + statfs->f_bavail = free; + + statfs->f_files = 0; + statfs->f_ffree = 0; + + /* maximal acceptable name length depends on directory plugin. */ + assert("nikita-3351", super->s_root->d_inode != NULL); + statfs->f_namelen = reiser4_max_filename_len(super->s_root->d_inode); + reiser4_exit_context(ctx); + return 0; +} + +/** + * reiser4_writeback_inodes - writeback_inodes of super operations + * @super: + * @wb: + * @wbc: + * + * This method is called by background and non-backgound writeback. + * Reiser4's implementation uses generic_writeback_sb_inodes to call + * reiser4_writepages_dispatch for each of dirty inodes. + * reiser4_writepages_dispatch handles pages dirtied via shared + * mapping - dirty pages get into atoms. Writeout is called to flush + * some atoms. + */ +static long reiser4_writeback_inodes(struct super_block *super, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all) +{ + long result; + reiser4_context *ctx; + + if (wbc->for_kupdate) + /* reiser4 has its own means of periodical write-out */ + goto skip; + + spin_unlock(&wb->list_lock); + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) { + warning("vs-13", "failed to init context"); + spin_lock(&wb->list_lock); + goto skip; + } + /* + * call reiser4_writepages for each of dirty inodes to turn + * dirty pages into transactions if they were not yet. 
+ */ + spin_lock(&wb->list_lock); + result = generic_writeback_sb_inodes(super, wb, wbc, work, flush_all); + spin_unlock(&wb->list_lock); + + if (result <= 0) + goto exit; + wbc->nr_to_write = result; + + /* flush goes here */ + reiser4_writeout(super, wbc); + exit: + /* avoid recursive calls to ->writeback_inodes */ + context_set_commit_async(ctx); + reiser4_exit_context(ctx); + spin_lock(&wb->list_lock); + + return result; + skip: + writeback_skip_sb_inodes(super, wb); + return 0; +} + +/* ->sync_fs() of super operations */ +static int reiser4_sync_fs(struct super_block *super, int wait) +{ + reiser4_context *ctx; + struct bdi_writeback *wb; + struct wb_writeback_work work = { + .sb = super, + .sync_mode = WB_SYNC_ALL, + .range_cyclic = 0, + .nr_pages = LONG_MAX, + .reason = WB_REASON_SYNC, + .for_sync = 1, + }; + struct writeback_control wbc = { + .sync_mode = work.sync_mode, + .range_cyclic = work.range_cyclic, + .range_start = 0, + .range_end = LLONG_MAX, + }; + ctx = reiser4_init_context(super); + if (IS_ERR(ctx)) { + warning("edward-1567", "failed to init context"); + return PTR_ERR(ctx); + } + /* + * We don't capture superblock here. + * Superblock is captured only by operations, which change + * its fields different from free_blocks, nr_files, next_oid. + * After system crash the mentioned fields are recovered from + * journal records, see reiser4_journal_recover_sb_data(). + * Also superblock is captured at final commit when releasing + * disk format. + */ + wb = &inode_to_bdi(reiser4_get_super_fake(super))->wb; + spin_lock(&wb->list_lock); + generic_writeback_sb_inodes(super, wb, &wbc, &work, true); + spin_unlock(&wb->list_lock); + wbc.nr_to_write = LONG_MAX; + /* + * (flush goes here) + * commit all transactions + */ + reiser4_writeout(super, &wbc); + + reiser4_exit_context(ctx); + return 0; +} + +static int reiser4_remount(struct super_block *s, int *mount_flags, char *arg) +{ + sync_filesystem(s); + return 0; +} + +/** + * reiser4_show_options - show_options of super operations + * @m: file where to write information + * @mnt: mount structure + * + * Makes reiser4 mount options visible in /proc/mounts. + */ +static int reiser4_show_options(struct seq_file *m, struct dentry *dentry) +{ + struct super_block *super; + reiser4_super_info_data *sbinfo; + + super = dentry->d_sb; + sbinfo = get_super_private(super); + + seq_printf(m, ",atom_max_size=0x%x", sbinfo->tmgr.atom_max_size); + seq_printf(m, ",atom_max_age=0x%x", sbinfo->tmgr.atom_max_age); + seq_printf(m, ",atom_min_size=0x%x", sbinfo->tmgr.atom_min_size); + seq_printf(m, ",atom_max_flushers=0x%x", + sbinfo->tmgr.atom_max_flushers); + seq_printf(m, ",cbk_cache_slots=0x%x", + sbinfo->tree.cbk_cache.nr_slots); + + return 0; +} + +struct super_operations reiser4_super_operations = { + .alloc_inode = reiser4_alloc_inode, + .destroy_inode = reiser4_destroy_inode, + .dirty_inode = reiser4_dirty_inode, + .evict_inode = reiser4_evict_inode, + .put_super = reiser4_put_super, + .sync_fs = reiser4_sync_fs, + .statfs = reiser4_statfs, + .remount_fs = reiser4_remount, + .writeback_inodes = reiser4_writeback_inodes, + .show_options = reiser4_show_options +}; + +/** + * fill_super - initialize super block on mount + * @super: super block to fill + * @data: reiser4 specific mount option + * @silent: + * + * This is to be called by reiser4_get_sb. Mounts filesystem. 
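+ *
+ * Editor's note: "reiser4_get_sb" refers to the older ->get_sb() mount
+ * interface; in this code it is reiser4_mount() below that passes
+ * fill_super to mount_bdev().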
+ */ +static int fill_super(struct super_block *super, void *data, int silent) +{ + reiser4_context ctx; + int result; + reiser4_super_info_data *sbinfo; + + assert("zam-989", super != NULL); + + super->s_op = NULL; + init_stack_context(&ctx, super); + + /* allocate reiser4 specific super block */ + if ((result = reiser4_init_fs_info(super)) != 0) + goto failed_init_sinfo; + + sbinfo = get_super_private(super); + + if ((result = reiser4_init_csum_tfm(&sbinfo->csum_tfm)) != 0) + goto failed_init_csum_tfm; + + /* initialize various reiser4 parameters, parse mount options */ + if ((result = reiser4_init_super_data(super, data)) != 0) + goto failed_init_super_data; + + /* read reiser4 master super block, initialize disk format plugin */ + if ((result = reiser4_init_read_super(super, silent)) != 0) + goto failed_init_read_super; + + /* initialize transaction manager */ + reiser4_init_txnmgr(&sbinfo->tmgr); + + /* initialize ktxnmgrd context and start kernel thread ktxnmrgd */ + if ((result = reiser4_init_ktxnmgrd(super)) != 0) + goto failed_init_ktxnmgrd; + + /* initialize entd context and start kernel thread entd */ + if ((result = reiser4_init_entd(super)) != 0) + goto failed_init_entd; + + /* initialize address spaces for formatted nodes and bitmaps */ + if ((result = reiser4_init_formatted_fake(super)) != 0) + goto failed_init_formatted_fake; + + /* initialize disk format plugin */ + if ((result = get_super_private(super)->df_plug->init_format(super, + data)) != 0) + goto failed_init_disk_format; + + /* + * There are some 'committed' versions of reiser4 super block counters, + * which correspond to reiser4 on-disk state. These counters are + * initialized here + */ + sbinfo->blocks_free_committed = sbinfo->blocks_free; + sbinfo->nr_files_committed = oids_used(super); + + /* get inode of root directory */ + if ((result = reiser4_init_root_inode(super)) != 0) + goto failed_init_root_inode; + + if ((result = get_super_private(super)->df_plug->version_update(super)) != 0) + goto failed_update_format_version; + + process_safelinks(super); + reiser4_exit_context(&ctx); + + sbinfo->debugfs_root = debugfs_create_dir(super->s_id, + reiser4_debugfs_root); + if (sbinfo->debugfs_root) { + sbinfo->tmgr.debugfs_atom_count = + debugfs_create_u32("atom_count", S_IFREG|S_IRUSR, + sbinfo->debugfs_root, + &sbinfo->tmgr.atom_count); + sbinfo->tmgr.debugfs_id_count = + debugfs_create_u32("id_count", S_IFREG|S_IRUSR, + sbinfo->debugfs_root, + &sbinfo->tmgr.id_count); + } + printk("reiser4: %s: using %s.\n", super->s_id, + txmod_plugin_by_id(sbinfo->txmod)->h.desc); + return 0; + + failed_update_format_version: + failed_init_root_inode: + if (sbinfo->df_plug->release) + sbinfo->df_plug->release(super); + failed_init_disk_format: + reiser4_done_formatted_fake(super); + failed_init_formatted_fake: + reiser4_done_entd(super); + failed_init_entd: + reiser4_done_ktxnmgrd(super); + failed_init_ktxnmgrd: + reiser4_done_txnmgr(&sbinfo->tmgr); + failed_init_read_super: + failed_init_super_data: + failed_init_csum_tfm: + reiser4_done_fs_info(super); + failed_init_sinfo: + reiser4_exit_context(&ctx); + return result; +} + +/** + * reiser4_mount - mount of file_system_type operations + * @fs_type: + * @flags: mount flags MS_RDONLY, MS_VERBOSE, etc + * @dev_name: block device file name + * @data: specific mount options + * + * Reiser4 mount entry. 
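+ *
+ * Editor's note (usage illustration): once the file_system_type below is
+ * registered, a volume is mounted as, e.g.,
+ *
+ *	mount -t reiser4 /dev/sdXN /mnt
+ *
+ * where /dev/sdXN stands for any block device holding a reiser4 volume.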
+ */ +static struct dentry *reiser4_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, fill_super); +} + +/* structure describing the reiser4 filesystem implementation */ +static struct file_system_type reiser4_fs_type = { + .owner = THIS_MODULE, + .name = "reiser4", + .fs_flags = FS_REQUIRES_DEV, + .mount = reiser4_mount, + .kill_sb = kill_block_super, + .next = NULL +}; + +void destroy_reiser4_cache(struct kmem_cache **cachep) +{ + BUG_ON(*cachep == NULL); + kmem_cache_destroy(*cachep); + *cachep = NULL; +} + +/** + * init_reiser4 - reiser4 initialization entry point + * + * Initializes reiser4 slabs, registers reiser4 filesystem type. It is called + * on kernel initialization or during reiser4 module load. + */ +static int __init init_reiser4(void) +{ + int result; + + printk(KERN_INFO + "Loading Reiser4 (format release: 4.%d.%d) " + "See www.namesys.com for a description of Reiser4.\n", + get_release_number_major(), + get_release_number_minor()); + + /* initialize slab cache of inodes */ + if ((result = init_inodes()) != 0) + goto failed_inode_cache; + + /* initialize cache of znodes */ + if ((result = init_znodes()) != 0) + goto failed_init_znodes; + + /* initialize all plugins */ + if ((result = init_plugins()) != 0) + goto failed_init_plugins; + + /* initialize cache of plugin_set-s and plugin_set's hash table */ + if ((result = init_plugin_set()) != 0) + goto failed_init_plugin_set; + + /* initialize caches of txn_atom-s and txn_handle-s */ + if ((result = init_txnmgr_static()) != 0) + goto failed_init_txnmgr_static; + + /* initialize cache of jnodes */ + if ((result = init_jnodes()) != 0) + goto failed_init_jnodes; + + /* initialize cache of flush queues */ + if ((result = reiser4_init_fqs()) != 0) + goto failed_init_fqs; + + /* initialize cache of structures attached to dentry->d_fsdata */ + if ((result = reiser4_init_dentry_fsdata()) != 0) + goto failed_init_dentry_fsdata; + + /* initialize cache of structures attached to file->private_data */ + if ((result = reiser4_init_file_fsdata()) != 0) + goto failed_init_file_fsdata; + + /* + * initialize cache of d_cursors. See plugin/file_ops_readdir.c for + * more details + */ + if ((result = reiser4_init_d_cursor()) != 0) + goto failed_init_d_cursor; + + /* initialize cache of blocknr set entries */ + if ((result = blocknr_set_init_static()) != 0) + goto failed_init_blocknr_set; + + /* initialize cache of blocknr list entries */ + if ((result = blocknr_list_init_static()) != 0) + goto failed_init_blocknr_list; + + if ((result = register_filesystem(&reiser4_fs_type)) == 0) { + reiser4_debugfs_root = debugfs_create_dir("reiser4", NULL); + return 0; + } + + blocknr_list_done_static(); + failed_init_blocknr_list: + blocknr_set_done_static(); + failed_init_blocknr_set: + reiser4_done_d_cursor(); + failed_init_d_cursor: + reiser4_done_file_fsdata(); + failed_init_file_fsdata: + reiser4_done_dentry_fsdata(); + failed_init_dentry_fsdata: + reiser4_done_fqs(); + failed_init_fqs: + done_jnodes(); + failed_init_jnodes: + done_txnmgr_static(); + failed_init_txnmgr_static: + done_plugin_set(); + failed_init_plugin_set: + failed_init_plugins: + done_znodes(); + failed_init_znodes: + done_inodes(); + failed_inode_cache: + return result; +} + +/** + * done_reiser4 - reiser4 exit entry point + * + * Unregister reiser4 filesystem type, deletes caches. It is called on shutdown + * or at module unload. 
+ */ +static void __exit done_reiser4(void) +{ + int result; + + debugfs_remove(reiser4_debugfs_root); + result = unregister_filesystem(&reiser4_fs_type); + BUG_ON(result != 0); + blocknr_list_done_static(); + blocknr_set_done_static(); + reiser4_done_d_cursor(); + reiser4_done_file_fsdata(); + reiser4_done_dentry_fsdata(); + reiser4_done_fqs(); + done_jnodes(); + done_txnmgr_static(); + done_plugin_set(); + done_znodes(); + destroy_reiser4_cache(&inode_cache); +} + +module_init(init_reiser4); +module_exit(done_reiser4); + +MODULE_ALIAS_FS("reiser4"); + +MODULE_DESCRIPTION("Reiser4 filesystem"); +MODULE_AUTHOR("Hans Reiser <Reiser@Namesys.COM>"); + +MODULE_LICENSE("GPL"); + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/tap.c b/fs/reiser4/tap.c new file mode 100644 index 000000000000..1234188c3871 --- /dev/null +++ b/fs/reiser4/tap.c @@ -0,0 +1,376 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + Tree Access Pointer (tap). + + tap is data structure combining coord and lock handle (mostly). It is + useful when one has to scan tree nodes (for example, in readdir, or flush), + for tap functions allow to move tap in either direction transparently + crossing unit/item/node borders. + + Tap doesn't provide automatic synchronization of its fields as it is + supposed to be per-thread object. +*/ + +#include "forward.h" +#include "debug.h" +#include "coord.h" +#include "tree.h" +#include "context.h" +#include "tap.h" +#include "znode.h" +#include "tree_walk.h" + +#if REISER4_DEBUG +static int tap_invariant(const tap_t *tap); +static void tap_check(const tap_t *tap); +#else +#define tap_check(tap) noop +#endif + +/** load node tap is pointing to, if not loaded already */ +int reiser4_tap_load(tap_t *tap) +{ + tap_check(tap); + if (tap->loaded == 0) { + int result; + + result = zload_ra(tap->coord->node, &tap->ra_info); + if (result != 0) + return result; + coord_clear_iplug(tap->coord); + } + ++tap->loaded; + tap_check(tap); + return 0; +} + +/** release node tap is pointing to. Dual to tap_load() */ +void reiser4_tap_relse(tap_t *tap) +{ + tap_check(tap); + if (tap->loaded > 0) { + --tap->loaded; + if (tap->loaded == 0) + zrelse(tap->coord->node); + } + tap_check(tap); +} + +/** + * init tap to consist of @coord and @lh. Locks on nodes will be acquired with + * @mode + */ +void reiser4_tap_init(tap_t *tap, coord_t *coord, lock_handle * lh, + znode_lock_mode mode) +{ + tap->coord = coord; + tap->lh = lh; + tap->mode = mode; + tap->loaded = 0; + INIT_LIST_HEAD(&tap->linkage); + reiser4_init_ra_info(&tap->ra_info); +} + +/** add @tap to the per-thread list of all taps */ +void reiser4_tap_monitor(tap_t *tap) +{ + assert("nikita-2623", tap != NULL); + tap_check(tap); + list_add(&tap->linkage, reiser4_taps_list()); + tap_check(tap); +} + +/* duplicate @src into @dst. Copy lock handle. @dst is not initially + * loaded. 
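+ *
+ * (Aside: the tap primitives in this file are normally combined into one
+ * short lifecycle: reiser4_tap_init(), optionally reiser4_tap_monitor(),
+ * reiser4_tap_load(), movement via go_next_unit() or reiser4_tap_move(),
+ * reiser4_tap_relse(), and finally reiser4_tap_done().  A sketch of that
+ * calling sequence, assuming the caller has already set up @coord and the
+ * long-term lock behind @lh, with error handling reduced to early exits:
+ *
+ *	tap_t tap;
+ *
+ *	reiser4_tap_init(&tap, coord, lh, ZNODE_READ_LOCK);
+ *	reiser4_tap_monitor(&tap);
+ *	if (reiser4_tap_load(&tap) == 0) {
+ *		do {
+ *			... examine tap.coord here ...
+ *		} while (go_next_unit(&tap) == 0);
+ *		reiser4_tap_relse(&tap);
+ *	}
+ *	reiser4_tap_done(&tap);
+ *
+ * go_next_unit() ends the walk at the first error or when there is no right
+ * neighbor; note that reiser4_tap_done() also releases the lock handle that
+ * was passed in at init time.)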
*/ +void reiser4_tap_copy(tap_t *dst, tap_t *src) +{ + assert("nikita-3193", src != NULL); + assert("nikita-3194", dst != NULL); + + *dst->coord = *src->coord; + if (src->lh->node) + copy_lh(dst->lh, src->lh); + dst->mode = src->mode; + dst->loaded = 0; + INIT_LIST_HEAD(&dst->linkage); + dst->ra_info = src->ra_info; +} + +/** finish with @tap */ +void reiser4_tap_done(tap_t *tap) +{ + assert("nikita-2565", tap != NULL); + tap_check(tap); + if (tap->loaded > 0) + zrelse(tap->coord->node); + done_lh(tap->lh); + tap->loaded = 0; + list_del_init(&tap->linkage); + tap->coord->node = NULL; +} + +/** + * move @tap to the new node, locked with @target. Load @target, if @tap was + * already loaded. + */ +int reiser4_tap_move(tap_t *tap, lock_handle * target) +{ + int result = 0; + + assert("nikita-2567", tap != NULL); + assert("nikita-2568", target != NULL); + assert("nikita-2570", target->node != NULL); + assert("nikita-2569", tap->coord->node == tap->lh->node); + + tap_check(tap); + if (tap->loaded > 0) + result = zload_ra(target->node, &tap->ra_info); + + if (result == 0) { + if (tap->loaded > 0) + zrelse(tap->coord->node); + done_lh(tap->lh); + copy_lh(tap->lh, target); + tap->coord->node = target->node; + coord_clear_iplug(tap->coord); + } + tap_check(tap); + return result; +} + +/** + * move @tap to @target. Acquire lock on @target, if @tap was already + * loaded. + */ +static int tap_to(tap_t *tap, znode * target) +{ + int result; + + assert("nikita-2624", tap != NULL); + assert("nikita-2625", target != NULL); + + tap_check(tap); + result = 0; + if (tap->coord->node != target) { + lock_handle here; + + init_lh(&here); + result = longterm_lock_znode(&here, target, + tap->mode, ZNODE_LOCK_HIPRI); + if (result == 0) { + result = reiser4_tap_move(tap, &here); + done_lh(&here); + } + } + tap_check(tap); + return result; +} + +/** + * move @tap to given @target, loading and locking @target->node if + * necessary + */ +int tap_to_coord(tap_t *tap, coord_t *target) +{ + int result; + + tap_check(tap); + result = tap_to(tap, target->node); + if (result == 0) + coord_dup(tap->coord, target); + tap_check(tap); + return result; +} + +/** return list of all taps */ +struct list_head *reiser4_taps_list(void) +{ + return &get_current_context()->taps; +} + +/** helper function for go_{next,prev}_{item,unit,node}() */ +int go_dir_el(tap_t *tap, sideof dir, int units_p) +{ + coord_t dup; + coord_t *coord; + int result; + + int (*coord_dir) (coord_t *); + int (*get_dir_neighbor) (lock_handle *, znode *, int, int); + void (*coord_init) (coord_t *, const znode *); + ON_DEBUG(int (*coord_check) (const coord_t *)); + + assert("nikita-2556", tap != NULL); + assert("nikita-2557", tap->coord != NULL); + assert("nikita-2558", tap->lh != NULL); + assert("nikita-2559", tap->coord->node != NULL); + + tap_check(tap); + if (dir == LEFT_SIDE) { + coord_dir = units_p ? coord_prev_unit : coord_prev_item; + get_dir_neighbor = reiser4_get_left_neighbor; + coord_init = coord_init_last_unit; + } else { + coord_dir = units_p ? coord_next_unit : coord_next_item; + get_dir_neighbor = reiser4_get_right_neighbor; + coord_init = coord_init_first_unit; + } + ON_DEBUG(coord_check = + units_p ? 
coord_is_existing_unit : coord_is_existing_item); + assert("nikita-2560", coord_check(tap->coord)); + + coord = tap->coord; + coord_dup(&dup, coord); + if (coord_dir(&dup) != 0) { + do { + /* move to the left neighboring node */ + lock_handle dup; + + init_lh(&dup); + result = + get_dir_neighbor(&dup, coord->node, (int)tap->mode, + GN_CAN_USE_UPPER_LEVELS); + if (result == 0) { + result = reiser4_tap_move(tap, &dup); + if (result == 0) + coord_init(tap->coord, dup.node); + done_lh(&dup); + } + /* skip empty nodes */ + } while ((result == 0) && node_is_empty(coord->node)); + } else { + result = 0; + coord_dup(coord, &dup); + } + assert("nikita-2564", ergo(!result, coord_check(tap->coord))); + tap_check(tap); + return result; +} + +/** + * move @tap to the next unit, transparently crossing item and node + * boundaries + */ +int go_next_unit(tap_t *tap) +{ + return go_dir_el(tap, RIGHT_SIDE, 1); +} + +/** + * move @tap to the previous unit, transparently crossing item and node + * boundaries + */ +int go_prev_unit(tap_t *tap) +{ + return go_dir_el(tap, LEFT_SIDE, 1); +} + +/** + * @shift times apply @actor to the @tap. This is used to move @tap by + * @shift units (or items, or nodes) in either direction. + */ +static int rewind_to(tap_t *tap, go_actor_t actor, int shift) +{ + int result; + + assert("nikita-2555", shift >= 0); + assert("nikita-2562", tap->coord->node == tap->lh->node); + + tap_check(tap); + result = reiser4_tap_load(tap); + if (result != 0) + return result; + + for (; shift > 0; --shift) { + result = actor(tap); + assert("nikita-2563", tap->coord->node == tap->lh->node); + if (result != 0) + break; + } + reiser4_tap_relse(tap); + tap_check(tap); + return result; +} + +/** move @tap @shift units rightward */ +int rewind_right(tap_t *tap, int shift) +{ + return rewind_to(tap, go_next_unit, shift); +} + +/** move @tap @shift units leftward */ +int rewind_left(tap_t *tap, int shift) +{ + return rewind_to(tap, go_prev_unit, shift); +} + +#if REISER4_DEBUG +/** debugging function: print @tap content in human readable form */ +static void print_tap(const char *prefix, const tap_t *tap) +{ + if (tap == NULL) { + printk("%s: null tap\n", prefix); + return; + } + printk("%s: loaded: %i, in-list: %i, node: %p, mode: %s\n", prefix, + tap->loaded, (&tap->linkage == tap->linkage.next && + &tap->linkage == tap->linkage.prev), + tap->lh->node, + lock_mode_name(tap->mode)); + print_coord("\tcoord", tap->coord, 0); +} + +/** check [tap-sane] invariant */ +static int tap_invariant(const tap_t *tap) +{ + /* [tap-sane] invariant */ + + if (tap == NULL) + return 1; + /* tap->mode is one of + * + * {ZNODE_NO_LOCK, ZNODE_READ_LOCK, ZNODE_WRITE_LOCK}, and + */ + if (tap->mode != ZNODE_NO_LOCK && + tap->mode != ZNODE_READ_LOCK && tap->mode != ZNODE_WRITE_LOCK) + return 2; + /* tap->coord != NULL, and */ + if (tap->coord == NULL) + return 3; + /* tap->lh != NULL, and */ + if (tap->lh == NULL) + return 4; + /* tap->loaded > 0 => znode_is_loaded(tap->coord->node), and */ + if (!ergo(tap->loaded, znode_is_loaded(tap->coord->node))) + return 5; + /* tap->coord->node == tap->lh->node if tap->lh->node is not 0 */ + if (tap->lh->node != NULL && tap->coord->node != tap->lh->node) + return 6; + return 0; +} + +/** debugging function: check internal @tap consistency */ +static void tap_check(const tap_t *tap) +{ + int result; + + result = tap_invariant(tap); + if (result != 0) { + print_tap("broken", tap); + reiser4_panic("nikita-2831", "tap broken: %i\n", result); + } +} +#endif + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tap.h b/fs/reiser4/tap.h new file mode 100644 index 000000000000..f777d4d4540a --- /dev/null +++ b/fs/reiser4/tap.h @@ -0,0 +1,70 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* Tree Access Pointers. See tap.c for more details. */ + +#if !defined(__REISER4_TAP_H__) +#define __REISER4_TAP_H__ + +#include "forward.h" +#include "readahead.h" + +/** + tree_access_pointer aka tap. Data structure combining coord_t and lock + handle. + Invariants involving this data-type, see doc/lock-ordering for details: + + [tap-sane] + */ +struct tree_access_pointer { + /* coord tap is at */ + coord_t *coord; + /* lock handle on ->coord->node */ + lock_handle *lh; + /* mode of lock acquired by this tap */ + znode_lock_mode mode; + /* incremented by reiser4_tap_load(). + Decremented by reiser4_tap_relse(). */ + int loaded; + /* list of taps */ + struct list_head linkage; + /* read-ahead hint */ + ra_info_t ra_info; +}; + +typedef int (*go_actor_t) (tap_t *tap); + +extern int reiser4_tap_load(tap_t *tap); +extern void reiser4_tap_relse(tap_t *tap); +extern void reiser4_tap_init(tap_t *tap, coord_t *coord, lock_handle * lh, + znode_lock_mode mode); +extern void reiser4_tap_monitor(tap_t *tap); +extern void reiser4_tap_copy(tap_t *dst, tap_t *src); +extern void reiser4_tap_done(tap_t *tap); +extern int reiser4_tap_move(tap_t *tap, lock_handle * target); +extern int tap_to_coord(tap_t *tap, coord_t *target); + +extern int go_dir_el(tap_t *tap, sideof dir, int units_p); +extern int go_next_unit(tap_t *tap); +extern int go_prev_unit(tap_t *tap); +extern int rewind_right(tap_t *tap, int shift); +extern int rewind_left(tap_t *tap, int shift); + +extern struct list_head *reiser4_taps_list(void); + +#define for_all_taps(tap) \ + for (tap = list_entry(reiser4_taps_list()->next, tap_t, linkage); \ + reiser4_taps_list() != &tap->linkage; \ + tap = list_entry(tap->linkage.next, tap_t, linkage)) + +/* __REISER4_TAP_H__ */ +#endif +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree.c b/fs/reiser4/tree.c new file mode 100644 index 000000000000..c8d2e4665f87 --- /dev/null +++ b/fs/reiser4/tree.c @@ -0,0 +1,1884 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * KEYS IN A TREE. + * + * The tree consists of nodes located on the disk. Node in the tree is either + * formatted or unformatted. Formatted node is one that has structure + * understood by the tree balancing and traversal code. Formatted nodes are + * further classified into leaf and internal nodes. Latter distinctions is + * (almost) of only historical importance: general structure of leaves and + * internal nodes is the same in Reiser4. Unformatted nodes contain raw data + * that are part of bodies of ordinary files and attributes. + * + * Each node in the tree spawns some interval in the key space. Key ranges for + * all nodes in the tree are disjoint. Actually, this only holds in some weak + * sense, because of the non-unique keys: intersection of key ranges for + * different nodes is either empty, or consists of exactly one key. + * + * Formatted node consists of a sequence of items. Each item spawns some + * interval in key space. 
Key ranges for all items in a tree are disjoint,
+ * modulo non-unique keys again. Items within nodes are ordered in the key
+ * order of the smallest key in an item.
+ *
+ * Particular type of item can be further split into units. Unit is piece of
+ * item that can be cut from item and moved into another item of the same
+ * type. Units are used by balancing code to repack data during balancing.
+ *
+ * Unit can be further split into smaller entities (for example, extent unit
+ * represents several pages, and it is natural for extent code to operate on
+ * particular pages and even bytes within one unit), but this is of no
+ * relevance to the generic balancing and lookup code.
+ *
+ * Although item is said to "spawn" range or interval of keys, it is not
+ * necessary that item contains piece of data addressable by each and every
+ * key in this range. For example, compound directory item, consisting of
+ * units corresponding to directory entries and keyed by hashes of file names,
+ * looks more like having "discrete spectrum": only some disjoint keys inside
+ * range occupied by this item really address data.
+ *
+ * Nonetheless, each item always has well-defined least (minimal) key, which
+ * is recorded in item header, stored in the node this item is in. Also, item
+ * plugin can optionally define method ->max_key_inside() returning maximal
+ * key that can _possibly_ be located within this item. This method is used
+ * (mainly) to determine when given piece of data should be merged into
+ * existing item, instead of creating a new one. Because of this, even though
+ * ->max_key_inside() can be larger than any key actually located in the item,
+ * intervals
+ *
+ * [ reiser4_min_key( item ), ->max_key_inside( item ) ]
+ *
+ * are still disjoint for all items within the _same_ node.
+ *
+ * In memory node is represented by znode. It plays several roles:
+ *
+ * . something locks are taken on
+ *
+ * . something tracked by transaction manager (this is going to change)
+ *
+ * . something used to access node data
+ *
+ * . something used to maintain tree structure in memory: sibling and
+ * parental linkage.
+ *
+ * . something used to organize nodes into "slums"
+ *
+ * For more on znodes see znode.[ch]
+ *
+ * DELIMITING KEYS
+ *
+ * To simplify balancing, allow some flexibility in locking and speed up
+ * important coord cache optimization, we keep delimiting keys of nodes in
+ * memory. Depending on disk format (implemented by appropriate node plugin)
+ * node on disk can record both left and right delimiting key, only one of
+ * them, or none. Still, our balancing and tree traversal code keep both
+ * delimiting keys for a node that is in memory stored in the znode. When
+ * node is first brought into memory during tree traversal, its left
+ * delimiting key is taken from its parent, and its right delimiting key is
+ * either next key in its parent, or is right delimiting key of parent if
+ * node is the rightmost child of parent.
+ *
+ * Physical consistency of delimiting key is protected by special dk
+ * read-write lock. That is, delimiting keys can only be inspected or
+ * modified under this lock. But dk lock is only sufficient for fast
+ * "pessimistic" check, because to simplify code and to decrease lock
+ * contention, balancing (carry) only updates delimiting keys right before
+ * unlocking all locked nodes on the given tree level. For example,
+ * coord-by-key cache scans LRU list of recently accessed znodes. For each
+ * node it first does fast check under dk spin lock.
If key looked for is + * not between delimiting keys for this node, next node is inspected and so + * on. If key is inside of the key range, long term lock is taken on node + * and key range is rechecked. + * + * COORDINATES + * + * To find something in the tree, you supply a key, and the key is resolved + * by coord_by_key() into a coord (coordinate) that is valid as long as the + * node the coord points to remains locked. As mentioned above trees + * consist of nodes that consist of items that consist of units. A unit is + * the smallest and indivisible piece of tree as far as balancing and tree + * search are concerned. Each node, item, and unit can be addressed by + * giving its level in the tree and the key occupied by this entity. A node + * knows what the key ranges are of the items within it, and how to find its + * items and invoke their item handlers, but it does not know how to access + * individual units within its items except through the item handlers. + * coord is a structure containing a pointer to the node, the ordinal number + * of the item within this node (a sort of item offset), and the ordinal + * number of the unit within this item. + * + * TREE LOOKUP + * + * There are two types of access to the tree: lookup and modification. + * + * Lookup is a search for the key in the tree. Search can look for either + * exactly the key given to it, or for the largest key that is not greater + * than the key given to it. This distinction is determined by "bias" + * parameter of search routine (coord_by_key()). coord_by_key() either + * returns error (key is not in the tree, or some kind of external error + * occurred), or successfully resolves key into coord. + * + * This resolution is done by traversing tree top-to-bottom from root level + * to the desired level. On levels above twig level (level one above the + * leaf level) nodes consist exclusively of internal items. Internal item is + * nothing more than pointer to the tree node on the child level. On twig + * level nodes consist of internal items intermixed with extent + * items. Internal items form normal search tree structure used by traversal + * to descent through the tree. + * + * TREE LOOKUP OPTIMIZATIONS + * + * Tree lookup described above is expensive even if all nodes traversed are + * already in the memory: for each node binary search within it has to be + * performed and binary searches are CPU consuming and tend to destroy CPU + * caches. + * + * Several optimizations are used to work around this: + * + * . cbk_cache (look-aside cache for tree traversals, see search.c for + * details) + * + * . seals (see seal.[ch]) + * + * . 
vroot (see search.c) + * + * General search-by-key is layered thusly: + * + * [check seal, if any] --ok--> done + * | + * failed + * | + * V + * [vroot defined] --no--> node = tree_root + * | | + * yes | + * | | + * V | + * node = vroot | + * | | + * | | + * | | + * V V + * [check cbk_cache for key] --ok--> done + * | + * failed + * | + * V + * [start tree traversal from node] + * + */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/item/static_stat.h" +#include "plugin/item/item.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "carry.h" +#include "carry_ops.h" +#include "tap.h" +#include "tree.h" +#include "vfs_ops.h" +#include "page_cache.h" +#include "super.h" +#include "reiser4.h" +#include "inode.h" + +#include <linux/fs.h> /* for struct super_block */ +#include <linux/spinlock.h> + +/* Disk address (block number) never ever used for any real tree node. This is + used as block number of "uber" znode. + + Invalid block addresses are 0 by tradition. + +*/ +const reiser4_block_nr UBER_TREE_ADDR = 0ull; + +#define CUT_TREE_MIN_ITERATIONS 64 + +static int find_child_by_addr(znode * parent, znode * child, coord_t *result); + +/* return node plugin of coord->node */ +node_plugin *node_plugin_by_coord(const coord_t *coord) +{ + assert("vs-1", coord != NULL); + assert("vs-2", coord->node != NULL); + + return coord->node->nplug; +} + +/* insert item into tree. Fields of @coord are updated so that they can be + * used by consequent insert operation. */ +insert_result insert_by_key(reiser4_tree * tree /* tree to insert new item + * into */ , + const reiser4_key * key /* key of new item */ , + reiser4_item_data * data /* parameters for item + * creation */ , + coord_t *coord /* resulting insertion coord */ , + lock_handle * lh /* resulting lock + * handle */ , + tree_level stop_level /* level where to insert */ , + __u32 flags/* insertion flags */) +{ + int result; + + assert("nikita-358", tree != NULL); + assert("nikita-360", coord != NULL); + + result = coord_by_key(tree, key, coord, lh, ZNODE_WRITE_LOCK, + FIND_EXACT, stop_level, stop_level, + flags | CBK_FOR_INSERT, NULL/*ra_info */); + switch (result) { + default: + break; + case CBK_COORD_FOUND: + result = IBK_ALREADY_EXISTS; + break; + case CBK_COORD_NOTFOUND: + assert("nikita-2017", coord->node != NULL); + result = insert_by_coord(coord, data, key, lh, 0/*flags */); + break; + } + return result; +} + +/* insert item by calling carry. 
Helper function called if short-cut + insertion failed */ +static insert_result insert_with_carry_by_coord(coord_t *coord, + /* coord where to insert */ + lock_handle * lh, + /* lock handle of insertion node */ + reiser4_item_data * data, + /* parameters of new item */ + const reiser4_key * key, + /* key of new item */ + carry_opcode cop, + /* carry operation to perform */ + cop_insert_flag flags + /* carry flags */ ) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_insert_data *cdata; + carry_op *op; + + assert("umka-314", coord != NULL); + + /* allocate carry_pool and 3 carry_level-s */ + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*cdata)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, cop, coord->node, 0); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? PTR_ERR(op) : -EIO); + } + cdata = (carry_insert_data *) (lowest_level + 3); + cdata->coord = coord; + cdata->data = data; + cdata->key = key; + op->u.insert.d = cdata; + if (flags == 0) + flags = znode_get_tree(coord->node)->carry.insert_flags; + op->u.insert.flags = flags; + op->u.insert.type = COPT_ITEM_DATA; + op->u.insert.child = NULL; + if (lh != NULL) { + assert("nikita-3245", lh->node == coord->node); + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + } + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* form carry queue to perform paste of @data with @key at @coord, and launch + its execution by calling carry(). + + Instruct carry to update @lh it after balancing insertion coord moves into + different block. + +*/ +static int paste_with_carry(coord_t *coord, /* coord of paste */ + lock_handle * lh, /* lock handle of node + * where item is + * pasted */ + reiser4_item_data * data, /* parameters of new + * item */ + const reiser4_key * key, /* key of new item */ + unsigned flags/* paste flags */) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_insert_data *cdata; + carry_op *op; + + assert("umka-315", coord != NULL); + assert("umka-316", key != NULL); + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*cdata)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, COP_PASTE, coord->node, 0); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? PTR_ERR(op) : -EIO); + } + cdata = (carry_insert_data *) (lowest_level + 3); + cdata->coord = coord; + cdata->data = data; + cdata->key = key; + op->u.paste.d = cdata; + if (flags == 0) + flags = znode_get_tree(coord->node)->carry.paste_flags; + op->u.paste.flags = flags; + op->u.paste.type = COPT_ITEM_DATA; + if (lh != NULL) { + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + } + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* insert item at the given coord. + + First try to skip carry by directly calling ->create_item() method of node + plugin. If this is impossible (there is not enough free space in the node, + or leftmost item in the node is created), call insert_with_carry_by_coord() + that will do full carry(). + +*/ +insert_result insert_by_coord(coord_t *coord /* coord where to + * insert. 
coord->node has + * to be write locked by + * caller */ , + reiser4_item_data * data /* data to be + * inserted */ , + const reiser4_key * key /* key of new item */ , + lock_handle * lh /* lock handle of write + * lock on node */ , + __u32 flags/* insertion flags */) +{ + unsigned item_size; + int result; + znode *node; + + assert("vs-247", coord != NULL); + assert("vs-248", data != NULL); + assert("vs-249", data->length >= 0); + assert("nikita-1191", znode_is_write_locked(coord->node)); + + node = coord->node; + coord_clear_iplug(coord); + result = zload(node); + if (result != 0) + return result; + + item_size = space_needed(node, NULL, data, 1); + if (item_size > znode_free_space(node) && + (flags & COPI_DONT_SHIFT_LEFT) && (flags & COPI_DONT_SHIFT_RIGHT) + && (flags & COPI_DONT_ALLOCATE)) { + /* we are forced to use free space of coord->node and new item + does not fit into it. + + Currently we get here only when we allocate and copy units + of extent item from a node to its left neighbor during + "squalloc"-ing. If @node (this is left neighbor) does not + have enough free space - we do not want to attempt any + shifting and allocations because we are in squeezing and + everything to the left of @node is tightly packed. + */ + result = -E_NODE_FULL; + } else if ((item_size <= znode_free_space(node)) && + !coord_is_before_leftmost(coord) && + (node_plugin_by_node(node)->fast_insert != NULL) + && node_plugin_by_node(node)->fast_insert(coord)) { + /* shortcut insertion without carry() overhead. + + Only possible if: + + - there is enough free space + + - insertion is not into the leftmost position in a node + (otherwise it would require updating of delimiting key in a + parent) + + - node plugin agrees with this + + */ + result = + node_plugin_by_node(node)->create_item(coord, key, data, + NULL); + znode_make_dirty(node); + } else { + /* otherwise do full-fledged carry(). */ + result = + insert_with_carry_by_coord(coord, lh, data, key, COP_INSERT, + flags); + } + zrelse(node); + return result; +} + +/* @coord is set to leaf level and @data is to be inserted to twig level */ +insert_result +insert_extent_by_coord(coord_t *coord, /* coord where to insert. + * coord->node has to be write + * locked by caller */ + reiser4_item_data *data,/* data to be inserted */ + const reiser4_key *key, /* key of new item */ + lock_handle *lh /* lock handle of write lock + on node */) +{ + assert("vs-405", coord != NULL); + assert("vs-406", data != NULL); + assert("vs-407", data->length > 0); + assert("vs-408", znode_is_write_locked(coord->node)); + assert("vs-409", znode_get_level(coord->node) == LEAF_LEVEL); + + return insert_with_carry_by_coord(coord, lh, data, key, COP_EXTENT, + 0 /*flags */ ); +} + +/* Insert into the item at the given coord. + + First try to skip carry by directly calling ->paste() method of item + plugin. If this is impossible (there is not enough free space in the node, + or we are pasting into leftmost position in the node), call + paste_with_carry() that will do full carry(). 
+ +*/ +/* paste_into_item */ +int insert_into_item(coord_t * coord /* coord of pasting */ , + lock_handle * lh /* lock handle on node involved */ , + const reiser4_key * key /* key of unit being pasted */ , + reiser4_item_data * data /* parameters for new unit */ , + unsigned flags /* insert/paste flags */ ) +{ + int result; + int size_change; + node_plugin *nplug; + item_plugin *iplug; + + assert("umka-317", coord != NULL); + assert("umka-318", key != NULL); + + iplug = item_plugin_by_coord(coord); + nplug = node_plugin_by_coord(coord); + + assert("nikita-1480", iplug == data->iplug); + + size_change = space_needed(coord->node, coord, data, 0); + if (size_change > (int)znode_free_space(coord->node) && + (flags & COPI_DONT_SHIFT_LEFT) && (flags & COPI_DONT_SHIFT_RIGHT) + && (flags & COPI_DONT_ALLOCATE)) { + /* we are forced to use free space of coord->node and new data + does not fit into it. */ + return -E_NODE_FULL; + } + + /* shortcut paste without carry() overhead. + + Only possible if: + + - there is enough free space + + - paste is not into the leftmost unit in a node (otherwise + it would require updating of delimiting key in a parent) + + - node plugin agrees with this + + - item plugin agrees with us + */ + if (size_change <= (int)znode_free_space(coord->node) && + (coord->item_pos != 0 || + coord->unit_pos != 0 || coord->between == AFTER_UNIT) && + coord->unit_pos != 0 && nplug->fast_paste != NULL && + nplug->fast_paste(coord) && + iplug->b.fast_paste != NULL && iplug->b.fast_paste(coord)) { + if (size_change > 0) + nplug->change_item_size(coord, size_change); + /* NOTE-NIKITA: huh? where @key is used? */ + result = iplug->b.paste(coord, data, NULL); + if (size_change < 0) + nplug->change_item_size(coord, size_change); + znode_make_dirty(coord->node); + } else + /* otherwise do full-fledged carry(). */ + result = paste_with_carry(coord, lh, data, key, flags); + return result; +} + +/* this either appends or truncates item @coord */ +int reiser4_resize_item(coord_t * coord /* coord of item being resized */ , + reiser4_item_data * data /* parameters of resize */ , + reiser4_key * key /* key of new unit */ , + lock_handle * lh /* lock handle of node + * being modified */ , + cop_insert_flag flags /* carry flags */ ) +{ + int result; + znode *node; + + assert("nikita-362", coord != NULL); + assert("nikita-363", data != NULL); + assert("vs-245", data->length != 0); + + node = coord->node; + coord_clear_iplug(coord); + result = zload(node); + if (result != 0) + return result; + + if (data->length < 0) + result = node_plugin_by_coord(coord)->shrink_item(coord, + -data->length); + else + result = insert_into_item(coord, lh, key, data, flags); + + zrelse(node); + return result; +} + +/* insert flow @f */ +int reiser4_insert_flow(coord_t * coord, lock_handle * lh, flow_t * f) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + reiser4_item_data *data; + carry_op *op; + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*data)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, COP_INSERT_FLOW, coord->node, + 0 /* operate directly on coord -> node */ ); + if (IS_ERR(op) || (op == NULL)) { + done_carry_pool(pool); + return RETERR(op ? 
PTR_ERR(op) : -EIO); + } + + /* these are permanent during insert_flow */ + data = (reiser4_item_data *) (lowest_level + 3); + data->user = 1; + data->iplug = item_plugin_by_id(FORMATTING_ID); + data->arg = NULL; + /* data.length and data.data will be set before calling paste or + insert */ + data->length = 0; + data->data = NULL; + + op->u.insert_flow.flags = 0; + op->u.insert_flow.insert_point = coord; + op->u.insert_flow.flow = f; + op->u.insert_flow.data = data; + op->u.insert_flow.new_nodes = 0; + + lowest_level->track_type = CARRY_TRACK_CHANGE; + lowest_level->tracked = lh; + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* Given a coord in parent node, obtain a znode for the corresponding child */ +znode *child_znode(const coord_t * parent_coord /* coord of pointer to + * child */ , + znode * parent /* parent of child */ , + int incore_p /* if !0 only return child if already in + * memory */ , + int setup_dkeys_p /* if !0 update delimiting keys of + * child */ ) +{ + znode *child; + + assert("nikita-1374", parent_coord != NULL); + assert("nikita-1482", parent != NULL); +#if REISER4_DEBUG + if (setup_dkeys_p) + assert_rw_not_locked(&(znode_get_tree(parent)->dk_lock)); +#endif + assert("nikita-2947", znode_is_any_locked(parent)); + + if (znode_get_level(parent) <= LEAF_LEVEL) { + /* trying to get child of leaf node */ + warning("nikita-1217", "Child of maize?"); + return ERR_PTR(RETERR(-EIO)); + } + if (item_is_internal(parent_coord)) { + reiser4_block_nr addr; + item_plugin *iplug; + reiser4_tree *tree; + + iplug = item_plugin_by_coord(parent_coord); + assert("vs-512", iplug->s.internal.down_link); + iplug->s.internal.down_link(parent_coord, NULL, &addr); + + tree = znode_get_tree(parent); + if (incore_p) + child = zlook(tree, &addr); + else + child = + zget(tree, &addr, parent, + znode_get_level(parent) - 1, + reiser4_ctx_gfp_mask_get()); + if ((child != NULL) && !IS_ERR(child) && setup_dkeys_p) + set_child_delimiting_keys(parent, parent_coord, child); + } else { + warning("nikita-1483", "Internal item expected"); + child = ERR_PTR(RETERR(-EIO)); + } + return child; +} + +/* remove znode from transaction */ +static void uncapture_znode(znode * node) +{ + struct page *page; + + assert("zam-1001", ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + + if (!reiser4_blocknr_is_fake(znode_get_block(node))) { + int ret; + + /* An already allocated block goes right to the atom's delete set. */ + ret = + reiser4_dealloc_block(znode_get_block(node), 0, + BA_DEFER | BA_FORMATTED); + if (ret) + warning("zam-942", + "can\'t add a block (%llu) number to atom's delete set\n", + (unsigned long long)(*znode_get_block(node))); + + spin_lock_znode(node); + /* Here we return flush reserved block which was reserved at the + * moment when this allocated node was marked dirty and still + * not used by flush in node relocation procedure. */ + if (ZF_ISSET(node, JNODE_FLUSH_RESERVED)) { + txn_atom *atom; + + atom = jnode_get_atom(ZJNODE(node)); + assert("zam-939", atom != NULL); + spin_unlock_znode(node); + flush_reserved2grabbed(atom, (__u64) 1); + spin_unlock_atom(atom); + } else + spin_unlock_znode(node); + } else { + /* znode has assigned block which is counted as "fake + allocated". Return it back to "free blocks") */ + fake_allocated2free((__u64) 1, BA_FORMATTED); + } + + /* + * uncapture page from transaction. There is a possibility of a race + * with ->releasepage(): reiser4_releasepage() detaches page from this + * jnode and we have nothing to uncapture. 
To avoid this, get + * reference of node->pg under jnode spin lock. reiser4_uncapture_page() + * will deal with released page itself. + */ + spin_lock_znode(node); + page = znode_page(node); + if (likely(page != NULL)) { + /* + * reiser4_uncapture_page() can only be called when we are sure + * that znode is pinned in memory, which we are, because + * forget_znode() is only called from longterm_unlock_znode(). + */ + get_page(page); + spin_unlock_znode(node); + lock_page(page); + reiser4_uncapture_page(page); + unlock_page(page); + put_page(page); + } else { + txn_atom *atom; + + /* handle "flush queued" znodes */ + while (1) { + atom = jnode_get_atom(ZJNODE(node)); + assert("zam-943", atom != NULL); + + if (!ZF_ISSET(node, JNODE_FLUSH_QUEUED) + || !atom->nr_running_queues) + break; + + spin_unlock_znode(node); + reiser4_atom_wait_event(atom); + spin_lock_znode(node); + } + + reiser4_uncapture_block(ZJNODE(node)); + spin_unlock_atom(atom); + zput(node); + } +} + +/* This is called from longterm_unlock_znode() when last lock is released from + the node that has been removed from the tree. At this point node is removed + from sibling list and its lock is invalidated. */ +void forget_znode(lock_handle * handle) +{ + znode *node; + reiser4_tree *tree; + + assert("umka-319", handle != NULL); + + node = handle->node; + tree = znode_get_tree(node); + + assert("vs-164", znode_is_write_locked(node)); + assert("nikita-1280", ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + assert_rw_locked(&(node->lock.guard)); + + /* We assume that this node was detached from its parent before + * unlocking, it gives no way to reach this node from parent through a + * down link. The node should have no children and, thereby, can't be + * reached from them by their parent pointers. The only way to obtain a + * reference to the node is to use sibling pointers from its left and + * right neighbors. In the next several lines we remove the node from + * the sibling list. */ + + write_lock_tree(tree); + sibling_list_remove(node); + znode_remove(node, tree); + write_unlock_tree(tree); + + /* Here we set JNODE_DYING and cancel all pending lock requests. It + * forces all lock requestor threads to repeat iterations of getting + * lock on a child, neighbor or parent node. But, those threads can't + * come to this node again, because this node is no longer a child, + * neighbor or parent of any other node. This order of znode + * invalidation does not allow other threads to waste cpu time is a busy + * loop, trying to lock dying object. The exception is in the flush + * code when we take node directly from atom's capture list.*/ + reiser4_invalidate_lock(handle); + uncapture_znode(node); +} + +/* Check that internal item at @pointer really contains pointer to @child. 
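+ *
+ * (This is the cheap verification step behind find_child_ptr() below, which
+ * resolves the position of @child inside @parent in up to three attempts of
+ * increasing cost:
+ *
+ *	1. reuse the cached child->in_parent coordinate and verify it with
+ *	   check_tree_pointer();
+ *	2. node plugin ->lookup() of the child's left delimiting key in the
+ *	   parent, then verify again;
+ *	3. find_child_by_addr(): scan every unit of the parent, comparing
+ *	   block numbers.
+ *
+ * Only the block number comparison is authoritative, since left delimiting
+ * keys are not guaranteed to be unique; the key-based steps are just ways of
+ * finding a candidate quickly.)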
*/ +int check_tree_pointer(const coord_t * pointer /* would-be pointer to + * @child */ , + const znode * child /* child znode */ ) +{ + assert("nikita-1016", pointer != NULL); + assert("nikita-1017", child != NULL); + assert("nikita-1018", pointer->node != NULL); + + assert("nikita-1325", znode_is_any_locked(pointer->node)); + + assert("nikita-2985", + znode_get_level(pointer->node) == znode_get_level(child) + 1); + + coord_clear_iplug((coord_t *) pointer); + + if (coord_is_existing_unit(pointer)) { + item_plugin *iplug; + reiser4_block_nr addr; + + if (item_is_internal(pointer)) { + iplug = item_plugin_by_coord(pointer); + assert("vs-513", iplug->s.internal.down_link); + iplug->s.internal.down_link(pointer, NULL, &addr); + /* check that cached value is correct */ + if (disk_addr_eq(&addr, znode_get_block(child))) { + return NS_FOUND; + } + } + } + /* warning ("jmacd-1002", "tree pointer incorrect"); */ + return NS_NOT_FOUND; +} + +/* find coord of pointer to new @child in @parent. + + Find the &coord_t in the @parent where pointer to a given @child will + be in. + +*/ +int find_new_child_ptr(znode * parent /* parent znode, passed locked */ , + znode * + child UNUSED_ARG /* child znode, passed locked */ , + znode * left /* left brother of new node */ , + coord_t * result /* where result is stored in */ ) +{ + int ret; + + assert("nikita-1486", parent != NULL); + assert("nikita-1487", child != NULL); + assert("nikita-1488", result != NULL); + + ret = find_child_ptr(parent, left, result); + if (ret != NS_FOUND) { + warning("nikita-1489", "Cannot find brother position: %i", ret); + return RETERR(-EIO); + } else { + result->between = AFTER_UNIT; + return RETERR(NS_NOT_FOUND); + } +} + +/* find coord of pointer to @child in @parent. + + Find the &coord_t in the @parent where pointer to a given @child is in. + +*/ +int find_child_ptr(znode * parent /* parent znode, passed locked */ , + znode * child /* child znode, passed locked */ , + coord_t * result /* where result is stored in */ ) +{ + int lookup_res; + node_plugin *nplug; + /* left delimiting key of a child */ + reiser4_key ld; + reiser4_tree *tree; + + assert("nikita-934", parent != NULL); + assert("nikita-935", child != NULL); + assert("nikita-936", result != NULL); + assert("zam-356", znode_is_loaded(parent)); + + coord_init_zero(result); + result->node = parent; + + nplug = parent->nplug; + assert("nikita-939", nplug != NULL); + + tree = znode_get_tree(parent); + /* NOTE-NIKITA taking read-lock on tree here assumes that @result is + * not aliased to ->in_parent of some znode. Otherwise, + * parent_coord_to_coord() below would modify data protected by tree + * lock. */ + read_lock_tree(tree); + /* fast path. Try to use cached value. Lock tree to keep + node->pos_in_parent and pos->*_blocknr consistent. */ + if (child->in_parent.item_pos + 1 != 0) { + parent_coord_to_coord(&child->in_parent, result); + if (check_tree_pointer(result, child) == NS_FOUND) { + read_unlock_tree(tree); + return NS_FOUND; + } + + child->in_parent.item_pos = (unsigned short)~0; + } + read_unlock_tree(tree); + + /* is above failed, find some key from @child. We are looking for the + least key in a child. */ + read_lock_dk(tree); + ld = *znode_get_ld_key(child); + read_unlock_dk(tree); + /* + * now, lookup parent with key just found. 
Note, that left delimiting + * key doesn't identify node uniquely, because (in extremely rare + * case) two nodes can have equal left delimiting keys, if one of them + * is completely filled with directory entries that all happened to be + * hash collision. But, we check block number in check_tree_pointer() + * and, so, are safe. + */ + lookup_res = nplug->lookup(parent, &ld, FIND_EXACT, result); + /* update cached pos_in_node */ + if (lookup_res == NS_FOUND) { + write_lock_tree(tree); + coord_to_parent_coord(result, &child->in_parent); + write_unlock_tree(tree); + lookup_res = check_tree_pointer(result, child); + } + if (lookup_res == NS_NOT_FOUND) + lookup_res = find_child_by_addr(parent, child, result); + return lookup_res; +} + +/* find coord of pointer to @child in @parent by scanning + + Find the &coord_t in the @parent where pointer to a given @child + is in by scanning all internal items in @parent and comparing block + numbers in them with that of @child. + +*/ +static int find_child_by_addr(znode * parent /* parent znode, passed locked */ , + znode * child /* child znode, passed locked */ , + coord_t * result /* where result is stored in */ ) +{ + int ret; + + assert("nikita-1320", parent != NULL); + assert("nikita-1321", child != NULL); + assert("nikita-1322", result != NULL); + + ret = NS_NOT_FOUND; + + for_all_units(result, parent) { + if (check_tree_pointer(result, child) == NS_FOUND) { + write_lock_tree(znode_get_tree(parent)); + coord_to_parent_coord(result, &child->in_parent); + write_unlock_tree(znode_get_tree(parent)); + ret = NS_FOUND; + break; + } + } + return ret; +} + +/* true, if @addr is "unallocated block number", which is just address, with + highest bit set. */ +int is_disk_addr_unallocated(const reiser4_block_nr * addr /* address to + * check */ ) +{ + assert("nikita-1766", addr != NULL); + cassert(sizeof(reiser4_block_nr) == 8); + return (*addr & REISER4_BLOCKNR_STATUS_BIT_MASK) == + REISER4_UNALLOCATED_STATUS_VALUE; +} + +/* returns true if removing bytes of given range of key [from_key, to_key] + causes removing of whole item @from */ +static int +item_removed_completely(coord_t * from, const reiser4_key * from_key, + const reiser4_key * to_key) +{ + item_plugin *iplug; + reiser4_key key_in_item; + + assert("umka-325", from != NULL); + assert("", item_is_extent(from)); + + /* check first key just for case */ + item_key_by_coord(from, &key_in_item); + if (keygt(from_key, &key_in_item)) + return 0; + + /* check last key */ + iplug = item_plugin_by_coord(from); + assert("vs-611", iplug && iplug->s.file.append_key); + + iplug->s.file.append_key(from, &key_in_item); + set_key_offset(&key_in_item, get_key_offset(&key_in_item) - 1); + + if (keylt(to_key, &key_in_item)) + /* last byte is not removed */ + return 0; + return 1; +} + +/* helper function for prepare_twig_kill(): @left and @right are formatted + * neighbors of extent item being completely removed. 
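+ *
+ * (A quick worked example of the item_removed_completely() test above, with
+ * hypothetical offsets: an extent item covering file offsets 4096..16383 has
+ * its first unit keyed at offset 4096 and ->append_key() at 16384, so the
+ * "last byte" key used by the test has offset 16383.  A kill of [0, 16383]
+ * therefore removes the item completely, while a kill of [0, 12287] leaves
+ * its tail in place and the function returns 0.)
+ *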
Load and lock neighbors + * and store lock handles into @cdata for later use by kill_hook_extent() */ +static int +prepare_children(znode * left, znode * right, carry_kill_data * kdata) +{ + int result; + int left_loaded; + int right_loaded; + + result = 0; + left_loaded = right_loaded = 0; + + if (left != NULL) { + result = zload(left); + if (result == 0) { + left_loaded = 1; + result = longterm_lock_znode(kdata->left, left, + ZNODE_READ_LOCK, + ZNODE_LOCK_LOPRI); + } + } + if (result == 0 && right != NULL) { + result = zload(right); + if (result == 0) { + right_loaded = 1; + result = longterm_lock_znode(kdata->right, right, + ZNODE_READ_LOCK, + ZNODE_LOCK_HIPRI | + ZNODE_LOCK_NONBLOCK); + } + } + if (result != 0) { + done_lh(kdata->left); + done_lh(kdata->right); + if (left_loaded != 0) + zrelse(left); + if (right_loaded != 0) + zrelse(right); + } + return result; +} + +static void done_children(carry_kill_data * kdata) +{ + if (kdata->left != NULL && kdata->left->node != NULL) { + zrelse(kdata->left->node); + done_lh(kdata->left); + } + if (kdata->right != NULL && kdata->right->node != NULL) { + zrelse(kdata->right->node); + done_lh(kdata->right); + } +} + +/* part of cut_node. It is called when cut_node is called to remove or cut part + of extent item. When head of that item is removed - we have to update right + delimiting of left neighbor of extent. When item is removed completely - we + have to set sibling link between left and right neighbor of removed + extent. This may return -E_DEADLOCK because of trying to get left neighbor + locked. So, caller should repeat an attempt +*/ +/* Audited by: umka (2002.06.16) */ +static int +prepare_twig_kill(carry_kill_data * kdata, znode * locked_left_neighbor) +{ + int result; + reiser4_key key; + lock_handle left_lh; + lock_handle right_lh; + coord_t left_coord; + coord_t *from; + znode *left_child; + znode *right_child; + reiser4_tree *tree; + int left_zloaded_here, right_zloaded_here; + + from = kdata->params.from; + assert("umka-326", from != NULL); + assert("umka-327", kdata->params.to != NULL); + + /* for one extent item only yet */ + assert("vs-591", item_is_extent(from)); + assert("vs-592", from->item_pos == kdata->params.to->item_pos); + + if ((kdata->params.from_key + && keygt(kdata->params.from_key, item_key_by_coord(from, &key))) + || from->unit_pos != 0) { + /* head of item @from is not removed, there is nothing to + worry about */ + return 0; + } + + result = 0; + left_zloaded_here = 0; + right_zloaded_here = 0; + + left_child = right_child = NULL; + + coord_dup(&left_coord, from); + init_lh(&left_lh); + init_lh(&right_lh); + if (coord_prev_unit(&left_coord)) { + /* @from is leftmost item in its node */ + if (!locked_left_neighbor) { + result = + reiser4_get_left_neighbor(&left_lh, from->node, + ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + switch (result) { + case 0: + break; + case -E_NO_NEIGHBOR: + /* there is no formatted node to the left of + from->node */ + warning("vs-605", + "extent item has smallest key in " + "the tree and it is about to be removed"); + return 0; + case -E_DEADLOCK: + /* need to restart */ + default: + return result; + } + + /* we have acquired left neighbor of from->node */ + result = zload(left_lh.node); + if (result) + goto done; + + locked_left_neighbor = left_lh.node; + } else { + /* squalloc_right_twig_cut should have supplied locked + * left neighbor */ + assert("vs-834", + znode_is_write_locked(locked_left_neighbor)); + result = zload(locked_left_neighbor); + if (result) + return result; + } + + 
left_zloaded_here = 1; + coord_init_last_unit(&left_coord, locked_left_neighbor); + } + + if (!item_is_internal(&left_coord)) { + /* what else but extent can be on twig level */ + assert("vs-606", item_is_extent(&left_coord)); + + /* there is no left formatted child */ + if (left_zloaded_here) + zrelse(locked_left_neighbor); + done_lh(&left_lh); + return 0; + } + + tree = znode_get_tree(left_coord.node); + left_child = child_znode(&left_coord, left_coord.node, 1, 0); + + if (IS_ERR(left_child)) { + result = PTR_ERR(left_child); + goto done; + } + + /* left child is acquired, calculate new right delimiting key for it + and get right child if it is necessary */ + if (item_removed_completely + (from, kdata->params.from_key, kdata->params.to_key)) { + /* try to get right child of removed item */ + coord_t right_coord; + + assert("vs-607", + kdata->params.to->unit_pos == + coord_last_unit_pos(kdata->params.to)); + coord_dup(&right_coord, kdata->params.to); + if (coord_next_unit(&right_coord)) { + /* @to is rightmost unit in the node */ + result = + reiser4_get_right_neighbor(&right_lh, from->node, + ZNODE_READ_LOCK, + GN_CAN_USE_UPPER_LEVELS); + switch (result) { + case 0: + result = zload(right_lh.node); + if (result) + goto done; + + right_zloaded_here = 1; + coord_init_first_unit(&right_coord, + right_lh.node); + item_key_by_coord(&right_coord, &key); + break; + + case -E_NO_NEIGHBOR: + /* there is no formatted node to the right of + from->node */ + read_lock_dk(tree); + key = *znode_get_rd_key(from->node); + read_unlock_dk(tree); + right_coord.node = NULL; + result = 0; + break; + default: + /* real error */ + goto done; + } + } else { + /* there is an item to the right of @from - take its key */ + item_key_by_coord(&right_coord, &key); + } + + /* try to get right child of @from */ + if (right_coord.node && /* there is right neighbor of @from */ + item_is_internal(&right_coord)) { /* it is internal item */ + right_child = child_znode(&right_coord, + right_coord.node, 1, 0); + + if (IS_ERR(right_child)) { + result = PTR_ERR(right_child); + goto done; + } + + } + /* whole extent is removed between znodes left_child and right_child. Prepare them for linking and + update of right delimiting key of left_child */ + result = prepare_children(left_child, right_child, kdata); + } else { + /* head of item @to is removed. left_child has to get right delimting key update. Prepare it for that */ + result = prepare_children(left_child, NULL, kdata); + } + + done: + if (right_child) + zput(right_child); + if (right_zloaded_here) + zrelse(right_lh.node); + done_lh(&right_lh); + + if (left_child) + zput(left_child); + if (left_zloaded_here) + zrelse(locked_left_neighbor); + done_lh(&left_lh); + return result; +} + +/* this is used to remove part of node content between coordinates @from and @to. 
Units to which @from and @to are set + are to be cut completely */ +/* for try_to_merge_with_left, delete_copied, reiser4_delete_node */ +int cut_node_content(coord_t * from, coord_t * to, const reiser4_key * from_key, /* first key to be removed */ + const reiser4_key * to_key, /* last key to be removed */ + reiser4_key * + smallest_removed /* smallest key actually removed */ ) +{ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_cut_data *cut_data; + carry_op *op; + + assert("vs-1715", coord_compare(from, to) != COORD_CMP_ON_RIGHT); + + pool = + init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(*cut_data)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + op = reiser4_post_carry(lowest_level, COP_CUT, from->node, 0); + assert("vs-1509", op != 0); + if (IS_ERR(op)) { + done_carry_pool(pool); + return PTR_ERR(op); + } + + cut_data = (carry_cut_data *) (lowest_level + 3); + cut_data->params.from = from; + cut_data->params.to = to; + cut_data->params.from_key = from_key; + cut_data->params.to_key = to_key; + cut_data->params.smallest_removed = smallest_removed; + + op->u.cut_or_kill.is_cut = 1; + op->u.cut_or_kill.u.cut = cut_data; + + result = reiser4_carry(lowest_level, NULL); + done_carry_pool(pool); + + return result; +} + +/* cut part of the node + + Cut part or whole content of node. + + cut data between @from and @to of @from->node and call carry() to make + corresponding changes in the tree. @from->node may become empty. If so - + pointer to it will be removed. Neighboring nodes are not changed. Smallest + removed key is stored in @smallest_removed + +*/ +int kill_node_content(coord_t * from, /* coord of the first unit/item that will be eliminated */ + coord_t * to, /* coord of the last unit/item that will be eliminated */ + const reiser4_key * from_key, /* first key to be removed */ + const reiser4_key * to_key, /* last key to be removed */ + reiser4_key * smallest_removed, /* smallest key actually removed */ + znode * locked_left_neighbor, /* this is set when kill_node_content is called with left neighbor + * locked (in squalloc_right_twig_cut, namely) */ + struct inode *inode, /* inode of file whose item (or its part) is to be killed. 
This is necessary to + invalidate pages together with item pointing to them */ + int truncate) +{ /* this call is made for file truncate) */ + int result; + carry_pool *pool; + carry_level *lowest_level; + carry_kill_data *kdata; + lock_handle *left_child; + lock_handle *right_child; + carry_op *op; + + assert("umka-328", from != NULL); + assert("vs-316", !node_is_empty(from->node)); + assert("nikita-1812", coord_is_existing_unit(from) + && coord_is_existing_unit(to)); + + /* allocate carry_pool, 3 carry_level-s, carry_kill_data and structures for kill_hook_extent */ + pool = init_carry_pool(sizeof(*pool) + 3 * sizeof(*lowest_level) + + sizeof(carry_kill_data) + + 2 * sizeof(lock_handle) + + 5 * sizeof(reiser4_key) + 2 * sizeof(coord_t)); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + lowest_level = (carry_level *) (pool + 1); + init_carry_level(lowest_level, pool); + + kdata = (carry_kill_data *) (lowest_level + 3); + left_child = (lock_handle *) (kdata + 1); + right_child = left_child + 1; + + init_lh(left_child); + init_lh(right_child); + + kdata->params.from = from; + kdata->params.to = to; + kdata->params.from_key = from_key; + kdata->params.to_key = to_key; + kdata->params.smallest_removed = smallest_removed; + kdata->params.truncate = truncate; + kdata->flags = 0; + kdata->inode = inode; + kdata->left = left_child; + kdata->right = right_child; + /* memory for 5 reiser4_key and 2 coord_t will be used in kill_hook_extent */ + kdata->buf = (char *)(right_child + 1); + + if (znode_get_level(from->node) == TWIG_LEVEL && item_is_extent(from)) { + /* left child of extent item may have to get updated right + delimiting key and to get linked with right child of extent + @from if it will be removed completely */ + result = prepare_twig_kill(kdata, locked_left_neighbor); + if (result) { + done_children(kdata); + done_carry_pool(pool); + return result; + } + } + + op = reiser4_post_carry(lowest_level, COP_CUT, from->node, 0); + if (IS_ERR(op) || (op == NULL)) { + done_children(kdata); + done_carry_pool(pool); + return RETERR(op ? PTR_ERR(op) : -EIO); + } + + op->u.cut_or_kill.is_cut = 0; + op->u.cut_or_kill.u.kill = kdata; + + result = reiser4_carry(lowest_level, NULL); + + done_children(kdata); + done_carry_pool(pool); + return result; +} + +void +fake_kill_hook_tail(struct inode *inode, loff_t start, loff_t end, int truncate) +{ + if (reiser4_inode_get_flag(inode, REISER4_HAS_MMAP)) { + pgoff_t start_pg, end_pg; + + start_pg = start >> PAGE_SHIFT; + end_pg = (end - 1) >> PAGE_SHIFT; + + if ((start & (PAGE_SIZE - 1)) == 0) { + /* + * kill up to the page boundary. + */ + assert("vs-123456", start_pg == end_pg); + reiser4_invalidate_pages(inode->i_mapping, start_pg, 1, + truncate); + } else if (start_pg != end_pg) { + /* + * page boundary is within killed portion of node. + */ + assert("vs-654321", end_pg - start_pg == 1); + reiser4_invalidate_pages(inode->i_mapping, end_pg, + end_pg - start_pg, 1); + } + } + inode_sub_bytes(inode, end - start); +} + +/** + * Delete whole @node from the reiser4 tree without loading it. + * + * @left: locked left neighbor, + * @node: node to be deleted, + * @smallest_removed: leftmost key of deleted node, + * @object: inode pointer, if we truncate a file body. + * @truncate: true if called for file truncate. + * + * @return: 0 if success, error code otherwise. + * + * NOTE: if @object!=NULL we assume that @smallest_removed != NULL and it + * contains the right value of the smallest removed key from the previous + * cut_worker() iteration. 
This is needed for proper accounting of + * "i_blocks" and "i_bytes" fields of the @object. + */ +int reiser4_delete_node(znode * node, reiser4_key * smallest_removed, + struct inode *object, int truncate) +{ + lock_handle parent_lock; + coord_t cut_from; + coord_t cut_to; + reiser4_tree *tree; + int ret; + + assert("zam-937", node != NULL); + assert("zam-933", znode_is_write_locked(node)); + assert("zam-999", smallest_removed != NULL); + + init_lh(&parent_lock); + + ret = reiser4_get_parent(&parent_lock, node, ZNODE_WRITE_LOCK); + if (ret) + return ret; + + assert("zam-934", !znode_above_root(parent_lock.node)); + + ret = zload(parent_lock.node); + if (ret) + goto failed_nozrelse; + + ret = find_child_ptr(parent_lock.node, node, &cut_from); + if (ret) + goto failed; + + /* decrement child counter and set parent pointer to NULL before + deleting the list from parent node because of checks in + internal_kill_item_hook (we can delete the last item from the parent + node, the parent node is going to be deleted and its c_count should + be zero). */ + + tree = znode_get_tree(node); + write_lock_tree(tree); + init_parent_coord(&node->in_parent, NULL); + --parent_lock.node->c_count; + write_unlock_tree(tree); + + assert("zam-989", item_is_internal(&cut_from)); + + /* @node should be deleted after unlocking. */ + ZF_SET(node, JNODE_HEARD_BANSHEE); + + /* remove a pointer from the parent node to the node being deleted. */ + coord_dup(&cut_to, &cut_from); + /* FIXME: shouldn't this be kill_node_content */ + ret = cut_node_content(&cut_from, &cut_to, NULL, NULL, NULL); + if (ret) + /* FIXME(Zam): Should we re-connect the node to its parent if + * cut_node fails? */ + goto failed; + + { + reiser4_tree *tree = current_tree; + __u64 start_offset = 0, end_offset = 0; + + read_lock_tree(tree); + write_lock_dk(tree); + if (object) { + /* We use @smallest_removed and the left delimiting of + * the current node for @object->i_blocks, i_bytes + * calculation. We assume that the items after the + * *@smallest_removed key have been deleted from the + * file body. */ + start_offset = get_key_offset(znode_get_ld_key(node)); + end_offset = get_key_offset(smallest_removed); + } + + assert("zam-1021", znode_is_connected(node)); + if (node->left) + znode_set_rd_key(node->left, znode_get_rd_key(node)); + + *smallest_removed = *znode_get_ld_key(node); + + write_unlock_dk(tree); + read_unlock_tree(tree); + + if (object) { + /* we used to perform actions which are to be performed on items on their removal from tree in + special item method - kill_hook. Here for optimization reasons we avoid reading node + containing item we remove and can not call item's kill hook. Instead we call function which + does exactly the same things as tail kill hook in assumption that node we avoid reading + contains only one item and that item is a tail one. */ + fake_kill_hook_tail(object, start_offset, end_offset, + truncate); + } + } + failed: + zrelse(parent_lock.node); + failed_nozrelse: + done_lh(&parent_lock); + + return ret; +} + +static int can_delete(const reiser4_key *key, znode *node) +{ + int result; + + read_lock_dk(current_tree); + result = keyle(key, znode_get_ld_key(node)); + read_unlock_dk(current_tree); + return result; +} + +/** + * This subroutine is not optimal but implementation seems to + * be easier). 
+ * + * @tap: the point deletion process begins from, + * @from_key: the beginning of the deleted key range, + * @to_key: the end of the deleted key range, + * @smallest_removed: the smallest removed key, + * @truncate: true if called for file truncate. + * @progress: return true if a progress in file items deletions was made, + * @smallest_removed value is actual in that case. + * + * @return: 0 if success, error code otherwise, -E_REPEAT means that long + * reiser4_cut_tree operation was interrupted for allowing atom commit. + */ +int +cut_tree_worker_common(tap_t * tap, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed, struct inode *object, + int truncate, int *progress) +{ + lock_handle next_node_lock; + coord_t left_coord; + int result; + + assert("zam-931", tap->coord->node != NULL); + assert("zam-932", znode_is_write_locked(tap->coord->node)); + + *progress = 0; + init_lh(&next_node_lock); + + while (1) { + znode *node; /* node from which items are cut */ + node_plugin *nplug; /* node plugin for @node */ + + node = tap->coord->node; + + /* Move next_node_lock to the next node on the left. */ + result = + reiser4_get_left_neighbor(&next_node_lock, node, + ZNODE_WRITE_LOCK, + GN_CAN_USE_UPPER_LEVELS); + if (result != 0 && result != -E_NO_NEIGHBOR) + break; + /* Check can we delete the node as a whole. */ + if (*progress && znode_get_level(node) == LEAF_LEVEL && + can_delete(from_key, node)) { + result = reiser4_delete_node(node, smallest_removed, + object, truncate); + } else { + result = reiser4_tap_load(tap); + if (result) + return result; + + /* Prepare the second (right) point for cut_node() */ + if (*progress) + coord_init_last_unit(tap->coord, node); + + else if (item_plugin_by_coord(tap->coord)->b.lookup == + NULL) + /* set rightmost unit for the items without lookup method */ + tap->coord->unit_pos = + coord_last_unit_pos(tap->coord); + + nplug = node->nplug; + + assert("vs-686", nplug); + assert("vs-687", nplug->lookup); + + /* left_coord is leftmost unit cut from @node */ + result = nplug->lookup(node, from_key, + FIND_MAX_NOT_MORE_THAN, + &left_coord); + + if (IS_CBKERR(result)) + break; + + /* adjust coordinates so that they are set to existing units */ + if (coord_set_to_right(&left_coord) + || coord_set_to_left(tap->coord)) { + result = 0; + break; + } + + if (coord_compare(&left_coord, tap->coord) == + COORD_CMP_ON_RIGHT) { + /* keys from @from_key to @to_key are not in the tree */ + result = 0; + break; + } + + if (left_coord.item_pos != tap->coord->item_pos) { + /* do not allow to cut more than one item. It is added to solve problem of truncating + partially converted files. If file is partially converted there may exist a twig node + containing both internal item or items pointing to leaf nodes with formatting items + and extent item. We do not want to kill internal items being at twig node here + because cut_tree_worker assumes killing them from level level */ + coord_dup(&left_coord, tap->coord); + assert("vs-1652", + coord_is_existing_unit(&left_coord)); + left_coord.unit_pos = 0; + } + + /* cut data from one node */ + /* *smallest_removed = *reiser4_min_key(); */ + result = + kill_node_content(&left_coord, tap->coord, from_key, + to_key, smallest_removed, + next_node_lock.node, object, + truncate); + reiser4_tap_relse(tap); + } + if (result) + break; + + ++(*progress); + + /* Check whether all items with keys >= from_key were removed + * from the tree. 
*/ + if (keyle(smallest_removed, from_key)) + /* result = 0; */ + break; + + if (next_node_lock.node == NULL) + break; + + result = reiser4_tap_move(tap, &next_node_lock); + done_lh(&next_node_lock); + if (result) + break; + + /* Break long reiser4_cut_tree operation (deletion of a large + file) if atom requires commit. */ + if (*progress > CUT_TREE_MIN_ITERATIONS + && current_atom_should_commit()) { + result = -E_REPEAT; + break; + } + } + done_lh(&next_node_lock); + /* assert("vs-301", !keyeq(&smallest_removed, reiser4_min_key())); */ + return result; +} + +/* there is a fundamental problem with optimizing deletes: VFS does it + one file at a time. Another problem is that if an item can be + anything, then deleting items must be done one at a time. It just + seems clean to writes this to specify a from and a to key, and cut + everything between them though. */ + +/* use this function with care if deleting more than what is part of a single file. */ +/* do not use this when cutting a single item, it is suboptimal for that */ + +/* You are encouraged to write plugin specific versions of this. It + cannot be optimal for all plugins because it works item at a time, + and some plugins could sometimes work node at a time. Regular files + however are not optimizable to work node at a time because of + extents needing to free the blocks they point to. + + Optimizations compared to v3 code: + + It does not balance (that task is left to memory pressure code). + + Nodes are deleted only if empty. + + Uses extents. + + Performs read-ahead of formatted nodes whose contents are part of + the deletion. +*/ + +/** + * Delete everything from the reiser4 tree between two keys: @from_key and + * @to_key. + * + * @from_key: the beginning of the deleted key range, + * @to_key: the end of the deleted key range, + * @smallest_removed: the smallest removed key, + * @object: owner of cutting items. + * @truncate: true if called for file truncate. + * @progress: return true if a progress in file items deletions was made, + * @smallest_removed value is actual in that case. + * + * @return: 0 if success, error code otherwise, -E_REPEAT means that long cut_tree + * operation was interrupted for allowing atom commit . + */ + +int reiser4_cut_tree_object(reiser4_tree * tree, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed_p, + struct inode *object, int truncate, int *progress) +{ + lock_handle lock; + int result; + tap_t tap; + coord_t right_coord; + reiser4_key smallest_removed; + int (*cut_tree_worker) (tap_t *, const reiser4_key *, + const reiser4_key *, reiser4_key *, + struct inode *, int, int *); + STORE_COUNTERS; + + assert("umka-329", tree != NULL); + assert("umka-330", from_key != NULL); + assert("umka-331", to_key != NULL); + assert("zam-936", keyle(from_key, to_key)); + + if (smallest_removed_p == NULL) + smallest_removed_p = &smallest_removed; + + init_lh(&lock); + + do { + /* Find rightmost item to cut away from the tree. 
*/ + result = reiser4_object_lookup(object, to_key, &right_coord, + &lock, ZNODE_WRITE_LOCK, + FIND_MAX_NOT_MORE_THAN, + TWIG_LEVEL, LEAF_LEVEL, + CBK_UNIQUE, NULL /*ra_info */); + if (result != CBK_COORD_FOUND) + break; + if (object == NULL + || inode_file_plugin(object)->cut_tree_worker == NULL) + cut_tree_worker = cut_tree_worker_common; + else + cut_tree_worker = + inode_file_plugin(object)->cut_tree_worker; + reiser4_tap_init(&tap, &right_coord, &lock, ZNODE_WRITE_LOCK); + result = + cut_tree_worker(&tap, from_key, to_key, smallest_removed_p, + object, truncate, progress); + reiser4_tap_done(&tap); + + reiser4_preempt_point(); + + } while (0); + + done_lh(&lock); + + if (result) { + switch (result) { + case -E_NO_NEIGHBOR: + result = 0; + break; + case -E_DEADLOCK: + result = -E_REPEAT; + case -E_REPEAT: + case -ENOMEM: + case -ENOENT: + break; + default: + warning("nikita-2861", "failure: %i", result); + } + } + + CHECK_COUNTERS; + return result; +} + +/* repeat reiser4_cut_tree_object until everything is deleted. + * unlike cut_file_items, it does not end current transaction if -E_REPEAT + * is returned by cut_tree_object. */ +int reiser4_cut_tree(reiser4_tree * tree, const reiser4_key * from, + const reiser4_key * to, struct inode *inode, int truncate) +{ + int result; + int progress; + + do { + result = reiser4_cut_tree_object(tree, from, to, NULL, + inode, truncate, &progress); + } while (result == -E_REPEAT); + + return result; +} + +/* finishing reiser4 initialization */ +int reiser4_init_tree(reiser4_tree * tree /* pointer to structure being + * initialized */ , + const reiser4_block_nr * root_block /* address of a root block + * on a disk */ , + tree_level height /* height of a tree */ , + node_plugin * nplug /* default node plugin */ ) +{ + int result; + + assert("nikita-306", tree != NULL); + assert("nikita-307", root_block != NULL); + assert("nikita-308", height > 0); + assert("nikita-309", nplug != NULL); + assert("zam-587", tree->super != NULL); + assert("edward-171", get_current_context() != NULL); + /* + * We'll perform costly memory allocations for znode hash table, etc. + * So, set proper allocation flags + */ + get_current_context()->gfp_mask |= (__GFP_NOWARN); + + tree->root_block = *root_block; + tree->height = height; + tree->estimate_one_insert = calc_estimate_one_insert(height); + tree->nplug = nplug; + + tree->znode_epoch = 1ull; + + cbk_cache_init(&tree->cbk_cache); + + result = znodes_tree_init(tree); + if (result == 0) + result = jnodes_tree_init(tree); + if (result == 0) { + tree->uber = zget(tree, &UBER_TREE_ADDR, NULL, 0, + reiser4_ctx_gfp_mask_get()); + if (IS_ERR(tree->uber)) { + result = PTR_ERR(tree->uber); + tree->uber = NULL; + } + } + return result; +} + +/* release resources associated with @tree */ +void reiser4_done_tree(reiser4_tree * tree /* tree to release */ ) +{ + if (tree == NULL) + return; + + if (tree->uber != NULL) { + zput(tree->uber); + tree->uber = NULL; + } + znodes_tree_done(tree); + jnodes_tree_done(tree); + cbk_cache_done(&tree->cbk_cache); +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree.h b/fs/reiser4/tree.h new file mode 100644 index 000000000000..fbf8542f37b4 --- /dev/null +++ b/fs/reiser4/tree.h @@ -0,0 +1,577 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Tree operations. 
See fs/reiser4/tree.c for comments */ + +#if !defined( __REISER4_TREE_H__ ) +#define __REISER4_TREE_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "znode.h" +#include "tap.h" + +#include <linux/types.h> /* for __u?? */ +#include <linux/fs.h> /* for struct super_block */ +#include <linux/spinlock.h> +#include <linux/sched.h> /* for struct task_struct */ + +/* fictive block number never actually used */ +extern const reiser4_block_nr UBER_TREE_ADDR; + +/* &cbk_cache_slot - entry in a coord cache. + + This is entry in a coord_by_key (cbk) cache, represented by + &cbk_cache. + +*/ +typedef struct cbk_cache_slot { + /* cached node */ + znode *node; + /* linkage to the next cbk cache slot in a LRU order */ + struct list_head lru; +} cbk_cache_slot; + +/* &cbk_cache - coord cache. This is part of reiser4_tree. + + cbk_cache is supposed to speed up tree lookups by caching results of recent + successful lookups (we don't cache negative results as dentry cache + does). Cache consists of relatively small number of entries kept in a LRU + order. Each entry (&cbk_cache_slot) contains a pointer to znode, from + which we can obtain a range of keys that covered by this znode. Before + embarking into real tree traversal we scan cbk_cache slot by slot and for + each slot check whether key we are looking for is between minimal and + maximal keys for node pointed to by this slot. If no match is found, real + tree traversal is performed and if result is successful, appropriate entry + is inserted into cache, possibly pulling least recently used entry out of + it. + + Tree spin lock is used to protect coord cache. If contention for this + lock proves to be too high, more finer grained locking can be added. + + Invariants involving parts of this data-type: + + [cbk-cache-invariant] +*/ +typedef struct cbk_cache { + /* serializator */ + rwlock_t guard; + int nr_slots; + /* head of LRU list of cache slots */ + struct list_head lru; + /* actual array of slots */ + cbk_cache_slot *slot; +} cbk_cache; + +/* level_lookup_result - possible outcome of looking up key at some level. + This is used by coord_by_key when traversing tree downward. */ +typedef enum { + /* continue to the next level */ + LOOKUP_CONT, + /* done. Either required item was found, or we can prove it + doesn't exist, or some error occurred. */ + LOOKUP_DONE, + /* restart traversal from the root. Infamous "repetition". */ + LOOKUP_REST +} level_lookup_result; + +/* This is representation of internal reiser4 tree where all file-system + data and meta-data are stored. This structure is passed to all tree + manipulation functions. It's different from the super block because: + we don't want to limit ourselves to strictly one to one mapping + between super blocks and trees, and, because they are logically + different: there are things in a super block that have no relation to + the tree (bitmaps, journalling area, mount options, etc.) and there + are things in a tree that bear no relation to the super block, like + tree of znodes. + + At this time, there is only one tree + per filesystem, and this struct is part of the super block. We only + call the super block the super block for historical reasons (most + other filesystems call the per filesystem metadata the super block). +*/ + +struct reiser4_tree { + /* block_nr == 0 is fake znode. Write lock it, while changing + tree height. 
*/ + /* disk address of root node of a tree */ + reiser4_block_nr root_block; + + /* level of the root node. If this is 1, tree consists of root + node only */ + tree_level height; + + /* + * this is cached here avoid calling plugins through function + * dereference all the time. + */ + __u64 estimate_one_insert; + + /* cache of recent tree lookup results */ + cbk_cache cbk_cache; + + /* hash table to look up znodes by block number. */ + z_hash_table zhash_table; + z_hash_table zfake_table; + /* hash table to look up jnodes by inode and offset. */ + j_hash_table jhash_table; + + /* lock protecting: + - parent pointers, + - sibling pointers, + - znode hash table + - coord cache + */ + /* NOTE: The "giant" tree lock can be replaced by more spin locks, + hoping they will be less contented. We can use one spin lock per one + znode hash bucket. With adding of some code complexity, sibling + pointers can be protected by both znode spin locks. However it looks + more SMP scalable we should test this locking change on n-ways (n > + 4) SMP machines. Current 4-ways machine test does not show that tree + lock is contented and it is a bottleneck (2003.07.25). */ + + rwlock_t tree_lock; + + /* lock protecting delimiting keys */ + rwlock_t dk_lock; + + /* spin lock protecting znode_epoch */ + spinlock_t epoch_lock; + /* version stamp used to mark znode updates. See seal.[ch] for more + * information. */ + __u64 znode_epoch; + + znode *uber; + node_plugin *nplug; + struct super_block *super; + struct { + /* carry flags used for insertion of new nodes */ + __u32 new_node_flags; + /* carry flags used for insertion of new extents */ + __u32 new_extent_flags; + /* carry flags used for paste operations */ + __u32 paste_flags; + /* carry flags used for insert operations */ + __u32 insert_flags; + } carry; +}; + +extern int reiser4_init_tree(reiser4_tree * tree, + const reiser4_block_nr * root_block, + tree_level height, node_plugin * default_plugin); +extern void reiser4_done_tree(reiser4_tree * tree); + +/* cbk flags: options for coord_by_key() */ +typedef enum { + /* coord_by_key() is called for insertion. This is necessary because + of extents being located at the twig level. For explanation, see + comment just above is_next_item_internal(). + */ + CBK_FOR_INSERT = (1 << 0), + /* coord_by_key() is called with key that is known to be unique */ + CBK_UNIQUE = (1 << 1), + /* coord_by_key() can trust delimiting keys. This options is not user + accessible. coord_by_key() will set it automatically. It will be + only cleared by special-case in extents-on-the-twig-level handling + where it is necessary to insert item with a key smaller than + leftmost key in a node. This is necessary because of extents being + located at the twig level. For explanation, see comment just above + is_next_item_internal(). + */ + CBK_TRUST_DK = (1 << 2), + CBK_READA = (1 << 3), /* original: readahead leaves which contain items of certain file */ + CBK_READDIR_RA = (1 << 4), /* readdir: readahead whole directory and all its stat datas */ + CBK_DKSET = (1 << 5), + CBK_EXTENDED_COORD = (1 << 6), /* coord_t is actually */ + CBK_IN_CACHE = (1 << 7), /* node is already in cache */ + CBK_USE_CRABLOCK = (1 << 8) /* use crab_lock in stead of long term + * lock */ +} cbk_flags; + +/* insertion outcome. 
IBK = insert by key */ +typedef enum { + IBK_INSERT_OK = 0, + IBK_ALREADY_EXISTS = -EEXIST, + IBK_IO_ERROR = -EIO, + IBK_NO_SPACE = -E_NODE_FULL, + IBK_OOM = -ENOMEM +} insert_result; + +#define IS_CBKERR(err) ((err) != CBK_COORD_FOUND && (err) != CBK_COORD_NOTFOUND) + +typedef int (*tree_iterate_actor_t) (reiser4_tree * tree, coord_t * coord, + lock_handle * lh, void *arg); +extern int reiser4_iterate_tree(reiser4_tree * tree, coord_t * coord, + lock_handle * lh, + tree_iterate_actor_t actor, void *arg, + znode_lock_mode mode, int through_units_p); +extern int get_uber_znode(reiser4_tree * tree, znode_lock_mode mode, + znode_lock_request pri, lock_handle * lh); + +/* return node plugin of @node */ +static inline node_plugin *node_plugin_by_node(const znode * + node /* node to query */ ) +{ + assert("vs-213", node != NULL); + assert("vs-214", znode_is_loaded(node)); + + return node->nplug; +} + +/* number of items in @node */ +static inline pos_in_node_t node_num_items(const znode * node) +{ + assert("nikita-2754", znode_is_loaded(node)); + assert("nikita-2468", + node_plugin_by_node(node)->num_of_items(node) == node->nr_items); + + return node->nr_items; +} + +/* Return the number of items at the present node. Asserts coord->node != + NULL. */ +static inline unsigned coord_num_items(const coord_t * coord) +{ + assert("jmacd-9805", coord->node != NULL); + + return node_num_items(coord->node); +} + +/* true if @node is empty */ +static inline int node_is_empty(const znode * node) +{ + return node_num_items(node) == 0; +} + +typedef enum { + SHIFTED_SOMETHING = 0, + SHIFT_NO_SPACE = -E_NODE_FULL, + SHIFT_IO_ERROR = -EIO, + SHIFT_OOM = -ENOMEM, +} shift_result; + +extern node_plugin *node_plugin_by_coord(const coord_t * coord); +extern int is_coord_in_node(const coord_t * coord); +extern int key_in_node(const reiser4_key *, const coord_t *); +extern void coord_item_move_to(coord_t * coord, int items); +extern void coord_unit_move_to(coord_t * coord, int units); + +/* there are two types of repetitive accesses (ra): intra-syscall + (local) and inter-syscall (global). Local ra is used when + during single syscall we add/delete several items and units in the + same place in a tree. Note that plan-A fragments local ra by + separating stat-data and file body in key-space. Global ra is + used when user does repetitive modifications in the same place in a + tree. + + Our ra implementation serves following purposes: + 1 it affects balancing decisions so that next operation in a row + can be performed faster; + 2 it affects lower-level read-ahead in page-cache; + 3 it allows to avoid unnecessary lookups by maintaining some state + across several operations (this is only for local ra); + 4 it leaves room for lazy-micro-balancing: when we start a sequence of + operations they are performed without actually doing any intra-node + shifts, until we finish sequence or scope of sequence leaves + current node, only then we really pack node (local ra only). +*/ + +/* another thing that can be useful is to keep per-tree and/or + per-process cache of recent lookups. This cache can be organised as a + list of block numbers of formatted nodes sorted by starting key in + this node. Balancings should invalidate appropriate parts of this + cache. 
+*/ + +lookup_result coord_by_key(reiser4_tree * tree, const reiser4_key * key, + coord_t * coord, lock_handle * handle, + znode_lock_mode lock, lookup_bias bias, + tree_level lock_level, tree_level stop_level, + __u32 flags, ra_info_t *); + +lookup_result reiser4_object_lookup(struct inode *object, + const reiser4_key * key, + coord_t * coord, + lock_handle * lh, + znode_lock_mode lock_mode, + lookup_bias bias, + tree_level lock_level, + tree_level stop_level, + __u32 flags, ra_info_t * info); + +insert_result insert_by_key(reiser4_tree * tree, const reiser4_key * key, + reiser4_item_data * data, coord_t * coord, + lock_handle * lh, + tree_level stop_level, __u32 flags); +insert_result insert_by_coord(coord_t * coord, + reiser4_item_data * data, const reiser4_key * key, + lock_handle * lh, __u32); +insert_result insert_extent_by_coord(coord_t * coord, + reiser4_item_data * data, + const reiser4_key * key, lock_handle * lh); +int cut_node_content(coord_t * from, coord_t * to, const reiser4_key * from_key, + const reiser4_key * to_key, + reiser4_key * smallest_removed); +int kill_node_content(coord_t * from, coord_t * to, + const reiser4_key * from_key, const reiser4_key * to_key, + reiser4_key * smallest_removed, + znode * locked_left_neighbor, struct inode *inode, + int truncate); + +int reiser4_resize_item(coord_t * coord, reiser4_item_data * data, + reiser4_key * key, lock_handle * lh, cop_insert_flag); +int insert_into_item(coord_t * coord, lock_handle * lh, const reiser4_key * key, + reiser4_item_data * data, unsigned); +int reiser4_insert_flow(coord_t * coord, lock_handle * lh, flow_t * f); +int find_new_child_ptr(znode * parent, znode * child, znode * left, + coord_t * result); + +int shift_right_of_but_excluding_insert_coord(coord_t * insert_coord); +int shift_left_of_and_including_insert_coord(coord_t * insert_coord); + +void fake_kill_hook_tail(struct inode *, loff_t start, loff_t end, int); + +extern int cut_tree_worker_common(tap_t *, const reiser4_key *, + const reiser4_key *, reiser4_key *, + struct inode *, int, int *); +extern int reiser4_cut_tree_object(reiser4_tree *, const reiser4_key *, + const reiser4_key *, reiser4_key *, + struct inode *, int, int *); +extern int reiser4_cut_tree(reiser4_tree * tree, const reiser4_key * from, + const reiser4_key * to, struct inode *, int); + +extern int reiser4_delete_node(znode *, reiser4_key *, struct inode *, int); +extern int check_tree_pointer(const coord_t * pointer, const znode * child); +extern int find_new_child_ptr(znode * parent, znode * child UNUSED_ARG, + znode * left, coord_t * result); +extern int find_child_ptr(znode * parent, znode * child, coord_t * result); +extern int set_child_delimiting_keys(znode * parent, const coord_t * in_parent, + znode * child); +extern znode *child_znode(const coord_t * in_parent, znode * parent, + int incore_p, int setup_dkeys_p); + +extern int cbk_cache_init(cbk_cache * cache); +extern void cbk_cache_done(cbk_cache * cache); +extern void cbk_cache_invalidate(const znode * node, reiser4_tree * tree); + +extern char *sprint_address(const reiser4_block_nr * block); + +#if REISER4_DEBUG +extern void print_coord_content(const char *prefix, coord_t * p); +extern void reiser4_print_address(const char *prefix, + const reiser4_block_nr * block); +extern void print_tree_rec(const char *prefix, reiser4_tree * tree, + __u32 flags); +extern void check_dkeys(znode *node); +#else +#define print_coord_content(p, c) noop +#define reiser4_print_address(p, b) noop +#endif + +extern void 
forget_znode(lock_handle * handle); +extern int deallocate_znode(znode * node); + +extern int is_disk_addr_unallocated(const reiser4_block_nr * addr); + +/* struct used internally to pack all numerous arguments of tree lookup. + Used to avoid passing a lot of arguments to helper functions. */ +typedef struct cbk_handle { + /* tree we are in */ + reiser4_tree *tree; + /* key we are going after */ + const reiser4_key *key; + /* coord we will store result in */ + coord_t *coord; + /* type of lock to take on target node */ + znode_lock_mode lock_mode; + /* lookup bias. See comments at the declaration of lookup_bias */ + lookup_bias bias; + /* lock level: level starting from which tree traversal starts taking + * write locks. */ + tree_level lock_level; + /* level where search will stop. Either item will be found between + lock_level and stop_level, or CBK_COORD_NOTFOUND will be + returned. + */ + tree_level stop_level; + /* level we are currently at */ + tree_level level; + /* block number of @active node. Tree traversal operates on two + nodes: active and parent. */ + reiser4_block_nr block; + /* put here error message to be printed by caller */ + const char *error; + /* result passed back to caller */ + int result; + /* lock handles for active and parent */ + lock_handle *parent_lh; + lock_handle *active_lh; + reiser4_key ld_key; + reiser4_key rd_key; + /* flags, passed to the cbk routine. Bits of this bitmask are defined + in tree.h:cbk_flags enum. */ + __u32 flags; + ra_info_t *ra_info; + struct inode *object; +} cbk_handle; + +extern znode_lock_mode cbk_lock_mode(tree_level level, cbk_handle * h); + +/* eottl.c */ +extern int handle_eottl(cbk_handle *h, int *outcome); + +int lookup_multikey(cbk_handle * handle, int nr_keys); +int lookup_couple(reiser4_tree * tree, + const reiser4_key * key1, const reiser4_key * key2, + coord_t * coord1, coord_t * coord2, + lock_handle * lh1, lock_handle * lh2, + znode_lock_mode lock_mode, lookup_bias bias, + tree_level lock_level, tree_level stop_level, __u32 flags, + int *result1, int *result2); + +static inline void read_lock_tree(reiser4_tree *tree) +{ + /* check that tree is not locked */ + assert("", (LOCK_CNT_NIL(rw_locked_tree) && + LOCK_CNT_NIL(read_locked_tree) && + LOCK_CNT_NIL(write_locked_tree))); + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(spin_locked_stack))); + + read_lock(&(tree->tree_lock)); + + LOCK_CNT_INC(read_locked_tree); + LOCK_CNT_INC(rw_locked_tree); + LOCK_CNT_INC(spin_locked); +} + +static inline void read_unlock_tree(reiser4_tree *tree) +{ + assert("nikita-1375", LOCK_CNT_GTZ(read_locked_tree)); + assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_tree)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(read_locked_tree); + LOCK_CNT_DEC(rw_locked_tree); + LOCK_CNT_DEC(spin_locked); + + read_unlock(&(tree->tree_lock)); +} + +static inline void write_lock_tree(reiser4_tree *tree) +{ + /* check that tree is not locked */ + assert("", (LOCK_CNT_NIL(rw_locked_tree) && + LOCK_CNT_NIL(read_locked_tree) && + LOCK_CNT_NIL(write_locked_tree))); + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(spin_locked_stack))); + + write_lock(&(tree->tree_lock)); + + LOCK_CNT_INC(write_locked_tree); + LOCK_CNT_INC(rw_locked_tree); + LOCK_CNT_INC(spin_locked); +} + +static inline void write_unlock_tree(reiser4_tree *tree) 
+{ + assert("nikita-1375", LOCK_CNT_GTZ(write_locked_tree)); + assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_tree)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(write_locked_tree); + LOCK_CNT_DEC(rw_locked_tree); + LOCK_CNT_DEC(spin_locked); + + write_unlock(&(tree->tree_lock)); +} + +static inline void read_lock_dk(reiser4_tree *tree) +{ + /* check that dk is not locked */ + assert("", (LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(read_locked_dk) && + LOCK_CNT_NIL(write_locked_dk))); + /* check that spinlocks of lower priorities are not held */ + assert("", LOCK_CNT_NIL(spin_locked_stack)); + + read_lock(&((tree)->dk_lock)); + + LOCK_CNT_INC(read_locked_dk); + LOCK_CNT_INC(rw_locked_dk); + LOCK_CNT_INC(spin_locked); +} + +static inline void read_unlock_dk(reiser4_tree *tree) +{ + assert("nikita-1375", LOCK_CNT_GTZ(read_locked_dk)); + assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_dk)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(read_locked_dk); + LOCK_CNT_DEC(rw_locked_dk); + LOCK_CNT_DEC(spin_locked); + + read_unlock(&(tree->dk_lock)); +} + +static inline void write_lock_dk(reiser4_tree *tree) +{ + /* check that dk is not locked */ + assert("", (LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(read_locked_dk) && + LOCK_CNT_NIL(write_locked_dk))); + /* check that spinlocks of lower priorities are not held */ + assert("", LOCK_CNT_NIL(spin_locked_stack)); + + write_lock(&((tree)->dk_lock)); + + LOCK_CNT_INC(write_locked_dk); + LOCK_CNT_INC(rw_locked_dk); + LOCK_CNT_INC(spin_locked); +} + +static inline void write_unlock_dk(reiser4_tree *tree) +{ + assert("nikita-1375", LOCK_CNT_GTZ(write_locked_dk)); + assert("nikita-1376", LOCK_CNT_GTZ(rw_locked_dk)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(write_locked_dk); + LOCK_CNT_DEC(rw_locked_dk); + LOCK_CNT_DEC(spin_locked); + + write_unlock(&(tree->dk_lock)); +} + +/* estimate api. Implementation is in estimate.c */ +reiser4_block_nr estimate_one_insert_item(reiser4_tree *); +reiser4_block_nr estimate_one_insert_into_item(reiser4_tree *); +reiser4_block_nr estimate_insert_flow(tree_level); +reiser4_block_nr estimate_one_item_removal(reiser4_tree *); +reiser4_block_nr calc_estimate_one_insert(tree_level); +reiser4_block_nr estimate_dirty_cluster(struct inode *); +reiser4_block_nr estimate_insert_cluster(struct inode *); +reiser4_block_nr estimate_update_cluster(struct inode *); + +/* __REISER4_TREE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree_mod.c b/fs/reiser4/tree_mod.c new file mode 100644 index 000000000000..f9687df8ffb7 --- /dev/null +++ b/fs/reiser4/tree_mod.c @@ -0,0 +1,387 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* + * Functions to add/delete new nodes to/from the tree. + * + * Functions from this file are used by carry (see carry*) to handle: + * + * . insertion of new formatted node into tree + * + * . addition of new tree root, increasing tree height + * + * . 
removing tree root, decreasing tree height + * + */ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/plugin.h" +#include "jnode.h" +#include "znode.h" +#include "tree_mod.h" +#include "block_alloc.h" +#include "tree_walk.h" +#include "tree.h" +#include "super.h" + +#include <linux/err.h> + +static int add_child_ptr(znode * parent, znode * child); +/* warning only issued if error is not -E_REPEAT */ +#define ewarning( error, ... ) \ + if( ( error ) != -E_REPEAT ) \ + warning( __VA_ARGS__ ) + +/* allocate new node on the @level and immediately on the right of @brother. */ +znode * reiser4_new_node(znode * brother /* existing left neighbor + * of new node */, + tree_level level /* tree level at which new node is to + * be allocated */) +{ + znode *result; + int retcode; + reiser4_block_nr blocknr; + + assert("nikita-930", brother != NULL); + assert("umka-264", level < REAL_MAX_ZTREE_HEIGHT); + + retcode = assign_fake_blocknr_formatted(&blocknr); + if (retcode == 0) { + result = + zget(znode_get_tree(brother), &blocknr, NULL, level, + reiser4_ctx_gfp_mask_get()); + if (IS_ERR(result)) { + ewarning(PTR_ERR(result), "nikita-929", + "Cannot allocate znode for carry: %li", + PTR_ERR(result)); + return result; + } + /* cheap test, can be executed even when debugging is off */ + if (!znode_just_created(result)) { + warning("nikita-2213", + "Allocated already existing block: %llu", + (unsigned long long)blocknr); + zput(result); + return ERR_PTR(RETERR(-EIO)); + } + + assert("nikita-931", result != NULL); + result->nplug = znode_get_tree(brother)->nplug; + assert("nikita-933", result->nplug != NULL); + + retcode = zinit_new(result, reiser4_ctx_gfp_mask_get()); + if (retcode == 0) { + ZF_SET(result, JNODE_CREATED); + zrelse(result); + } else { + zput(result); + result = ERR_PTR(retcode); + } + } else { + /* failure to allocate new node during balancing. + This should never happen. Ever. Returning -E_REPEAT + is not viable solution, because "out of disk space" + is not transient error that will go away by itself. + */ + ewarning(retcode, "nikita-928", + "Cannot allocate block for carry: %i", retcode); + result = ERR_PTR(retcode); + } + assert("nikita-1071", result != NULL); + return result; +} + +/* allocate new root and add it to the tree + + This helper function is called by add_new_root(). + +*/ +znode *reiser4_add_tree_root(znode * old_root /* existing tree root */ , + znode * fake /* "fake" znode */ ) +{ + reiser4_tree *tree = znode_get_tree(old_root); + znode *new_root = NULL; /* to shut gcc up */ + int result; + + assert("nikita-1069", old_root != NULL); + assert("umka-262", fake != NULL); + assert("umka-263", tree != NULL); + + /* "fake" znode---one always hanging just above current root. This + node is locked when new root is created or existing root is + deleted. Downward tree traversal takes lock on it before taking + lock on a root node. This avoids race conditions with root + manipulations. + + */ + assert("nikita-1348", znode_above_root(fake)); + assert("nikita-1211", znode_is_root(old_root)); + + result = 0; + if (tree->height >= REAL_MAX_ZTREE_HEIGHT) { + warning("nikita-1344", "Tree is too tall: %i", tree->height); + /* ext2 returns -ENOSPC when it runs out of free inodes with a + following comment (fs/ext2/ialloc.c:441): Is it really + ENOSPC? + + -EXFULL? -EINVAL? + */ + result = RETERR(-ENOSPC); + } else { + /* Allocate block for new root. 
It's not that + important where it will be allocated, as root is + almost always in memory. Moreover, allocate on + flush can be going here. + */ + assert("nikita-1448", znode_is_root(old_root)); + new_root = reiser4_new_node(fake, tree->height + 1); + if (!IS_ERR(new_root) && (result = zload(new_root)) == 0) { + lock_handle rlh; + + init_lh(&rlh); + result = + longterm_lock_znode(&rlh, new_root, + ZNODE_WRITE_LOCK, + ZNODE_LOCK_LOPRI); + if (result == 0) { + parent_coord_t *in_parent; + + znode_make_dirty(fake); + + /* new root is a child of "fake" node */ + write_lock_tree(tree); + + ++tree->height; + + /* recalculate max balance overhead */ + tree->estimate_one_insert = + calc_estimate_one_insert(tree->height); + + tree->root_block = *znode_get_block(new_root); + in_parent = &new_root->in_parent; + init_parent_coord(in_parent, fake); + /* manually insert new root into sibling + * list. With this all nodes involved into + * balancing are connected after balancing is + * done---useful invariant to check. */ + sibling_list_insert_nolock(new_root, NULL); + write_unlock_tree(tree); + + /* insert into new root pointer to the + @old_root. */ + assert("nikita-1110", + WITH_DATA(new_root, + node_is_empty(new_root))); + write_lock_dk(tree); + znode_set_ld_key(new_root, reiser4_min_key()); + znode_set_rd_key(new_root, reiser4_max_key()); + write_unlock_dk(tree); + if (REISER4_DEBUG) { + ZF_CLR(old_root, JNODE_LEFT_CONNECTED); + ZF_CLR(old_root, JNODE_RIGHT_CONNECTED); + ZF_SET(old_root, JNODE_ORPHAN); + } + result = add_child_ptr(new_root, old_root); + done_lh(&rlh); + } + zrelse(new_root); + } + } + if (result != 0) + new_root = ERR_PTR(result); + return new_root; +} + +/* build &reiser4_item_data for inserting child pointer + + Build &reiser4_item_data that can be later used to insert pointer to @child + in its parent. + +*/ +void build_child_ptr_data(znode * child /* node pointer to which will be + * inserted */ , + reiser4_item_data * data /* where to store result */ ) +{ + assert("nikita-1116", child != NULL); + assert("nikita-1117", data != NULL); + + /* + * NOTE: use address of child's blocknr as address of data to be + * inserted. As result of this data gets into on-disk structure in cpu + * byte order. internal's create_hook converts it to little endian byte + * order. + */ + data->data = (char *)znode_get_block(child); + /* data -> data is kernel space */ + data->user = 0; + data->length = sizeof(reiser4_block_nr); + /* FIXME-VS: hardcoded internal item? */ + + /* AUDIT: Is it possible that "item_plugin_by_id" may find nothing? */ + data->iplug = item_plugin_by_id(NODE_POINTER_ID); +} + +/* add pointer to @child into empty @parent. + + This is used when pointer to old root is inserted into new root which is + empty. 
+*/ +static int add_child_ptr(znode * parent, znode * child) +{ + coord_t coord; + reiser4_item_data data; + int result; + reiser4_key key; + + assert("nikita-1111", parent != NULL); + assert("nikita-1112", child != NULL); + assert("nikita-1115", + znode_get_level(parent) == znode_get_level(child) + 1); + + result = zload(parent); + if (result != 0) + return result; + assert("nikita-1113", node_is_empty(parent)); + coord_init_first_unit(&coord, parent); + + build_child_ptr_data(child, &data); + data.arg = NULL; + + read_lock_dk(znode_get_tree(parent)); + key = *znode_get_ld_key(child); + read_unlock_dk(znode_get_tree(parent)); + + result = node_plugin_by_node(parent)->create_item(&coord, &key, &data, + NULL); + znode_make_dirty(parent); + zrelse(parent); + return result; +} + +/* actually remove tree root */ +static int reiser4_kill_root(reiser4_tree * tree /* tree from which root is + * being removed */, + znode * old_root /* root node that is being + * removed */ , + znode * new_root /* new root---sole child of + * @old_root */, + const reiser4_block_nr * new_root_blk /* disk address of + * @new_root */) +{ + znode *uber; + int result; + lock_handle handle_for_uber; + + assert("umka-265", tree != NULL); + assert("nikita-1198", new_root != NULL); + assert("nikita-1199", + znode_get_level(new_root) + 1 == znode_get_level(old_root)); + + assert("nikita-1201", znode_is_write_locked(old_root)); + + assert("nikita-1203", + disk_addr_eq(new_root_blk, znode_get_block(new_root))); + + init_lh(&handle_for_uber); + /* obtain and lock "fake" znode protecting changes in tree height. */ + result = get_uber_znode(tree, ZNODE_WRITE_LOCK, ZNODE_LOCK_HIPRI, + &handle_for_uber); + if (result == 0) { + uber = handle_for_uber.node; + + znode_make_dirty(uber); + + /* don't take long term lock a @new_root. Take spinlock. */ + + write_lock_tree(tree); + + tree->root_block = *new_root_blk; + --tree->height; + + /* recalculate max balance overhead */ + tree->estimate_one_insert = + calc_estimate_one_insert(tree->height); + + assert("nikita-1202", + tree->height == znode_get_level(new_root)); + + /* new root is child on "fake" node */ + init_parent_coord(&new_root->in_parent, uber); + ++uber->c_count; + + /* sibling_list_insert_nolock(new_root, NULL); */ + write_unlock_tree(tree); + + /* reinitialise old root. */ + result = init_znode(ZJNODE(old_root)); + znode_make_dirty(old_root); + if (result == 0) { + assert("nikita-1279", node_is_empty(old_root)); + ZF_SET(old_root, JNODE_HEARD_BANSHEE); + old_root->c_count = 0; + } + } + done_lh(&handle_for_uber); + + return result; +} + +/* remove tree root + + This function removes tree root, decreasing tree height by one. Tree root + and its only child (that is going to become new tree root) are write locked + at the entry. + + To remove tree root we need to take lock on special "fake" znode that + protects changes of tree height. See comments in reiser4_add_tree_root() for + more on this. + + Also parent pointers have to be updated in + old and new root. To simplify code, function is split into two parts: outer + reiser4_kill_tree_root() collects all necessary arguments and calls + reiser4_kill_root() to do the actual job. 
+ +*/ +int reiser4_kill_tree_root(znode * old_root /* tree root that we are + removing*/) +{ + int result; + coord_t down_link; + znode *new_root; + reiser4_tree *tree; + + assert("umka-266", current_tree != NULL); + assert("nikita-1194", old_root != NULL); + assert("nikita-1196", znode_is_root(old_root)); + assert("nikita-1200", node_num_items(old_root) == 1); + assert("nikita-1401", znode_is_write_locked(old_root)); + + coord_init_first_unit(&down_link, old_root); + + tree = znode_get_tree(old_root); + new_root = child_znode(&down_link, old_root, 0, 1); + if (!IS_ERR(new_root)) { + result = + reiser4_kill_root(tree, old_root, new_root, + znode_get_block(new_root)); + zput(new_root); + } else + result = PTR_ERR(new_root); + + return result; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree_mod.h b/fs/reiser4/tree_mod.h new file mode 100644 index 000000000000..151964117f26 --- /dev/null +++ b/fs/reiser4/tree_mod.h @@ -0,0 +1,29 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Functions to add/delete new nodes to/from the tree. See tree_mod.c for + * comments. */ + +#if !defined( __REISER4_TREE_MOD_H__ ) +#define __REISER4_TREE_MOD_H__ + +#include "forward.h" + +znode *reiser4_new_node(znode * brother, tree_level level); +znode *reiser4_add_tree_root(znode * old_root, znode * fake); +int reiser4_kill_tree_root(znode * old_root); +void build_child_ptr_data(znode * child, reiser4_item_data * data); + +/* __REISER4_TREE_MOD_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/tree_walk.c b/fs/reiser4/tree_walk.c new file mode 100644 index 000000000000..cde4875b4481 --- /dev/null +++ b/fs/reiser4/tree_walk.c @@ -0,0 +1,927 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Routines and macros to: + + get_left_neighbor() + + get_right_neighbor() + + get_parent() + + get_first_child() + + get_last_child() + + various routines to walk the whole tree and do things to it like + repack it, or move it to tertiary storage. Please make them as + generic as is reasonable. + +*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "jnode.h" +#include "znode.h" +#include "tree_walk.h" +#include "tree.h" +#include "super.h" + +/* These macros are used internally in tree_walk.c in attempt to make + lock_neighbor() code usable to build lock_parent(), lock_right_neighbor, + lock_left_neighbor */ +#define GET_NODE_BY_PTR_OFFSET(node, off) (*(znode**)(((unsigned long)(node)) + (off))) +#define FIELD_OFFSET(name) offsetof(znode, name) +#define PARENT_PTR_OFFSET FIELD_OFFSET(in_parent.node) +#define LEFT_PTR_OFFSET FIELD_OFFSET(left) +#define RIGHT_PTR_OFFSET FIELD_OFFSET(right) + +/* This is the generic procedure to get and lock `generic' neighbor (left or + right neighbor or parent). It implements common algorithm for all cases of + getting lock on neighbor node, only znode structure field is different in + each case. This is parameterized by ptr_offset argument, which is byte + offset for the pointer to the desired neighbor within the current node's + znode structure. 
This function should be called with the tree lock held */ +static int lock_neighbor( + /* resulting lock handle */ + lock_handle * result, + /* znode to lock */ + znode * node, + /* pointer to neighbor (or parent) znode field offset, in bytes from + the base address of znode structure */ + int ptr_offset, + /* lock mode for longterm_lock_znode call */ + znode_lock_mode mode, + /* lock request for longterm_lock_znode call */ + znode_lock_request req, + /* GN_* flags */ + int flags, int rlocked) +{ + reiser4_tree *tree = znode_get_tree(node); + znode *neighbor; + int ret; + + assert("umka-236", node != NULL); + assert("umka-237", tree != NULL); + assert_rw_locked(&(tree->tree_lock)); + + if (flags & GN_TRY_LOCK) + req |= ZNODE_LOCK_NONBLOCK; + if (flags & GN_SAME_ATOM) + req |= ZNODE_LOCK_DONT_FUSE; + + /* get neighbor's address by using of sibling link, quit while loop + (and return) if link is not available. */ + while (1) { + neighbor = GET_NODE_BY_PTR_OFFSET(node, ptr_offset); + + /* return -E_NO_NEIGHBOR if parent or side pointer is NULL or if + * node pointed by it is not connected. + * + * However, GN_ALLOW_NOT_CONNECTED option masks "connected" + * check and allows passing reference to not connected znode to + * subsequent longterm_lock_znode() call. This kills possible + * busy loop if we are trying to get longterm lock on locked but + * not yet connected parent node. */ + if (neighbor == NULL || !((flags & GN_ALLOW_NOT_CONNECTED) + || znode_is_connected(neighbor))) { + return RETERR(-E_NO_NEIGHBOR); + } + + /* protect it from deletion. */ + zref(neighbor); + + rlocked ? read_unlock_tree(tree) : write_unlock_tree(tree); + + ret = longterm_lock_znode(result, neighbor, mode, req); + + /* The lock handle obtains its own reference, release the one from above. */ + zput(neighbor); + + rlocked ? read_lock_tree(tree) : write_lock_tree(tree); + + /* restart if node we got reference to is being + invalidated. we should not get reference to this node + again. */ + if (ret == -EINVAL) + continue; + if (ret) + return ret; + + /* check if neighbor link still points to just locked znode; + the link could have been changed while the process slept. */ + if (neighbor == GET_NODE_BY_PTR_OFFSET(node, ptr_offset)) + return 0; + + /* znode was locked by mistake; unlock it and restart locking + process from beginning. */ + rlocked ? read_unlock_tree(tree) : write_unlock_tree(tree); + longterm_unlock_znode(result); + rlocked ? read_lock_tree(tree) : write_lock_tree(tree); + } +} + +/* get parent node with longterm lock, accepts GN* flags. 
*/ +int reiser4_get_parent_flags(lock_handle * lh /* resulting lock handle */ , + znode * node /* child node */ , + znode_lock_mode mode + /* type of lock: read or write */ , + int flags /* GN_* flags */ ) +{ + int result; + + read_lock_tree(znode_get_tree(node)); + result = lock_neighbor(lh, node, PARENT_PTR_OFFSET, mode, + ZNODE_LOCK_HIPRI, flags, 1); + read_unlock_tree(znode_get_tree(node)); + return result; +} + +/* wrapper function to lock right or left neighbor depending on GN_GO_LEFT + bit in @flags parameter */ +/* Audited by: umka (2002.06.14) */ +static inline int +lock_side_neighbor(lock_handle * result, + znode * node, znode_lock_mode mode, int flags, int rlocked) +{ + int ret; + int ptr_offset; + znode_lock_request req; + + if (flags & GN_GO_LEFT) { + ptr_offset = LEFT_PTR_OFFSET; + req = ZNODE_LOCK_LOPRI; + } else { + ptr_offset = RIGHT_PTR_OFFSET; + req = ZNODE_LOCK_HIPRI; + } + + ret = + lock_neighbor(result, node, ptr_offset, mode, req, flags, rlocked); + + if (ret == -E_NO_NEIGHBOR) /* if we walk left or right -E_NO_NEIGHBOR does not + * guarantee that neighbor is absent in the + * tree; in this case we return -ENOENT -- + * means neighbor at least not found in + * cache */ + return RETERR(-ENOENT); + + return ret; +} + +#if REISER4_DEBUG + +int check_sibling_list(znode * node) +{ + znode *scan; + znode *next; + + assert("nikita-3283", LOCK_CNT_GTZ(write_locked_tree)); + + if (node == NULL) + return 1; + + if (ZF_ISSET(node, JNODE_RIP)) + return 1; + + assert("nikita-3270", node != NULL); + assert_rw_write_locked(&(znode_get_tree(node)->tree_lock)); + + for (scan = node; znode_is_left_connected(scan); scan = next) { + next = scan->left; + if (next != NULL && !ZF_ISSET(next, JNODE_RIP)) { + assert("nikita-3271", znode_is_right_connected(next)); + assert("nikita-3272", next->right == scan); + } else + break; + } + for (scan = node; znode_is_right_connected(scan); scan = next) { + next = scan->right; + if (next != NULL && !ZF_ISSET(next, JNODE_RIP)) { + assert("nikita-3273", znode_is_left_connected(next)); + assert("nikita-3274", next->left == scan); + } else + break; + } + return 1; +} + +#endif + +/* Znode sibling pointers maintenence. */ + +/* Znode sibling pointers are established between any neighbored nodes which are + in cache. There are two znode state bits (JNODE_LEFT_CONNECTED, + JNODE_RIGHT_CONNECTED), if left or right sibling pointer contains actual + value (even NULL), corresponded JNODE_*_CONNECTED bit is set. + + Reiser4 tree operations which may allocate new znodes (CBK, tree balancing) + take care about searching (hash table lookup may be required) of znode + neighbors, establishing sibling pointers between them and setting + JNODE_*_CONNECTED state bits. */ + +/* adjusting of sibling pointers and `connected' states for two + neighbors; works if one neighbor is NULL (was not found). 
*/ + +/* FIXME-VS: this is unstatic-ed to use in tree.c in prepare_twig_cut */ +void link_left_and_right(znode * left, znode * right) +{ + assert("nikita-3275", check_sibling_list(left)); + assert("nikita-3275", check_sibling_list(right)); + + if (left != NULL) { + if (left->right == NULL) { + left->right = right; + ZF_SET(left, JNODE_RIGHT_CONNECTED); + + ON_DEBUG(left->right_version = + atomic_inc_return(&delim_key_version); + ); + + } else if (ZF_ISSET(left->right, JNODE_HEARD_BANSHEE) + && left->right != right) { + + ON_DEBUG(left->right->left_version = + atomic_inc_return(&delim_key_version); + left->right_version = + atomic_inc_return(&delim_key_version);); + + left->right->left = NULL; + left->right = right; + ZF_SET(left, JNODE_RIGHT_CONNECTED); + } else + /* + * there is a race condition in renew_sibling_link() + * and assertions below check that it is only one + * there. Thread T1 calls renew_sibling_link() without + * GN_NO_ALLOC flag. zlook() doesn't find neighbor + * node, but before T1 gets to the + * link_left_and_right(), another thread T2 creates + * neighbor node and connects it. check for + * left->right == NULL above protects T1 from + * overwriting correct left->right pointer installed + * by T2. + */ + assert("nikita-3302", + right == NULL || left->right == right); + } + if (right != NULL) { + if (right->left == NULL) { + right->left = left; + ZF_SET(right, JNODE_LEFT_CONNECTED); + + ON_DEBUG(right->left_version = + atomic_inc_return(&delim_key_version); + ); + + } else if (ZF_ISSET(right->left, JNODE_HEARD_BANSHEE) + && right->left != left) { + + ON_DEBUG(right->left->right_version = + atomic_inc_return(&delim_key_version); + right->left_version = + atomic_inc_return(&delim_key_version);); + + right->left->right = NULL; + right->left = left; + ZF_SET(right, JNODE_LEFT_CONNECTED); + + } else + assert("nikita-3303", + left == NULL || right->left == left); + } + assert("nikita-3275", check_sibling_list(left)); + assert("nikita-3275", check_sibling_list(right)); +} + +/* Audited by: umka (2002.06.14) */ +static void link_znodes(znode * first, znode * second, int to_left) +{ + if (to_left) + link_left_and_right(second, first); + else + link_left_and_right(first, second); +} + +/* getting of next (to left or to right, depend on gn_to_left bit in flags) + coord's unit position in horizontal direction, even across node + boundary. Should be called under tree lock, it protects nonexistence of + sibling link on parent level, if lock_side_neighbor() fails with + -ENOENT. */ +static int far_next_coord(coord_t * coord, lock_handle * handle, int flags) +{ + int ret; + znode *node; + reiser4_tree *tree; + + assert("umka-243", coord != NULL); + assert("umka-244", handle != NULL); + assert("zam-1069", handle->node == NULL); + + ret = + (flags & GN_GO_LEFT) ? coord_prev_unit(coord) : + coord_next_unit(coord); + if (!ret) + return 0; + + ret = + lock_side_neighbor(handle, coord->node, ZNODE_READ_LOCK, flags, 0); + if (ret) + return ret; + + node = handle->node; + tree = znode_get_tree(node); + write_unlock_tree(tree); + + coord_init_zero(coord); + + /* We avoid synchronous read here if it is specified by flag. */ + if ((flags & GN_ASYNC) && znode_page(handle->node) == NULL) { + ret = jstartio(ZJNODE(handle->node)); + if (!ret) + ret = -E_REPEAT; + goto error_locked; + } + + /* corresponded zrelse() should be called by the clients of + far_next_coord(), in place when this node gets unlocked. 
*/ + ret = zload(handle->node); + if (ret) + goto error_locked; + + if (flags & GN_GO_LEFT) + coord_init_last_unit(coord, node); + else + coord_init_first_unit(coord, node); + + if (0) { + error_locked: + longterm_unlock_znode(handle); + } + write_lock_tree(tree); + return ret; +} + +/* Very significant function which performs a step in horizontal direction + when sibling pointer is not available. Actually, it is only function which + does it. + Note: this function does not restore locking status at exit, + caller should does care about proper unlocking and zrelsing */ +static int +renew_sibling_link(coord_t * coord, lock_handle * handle, znode * child, + tree_level level, int flags, int *nr_locked) +{ + int ret; + int to_left = flags & GN_GO_LEFT; + reiser4_block_nr da; + /* parent of the neighbor node; we set it to parent until not sharing + of one parent between child and neighbor node is detected */ + znode *side_parent = coord->node; + reiser4_tree *tree = znode_get_tree(child); + znode *neighbor = NULL; + + assert("umka-245", coord != NULL); + assert("umka-246", handle != NULL); + assert("umka-247", child != NULL); + assert("umka-303", tree != NULL); + + init_lh(handle); + write_lock_tree(tree); + ret = far_next_coord(coord, handle, flags); + + if (ret) { + if (ret != -ENOENT) { + write_unlock_tree(tree); + return ret; + } + } else { + item_plugin *iplug; + + if (handle->node != NULL) { + (*nr_locked)++; + side_parent = handle->node; + } + + /* does coord object points to internal item? We do not + support sibling pointers between znode for formatted and + unformatted nodes and return -E_NO_NEIGHBOR in that case. */ + iplug = item_plugin_by_coord(coord); + if (!item_is_internal(coord)) { + link_znodes(child, NULL, to_left); + write_unlock_tree(tree); + /* we know there can't be formatted neighbor */ + return RETERR(-E_NO_NEIGHBOR); + } + write_unlock_tree(tree); + + iplug->s.internal.down_link(coord, NULL, &da); + + if (flags & GN_NO_ALLOC) { + neighbor = zlook(tree, &da); + } else { + neighbor = + zget(tree, &da, side_parent, level, + reiser4_ctx_gfp_mask_get()); + } + + if (IS_ERR(neighbor)) { + ret = PTR_ERR(neighbor); + return ret; + } + + if (neighbor) + /* update delimiting keys */ + set_child_delimiting_keys(coord->node, coord, neighbor); + + write_lock_tree(tree); + } + + if (likely(neighbor == NULL || + (znode_get_level(child) == znode_get_level(neighbor) + && child != neighbor))) + link_znodes(child, neighbor, to_left); + else { + warning("nikita-3532", + "Sibling nodes on the different levels: %i != %i\n", + znode_get_level(child), znode_get_level(neighbor)); + ret = RETERR(-EIO); + } + + write_unlock_tree(tree); + + /* if GN_NO_ALLOC isn't set we keep reference to neighbor znode */ + if (neighbor != NULL && (flags & GN_NO_ALLOC)) + /* atomic_dec(&ZJNODE(neighbor)->x_count); */ + zput(neighbor); + + return ret; +} + +/* This function is for establishing of one side relation. 
*/ +/* Audited by: umka (2002.06.14) */ +static int connect_one_side(coord_t * coord, znode * node, int flags) +{ + coord_t local; + lock_handle handle; + int nr_locked; + int ret; + + assert("umka-248", coord != NULL); + assert("umka-249", node != NULL); + + coord_dup_nocheck(&local, coord); + + init_lh(&handle); + + ret = + renew_sibling_link(&local, &handle, node, znode_get_level(node), + flags | GN_NO_ALLOC, &nr_locked); + + if (handle.node != NULL) { + /* complementary operations for zload() and lock() in far_next_coord() */ + zrelse(handle.node); + longterm_unlock_znode(&handle); + } + + /* we catch error codes which are not interesting for us because we + run renew_sibling_link() only for znode connection. */ + if (ret == -ENOENT || ret == -E_NO_NEIGHBOR) + return 0; + + return ret; +} + +/* if @child is not in `connected' state, performs hash searches for left and + right neighbor nodes and establishes horizontal sibling links */ +/* Audited by: umka (2002.06.14), umka (2002.06.15) */ +int connect_znode(coord_t * parent_coord, znode * child) +{ + reiser4_tree *tree = znode_get_tree(child); + int ret = 0; + + assert("zam-330", parent_coord != NULL); + assert("zam-331", child != NULL); + assert("zam-332", parent_coord->node != NULL); + assert("umka-305", tree != NULL); + + /* it is trivial to `connect' root znode because it can't have + neighbors */ + if (znode_above_root(parent_coord->node)) { + child->left = NULL; + child->right = NULL; + ZF_SET(child, JNODE_LEFT_CONNECTED); + ZF_SET(child, JNODE_RIGHT_CONNECTED); + + ON_DEBUG(child->left_version = + atomic_inc_return(&delim_key_version); + child->right_version = + atomic_inc_return(&delim_key_version);); + + return 0; + } + + /* load parent node */ + coord_clear_iplug(parent_coord); + ret = zload(parent_coord->node); + + if (ret != 0) + return ret; + + /* protect `connected' state check by tree_lock */ + read_lock_tree(tree); + + if (!znode_is_right_connected(child)) { + read_unlock_tree(tree); + /* connect right (default is right) */ + ret = connect_one_side(parent_coord, child, GN_NO_ALLOC); + if (ret) + goto zrelse_and_ret; + + read_lock_tree(tree); + } + + ret = znode_is_left_connected(child); + + read_unlock_tree(tree); + + if (!ret) { + ret = + connect_one_side(parent_coord, child, + GN_NO_ALLOC | GN_GO_LEFT); + } else + ret = 0; + + zrelse_and_ret: + zrelse(parent_coord->node); + + return ret; +} + +/* this function is like renew_sibling_link() but allocates neighbor node if + it doesn't exist and `connects' it. It may require making two steps in + horizontal direction, first one for neighbor node finding/allocation, + second one is for finding neighbor of neighbor to connect freshly allocated + znode. */ +/* Audited by: umka (2002.06.14), umka (2002.06.15) */ +static int +renew_neighbor(coord_t * coord, znode * node, tree_level level, int flags) +{ + coord_t local; + lock_handle empty[2]; + reiser4_tree *tree = znode_get_tree(node); + znode *neighbor = NULL; + int nr_locked = 0; + int ret; + + assert("umka-250", coord != NULL); + assert("umka-251", node != NULL); + assert("umka-307", tree != NULL); + assert("umka-308", level <= tree->height); + + /* umka (2002.06.14) + Here probably should be a check for given "level" validness. 
+ Something like assert("xxx-yyy", level < REAL_MAX_ZTREE_HEIGHT); + */ + + coord_dup(&local, coord); + + ret = + renew_sibling_link(&local, &empty[0], node, level, + flags & ~GN_NO_ALLOC, &nr_locked); + if (ret) + goto out; + + /* tree lock is not needed here because we keep parent node(s) locked + and reference to neighbor znode incremented */ + neighbor = (flags & GN_GO_LEFT) ? node->left : node->right; + + read_lock_tree(tree); + ret = znode_is_connected(neighbor); + read_unlock_tree(tree); + if (ret) { + ret = 0; + goto out; + } + + ret = + renew_sibling_link(&local, &empty[nr_locked], neighbor, level, + flags | GN_NO_ALLOC, &nr_locked); + /* second renew_sibling_link() call is used for znode connection only, + so we can live with these errors */ + if (-ENOENT == ret || -E_NO_NEIGHBOR == ret) + ret = 0; + + out: + + for (--nr_locked; nr_locked >= 0; --nr_locked) { + zrelse(empty[nr_locked].node); + longterm_unlock_znode(&empty[nr_locked]); + } + + if (neighbor != NULL) + /* decrement znode reference counter without actually + releasing it. */ + atomic_dec(&ZJNODE(neighbor)->x_count); + + return ret; +} + +/* + reiser4_get_neighbor() -- lock node's neighbor. + + reiser4_get_neighbor() locks node's neighbor (left or right one, depends on + given parameter) using sibling link to it. If sibling link is not available + (i.e. neighbor znode is not in cache) and flags allow read blocks, we go one + level up for information about neighbor's disk address. We lock node's + parent, if it is common parent for both 'node' and its neighbor, neighbor's + disk address is in next (to left or to right) down link from link that points + to original node. If not, we need to lock parent's neighbor, read its content + and take first(last) downlink with neighbor's disk address. That locking + could be done by using sibling link and lock_neighbor() function, if sibling + link exists. In another case we have to go level up again until we find + common parent or valid sibling link. Then go down + allocating/connecting/locking/reading nodes until neighbor of first one is + locked. + + @neighbor: result lock handle, + @node: a node which we lock neighbor of, + @lock_mode: lock mode {LM_READ, LM_WRITE}, + @flags: logical OR of {GN_*} (see description above) subset. + + @return: 0 if success, negative value if lock was impossible due to an error + or lack of neighbor node. 
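+
+   A rough usage sketch (illustrative only, not quoted from any caller): to
+   read-lock the left neighbor of a formatted node, allowing the walk to go
+   through the parent levels when the sibling pointer is absent:
+
+	lock_handle lh;
+
+	init_lh(&lh);
+	ret = reiser4_get_neighbor(&lh, node, ZNODE_READ_LOCK,
+				   GN_GO_LEFT | GN_CAN_USE_UPPER_LEVELS);
+	if (ret == 0) {
+		... use lh.node ...;
+		done_lh(&lh);
+	}
+
+   The reiser4_get_left_neighbor()/reiser4_get_right_neighbor() wrappers in
+   tree_walk.h do exactly this kind of flag juggling for their callers.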
+*/ + +/* Audited by: umka (2002.06.14), umka (2002.06.15) */ +int +reiser4_get_neighbor(lock_handle * neighbor, znode * node, + znode_lock_mode lock_mode, int flags) +{ + reiser4_tree *tree = znode_get_tree(node); + lock_handle path[REAL_MAX_ZTREE_HEIGHT]; + + coord_t coord; + + tree_level base_level; + tree_level h = 0; + int ret; + + assert("umka-252", tree != NULL); + assert("umka-253", neighbor != NULL); + assert("umka-254", node != NULL); + + base_level = znode_get_level(node); + + assert("umka-310", base_level <= tree->height); + + coord_init_zero(&coord); + + again: + /* first, we try to use simple lock_neighbor() which requires sibling + link existence */ + read_lock_tree(tree); + ret = lock_side_neighbor(neighbor, node, lock_mode, flags, 1); + read_unlock_tree(tree); + if (!ret) { + /* load znode content if it was specified */ + if (flags & GN_LOAD_NEIGHBOR) { + ret = zload(node); + if (ret) + longterm_unlock_znode(neighbor); + } + return ret; + } + + /* only -ENOENT means we may look upward and try to connect + @node with its neighbor (if @flags allow us to do it) */ + if (ret != -ENOENT || !(flags & GN_CAN_USE_UPPER_LEVELS)) + return ret; + + /* before establishing of sibling link we lock parent node; it is + required by renew_neighbor() to work. */ + init_lh(&path[0]); + ret = reiser4_get_parent(&path[0], node, ZNODE_READ_LOCK); + if (ret) + return ret; + if (znode_above_root(path[0].node)) { + longterm_unlock_znode(&path[0]); + return RETERR(-E_NO_NEIGHBOR); + } + + while (1) { + znode *child = (h == 0) ? node : path[h - 1].node; + znode *parent = path[h].node; + + ret = zload(parent); + if (ret) + break; + + ret = find_child_ptr(parent, child, &coord); + + if (ret) { + zrelse(parent); + break; + } + + /* try to establish missing sibling link */ + ret = renew_neighbor(&coord, child, h + base_level, flags); + + zrelse(parent); + + switch (ret) { + case 0: + /* unlocking of parent znode prevents simple + deadlock situation */ + done_lh(&path[h]); + + /* depend on tree level we stay on we repeat first + locking attempt ... */ + if (h == 0) + goto again; + + /* ... or repeat establishing of sibling link at + one level below. */ + --h; + break; + + case -ENOENT: + /* sibling link is not available -- we go + upward. */ + init_lh(&path[h + 1]); + ret = + reiser4_get_parent(&path[h + 1], parent, + ZNODE_READ_LOCK); + if (ret) + goto fail; + ++h; + if (znode_above_root(path[h].node)) { + ret = RETERR(-E_NO_NEIGHBOR); + goto fail; + } + break; + + case -E_DEADLOCK: + /* there was lock request from hi-pri locker. if + it is possible we unlock last parent node and + re-lock it again. */ + for (; reiser4_check_deadlock(); h--) { + done_lh(&path[h]); + if (h == 0) + goto fail; + } + + break; + + default: /* other errors. 
*/ + goto fail; + } + } + fail: + ON_DEBUG(check_lock_node_data(node)); + ON_DEBUG(check_lock_data()); + + /* unlock path */ + do { + /* FIXME-Zam: when we get here from case -E_DEADLOCK's goto + fail; path[0] is already done_lh-ed, therefore + longterm_unlock_znode(&path[h]); is not applicable */ + done_lh(&path[h]); + --h; + } while (h + 1 != 0); + + return ret; +} + +/* remove node from sibling list */ +/* Audited by: umka (2002.06.14) */ +void sibling_list_remove(znode * node) +{ + reiser4_tree *tree; + + tree = znode_get_tree(node); + assert("umka-255", node != NULL); + assert_rw_write_locked(&(tree->tree_lock)); + assert("nikita-3275", check_sibling_list(node)); + + write_lock_dk(tree); + if (znode_is_right_connected(node) && node->right != NULL && + znode_is_left_connected(node) && node->left != NULL) { + assert("zam-32245", + keyeq(znode_get_rd_key(node), + znode_get_ld_key(node->right))); + znode_set_rd_key(node->left, znode_get_ld_key(node->right)); + } + write_unlock_dk(tree); + + if (znode_is_right_connected(node) && node->right != NULL) { + assert("zam-322", znode_is_left_connected(node->right)); + node->right->left = node->left; + ON_DEBUG(node->right->left_version = + atomic_inc_return(&delim_key_version); + ); + } + if (znode_is_left_connected(node) && node->left != NULL) { + assert("zam-323", znode_is_right_connected(node->left)); + node->left->right = node->right; + ON_DEBUG(node->left->right_version = + atomic_inc_return(&delim_key_version); + ); + } + + ZF_CLR(node, JNODE_LEFT_CONNECTED); + ZF_CLR(node, JNODE_RIGHT_CONNECTED); + ON_DEBUG(node->left = node->right = NULL; + node->left_version = atomic_inc_return(&delim_key_version); + node->right_version = atomic_inc_return(&delim_key_version);); + assert("nikita-3276", check_sibling_list(node)); +} + +/* disconnect node from sibling list */ +void sibling_list_drop(znode * node) +{ + znode *right; + znode *left; + + assert("nikita-2464", node != NULL); + assert("nikita-3277", check_sibling_list(node)); + + right = node->right; + if (right != NULL) { + assert("nikita-2465", znode_is_left_connected(right)); + right->left = NULL; + ON_DEBUG(right->left_version = + atomic_inc_return(&delim_key_version); + ); + } + left = node->left; + if (left != NULL) { + assert("zam-323", znode_is_right_connected(left)); + left->right = NULL; + ON_DEBUG(left->right_version = + atomic_inc_return(&delim_key_version); + ); + } + ZF_CLR(node, JNODE_LEFT_CONNECTED); + ZF_CLR(node, JNODE_RIGHT_CONNECTED); + ON_DEBUG(node->left = node->right = NULL; + node->left_version = atomic_inc_return(&delim_key_version); + node->right_version = atomic_inc_return(&delim_key_version);); +} + +/* Insert new node into sibling list. Regular balancing inserts new node + after (at right side) existing and locked node (@before), except one case + of adding new tree root node. @before should be NULL in that case. 
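+   With a non-NULL @before the new znode is spliced in between @before and
+   before->right and gets both JNODE_*_CONNECTED bits set; with a NULL @before
+   (new root) its sibling pointers stay NULL, but it is still marked connected
+   on both sides.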
*/ +void sibling_list_insert_nolock(znode * new, znode * before) +{ + assert("zam-334", new != NULL); + assert("nikita-3298", !znode_is_left_connected(new)); + assert("nikita-3299", !znode_is_right_connected(new)); + assert("nikita-3300", new->left == NULL); + assert("nikita-3301", new->right == NULL); + assert("nikita-3278", check_sibling_list(new)); + assert("nikita-3279", check_sibling_list(before)); + + if (before != NULL) { + assert("zam-333", znode_is_connected(before)); + new->right = before->right; + new->left = before; + ON_DEBUG(new->right_version = + atomic_inc_return(&delim_key_version); + new->left_version = + atomic_inc_return(&delim_key_version);); + if (before->right != NULL) { + before->right->left = new; + ON_DEBUG(before->right->left_version = + atomic_inc_return(&delim_key_version); + ); + } + before->right = new; + ON_DEBUG(before->right_version = + atomic_inc_return(&delim_key_version); + ); + } else { + new->right = NULL; + new->left = NULL; + ON_DEBUG(new->right_version = + atomic_inc_return(&delim_key_version); + new->left_version = + atomic_inc_return(&delim_key_version);); + } + ZF_SET(new, JNODE_LEFT_CONNECTED); + ZF_SET(new, JNODE_RIGHT_CONNECTED); + assert("nikita-3280", check_sibling_list(new)); + assert("nikita-3281", check_sibling_list(before)); +} + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/tree_walk.h b/fs/reiser4/tree_walk.h new file mode 100644 index 000000000000..3d5f09f8cb54 --- /dev/null +++ b/fs/reiser4/tree_walk.h @@ -0,0 +1,125 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +/* definitions of reiser4 tree walk functions */ + +#ifndef __FS_REISER4_TREE_WALK_H__ +#define __FS_REISER4_TREE_WALK_H__ + +#include "debug.h" +#include "forward.h" + +/* establishes horizontal links between cached znodes */ +int connect_znode(coord_t * coord, znode * node); + +/* tree traversal functions (reiser4_get_parent(), reiser4_get_neighbor()) + have the following common arguments: + + return codes: + + @return : 0 - OK, + +ZAM-FIXME-HANS: wrong return code name. Change them all. + -ENOENT - neighbor is not in cache, what is detected by sibling + link absence. + + -E_NO_NEIGHBOR - we are sure that neighbor (or parent) node cannot be + found (because we are left-/right- most node of the + tree, for example). Also, this return code is for + reiser4_get_parent() when we see no parent link -- it + means that our node is root node. + + -E_DEADLOCK - deadlock detected (request from high-priority process + received), other error codes are conformed to + /usr/include/asm/errno.h . +*/ + +int +reiser4_get_parent_flags(lock_handle * result, znode * node, + znode_lock_mode mode, int flags); + +/* bits definition for reiser4_get_neighbor function `flags' arg. 
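+   The flags are combined by bitwise OR; for instance, the
+   reiser4_get_left_neighbor() wrapper below just adds GN_GO_LEFT to whatever
+   the caller passed, and a caller that also wants the neighbor's content in
+   memory would OR in GN_LOAD_NEIGHBOR as well.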
*/ +typedef enum { + /* If sibling pointer is NULL, this flag allows get_neighbor() to try to + * find not allocated not connected neigbor by going though upper + * levels */ + GN_CAN_USE_UPPER_LEVELS = 0x1, + /* locking left neighbor instead of right one */ + GN_GO_LEFT = 0x2, + /* automatically load neighbor node content */ + GN_LOAD_NEIGHBOR = 0x4, + /* return -E_REPEAT if can't lock */ + GN_TRY_LOCK = 0x8, + /* used internally in tree_walk.c, causes renew_sibling to not + allocate neighbor znode, but only search for it in znode cache */ + GN_NO_ALLOC = 0x10, + /* do not go across atom boundaries */ + GN_SAME_ATOM = 0x20, + /* allow to lock not connected nodes */ + GN_ALLOW_NOT_CONNECTED = 0x40, + /* Avoid synchronous jload, instead, call jstartio() and return -E_REPEAT. */ + GN_ASYNC = 0x80 +} znode_get_neigbor_flags; + +/* A commonly used wrapper for reiser4_get_parent_flags(). */ +static inline int reiser4_get_parent(lock_handle * result, znode * node, + znode_lock_mode mode) +{ + return reiser4_get_parent_flags(result, node, mode, + GN_ALLOW_NOT_CONNECTED); +} + +int reiser4_get_neighbor(lock_handle * neighbor, znode * node, + znode_lock_mode lock_mode, int flags); + +/* there are wrappers for most common usages of reiser4_get_neighbor() */ +static inline int +reiser4_get_left_neighbor(lock_handle * result, znode * node, int lock_mode, + int flags) +{ + return reiser4_get_neighbor(result, node, lock_mode, + flags | GN_GO_LEFT); +} + +static inline int +reiser4_get_right_neighbor(lock_handle * result, znode * node, int lock_mode, + int flags) +{ + ON_DEBUG(check_lock_node_data(node)); + ON_DEBUG(check_lock_data()); + return reiser4_get_neighbor(result, node, lock_mode, + flags & (~GN_GO_LEFT)); +} + +extern void sibling_list_remove(znode * node); +extern void sibling_list_drop(znode * node); +extern void sibling_list_insert_nolock(znode * new, znode * before); +extern void link_left_and_right(znode * left, znode * right); + +/* Functions called by tree_walk() when tree_walk() ... */ +struct tree_walk_actor { + /* ... meets a formatted node, */ + int (*process_znode) (tap_t *, void *); + /* ... meets an extent, */ + int (*process_extent) (tap_t *, void *); + /* ... begins tree traversal or repeats it after -E_REPEAT was returned by + * node or extent processing functions. */ + int (*before) (void *); +}; + +#if REISER4_DEBUG +int check_sibling_list(znode * node); +#else +#define check_sibling_list(n) (1) +#endif + +#endif /* __FS_REISER4_TREE_WALK_H__ */ + +/* + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/txnmgr.c b/fs/reiser4/txnmgr.c new file mode 100644 index 000000000000..d0de1887edae --- /dev/null +++ b/fs/reiser4/txnmgr.c @@ -0,0 +1,3163 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Joshua MacDonald wrote the first draft of this code. */ + +/* ZAM-LONGTERM-FIXME-HANS: The locking in this file is badly designed, and a +filesystem scales only as well as its worst locking design. You need to +substantially restructure this code. Josh was not as experienced a programmer +as you. Particularly review how the locking style differs from what you did +for znodes usingt hi-lo priority locking, and present to me an opinion on +whether the differences are well founded. */ + +/* I cannot help but to disagree with the sentiment above. 
Locking of + * transaction manager is _not_ badly designed, and, at the very least, is not + * the scaling bottleneck. Scaling bottleneck is _exactly_ hi-lo priority + * locking on znodes, especially on the root node of the tree. --nikita, + * 2003.10.13 */ + +/* The txnmgr is a set of interfaces that keep track of atoms and transcrash handles. The + txnmgr processes capture_block requests and manages the relationship between jnodes and + atoms through the various stages of a transcrash, and it also oversees the fusion and + capture-on-copy processes. The main difficulty with this task is maintaining a + deadlock-free lock ordering between atoms and jnodes/handles. The reason for the + difficulty is that jnodes, handles, and atoms contain pointer circles, and the cycle + must be broken. The main requirement is that atom-fusion be deadlock free, so once you + hold the atom_lock you may then wait to acquire any jnode or handle lock. This implies + that any time you check the atom-pointer of a jnode or handle and then try to lock that + atom, you must use trylock() and possibly reverse the order. + + This code implements the design documented at: + + http://namesys.com/txn-doc.html + +ZAM-FIXME-HANS: update v4.html to contain all of the information present in the above (but updated), and then remove the +above document and reference the new. Be sure to provide some credit to Josh. I already have some writings on this +topic in v4.html, but they are lacking in details present in the above. Cure that. Remember to write for the bright 12 +year old --- define all technical terms used. + +*/ + +/* Thoughts on the external transaction interface: + + In the current code, a TRANSCRASH handle is created implicitly by reiser4_init_context() (which + creates state that lasts for the duration of a system call and is called at the start + of ReiserFS methods implementing VFS operations), and closed by reiser4_exit_context(), + occupying the scope of a single system call. We wish to give certain applications an + interface to begin and close (commit) transactions. Since our implementation of + transactions does not yet support isolation, allowing an application to open a + transaction implies trusting it to later close the transaction. Part of the + transaction interface will be aimed at enabling that trust, but the interface for + actually using transactions is fairly narrow. + + BEGIN_TRANSCRASH: Returns a transcrash identifier. It should be possible to translate + this identifier into a string that a shell-script could use, allowing you to start a + transaction by issuing a command. Once open, the transcrash should be set in the task + structure, and there should be options (I suppose) to allow it to be carried across + fork/exec. A transcrash has several options: + + - READ_FUSING or WRITE_FUSING: The default policy is for txn-capture to capture only + on writes (WRITE_FUSING) and allow "dirty reads". If the application wishes to + capture on reads as well, it should set READ_FUSING. + + - TIMEOUT: Since a non-isolated transcrash cannot be undone, every transcrash must + eventually close (or else the machine must crash). If the application dies an + unexpected death with an open transcrash, for example, or if it hangs for a long + duration, one solution (to avoid crashing the machine) is to simply close it anyway. + This is a dangerous option, but it is one way to solve the problem until isolated + transcrashes are available for untrusted applications. 
+ + It seems to be what databases do, though it is unclear how one avoids a DoS attack + creating a vulnerability based on resource starvation. Guaranteeing that some + minimum amount of computational resources are made available would seem more correct + than guaranteeing some amount of time. When we again have someone to code the work, + this issue should be considered carefully. -Hans + + RESERVE_BLOCKS: A running transcrash should indicate to the transaction manager how + many dirty blocks it expects. The reserve_blocks interface should be called at a point + where it is safe for the application to fail, because the system may not be able to + grant the allocation and the application must be able to back-out. For this reason, + the number of reserve-blocks can also be passed as an argument to BEGIN_TRANSCRASH, but + the application may also wish to extend the allocation after beginning its transcrash. + + CLOSE_TRANSCRASH: The application closes the transcrash when it is finished making + modifications that require transaction protection. When isolated transactions are + supported the CLOSE operation is replaced by either COMMIT or ABORT. For example, if a + RESERVE_BLOCKS call fails for the application, it should "abort" by calling + CLOSE_TRANSCRASH, even though it really commits any changes that were made (which is + why, for safety, the application should call RESERVE_BLOCKS before making any changes). + + For actually implementing these out-of-system-call-scopped transcrashes, the + reiser4_context has a "txn_handle *trans" pointer that may be set to an open + transcrash. Currently there are no dynamically-allocated transcrashes, but there is a + "struct kmem_cache *_txnh_slab" created for that purpose in this file. +*/ + +/* Extending the other system call interfaces for future transaction features: + + Specialized applications may benefit from passing flags to the ordinary system call + interface such as read(), write(), or stat(). For example, the application specifies + WRITE_FUSING by default but wishes to add that a certain read() command should be + treated as READ_FUSING. But which read? Is it the directory-entry read, the stat-data + read, or the file-data read? These issues are straight-forward, but there are a lot of + them and adding the necessary flags-passing code will be tedious. + + When supporting isolated transactions, there is a corresponding READ_MODIFY_WRITE (RMW) + flag, which specifies that although it is a read operation being requested, a + write-lock should be taken. The reason is that read-locks are shared while write-locks + are exclusive, so taking a read-lock when a later-write is known in advance will often + leads to deadlock. If a reader knows it will write later, it should issue read + requests with the RMW flag set. +*/ + +/* + The znode/atom deadlock avoidance. + + FIXME(Zam): writing of this comment is in progress. + + The atom's special stage ASTAGE_CAPTURE_WAIT introduces a kind of atom's + long-term locking, which makes reiser4 locking scheme more complex. It had + deadlocks until we implement deadlock avoidance algorithms. That deadlocks + looked as the following: one stopped thread waits for a long-term lock on + znode, the thread who owns that lock waits when fusion with another atom will + be allowed. + + The source of the deadlocks is an optimization of not capturing index nodes + for read. Let's prove it. Suppose we have dumb node capturing scheme which + unconditionally captures each block before locking it. 
+ + That scheme has no deadlocks. Let's begin with the thread which stage is + ASTAGE_CAPTURE_WAIT and it waits for a znode lock. The thread can't wait for + a capture because it's stage allows fusion with any atom except which are + being committed currently. A process of atom commit can't deadlock because + atom commit procedure does not acquire locks and does not fuse with other + atoms. Reiser4 does capturing right before going to sleep inside the + longtertm_lock_znode() function, it means the znode which we want to lock is + already captured and its atom is in ASTAGE_CAPTURE_WAIT stage. If we + continue the analysis we understand that no one process in the sequence may + waits atom fusion. Thereby there are no deadlocks of described kind. + + The capturing optimization makes the deadlocks possible. A thread can wait a + lock which owner did not captured that node. The lock owner's current atom + is not fused with the first atom and it does not get a ASTAGE_CAPTURE_WAIT + state. A deadlock is possible when that atom meets another one which is in + ASTAGE_CAPTURE_WAIT already. + + The deadlock avoidance scheme includes two algorithms: + + First algorithm is used when a thread captures a node which is locked but not + captured by another thread. Those nodes are marked MISSED_IN_CAPTURE at the + moment we skip their capturing. If such a node (marked MISSED_IN_CAPTURE) is + being captured by a thread with current atom is in ASTAGE_CAPTURE_WAIT, the + routine which forces all lock owners to join with current atom is executed. + + Second algorithm does not allow to skip capturing of already captured nodes. + + Both algorithms together prevent waiting a longterm lock without atom fusion + with atoms of all lock owners, which is a key thing for getting atom/znode + locking deadlocks. +*/ + +/* + * Transactions and mmap(2). + * + * 1. Transactions are not supported for accesses through mmap(2), because + * this would effectively amount to user-level transactions whose duration + * is beyond control of the kernel. + * + * 2. That said, we still want to preserve some decency with regard to + * mmap(2). During normal write(2) call, following sequence of events + * happens: + * + * 1. page is created; + * + * 2. jnode is created, dirtied and captured into current atom. + * + * 3. extent is inserted and modified. + * + * Steps (2) and (3) take place under long term lock on the twig node. + * + * When file is accessed through mmap(2) page is always created during + * page fault. + * After this (in reiser4_readpage_dispatch()->reiser4_readpage_extent()): + * + * 1. if access is made to non-hole page new jnode is created, (if + * necessary) + * + * 2. if access is made to the hole page, jnode is not created (XXX + * not clear why). + * + * Also, even if page is created by write page fault it is not marked + * dirty immediately by handle_mm_fault(). Probably this is to avoid races + * with page write-out. + * + * Dirty bit installed by hardware is only transferred to the struct page + * later, when page is unmapped (in zap_pte_range(), or + * try_to_unmap_one()). + * + * So, with mmap(2) we have to handle following irksome situations: + * + * 1. there exists modified page (clean or dirty) without jnode + * + * 2. there exists modified page (clean or dirty) with clean jnode + * + * 3. clean page which is a part of atom can be transparently modified + * at any moment through mapping without becoming dirty. 
+ * + * (1) and (2) can lead to the out-of-memory situation: ->writepage() + * doesn't know what to do with such pages and ->sync_sb()/->writepages() + * don't see them, because these methods operate on atoms. + * + * (3) can lead to the loss of data: suppose we have dirty page with dirty + * captured jnode captured by some atom. As part of early flush (for + * example) page was written out. Dirty bit was cleared on both page and + * jnode. After this page is modified through mapping, but kernel doesn't + * notice and just discards page and jnode as part of commit. (XXX + * actually it doesn't, because to reclaim page ->releasepage() has to be + * called and before this dirty bit will be transferred to the struct + * page). + * + */ + +#include "debug.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "wander.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "page_cache.h" +#include "reiser4.h" +#include "vfs_ops.h" +#include "inode.h" +#include "flush.h" +#include "discard.h" + +#include <asm/atomic.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/pagemap.h> +#include <linux/writeback.h> +#include <linux/swap.h> /* for totalram_pages */ + +static void atom_free(txn_atom * atom); + +static int commit_txnh(txn_handle * txnh); + +static void wakeup_atom_waitfor_list(txn_atom * atom); +static void wakeup_atom_waiting_list(txn_atom * atom); + +static void capture_assign_txnh_nolock(txn_atom * atom, txn_handle * txnh); + +static void capture_assign_block_nolock(txn_atom * atom, jnode * node); + +static void fuse_not_fused_lock_owners(txn_handle * txnh, znode * node); + +static int capture_init_fusion(jnode * node, txn_handle * txnh, + txn_capture mode); + +static int capture_fuse_wait(txn_handle *, txn_atom *, txn_atom *, txn_capture); + +static void capture_fuse_into(txn_atom * small, txn_atom * large); + +void reiser4_invalidate_list(struct list_head *); + +/* GENERIC STRUCTURES */ + +typedef struct _txn_wait_links txn_wait_links; + +struct _txn_wait_links { + lock_stack *_lock_stack; + struct list_head _fwaitfor_link; + struct list_head _fwaiting_link; + int (*waitfor_cb) (txn_atom * atom, struct _txn_wait_links * wlinks); + int (*waiting_cb) (txn_atom * atom, struct _txn_wait_links * wlinks); +}; + +/* FIXME: In theory, we should be using the slab cache init & destructor + methods instead of, e.g., jnode_init, etc. */ +static struct kmem_cache *_atom_slab = NULL; +/* this is for user-visible, cross system-call transactions. */ +static struct kmem_cache *_txnh_slab = NULL; + +/** + * init_txnmgr_static - create transaction manager slab caches + * + * Initializes caches of txn-atoms and txn_handle. It is part of reiser4 module + * initialization. + */ +int init_txnmgr_static(void) +{ + assert("jmacd-600", _atom_slab == NULL); + assert("jmacd-601", _txnh_slab == NULL); + + ON_DEBUG(atomic_set(&flush_cnt, 0)); + + _atom_slab = kmem_cache_create("txn_atom", sizeof(txn_atom), 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (_atom_slab == NULL) + return RETERR(-ENOMEM); + + _txnh_slab = kmem_cache_create("txn_handle", sizeof(txn_handle), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (_txnh_slab == NULL) { + kmem_cache_destroy(_atom_slab); + _atom_slab = NULL; + return RETERR(-ENOMEM); + } + + return 0; +} + +/** + * done_txnmgr_static - delete txn_atom and txn_handle caches + * + * This is called on reiser4 module unloading or system shutdown. 
+ */ +void done_txnmgr_static(void) +{ + destroy_reiser4_cache(&_atom_slab); + destroy_reiser4_cache(&_txnh_slab); +} + +/** + * init_txnmgr - initialize a new transaction manager + * @mgr: pointer to transaction manager embedded in reiser4 super block + * + * This is called on mount. Makes necessary initializations. + */ +void reiser4_init_txnmgr(txn_mgr *mgr) +{ + assert("umka-169", mgr != NULL); + + mgr->atom_count = 0; + mgr->id_count = 1; + INIT_LIST_HEAD(&mgr->atoms_list); + spin_lock_init(&mgr->tmgr_lock); + mutex_init(&mgr->commit_mutex); +} + +/** + * reiser4_done_txnmgr - stop transaction manager + * @mgr: pointer to transaction manager embedded in reiser4 super block + * + * This is called on umount. Does sanity checks. + */ +void reiser4_done_txnmgr(txn_mgr *mgr) +{ + assert("umka-170", mgr != NULL); + assert("umka-1701", list_empty_careful(&mgr->atoms_list)); + assert("umka-1702", mgr->atom_count == 0); +} + +/* Initialize a transaction handle. */ +/* Audited by: umka (2002.06.13) */ +static void txnh_init(txn_handle * txnh, txn_mode mode) +{ + assert("umka-171", txnh != NULL); + + txnh->mode = mode; + txnh->atom = NULL; + reiser4_ctx_gfp_mask_set(); + txnh->flags = 0; + spin_lock_init(&txnh->hlock); + INIT_LIST_HEAD(&txnh->txnh_link); +} + +#if REISER4_DEBUG +/* Check if a transaction handle is clean. */ +static int txnh_isclean(txn_handle * txnh) +{ + assert("umka-172", txnh != NULL); + return txnh->atom == NULL && + LOCK_CNT_NIL(spin_locked_txnh); +} +#endif + +/* Initialize an atom. */ +static void atom_init(txn_atom * atom) +{ + int level; + + assert("umka-173", atom != NULL); + + memset(atom, 0, sizeof(txn_atom)); + + atom->stage = ASTAGE_FREE; + atom->start_time = jiffies; + + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) + INIT_LIST_HEAD(ATOM_DIRTY_LIST(atom, level)); + + INIT_LIST_HEAD(ATOM_CLEAN_LIST(atom)); + INIT_LIST_HEAD(ATOM_OVRWR_LIST(atom)); + INIT_LIST_HEAD(ATOM_WB_LIST(atom)); + INIT_LIST_HEAD(&atom->inodes); + spin_lock_init(&(atom->alock)); + /* list of transaction handles */ + INIT_LIST_HEAD(&atom->txnh_list); + /* link to transaction manager's list of atoms */ + INIT_LIST_HEAD(&atom->atom_link); + INIT_LIST_HEAD(&atom->fwaitfor_list); + INIT_LIST_HEAD(&atom->fwaiting_list); + blocknr_set_init(&atom->wandered_map); + + atom_dset_init(atom); + + init_atom_fq_parts(atom); +} + +#if REISER4_DEBUG +/* Check if an atom is clean. */ +static int atom_isclean(txn_atom * atom) +{ + int level; + + assert("umka-174", atom != NULL); + + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + if (!list_empty_careful(ATOM_DIRTY_LIST(atom, level))) { + return 0; + } + } + + return atom->stage == ASTAGE_FREE && + atom->txnh_count == 0 && + atom->capture_count == 0 && + atomic_read(&atom->refcount) == 0 && + (&atom->atom_link == atom->atom_link.next && + &atom->atom_link == atom->atom_link.prev) && + list_empty_careful(&atom->txnh_list) && + list_empty_careful(ATOM_CLEAN_LIST(atom)) && + list_empty_careful(ATOM_OVRWR_LIST(atom)) && + list_empty_careful(ATOM_WB_LIST(atom)) && + list_empty_careful(&atom->fwaitfor_list) && + list_empty_careful(&atom->fwaiting_list) && + atom_fq_parts_are_clean(atom); +} +#endif + +/* Begin a transaction in this context. Currently this uses the reiser4_context's + trans_in_ctx, which means that transaction handles are stack-allocated. Eventually + this will be extended to allow transaction handles to span several contexts. 
*/ +/* Audited by: umka (2002.06.13) */ +void reiser4_txn_begin(reiser4_context * context) +{ + assert("jmacd-544", context->trans == NULL); + + context->trans = &context->trans_in_ctx; + + /* FIXME_LATER_JMACD Currently there's no way to begin a TXN_READ_FUSING + transcrash. Default should be TXN_WRITE_FUSING. Also, the _trans variable is + stack allocated right now, but we would like to allow for dynamically allocated + transcrashes that span multiple system calls. + */ + txnh_init(context->trans, TXN_WRITE_FUSING); +} + +/* Finish a transaction handle context. */ +int reiser4_txn_end(reiser4_context * context) +{ + long ret = 0; + txn_handle *txnh; + + assert("umka-283", context != NULL); + assert("nikita-3012", reiser4_schedulable()); + assert("vs-24", context == get_current_context()); + assert("nikita-2967", lock_stack_isclean(get_current_lock_stack())); + + txnh = context->trans; + if (txnh != NULL) { + if (txnh->atom != NULL) + ret = commit_txnh(txnh); + assert("jmacd-633", txnh_isclean(txnh)); + context->trans = NULL; + } + return ret; +} + +void reiser4_txn_restart(reiser4_context * context) +{ + reiser4_txn_end(context); + reiser4_preempt_point(); + reiser4_txn_begin(context); +} + +void reiser4_txn_restart_current(void) +{ + reiser4_txn_restart(get_current_context()); +} + +/* TXN_ATOM */ + +/* Get the atom belonging to a txnh, which is not locked. Return txnh locked. Locks atom, if atom + is not NULL. This performs the necessary spin_trylock to break the lock-ordering cycle. May + return NULL. */ +static txn_atom *txnh_get_atom(txn_handle * txnh) +{ + txn_atom *atom; + + assert("umka-180", txnh != NULL); + assert_spin_not_locked(&(txnh->hlock)); + + while (1) { + spin_lock_txnh(txnh); + atom = txnh->atom; + + if (atom == NULL) + break; + + if (spin_trylock_atom(atom)) + break; + + atomic_inc(&atom->refcount); + + spin_unlock_txnh(txnh); + spin_lock_atom(atom); + spin_lock_txnh(txnh); + + if (txnh->atom == atom) { + atomic_dec(&atom->refcount); + break; + } + + spin_unlock_txnh(txnh); + atom_dec_and_unlock(atom); + } + + return atom; +} + +/* Get the current atom and spinlock it if current atom present. May return NULL */ +txn_atom *get_current_atom_locked_nocheck(void) +{ + reiser4_context *cx; + txn_atom *atom; + txn_handle *txnh; + + cx = get_current_context(); + assert("zam-437", cx != NULL); + + txnh = cx->trans; + assert("zam-435", txnh != NULL); + + atom = txnh_get_atom(txnh); + + spin_unlock_txnh(txnh); + return atom; +} + +/* Get the atom belonging to a jnode, which is initially locked. Return with + both jnode and atom locked. This performs the necessary spin_trylock to + break the lock-ordering cycle. Assumes the jnode is already locked, and + returns NULL if atom is not set. */ +txn_atom *jnode_get_atom(jnode * node) +{ + txn_atom *atom; + + assert("umka-181", node != NULL); + + while (1) { + assert_spin_locked(&(node->guard)); + + atom = node->atom; + /* node is not in any atom */ + if (atom == NULL) + break; + + /* If atom is not locked, grab the lock and return */ + if (spin_trylock_atom(atom)) + break; + + /* At least one jnode belongs to this atom it guarantees that + * atom->refcount > 0, we can safely increment refcount. */ + atomic_inc(&atom->refcount); + spin_unlock_jnode(node); + + /* re-acquire spin locks in the right order */ + spin_lock_atom(atom); + spin_lock_jnode(node); + + /* check if node still points to the same atom. 
*/ + if (node->atom == atom) { + atomic_dec(&atom->refcount); + break; + } + + /* releasing of atom lock and reference requires not holding + * locks on jnodes. */ + spin_unlock_jnode(node); + + /* We do not sure that this atom has extra references except our + * one, so we should call proper function which may free atom if + * last reference is released. */ + atom_dec_and_unlock(atom); + + /* lock jnode again for getting valid node->atom pointer + * value. */ + spin_lock_jnode(node); + } + + return atom; +} + +/* Returns true if @node is dirty and part of the same atom as one of its neighbors. Used + by flush code to indicate whether the next node (in some direction) is suitable for + flushing. */ +int +same_slum_check(jnode * node, jnode * check, int alloc_check, int alloc_value) +{ + int compat; + txn_atom *atom; + + assert("umka-182", node != NULL); + assert("umka-183", check != NULL); + + /* Not sure what this function is supposed to do if supplied with @check that is + neither formatted nor unformatted (bitmap or so). */ + assert("nikita-2373", jnode_is_znode(check) + || jnode_is_unformatted(check)); + + /* Need a lock on CHECK to get its atom and to check various state bits. + Don't need a lock on NODE once we get the atom lock. */ + /* It is not enough to lock two nodes and check (node->atom == + check->atom) because atom could be locked and being fused at that + moment, jnodes of the atom of that state (being fused) can point to + different objects, but the atom is the same. */ + spin_lock_jnode(check); + + atom = jnode_get_atom(check); + + if (atom == NULL) { + compat = 0; + } else { + compat = (node->atom == atom && JF_ISSET(check, JNODE_DIRTY)); + + if (compat && jnode_is_znode(check)) { + compat &= znode_is_connected(JZNODE(check)); + } + + if (compat && alloc_check) { + compat &= (alloc_value == jnode_is_flushprepped(check)); + } + + spin_unlock_atom(atom); + } + + spin_unlock_jnode(check); + + return compat; +} + +/* Decrement the atom's reference count and if it falls to zero, free it. */ +void atom_dec_and_unlock(txn_atom * atom) +{ + txn_mgr *mgr = &get_super_private(reiser4_get_current_sb())->tmgr; + + assert("umka-186", atom != NULL); + assert_spin_locked(&(atom->alock)); + assert("zam-1039", atomic_read(&atom->refcount) > 0); + + if (atomic_dec_and_test(&atom->refcount)) { + /* take txnmgr lock and atom lock in proper order. */ + if (!spin_trylock_txnmgr(mgr)) { + /* This atom should exist after we re-acquire its + * spinlock, so we increment its reference counter. */ + atomic_inc(&atom->refcount); + spin_unlock_atom(atom); + spin_lock_txnmgr(mgr); + spin_lock_atom(atom); + + if (!atomic_dec_and_test(&atom->refcount)) { + spin_unlock_atom(atom); + spin_unlock_txnmgr(mgr); + return; + } + } + assert_spin_locked(&(mgr->tmgr_lock)); + atom_free(atom); + spin_unlock_txnmgr(mgr); + } else + spin_unlock_atom(atom); +} + +/* Create new atom and connect it to given transaction handle. This adds the + atom to the transaction manager's list and sets its reference count to 1, an + artificial reference which is kept until it commits. 
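+   Note that both exit paths return -E_REPEAT: either the new atom has been
+   set up and assigned to @txnh, or another thread assigned an atom to @txnh
+   while we were allocating; in both cases the caller simply retries its
+   capture attempt.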
We play strange games + to avoid allocation under jnode & txnh spinlocks.*/ + +static int atom_begin_and_assign_to_txnh(txn_atom ** atom_alloc, txn_handle * txnh) +{ + txn_atom *atom; + txn_mgr *mgr; + + if (REISER4_DEBUG && rofs_tree(current_tree)) { + warning("nikita-3366", "Creating atom on rofs"); + dump_stack(); + } + + if (*atom_alloc == NULL) { + (*atom_alloc) = kmem_cache_alloc(_atom_slab, + reiser4_ctx_gfp_mask_get()); + + if (*atom_alloc == NULL) + return RETERR(-ENOMEM); + } + + /* and, also, txnmgr spin lock should be taken before jnode and txnh + locks. */ + mgr = &get_super_private(reiser4_get_current_sb())->tmgr; + spin_lock_txnmgr(mgr); + spin_lock_txnh(txnh); + + /* Check whether new atom still needed */ + if (txnh->atom != NULL) { + /* NOTE-NIKITA probably it is rather better to free + * atom_alloc here than thread it up to reiser4_try_capture() */ + + spin_unlock_txnh(txnh); + spin_unlock_txnmgr(mgr); + + return -E_REPEAT; + } + + atom = *atom_alloc; + *atom_alloc = NULL; + + atom_init(atom); + + assert("jmacd-17", atom_isclean(atom)); + + /* + * lock ordering is broken here. It is ok, as long as @atom is new + * and inaccessible for others. We can't use spin_lock_atom or + * spin_lock(&atom->alock) because they care about locking + * dependencies. spin_trylock_lock doesn't. + */ + check_me("", spin_trylock_atom(atom)); + + /* add atom to the end of transaction manager's list of atoms */ + list_add_tail(&atom->atom_link, &mgr->atoms_list); + atom->atom_id = mgr->id_count++; + mgr->atom_count += 1; + + /* Release txnmgr lock */ + spin_unlock_txnmgr(mgr); + + /* One reference until it commits. */ + atomic_inc(&atom->refcount); + atom->stage = ASTAGE_CAPTURE_FUSE; + atom->super = reiser4_get_current_sb(); + capture_assign_txnh_nolock(atom, txnh); + + spin_unlock_atom(atom); + spin_unlock_txnh(txnh); + + return -E_REPEAT; +} + +/* Return true if an atom is currently "open". */ +static int atom_isopen(const txn_atom * atom) +{ + assert("umka-185", atom != NULL); + + return atom->stage > 0 && atom->stage < ASTAGE_PRE_COMMIT; +} + +/* Return the number of pointers to this atom that must be updated during fusion. This + approximates the amount of work to be done. Fusion chooses the atom with fewer + pointers to fuse into the atom with more pointers. */ +static int atom_pointer_count(const txn_atom * atom) +{ + assert("umka-187", atom != NULL); + + /* This is a measure of the amount of work needed to fuse this atom + * into another. */ + return atom->txnh_count + atom->capture_count; +} + +/* Called holding the atom lock, this removes the atom from the transaction manager list + and frees it. 
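+   The transaction manager lock must be held as well (atoms_list is edited
+   here); the atom spin lock is dropped before the slab object is freed.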
*/ +static void atom_free(txn_atom * atom) +{ + txn_mgr *mgr = &get_super_private(reiser4_get_current_sb())->tmgr; + + assert("umka-188", atom != NULL); + assert_spin_locked(&(atom->alock)); + + /* Remove from the txn_mgr's atom list */ + assert_spin_locked(&(mgr->tmgr_lock)); + mgr->atom_count -= 1; + list_del_init(&atom->atom_link); + + /* Clean the atom */ + assert("jmacd-16", + (atom->stage == ASTAGE_INVALID || atom->stage == ASTAGE_DONE)); + atom->stage = ASTAGE_FREE; + + blocknr_set_destroy(&atom->wandered_map); + + atom_dset_destroy(atom); + + assert("jmacd-16", atom_isclean(atom)); + + spin_unlock_atom(atom); + + kmem_cache_free(_atom_slab, atom); +} + +static int atom_is_dotard(const txn_atom * atom) +{ + return time_after(jiffies, atom->start_time + + get_current_super_private()->tmgr.atom_max_age); +} + +static int atom_can_be_committed(txn_atom * atom) +{ + assert_spin_locked(&(atom->alock)); + assert("zam-885", atom->txnh_count > atom->nr_waiters); + return atom->txnh_count == atom->nr_waiters + 1; +} + +/* Return true if an atom should commit now. This is determined by aging, atom + size or atom flags. */ +static int atom_should_commit(const txn_atom * atom) +{ + assert("umka-189", atom != NULL); + return + (atom->flags & ATOM_FORCE_COMMIT) || + ((unsigned)atom_pointer_count(atom) > + get_current_super_private()->tmgr.atom_max_size) + || atom_is_dotard(atom); +} + +/* return 1 if current atom exists and requires commit. */ +int current_atom_should_commit(void) +{ + txn_atom *atom; + int result = 0; + + atom = get_current_atom_locked_nocheck(); + if (atom) { + result = atom_should_commit(atom); + spin_unlock_atom(atom); + } + return result; +} + +static int atom_should_commit_asap(const txn_atom * atom) +{ + unsigned int captured; + unsigned int pinnedpages; + + assert("nikita-3309", atom != NULL); + + captured = (unsigned)atom->capture_count; + pinnedpages = (captured >> PAGE_SHIFT) * sizeof(znode); + + return (pinnedpages > (totalram_pages >> 3)) || (atom->flushed > 100); +} + +static jnode *find_first_dirty_in_list(struct list_head *head, int flags) +{ + jnode *first_dirty; + + list_for_each_entry(first_dirty, head, capture_link) { + if (!(flags & JNODE_FLUSH_COMMIT)) { + /* + * skip jnodes which "heard banshee" or having active + * I/O + */ + if (JF_ISSET(first_dirty, JNODE_HEARD_BANSHEE) || + JF_ISSET(first_dirty, JNODE_WRITEBACK)) + continue; + } + return first_dirty; + } + return NULL; +} + +/* Get first dirty node from the atom's dirty_nodes[n] lists; return NULL if atom has no dirty + nodes on atom's lists */ +jnode *find_first_dirty_jnode(txn_atom * atom, int flags) +{ + jnode *first_dirty; + tree_level level; + + assert_spin_locked(&(atom->alock)); + + /* The flush starts from LEAF_LEVEL (=1). */ + for (level = 1; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + if (list_empty_careful(ATOM_DIRTY_LIST(atom, level))) + continue; + + first_dirty = + find_first_dirty_in_list(ATOM_DIRTY_LIST(atom, level), + flags); + if (first_dirty) + return first_dirty; + } + + /* znode-above-root is on the list #0. 
*/ + return find_first_dirty_in_list(ATOM_DIRTY_LIST(atom, 0), flags); +} + +static void dispatch_wb_list(txn_atom * atom, flush_queue_t * fq) +{ + jnode *cur; + + assert("zam-905", atom_is_protected(atom)); + + cur = list_entry(ATOM_WB_LIST(atom)->next, jnode, capture_link); + while (ATOM_WB_LIST(atom) != &cur->capture_link) { + jnode *next = list_entry(cur->capture_link.next, jnode, capture_link); + + spin_lock_jnode(cur); + if (!JF_ISSET(cur, JNODE_WRITEBACK)) { + if (JF_ISSET(cur, JNODE_DIRTY)) { + queue_jnode(fq, cur); + } else { + /* move jnode to atom's clean list */ + list_move_tail(&cur->capture_link, + ATOM_CLEAN_LIST(atom)); + } + } + spin_unlock_jnode(cur); + + cur = next; + } +} + +/* Scan current atom->writeback_nodes list, re-submit dirty and !writeback + * jnodes to disk. */ +static int submit_wb_list(void) +{ + int ret; + flush_queue_t *fq; + + fq = get_fq_for_current_atom(); + if (IS_ERR(fq)) + return PTR_ERR(fq); + + dispatch_wb_list(fq->atom, fq); + spin_unlock_atom(fq->atom); + + ret = reiser4_write_fq(fq, NULL, 1); + reiser4_fq_put(fq); + + return ret; +} + +/* Wait completion of all writes, re-submit atom writeback list if needed. */ +static int current_atom_complete_writes(void) +{ + int ret; + + /* Each jnode from that list was modified and dirtied when it had i/o + * request running already. After i/o completion we have to resubmit + * them to disk again.*/ + ret = submit_wb_list(); + if (ret < 0) + return ret; + + /* Wait all i/o completion */ + ret = current_atom_finish_all_fq(); + if (ret) + return ret; + + /* Scan wb list again; all i/o should be completed, we re-submit dirty + * nodes to disk */ + ret = submit_wb_list(); + if (ret < 0) + return ret; + + /* Wait all nodes we just submitted */ + return current_atom_finish_all_fq(); +} + +#if REISER4_DEBUG + +static void reiser4_info_atom(const char *prefix, const txn_atom * atom) +{ + if (atom == NULL) { + printk("%s: no atom\n", prefix); + return; + } + + printk("%s: refcount: %i id: %i flags: %x txnh_count: %i" + " capture_count: %i stage: %x start: %lu, flushed: %i\n", prefix, + atomic_read(&atom->refcount), atom->atom_id, atom->flags, + atom->txnh_count, atom->capture_count, atom->stage, + atom->start_time, atom->flushed); +} + +#else /* REISER4_DEBUG */ + +static inline void reiser4_info_atom(const char *prefix, const txn_atom * atom) {} + +#endif /* REISER4_DEBUG */ + +#define TOOMANYFLUSHES (1 << 13) + +/* Called with the atom locked and no open "active" transaction handlers except + ours, this function calls flush_current_atom() until all dirty nodes are + processed. Then it initiates commit processing. + + Called by the single remaining open "active" txnh, which is closing. Other + open txnhs belong to processes which wait atom commit in commit_txnh() + routine. They are counted as "waiters" in atom->nr_waiters. Therefore as + long as we hold the atom lock none of the jnodes can be captured and/or + locked. + + Return value is an error code if commit fails. +*/ +static int commit_current_atom(long *nr_submitted, txn_atom ** atom) +{ + reiser4_super_info_data *sbinfo = get_current_super_private(); + long ret = 0; + /* how many times jnode_flush() was called as a part of attempt to + * commit this atom. 
*/ + int flushiters; + + assert("zam-888", atom != NULL && *atom != NULL); + assert_spin_locked(&((*atom)->alock)); + assert("zam-887", get_current_context()->trans->atom == *atom); + assert("jmacd-151", atom_isopen(*atom)); + + assert("nikita-3184", + get_current_super_private()->delete_mutex_owner != current); + + for (flushiters = 0;; ++flushiters) { + ret = + flush_current_atom(JNODE_FLUSH_WRITE_BLOCKS | + JNODE_FLUSH_COMMIT, + LONG_MAX /* nr_to_write */ , + nr_submitted, atom, NULL); + if (ret != -E_REPEAT) + break; + + /* if atom's dirty list contains one znode which is + HEARD_BANSHEE and is locked we have to allow lock owner to + continue and uncapture that znode */ + reiser4_preempt_point(); + + *atom = get_current_atom_locked(); + if (flushiters > TOOMANYFLUSHES && IS_POW(flushiters)) { + warning("nikita-3176", + "Flushing like mad: %i", flushiters); + reiser4_info_atom("atom", *atom); + DEBUGON(flushiters > (1 << 20)); + } + } + + if (ret) + return ret; + + assert_spin_locked(&((*atom)->alock)); + + if (!atom_can_be_committed(*atom)) { + spin_unlock_atom(*atom); + return RETERR(-E_REPEAT); + } + + if ((*atom)->capture_count == 0) + goto done; + + /* Up to this point we have been flushing and after flush is called we + return -E_REPEAT. Now we can commit. We cannot return -E_REPEAT + at this point, commit should be successful. */ + reiser4_atom_set_stage(*atom, ASTAGE_PRE_COMMIT); + ON_DEBUG(((*atom)->committer = current)); + spin_unlock_atom(*atom); + + ret = current_atom_complete_writes(); + if (ret) + return ret; + + assert("zam-906", list_empty(ATOM_WB_LIST(*atom))); + + /* isolate critical code path which should be executed by only one + * thread using tmgr mutex */ + mutex_lock(&sbinfo->tmgr.commit_mutex); + + ret = reiser4_write_logs(nr_submitted); + if (ret < 0) + reiser4_panic("zam-597", "write log failed (%ld)\n", ret); + + /* The atom->ovrwr_nodes list is processed under commit mutex held + because of bitmap nodes which are captured by special way in + reiser4_pre_commit_hook_bitmap(), that way does not include + capture_fuse_wait() as a capturing of other nodes does -- the commit + mutex is used for transaction isolation instead. */ + reiser4_invalidate_list(ATOM_OVRWR_LIST(*atom)); + mutex_unlock(&sbinfo->tmgr.commit_mutex); + + reiser4_invalidate_list(ATOM_CLEAN_LIST(*atom)); + reiser4_invalidate_list(ATOM_WB_LIST(*atom)); + assert("zam-927", list_empty(&(*atom)->inodes)); + + spin_lock_atom(*atom); + done: + reiser4_atom_set_stage(*atom, ASTAGE_DONE); + ON_DEBUG((*atom)->committer = NULL); + + /* Atom's state changes, so wake up everybody waiting for this + event. */ + wakeup_atom_waiting_list(*atom); + + /* Decrement the "until commit" reference, at least one txnh (the caller) is + still open. */ + atomic_dec(&(*atom)->refcount); + + assert("jmacd-1070", atomic_read(&(*atom)->refcount) > 0); + assert("jmacd-1062", (*atom)->capture_count == 0); + BUG_ON((*atom)->capture_count != 0); + assert_spin_locked(&((*atom)->alock)); + + return ret; +} + +/* TXN_TXNH */ + +/** + * force_commit_atom - commit current atom and wait commit completion + * @txnh: + * + * Commits current atom and wait commit completion; current atom and @txnh have + * to be spinlocked before call, this function unlocks them on exit. 
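+ *
+ * A minimal usage sketch (illustrative; it mirrors what
+ * txnmgr_force_commit_all() below does): lock the atom, lock and attach the
+ * handle, then call this function, which drops both spin locks and restarts
+ * the current transaction:
+ *
+ *	spin_lock_atom(atom);
+ *	spin_lock_txnh(txnh);
+ *	capture_assign_txnh_nolock(atom, txnh);
+ *	ret = force_commit_atom(txnh);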
+ */ +int force_commit_atom(txn_handle *txnh) +{ + txn_atom *atom; + + assert("zam-837", txnh != NULL); + assert_spin_locked(&(txnh->hlock)); + assert("nikita-2966", lock_stack_isclean(get_current_lock_stack())); + + atom = txnh->atom; + + assert("zam-834", atom != NULL); + assert_spin_locked(&(atom->alock)); + + /* + * Set flags for atom and txnh: forcing atom commit and waiting for + * commit completion + */ + txnh->flags |= TXNH_WAIT_COMMIT; + atom->flags |= ATOM_FORCE_COMMIT; + + spin_unlock_txnh(txnh); + spin_unlock_atom(atom); + + /* commit is here */ + reiser4_txn_restart_current(); + return 0; +} + +/* Called to force commit of any outstanding atoms. @commit_all_atoms controls + * should we commit all atoms including new ones which are created after this + * functions is called. */ +int txnmgr_force_commit_all(struct super_block *super, int commit_all_atoms) +{ + int ret; + txn_atom *atom; + txn_mgr *mgr; + txn_handle *txnh; + unsigned long start_time = jiffies; + reiser4_context *ctx = get_current_context(); + + assert("nikita-2965", lock_stack_isclean(get_current_lock_stack())); + assert("nikita-3058", reiser4_commit_check_locks()); + + reiser4_txn_restart_current(); + + mgr = &get_super_private(super)->tmgr; + + txnh = ctx->trans; + + again: + + spin_lock_txnmgr(mgr); + + list_for_each_entry(atom, &mgr->atoms_list, atom_link) { + spin_lock_atom(atom); + + /* Commit any atom which can be committed. If @commit_new_atoms + * is not set we commit only atoms which were created before + * this call is started. */ + if (commit_all_atoms + || time_before_eq(atom->start_time, start_time)) { + if (atom->stage <= ASTAGE_POST_COMMIT) { + spin_unlock_txnmgr(mgr); + + if (atom->stage < ASTAGE_PRE_COMMIT) { + spin_lock_txnh(txnh); + /* Add force-context txnh */ + capture_assign_txnh_nolock(atom, txnh); + ret = force_commit_atom(txnh); + if (ret) + return ret; + } else + /* wait atom commit */ + reiser4_atom_wait_event(atom); + + goto again; + } + } + + spin_unlock_atom(atom); + } + +#if REISER4_DEBUG + if (commit_all_atoms) { + reiser4_super_info_data *sbinfo = get_super_private(super); + spin_lock_reiser4_super(sbinfo); + assert("zam-813", + sbinfo->blocks_fake_allocated_unformatted == 0); + assert("zam-812", sbinfo->blocks_fake_allocated == 0); + spin_unlock_reiser4_super(sbinfo); + } +#endif + + spin_unlock_txnmgr(mgr); + + return 0; +} + +/* check whether commit_some_atoms() can commit @atom. Locking is up to the + * caller */ +static int atom_is_committable(txn_atom * atom) +{ + return + atom->stage < ASTAGE_PRE_COMMIT && + atom->txnh_count == atom->nr_waiters && atom_should_commit(atom); +} + +/* called periodically from ktxnmgrd to commit old atoms. 
Releases ktxnmgrd spin + * lock at exit */ +int commit_some_atoms(txn_mgr * mgr) +{ + int ret = 0; + txn_atom *atom; + txn_handle *txnh; + reiser4_context *ctx; + struct list_head *pos, *tmp; + + ctx = get_current_context(); + assert("nikita-2444", ctx != NULL); + + txnh = ctx->trans; + spin_lock_txnmgr(mgr); + + /* + * this is to avoid gcc complain that atom might be used + * uninitialized + */ + atom = NULL; + + /* look for atom to commit */ + list_for_each_safe(pos, tmp, &mgr->atoms_list) { + atom = list_entry(pos, txn_atom, atom_link); + /* + * first test without taking atom spin lock, whether it is + * eligible for committing at all + */ + if (atom_is_committable(atom)) { + /* now, take spin lock and re-check */ + spin_lock_atom(atom); + if (atom_is_committable(atom)) + break; + spin_unlock_atom(atom); + } + } + + ret = (&mgr->atoms_list == pos); + spin_unlock_txnmgr(mgr); + + if (ret) { + /* nothing found */ + spin_unlock(&mgr->daemon->guard); + return 0; + } + + spin_lock_txnh(txnh); + + BUG_ON(atom == NULL); + /* Set the atom to force committing */ + atom->flags |= ATOM_FORCE_COMMIT; + + /* Add force-context txnh */ + capture_assign_txnh_nolock(atom, txnh); + + spin_unlock_txnh(txnh); + spin_unlock_atom(atom); + + /* we are about to release daemon spin lock, notify daemon it + has to rescan atoms */ + mgr->daemon->rescan = 1; + spin_unlock(&mgr->daemon->guard); + reiser4_txn_restart_current(); + return 0; +} + +static int txn_try_to_fuse_small_atom(txn_mgr * tmgr, txn_atom * atom) +{ + int atom_stage; + txn_atom *atom_2; + int repeat; + + assert("zam-1051", atom->stage < ASTAGE_PRE_COMMIT); + + atom_stage = atom->stage; + repeat = 0; + + if (!spin_trylock_txnmgr(tmgr)) { + atomic_inc(&atom->refcount); + spin_unlock_atom(atom); + spin_lock_txnmgr(tmgr); + spin_lock_atom(atom); + repeat = 1; + if (atom->stage != atom_stage) { + spin_unlock_txnmgr(tmgr); + atom_dec_and_unlock(atom); + return -E_REPEAT; + } + atomic_dec(&atom->refcount); + } + + list_for_each_entry(atom_2, &tmgr->atoms_list, atom_link) { + if (atom == atom_2) + continue; + /* + * if trylock does not succeed we just do not fuse with that + * atom. + */ + if (spin_trylock_atom(atom_2)) { + if (atom_2->stage < ASTAGE_PRE_COMMIT) { + spin_unlock_txnmgr(tmgr); + capture_fuse_into(atom_2, atom); + /* all locks are lost we can only repeat here */ + return -E_REPEAT; + } + spin_unlock_atom(atom_2); + } + } + atom->flags |= ATOM_CANCEL_FUSION; + spin_unlock_txnmgr(tmgr); + if (repeat) { + spin_unlock_atom(atom); + return -E_REPEAT; + } + return 0; +} + +/* Calls jnode_flush for current atom if it exists; if not, just take another + atom and call jnode_flush() for him. If current transaction handle has + already assigned atom (current atom) we have to close current transaction + prior to switch to another atom or do something with current atom. This + code tries to flush current atom. + + flush_some_atom() is called as part of memory clearing process. It is + invoked from balance_dirty_pages(), pdflushd, and entd. + + If we can flush no nodes, atom is committed, because this frees memory. + + If atom is too large or too old it is committed also. 
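+
+   In short (summarizing the code below): pick the current atom, or the first
+   atom from the txnmgr list that has no active flushers; call
+   flush_current_atom(); if nothing could be submitted, try to fuse a small
+   atom into another one or mark the atom ATOM_FORCE_COMMIT; -E_REPEAT from
+   the flush restarts the whole procedure as long as nothing has been written
+   yet.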
+*/ +int +flush_some_atom(jnode * start, long *nr_submitted, const struct writeback_control *wbc, + int flags) +{ + reiser4_context *ctx = get_current_context(); + txn_mgr *tmgr = &get_super_private(ctx->super)->tmgr; + txn_handle *txnh = ctx->trans; + txn_atom *atom; + int ret; + + BUG_ON(wbc->nr_to_write == 0); + BUG_ON(*nr_submitted != 0); + assert("zam-1042", txnh != NULL); +repeat: + if (txnh->atom == NULL) { + /* current atom is not available, take first from txnmgr */ + spin_lock_txnmgr(tmgr); + + /* traverse the list of all atoms */ + list_for_each_entry(atom, &tmgr->atoms_list, atom_link) { + /* lock atom before checking its state */ + spin_lock_atom(atom); + + /* + * we need an atom which is not being committed and + * which has no flushers (jnode_flush() add one flusher + * at the beginning and subtract one at the end). + */ + if (atom->stage < ASTAGE_PRE_COMMIT && + atom->nr_flushers == 0) { + spin_lock_txnh(txnh); + capture_assign_txnh_nolock(atom, txnh); + spin_unlock_txnh(txnh); + + goto found; + } + + spin_unlock_atom(atom); + } + + /* + * Write throttling is case of no one atom can be + * flushed/committed. + */ + if (!current_is_flush_bd_task()) { + list_for_each_entry(atom, &tmgr->atoms_list, atom_link) { + spin_lock_atom(atom); + /* Repeat the check from the above. */ + if (atom->stage < ASTAGE_PRE_COMMIT + && atom->nr_flushers == 0) { + spin_lock_txnh(txnh); + capture_assign_txnh_nolock(atom, txnh); + spin_unlock_txnh(txnh); + + goto found; + } + if (atom->stage <= ASTAGE_POST_COMMIT) { + spin_unlock_txnmgr(tmgr); + /* + * we just wait until atom's flusher + * makes a progress in flushing or + * committing the atom + */ + reiser4_atom_wait_event(atom); + goto repeat; + } + spin_unlock_atom(atom); + } + } + spin_unlock_txnmgr(tmgr); + return 0; + found: + spin_unlock_txnmgr(tmgr); + } else + atom = get_current_atom_locked(); + + BUG_ON(atom->super != ctx->super); + assert("vs-35", atom->super == ctx->super); + if (start) { + spin_lock_jnode(start); + ret = (atom == start->atom) ? 1 : 0; + spin_unlock_jnode(start); + if (ret == 0) + start = NULL; + } + ret = flush_current_atom(flags, wbc->nr_to_write, nr_submitted, &atom, start); + if (ret == 0) { + /* flush_current_atom returns 0 only if it submitted for write + nothing */ + BUG_ON(*nr_submitted != 0); + if (*nr_submitted == 0 || atom_should_commit_asap(atom)) { + if (atom->capture_count < tmgr->atom_min_size && + !(atom->flags & ATOM_CANCEL_FUSION)) { + ret = txn_try_to_fuse_small_atom(tmgr, atom); + if (ret == -E_REPEAT) { + reiser4_preempt_point(); + goto repeat; + } + } + /* if early flushing could not make more nodes clean, + * or atom is too old/large, + * we force current atom to commit */ + /* wait for commit completion but only if this + * wouldn't stall pdflushd and ent thread. */ + if (!ctx->entd) + txnh->flags |= TXNH_WAIT_COMMIT; + atom->flags |= ATOM_FORCE_COMMIT; + } + spin_unlock_atom(atom); + } else if (ret == -E_REPEAT) { + if (*nr_submitted == 0) { + /* let others who hampers flushing (hold longterm locks, + for instance) to free the way for flush */ + reiser4_preempt_point(); + goto repeat; + } + ret = 0; + } +/* + if (*nr_submitted > wbc->nr_to_write) + warning("", "asked for %ld, written %ld\n", wbc->nr_to_write, *nr_submitted); +*/ + reiser4_txn_restart(ctx); + + return ret; +} + +/* Remove processed nodes from atom's clean list (thereby remove them from transaction). 
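+   Each node is uncaptured under its jnode spin lock and then released with
+   jput().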
*/ +void reiser4_invalidate_list(struct list_head *head) +{ + while (!list_empty(head)) { + jnode *node; + + node = list_entry(head->next, jnode, capture_link); + spin_lock_jnode(node); + reiser4_uncapture_block(node); + jput(node); + } +} + +static void init_wlinks(txn_wait_links * wlinks) +{ + wlinks->_lock_stack = get_current_lock_stack(); + INIT_LIST_HEAD(&wlinks->_fwaitfor_link); + INIT_LIST_HEAD(&wlinks->_fwaiting_link); + wlinks->waitfor_cb = NULL; + wlinks->waiting_cb = NULL; +} + +/* Add atom to the atom's waitfor list and wait for somebody to wake us up; */ +void reiser4_atom_wait_event(txn_atom * atom) +{ + txn_wait_links _wlinks; + + assert_spin_locked(&(atom->alock)); + assert("nikita-3156", + lock_stack_isclean(get_current_lock_stack()) || + atom->nr_running_queues > 0); + + init_wlinks(&_wlinks); + list_add_tail(&_wlinks._fwaitfor_link, &atom->fwaitfor_list); + atomic_inc(&atom->refcount); + spin_unlock_atom(atom); + + reiser4_prepare_to_sleep(_wlinks._lock_stack); + reiser4_go_to_sleep(_wlinks._lock_stack); + + spin_lock_atom(atom); + list_del(&_wlinks._fwaitfor_link); + atom_dec_and_unlock(atom); +} + +void reiser4_atom_set_stage(txn_atom * atom, txn_stage stage) +{ + assert("nikita-3535", atom != NULL); + assert_spin_locked(&(atom->alock)); + assert("nikita-3536", stage <= ASTAGE_INVALID); + /* Excelsior! */ + assert("nikita-3537", stage >= atom->stage); + if (atom->stage != stage) { + atom->stage = stage; + reiser4_atom_send_event(atom); + } +} + +/* wake all threads which wait for an event */ +void reiser4_atom_send_event(txn_atom * atom) +{ + assert_spin_locked(&(atom->alock)); + wakeup_atom_waitfor_list(atom); +} + +/* Informs txn manager code that owner of this txn_handle should wait atom commit completion (for + example, because it does fsync(2)) */ +static int should_wait_commit(txn_handle * h) +{ + return h->flags & TXNH_WAIT_COMMIT; +} + +typedef struct commit_data { + txn_atom *atom; + txn_handle *txnh; + long nr_written; + /* as an optimization we start committing atom by first trying to + * flush it few times without switching into ASTAGE_CAPTURE_WAIT. This + * allows to reduce stalls due to other threads waiting for atom in + * ASTAGE_CAPTURE_WAIT stage. ->preflush is counter of these + * preliminary flushes. */ + int preflush; + /* have we waited on atom. */ + int wait; + int failed; + int wake_ktxnmgrd_up; +} commit_data; + +/* + * Called from commit_txnh() repeatedly, until either error happens, or atom + * commits successfully. + */ +static int try_commit_txnh(commit_data * cd) +{ + int result; + + assert("nikita-2968", lock_stack_isclean(get_current_lock_stack())); + + /* Get the atom and txnh locked. */ + cd->atom = txnh_get_atom(cd->txnh); + assert("jmacd-309", cd->atom != NULL); + spin_unlock_txnh(cd->txnh); + + if (cd->wait) { + cd->atom->nr_waiters--; + cd->wait = 0; + } + + if (cd->atom->stage == ASTAGE_DONE) + return 0; + + if (cd->failed) + return 0; + + if (atom_should_commit(cd->atom)) { + /* if atom is _very_ large schedule it for commit as soon as + * possible. */ + if (atom_should_commit_asap(cd->atom)) { + /* + * When atom is in PRE_COMMIT or later stage following + * invariant (encoded in atom_can_be_committed()) + * holds: there is exactly one non-waiter transaction + * handle opened on this atom. When thread wants to + * wait until atom commits (for example sync()) it + * waits on atom event after increasing + * atom->nr_waiters (see blow in this function). 
It + * cannot be guaranteed that atom is already committed + * after receiving event, so loop has to be + * re-started. But if atom switched into PRE_COMMIT + * stage and became too large, we cannot change its + * state back to CAPTURE_WAIT (atom stage can only + * increase monotonically), hence this check. + */ + if (cd->atom->stage < ASTAGE_CAPTURE_WAIT) + reiser4_atom_set_stage(cd->atom, + ASTAGE_CAPTURE_WAIT); + cd->atom->flags |= ATOM_FORCE_COMMIT; + } + if (cd->txnh->flags & TXNH_DONT_COMMIT) { + /* + * this thread (transaction handle that is) doesn't + * want to commit atom. Notify waiters that handle is + * closed. This can happen, for example, when we are + * under VFS directory lock and don't want to commit + * atom right now to avoid stalling other threads + * working in the same directory. + */ + + /* Wake the ktxnmgrd up if the ktxnmgrd is needed to + * commit this atom: no atom waiters and only one + * (our) open transaction handle. */ + cd->wake_ktxnmgrd_up = + cd->atom->txnh_count == 1 && + cd->atom->nr_waiters == 0; + reiser4_atom_send_event(cd->atom); + result = 0; + } else if (!atom_can_be_committed(cd->atom)) { + if (should_wait_commit(cd->txnh)) { + /* sync(): wait for commit */ + cd->atom->nr_waiters++; + cd->wait = 1; + reiser4_atom_wait_event(cd->atom); + result = RETERR(-E_REPEAT); + } else { + result = 0; + } + } else if (cd->preflush > 0 && !is_current_ktxnmgrd()) { + /* + * optimization: flush atom without switching it into + * ASTAGE_CAPTURE_WAIT. + * + * But don't do this for ktxnmgrd, because ktxnmgrd + * should never block on atom fusion. + */ + result = flush_current_atom(JNODE_FLUSH_WRITE_BLOCKS, + LONG_MAX, &cd->nr_written, + &cd->atom, NULL); + if (result == 0) { + spin_unlock_atom(cd->atom); + cd->preflush = 0; + result = RETERR(-E_REPEAT); + } else /* Atoms wasn't flushed + * completely. Rinse. Repeat. */ + --cd->preflush; + } else { + /* We change atom state to ASTAGE_CAPTURE_WAIT to + prevent atom fusion and count ourself as an active + flusher */ + reiser4_atom_set_stage(cd->atom, ASTAGE_CAPTURE_WAIT); + cd->atom->flags |= ATOM_FORCE_COMMIT; + + result = + commit_current_atom(&cd->nr_written, &cd->atom); + if (result != 0 && result != -E_REPEAT) + cd->failed = 1; + } + } else + result = 0; + +#if REISER4_DEBUG + if (result == 0) + assert_spin_locked(&(cd->atom->alock)); +#endif + + /* perfectly valid assertion, except that when atom/txnh is not locked + * fusion can take place, and cd->atom points nowhere. */ + /* + assert("jmacd-1028", ergo(result != 0, spin_atom_is_not_locked(cd->atom))); + */ + return result; +} + +/* Called to commit a transaction handle. This decrements the atom's number of open + handles and if it is the last handle to commit and the atom should commit, initiates + atom commit. 
if commit does not fail, return number of written blocks */ +static int commit_txnh(txn_handle * txnh) +{ + commit_data cd; + assert("umka-192", txnh != NULL); + + memset(&cd, 0, sizeof cd); + cd.txnh = txnh; + cd.preflush = 10; + + /* calls try_commit_txnh() until either atom commits, or error + * happens */ + while (try_commit_txnh(&cd) != 0) + reiser4_preempt_point(); + + spin_lock_txnh(txnh); + + cd.atom->txnh_count -= 1; + txnh->atom = NULL; + /* remove transaction handle from atom's list of transaction handles */ + list_del_init(&txnh->txnh_link); + + spin_unlock_txnh(txnh); + atom_dec_and_unlock(cd.atom); + /* if we don't want to do a commit (TXNH_DONT_COMMIT is set, probably + * because it takes time) by current thread, we do that work + * asynchronously by ktxnmgrd daemon. */ + if (cd.wake_ktxnmgrd_up) + ktxnmgrd_kick(&get_current_super_private()->tmgr); + + return 0; +} + +/* TRY_CAPTURE */ + +/* This routine attempts a single block-capture request. It may return -E_REPEAT if some + condition indicates that the request should be retried, and it may block if the + txn_capture mode does not include the TXN_CAPTURE_NONBLOCKING request flag. + + This routine encodes the basic logic of block capturing described by: + + http://namesys.com/v4/v4.html + + Our goal here is to ensure that any two blocks that contain dependent modifications + should commit at the same time. This function enforces this discipline by initiating + fusion whenever a transaction handle belonging to one atom requests to read or write a + block belonging to another atom (TXN_CAPTURE_WRITE or TXN_CAPTURE_READ_ATOMIC). + + In addition, this routine handles the initial assignment of atoms to blocks and + transaction handles. These are possible outcomes of this function: + + 1. The block and handle are already part of the same atom: return immediate success + + 2. The block is assigned but the handle is not: call capture_assign_txnh to assign + the handle to the block's atom. + + 3. The handle is assigned but the block is not: call capture_assign_block to assign + the block to the handle's atom. + + 4. Both handle and block are assigned, but to different atoms: call capture_init_fusion + to fuse atoms. + + 5. Neither block nor handle are assigned: create a new atom and assign them both. + + 6. A read request for a non-captured block: return immediate success. + + This function acquires and releases the handle's spinlock. This function is called + under the jnode lock and if the return value is 0, it returns with the jnode lock still + held. If the return is -E_REPEAT or some other error condition, the jnode lock is + released. The external interface (reiser4_try_capture) manages re-aquiring the jnode + lock in the failure case. +*/ +static int try_capture_block( + txn_handle * txnh, jnode * node, txn_capture mode, + txn_atom ** atom_alloc) +{ + txn_atom *block_atom; + txn_atom *txnh_atom; + + /* Should not call capture for READ_NONCOM requests, handled in reiser4_try_capture. */ + assert("jmacd-567", CAPTURE_TYPE(mode) != TXN_CAPTURE_READ_NONCOM); + + /* FIXME-ZAM-HANS: FIXME_LATER_JMACD Should assert that atom->tree == + * node->tree somewhere. */ + assert("umka-194", txnh != NULL); + assert("umka-195", node != NULL); + + /* The jnode is already locked! Being called from reiser4_try_capture(). */ + assert_spin_locked(&(node->guard)); + block_atom = node->atom; + + /* Get txnh spinlock, this allows us to compare txn_atom pointers but it doesn't + let us touch the atoms themselves. 
 */
+ spin_lock_txnh(txnh);
+ txnh_atom = txnh->atom;
+ /* Process of capturing continues into one of four branches depending on
+ which atoms from (block atom (node->atom), current atom (txnh->atom))
+ exist. */
+ if (txnh_atom == NULL) {
+ if (block_atom == NULL) {
+ spin_unlock_txnh(txnh);
+ spin_unlock_jnode(node);
+ /* assign empty atom to the txnh and repeat */
+ return atom_begin_and_assign_to_txnh(atom_alloc, txnh);
+ } else {
+ atomic_inc(&block_atom->refcount);
+ /* node spin-lock isn't needed anymore */
+ spin_unlock_jnode(node);
+ if (!spin_trylock_atom(block_atom)) {
+ spin_unlock_txnh(txnh);
+ spin_lock_atom(block_atom);
+ spin_lock_txnh(txnh);
+ }
+ /* re-check state after getting txnh and the node
+ * atom spin-locked */
+ if (node->atom != block_atom || txnh->atom != NULL) {
+ spin_unlock_txnh(txnh);
+ atom_dec_and_unlock(block_atom);
+ return RETERR(-E_REPEAT);
+ }
+ atomic_dec(&block_atom->refcount);
+ if (block_atom->stage > ASTAGE_CAPTURE_WAIT ||
+ (block_atom->stage == ASTAGE_CAPTURE_WAIT &&
+ block_atom->txnh_count != 0))
+ return capture_fuse_wait(txnh, block_atom, NULL, mode);
+ capture_assign_txnh_nolock(block_atom, txnh);
+ spin_unlock_txnh(txnh);
+ spin_unlock_atom(block_atom);
+ return RETERR(-E_REPEAT);
+ }
+ } else {
+ /* It is time to perform a deadlock prevention check over the
+ node we want to capture. It is possible this node was locked
+ for read without capturing it. The optimization which allows
+ this helps us in keeping atoms independent as long as
+ possible, but it may cause lock/fuse deadlock problems.
+
+ A number of similar deadlock situations with locked but not
+ captured nodes were found. In each situation there are two
+ or more threads: one of them does flushing while another one
+ does routine balancing or tree lookup. The flushing thread
+ (F) sleeps in a long term locking request for node (N), another
+ thread (A) sleeps trying to capture some node already
+ belonging to the atom of F, and F is in a state which prevents
+ immediate fusion.
+
+ Deadlocks of this kind cannot happen if node N was properly
+ captured by thread A. The F thread fuses atoms before locking,
+ therefore the current atom of thread F and the current atom of thread
+ A become the same atom and thread A may proceed. This does
+ not work if node N was not captured, because the fusion of
+ atoms does not happen.
+
+ The following scheme solves the deadlock: If
+ longterm_lock_znode locks and does not capture a znode, that
+ znode is marked as MISSED_IN_CAPTURE. A node marked this way
+ is processed by the code below, which restores the missed
+ capture and fuses the current atoms of all the node lock owners
+ by calling the fuse_not_fused_lock_owners() function.
*/ + if (JF_ISSET(node, JNODE_MISSED_IN_CAPTURE)) { + JF_CLR(node, JNODE_MISSED_IN_CAPTURE); + if (jnode_is_znode(node) && znode_is_locked(JZNODE(node))) { + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + fuse_not_fused_lock_owners(txnh, JZNODE(node)); + return RETERR(-E_REPEAT); + } + } + if (block_atom == NULL) { + atomic_inc(&txnh_atom->refcount); + spin_unlock_txnh(txnh); + if (!spin_trylock_atom(txnh_atom)) { + spin_unlock_jnode(node); + spin_lock_atom(txnh_atom); + spin_lock_jnode(node); + } + if (txnh->atom != txnh_atom || node->atom != NULL + || JF_ISSET(node, JNODE_IS_DYING)) { + spin_unlock_jnode(node); + atom_dec_and_unlock(txnh_atom); + return RETERR(-E_REPEAT); + } + atomic_dec(&txnh_atom->refcount); + capture_assign_block_nolock(txnh_atom, node); + spin_unlock_atom(txnh_atom); + } else { + if (txnh_atom != block_atom) { + if (mode & TXN_CAPTURE_DONT_FUSE) { + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + /* we are in a "no-fusion" mode and @node is + * already part of transaction. */ + return RETERR(-E_NO_NEIGHBOR); + } + return capture_init_fusion(node, txnh, mode); + } + spin_unlock_txnh(txnh); + } + } + return 0; +} + +static txn_capture +build_capture_mode(jnode * node, znode_lock_mode lock_mode, txn_capture flags) +{ + txn_capture cap_mode; + + assert_spin_locked(&(node->guard)); + + /* FIXME_JMACD No way to set TXN_CAPTURE_READ_MODIFY yet. */ + + if (lock_mode == ZNODE_WRITE_LOCK) { + cap_mode = TXN_CAPTURE_WRITE; + } else if (node->atom != NULL) { + cap_mode = TXN_CAPTURE_WRITE; + } else if (0 && /* txnh->mode == TXN_READ_FUSING && */ + jnode_get_level(node) == LEAF_LEVEL) { + /* NOTE-NIKITA TXN_READ_FUSING is not currently used */ + /* We only need a READ_FUSING capture at the leaf level. This + is because the internal levels of the tree (twigs included) + are redundant from the point of the user that asked for a + read-fusing transcrash. The user only wants to read-fuse + atoms due to reading uncommitted data that another user has + written. It is the file system that reads/writes the + internal tree levels, the user only reads/writes leaves. */ + cap_mode = TXN_CAPTURE_READ_ATOMIC; + } else { + /* In this case (read lock at a non-leaf) there's no reason to + * capture. */ + /* cap_mode = TXN_CAPTURE_READ_NONCOM; */ + return 0; + } + + cap_mode |= (flags & (TXN_CAPTURE_NONBLOCKING | TXN_CAPTURE_DONT_FUSE)); + assert("nikita-3186", cap_mode != 0); + return cap_mode; +} + +/* This is an external interface to try_capture_block(), it calls + try_capture_block() repeatedly as long as -E_REPEAT is returned. + + @node: node to capture, + @lock_mode: read or write lock is used in capture mode calculation, + @flags: see txn_capture flags enumeration, + @can_coc : can copy-on-capture + + @return: 0 - node was successfully captured, -E_REPEAT - capture request + cannot be processed immediately as it was requested in flags, + < 0 - other errors. +*/ +int reiser4_try_capture(jnode *node, znode_lock_mode lock_mode, + txn_capture flags) +{ + txn_atom *atom_alloc = NULL; + txn_capture cap_mode; + txn_handle *txnh = get_current_context()->trans; + int ret; + + assert_spin_locked(&(node->guard)); + + repeat: + if (JF_ISSET(node, JNODE_IS_DYING)) + return RETERR(-EINVAL); + if (node->atom != NULL && txnh->atom == node->atom) + return 0; + cap_mode = build_capture_mode(node, lock_mode, flags); + if (cap_mode == 0 || + (!(cap_mode & TXN_CAPTURE_WTYPES) && node->atom == NULL)) { + /* Mark this node as "MISSED". 
It helps in further deadlock + * analysis */ + if (jnode_is_znode(node)) + JF_SET(node, JNODE_MISSED_IN_CAPTURE); + return 0; + } + /* Repeat try_capture as long as -E_REPEAT is returned. */ + ret = try_capture_block(txnh, node, cap_mode, &atom_alloc); + /* Regardless of non_blocking: + + If ret == 0 then jnode is still locked. + If ret != 0 then jnode is unlocked. + */ +#if REISER4_DEBUG + if (ret == 0) + assert_spin_locked(&(node->guard)); + else + assert_spin_not_locked(&(node->guard)); +#endif + assert_spin_not_locked(&(txnh->guard)); + + if (ret == -E_REPEAT) { + /* E_REPEAT implies all locks were released, therefore we need + to take the jnode's lock again. */ + spin_lock_jnode(node); + + /* Although this may appear to be a busy loop, it is not. + There are several conditions that cause E_REPEAT to be + returned by the call to try_capture_block, all cases + indicating some kind of state change that means you should + retry the request and will get a different result. In some + cases this could be avoided with some extra code, but + generally it is done because the necessary locks were + released as a result of the operation and repeating is the + simplest thing to do (less bug potential). The cases are: + atom fusion returns E_REPEAT after it completes (jnode and + txnh were unlocked); race conditions in assign_block, + assign_txnh, and init_fusion return E_REPEAT (trylock + failure); after going to sleep in capture_fuse_wait + (request was blocked but may now succeed). I'm not quite + sure how capture_copy works yet, but it may also return + E_REPEAT. When the request is legitimately blocked, the + requestor goes to sleep in fuse_wait, so this is not a busy + loop. */ + /* NOTE-NIKITA: still don't understand: + + try_capture_block->capture_assign_txnh->spin_trylock_atom->E_REPEAT + + looks like busy loop? + */ + goto repeat; + } + + /* free extra atom object that was possibly allocated by + try_capture_block(). + + Do this before acquiring jnode spin lock to + minimize time spent under lock. --nikita */ + if (atom_alloc != NULL) { + kmem_cache_free(_atom_slab, atom_alloc); + } + + if (ret != 0) { + if (ret == -E_BLOCK) { + assert("nikita-3360", + cap_mode & TXN_CAPTURE_NONBLOCKING); + ret = -E_REPEAT; + } + + /* Failure means jnode is not locked. FIXME_LATER_JMACD May + want to fix the above code to avoid releasing the lock and + re-acquiring it, but there are cases were failure occurs + when the lock is not held, and those cases would need to be + modified to re-take the lock. */ + spin_lock_jnode(node); + } + + /* Jnode is still locked. */ + assert_spin_locked(&(node->guard)); + return ret; +} + +static void release_two_atoms(txn_atom *one, txn_atom *two) +{ + spin_unlock_atom(one); + atom_dec_and_unlock(two); + spin_lock_atom(one); + atom_dec_and_unlock(one); +} + +/* This function sets up a call to try_capture_block and repeats as long as -E_REPEAT is + returned by that routine. The txn_capture request mode is computed here depending on + the transaction handle's type and the lock request. This is called from the depths of + the lock manager with the jnode lock held and it always returns with the jnode lock + held. +*/ + +/* fuse all 'active' atoms of lock owners of given node. 
 */
+static void fuse_not_fused_lock_owners(txn_handle * txnh, znode * node)
+{
+ lock_handle *lh;
+ int repeat;
+ txn_atom *atomh, *atomf;
+ reiser4_context *me = get_current_context();
+ reiser4_context *ctx = NULL;
+
+ assert_spin_not_locked(&(ZJNODE(node)->guard));
+ assert_spin_not_locked(&(txnh->hlock));
+
+ repeat:
+ repeat = 0;
+ atomh = txnh_get_atom(txnh);
+ spin_unlock_txnh(txnh);
+ assert("zam-692", atomh != NULL);
+
+ spin_lock_zlock(&node->lock);
+ /* inspect list of lock owners */
+ list_for_each_entry(lh, &node->lock.owners, owners_link) {
+ ctx = get_context_by_lock_stack(lh->owner);
+ if (ctx == me)
+ continue;
+ /* below we use two assumptions to avoid additional spin-locks
+ for checking the condition:
+
+ 1) if the lock stack holds a lock, the transaction must be
+ open, i.e. ctx->trans != NULL;
+
+ 2) reading of the well-aligned ctx->trans->atom is atomic; if it
+ equals the address of the spin-locked atomh, we take it that
+ the atoms are the same and nothing has to be captured. */
+ if (atomh != ctx->trans->atom) {
+ reiser4_wake_up(lh->owner);
+ repeat = 1;
+ break;
+ }
+ }
+ if (repeat) {
+ if (!spin_trylock_txnh(ctx->trans)) {
+ spin_unlock_zlock(&node->lock);
+ spin_unlock_atom(atomh);
+ goto repeat;
+ }
+ atomf = ctx->trans->atom;
+ if (atomf == NULL) {
+ capture_assign_txnh_nolock(atomh, ctx->trans);
+ /* release the zlock _after_ assigning the atom to the
+ * transaction handle, otherwise the lock owner thread
+ * may unlock all znodes, exit kernel context and here
+ * we would access an invalid transaction handle. */
+ spin_unlock_zlock(&node->lock);
+ spin_unlock_atom(atomh);
+ spin_unlock_txnh(ctx->trans);
+ goto repeat;
+ }
+ assert("zam-1059", atomf != atomh);
+ spin_unlock_zlock(&node->lock);
+ atomic_inc(&atomh->refcount);
+ atomic_inc(&atomf->refcount);
+ spin_unlock_txnh(ctx->trans);
+ if (atomf > atomh) {
+ spin_lock_atom_nested(atomf);
+ } else {
+ spin_unlock_atom(atomh);
+ spin_lock_atom(atomf);
+ spin_lock_atom_nested(atomh);
+ }
+ if (atomh == atomf || !atom_isopen(atomh) || !atom_isopen(atomf)) {
+ release_two_atoms(atomf, atomh);
+ goto repeat;
+ }
+ atomic_dec(&atomh->refcount);
+ atomic_dec(&atomf->refcount);
+ capture_fuse_into(atomf, atomh);
+ goto repeat;
+ }
+ spin_unlock_zlock(&node->lock);
+ spin_unlock_atom(atomh);
+}
+
+/* This is the interface to capture unformatted nodes via their struct page
+ reference. Currently it is only used in reiser4_invalidatepage */
+int try_capture_page_to_invalidate(struct page *pg)
+{
+ int ret;
+ jnode *node;
+
+ assert("umka-292", pg != NULL);
+ assert("nikita-2597", PageLocked(pg));
+
+ if (IS_ERR(node = jnode_of_page(pg))) {
+ return PTR_ERR(node);
+ }
+
+ spin_lock_jnode(node);
+ unlock_page(pg);
+
+ ret = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
+ spin_unlock_jnode(node);
+ jput(node);
+ lock_page(pg);
+ return ret;
+}
+
+/* This informs the transaction manager when a node is deleted. Add the block to the
+ atom's delete set and uncapture the block.
+
+VS-FIXME-HANS: this E_REPEAT paradigm clutters the code and creates a need for
+explanations. Find all the functions that use it, and unless there is some very
+good reason to use it (I have not noticed one so far and I doubt it exists, but maybe somewhere somehow....),
+move the loop to inside the function.
+
+VS-FIXME-HANS: can this code be at all streamlined? In particular, can you lock and unlock the jnode fewer times?
+ */ +void reiser4_uncapture_page(struct page *pg) +{ + jnode *node; + txn_atom *atom; + + assert("umka-199", pg != NULL); + assert("nikita-3155", PageLocked(pg)); + + clear_page_dirty_for_io(pg); + + reiser4_wait_page_writeback(pg); + + node = jprivate(pg); + BUG_ON(node == NULL); + + spin_lock_jnode(node); + + atom = jnode_get_atom(node); + if (atom == NULL) { + assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY)); + spin_unlock_jnode(node); + return; + } + + /* We can remove jnode from transaction even if it is on flush queue + * prepped list, we only need to be sure that flush queue is not being + * written by reiser4_write_fq(). reiser4_write_fq() does not use atom + * spin lock for protection of the prepped nodes list, instead + * write_fq() increments atom's nr_running_queues counters for the time + * when prepped list is not protected by spin lock. Here we check this + * counter if we want to remove jnode from flush queue and, if the + * counter is not zero, wait all reiser4_write_fq() for this atom to + * complete. This is not significant overhead. */ + while (JF_ISSET(node, JNODE_FLUSH_QUEUED) && atom->nr_running_queues) { + spin_unlock_jnode(node); + /* + * at this moment we want to wait for "atom event", viz. wait + * until @node can be removed from flush queue. But + * reiser4_atom_wait_event() cannot be called with page locked, + * because it deadlocks with jnode_extent_write(). Unlock page, + * after making sure (through get_page()) that it cannot + * be released from memory. + */ + get_page(pg); + unlock_page(pg); + reiser4_atom_wait_event(atom); + lock_page(pg); + /* + * page may has been detached by ->writepage()->releasepage(). + */ + reiser4_wait_page_writeback(pg); + spin_lock_jnode(node); + put_page(pg); + atom = jnode_get_atom(node); +/* VS-FIXME-HANS: improve the commenting in this function */ + if (atom == NULL) { + spin_unlock_jnode(node); + return; + } + } + reiser4_uncapture_block(node); + spin_unlock_atom(atom); + jput(node); +} + +/* this is used in extent's kill hook to uncapture and unhash jnodes attached to + * inode's tree of jnodes */ +void reiser4_uncapture_jnode(jnode * node) +{ + txn_atom *atom; + + assert_spin_locked(&(node->guard)); + assert("", node->pg == 0); + + atom = jnode_get_atom(node); + if (atom == NULL) { + assert("jmacd-7111", !JF_ISSET(node, JNODE_DIRTY)); + spin_unlock_jnode(node); + return; + } + + reiser4_uncapture_block(node); + spin_unlock_atom(atom); + jput(node); +} + +/* No-locking version of assign_txnh. Sets the transaction handle's atom pointer, + increases atom refcount and txnh_count, adds to txnh_list. */ +static void capture_assign_txnh_nolock(txn_atom *atom, txn_handle *txnh) +{ + assert("umka-200", atom != NULL); + assert("umka-201", txnh != NULL); + + assert_spin_locked(&(txnh->hlock)); + assert_spin_locked(&(atom->alock)); + assert("jmacd-824", txnh->atom == NULL); + assert("nikita-3540", atom_isopen(atom)); + BUG_ON(txnh->atom != NULL); + + atomic_inc(&atom->refcount); + txnh->atom = atom; + reiser4_ctx_gfp_mask_set(); + list_add_tail(&txnh->txnh_link, &atom->txnh_list); + atom->txnh_count += 1; +} + +/* No-locking version of assign_block. Sets the block's atom pointer, references the + block, adds it to the clean or dirty capture_jnode list, increments capture_count. 
*/ +static void capture_assign_block_nolock(txn_atom *atom, jnode *node) +{ + assert("umka-202", atom != NULL); + assert("umka-203", node != NULL); + assert_spin_locked(&(node->guard)); + assert_spin_locked(&(atom->alock)); + assert("jmacd-323", node->atom == NULL); + BUG_ON(!list_empty_careful(&node->capture_link)); + assert("nikita-3470", !JF_ISSET(node, JNODE_DIRTY)); + + /* Pointer from jnode to atom is not counted in atom->refcount. */ + node->atom = atom; + + list_add_tail(&node->capture_link, ATOM_CLEAN_LIST(atom)); + atom->capture_count += 1; + /* reference to jnode is acquired by atom. */ + jref(node); + + ON_DEBUG(count_jnode(atom, node, NOT_CAPTURED, CLEAN_LIST, 1)); + + LOCK_CNT_INC(t_refs); +} + +/* common code for dirtying both unformatted jnodes and formatted znodes. */ +static void do_jnode_make_dirty(jnode * node, txn_atom * atom) +{ + assert_spin_locked(&(node->guard)); + assert_spin_locked(&(atom->alock)); + assert("jmacd-3981", !JF_ISSET(node, JNODE_DIRTY)); + + JF_SET(node, JNODE_DIRTY); + + if (!JF_ISSET(node, JNODE_CLUSTER_PAGE)) + get_current_context()->nr_marked_dirty++; + + /* We grab2flush_reserve one additional block only if node was + not CREATED and jnode_flush did not sort it into neither + relocate set nor overwrite one. If node is in overwrite or + relocate set we assume that atom's flush reserved counter was + already adjusted. */ + if (!JF_ISSET(node, JNODE_CREATED) && !JF_ISSET(node, JNODE_RELOC) + && !JF_ISSET(node, JNODE_OVRWR) && jnode_is_leaf(node) + && !jnode_is_cluster_page(node)) { + assert("vs-1093", !reiser4_blocknr_is_fake(&node->blocknr)); + assert("vs-1506", *jnode_get_block(node) != 0); + grabbed2flush_reserved_nolock(atom, (__u64) 1); + JF_SET(node, JNODE_FLUSH_RESERVED); + } + + if (!JF_ISSET(node, JNODE_FLUSH_QUEUED)) { + /* If the atom is not set yet, it will be added to the appropriate list in + capture_assign_block_nolock. */ + /* Sometimes a node is set dirty before being captured -- the case for new + jnodes. In that case the jnode will be added to the appropriate list + in capture_assign_block_nolock. Another reason not to re-link jnode is + that jnode is on a flush queue (see flush.c for details) */ + + int level = jnode_get_level(node); + + assert("nikita-3152", !JF_ISSET(node, JNODE_OVRWR)); + assert("zam-654", atom->stage < ASTAGE_PRE_COMMIT); + assert("nikita-2607", 0 <= level); + assert("nikita-2606", level <= REAL_MAX_ZTREE_HEIGHT); + + /* move node to atom's dirty list */ + list_move_tail(&node->capture_link, ATOM_DIRTY_LIST(atom, level)); + ON_DEBUG(count_jnode + (atom, node, NODE_LIST(node), DIRTY_LIST, 1)); + } +} + +/* Set the dirty status for this (spin locked) jnode. */ +void jnode_make_dirty_locked(jnode * node) +{ + assert("umka-204", node != NULL); + assert_spin_locked(&(node->guard)); + + if (REISER4_DEBUG && rofs_jnode(node)) { + warning("nikita-3365", "Dirtying jnode on rofs"); + dump_stack(); + } + + /* Fast check for already dirty node */ + if (!JF_ISSET(node, JNODE_DIRTY)) { + txn_atom *atom; + + atom = jnode_get_atom(node); + assert("vs-1094", atom); + /* Check jnode dirty status again because node spin lock might + * be released inside jnode_get_atom(). */ + if (likely(!JF_ISSET(node, JNODE_DIRTY))) + do_jnode_make_dirty(node, atom); + spin_unlock_atom(atom); + } +} + +/* Set the dirty status for this znode. 
*/ +void znode_make_dirty(znode * z) +{ + jnode *node; + struct page *page; + + assert("umka-204", z != NULL); + assert("nikita-3290", znode_above_root(z) || znode_is_loaded(z)); + assert("nikita-3560", znode_is_write_locked(z)); + + node = ZJNODE(z); + /* znode is longterm locked, we can check dirty bit without spinlock */ + if (JF_ISSET(node, JNODE_DIRTY)) { + /* znode is dirty already. All we have to do is to change znode version */ + z->version = znode_build_version(jnode_get_tree(node)); + return; + } + + spin_lock_jnode(node); + jnode_make_dirty_locked(node); + page = jnode_page(node); + if (page != NULL) { + /* this is useful assertion (allows one to check that no + * modifications are lost due to update of in-flight page), + * but it requires locking on page to check PG_writeback + * bit. */ + /* assert("nikita-3292", + !PageWriteback(page) || ZF_ISSET(z, JNODE_WRITEBACK)); */ + get_page(page); + + /* jnode lock is not needed for the rest of + * znode_set_dirty(). */ + spin_unlock_jnode(node); + /* reiser4 file write code calls set_page_dirty for + * unformatted nodes, for formatted nodes we do it here. */ + set_page_dirty_notag(page); + put_page(page); + /* bump version counter in znode */ + z->version = znode_build_version(jnode_get_tree(node)); + } else { + assert("zam-596", znode_above_root(JZNODE(node))); + spin_unlock_jnode(node); + } + + assert("nikita-1900", znode_is_write_locked(z)); + assert("jmacd-9777", node->atom != NULL); +} + +int reiser4_sync_atom(txn_atom * atom) +{ + int result; + txn_handle *txnh; + + txnh = get_current_context()->trans; + + result = 0; + if (atom != NULL) { + if (atom->stage < ASTAGE_PRE_COMMIT) { + spin_lock_txnh(txnh); + capture_assign_txnh_nolock(atom, txnh); + result = force_commit_atom(txnh); + } else if (atom->stage < ASTAGE_POST_COMMIT) { + /* wait atom commit */ + reiser4_atom_wait_event(atom); + /* try once more */ + result = RETERR(-E_REPEAT); + } else + spin_unlock_atom(atom); + } + return result; +} + +#if REISER4_DEBUG + +/* move jnode form one list to another + call this after atom->capture_count is updated */ +void +count_jnode(txn_atom * atom, jnode * node, atom_list old_list, + atom_list new_list, int check_lists) +{ + struct list_head *pos; + + assert("zam-1018", atom_is_protected(atom)); + assert_spin_locked(&(node->guard)); + assert("", NODE_LIST(node) == old_list); + + switch (NODE_LIST(node)) { + case NOT_CAPTURED: + break; + case DIRTY_LIST: + assert("", atom->dirty > 0); + atom->dirty--; + break; + case CLEAN_LIST: + assert("", atom->clean > 0); + atom->clean--; + break; + case FQ_LIST: + assert("", atom->fq > 0); + atom->fq--; + break; + case WB_LIST: + assert("", atom->wb > 0); + atom->wb--; + break; + case OVRWR_LIST: + assert("", atom->ovrwr > 0); + atom->ovrwr--; + break; + default: + impossible("", ""); + } + + switch (new_list) { + case NOT_CAPTURED: + break; + case DIRTY_LIST: + atom->dirty++; + break; + case CLEAN_LIST: + atom->clean++; + break; + case FQ_LIST: + atom->fq++; + break; + case WB_LIST: + atom->wb++; + break; + case OVRWR_LIST: + atom->ovrwr++; + break; + default: + impossible("", ""); + } + ASSIGN_NODE_LIST(node, new_list); + if (0 && check_lists) { + int count; + tree_level level; + + count = 0; + + /* flush queue list */ + /* reiser4_check_fq(atom); */ + + /* dirty list */ + count = 0; + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + list_for_each(pos, ATOM_DIRTY_LIST(atom, level)) + count++; + } + if (count != atom->dirty) + warning("", "dirty counter %d, real %d\n", 
atom->dirty, + count); + + /* clean list */ + count = 0; + list_for_each(pos, ATOM_CLEAN_LIST(atom)) + count++; + if (count != atom->clean) + warning("", "clean counter %d, real %d\n", atom->clean, + count); + + /* wb list */ + count = 0; + list_for_each(pos, ATOM_WB_LIST(atom)) + count++; + if (count != atom->wb) + warning("", "wb counter %d, real %d\n", atom->wb, + count); + + /* overwrite list */ + count = 0; + list_for_each(pos, ATOM_OVRWR_LIST(atom)) + count++; + + if (count != atom->ovrwr) + warning("", "ovrwr counter %d, real %d\n", atom->ovrwr, + count); + } + assert("vs-1624", atom->num_queued == atom->fq); + if (atom->capture_count != + atom->dirty + atom->clean + atom->ovrwr + atom->wb + atom->fq) { + printk + ("count %d, dirty %d clean %d ovrwr %d wb %d fq %d\n", + atom->capture_count, atom->dirty, atom->clean, atom->ovrwr, + atom->wb, atom->fq); + assert("vs-1622", + atom->capture_count == + atom->dirty + atom->clean + atom->ovrwr + atom->wb + + atom->fq); + } +} + +#endif + +int reiser4_capture_super_block(struct super_block *s) +{ + int result; + znode *uber; + lock_handle lh; + + init_lh(&lh); + result = get_uber_znode(reiser4_get_tree(s), + ZNODE_WRITE_LOCK, ZNODE_LOCK_LOPRI, &lh); + if (result) + return result; + + uber = lh.node; + /* Grabbing one block for superblock */ + result = reiser4_grab_space_force((__u64) 1, BA_RESERVED); + if (result != 0) + return result; + + znode_make_dirty(uber); + + done_lh(&lh); + return 0; +} + +/* Wakeup every handle on the atom's WAITFOR list */ +static void wakeup_atom_waitfor_list(txn_atom * atom) +{ + txn_wait_links *wlinks; + + assert("umka-210", atom != NULL); + + /* atom is locked */ + list_for_each_entry(wlinks, &atom->fwaitfor_list, _fwaitfor_link) { + if (wlinks->waitfor_cb == NULL || + wlinks->waitfor_cb(atom, wlinks)) + /* Wake up. */ + reiser4_wake_up(wlinks->_lock_stack); + } +} + +/* Wakeup every handle on the atom's WAITING list */ +static void wakeup_atom_waiting_list(txn_atom * atom) +{ + txn_wait_links *wlinks; + + assert("umka-211", atom != NULL); + + /* atom is locked */ + list_for_each_entry(wlinks, &atom->fwaiting_list, _fwaiting_link) { + if (wlinks->waiting_cb == NULL || + wlinks->waiting_cb(atom, wlinks)) + /* Wake up. */ + reiser4_wake_up(wlinks->_lock_stack); + } +} + +/* helper function used by capture_fuse_wait() to avoid "spurious wake-ups" */ +static int wait_for_fusion(txn_atom * atom, txn_wait_links * wlinks) +{ + assert("nikita-3330", atom != NULL); + assert_spin_locked(&(atom->alock)); + + /* atom->txnh_count == 1 is for waking waiters up if we are releasing + * last transaction handle. */ + return atom->stage != ASTAGE_CAPTURE_WAIT || atom->txnh_count == 1; +} + +/* The general purpose of this function is to wait on the first of two possible events. + The situation is that a handle (and its atom atomh) is blocked trying to capture a + block (i.e., node) but the node's atom (atomf) is in the CAPTURE_WAIT state. The + handle's atom (atomh) is not in the CAPTURE_WAIT state. However, atomh could fuse with + another atom or, due to age, enter the CAPTURE_WAIT state itself, at which point it + needs to unblock the handle to avoid deadlock. When the txnh is unblocked it will + proceed and fuse the two atoms in the CAPTURE_WAIT state. + + In other words, if either atomh or atomf change state, the handle will be awakened, + thus there are two lists per atom: WAITING and WAITFOR. 
+ + This is also called by capture_assign_txnh with (atomh == NULL) to wait for atomf to + close but it is not assigned to an atom of its own. + + Lock ordering in this method: all four locks are held: JNODE_LOCK, TXNH_LOCK, + BOTH_ATOM_LOCKS. Result: all four locks are released. +*/ +static int capture_fuse_wait(txn_handle * txnh, txn_atom * atomf, + txn_atom * atomh, txn_capture mode) +{ + int ret; + txn_wait_links wlinks; + + assert("umka-213", txnh != NULL); + assert("umka-214", atomf != NULL); + + if ((mode & TXN_CAPTURE_NONBLOCKING) != 0) { + spin_unlock_txnh(txnh); + spin_unlock_atom(atomf); + + if (atomh) { + spin_unlock_atom(atomh); + } + + return RETERR(-E_BLOCK); + } + + /* Initialize the waiting list links. */ + init_wlinks(&wlinks); + + /* Add txnh to atomf's waitfor list, unlock atomf. */ + list_add_tail(&wlinks._fwaitfor_link, &atomf->fwaitfor_list); + wlinks.waitfor_cb = wait_for_fusion; + atomic_inc(&atomf->refcount); + spin_unlock_atom(atomf); + + if (atomh) { + /* Add txnh to atomh's waiting list, unlock atomh. */ + list_add_tail(&wlinks._fwaiting_link, &atomh->fwaiting_list); + atomic_inc(&atomh->refcount); + spin_unlock_atom(atomh); + } + + /* Go to sleep. */ + spin_unlock_txnh(txnh); + + ret = reiser4_prepare_to_sleep(wlinks._lock_stack); + if (ret == 0) { + reiser4_go_to_sleep(wlinks._lock_stack); + ret = RETERR(-E_REPEAT); + } + + /* Remove from the waitfor list. */ + spin_lock_atom(atomf); + + list_del(&wlinks._fwaitfor_link); + atom_dec_and_unlock(atomf); + + if (atomh) { + /* Remove from the waiting list. */ + spin_lock_atom(atomh); + list_del(&wlinks._fwaiting_link); + atom_dec_and_unlock(atomh); + } + return ret; +} + +static void lock_two_atoms(txn_atom * one, txn_atom * two) +{ + assert("zam-1067", one != two); + + /* lock the atom with lesser address first */ + if (one < two) { + spin_lock_atom(one); + spin_lock_atom_nested(two); + } else { + spin_lock_atom(two); + spin_lock_atom_nested(one); + } +} + +/* Perform the necessary work to prepare for fusing two atoms, which involves + * acquiring two atom locks in the proper order. If one of the node's atom is + * blocking fusion (i.e., it is in the CAPTURE_WAIT stage) and the handle's + * atom is not then the handle's request is put to sleep. If the node's atom + * is committing, then the node can be copy-on-captured. Otherwise, pick the + * atom with fewer pointers to be fused into the atom with more pointer and + * call capture_fuse_into. + */ +static int capture_init_fusion(jnode *node, txn_handle *txnh, txn_capture mode) +{ + txn_atom * txnh_atom = txnh->atom; + txn_atom * block_atom = node->atom; + + atomic_inc(&txnh_atom->refcount); + atomic_inc(&block_atom->refcount); + + spin_unlock_txnh(txnh); + spin_unlock_jnode(node); + + lock_two_atoms(txnh_atom, block_atom); + + if (txnh->atom != txnh_atom || node->atom != block_atom ) { + release_two_atoms(txnh_atom, block_atom); + return RETERR(-E_REPEAT); + } + + atomic_dec(&txnh_atom->refcount); + atomic_dec(&block_atom->refcount); + + assert ("zam-1066", atom_isopen(txnh_atom)); + + if (txnh_atom->stage >= block_atom->stage || + (block_atom->stage == ASTAGE_CAPTURE_WAIT && block_atom->txnh_count == 0)) { + capture_fuse_into(txnh_atom, block_atom); + return RETERR(-E_REPEAT); + } + spin_lock_txnh(txnh); + return capture_fuse_wait(txnh, block_atom, txnh_atom, mode); +} + +/* This function splices together two jnode lists (small and large) and sets all jnodes in + the small list to point to the large atom. Returns the length of the list. 
*/ +static int +capture_fuse_jnode_lists(txn_atom *large, struct list_head *large_head, + struct list_head *small_head) +{ + int count = 0; + jnode *node; + + assert("umka-218", large != NULL); + assert("umka-219", large_head != NULL); + assert("umka-220", small_head != NULL); + /* small atom should be locked also. */ + assert_spin_locked(&(large->alock)); + + /* For every jnode on small's capture list... */ + list_for_each_entry(node, small_head, capture_link) { + count += 1; + + /* With the jnode lock held, update atom pointer. */ + spin_lock_jnode(node); + node->atom = large; + spin_unlock_jnode(node); + } + + /* Splice the lists. */ + list_splice_init(small_head, large_head->prev); + + return count; +} + +/* This function splices together two txnh lists (small and large) and sets all txn handles in + the small list to point to the large atom. Returns the length of the list. */ +static int +capture_fuse_txnh_lists(txn_atom *large, struct list_head *large_head, + struct list_head *small_head) +{ + int count = 0; + txn_handle *txnh; + + assert("umka-221", large != NULL); + assert("umka-222", large_head != NULL); + assert("umka-223", small_head != NULL); + + /* Adjust every txnh to the new atom. */ + list_for_each_entry(txnh, small_head, txnh_link) { + count += 1; + + /* With the txnh lock held, update atom pointer. */ + spin_lock_txnh(txnh); + txnh->atom = large; + spin_unlock_txnh(txnh); + } + + /* Splice the txn_handle list. */ + list_splice_init(small_head, large_head->prev); + + return count; +} + +/* This function fuses two atoms. The captured nodes and handles belonging to SMALL are + added to LARGE and their ->atom pointers are all updated. The associated counts are + updated as well, and any waiting handles belonging to either are awakened. Finally the + smaller atom's refcount is decremented. +*/ +static void capture_fuse_into(txn_atom * small, txn_atom * large) +{ + int level; + unsigned zcount = 0; + unsigned tcount = 0; + + assert("umka-224", small != NULL); + assert("umka-225", small != NULL); + + assert_spin_locked(&(large->alock)); + assert_spin_locked(&(small->alock)); + + assert("jmacd-201", atom_isopen(small)); + assert("jmacd-202", atom_isopen(large)); + + /* Splice and update the per-level dirty jnode lists */ + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; level += 1) { + zcount += + capture_fuse_jnode_lists(large, + ATOM_DIRTY_LIST(large, level), + ATOM_DIRTY_LIST(small, level)); + } + + /* Splice and update the [clean,dirty] jnode and txnh lists */ + zcount += + capture_fuse_jnode_lists(large, ATOM_CLEAN_LIST(large), + ATOM_CLEAN_LIST(small)); + zcount += + capture_fuse_jnode_lists(large, ATOM_OVRWR_LIST(large), + ATOM_OVRWR_LIST(small)); + zcount += + capture_fuse_jnode_lists(large, ATOM_WB_LIST(large), + ATOM_WB_LIST(small)); + zcount += + capture_fuse_jnode_lists(large, &large->inodes, &small->inodes); + tcount += + capture_fuse_txnh_lists(large, &large->txnh_list, + &small->txnh_list); + + /* Check our accounting. 
*/ + assert("jmacd-1063", + zcount + small->num_queued == small->capture_count); + assert("jmacd-1065", tcount == small->txnh_count); + + /* sum numbers of waiters threads */ + large->nr_waiters += small->nr_waiters; + small->nr_waiters = 0; + + /* splice flush queues */ + reiser4_fuse_fq(large, small); + + /* update counter of jnode on every atom' list */ + ON_DEBUG(large->dirty += small->dirty; + small->dirty = 0; + large->clean += small->clean; + small->clean = 0; + large->ovrwr += small->ovrwr; + small->ovrwr = 0; + large->wb += small->wb; + small->wb = 0; + large->fq += small->fq; + small->fq = 0;); + + /* count flushers in result atom */ + large->nr_flushers += small->nr_flushers; + small->nr_flushers = 0; + + /* update counts of flushed nodes */ + large->flushed += small->flushed; + small->flushed = 0; + + /* Transfer list counts to large. */ + large->txnh_count += small->txnh_count; + large->capture_count += small->capture_count; + + /* Add all txnh references to large. */ + atomic_add(small->txnh_count, &large->refcount); + atomic_sub(small->txnh_count, &small->refcount); + + /* Reset small counts */ + small->txnh_count = 0; + small->capture_count = 0; + + /* Assign the oldest start_time, merge flags. */ + large->start_time = min(large->start_time, small->start_time); + large->flags |= small->flags; + + /* Merge blocknr sets. */ + blocknr_set_merge(&small->wandered_map, &large->wandered_map); + + /* Merge delete sets. */ + atom_dset_merge(small, large); + + /* Merge allocated/deleted file counts */ + large->nr_objects_deleted += small->nr_objects_deleted; + large->nr_objects_created += small->nr_objects_created; + + small->nr_objects_deleted = 0; + small->nr_objects_created = 0; + + /* Merge allocated blocks counts */ + large->nr_blocks_allocated += small->nr_blocks_allocated; + + large->nr_running_queues += small->nr_running_queues; + small->nr_running_queues = 0; + + /* Merge blocks reserved for overwrite set. */ + large->flush_reserved += small->flush_reserved; + small->flush_reserved = 0; + + if (large->stage < small->stage) { + /* Large only needs to notify if it has changed state. */ + reiser4_atom_set_stage(large, small->stage); + wakeup_atom_waiting_list(large); + } + + reiser4_atom_set_stage(small, ASTAGE_INVALID); + + /* Notify any waiters--small needs to unload its wait lists. Waiters + actually remove themselves from the list before returning from the + fuse_wait function. */ + wakeup_atom_waiting_list(small); + + /* Unlock atoms */ + spin_unlock_atom(large); + atom_dec_and_unlock(small); +} + +/* TXNMGR STUFF */ + +/* Release a block from the atom, reversing the effects of being captured, + do not release atom's reference to jnode due to holding spin-locks. + Currently this is only called when the atom commits. + + NOTE: this function does not release a (journal) reference to jnode + due to locking optimizations, you should call jput() somewhere after + calling reiser4_uncapture_block(). 
*/ +void reiser4_uncapture_block(jnode * node) +{ + txn_atom *atom; + + assert("umka-226", node != NULL); + atom = node->atom; + assert("umka-228", atom != NULL); + + assert("jmacd-1021", node->atom == atom); + assert_spin_locked(&(node->guard)); + assert("jmacd-1023", atom_is_protected(atom)); + + JF_CLR(node, JNODE_DIRTY); + JF_CLR(node, JNODE_RELOC); + JF_CLR(node, JNODE_OVRWR); + JF_CLR(node, JNODE_CREATED); + JF_CLR(node, JNODE_WRITEBACK); + JF_CLR(node, JNODE_REPACK); + + list_del_init(&node->capture_link); + if (JF_ISSET(node, JNODE_FLUSH_QUEUED)) { + assert("zam-925", atom_isopen(atom)); + assert("vs-1623", NODE_LIST(node) == FQ_LIST); + ON_DEBUG(atom->num_queued--); + JF_CLR(node, JNODE_FLUSH_QUEUED); + } + atom->capture_count -= 1; + ON_DEBUG(count_jnode(atom, node, NODE_LIST(node), NOT_CAPTURED, 1)); + node->atom = NULL; + + spin_unlock_jnode(node); + LOCK_CNT_DEC(t_refs); +} + +/* Unconditional insert of jnode into atom's overwrite list. Currently used in + bitmap-based allocator code for adding modified bitmap blocks the + transaction. @atom and @node are spin locked */ +void insert_into_atom_ovrwr_list(txn_atom * atom, jnode * node) +{ + assert("zam-538", atom_is_protected(atom)); + assert_spin_locked(&(node->guard)); + assert("zam-899", JF_ISSET(node, JNODE_OVRWR)); + assert("zam-543", node->atom == NULL); + assert("vs-1433", !jnode_is_unformatted(node) && !jnode_is_znode(node)); + + list_add(&node->capture_link, ATOM_OVRWR_LIST(atom)); + jref(node); + node->atom = atom; + atom->capture_count++; + ON_DEBUG(count_jnode(atom, node, NODE_LIST(node), OVRWR_LIST, 1)); +} + +static int count_deleted_blocks_actor(txn_atom * atom, + const reiser4_block_nr * a, + const reiser4_block_nr * b, void *data) +{ + reiser4_block_nr *counter = data; + + assert("zam-995", data != NULL); + assert("zam-996", a != NULL); + if (b == NULL) + *counter += 1; + else + *counter += *b; + return 0; +} + +reiser4_block_nr txnmgr_count_deleted_blocks(void) +{ + reiser4_block_nr result; + txn_mgr *tmgr = &get_super_private(reiser4_get_current_sb())->tmgr; + txn_atom *atom; + + result = 0; + + spin_lock_txnmgr(tmgr); + list_for_each_entry(atom, &tmgr->atoms_list, atom_link) { + spin_lock_atom(atom); + if (atom_isopen(atom)) + atom_dset_deferred_apply(atom, count_deleted_blocks_actor, &result, 0); + spin_unlock_atom(atom); + } + spin_unlock_txnmgr(tmgr); + + return result; +} + +void atom_dset_init(txn_atom *atom) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + blocknr_list_init(&atom->discard.delete_set); + } else { + blocknr_set_init(&atom->nodiscard.delete_set); + } +} + +void atom_dset_destroy(txn_atom *atom) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + blocknr_list_destroy(&atom->discard.delete_set); + } else { + blocknr_set_destroy(&atom->nodiscard.delete_set); + } +} + +void atom_dset_merge(txn_atom *from, txn_atom *to) +{ + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + blocknr_list_merge(&from->discard.delete_set, &to->discard.delete_set); + } else { + blocknr_set_merge(&from->nodiscard.delete_set, &to->nodiscard.delete_set); + } +} + +int atom_dset_deferred_apply(txn_atom* atom, + blocknr_set_actor_f actor, + void *data, + int delete) +{ + int ret; + + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + ret = blocknr_list_iterator(atom, + &atom->discard.delete_set, + actor, + data, + delete); + } else { + ret = blocknr_set_iterator(atom, + &atom->nodiscard.delete_set, + actor, + data, + delete); + } + + return ret; 
+} + +extern int atom_dset_deferred_add_extent(txn_atom *atom, + void **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len) +{ + int ret; + + if (reiser4_is_set(reiser4_get_current_sb(), REISER4_DISCARD)) { + ret = blocknr_list_add_extent(atom, + &atom->discard.delete_set, + (blocknr_list_entry**)new_entry, + start, + len); + } else { + ret = blocknr_set_add_extent(atom, + &atom->nodiscard.delete_set, + (blocknr_set_entry**)new_entry, + start, + len); + } + + return ret; +} + +/* + * Local variables: + * c-indentation-style: "K&R" + * mode-name: "LC" + * c-basic-offset: 8 + * tab-width: 8 + * fill-column: 79 + * End: + */ diff --git a/fs/reiser4/txnmgr.h b/fs/reiser4/txnmgr.h new file mode 100644 index 000000000000..72b84a26ff92 --- /dev/null +++ b/fs/reiser4/txnmgr.h @@ -0,0 +1,755 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* data-types and function declarations for transaction manager. See txnmgr.c + * for details. */ + +#ifndef __REISER4_TXNMGR_H__ +#define __REISER4_TXNMGR_H__ + +#include "forward.h" +#include "dformat.h" + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <asm/atomic.h> +#include <linux/wait.h> + +/* TYPE DECLARATIONS */ + +/* This enumeration describes the possible types of a capture request (reiser4_try_capture). + A capture request dynamically assigns a block to the calling thread's transaction + handle. */ +typedef enum { + /* A READ_ATOMIC request indicates that a block will be read and that the caller's + atom should fuse in order to ensure that the block commits atomically with the + caller. */ + TXN_CAPTURE_READ_ATOMIC = (1 << 0), + + /* A READ_NONCOM request indicates that a block will be read and that the caller is + willing to read a non-committed block without causing atoms to fuse. */ + TXN_CAPTURE_READ_NONCOM = (1 << 1), + + /* A READ_MODIFY request indicates that a block will be read but that the caller + wishes for the block to be captured as it will be written. This capture request + mode is not currently used, but eventually it will be useful for preventing + deadlock in read-modify-write cycles. */ + TXN_CAPTURE_READ_MODIFY = (1 << 2), + + /* A WRITE capture request indicates that a block will be modified and that atoms + should fuse to make the commit atomic. */ + TXN_CAPTURE_WRITE = (1 << 3), + + /* CAPTURE_TYPES is a mask of the four above capture types, used to separate the + exclusive type designation from extra bits that may be supplied -- see + below. */ + TXN_CAPTURE_TYPES = (TXN_CAPTURE_READ_ATOMIC | + TXN_CAPTURE_READ_NONCOM | TXN_CAPTURE_READ_MODIFY | + TXN_CAPTURE_WRITE), + + /* A subset of CAPTURE_TYPES, CAPTURE_WTYPES is a mask of request types that + indicate modification will occur. */ + TXN_CAPTURE_WTYPES = (TXN_CAPTURE_READ_MODIFY | TXN_CAPTURE_WRITE), + + /* An option to reiser4_try_capture, NONBLOCKING indicates that the caller would + prefer not to sleep waiting for an aging atom to commit. */ + TXN_CAPTURE_NONBLOCKING = (1 << 4), + + /* An option to reiser4_try_capture to prevent atom fusion, just simple + capturing is allowed */ + TXN_CAPTURE_DONT_FUSE = (1 << 5) + + /* This macro selects only the exclusive capture request types, stripping out any + options that were supplied (i.e., NONBLOCKING). 
 */
+#define CAPTURE_TYPE(x) ((x) & TXN_CAPTURE_TYPES)
+} txn_capture;
+
+/* There are two kinds of transaction handle: WRITE_FUSING and READ_FUSING, the only
+ difference is in the handling of read requests. A WRITE_FUSING transaction handle
+ defaults read capture requests to TXN_CAPTURE_READ_NONCOM whereas a READ_FUSING
+ transaction handle defaults to TXN_CAPTURE_READ_ATOMIC. */
+typedef enum {
+ TXN_WRITE_FUSING = (1 << 0),
+ TXN_READ_FUSING = (1 << 1) | TXN_WRITE_FUSING, /* READ implies WRITE */
+} txn_mode;
+
+/* Every atom has a stage, which is one of these exclusive values: */
+typedef enum {
+ /* Initially an atom is free. */
+ ASTAGE_FREE = 0,
+
+ /* An atom begins by entering the CAPTURE_FUSE stage, where it proceeds to capture
+ blocks and fuse with other atoms. */
+ ASTAGE_CAPTURE_FUSE = 1,
+
+ /* We need to have an ASTAGE_CAPTURE_SLOW in which an atom fuses with one node for every X nodes it flushes to disk where X > 1. */
+
+ /* When an atom reaches a certain age it must do all it can to commit. An atom in
+ the CAPTURE_WAIT stage refuses new transaction handles and prevents fusion from
+ atoms in the CAPTURE_FUSE stage. */
+ ASTAGE_CAPTURE_WAIT = 2,
+
+ /* Waiting for I/O before commit. Copy-on-capture (see
+ http://namesys.com/v4/v4.html). */
+ ASTAGE_PRE_COMMIT = 3,
+
+ /* Post-commit overwrite I/O. Steal-on-capture. */
+ ASTAGE_POST_COMMIT = 4,
+
+ /* Atom which waits for the last reference to it to be removed
+ * before being deleted from memory */
+ ASTAGE_DONE = 5,
+
+ /* invalid atom. */
+ ASTAGE_INVALID = 6,
+
+} txn_stage;
+
+/* Certain flags may be set in the txn_atom->flags field. */
+typedef enum {
+ /* Indicates that the atom should commit as soon as possible. */
+ ATOM_FORCE_COMMIT = (1 << 0),
+ /* to avoid an endless loop, mark the atom (which was considered as too
+ * small) after a failed attempt to fuse it. */
+ ATOM_CANCEL_FUSION = (1 << 1)
+} txn_flags;
+
+/* Flags for controlling commit_txnh */
+typedef enum {
+ /* Wait for atom commit completion in commit_txnh */
+ TXNH_WAIT_COMMIT = 0x2,
+ /* Don't commit atom when this handle is closed */
+ TXNH_DONT_COMMIT = 0x4
+} txn_handle_flags_t;
+
+/* TYPE DEFINITIONS */
+
+/* A note on lock ordering: the handle & jnode spinlock protects reading of their ->atom
+ fields, so typically an operation on the atom through either of these objects must (1)
+ lock the object, (2) read the atom pointer, (3) lock the atom.
+
+ During atom fusion, the process holds locks on both atoms at once. Then, it iterates
+ through the list of handles and pages held by the smaller of the two atoms. For each
+ handle and page referencing the smaller atom, the fusing process must: (1) lock the
+ object, and (2) update the atom pointer.
+
+ You can see that there is a conflict of lock ordering here, so the more-complex
+ procedure should have priority, i.e., the fusing process has priority so that it is
+ guaranteed to make progress and to avoid restarts.
+
+ This decision, however, means additional complexity for acquiring the atom lock in the
+ first place.
+
+ The general original procedure followed in the code was:
+
+ TXN_OBJECT *obj = ...;
+ TXN_ATOM *atom;
+
+ spin_lock (& obj->_lock);
+
+ atom = obj->_atom;
+
+ if (! spin_trylock_atom (atom))
+ {
+ spin_unlock (& obj->_lock);
+ RESTART OPERATION, THERE WAS A RACE;
+ }
+
+ ELSE YOU HAVE BOTH ATOM AND OBJ LOCKED
+
+ It has, however, been found that this wastes CPU a lot in a manner that is
+ hard to profile.
So, proper refcounting was added to atoms, and new + standard locking sequence is like following: + + TXN_OBJECT *obj = ...; + TXN_ATOM *atom; + + spin_lock (& obj->_lock); + + atom = obj->_atom; + + if (! spin_trylock_atom (atom)) + { + atomic_inc (& atom->refcount); + spin_unlock (& obj->_lock); + spin_lock (&atom->_lock); + atomic_dec (& atom->refcount); + // HERE atom is locked + spin_unlock (&atom->_lock); + RESTART OPERATION, THERE WAS A RACE; + } + + ELSE YOU HAVE BOTH ATOM AND OBJ LOCKED + + (core of this is implemented in trylock_throttle() function) + + See the jnode_get_atom() function for a common case. + + As an additional (and important) optimization allowing to avoid restarts, + it is possible to re-check required pre-conditions at the HERE point in + code above and proceed without restarting if they are still satisfied. +*/ + +/* An atomic transaction: this is the underlying system representation + of a transaction, not the one seen by clients. + + Invariants involving this data-type: + + [sb-fake-allocated] +*/ +struct txn_atom { + /* The spinlock protecting the atom, held during fusion and various other state + changes. */ + spinlock_t alock; + + /* The atom's reference counter, increasing (in case of a duplication + of an existing reference or when we are sure that some other + reference exists) may be done without taking spinlock, decrementing + of the ref. counter requires a spinlock to be held. + + Each transaction handle counts in ->refcount. All jnodes count as + one reference acquired in atom_begin_andlock(), released in + commit_current_atom(). + */ + atomic_t refcount; + + /* The atom_id identifies the atom in persistent records such as the log. */ + __u32 atom_id; + + /* Flags holding any of the txn_flags enumerated values (e.g., + ATOM_FORCE_COMMIT). */ + __u32 flags; + + /* Number of open handles. */ + __u32 txnh_count; + + /* The number of znodes captured by this atom. Equal to the sum of lengths of the + dirty_nodes[level] and clean_nodes lists. */ + __u32 capture_count; + +#if REISER4_DEBUG + int clean; + int dirty; + int ovrwr; + int wb; + int fq; +#endif + + __u32 flushed; + + /* Current transaction stage. */ + txn_stage stage; + + /* Start time. */ + unsigned long start_time; + + /* The atom's delete sets. + "simple" are blocknr_set instances and are used when discard is disabled. + "discard" are blocknr_list instances and are used when discard is enabled. */ + union { + struct { + /* The atom's delete set. It collects block numbers of the nodes + which were deleted during the transaction. */ + struct list_head delete_set; + } nodiscard; + + struct { + /* The atom's delete set. It collects all blocks that have been + deallocated (both immediate and deferred) during the transaction. + These blocks are considered for discarding at commit time. + For details see discard.c */ + struct list_head delete_set; + } discard; + }; + + /* The atom's wandered_block mapping. */ + struct list_head wandered_map; + + /* The transaction's list of dirty captured nodes--per level. Index + by (level). dirty_nodes[0] is for znode-above-root */ + struct list_head dirty_nodes[REAL_MAX_ZTREE_HEIGHT + 1]; + + /* The transaction's list of clean captured nodes. */ + struct list_head clean_nodes; + + /* The atom's overwrite set */ + struct list_head ovrwr_nodes; + + /* nodes which are being written to disk */ + struct list_head writeback_nodes; + + /* list of inodes */ + struct list_head inodes; + + /* List of handles associated with this atom. 
*/ + struct list_head txnh_list; + + /* Transaction list link: list of atoms in the transaction manager. */ + struct list_head atom_link; + + /* List of handles waiting FOR this atom: see 'capture_fuse_wait' comment. */ + struct list_head fwaitfor_list; + + /* List of this atom's handles that are waiting: see 'capture_fuse_wait' comment. */ + struct list_head fwaiting_list; + + /* Numbers of objects which were deleted/created in this transaction + thereby numbers of objects IDs which were released/deallocated. */ + int nr_objects_deleted; + int nr_objects_created; + /* number of blocks allocated during the transaction */ + __u64 nr_blocks_allocated; + /* All atom's flush queue objects are on this list */ + struct list_head flush_queues; +#if REISER4_DEBUG + /* number of flush queues for this atom. */ + int nr_flush_queues; + /* Number of jnodes which were removed from atom's lists and put + on flush_queue */ + int num_queued; +#endif + /* number of threads who wait for this atom to complete commit */ + int nr_waiters; + /* number of threads which do jnode_flush() over this atom */ + int nr_flushers; + /* number of flush queues which are IN_USE and jnodes from fq->prepped + are submitted to disk by the reiser4_write_fq() routine. */ + int nr_running_queues; + /* A counter of grabbed unformatted nodes, see a description of the + * reiser4 space reservation scheme at block_alloc.c */ + reiser4_block_nr flush_reserved; +#if REISER4_DEBUG + void *committer; +#endif + struct super_block *super; +}; + +#define ATOM_DIRTY_LIST(atom, level) (&(atom)->dirty_nodes[level]) +#define ATOM_CLEAN_LIST(atom) (&(atom)->clean_nodes) +#define ATOM_OVRWR_LIST(atom) (&(atom)->ovrwr_nodes) +#define ATOM_WB_LIST(atom) (&(atom)->writeback_nodes) +#define ATOM_FQ_LIST(fq) (&(fq)->prepped) + +#define NODE_LIST(node) (node)->list +#define ASSIGN_NODE_LIST(node, list) ON_DEBUG(NODE_LIST(node) = list) +ON_DEBUG(void + count_jnode(txn_atom *, jnode *, atom_list old_list, + atom_list new_list, int check_lists)); + +/* A transaction handle: the client obtains and commits this handle which is assigned by + the system to a txn_atom. */ +struct txn_handle { + /* Spinlock protecting ->atom pointer */ + spinlock_t hlock; + + /* Flags for controlling commit_txnh() behavior */ + /* from txn_handle_flags_t */ + txn_handle_flags_t flags; + + /* Whether it is READ_FUSING or WRITE_FUSING. */ + txn_mode mode; + + /* If assigned, the atom it is part of. */ + txn_atom *atom; + + /* Transaction list link. Head is in txn_atom. */ + struct list_head txnh_link; +}; + +/* The transaction manager: one is contained in the reiser4_super_info_data */ +struct txn_mgr { + /* A spinlock protecting the atom list, id_count, flush_control */ + spinlock_t tmgr_lock; + + /* List of atoms. */ + struct list_head atoms_list; + + /* Number of atoms. */ + int atom_count; + + /* A counter used to assign atom->atom_id values. */ + __u32 id_count; + + /* a mutex object for commit serialization */ + struct mutex commit_mutex; + + /* a list of all txnmrgs served by particular daemon. */ + struct list_head linkage; + + /* description of daemon for this txnmgr */ + ktxnmgrd_context *daemon; + + /* parameters. Adjustable through mount options. */ + unsigned int atom_max_size; + unsigned int atom_max_age; + unsigned int atom_min_size; + /* max number of concurrent flushers for one atom, 0 - unlimited. 
*/ + unsigned int atom_max_flushers; + struct dentry *debugfs_atom_count; + struct dentry *debugfs_id_count; +}; + +/* FUNCTION DECLARATIONS */ + +/* These are the externally (within Reiser4) visible transaction functions, therefore they + are prefixed with "txn_". For comments, see txnmgr.c. */ + +extern int init_txnmgr_static(void); +extern void done_txnmgr_static(void); + +extern void reiser4_init_txnmgr(txn_mgr *); +extern void reiser4_done_txnmgr(txn_mgr *); + +extern int reiser4_txn_reserve(int reserved); + +extern void reiser4_txn_begin(reiser4_context * context); +extern int reiser4_txn_end(reiser4_context * context); + +extern void reiser4_txn_restart(reiser4_context * context); +extern void reiser4_txn_restart_current(void); + +extern int txnmgr_force_commit_all(struct super_block *, int); +extern int current_atom_should_commit(void); + +extern jnode *find_first_dirty_jnode(txn_atom *, int); + +extern int commit_some_atoms(txn_mgr *); +extern int force_commit_atom(txn_handle *); +extern int flush_current_atom(int, long, long *, txn_atom **, jnode *); + +extern int flush_some_atom(jnode *, long *, const struct writeback_control *, int); + +extern void reiser4_atom_set_stage(txn_atom * atom, txn_stage stage); + +extern int same_slum_check(jnode * base, jnode * check, int alloc_check, + int alloc_value); +extern void atom_dec_and_unlock(txn_atom * atom); + +extern int reiser4_try_capture(jnode * node, znode_lock_mode mode, txn_capture flags); +extern int try_capture_page_to_invalidate(struct page *pg); + +extern void reiser4_uncapture_page(struct page *pg); +extern void reiser4_uncapture_block(jnode *); +extern void reiser4_uncapture_jnode(jnode *); + +extern int reiser4_capture_inode(struct inode *); +extern int reiser4_uncapture_inode(struct inode *); + +extern txn_atom *get_current_atom_locked_nocheck(void); + +#if REISER4_DEBUG + +/** + * atom_is_protected - make sure that nobody but us can do anything with atom + * @atom: atom to be checked + * + * This is used to assert that atom either entered commit stages or is spin + * locked. + */ +static inline int atom_is_protected(txn_atom *atom) +{ + if (atom->stage >= ASTAGE_PRE_COMMIT) + return 1; + assert_spin_locked(&(atom->alock)); + return 1; +} + +#endif + +/* Get the current atom and spinlock it if current atom present. May not return NULL */ +static inline txn_atom *get_current_atom_locked(void) +{ + txn_atom *atom; + + atom = get_current_atom_locked_nocheck(); + assert("zam-761", atom != NULL); + + return atom; +} + +extern txn_atom *jnode_get_atom(jnode *); + +extern void reiser4_atom_wait_event(txn_atom *); +extern void reiser4_atom_send_event(txn_atom *); + +extern void insert_into_atom_ovrwr_list(txn_atom * atom, jnode * node); +extern int reiser4_capture_super_block(struct super_block *s); +int capture_bulk(jnode **, int count); + +/* See the comment on the function blocknrset.c:blocknr_set_add for the + calling convention of these three routines. 
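+
+   Editorial illustration (not part of the original header): the convention,
+   as also followed by add_region_to_wmap() in wander.c, is to keep a
+   preallocated blocknr_set_entry pointer across calls and to retry while
+   the add routine returns -E_REPEAT, because the routine may drop the atom
+   spinlock in order to allocate memory.  A minimal sketch, where "original"
+   and "wandered" stand for caller-provided reiser4_block_nr values:
+
+	blocknr_set_entry *bsep = NULL;
+	txn_atom *atom;
+	int ret;
+
+	do {
+		atom = get_current_atom_locked();
+		ret = blocknr_set_add_pair(atom, &atom->wandered_map,
+					   &bsep, &original, &wandered);
+	} while (ret == -E_REPEAT);
+	if (ret == 0)
+		spin_unlock_atom(atom);
+
+   On other errors the atom lock handling follows what add_region_to_wmap()
+   does in wander.c.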
*/ +extern int blocknr_set_init_static(void); +extern void blocknr_set_done_static(void); +extern void blocknr_set_init(struct list_head * bset); +extern void blocknr_set_destroy(struct list_head * bset); +extern void blocknr_set_merge(struct list_head * from, struct list_head * into); +extern int blocknr_set_add_extent(txn_atom * atom, + struct list_head * bset, + blocknr_set_entry ** new_bsep, + const reiser4_block_nr * start, + const reiser4_block_nr * len); +extern int blocknr_set_add_pair(txn_atom * atom, struct list_head * bset, + blocknr_set_entry ** new_bsep, + const reiser4_block_nr * a, + const reiser4_block_nr * b); + +typedef int (*blocknr_set_actor_f) (txn_atom *, const reiser4_block_nr *, + const reiser4_block_nr *, void *); + +extern int blocknr_set_iterator(txn_atom * atom, struct list_head * bset, + blocknr_set_actor_f actor, void *data, + int delete); + +/* This is the block list interface (see blocknrlist.c) */ +extern int blocknr_list_init_static(void); +extern void blocknr_list_done_static(void); +extern void blocknr_list_init(struct list_head *blist); +extern void blocknr_list_destroy(struct list_head *blist); +extern void blocknr_list_merge(struct list_head *from, struct list_head *to); +extern void blocknr_list_sort_and_join(struct list_head *blist); +/** + * The @atom should be locked. + */ +extern int blocknr_list_add_extent(txn_atom *atom, + struct list_head *blist, + blocknr_list_entry **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len); +extern int blocknr_list_iterator(txn_atom *atom, + struct list_head *blist, + blocknr_set_actor_f actor, + void *data, + int delete); + +/* These are wrappers for accessing and modifying atom's delete lists, + depending on whether discard is enabled or not. + If it is enabled, (less memory efficient) blocknr_list is used for delete + list storage. Otherwise, blocknr_set is used for this purpose. 
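+
+   Editorial illustration (not part of the original header): both the
+   atom_dset_deferred_apply() wrapper declared just below and the
+   blocknr_set_iterator()/blocknr_list_iterator() routines above take a
+   callback of type blocknr_set_actor_f.  A hypothetical actor that merely
+   counts blocks could look like the sketch below; the function name, the
+   treatment of a NULL second argument as a single block, and passing 0 as
+   the final "delete" argument to keep the entries are assumptions of this
+   example, not taken from the original code:
+
+	static int count_blocks_actor(txn_atom *atom,
+				      const reiser4_block_nr *start,
+				      const reiser4_block_nr *len,
+				      void *data)
+	{
+		__u64 *total = data;
+
+		*total += (len != NULL) ? *len : 1;
+		return 0;
+	}
+
+   which could then be applied to the atom's deferred delete set with
+   atom_dset_deferred_apply(atom, count_blocks_actor, &total, 0).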
*/ +extern void atom_dset_init(txn_atom *atom); +extern void atom_dset_destroy(txn_atom *atom); +extern void atom_dset_merge(txn_atom *from, txn_atom *to); +extern int atom_dset_deferred_apply(txn_atom* atom, + blocknr_set_actor_f actor, + void *data, + int delete); +extern int atom_dset_deferred_add_extent(txn_atom *atom, + void **new_entry, + const reiser4_block_nr *start, + const reiser4_block_nr *len); + +/* flush code takes care about how to fuse flush queues */ +extern void flush_init_atom(txn_atom * atom); +extern void flush_fuse_queues(txn_atom * large, txn_atom * small); + +static inline void spin_lock_atom(txn_atom *atom) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_atom) && + LOCK_CNT_NIL(spin_locked_jnode) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock(&(atom->alock)); + + LOCK_CNT_INC(spin_locked_atom); + LOCK_CNT_INC(spin_locked); +} + +static inline void spin_lock_atom_nested(txn_atom *atom) +{ + assert("", (LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_jnode) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock_nested(&(atom->alock), SINGLE_DEPTH_NESTING); + + LOCK_CNT_INC(spin_locked_atom); + LOCK_CNT_INC(spin_locked); +} + +static inline int spin_trylock_atom(txn_atom *atom) +{ + if (spin_trylock(&(atom->alock))) { + LOCK_CNT_INC(spin_locked_atom); + LOCK_CNT_INC(spin_locked); + return 1; + } + return 0; +} + +static inline void spin_unlock_atom(txn_atom *atom) +{ + assert_spin_locked(&(atom->alock)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_atom)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_atom); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(atom->alock)); +} + +static inline void spin_lock_txnh(txn_handle *txnh) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock(&(txnh->hlock)); + + LOCK_CNT_INC(spin_locked_txnh); + LOCK_CNT_INC(spin_locked); +} + +static inline int spin_trylock_txnh(txn_handle *txnh) +{ + if (spin_trylock(&(txnh->hlock))) { + LOCK_CNT_INC(spin_locked_txnh); + LOCK_CNT_INC(spin_locked); + return 1; + } + return 0; +} + +static inline void spin_unlock_txnh(txn_handle *txnh) +{ + assert_spin_locked(&(txnh->hlock)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_txnh)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_txnh); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(txnh->hlock)); +} + +#define spin_ordering_pred_txnmgr(tmgr) \ + ( LOCK_CNT_NIL(spin_locked_atom) && \ + LOCK_CNT_NIL(spin_locked_txnh) && \ + LOCK_CNT_NIL(spin_locked_jnode) && \ + LOCK_CNT_NIL(rw_locked_zlock) && \ + LOCK_CNT_NIL(rw_locked_dk) && \ + LOCK_CNT_NIL(rw_locked_tree) ) + +static inline void spin_lock_txnmgr(txn_mgr *mgr) +{ + /* check that spinlocks of lower priorities are not held */ + assert("", (LOCK_CNT_NIL(spin_locked_atom) && + LOCK_CNT_NIL(spin_locked_txnh) && + LOCK_CNT_NIL(spin_locked_jnode) && + LOCK_CNT_NIL(spin_locked_zlock) && + LOCK_CNT_NIL(rw_locked_dk) && + LOCK_CNT_NIL(rw_locked_tree))); + + spin_lock(&(mgr->tmgr_lock)); + + LOCK_CNT_INC(spin_locked_txnmgr); + LOCK_CNT_INC(spin_locked); +} + +static inline int spin_trylock_txnmgr(txn_mgr *mgr) +{ + if (spin_trylock(&(mgr->tmgr_lock))) { + 
LOCK_CNT_INC(spin_locked_txnmgr); + LOCK_CNT_INC(spin_locked); + return 1; + } + return 0; +} + +static inline void spin_unlock_txnmgr(txn_mgr *mgr) +{ + assert_spin_locked(&(mgr->tmgr_lock)); + assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_txnmgr)); + assert("nikita-1376", LOCK_CNT_GTZ(spin_locked)); + + LOCK_CNT_DEC(spin_locked_txnmgr); + LOCK_CNT_DEC(spin_locked); + + spin_unlock(&(mgr->tmgr_lock)); +} + +typedef enum { + FQ_IN_USE = 0x1 +} flush_queue_state_t; + +typedef struct flush_queue flush_queue_t; + +/* This is an accumulator for jnodes prepared for writing to disk. A flush queue + is filled by the jnode_flush() routine, and written to disk under memory + pressure or at atom commit time. */ +/* LOCKING: fq state and fq->atom are protected by guard spinlock, fq->nr_queued + field and fq->prepped list can be modified if atom is spin-locked and fq + object is "in-use" state. For read-only traversal of the fq->prepped list + and reading of the fq->nr_queued field it is enough to keep fq "in-use" or + only have atom spin-locked. */ +struct flush_queue { + /* linkage element is the first in this structure to make debugging + easier. See field in atom struct for description of list. */ + struct list_head alink; + /* A spinlock to protect changes of fq state and fq->atom pointer */ + spinlock_t guard; + /* flush_queue state: [in_use | ready] */ + flush_queue_state_t state; + /* A list which contains queued nodes, queued nodes are removed from any + * atom's list and put on this ->prepped one. */ + struct list_head prepped; + /* number of submitted i/o requests */ + atomic_t nr_submitted; + /* number of i/o errors */ + atomic_t nr_errors; + /* An atom this flush queue is attached to */ + txn_atom *atom; + /* A wait queue head to wait on i/o completion */ + wait_queue_head_t wait; +#if REISER4_DEBUG + /* A thread which took this fq in exclusive use, NULL if fq is free, + * used for debugging. */ + struct task_struct *owner; +#endif +}; + +extern int reiser4_fq_by_atom(txn_atom *, flush_queue_t **); +extern void reiser4_fq_put_nolock(flush_queue_t *); +extern void reiser4_fq_put(flush_queue_t *); +extern void reiser4_fuse_fq(txn_atom * to, txn_atom * from); +extern void queue_jnode(flush_queue_t *, jnode *); + +extern int reiser4_write_fq(flush_queue_t *, long *, int); +extern int current_atom_finish_all_fq(void); +extern void init_atom_fq_parts(txn_atom *); + +extern reiser4_block_nr txnmgr_count_deleted_blocks(void); + +extern void znode_make_dirty(znode * node); +extern void jnode_make_dirty_locked(jnode * node); + +extern int reiser4_sync_atom(txn_atom * atom); + +#if REISER4_DEBUG +extern int atom_fq_parts_are_clean(txn_atom *); +#endif + +extern void add_fq_to_bio(flush_queue_t *, struct bio *); +extern flush_queue_t *get_fq_for_current_atom(void); + +void reiser4_invalidate_list(struct list_head * head); + +# endif /* __REISER4_TXNMGR_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/type_safe_hash.h b/fs/reiser4/type_safe_hash.h new file mode 100644 index 000000000000..b2fdacdf00f4 --- /dev/null +++ b/fs/reiser4/type_safe_hash.h @@ -0,0 +1,320 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* A hash table class that uses hash chains (singly-linked) and is + parametrized to provide type safety. 
*/ + +#ifndef __REISER4_TYPE_SAFE_HASH_H__ +#define __REISER4_TYPE_SAFE_HASH_H__ + +#include "debug.h" + +#include <asm/errno.h> +/* Step 1: Use TYPE_SAFE_HASH_DECLARE() to define the TABLE and LINK objects + based on the object type. You need to declare the item type before + this definition, define it after this definition. */ +#define TYPE_SAFE_HASH_DECLARE(PREFIX,ITEM_TYPE) \ + \ +typedef struct PREFIX##_hash_table_ PREFIX##_hash_table; \ +typedef struct PREFIX##_hash_link_ PREFIX##_hash_link; \ + \ +struct PREFIX##_hash_table_ \ +{ \ + ITEM_TYPE **_table; \ + __u32 _buckets; \ +}; \ + \ +struct PREFIX##_hash_link_ \ +{ \ + ITEM_TYPE *_next; \ +} + +/* Step 2: Define the object type of the hash: give it a field of type + PREFIX_hash_link. */ + +/* Step 3: Use TYPE_SAFE_HASH_DEFINE to define the hash table interface using + the type and field name used in step 2. The arguments are: + + ITEM_TYPE The item type being hashed + KEY_TYPE The type of key being hashed + KEY_NAME The name of the key field within the item + LINK_NAME The name of the link field within the item, which you must make of type PREFIX_hash_link + HASH_FUNC The name of the hash function (or macro, takes the table and a const pointer to the key) + EQ_FUNC The name of the equality function (or macro, takes const pointers to two keys) + + It implements these functions: + + prefix_hash_init Initialize the table given its size. + prefix_hash_insert Insert an item + prefix_hash_insert_index Insert an item w/ precomputed hash_index + prefix_hash_find Find an item by key + prefix_hash_find_index Find an item w/ precomputed hash_index + prefix_hash_remove Remove an item, returns 1 if found, 0 if not found + prefix_hash_remove_index Remove an item w/ precomputed hash_index + + If you'd like something to be done differently, feel free to ask me + for modifications. Additional features that could be added but + have not been: + + prefix_hash_remove_key Find and remove an item by key + prefix_hash_remove_key_index Find and remove an item by key w/ precomputed hash_index + + The hash function receives the table as well as a const pointer to the + key, so it can obtain the number of buckets from the table. + + This hash table uses a singly-linked hash chain. This means + insertion is fast but deletion requires searching the chain. + + There is also the doubly-linked hash chain approach, under which + deletion requires no search but the code is longer and it takes two + pointers per item. + + The circularly-linked approach has the shortest code but requires + two pointers per bucket, doubling the size of the bucket array (in + addition to two pointers per item).
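+
+   Editorial illustration (not part of the original header): a minimal usage
+   sketch for the three steps above, assuming a hypothetical item type
+   "struct foo" keyed by a __u32 field named oid, with "link" as the chain
+   link field.  The names foo, foo_hashfn and foo_eq are invented for this
+   example:
+
+	struct foo;
+	TYPE_SAFE_HASH_DECLARE(foo, struct foo);
+
+	struct foo {
+		__u32 oid;
+		foo_hash_link link;
+	};
+
+	static inline __u32 foo_hashfn(foo_hash_table *table,
+				       const __u32 *key)
+	{
+		return *key % table->_buckets;
+	}
+	#define foo_eq(k1, k2) (*(k1) == *(k2))
+
+	TYPE_SAFE_HASH_DEFINE(foo, struct foo, __u32,
+			      oid, link, foo_hashfn, foo_eq);
+
+   after which foo_hash_init(&table, nr_buckets), foo_hash_insert(&table,
+   item) and foo_hash_find(&table, &key) become available.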
+*/ +#define TYPE_SAFE_HASH_DEFINE(PREFIX,ITEM_TYPE,KEY_TYPE,KEY_NAME,LINK_NAME,HASH_FUNC,EQ_FUNC) \ + \ +static __inline__ void \ +PREFIX##_check_hash (PREFIX##_hash_table *table UNUSED_ARG, \ + __u32 hash UNUSED_ARG) \ +{ \ + assert("nikita-2780", hash < table->_buckets); \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_init (PREFIX##_hash_table *hash, \ + __u32 buckets) \ +{ \ + hash->_table = (ITEM_TYPE**) KMALLOC (sizeof (ITEM_TYPE*) * buckets); \ + hash->_buckets = buckets; \ + if (hash->_table == NULL) \ + { \ + return RETERR(-ENOMEM); \ + } \ + memset (hash->_table, 0, sizeof (ITEM_TYPE*) * buckets); \ + ON_DEBUG(printk(#PREFIX "_hash_table: %i buckets\n", buckets)); \ + return 0; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_done (PREFIX##_hash_table *hash) \ +{ \ + if (REISER4_DEBUG && hash->_table != NULL) { \ + __u32 i; \ + for (i = 0 ; i < hash->_buckets ; ++ i) \ + assert("nikita-2905", hash->_table[i] == NULL); \ + } \ + if (hash->_table != NULL) \ + KFREE (hash->_table, sizeof (ITEM_TYPE*) * hash->_buckets); \ + hash->_table = NULL; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_prefetch_next (ITEM_TYPE *item) \ +{ \ + prefetch(item->LINK_NAME._next); \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_prefetch_bucket (PREFIX##_hash_table *hash, \ + __u32 index) \ +{ \ + prefetch(hash->_table[index]); \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find_index (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + KEY_TYPE const *find_key) \ +{ \ + ITEM_TYPE *item; \ + \ + PREFIX##_check_hash(hash, hash_index); \ + \ + for (item = hash->_table[hash_index]; \ + item != NULL; \ + item = item->LINK_NAME._next) \ + { \ + prefetch(item->LINK_NAME._next); \ + prefetch(item->LINK_NAME._next + offsetof(ITEM_TYPE, KEY_NAME)); \ + if (EQ_FUNC (& item->KEY_NAME, find_key)) \ + { \ + return item; \ + } \ + } \ + \ + return NULL; \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find_index_lru (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + KEY_TYPE const *find_key) \ +{ \ + ITEM_TYPE ** item = &hash->_table[hash_index]; \ + \ + PREFIX##_check_hash(hash, hash_index); \ + \ + while (*item != NULL) { \ + prefetch(&(*item)->LINK_NAME._next); \ + if (EQ_FUNC (&(*item)->KEY_NAME, find_key)) { \ + ITEM_TYPE *found; \ + \ + found = *item; \ + *item = found->LINK_NAME._next; \ + found->LINK_NAME._next = hash->_table[hash_index]; \ + hash->_table[hash_index] = found; \ + return found; \ + } \ + item = &(*item)->LINK_NAME._next; \ + } \ + return NULL; \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_remove_index (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + ITEM_TYPE *del_item) \ +{ \ + ITEM_TYPE ** hash_item_p = &hash->_table[hash_index]; \ + \ + PREFIX##_check_hash(hash, hash_index); \ + \ + while (*hash_item_p != NULL) { \ + prefetch(&(*hash_item_p)->LINK_NAME._next); \ + if (*hash_item_p == del_item) { \ + *hash_item_p = (*hash_item_p)->LINK_NAME._next; \ + return 1; \ + } \ + hash_item_p = &(*hash_item_p)->LINK_NAME._next; \ + } \ + return 0; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert_index (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + ITEM_TYPE *ins_item) \ +{ \ + PREFIX##_check_hash(hash, hash_index); \ + \ + ins_item->LINK_NAME._next = hash->_table[hash_index]; \ + hash->_table[hash_index] = ins_item; \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert_index_rcu (PREFIX##_hash_table *hash, \ + __u32 hash_index, \ + ITEM_TYPE *ins_item) \ +{ \ + PREFIX##_check_hash(hash, hash_index); \ + \ + 
ins_item->LINK_NAME._next = hash->_table[hash_index]; \ + smp_wmb(); \ + hash->_table[hash_index] = ins_item; \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find (PREFIX##_hash_table *hash, \ + KEY_TYPE const *find_key) \ +{ \ + return PREFIX##_hash_find_index (hash, HASH_FUNC(hash, find_key), find_key); \ +} \ + \ +static __inline__ ITEM_TYPE* \ +PREFIX##_hash_find_lru (PREFIX##_hash_table *hash, \ + KEY_TYPE const *find_key) \ +{ \ + return PREFIX##_hash_find_index_lru (hash, HASH_FUNC(hash, find_key), find_key); \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_remove (PREFIX##_hash_table *hash, \ + ITEM_TYPE *del_item) \ +{ \ + return PREFIX##_hash_remove_index (hash, \ + HASH_FUNC(hash, &del_item->KEY_NAME), del_item); \ +} \ + \ +static __inline__ int \ +PREFIX##_hash_remove_rcu (PREFIX##_hash_table *hash, \ + ITEM_TYPE *del_item) \ +{ \ + return PREFIX##_hash_remove (hash, del_item); \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert (PREFIX##_hash_table *hash, \ + ITEM_TYPE *ins_item) \ +{ \ + return PREFIX##_hash_insert_index (hash, \ + HASH_FUNC(hash, &ins_item->KEY_NAME), ins_item); \ +} \ + \ +static __inline__ void \ +PREFIX##_hash_insert_rcu (PREFIX##_hash_table *hash, \ + ITEM_TYPE *ins_item) \ +{ \ + return PREFIX##_hash_insert_index_rcu (hash, HASH_FUNC(hash, &ins_item->KEY_NAME), \ + ins_item); \ +} \ + \ +static __inline__ ITEM_TYPE * \ +PREFIX##_hash_first (PREFIX##_hash_table *hash, __u32 ind) \ +{ \ + ITEM_TYPE *first; \ + \ + for (first = NULL; ind < hash->_buckets; ++ ind) { \ + first = hash->_table[ind]; \ + if (first != NULL) \ + break; \ + } \ + return first; \ +} \ + \ +static __inline__ ITEM_TYPE * \ +PREFIX##_hash_next (PREFIX##_hash_table *hash, \ + ITEM_TYPE *item) \ +{ \ + ITEM_TYPE *next; \ + \ + if (item == NULL) \ + return NULL; \ + next = item->LINK_NAME._next; \ + if (next == NULL) \ + next = PREFIX##_hash_first (hash, HASH_FUNC(hash, &item->KEY_NAME) + 1); \ + return next; \ +} \ + \ +typedef struct {} PREFIX##_hash_dummy + +#define for_all_ht_buckets(table, head) \ +for ((head) = &(table) -> _table[ 0 ] ; \ + (head) != &(table) -> _table[ (table) -> _buckets ] ; ++ (head)) + +#define for_all_in_bucket(bucket, item, next, field) \ +for ((item) = *(bucket), (next) = (item) ? (item) -> field._next : NULL ; \ + (item) != NULL ; \ + (item) = (next), (next) = (item) ? (item) -> field._next : NULL ) + +#define for_all_in_htable(table, prefix, item, next) \ +for ((item) = prefix ## _hash_first ((table), 0), \ + (next) = prefix ## _hash_next ((table), (item)) ; \ + (item) != NULL ; \ + (item) = (next), \ + (next) = prefix ## _hash_next ((table), (item))) + +/* __REISER4_TYPE_SAFE_HASH_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/vfs_ops.c b/fs/reiser4/vfs_ops.c new file mode 100644 index 000000000000..68a3bcd70bd7 --- /dev/null +++ b/fs/reiser4/vfs_ops.c @@ -0,0 +1,260 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Interface to VFS. Reiser4 {super|export|dentry}_operations are defined + here. 
*/ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "coord.h" +#include "plugin/item/item.h" +#include "plugin/file/file.h" +#include "plugin/security/perm.h" +#include "plugin/disk_format/disk_format.h" +#include "plugin/plugin.h" +#include "plugin/plugin_set.h" +#include "plugin/object.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "vfs_ops.h" +#include "inode.h" +#include "page_cache.h" +#include "ktxnmgrd.h" +#include "super.h" +#include "reiser4.h" +#include "entd.h" +#include "status_flags.h" +#include "flush.h" +#include "dscale.h" + +#include <linux/profile.h> +#include <linux/types.h> +#include <linux/mount.h> +#include <linux/vfs.h> +#include <linux/mm.h> +#include <linux/buffer_head.h> +#include <linux/dcache.h> +#include <linux/list.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/writeback.h> +#include <linux/blkdev.h> +#include <linux/security.h> +#include <linux/reboot.h> +#include <linux/rcupdate.h> + +/* update inode stat-data by calling plugin */ +int reiser4_update_sd(struct inode *object) +{ + file_plugin *fplug; + + assert("nikita-2338", object != NULL); + /* check for read-only file system. */ + if (IS_RDONLY(object)) + return 0; + + fplug = inode_file_plugin(object); + assert("nikita-2339", fplug != NULL); + return fplug->write_sd_by_inode(object); +} + +/* helper function: increase inode nlink count and call plugin method to save + updated stat-data. + + Used by link/create and during creation of dot and dotdot in mkdir +*/ +int reiser4_add_nlink(struct inode *object /* object to which link is added */ , + struct inode *parent /* parent where new entry will be */ + , + int write_sd_p /* true if stat-data has to be + * updated */ ) +{ + file_plugin *fplug; + int result; + + assert("nikita-1351", object != NULL); + + fplug = inode_file_plugin(object); + assert("nikita-1445", fplug != NULL); + + /* ask plugin whether it can add yet another link to this + object */ + if (!fplug->can_add_link(object)) + return RETERR(-EMLINK); + + assert("nikita-2211", fplug->add_link != NULL); + /* call plugin to do actual addition of link */ + result = fplug->add_link(object, parent); + + /* optionally update stat data */ + if (result == 0 && write_sd_p) + result = fplug->write_sd_by_inode(object); + return result; +} + +/* helper function: decrease inode nlink count and call plugin method to save + updated stat-data. + + Used by unlink/create +*/ +int reiser4_del_nlink(struct inode *object /* object from which link is + * removed */ , + struct inode *parent /* parent where entry was */ , + int write_sd_p /* true is stat-data has to be + * updated */ ) +{ + file_plugin *fplug; + int result; + + assert("nikita-1349", object != NULL); + + fplug = inode_file_plugin(object); + assert("nikita-1350", fplug != NULL); + assert("nikita-1446", object->i_nlink > 0); + assert("nikita-2210", fplug->rem_link != NULL); + + /* call plugin to do actual deletion of link */ + result = fplug->rem_link(object, parent); + + /* optionally update stat data */ + if (result == 0 && write_sd_p) + result = fplug->write_sd_by_inode(object); + return result; +} + +/* Release reiser4 dentry. This is d_op->d_release() method. 
*/ +static void reiser4_d_release(struct dentry *dentry /* dentry released */ ) +{ + reiser4_free_dentry_fsdata(dentry); +} + +/* + * Called by reiser4_sync_inodes(), during speculative write-back (through + * pdflush, or balance_dirty_pages()). + */ +void reiser4_writeout(struct super_block *sb, struct writeback_control *wbc) +{ + long written = 0; + int repeats = 0; + int result; + + /* + * Performs early flushing, trying to free some memory. If there + * is nothing to flush, commits some atoms. + * + * Commit all atoms if reiser4_writepages_dispatch() is called + * from sys_sync() or sys_fsync() + */ + if (wbc->sync_mode != WB_SYNC_NONE) { + txnmgr_force_commit_all(sb, 0); + return; + } + + BUG_ON(reiser4_get_super_fake(sb) == NULL); + do { + long nr_submitted = 0; + jnode *node = NULL; + + /* do not put more requests to overload write queue */ + if (bdi_write_congested(inode_to_bdi(reiser4_get_super_fake(sb)))) { + //blk_flush_plug(current); + break; + } + repeats++; + BUG_ON(wbc->nr_to_write <= 0); + + if (get_current_context()->entd) { + entd_context *ent = get_entd_context(sb); + + if (ent->cur_request->node) + /* + * this is ent thread and it managed to capture + * requested page itself - start flush from + * that page + */ + node = ent->cur_request->node; + } + + result = flush_some_atom(node, &nr_submitted, wbc, + JNODE_FLUSH_WRITE_BLOCKS); + if (result != 0) + warning("nikita-31001", "Flush failed: %i", result); + if (node) + /* drop the reference aquired + in find_or_create_extent() */ + jput(node); + if (!nr_submitted) + break; + + wbc->nr_to_write -= nr_submitted; + written += nr_submitted; + } while (wbc->nr_to_write > 0); +} + +/* tell VM how many pages were dirtied */ +void reiser4_throttle_write(struct inode *inode) +{ + reiser4_context *ctx; + + ctx = get_current_context(); + reiser4_txn_restart(ctx); + current->journal_info = NULL; + balance_dirty_pages_ratelimited(inode->i_mapping); + current->journal_info = ctx; +} + +const int REISER4_MAGIC_OFFSET = 16 * 4096; /* offset to magic string from the + * beginning of device */ + +/* + * Reiser4 initialization/shutdown. + * + * Code below performs global reiser4 initialization that is done either as + * part of kernel initialization (when reiser4 is statically built-in), or + * during reiser4 module load (when compiled as module). + */ + +void reiser4_handle_error(void) +{ + struct super_block *sb = reiser4_get_current_sb(); + + if (!sb) + return; + reiser4_status_write(REISER4_STATUS_DAMAGED, 0, + "Filesystem error occured"); + switch (get_super_private(sb)->onerror) { + case 1: + reiser4_panic("foobar-42", "Filesystem error occured\n"); + default: + if (sb->s_flags & MS_RDONLY) + return; + sb->s_flags |= MS_RDONLY; + break; + } +} + +struct dentry_operations reiser4_dentry_operations = { + .d_revalidate = NULL, + .d_hash = NULL, + .d_compare = NULL, + .d_delete = NULL, + .d_release = reiser4_d_release, + .d_iput = NULL, +}; + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/vfs_ops.h b/fs/reiser4/vfs_ops.h new file mode 100644 index 000000000000..9c8819fc726f --- /dev/null +++ b/fs/reiser4/vfs_ops.h @@ -0,0 +1,60 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* vfs_ops.c's exported symbols */ + +#if !defined( __FS_REISER4_VFS_OPS_H__ ) +#define __FS_REISER4_VFS_OPS_H__ + +#include "forward.h" +#include "coord.h" +#include "seal.h" +#include "plugin/file/file.h" +#include "super.h" +#include "readahead.h" + +#include <linux/types.h> /* for loff_t */ +#include <linux/fs.h> /* for struct address_space */ +#include <linux/dcache.h> /* for struct dentry */ +#include <linux/mm.h> +#include <linux/backing-dev.h> + +/* address space operations */ +int reiser4_writepage(struct page *, struct writeback_control *); +int reiser4_set_page_dirty(struct page *); +void reiser4_invalidatepage(struct page *, unsigned int offset, unsigned int length); +int reiser4_releasepage(struct page *, gfp_t); + +#ifdef CONFIG_MIGRATION +int reiser4_migratepage(struct address_space *, struct page *, + struct page *, enum migrate_mode); +#else +#define reiser4_migratepage NULL +#endif /* CONFIG_MIGRATION */ + +extern int reiser4_update_sd(struct inode *); +extern int reiser4_add_nlink(struct inode *, struct inode *, int); +extern int reiser4_del_nlink(struct inode *, struct inode *, int); + +extern int reiser4_start_up_io(struct page *page); +extern void reiser4_throttle_write(struct inode *); +extern int jnode_is_releasable(jnode *); + +#define CAPTURE_APAGE_BURST (1024l) +void reiser4_writeout(struct super_block *, struct writeback_control *); + +extern void reiser4_handle_error(void); + +/* __FS_REISER4_VFS_OPS_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/wander.c b/fs/reiser4/wander.c new file mode 100644 index 000000000000..5dfb30ae3a46 --- /dev/null +++ b/fs/reiser4/wander.c @@ -0,0 +1,1757 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Reiser4 Wandering Log */ + +/* You should read http://www.namesys.com/txn-doc.html + + That describes how filesystem operations are performed as atomic + transactions, and how we try to arrange it so that we can write most of the + data only once while performing the operation atomically. + + For the purposes of this code, it is enough for it to understand that it + has been told a given block should be written either once, or twice (if + twice then once to the wandered location and once to the real location). + + This code guarantees that those blocks that are defined to be part of an + atom either all take effect or none of them take effect. + + The "relocate set" of nodes are submitted to write by the jnode_flush() + routine, and the "overwrite set" is submitted by reiser4_write_log(). + This is because with the overwrite set we seek to optimize writes, and + with the relocate set we seek to cause disk order to correlate with the + "parent first order" (preorder). 
+ + reiser4_write_log() allocates and writes wandered blocks and maintains + additional on-disk structures of the atom as wander records (each wander + record occupies one block) for storing of the "wandered map" (a table which + contains a relation between wandered and real block numbers) and other + information which might be needed at transaction recovery time. + + The wander records are unidirectionally linked into a circle: each wander + record contains a block number of the next wander record, the last wander + record points to the first one. + + One wander record (named "tx head" in this file) has a format which is + different from the other wander records. The "tx head" has a reference to the + "tx head" block of the previously committed atom. Also, "tx head" contains + fs information (the free blocks counter, and the oid allocator state) which + is logged in a special way . + + There are two journal control blocks, named journal header and journal + footer which have fixed on-disk locations. The journal header has a + reference to the "tx head" block of the last committed atom. The journal + footer points to the "tx head" of the last flushed atom. The atom is + "played" when all blocks from its overwrite set are written to disk the + second time (i.e. written to their real locations). + + NOTE: People who know reiserfs internals and its journal structure might be + confused with these terms journal footer and journal header. There is a table + with terms of similar semantics in reiserfs (reiser3) and reiser4: + + REISER3 TERM | REISER4 TERM | DESCRIPTION + --------------------+-----------------------+---------------------------- + commit record | journal header | atomic write of this record + | | ends transaction commit + --------------------+-----------------------+---------------------------- + journal header | journal footer | atomic write of this record + | | ends post-commit writes. + | | After successful + | | writing of this journal + | | blocks (in reiser3) or + | | wandered blocks/records are + | | free for re-use. + --------------------+-----------------------+---------------------------- + + The atom commit process is the following: + + 1. The overwrite set is taken from atom's clean list, and its size is + counted. + + 2. The number of necessary wander records (including tx head) is calculated, + and the wander record blocks are allocated. + + 3. Allocate wandered blocks and populate wander records by wandered map. + + 4. submit write requests for wander records and wandered blocks. + + 5. wait until submitted write requests complete. + + 6. update journal header: change the pointer to the block number of just + written tx head, submit an i/o for modified journal header block and wait + for i/o completion. + + NOTE: The special logging for bitmap blocks and some reiser4 super block + fields makes processes of atom commit, flush and recovering a bit more + complex (see comments in the source code for details). + + The atom playing process is the following: + + 1. Write atom's overwrite set in-place. + + 2. Wait on i/o. + + 3. Update journal footer: change the pointer to block number of tx head + block of the atom we currently flushing, submit an i/o, wait on i/o + completion. + + 4. Free disk space which was used for wandered blocks and wander records. + + After the freeing of wandered blocks and wander records we have that journal + footer points to the on-disk structure which might be overwritten soon. 
+ Neither the log writer nor the journal recovery procedure uses that pointer + for accessing the data. When the journal recovery procedure finds the oldest + transaction it compares the journal footer pointer value with the "prev_tx" + pointer value in the tx head; if the values are equal, the oldest unflushed + transaction has been found. + + NOTE on disk space leakage: the information about which blocks and how many + blocks are allocated for wandered blocks and wander records is not written to + the disk because of the special logging for bitmaps and some super block + counters. After a system crash reiser4 does not remember those + allocations, thus there is no disk space leakage of this kind. +*/ + +/* Special logging of reiser4 super block fields. */ + +/* There are some reiser4 super block fields (the free block count and the OID + allocator state, i.e. the number of files and the next free OID) which are logged + separately from the super block to avoid unnecessary atom fusion. + + So, the reiser4 super block need not be captured by a transaction which + allocates/deallocates disk blocks or creates/deletes file objects. Moreover, + the reiser4 on-disk super block is not touched when such a transaction is + committed and flushed. Those "counters logged specially" are logged in "tx + head" blocks and in the journal footer block. + + A step-by-step description of special logging: + + 0. The per-atom information about deleted or created files and allocated or + freed blocks is collected during the transaction. The atom's + ->nr_objects_created and ->nr_objects_deleted are for object + deletion/creation tracking, the numbers of allocated and freed blocks are + calculated using the atom's delete set and the atom's capture list -- all new and + relocated nodes should be on the atom's clean list and should have the JNODE_RELOC + bit set. + + 1. The "logged specially" reiser4 super block fields have their "committed" + versions in the reiser4 in-memory super block. They get modified only at + atom commit time. The atom's commit thread has exclusive access to those + "committed" fields because the log writer implementation supports only one + atom commit at a time (there is a per-fs "commit" mutex). At + that time the "committed" counters are modified using the per-atom information + collected during the transaction. These counters are stored on disk as + part of the tx head block when the atom is committed. + + 2. When the atom is flushed the value of the free block counter and the OID + allocator state get written to the journal footer block. A special journal + procedure (journal_recover_sb_data()) takes those values from the journal + footer and updates the reiser4 in-memory super block. + + NOTE: That means the free block count and the OID allocator state are logged + separately from the reiser4 super block regardless of the fact that the + reiser4 super block has fields to store both the free block counter and the + OID allocator. + + Writing the whole super block at commit time requires knowing the true values of + all its fields without the changes made by not yet committed transactions. This + would be possible by keeping a "committed" version of the super block, just as the + reiser4 bitmap blocks have "committed" and "working" versions. However, + another scheme was implemented which stores the specially logged values in the + unused free space inside the transaction head block. In my opinion it has the + advantage of not writing the whole super block when only part of it was + modified.
*/ + +#include "debug.h" +#include "dformat.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "page_cache.h" +#include "wander.h" +#include "reiser4.h" +#include "super.h" +#include "vfs_ops.h" +#include "writeout.h" +#include "inode.h" +#include "entd.h" + +#include <linux/types.h> +#include <linux/fs.h> /* for struct super_block */ +#include <linux/mm.h> /* for struct page */ +#include <linux/pagemap.h> +#include <linux/bio.h> /* for struct bio */ +#include <linux/blkdev.h> + +static int write_jnodes_to_disk_extent( + jnode *, int, const reiser4_block_nr *, flush_queue_t *, int); + +/* The commit_handle is a container for objects needed at atom commit time */ +struct commit_handle { + /* A pointer to atom's list of OVRWR nodes */ + struct list_head *overwrite_set; + /* atom's overwrite set size */ + int overwrite_set_size; + /* jnodes for wander record blocks */ + struct list_head tx_list; + /* number of wander records */ + __u32 tx_size; + /* 'committed' sb counters are saved here until atom is completely + flushed */ + __u64 free_blocks; + __u64 nr_files; + __u64 next_oid; + /* A pointer to the atom which is being committed */ + txn_atom *atom; + /* A pointer to current super block */ + struct super_block *super; + /* The counter of modified bitmaps */ + reiser4_block_nr nr_bitmap; +}; + +static void init_commit_handle(struct commit_handle *ch, txn_atom *atom) +{ + memset(ch, 0, sizeof(struct commit_handle)); + INIT_LIST_HEAD(&ch->tx_list); + + ch->atom = atom; + ch->super = reiser4_get_current_sb(); +} + +static void done_commit_handle(struct commit_handle *ch) +{ + assert("zam-690", list_empty(&ch->tx_list)); +} + +/* fill journal header block data */ +static void format_journal_header(struct commit_handle *ch) +{ + struct reiser4_super_info_data *sbinfo; + struct journal_header *header; + jnode *txhead; + + sbinfo = get_super_private(ch->super); + assert("zam-479", sbinfo != NULL); + assert("zam-480", sbinfo->journal_header != NULL); + + txhead = list_entry(ch->tx_list.next, jnode, capture_link); + + jload(sbinfo->journal_header); + + header = (struct journal_header *)jdata(sbinfo->journal_header); + assert("zam-484", header != NULL); + + put_unaligned(cpu_to_le64(*jnode_get_block(txhead)), + &header->last_committed_tx); + + jrelse(sbinfo->journal_header); +} + +/* fill journal footer block data */ +static void format_journal_footer(struct commit_handle *ch) +{ + struct reiser4_super_info_data *sbinfo; + struct journal_footer *footer; + jnode *tx_head; + + sbinfo = get_super_private(ch->super); + + tx_head = list_entry(ch->tx_list.next, jnode, capture_link); + + assert("zam-493", sbinfo != NULL); + assert("zam-494", sbinfo->journal_header != NULL); + + check_me("zam-691", jload(sbinfo->journal_footer) == 0); + + footer = (struct journal_footer *)jdata(sbinfo->journal_footer); + assert("zam-495", footer != NULL); + + put_unaligned(cpu_to_le64(*jnode_get_block(tx_head)), + &footer->last_flushed_tx); + put_unaligned(cpu_to_le64(ch->free_blocks), &footer->free_blocks); + + put_unaligned(cpu_to_le64(ch->nr_files), &footer->nr_files); + put_unaligned(cpu_to_le64(ch->next_oid), &footer->next_oid); + + jrelse(sbinfo->journal_footer); +} + +/* wander record capacity depends on current block size */ +static int wander_record_capacity(const struct super_block *super) +{ + return (super->s_blocksize - + sizeof(struct wander_record_header)) / + sizeof(struct wander_entry); +} + +/* Fill first wander record (tx head) in accordance with supplied 
given data */ +static void format_tx_head(struct commit_handle *ch) +{ + jnode *tx_head; + jnode *next; + struct tx_header *header; + + tx_head = list_entry(ch->tx_list.next, jnode, capture_link); + assert("zam-692", &ch->tx_list != &tx_head->capture_link); + + next = list_entry(tx_head->capture_link.next, jnode, capture_link); + if (&ch->tx_list == &next->capture_link) + next = tx_head; + + header = (struct tx_header *)jdata(tx_head); + + assert("zam-460", header != NULL); + assert("zam-462", ch->super->s_blocksize >= sizeof(struct tx_header)); + + memset(jdata(tx_head), 0, (size_t) ch->super->s_blocksize); + memcpy(jdata(tx_head), TX_HEADER_MAGIC, TX_HEADER_MAGIC_SIZE); + + put_unaligned(cpu_to_le32(ch->tx_size), &header->total); + put_unaligned(cpu_to_le64(get_super_private(ch->super)->last_committed_tx), + &header->prev_tx); + put_unaligned(cpu_to_le64(*jnode_get_block(next)), &header->next_block); + put_unaligned(cpu_to_le64(ch->free_blocks), &header->free_blocks); + put_unaligned(cpu_to_le64(ch->nr_files), &header->nr_files); + put_unaligned(cpu_to_le64(ch->next_oid), &header->next_oid); +} + +/* prepare ordinary wander record block (fill all service fields) */ +static void +format_wander_record(struct commit_handle *ch, jnode *node, __u32 serial) +{ + struct wander_record_header *LRH; + jnode *next; + + assert("zam-464", node != NULL); + + LRH = (struct wander_record_header *)jdata(node); + next = list_entry(node->capture_link.next, jnode, capture_link); + + if (&ch->tx_list == &next->capture_link) + next = list_entry(ch->tx_list.next, jnode, capture_link); + + assert("zam-465", LRH != NULL); + assert("zam-463", + ch->super->s_blocksize > sizeof(struct wander_record_header)); + + memset(jdata(node), 0, (size_t) ch->super->s_blocksize); + memcpy(jdata(node), WANDER_RECORD_MAGIC, WANDER_RECORD_MAGIC_SIZE); + + put_unaligned(cpu_to_le32(ch->tx_size), &LRH->total); + put_unaligned(cpu_to_le32(serial), &LRH->serial); + put_unaligned(cpu_to_le64(*jnode_get_block(next)), &LRH->next_block); +} + +/* add one wandered map entry to formatted wander record */ +static void +store_entry(jnode * node, int index, const reiser4_block_nr * a, + const reiser4_block_nr * b) +{ + char *data; + struct wander_entry *pairs; + + data = jdata(node); + assert("zam-451", data != NULL); + + pairs = + (struct wander_entry *)(data + sizeof(struct wander_record_header)); + + put_unaligned(cpu_to_le64(*a), &pairs[index].original); + put_unaligned(cpu_to_le64(*b), &pairs[index].wandered); +} + +/* currently, wander records contains contain only wandered map, which depend on + overwrite set size */ +static void get_tx_size(struct commit_handle *ch) +{ + assert("zam-440", ch->overwrite_set_size != 0); + assert("zam-695", ch->tx_size == 0); + + /* count all ordinary wander records + (<overwrite_set_size> - 1) / <wander_record_capacity> + 1 and add one + for tx head block */ + ch->tx_size = + (ch->overwrite_set_size - 1) / wander_record_capacity(ch->super) + + 2; +} + +/* A special structure for using in store_wmap_actor() for saving its state + between calls */ +struct store_wmap_params { + jnode *cur; /* jnode of current wander record to fill */ + int idx; /* free element index in wander record */ + int capacity; /* capacity */ + +#if REISER4_DEBUG + struct list_head *tx_list; +#endif +}; + +/* an actor for use in blocknr_set_iterator routine which populates the list + of pre-formatted wander records by wandered map info */ +static int +store_wmap_actor(txn_atom * atom UNUSED_ARG, const reiser4_block_nr * a, + const 
reiser4_block_nr * b, void *data) +{ + struct store_wmap_params *params = data; + + if (params->idx >= params->capacity) { + /* a new wander record should be taken from the tx_list */ + params->cur = list_entry(params->cur->capture_link.next, jnode, capture_link); + assert("zam-454", + params->tx_list != ¶ms->cur->capture_link); + + params->idx = 0; + } + + store_entry(params->cur, params->idx, a, b); + params->idx++; + + return 0; +} + +/* This function is called after Relocate set gets written to disk, Overwrite + set is written to wandered locations and all wander records are written + also. Updated journal header blocks contains a pointer (block number) to + first wander record of the just written transaction */ +static int update_journal_header(struct commit_handle *ch) +{ + struct reiser4_super_info_data *sbinfo = get_super_private(ch->super); + jnode *jh = sbinfo->journal_header; + jnode *head = list_entry(ch->tx_list.next, jnode, capture_link); + int ret; + + format_journal_header(ch); + + ret = write_jnodes_to_disk_extent(jh, 1, jnode_get_block(jh), NULL, + WRITEOUT_FLUSH_FUA); + if (ret) + return ret; + + /* blk_run_address_space(sbinfo->fake->i_mapping); + * blk_run_queues(); */ + + ret = jwait_io(jh, WRITE); + + if (ret) + return ret; + + sbinfo->last_committed_tx = *jnode_get_block(head); + + return 0; +} + +/* This function is called after write-back is finished. We update journal + footer block and free blocks which were occupied by wandered blocks and + transaction wander records */ +static int update_journal_footer(struct commit_handle *ch) +{ + reiser4_super_info_data *sbinfo = get_super_private(ch->super); + + jnode *jf = sbinfo->journal_footer; + + int ret; + + format_journal_footer(ch); + + ret = write_jnodes_to_disk_extent(jf, 1, jnode_get_block(jf), NULL, + WRITEOUT_FLUSH_FUA); + if (ret) + return ret; + + /* blk_run_address_space(sbinfo->fake->i_mapping); + * blk_run_queue(); */ + + ret = jwait_io(jf, WRITE); + if (ret) + return ret; + + return 0; +} + +/* free block numbers of wander records of already written in place transaction */ +static void dealloc_tx_list(struct commit_handle *ch) +{ + while (!list_empty(&ch->tx_list)) { + jnode *cur = list_entry(ch->tx_list.next, jnode, capture_link); + list_del(&cur->capture_link); + ON_DEBUG(INIT_LIST_HEAD(&cur->capture_link)); + reiser4_dealloc_block(jnode_get_block(cur), 0, + BA_DEFER | BA_FORMATTED); + + unpin_jnode_data(cur); + reiser4_drop_io_head(cur); + } +} + +/* An actor for use in block_nr_iterator() routine which frees wandered blocks + from atom's overwrite set. 
*/ +static int +dealloc_wmap_actor(txn_atom * atom UNUSED_ARG, + const reiser4_block_nr * a UNUSED_ARG, + const reiser4_block_nr * b, void *data UNUSED_ARG) +{ + + assert("zam-499", b != NULL); + assert("zam-500", *b != 0); + assert("zam-501", !reiser4_blocknr_is_fake(b)); + + reiser4_dealloc_block(b, 0, BA_DEFER | BA_FORMATTED); + return 0; +} + +/* free wandered block locations of already written in place transaction */ +static void dealloc_wmap(struct commit_handle *ch) +{ + assert("zam-696", ch->atom != NULL); + + blocknr_set_iterator(ch->atom, &ch->atom->wandered_map, + dealloc_wmap_actor, NULL, 1); +} + +/* helper function for alloc wandered blocks, which refill set of block + numbers needed for wandered blocks */ +static int +get_more_wandered_blocks(int count, reiser4_block_nr * start, int *len) +{ + reiser4_blocknr_hint hint; + int ret; + + reiser4_block_nr wide_len = count; + + /* FIXME-ZAM: A special policy needed for allocation of wandered blocks + ZAM-FIXME-HANS: yes, what happened to our discussion of using a fixed + reserved allocation area so as to get the best qualities of fixed + journals? */ + reiser4_blocknr_hint_init(&hint); + hint.block_stage = BLOCK_GRABBED; + + ret = reiser4_alloc_blocks(&hint, start, &wide_len, + BA_FORMATTED | BA_USE_DEFAULT_SEARCH_START); + *len = (int)wide_len; + + return ret; +} + +/* + * roll back changes made before issuing BIO in the case of IO error. + */ +static void undo_bio(struct bio *bio) +{ + int i; + + for (i = 0; i < bio->bi_vcnt; ++i) { + struct page *pg; + jnode *node; + + pg = bio->bi_io_vec[i].bv_page; + end_page_writeback(pg); + node = jprivate(pg); + spin_lock_jnode(node); + JF_CLR(node, JNODE_WRITEBACK); + JF_SET(node, JNODE_DIRTY); + spin_unlock_jnode(node); + } + bio_put(bio); +} + +/* put overwrite set back to atom's clean list */ +static void put_overwrite_set(struct commit_handle *ch) +{ + jnode *cur; + + list_for_each_entry(cur, ch->overwrite_set, capture_link) + jrelse_tail(cur); +} + +/* Count overwrite set size, grab disk space for wandered blocks allocation. + Since we have a separate list for atom's overwrite set we just scan the list, + count bitmap and other not leaf nodes which wandered blocks allocation we + have to grab space for. */ +static int get_overwrite_set(struct commit_handle *ch) +{ + int ret; + jnode *cur; + __u64 nr_not_leaves = 0; +#if REISER4_DEBUG + __u64 nr_formatted_leaves = 0; + __u64 nr_unformatted_leaves = 0; +#endif + + assert("zam-697", ch->overwrite_set_size == 0); + + ch->overwrite_set = ATOM_OVRWR_LIST(ch->atom); + cur = list_entry(ch->overwrite_set->next, jnode, capture_link); + + while (ch->overwrite_set != &cur->capture_link) { + jnode *next = list_entry(cur->capture_link.next, jnode, capture_link); + + /* Count bitmap locks for getting correct statistics what number + * of blocks were cleared by the transaction commit. */ + if (jnode_get_type(cur) == JNODE_BITMAP) + ch->nr_bitmap++; + + assert("zam-939", JF_ISSET(cur, JNODE_OVRWR) + || jnode_get_type(cur) == JNODE_BITMAP); + + if (jnode_is_znode(cur) && znode_above_root(JZNODE(cur))) { + /* we replace fake znode by another (real) + znode which is suggested by disk_layout + plugin */ + + /* FIXME: it looks like fake znode should be + replaced by jnode supplied by + disk_layout. 
*/ + + struct super_block *s = reiser4_get_current_sb(); + reiser4_super_info_data *sbinfo = + get_current_super_private(); + + if (sbinfo->df_plug->log_super) { + jnode *sj = sbinfo->df_plug->log_super(s); + + assert("zam-593", sj != NULL); + + if (IS_ERR(sj)) + return PTR_ERR(sj); + + spin_lock_jnode(sj); + JF_SET(sj, JNODE_OVRWR); + insert_into_atom_ovrwr_list(ch->atom, sj); + spin_unlock_jnode(sj); + + /* jload it as the rest of overwrite set */ + jload_gfp(sj, reiser4_ctx_gfp_mask_get(), 0); + + ch->overwrite_set_size++; + } + spin_lock_jnode(cur); + reiser4_uncapture_block(cur); + jput(cur); + + } else { + int ret; + ch->overwrite_set_size++; + ret = jload_gfp(cur, reiser4_ctx_gfp_mask_get(), 0); + if (ret) + reiser4_panic("zam-783", + "cannot load e-flushed jnode back (ret = %d)\n", + ret); + } + + /* Count not leaves here because we have to grab disk space + * for wandered blocks. They were not counted as "flush + * reserved". Counting should be done _after_ nodes are pinned + * into memory by jload(). */ + if (!jnode_is_leaf(cur)) + nr_not_leaves++; + else { +#if REISER4_DEBUG + /* at this point @cur either has JNODE_FLUSH_RESERVED + * or is eflushed. Locking is not strong enough to + * write an assertion checking for this. */ + if (jnode_is_znode(cur)) + nr_formatted_leaves++; + else + nr_unformatted_leaves++; +#endif + JF_CLR(cur, JNODE_FLUSH_RESERVED); + } + + cur = next; + } + + /* Grab space for writing (wandered blocks) of not leaves found in + * overwrite set. */ + ret = reiser4_grab_space_force(nr_not_leaves, BA_RESERVED); + if (ret) + return ret; + + /* Disk space for allocation of wandered blocks of leaf nodes already + * reserved as "flush reserved", move it to grabbed space counter. */ + spin_lock_atom(ch->atom); + assert("zam-940", + nr_formatted_leaves + nr_unformatted_leaves <= + ch->atom->flush_reserved); + flush_reserved2grabbed(ch->atom, ch->atom->flush_reserved); + spin_unlock_atom(ch->atom); + + return ch->overwrite_set_size; +} + +/** + * write_jnodes_to_disk_extent - submit write request + * @head: + * @first: first jnode of the list + * @nr: number of jnodes on the list + * @block_p: + * @fq: + * @flags: used to decide whether page is to get PG_reclaim flag + * + * Submits a write request for @nr jnodes beginning from the @first, other + * jnodes are after the @first on the double-linked "capture" list. All jnodes + * will be written to the disk region of @nr blocks starting with @block_p block + * number. If @fq is not NULL it means that waiting for i/o completion will be + * done more efficiently by using flush_queue_t objects. + * This function is the one which writes list of jnodes in batch mode. It does + * all low-level things as bio construction and page states manipulation. + * + * ZAM-FIXME-HANS: brief me on why this function exists, and why bios are + * aggregated in this function instead of being left to the layers below + * + * FIXME: ZAM->HANS: What layer are you talking about? Can you point me to that? + * Why that layer needed? Why BIOs cannot be constructed here? + */ +static int write_jnodes_to_disk_extent( + jnode *first, int nr, const reiser4_block_nr *block_p, + flush_queue_t *fq, int flags) +{ + struct super_block *super = reiser4_get_current_sb(); + int op_flags = (flags & WRITEOUT_FLUSH_FUA) ? 
REQ_PREFLUSH | REQ_FUA : 0; + jnode *cur = first; + reiser4_block_nr block; + + assert("zam-571", first != NULL); + assert("zam-572", block_p != NULL); + assert("zam-570", nr > 0); + + block = *block_p; + + while (nr > 0) { + struct bio *bio; + int nr_blocks = min(nr, BIO_MAX_PAGES); + int i; + int nr_used; + + bio = bio_alloc(GFP_NOIO, nr_blocks); + if (!bio) + return RETERR(-ENOMEM); + + bio_set_dev(bio, super->s_bdev); + bio->bi_iter.bi_sector = block * (super->s_blocksize >> 9); + for (nr_used = 0, i = 0; i < nr_blocks; i++) { + struct page *pg; + + pg = jnode_page(cur); + assert("zam-573", pg != NULL); + + get_page(pg); + + lock_and_wait_page_writeback(pg); + + if (!bio_add_page(bio, pg, super->s_blocksize, 0)) { + /* + * underlying device is satiated. Stop adding + * pages to the bio. + */ + unlock_page(pg); + put_page(pg); + break; + } + + spin_lock_jnode(cur); + assert("nikita-3166", + pg->mapping == jnode_get_mapping(cur)); + assert("zam-912", !JF_ISSET(cur, JNODE_WRITEBACK)); +#if REISER4_DEBUG + spin_lock(&cur->load); + assert("nikita-3165", !jnode_is_releasable(cur)); + spin_unlock(&cur->load); +#endif + JF_SET(cur, JNODE_WRITEBACK); + JF_CLR(cur, JNODE_DIRTY); + ON_DEBUG(cur->written++); + + assert("edward-1647", + ergo(jnode_is_znode(cur), JF_ISSET(cur, JNODE_PARSED))); + spin_unlock_jnode(cur); + /* + * update checksum + */ + if (jnode_is_znode(cur)) { + zload(JZNODE(cur)); + if (node_plugin_by_node(JZNODE(cur))->csum) + node_plugin_by_node(JZNODE(cur))->csum(JZNODE(cur), 0); + zrelse(JZNODE(cur)); + } + ClearPageError(pg); + set_page_writeback(pg); + + if (get_current_context()->entd) { + /* this is ent thread */ + entd_context *ent = get_entd_context(super); + struct wbq *rq, *next; + + spin_lock(&ent->guard); + + if (pg == ent->cur_request->page) { + /* + * entd is called for this page. This + * request is not in th etodo list + */ + ent->cur_request->written = 1; + } else { + /* + * if we have written a page for which writepage + * is called for - move request to another list. 
+ */ + list_for_each_entry_safe(rq, next, &ent->todo_list, link) { + assert("", rq->magic == WBQ_MAGIC); + if (pg == rq->page) { + /* + * remove request from + * entd's queue, but do + * not wake up a thread + * which put this + * request + */ + list_del_init(&rq->link); + ent->nr_todo_reqs --; + list_add_tail(&rq->link, &ent->done_list); + ent->nr_done_reqs ++; + rq->written = 1; + break; + } + } + } + spin_unlock(&ent->guard); + } + + clear_page_dirty_for_io(pg); + + unlock_page(pg); + + cur = list_entry(cur->capture_link.next, jnode, capture_link); + nr_used++; + } + if (nr_used > 0) { + assert("nikita-3453", + bio->bi_iter.bi_size == super->s_blocksize * nr_used); + assert("nikita-3454", bio->bi_vcnt == nr_used); + + /* Check if we are allowed to write at all */ + if (super->s_flags & MS_RDONLY) + undo_bio(bio); + else { + add_fq_to_bio(fq, bio); + bio_get(bio); + bio_set_op_attrs(bio, WRITE, op_flags); + submit_bio(bio); + bio_put(bio); + } + + block += nr_used - 1; + update_blocknr_hint_default(super, &block); + block += 1; + } else { + bio_put(bio); + } + nr -= nr_used; + } + + return 0; +} + +/* This is a procedure which recovers a contiguous sequences of disk block + numbers in the given list of j-nodes and submits write requests on this + per-sequence basis */ +int +write_jnode_list(struct list_head *head, flush_queue_t *fq, + long *nr_submitted, int flags) +{ + int ret; + jnode *beg = list_entry(head->next, jnode, capture_link); + + while (head != &beg->capture_link) { + int nr = 1; + jnode *cur = list_entry(beg->capture_link.next, jnode, capture_link); + + while (head != &cur->capture_link) { + if (*jnode_get_block(cur) != *jnode_get_block(beg) + nr) + break; + ++nr; + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + + ret = write_jnodes_to_disk_extent( + beg, nr, jnode_get_block(beg), fq, flags); + if (ret) + return ret; + + if (nr_submitted) + *nr_submitted += nr; + + beg = cur; + } + + return 0; +} + +/* add given wandered mapping to atom's wandered map */ +static int +add_region_to_wmap(jnode * cur, int len, const reiser4_block_nr * block_p) +{ + int ret; + blocknr_set_entry *new_bsep = NULL; + reiser4_block_nr block; + + txn_atom *atom; + + assert("zam-568", block_p != NULL); + block = *block_p; + assert("zam-569", len > 0); + + while ((len--) > 0) { + do { + atom = get_current_atom_locked(); + assert("zam-536", + !reiser4_blocknr_is_fake(jnode_get_block(cur))); + ret = + blocknr_set_add_pair(atom, &atom->wandered_map, + &new_bsep, + jnode_get_block(cur), &block); + } while (ret == -E_REPEAT); + + if (ret) { + /* deallocate blocks which were not added to wandered + map */ + reiser4_block_nr wide_len = len; + + reiser4_dealloc_blocks(&block, &wide_len, + BLOCK_NOT_COUNTED, + BA_FORMATTED + /* formatted, without defer */ ); + + return ret; + } + + spin_unlock_atom(atom); + + cur = list_entry(cur->capture_link.next, jnode, capture_link); + ++block; + } + + return 0; +} + +/* Allocate wandered blocks for current atom's OVERWRITE SET and immediately + submit IO for allocated blocks. We assume that current atom is in a stage + when any atom fusion is impossible and atom is unlocked and it is safe. 
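+
+   As an illustration only (the block numbers are made up): if the
+   overwrite set holds three jnodes whose original locations are 7, 52
+   and 90, and get_more_wandered_blocks() returns start = 1000 with
+   len = 3, then add_region_to_wmap() records the pairs (7,1000),
+   (52,1001) and (90,1002) in the atom's wandered map, and the run of
+   three wandered blocks starting at 1000 is submitted as one extent by
+   write_jnodes_to_disk_extent().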
*/ +static int alloc_wandered_blocks(struct commit_handle *ch, flush_queue_t *fq) +{ + reiser4_block_nr block; + + int rest; + int len; + int ret; + + jnode *cur; + + assert("zam-534", ch->overwrite_set_size > 0); + + rest = ch->overwrite_set_size; + + cur = list_entry(ch->overwrite_set->next, jnode, capture_link); + while (ch->overwrite_set != &cur->capture_link) { + assert("zam-567", JF_ISSET(cur, JNODE_OVRWR)); + + ret = get_more_wandered_blocks(rest, &block, &len); + if (ret) + return ret; + + rest -= len; + + ret = add_region_to_wmap(cur, len, &block); + if (ret) + return ret; + + ret = write_jnodes_to_disk_extent(cur, len, &block, fq, 0); + if (ret) + return ret; + + while ((len--) > 0) { + assert("zam-604", + ch->overwrite_set != &cur->capture_link); + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + } + + return 0; +} + +/* allocate given number of nodes over the journal area and link them into a + list, return pointer to the first jnode in the list */ +static int alloc_tx(struct commit_handle *ch, flush_queue_t * fq) +{ + reiser4_blocknr_hint hint; + reiser4_block_nr allocated = 0; + reiser4_block_nr first, len; + jnode *cur; + jnode *txhead; + int ret; + reiser4_context *ctx; + reiser4_super_info_data *sbinfo; + + assert("zam-698", ch->tx_size > 0); + assert("zam-699", list_empty_careful(&ch->tx_list)); + + ctx = get_current_context(); + sbinfo = get_super_private(ctx->super); + + while (allocated < (unsigned)ch->tx_size) { + len = (ch->tx_size - allocated); + + reiser4_blocknr_hint_init(&hint); + + hint.block_stage = BLOCK_GRABBED; + + /* FIXME: there should be some block allocation policy for + nodes which contain wander records */ + + /* We assume that disk space for wandered record blocks can be + * taken from reserved area. 
*/ + ret = reiser4_alloc_blocks(&hint, &first, &len, + BA_FORMATTED | BA_RESERVED | + BA_USE_DEFAULT_SEARCH_START); + reiser4_blocknr_hint_done(&hint); + + if (ret) + return ret; + + allocated += len; + + /* create jnodes for all wander records */ + while (len--) { + cur = reiser4_alloc_io_head(&first); + + if (cur == NULL) { + ret = RETERR(-ENOMEM); + goto free_not_assigned; + } + + ret = jinit_new(cur, reiser4_ctx_gfp_mask_get()); + + if (ret != 0) { + jfree(cur); + goto free_not_assigned; + } + + pin_jnode_data(cur); + + list_add_tail(&cur->capture_link, &ch->tx_list); + + first++; + } + } + + { /* format a on-disk linked list of wander records */ + int serial = 1; + + txhead = list_entry(ch->tx_list.next, jnode, capture_link); + format_tx_head(ch); + + cur = list_entry(txhead->capture_link.next, jnode, capture_link); + while (&ch->tx_list != &cur->capture_link) { + format_wander_record(ch, cur, serial++); + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + } + + { /* Fill wander records with Wandered Set */ + struct store_wmap_params params; + txn_atom *atom; + + params.cur = list_entry(txhead->capture_link.next, jnode, capture_link); + + params.idx = 0; + params.capacity = + wander_record_capacity(reiser4_get_current_sb()); + + atom = get_current_atom_locked(); + blocknr_set_iterator(atom, &atom->wandered_map, + &store_wmap_actor, ¶ms, 0); + spin_unlock_atom(atom); + } + + { /* relse all jnodes from tx_list */ + cur = list_entry(ch->tx_list.next, jnode, capture_link); + while (&ch->tx_list != &cur->capture_link) { + jrelse(cur); + cur = list_entry(cur->capture_link.next, jnode, capture_link); + } + } + + ret = write_jnode_list(&ch->tx_list, fq, NULL, 0); + + return ret; + + free_not_assigned: + /* We deallocate blocks not yet assigned to jnodes on tx_list. The + caller takes care about invalidating of tx list */ + reiser4_dealloc_blocks(&first, &len, BLOCK_NOT_COUNTED, BA_FORMATTED); + + return ret; +} + +static int commit_tx(struct commit_handle *ch) +{ + flush_queue_t *fq; + int ret; + + /* Grab more space for wandered records. */ + ret = reiser4_grab_space_force((__u64) (ch->tx_size), BA_RESERVED); + if (ret) + return ret; + + fq = get_fq_for_current_atom(); + if (IS_ERR(fq)) + return PTR_ERR(fq); + + spin_unlock_atom(fq->atom); + do { + ret = alloc_wandered_blocks(ch, fq); + if (ret) + break; + ret = alloc_tx(ch, fq); + if (ret) + break; + } while (0); + + reiser4_fq_put(fq); + if (ret) + return ret; + ret = current_atom_finish_all_fq(); + if (ret) + return ret; + return update_journal_header(ch); +} + +static int write_tx_back(struct commit_handle * ch) +{ + flush_queue_t *fq; + int ret; + + fq = get_fq_for_current_atom(); + if (IS_ERR(fq)) + return PTR_ERR(fq); + spin_unlock_atom(fq->atom); + ret = write_jnode_list( + ch->overwrite_set, fq, NULL, WRITEOUT_FOR_PAGE_RECLAIM); + reiser4_fq_put(fq); + if (ret) + return ret; + ret = current_atom_finish_all_fq(); + if (ret) + return ret; + return update_journal_footer(ch); +} + +/* We assume that at this moment all captured blocks are marked as RELOC or + WANDER (belong to Relocate o Overwrite set), all nodes from Relocate set + are submitted to write. 
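+
+ A rough sketch of the sequence implemented below (simplified; error
+ handling and locking omitted, the helpers above hold the details):
+
+	get_overwrite_set(&ch);    pin the overwrite set, grab disk space
+	get_tx_size(&ch);          count wander record blocks needed
+	commit_tx(&ch);            write wandered copies and wander records,
+	                           then update the journal header
+	write_tx_back(&ch);        write the overwrite set in place,
+	                           then update the journal footer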
+*/ + +int reiser4_write_logs(long *nr_submitted) +{ + txn_atom *atom; + struct super_block *super = reiser4_get_current_sb(); + reiser4_super_info_data *sbinfo = get_super_private(super); + struct commit_handle ch; + int ret; + + writeout_mode_enable(); + + /* block allocator may add j-nodes to the clean_list */ + ret = reiser4_pre_commit_hook(); + if (ret) + return ret; + + /* No locks are required if we take atom which stage >= + * ASTAGE_PRE_COMMIT */ + atom = get_current_context()->trans->atom; + assert("zam-965", atom != NULL); + + /* relocate set is on the atom->clean_nodes list after + * current_atom_complete_writes() finishes. It can be safely + * uncaptured after commit_mutex is locked, because any atom that + * captures these nodes is guaranteed to commit after current one. + * + * This can only be done after reiser4_pre_commit_hook(), because it is where + * early flushed jnodes with CREATED bit are transferred to the + * overwrite list. */ + reiser4_invalidate_list(ATOM_CLEAN_LIST(atom)); + spin_lock_atom(atom); + /* There might be waiters for the relocate nodes which we have + * released, wake them up. */ + reiser4_atom_send_event(atom); + spin_unlock_atom(atom); + + if (REISER4_DEBUG) { + int level; + + for (level = 0; level < REAL_MAX_ZTREE_HEIGHT + 1; ++level) + assert("nikita-3352", + list_empty_careful(ATOM_DIRTY_LIST(atom, level))); + } + + sbinfo->nr_files_committed += (unsigned)atom->nr_objects_created; + sbinfo->nr_files_committed -= (unsigned)atom->nr_objects_deleted; + + init_commit_handle(&ch, atom); + + ch.free_blocks = sbinfo->blocks_free_committed; + ch.nr_files = sbinfo->nr_files_committed; + /* ZAM-FIXME-HANS: email me what the contention level is for the super + * lock. */ + ch.next_oid = oid_next(super); + + /* count overwrite set and place it in a separate list */ + ret = get_overwrite_set(&ch); + + if (ret <= 0) { + /* It is possible that overwrite set is empty here, it means + all captured nodes are clean */ + goto up_and_ret; + } + + /* Inform the caller about what number of dirty pages will be + * submitted to disk. */ + *nr_submitted += ch.overwrite_set_size - ch.nr_bitmap; + + /* count all records needed for storing of the wandered set */ + get_tx_size(&ch); + + ret = commit_tx(&ch); + if (ret) + goto up_and_ret; + + spin_lock_atom(atom); + reiser4_atom_set_stage(atom, ASTAGE_POST_COMMIT); + spin_unlock_atom(atom); + reiser4_post_commit_hook(); + + ret = write_tx_back(&ch); + + up_and_ret: + if (ret) { + /* there could be fq attached to current atom; the only way to + remove them is: */ + current_atom_finish_all_fq(); + } + + /* free blocks of flushed transaction */ + dealloc_tx_list(&ch); + dealloc_wmap(&ch); + + reiser4_post_write_back_hook(); + + put_overwrite_set(&ch); + + done_commit_handle(&ch); + + writeout_mode_disable(); + + return ret; +} + +/* consistency checks for journal data/control blocks: header, footer, log + records, transactions head blocks. All functions return zero on success. */ + +static int check_journal_header(const jnode * node UNUSED_ARG) +{ + /* FIXME: journal header has no magic field yet. 
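+	   If such a field is added later, the check would presumably
+	   mirror check_tx_head() and check_wander_record() below; with a
+	   hypothetical magic[] member and JOURNAL_HEADER_MAGIC constant
+	   (neither exists in wander.h yet) it could read:
+
+		header = (struct journal_header *)jdata(node);
+		if (memcmp(&header->magic, JOURNAL_HEADER_MAGIC,
+			   JOURNAL_HEADER_MAGIC_SIZE) != 0)
+			return RETERR(-EIO);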
*/ + return 0; +} + +/* wait for write completion for all jnodes from given list */ +static int wait_on_jnode_list(struct list_head *head) +{ + jnode *scan; + int ret = 0; + + list_for_each_entry(scan, head, capture_link) { + struct page *pg = jnode_page(scan); + + if (pg) { + if (PageWriteback(pg)) + wait_on_page_writeback(pg); + + if (PageError(pg)) + ret++; + } + } + + return ret; +} + +static int check_journal_footer(const jnode * node UNUSED_ARG) +{ + /* FIXME: journal footer has no magic field yet. */ + return 0; +} + +static int check_tx_head(const jnode * node) +{ + struct tx_header *header = (struct tx_header *)jdata(node); + + if (memcmp(&header->magic, TX_HEADER_MAGIC, TX_HEADER_MAGIC_SIZE) != 0) { + warning("zam-627", "tx head at block %s corrupted\n", + sprint_address(jnode_get_block(node))); + return RETERR(-EIO); + } + + return 0; +} + +static int check_wander_record(const jnode * node) +{ + struct wander_record_header *RH = + (struct wander_record_header *)jdata(node); + + if (memcmp(&RH->magic, WANDER_RECORD_MAGIC, WANDER_RECORD_MAGIC_SIZE) != + 0) { + warning("zam-628", "wander record at block %s corrupted\n", + sprint_address(jnode_get_block(node))); + return RETERR(-EIO); + } + + return 0; +} + +/* fill commit_handler structure by everything what is needed for update_journal_footer */ +static int restore_commit_handle(struct commit_handle *ch, jnode *tx_head) +{ + struct tx_header *TXH; + int ret; + + ret = jload(tx_head); + if (ret) + return ret; + + TXH = (struct tx_header *)jdata(tx_head); + + ch->free_blocks = le64_to_cpu(get_unaligned(&TXH->free_blocks)); + ch->nr_files = le64_to_cpu(get_unaligned(&TXH->nr_files)); + ch->next_oid = le64_to_cpu(get_unaligned(&TXH->next_oid)); + + jrelse(tx_head); + + list_add(&tx_head->capture_link, &ch->tx_list); + + return 0; +} + +/* replay one transaction: restore and write overwrite set in place */ +static int replay_transaction(const struct super_block *s, + jnode * tx_head, + const reiser4_block_nr * log_rec_block_p, + const reiser4_block_nr * end_block, + unsigned int nr_wander_records) +{ + reiser4_block_nr log_rec_block = *log_rec_block_p; + struct commit_handle ch; + LIST_HEAD(overwrite_set); + jnode *log; + int ret; + + init_commit_handle(&ch, NULL); + ch.overwrite_set = &overwrite_set; + + restore_commit_handle(&ch, tx_head); + + while (log_rec_block != *end_block) { + struct wander_record_header *header; + struct wander_entry *entry; + + int i; + + if (nr_wander_records == 0) { + warning("zam-631", + "number of wander records in the linked list" + " greater than number stored in tx head.\n"); + ret = RETERR(-EIO); + goto free_ow_set; + } + + log = reiser4_alloc_io_head(&log_rec_block); + if (log == NULL) + return RETERR(-ENOMEM); + + ret = jload(log); + if (ret < 0) { + reiser4_drop_io_head(log); + return ret; + } + + ret = check_wander_record(log); + if (ret) { + jrelse(log); + reiser4_drop_io_head(log); + return ret; + } + + header = (struct wander_record_header *)jdata(log); + log_rec_block = le64_to_cpu(get_unaligned(&header->next_block)); + + entry = (struct wander_entry *)(header + 1); + + /* restore overwrite set from wander record content */ + for (i = 0; i < wander_record_capacity(s); i++) { + reiser4_block_nr block; + jnode *node; + + block = le64_to_cpu(get_unaligned(&entry->wandered)); + if (block == 0) + break; + + node = reiser4_alloc_io_head(&block); + if (node == NULL) { + ret = RETERR(-ENOMEM); + /* + * FIXME-VS:??? 
+ */ + jrelse(log); + reiser4_drop_io_head(log); + goto free_ow_set; + } + + ret = jload(node); + + if (ret < 0) { + reiser4_drop_io_head(node); + /* + * FIXME-VS:??? + */ + jrelse(log); + reiser4_drop_io_head(log); + goto free_ow_set; + } + + block = le64_to_cpu(get_unaligned(&entry->original)); + + assert("zam-603", block != 0); + + jnode_set_block(node, &block); + + list_add_tail(&node->capture_link, ch.overwrite_set); + + ++entry; + } + + jrelse(log); + reiser4_drop_io_head(log); + + --nr_wander_records; + } + + if (nr_wander_records != 0) { + warning("zam-632", "number of wander records in the linked list" + " less than number stored in tx head.\n"); + ret = RETERR(-EIO); + goto free_ow_set; + } + + { /* write wandered set in place */ + write_jnode_list(ch.overwrite_set, NULL, NULL, 0); + ret = wait_on_jnode_list(ch.overwrite_set); + + if (ret) { + ret = RETERR(-EIO); + goto free_ow_set; + } + } + + ret = update_journal_footer(&ch); + + free_ow_set: + + while (!list_empty(ch.overwrite_set)) { + jnode *cur = list_entry(ch.overwrite_set->next, jnode, capture_link); + list_del_init(&cur->capture_link); + jrelse(cur); + reiser4_drop_io_head(cur); + } + + list_del_init(&tx_head->capture_link); + + done_commit_handle(&ch); + + return ret; +} + +/* find oldest committed and not played transaction and play it. The transaction + * was committed and journal header block was updated but the blocks from the + * process of writing the atom's overwrite set in-place and updating of journal + * footer block were not completed. This function completes the process by + * recovering the atom's overwrite set from their wandered locations and writes + * them in-place and updating the journal footer. */ +static int replay_oldest_transaction(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + jnode *jf = sbinfo->journal_footer; + unsigned int total; + struct journal_footer *F; + struct tx_header *T; + + reiser4_block_nr prev_tx; + reiser4_block_nr last_flushed_tx; + reiser4_block_nr log_rec_block = 0; + + jnode *tx_head; + + int ret; + + if ((ret = jload(jf)) < 0) + return ret; + + F = (struct journal_footer *)jdata(jf); + + last_flushed_tx = le64_to_cpu(get_unaligned(&F->last_flushed_tx)); + + jrelse(jf); + + if (sbinfo->last_committed_tx == last_flushed_tx) { + /* all transactions are replayed */ + return 0; + } + + prev_tx = sbinfo->last_committed_tx; + + /* searching for oldest not flushed transaction */ + while (1) { + tx_head = reiser4_alloc_io_head(&prev_tx); + if (!tx_head) + return RETERR(-ENOMEM); + + ret = jload(tx_head); + if (ret < 0) { + reiser4_drop_io_head(tx_head); + return ret; + } + + ret = check_tx_head(tx_head); + if (ret) { + jrelse(tx_head); + reiser4_drop_io_head(tx_head); + return ret; + } + + T = (struct tx_header *)jdata(tx_head); + + prev_tx = le64_to_cpu(get_unaligned(&T->prev_tx)); + + if (prev_tx == last_flushed_tx) + break; + + jrelse(tx_head); + reiser4_drop_io_head(tx_head); + } + + total = le32_to_cpu(get_unaligned(&T->total)); + log_rec_block = le64_to_cpu(get_unaligned(&T->next_block)); + + pin_jnode_data(tx_head); + jrelse(tx_head); + + ret = + replay_transaction(s, tx_head, &log_rec_block, + jnode_get_block(tx_head), total - 1); + + unpin_jnode_data(tx_head); + reiser4_drop_io_head(tx_head); + + if (ret) + return ret; + return -E_REPEAT; +} + +/* The reiser4 journal current implementation was optimized to not to capture + super block if certain super blocks fields are modified. 
Currently, the set + is (<free block count>, <OID allocator>). These fields are logged by + special way which includes storing them in each transaction head block at + atom commit time and writing that information to journal footer block at + atom flush time. For getting info from journal footer block to the + in-memory super block there is a special function + reiser4_journal_recover_sb_data() which should be called after disk format + plugin re-reads super block after journal replaying. +*/ + +/* get the information from journal footer in-memory super block */ +int reiser4_journal_recover_sb_data(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + struct journal_footer *jf; + int ret; + + assert("zam-673", sbinfo->journal_footer != NULL); + + ret = jload(sbinfo->journal_footer); + if (ret != 0) + return ret; + + ret = check_journal_footer(sbinfo->journal_footer); + if (ret != 0) + goto out; + + jf = (struct journal_footer *)jdata(sbinfo->journal_footer); + + /* was there at least one flushed transaction? */ + if (jf->last_flushed_tx) { + + /* restore free block counter logged in this transaction */ + reiser4_set_free_blocks(s, le64_to_cpu(get_unaligned(&jf->free_blocks))); + + /* restore oid allocator state */ + oid_init_allocator(s, + le64_to_cpu(get_unaligned(&jf->nr_files)), + le64_to_cpu(get_unaligned(&jf->next_oid))); + } + out: + jrelse(sbinfo->journal_footer); + return ret; +} + +/* reiser4 replay journal procedure */ +int reiser4_journal_replay(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + jnode *jh, *jf; + struct journal_header *header; + int nr_tx_replayed = 0; + int ret; + + assert("zam-582", sbinfo != NULL); + + jh = sbinfo->journal_header; + jf = sbinfo->journal_footer; + + if (!jh || !jf) { + /* it is possible that disk layout does not support journal + structures, we just warn about this */ + warning("zam-583", + "journal control blocks were not loaded by disk layout plugin. " + "journal replaying is not possible.\n"); + return 0; + } + + /* Take free block count from journal footer block. 
The free block + counter value corresponds the last flushed transaction state */ + ret = jload(jf); + if (ret < 0) + return ret; + + ret = check_journal_footer(jf); + if (ret) { + jrelse(jf); + return ret; + } + + jrelse(jf); + + /* store last committed transaction info in reiser4 in-memory super + block */ + ret = jload(jh); + if (ret < 0) + return ret; + + ret = check_journal_header(jh); + if (ret) { + jrelse(jh); + return ret; + } + + header = (struct journal_header *)jdata(jh); + sbinfo->last_committed_tx = le64_to_cpu(get_unaligned(&header->last_committed_tx)); + + jrelse(jh); + + /* replay committed transactions */ + while ((ret = replay_oldest_transaction(s)) == -E_REPEAT) + nr_tx_replayed++; + + return ret; +} + +/* load journal control block (either journal header or journal footer block) */ +static int +load_journal_control_block(jnode ** node, const reiser4_block_nr * block) +{ + int ret; + + *node = reiser4_alloc_io_head(block); + if (!(*node)) + return RETERR(-ENOMEM); + + ret = jload(*node); + + if (ret) { + reiser4_drop_io_head(*node); + *node = NULL; + return ret; + } + + pin_jnode_data(*node); + jrelse(*node); + + return 0; +} + +/* unload journal header or footer and free jnode */ +static void unload_journal_control_block(jnode ** node) +{ + if (*node) { + unpin_jnode_data(*node); + reiser4_drop_io_head(*node); + *node = NULL; + } +} + +/* release journal control blocks */ +void reiser4_done_journal_info(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + + assert("zam-476", sbinfo != NULL); + + unload_journal_control_block(&sbinfo->journal_header); + unload_journal_control_block(&sbinfo->journal_footer); + rcu_barrier(); +} + +/* load journal control blocks */ +int reiser4_init_journal_info(struct super_block *s) +{ + reiser4_super_info_data *sbinfo = get_super_private(s); + journal_location *loc; + int ret; + + loc = &sbinfo->jloc; + + assert("zam-651", loc != NULL); + assert("zam-652", loc->header != 0); + assert("zam-653", loc->footer != 0); + + ret = load_journal_control_block(&sbinfo->journal_header, &loc->header); + + if (ret) + return ret; + + ret = load_journal_control_block(&sbinfo->journal_footer, &loc->footer); + + if (ret) { + unload_journal_control_block(&sbinfo->journal_header); + } + + return ret; +} + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/wander.h b/fs/reiser4/wander.h new file mode 100644 index 000000000000..8746710b66be --- /dev/null +++ b/fs/reiser4/wander.h @@ -0,0 +1,135 @@ +/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined (__FS_REISER4_WANDER_H__) +#define __FS_REISER4_WANDER_H__ + +#include "dformat.h" + +#include <linux/fs.h> /* for struct super_block */ + +/* REISER4 JOURNAL ON-DISK DATA STRUCTURES */ + +#define TX_HEADER_MAGIC "TxMagic4" +#define WANDER_RECORD_MAGIC "LogMagc4" + +#define TX_HEADER_MAGIC_SIZE (8) +#define WANDER_RECORD_MAGIC_SIZE (8) + +/* journal header block format */ +struct journal_header { + /* last written transaction head location */ + d64 last_committed_tx; +}; + +typedef struct journal_location { + reiser4_block_nr footer; + reiser4_block_nr header; +} journal_location; + +/* The wander.c head comment describes usage and semantic of all these structures */ +/* journal footer block format */ +struct journal_footer { + /* last flushed transaction location. 
*/ + /* This block number is no more valid after the transaction it points + to gets flushed, this number is used only at journal replaying time + for detection of the end of on-disk list of committed transactions + which were not flushed completely */ + d64 last_flushed_tx; + + /* free block counter is written in journal footer at transaction + flushing , not in super block because free blocks counter is logged + by another way than super block fields (root pointer, for + example). */ + d64 free_blocks; + + /* number of used OIDs and maximal used OID are logged separately from + super block */ + d64 nr_files; + d64 next_oid; +}; + +/* Each wander record (except the first one) has unified format with wander + record header followed by an array of log entries */ +struct wander_record_header { + /* when there is no predefined location for wander records, this magic + string should help reiser4fsck. */ + char magic[WANDER_RECORD_MAGIC_SIZE]; + + /* transaction id */ + d64 id; + + /* total number of wander records in current transaction */ + d32 total; + + /* this block number in transaction */ + d32 serial; + + /* number of previous block in commit */ + d64 next_block; +}; + +/* The first wander record (transaction head) of written transaction has the + special format */ +struct tx_header { + /* magic string makes first block in transaction different from other + logged blocks, it should help fsck. */ + char magic[TX_HEADER_MAGIC_SIZE]; + + /* transaction id */ + d64 id; + + /* total number of records (including this first tx head) in the + transaction */ + d32 total; + + /* align next field to 8-byte boundary; this field always is zero */ + d32 padding; + + /* block number of previous transaction head */ + d64 prev_tx; + + /* next wander record location */ + d64 next_block; + + /* committed versions of free blocks counter */ + d64 free_blocks; + + /* number of used OIDs (nr_files) and maximal used OID are logged + separately from super block */ + d64 nr_files; + d64 next_oid; +}; + +/* A transaction gets written to disk as a set of wander records (each wander + record size is fs block) */ + +/* As it was told above a wander The rest of wander record is filled by these log entries, unused space filled + by zeroes */ +struct wander_entry { + d64 original; /* block original location */ + d64 wandered; /* block wandered location */ +}; + +/* REISER4 JOURNAL WRITER FUNCTIONS */ + +extern int reiser4_write_logs(long *); +extern int reiser4_journal_replay(struct super_block *); +extern int reiser4_journal_recover_sb_data(struct super_block *); + +extern int reiser4_init_journal_info(struct super_block *); +extern void reiser4_done_journal_info(struct super_block *); + +extern int write_jnode_list(struct list_head *, flush_queue_t *, long *, int); + +#endif /* __FS_REISER4_WANDER_H__ */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + scroll-step: 1 + End: +*/ diff --git a/fs/reiser4/writeout.h b/fs/reiser4/writeout.h new file mode 100644 index 000000000000..fb9d2e493940 --- /dev/null +++ b/fs/reiser4/writeout.h @@ -0,0 +1,21 @@ +/* Copyright 2002, 2003, 2004 by Hans Reiser, licensing governed by reiser4/README */ + +#if !defined (__FS_REISER4_WRITEOUT_H__) + +#define WRITEOUT_SINGLE_STREAM (0x1) +#define WRITEOUT_FOR_PAGE_RECLAIM (0x2) +#define WRITEOUT_FLUSH_FUA (0x4) + +extern int reiser4_get_writeout_flags(void); + +#endif /* __FS_REISER4_WRITEOUT_H__ */ + +/* Make Linus happy. 
+ Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 80 + End: +*/ diff --git a/fs/reiser4/znode.c b/fs/reiser4/znode.c new file mode 100644 index 000000000000..f7c77ef5435a --- /dev/null +++ b/fs/reiser4/znode.c @@ -0,0 +1,1027 @@ +/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by + * reiser4/README */ +/* Znode manipulation functions. */ +/* Znode is the in-memory header for a tree node. It is stored + separately from the node itself so that it does not get written to + disk. In this respect znode is like buffer head or page head. We + also use znodes for additional reiser4 specific purposes: + + . they are organized into tree structure which is a part of whole + reiser4 tree. + . they are used to implement node grained locking + . they are used to keep additional state associated with a + node + . they contain links to lists used by the transaction manager + + Znode is attached to some variable "block number" which is instance of + fs/reiser4/tree.h:reiser4_block_nr type. Znode can exist without + appropriate node being actually loaded in memory. Existence of znode itself + is regulated by reference count (->x_count) in it. Each time thread + acquires reference to znode through call to zget(), ->x_count is + incremented and decremented on call to zput(). Data (content of node) are + brought in memory through call to zload(), which also increments ->d_count + reference counter. zload can block waiting on IO. Call to zrelse() + decreases this counter. Also, ->c_count keeps track of number of child + znodes and prevents parent znode from being recycled until all of its + children are. ->c_count is decremented whenever child goes out of existence + (being actually recycled in zdestroy()) which can be some time after last + reference to this child dies if we support some form of LRU cache for + znodes. + +*/ +/* EVERY ZNODE'S STORY + + 1. His infancy. + + Once upon a time, the znode was born deep inside of zget() by call to + zalloc(). At the return from zget() znode had: + + . reference counter (x_count) of 1 + . assigned block number, marked as used in bitmap + . pointer to parent znode. Root znode parent pointer points + to its father: "fake" znode. This, in turn, has NULL parent pointer. + . hash table linkage + . no data loaded from disk + . no node plugin + . no sibling linkage + + 2. His childhood + + Each node is either brought into memory as a result of tree traversal, or + created afresh, creation of the root being a special case of the latter. In + either case it's inserted into sibling list. This will typically require + some ancillary tree traversing, but ultimately both sibling pointers will + exist and JNODE_LEFT_CONNECTED and JNODE_RIGHT_CONNECTED will be true in + zjnode.state. + + 3. His youth. + + If znode is bound to already existing node in a tree, its content is read + from the disk by call to zload(). At that moment, JNODE_LOADED bit is set + in zjnode.state and zdata() function starts to return non null for this + znode. zload() further calls zparse() that determines which node layout + this node is rendered in, and sets ->nplug on success. + + If znode is for new node just created, memory for it is allocated and + zinit_new() function is called to initialise data, according to selected + node layout. + + 4. His maturity. + + After this point, znode lingers in memory for some time. 
Threads can + acquire references to znode either by blocknr through call to zget(), or by + following a pointer to unallocated znode from internal item. Each time + reference to znode is obtained, x_count is increased. Thread can read/write + lock znode. Znode data can be loaded through calls to zload(), d_count will + be increased appropriately. If all references to znode are released + (x_count drops to 0), znode is not recycled immediately. Rather, it is + still cached in the hash table in the hope that it will be accessed + shortly. + + There are two ways in which znode existence can be terminated: + + . sudden death: node bound to this znode is removed from the tree + . overpopulation: znode is purged out of memory due to memory pressure + + 5. His death. + + Death is complex process. + + When we irrevocably commit ourselves to decision to remove node from the + tree, JNODE_HEARD_BANSHEE bit is set in zjnode.state of corresponding + znode. This is done either in ->kill_hook() of internal item or in + reiser4_kill_root() function when tree root is removed. + + At this moment znode still has: + + . locks held on it, necessary write ones + . references to it + . disk block assigned to it + . data loaded from the disk + . pending requests for lock + + But once JNODE_HEARD_BANSHEE bit set, last call to unlock_znode() does node + deletion. Node deletion includes two phases. First all ways to get + references to that znode (sibling and parent links and hash lookup using + block number stored in parent node) should be deleted -- it is done through + sibling_list_remove(), also we assume that nobody uses down link from + parent node due to its nonexistence or proper parent node locking and + nobody uses parent pointers from children due to absence of them. Second we + invalidate all pending lock requests which still are on znode's lock + request queue, this is done by reiser4_invalidate_lock(). Another + JNODE_IS_DYING znode status bit is used to invalidate pending lock requests. + Once it set all requesters are forced to return -EINVAL from + longterm_lock_znode(). Future locking attempts are not possible because all + ways to get references to that znode are removed already. Last, node is + uncaptured from transaction. + + When last reference to the dying znode is just about to be released, + block number for this lock is released and znode is removed from the + hash table. + + Now znode can be recycled. + + [it's possible to free bitmap block and remove znode from the hash + table when last lock is released. This will result in having + referenced but completely orphaned znode] + + 6. Limbo + + As have been mentioned above znodes with reference counter 0 are + still cached in a hash table. Once memory pressure increases they are + purged out of there [this requires something like LRU list for + efficient implementation. LRU list would also greatly simplify + implementation of coord cache that would in this case morph to just + scanning some initial segment of LRU list]. Data loaded into + unreferenced znode are flushed back to the durable storage if + necessary and memory is freed. Znodes themselves can be recycled at + this point too. 
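+
+   To make the reference discipline above concrete, a typical caller
+   looks roughly like the sketch below (illustration only: long term
+   locking and error handling are left out, and the gfp mask is just
+   whatever the current context provides):
+
+	node = zget(tree, &blocknr, parent, level,
+		    reiser4_ctx_gfp_mask_get());        x_count is held
+	if (!IS_ERR(node)) {
+		ret = zload(node);                      d_count goes up
+		if (ret == 0) {
+			... read or modify zdata(node) ...
+			zrelse(node);                   d_count goes down
+		}
+		zput(node);                             x_count goes down
+	}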
+ +*/ + +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/plugin_header.h" +#include "plugin/node/node.h" +#include "plugin/plugin.h" +#include "txnmgr.h" +#include "jnode.h" +#include "znode.h" +#include "block_alloc.h" +#include "tree.h" +#include "tree_walk.h" +#include "super.h" +#include "reiser4.h" + +#include <linux/pagemap.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/err.h> + +static z_hash_table *get_htable(reiser4_tree *, + const reiser4_block_nr * const blocknr); +static z_hash_table *znode_get_htable(const znode *); +static void zdrop(znode *); + +/* hash table support */ + +/* compare two block numbers for equality. Used by hash-table macros */ +static inline int +blknreq(const reiser4_block_nr * b1, const reiser4_block_nr * b2) +{ + assert("nikita-534", b1 != NULL); + assert("nikita-535", b2 != NULL); + + return *b1 == *b2; +} + +/* Hash znode by block number. Used by hash-table macros */ +/* Audited by: umka (2002.06.11) */ +static inline __u32 +blknrhashfn(z_hash_table * table, const reiser4_block_nr * b) +{ + assert("nikita-536", b != NULL); + + return *b & (REISER4_ZNODE_HASH_TABLE_SIZE - 1); +} + +/* The hash table definition */ +#define KMALLOC(size) reiser4_vmalloc(size) +#define KFREE(ptr, size) vfree(ptr) +TYPE_SAFE_HASH_DEFINE(z, znode, reiser4_block_nr, zjnode.key.z, zjnode.link.z, + blknrhashfn, blknreq); +#undef KFREE +#undef KMALLOC + +/* slab for znodes */ +static struct kmem_cache *znode_cache; + +int znode_shift_order; + +/** + * init_znodes - create znode cache + * + * Initializes slab cache of znodes. It is part of reiser4 module initialization. + */ +int init_znodes(void) +{ + znode_cache = kmem_cache_create("znode", sizeof(znode), 0, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT, NULL); + if (znode_cache == NULL) + return RETERR(-ENOMEM); + + for (znode_shift_order = 0; (1 << znode_shift_order) < sizeof(znode); + ++znode_shift_order); + --znode_shift_order; + return 0; +} + +/** + * done_znodes - delete znode cache + * + * This is called on reiser4 module unloading or system shutdown. 
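+ *
+ * A minimal sketch of the expected pairing with init_znodes(), assuming a
+ * hypothetical caller on the module init/exit path:
+ *
+ *	ret = init_znodes();
+ *	if (ret != 0)
+ *		return ret;
+ *	...
+ *	done_znodes();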
+ */ +void done_znodes(void) +{ + destroy_reiser4_cache(&znode_cache); +} + +/* call this to initialise tree of znodes */ +int znodes_tree_init(reiser4_tree * tree /* tree to initialise znodes for */ ) +{ + int result; + assert("umka-050", tree != NULL); + + rwlock_init(&tree->dk_lock); + + result = z_hash_init(&tree->zhash_table, REISER4_ZNODE_HASH_TABLE_SIZE); + if (result != 0) + return result; + result = z_hash_init(&tree->zfake_table, REISER4_ZNODE_HASH_TABLE_SIZE); + return result; +} + +/* free this znode */ +void zfree(znode * node /* znode to free */ ) +{ + assert("nikita-465", node != NULL); + assert("nikita-2120", znode_page(node) == NULL); + assert("nikita-2301", list_empty_careful(&node->lock.owners)); + assert("nikita-2302", list_empty_careful(&node->lock.requestors)); + assert("nikita-2663", (list_empty_careful(&ZJNODE(node)->capture_link) && + NODE_LIST(ZJNODE(node)) == NOT_CAPTURED)); + assert("nikita-3220", list_empty(&ZJNODE(node)->jnodes)); + assert("nikita-3293", !znode_is_right_connected(node)); + assert("nikita-3294", !znode_is_left_connected(node)); + assert("nikita-3295", node->left == NULL); + assert("nikita-3296", node->right == NULL); + + /* not yet phash_jnode_destroy(ZJNODE(node)); */ + + kmem_cache_free(znode_cache, node); +} + +/* call this to free tree of znodes */ +void znodes_tree_done(reiser4_tree * tree /* tree to finish with znodes of */ ) +{ + znode *node; + znode *next; + z_hash_table *ztable; + + /* scan znode hash-tables and kill all znodes, then free hash tables + * themselves. */ + + assert("nikita-795", tree != NULL); + + ztable = &tree->zhash_table; + + if (ztable->_table != NULL) { + for_all_in_htable(ztable, z, node, next) { + node->c_count = 0; + node->in_parent.node = NULL; + assert("nikita-2179", atomic_read(&ZJNODE(node)->x_count) == 0); + zdrop(node); + } + + z_hash_done(&tree->zhash_table); + } + + ztable = &tree->zfake_table; + + if (ztable->_table != NULL) { + for_all_in_htable(ztable, z, node, next) { + node->c_count = 0; + node->in_parent.node = NULL; + assert("nikita-2179", atomic_read(&ZJNODE(node)->x_count) == 0); + zdrop(node); + } + + z_hash_done(&tree->zfake_table); + } +} + +/* ZNODE STRUCTURES */ + +/* allocate fresh znode */ +znode *zalloc(gfp_t gfp_flag /* allocation flag */ ) +{ + znode *node; + + node = kmem_cache_alloc(znode_cache, gfp_flag); + return node; +} + +/* Initialize fields of znode + @node: znode to initialize; + @parent: parent znode; + @tree: tree we are in. */ +void zinit(znode * node, const znode * parent, reiser4_tree * tree) +{ + assert("nikita-466", node != NULL); + assert("umka-268", current_tree != NULL); + + memset(node, 0, sizeof *node); + + assert("umka-051", tree != NULL); + + jnode_init(&node->zjnode, tree, JNODE_FORMATTED_BLOCK); + reiser4_init_lock(&node->lock); + init_parent_coord(&node->in_parent, parent); +} + +/* + * remove znode from indices. This is called jput() when last reference on + * znode is released. + */ +void znode_remove(znode * node /* znode to remove */ , reiser4_tree * tree) +{ + assert("nikita-2108", node != NULL); + assert("nikita-470", node->c_count == 0); + assert_rw_write_locked(&(tree->tree_lock)); + + /* remove reference to this znode from cbk cache */ + cbk_cache_invalidate(node, tree); + + /* update c_count of parent */ + if (znode_parent(node) != NULL) { + assert("nikita-472", znode_parent(node)->c_count > 0); + /* father, onto your hands I forward my spirit... */ + znode_parent(node)->c_count--; + node->in_parent.node = NULL; + } else { + /* orphaned znode?! 
Root? */ + } + + /* remove znode from hash-table */ + z_hash_remove_rcu(znode_get_htable(node), node); +} + +/* zdrop() -- Remove znode from the tree. + + This is called when znode is removed from the memory. */ +static void zdrop(znode * node /* znode to finish with */ ) +{ + jdrop(ZJNODE(node)); +} + +/* + * put znode into right place in the hash table. This is called by relocate + * code. + */ +int znode_rehash(znode * node /* node to rehash */ , + const reiser4_block_nr * new_block_nr /* new block number */ ) +{ + z_hash_table *oldtable; + z_hash_table *newtable; + reiser4_tree *tree; + + assert("nikita-2018", node != NULL); + + tree = znode_get_tree(node); + oldtable = znode_get_htable(node); + newtable = get_htable(tree, new_block_nr); + + write_lock_tree(tree); + /* remove znode from hash-table */ + z_hash_remove_rcu(oldtable, node); + + /* assertion no longer valid due to RCU */ + /* assert("nikita-2019", z_hash_find(newtable, new_block_nr) == NULL); */ + + /* update blocknr */ + znode_set_block(node, new_block_nr); + node->zjnode.key.z = *new_block_nr; + + /* insert it into hash */ + z_hash_insert_rcu(newtable, node); + write_unlock_tree(tree); + return 0; +} + +/* ZNODE LOOKUP, GET, PUT */ + +/* zlook() - get znode with given block_nr in a hash table or return NULL + + If result is non-NULL then the znode's x_count is incremented. Internal version + accepts pre-computed hash index. The hash table is accessed under caller's + tree->hash_lock. +*/ +znode *zlook(reiser4_tree * tree, const reiser4_block_nr * const blocknr) +{ + znode *result; + __u32 hash; + z_hash_table *htable; + + assert("jmacd-506", tree != NULL); + assert("jmacd-507", blocknr != NULL); + + htable = get_htable(tree, blocknr); + hash = blknrhashfn(htable, blocknr); + + rcu_read_lock(); + result = z_hash_find_index(htable, hash, blocknr); + + if (result != NULL) { + add_x_ref(ZJNODE(result)); + result = znode_rip_check(tree, result); + } + rcu_read_unlock(); + + return result; +} + +/* return hash table where znode with block @blocknr is (or should be) + * stored */ +static z_hash_table *get_htable(reiser4_tree * tree, + const reiser4_block_nr * const blocknr) +{ + z_hash_table *table; + if (is_disk_addr_unallocated(blocknr)) + table = &tree->zfake_table; + else + table = &tree->zhash_table; + return table; +} + +/* return hash table where znode @node is (or should be) stored */ +static z_hash_table *znode_get_htable(const znode * node) +{ + return get_htable(znode_get_tree(node), znode_get_block(node)); +} + +/* zget() - get znode from hash table, allocating it if necessary. + + First a call to zlook, locating a x-referenced znode if one + exists. If znode is not found, allocate new one and return. Result + is returned with x_count reference increased. + + LOCKS TAKEN: TREE_LOCK, ZNODE_LOCK + LOCK ORDERING: NONE +*/ +znode *zget(reiser4_tree * tree, + const reiser4_block_nr * const blocknr, + znode * parent, tree_level level, gfp_t gfp_flag) +{ + znode *result; + __u32 hashi; + + z_hash_table *zth; + + assert("jmacd-512", tree != NULL); + assert("jmacd-513", blocknr != NULL); + assert("jmacd-514", level < REISER4_MAX_ZTREE_HEIGHT); + + zth = get_htable(tree, blocknr); + hashi = blknrhashfn(zth, blocknr); + + /* NOTE-NIKITA address-as-unallocated-blocknr still is not + implemented. */ + + z_hash_prefetch_bucket(zth, hashi); + + rcu_read_lock(); + /* Find a matching BLOCKNR in the hash table. If the znode is found, + we obtain an reference (x_count) but the znode remains unlocked. 
+ Have to worry about race conditions later. */ + result = z_hash_find_index(zth, hashi, blocknr); + /* According to the current design, the hash table lock protects new + znode references. */ + if (result != NULL) { + add_x_ref(ZJNODE(result)); + /* NOTE-NIKITA it should be so, but special case during + creation of new root makes such assertion highly + complicated. */ + assert("nikita-2131", 1 || znode_parent(result) == parent || + (ZF_ISSET(result, JNODE_ORPHAN) + && (znode_parent(result) == NULL))); + result = znode_rip_check(tree, result); + } + + rcu_read_unlock(); + + if (!result) { + znode *shadow; + + result = zalloc(gfp_flag); + if (!result) { + return ERR_PTR(RETERR(-ENOMEM)); + } + + zinit(result, parent, tree); + ZJNODE(result)->blocknr = *blocknr; + ZJNODE(result)->key.z = *blocknr; + result->level = level; + + write_lock_tree(tree); + + shadow = z_hash_find_index(zth, hashi, blocknr); + if (unlikely(shadow != NULL && !ZF_ISSET(shadow, JNODE_RIP))) { + jnode_list_remove(ZJNODE(result)); + zfree(result); + result = shadow; + } else { + result->version = znode_build_version(tree); + z_hash_insert_index_rcu(zth, hashi, result); + + if (parent != NULL) + ++parent->c_count; + } + + add_x_ref(ZJNODE(result)); + + write_unlock_tree(tree); + } + + assert("intelfx-6", + ergo(!reiser4_blocknr_is_fake(blocknr) && *blocknr != 0, + reiser4_check_block(blocknr, 1))); + + /* Check for invalid tree level, return -EIO */ + if (unlikely(znode_get_level(result) != level)) { + warning("jmacd-504", + "Wrong level for cached block %llu: %i expecting %i", + (unsigned long long)(*blocknr), znode_get_level(result), + level); + zput(result); + return ERR_PTR(RETERR(-EIO)); + } + + assert("nikita-1227", znode_invariant(result)); + + return result; +} + +/* ZNODE PLUGINS/DATA */ + +/* "guess" plugin for node loaded from the disk. Plugin id of node plugin is + stored at the fixed offset from the beginning of the node. 
*/ +static node_plugin *znode_guess_plugin(const znode * node /* znode to guess + * plugin of */ ) +{ + reiser4_tree *tree; + + assert("nikita-1053", node != NULL); + assert("nikita-1055", zdata(node) != NULL); + + tree = znode_get_tree(node); + assert("umka-053", tree != NULL); + + if (reiser4_is_set(tree->super, REISER4_ONE_NODE_PLUGIN)) { + return tree->nplug; + } else { + return node_plugin_by_disk_id + (tree, &((common_node_header *) zdata(node))->plugin_id); +#ifdef GUESS_EXISTS + reiser4_plugin *plugin; + + /* NOTE-NIKITA add locking here when dynamic plugins will be + * implemented */ + for_all_plugins(REISER4_NODE_PLUGIN_TYPE, plugin) { + if ((plugin->u.node.guess != NULL) + && plugin->u.node.guess(node)) + return plugin; + } + warning("nikita-1057", "Cannot guess node plugin"); + print_znode("node", node); + return NULL; +#endif + } +} + +/* parse node header and install ->node_plugin */ +int zparse(znode * node /* znode to parse */ ) +{ + int result; + + assert("nikita-1233", node != NULL); + assert("nikita-2370", zdata(node) != NULL); + + if (node->nplug == NULL) { + node_plugin *nplug; + + nplug = znode_guess_plugin(node); + if (likely(nplug != NULL)) { + result = nplug->parse(node); + if (likely(result == 0)) + node->nplug = nplug; + } else { + result = RETERR(-EIO); + } + } else + result = 0; + return result; +} + +/* zload with readahead */ +int zload_ra(znode * node /* znode to load */ , ra_info_t * info) +{ + int result; + + assert("nikita-484", node != NULL); + assert("nikita-1377", znode_invariant(node)); + assert("jmacd-7771", !znode_above_root(node)); + assert("nikita-2125", atomic_read(&ZJNODE(node)->x_count) > 0); + assert("nikita-3016", reiser4_schedulable()); + + if (info) + formatted_readahead(node, info); + + result = jload(ZJNODE(node)); + assert("nikita-1378", znode_invariant(node)); + return result; +} + +/* load content of node into memory */ +int zload(znode *node) +{ + return zload_ra(node, NULL); +} + +/* call node plugin to initialise newly allocated node. */ +int zinit_new(znode * node /* znode to initialise */ , gfp_t gfp_flags) +{ + return jinit_new(ZJNODE(node), gfp_flags); +} + +/* drop reference to node data. When last reference is dropped, data are + unloaded. 
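+
+   Every successful zload()/zload_ra() must eventually be balanced by a
+   zrelse().  For callers that keep node data pinned across several
+   steps, the load_count helpers further down in this file wrap that
+   pairing; a sketch of their intended use (illustration only):
+
+	load_count lh;
+
+	init_load_count(&lh);
+	ret = incr_load_count_znode(&lh, node);     zload()s the node
+	if (ret == 0) {
+		... use zdata(node) ...
+	}
+	done_load_count(&lh);                       zrelse()s whatever was
+	                                            loaded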
*/ +void zrelse(znode * node /* znode to release references to */ ) +{ + assert("nikita-1381", znode_invariant(node)); + jrelse(ZJNODE(node)); +} + +/* returns free space in node */ +unsigned znode_free_space(znode * node /* znode to query */ ) +{ + assert("nikita-852", node != NULL); + return node_plugin_by_node(node)->free_space(node); +} + +/* left delimiting key of znode */ +reiser4_key *znode_get_rd_key(znode * node /* znode to query */ ) +{ + assert("nikita-958", node != NULL); + assert_rw_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3067", LOCK_CNT_GTZ(rw_locked_dk)); + assert("nikita-30671", node->rd_key_version != 0); + return &node->rd_key; +} + +/* right delimiting key of znode */ +reiser4_key *znode_get_ld_key(znode * node /* znode to query */ ) +{ + assert("nikita-974", node != NULL); + assert_rw_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3068", LOCK_CNT_GTZ(rw_locked_dk)); + assert("nikita-30681", node->ld_key_version != 0); + return &node->ld_key; +} + +ON_DEBUG(atomic_t delim_key_version = ATOMIC_INIT(0); + ) + +/* update right-delimiting key of @node */ +reiser4_key *znode_set_rd_key(znode * node, const reiser4_key * key) +{ + assert("nikita-2937", node != NULL); + assert("nikita-2939", key != NULL); + assert_rw_write_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3069", LOCK_CNT_GTZ(write_locked_dk)); + assert("nikita-2944", + znode_is_any_locked(node) || + znode_get_level(node) != LEAF_LEVEL || + keyge(key, &node->rd_key) || + keyeq(&node->rd_key, reiser4_min_key()) || + ZF_ISSET(node, JNODE_HEARD_BANSHEE)); + + node->rd_key = *key; + ON_DEBUG(node->rd_key_version = atomic_inc_return(&delim_key_version)); + return &node->rd_key; +} + +/* update left-delimiting key of @node */ +reiser4_key *znode_set_ld_key(znode * node, const reiser4_key * key) +{ + assert("nikita-2940", node != NULL); + assert("nikita-2941", key != NULL); + assert_rw_write_locked(&(znode_get_tree(node)->dk_lock)); + assert("nikita-3070", LOCK_CNT_GTZ(write_locked_dk)); + assert("nikita-2943", + znode_is_any_locked(node) || keyeq(&node->ld_key, + reiser4_min_key())); + + node->ld_key = *key; + ON_DEBUG(node->ld_key_version = atomic_inc_return(&delim_key_version)); + return &node->ld_key; +} + +/* true if @key is inside key range for @node */ +int znode_contains_key(znode * node /* znode to look in */ , + const reiser4_key * key /* key to look for */ ) +{ + assert("nikita-1237", node != NULL); + assert("nikita-1238", key != NULL); + + /* left_delimiting_key <= key <= right_delimiting_key */ + return keyle(znode_get_ld_key(node), key) + && keyle(key, znode_get_rd_key(node)); +} + +/* same as znode_contains_key(), but lock dk lock */ +int znode_contains_key_lock(znode * node /* znode to look in */ , + const reiser4_key * key /* key to look for */ ) +{ + int result; + + assert("umka-056", node != NULL); + assert("umka-057", key != NULL); + + read_lock_dk(znode_get_tree(node)); + result = znode_contains_key(node, key); + read_unlock_dk(znode_get_tree(node)); + return result; +} + +/* get parent pointer, assuming tree is not locked */ +znode *znode_parent_nolock(const znode * node /* child znode */ ) +{ + assert("nikita-1444", node != NULL); + return node->in_parent.node; +} + +/* get parent pointer of znode */ +znode *znode_parent(const znode * node /* child znode */ ) +{ + assert("nikita-1226", node != NULL); + assert("nikita-1406", LOCK_CNT_GTZ(rw_locked_tree)); + return znode_parent_nolock(node); +} + +/* detect uber znode used to protect in-superblock tree root 
pointer */ +int znode_above_root(const znode * node /* znode to query */ ) +{ + assert("umka-059", node != NULL); + + return disk_addr_eq(&ZJNODE(node)->blocknr, &UBER_TREE_ADDR); +} + +/* check that @node is root---that its block number is recorder in the tree as + that of root node */ +#if REISER4_DEBUG +static int znode_is_true_root(const znode * node /* znode to query */ ) +{ + assert("umka-060", node != NULL); + assert("umka-061", current_tree != NULL); + + return disk_addr_eq(znode_get_block(node), + &znode_get_tree(node)->root_block); +} +#endif + +/* check that @node is root */ +int znode_is_root(const znode * node /* znode to query */ ) +{ + return znode_get_level(node) == znode_get_tree(node)->height; +} + +/* Returns true is @node was just created by zget() and wasn't ever loaded + into memory. */ +/* NIKITA-HANS: yes */ +int znode_just_created(const znode * node) +{ + assert("nikita-2188", node != NULL); + return (znode_page(node) == NULL); +} + +/* obtain updated ->znode_epoch. See seal.c for description. */ +__u64 znode_build_version(reiser4_tree * tree) +{ + __u64 result; + + spin_lock(&tree->epoch_lock); + result = ++tree->znode_epoch; + spin_unlock(&tree->epoch_lock); + return result; +} + +void init_load_count(load_count * dh) +{ + assert("nikita-2105", dh != NULL); + memset(dh, 0, sizeof *dh); +} + +void done_load_count(load_count * dh) +{ + assert("nikita-2106", dh != NULL); + if (dh->node != NULL) { + for (; dh->d_ref > 0; --dh->d_ref) + zrelse(dh->node); + dh->node = NULL; + } +} + +static int incr_load_count(load_count * dh) +{ + int result; + + assert("nikita-2110", dh != NULL); + assert("nikita-2111", dh->node != NULL); + + result = zload(dh->node); + if (result == 0) + ++dh->d_ref; + return result; +} + +int incr_load_count_znode(load_count * dh, znode * node) +{ + assert("nikita-2107", dh != NULL); + assert("nikita-2158", node != NULL); + assert("nikita-2109", + ergo(dh->node != NULL, (dh->node == node) || (dh->d_ref == 0))); + + dh->node = node; + return incr_load_count(dh); +} + +int incr_load_count_jnode(load_count * dh, jnode * node) +{ + if (jnode_is_znode(node)) { + return incr_load_count_znode(dh, JZNODE(node)); + } + return 0; +} + +void copy_load_count(load_count * new, load_count * old) +{ + int ret = 0; + done_load_count(new); + new->node = old->node; + new->d_ref = 0; + + while ((new->d_ref < old->d_ref) && (ret = incr_load_count(new)) == 0) { + } + + assert("jmacd-87589", ret == 0); +} + +void move_load_count(load_count * new, load_count * old) +{ + done_load_count(new); + new->node = old->node; + new->d_ref = old->d_ref; + old->node = NULL; + old->d_ref = 0; +} + +/* convert parent pointer into coord */ +void parent_coord_to_coord(const parent_coord_t * pcoord, coord_t * coord) +{ + assert("nikita-3204", pcoord != NULL); + assert("nikita-3205", coord != NULL); + + coord_init_first_unit_nocheck(coord, pcoord->node); + coord_set_item_pos(coord, pcoord->item_pos); + coord->between = AT_UNIT; +} + +/* pack coord into parent_coord_t */ +void coord_to_parent_coord(const coord_t * coord, parent_coord_t * pcoord) +{ + assert("nikita-3206", pcoord != NULL); + assert("nikita-3207", coord != NULL); + + pcoord->node = coord->node; + pcoord->item_pos = coord->item_pos; +} + +/* Initialize a parent hint pointer. 
(parent hint pointer is a field in znode, + look for comments there) */ +void init_parent_coord(parent_coord_t * pcoord, const znode * node) +{ + pcoord->node = (znode *) node; + pcoord->item_pos = (unsigned short)~0; +} + +#if REISER4_DEBUG + +/* debugging aid: znode invariant */ +static int znode_invariant_f(const znode * node /* znode to check */ , + char const **msg /* where to store error + * message, if any */ ) +{ +#define _ergo(ant, con) \ + ((*msg) = "{" #ant "} ergo {" #con "}", ergo((ant), (con))) + +#define _equi(e1, e2) \ + ((*msg) = "{" #e1 "} <=> {" #e2 "}", equi((e1), (e2))) + +#define _check(exp) ((*msg) = #exp, (exp)) + + return jnode_invariant_f(ZJNODE(node), msg) && + /* [znode-fake] invariant */ + /* fake znode doesn't have a parent, and */ + _ergo(znode_get_level(node) == 0, znode_parent(node) == NULL) && + /* there is another way to express this very check, and */ + _ergo(znode_above_root(node), znode_parent(node) == NULL) && + /* it has special block number, and */ + _ergo(znode_get_level(node) == 0, + disk_addr_eq(znode_get_block(node), &UBER_TREE_ADDR)) && + /* it is the only znode with such block number, and */ + _ergo(!znode_above_root(node) && znode_is_loaded(node), + !disk_addr_eq(znode_get_block(node), &UBER_TREE_ADDR)) && + /* it is parent of the tree root node */ + _ergo(znode_is_true_root(node), + znode_above_root(znode_parent(node))) && + /* [znode-level] invariant */ + /* level of parent znode is one larger than that of child, + except for the fake znode, and */ + _ergo(znode_parent(node) && !znode_above_root(znode_parent(node)), + znode_get_level(znode_parent(node)) == + znode_get_level(node) + 1) && + /* left neighbor is at the same level, and */ + _ergo(znode_is_left_connected(node) && node->left != NULL, + znode_get_level(node) == znode_get_level(node->left)) && + /* right neighbor is at the same level */ + _ergo(znode_is_right_connected(node) && node->right != NULL, + znode_get_level(node) == znode_get_level(node->right)) && + /* [znode-connected] invariant */ + _ergo(node->left != NULL, znode_is_left_connected(node)) && + _ergo(node->right != NULL, znode_is_right_connected(node)) && + _ergo(!znode_is_root(node) && node->left != NULL, + znode_is_right_connected(node->left) && + node->left->right == node) && + _ergo(!znode_is_root(node) && node->right != NULL, + znode_is_left_connected(node->right) && + node->right->left == node) && + /* [znode-c_count] invariant */ + /* for any znode, c_count of its parent is greater than 0 */ + _ergo(znode_parent(node) != NULL && + !znode_above_root(znode_parent(node)), + znode_parent(node)->c_count > 0) && + /* leaves don't have children */ + _ergo(znode_get_level(node) == LEAF_LEVEL, + node->c_count == 0) && + _check(node->zjnode.jnodes.prev != NULL) && + _check(node->zjnode.jnodes.next != NULL) && + /* orphan doesn't have a parent */ + _ergo(ZF_ISSET(node, JNODE_ORPHAN), znode_parent(node) == 0) && + /* [znode-modify] invariant */ + /* if znode is not write-locked, its checksum remains + * invariant */ + /* unfortunately, zlock is unordered w.r.t. jnode_lock, so we + * cannot check this. 
*/ + /* [znode-refs] invariant */ + /* only referenced znode can be long-term locked */ + _ergo(znode_is_locked(node), + atomic_read(&ZJNODE(node)->x_count) != 0); +} + +/* debugging aid: check znode invariant and panic if it doesn't hold */ +int znode_invariant(znode * node /* znode to check */ ) +{ + char const *failed_msg; + int result; + + assert("umka-063", node != NULL); + assert("umka-064", current_tree != NULL); + + spin_lock_znode(node); + read_lock_tree(znode_get_tree(node)); + result = znode_invariant_f(node, &failed_msg); + if (!result) { + /* print_znode("corrupted node", node); */ + warning("jmacd-555", "Condition %s failed", failed_msg); + } + read_unlock_tree(znode_get_tree(node)); + spin_unlock_znode(node); + return result; +} + +/* return non-0 iff data are loaded into znode */ +int znode_is_loaded(const znode * node /* znode to query */ ) +{ + assert("nikita-497", node != NULL); + return jnode_is_loaded(ZJNODE(node)); +} + +unsigned long znode_times_locked(const znode * z) +{ + return z->times_locked; +} + +#endif /* REISER4_DEBUG */ + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/fs/reiser4/znode.h b/fs/reiser4/znode.h new file mode 100644 index 000000000000..613377ef60ca --- /dev/null +++ b/fs/reiser4/znode.h @@ -0,0 +1,435 @@ +/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by + * reiser4/README */ + +/* Declaration of znode (Zam's node). See znode.c for more details. */ + +#ifndef __ZNODE_H__ +#define __ZNODE_H__ + +#include "forward.h" +#include "debug.h" +#include "dformat.h" +#include "key.h" +#include "coord.h" +#include "plugin/node/node.h" +#include "jnode.h" +#include "lock.h" +#include "readahead.h" + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */ +#include <asm/atomic.h> + +/* znode tracks its position within parent (internal item in a parent node, + * that contains znode's block number). */ +typedef struct parent_coord { + znode *node; + pos_in_node_t item_pos; +} parent_coord_t; + +/* &znode - node in a reiser4 tree. + + NOTE-NIKITA fields in this struct have to be rearranged (later) to reduce + cacheline pressure. + + Locking: + + Long term: data in a disk node attached to this znode are protected + by long term, deadlock aware lock ->lock; + + Spin lock: the following fields are protected by the spin lock: + + ->lock + + Following fields are protected by the global tree lock: + + ->left + ->right + ->in_parent + ->c_count + + Following fields are protected by the global delimiting key lock (dk_lock): + + ->ld_key (to update ->ld_key long-term lock on the node is also required) + ->rd_key + + Following fields are protected by the long term lock: + + ->nr_items + + ->node_plugin is never changed once set. This means that after code made + itself sure that field is valid it can be accessed without any additional + locking. + + ->level is immutable. + + Invariants involving this data-type: + + [znode-fake] + [znode-level] + [znode-connected] + [znode-c_count] + [znode-refs] + [jnode-refs] + [jnode-queued] + [znode-modify] + + For this to be made into a clustering or NUMA filesystem, we would want to eliminate all of the global locks. + Suggestions for how to do that are desired.*/ +struct znode { + /* Embedded jnode. */ + jnode zjnode; + + /* contains three subfields, node, pos_in_node, and pos_in_unit. 
+ + pos_in_node and pos_in_unit are only hints that are cached to + speed up lookups during balancing. They are not required to be up to + date. Synched in find_child_ptr(). + + This value allows us to avoid expensive binary searches. + + in_parent->node points to the parent of this node, and is NOT a + hint. + */ + parent_coord_t in_parent; + + /* + * sibling list pointers + */ + + /* left-neighbor */ + znode *left; + /* right-neighbor */ + znode *right; + + /* long term lock on node content. This lock supports deadlock + detection. See lock.c + */ + zlock lock; + + /* You cannot remove from memory a node that has children in + memory. This is because we rely on the fact that parent of given + node can always be reached without blocking for io. When reading a + node into memory you must increase the c_count of its parent, when + removing it from memory you must decrease the c_count. This makes + the code simpler, and the cases where it is suboptimal are truly + obscure. + */ + int c_count; + + /* plugin of node attached to this znode. NULL if znode is not + loaded. */ + node_plugin *nplug; + + /* version of znode data. This is increased on each modification. This + * is necessary to implement seals (see seal.[ch]) efficiently. */ + __u64 version; + + /* left delimiting key. Necessary to efficiently perform + balancing with node-level locking. Kept in memory only. */ + reiser4_key ld_key; + /* right delimiting key. */ + reiser4_key rd_key; + + /* znode's tree level */ + __u16 level; + /* number of items in this node. This field is modified by node + * plugin. */ + __u16 nr_items; + +#if REISER4_DEBUG + void *creator; + reiser4_key first_key; + unsigned long times_locked; + int left_version; /* when node->left was updated */ + int right_version; /* when node->right was updated */ + int ld_key_version; /* when node->ld_key was updated */ + int rd_key_version; /* when node->rd_key was updated */ +#endif + +} __attribute__ ((aligned(16))); + +ON_DEBUG(extern atomic_t delim_key_version; + ) + +/* In general I think these macros should not be exposed. */ +#define znode_is_locked(node) (lock_is_locked(&node->lock)) +#define znode_is_rlocked(node) (lock_is_rlocked(&node->lock)) +#define znode_is_wlocked(node) (lock_is_wlocked(&node->lock)) +#define znode_is_wlocked_once(node) (lock_is_wlocked_once(&node->lock)) +#define znode_can_be_rlocked(node) (lock_can_be_rlocked(&node->lock)) +#define is_lock_compatible(node, mode) (lock_mode_compatible(&node->lock, mode)) +/* Macros for accessing the znode state. 
*/ +#define ZF_CLR(p,f) JF_CLR (ZJNODE(p), (f)) +#define ZF_ISSET(p,f) JF_ISSET(ZJNODE(p), (f)) +#define ZF_SET(p,f) JF_SET (ZJNODE(p), (f)) +extern znode *zget(reiser4_tree * tree, const reiser4_block_nr * const block, + znode * parent, tree_level level, gfp_t gfp_flag); +extern znode *zlook(reiser4_tree * tree, const reiser4_block_nr * const block); +extern int zload(znode * node); +extern int zload_ra(znode * node, ra_info_t * info); +extern int zinit_new(znode * node, gfp_t gfp_flags); +extern void zrelse(znode * node); +extern void znode_change_parent(znode * new_parent, reiser4_block_nr * block); +extern void znode_update_csum(znode *node); + +/* size of data in znode */ +static inline unsigned +znode_size(const znode * node UNUSED_ARG /* znode to query */ ) +{ + assert("nikita-1416", node != NULL); + return PAGE_SIZE; +} + +extern void parent_coord_to_coord(const parent_coord_t * pcoord, + coord_t * coord); +extern void coord_to_parent_coord(const coord_t * coord, + parent_coord_t * pcoord); +extern void init_parent_coord(parent_coord_t * pcoord, const znode * node); + +extern unsigned znode_free_space(znode * node); + +extern reiser4_key *znode_get_rd_key(znode * node); +extern reiser4_key *znode_get_ld_key(znode * node); + +extern reiser4_key *znode_set_rd_key(znode * node, const reiser4_key * key); +extern reiser4_key *znode_set_ld_key(znode * node, const reiser4_key * key); + +/* `connected' state checks */ +static inline int znode_is_right_connected(const znode * node) +{ + return ZF_ISSET(node, JNODE_RIGHT_CONNECTED); +} + +static inline int znode_is_left_connected(const znode * node) +{ + return ZF_ISSET(node, JNODE_LEFT_CONNECTED); +} + +static inline int znode_is_connected(const znode * node) +{ + return znode_is_right_connected(node) && znode_is_left_connected(node); +} + +extern int znode_shift_order; +extern int znode_rehash(znode * node, const reiser4_block_nr * new_block_nr); +extern void znode_remove(znode *, reiser4_tree *); +extern znode *znode_parent(const znode * node); +extern znode *znode_parent_nolock(const znode * node); +extern int znode_above_root(const znode * node); +extern int init_znode(jnode *node); +extern int init_znodes(void); +extern void done_znodes(void); +extern int znodes_tree_init(reiser4_tree * ztree); +extern void znodes_tree_done(reiser4_tree * ztree); +extern int znode_contains_key(znode * node, const reiser4_key * key); +extern int znode_contains_key_lock(znode * node, const reiser4_key * key); +extern unsigned znode_save_free_space(znode * node); +extern unsigned znode_recover_free_space(znode * node); +extern znode *zalloc(gfp_t gfp_flag); +extern void zinit(znode *, const znode * parent, reiser4_tree *); +extern int zparse(znode * node); + +extern int znode_just_created(const znode * node); + +extern void zfree(znode * node); + +#if REISER4_DEBUG +extern void print_znode(const char *prefix, const znode * node); +#else +#define print_znode( p, n ) noop +#endif + +/* Make it look like various znode functions exist instead of treating znodes as + jnodes in znode-specific code. 
*/ +#define znode_page(x) jnode_page ( ZJNODE(x) ) +#define zdata(x) jdata ( ZJNODE(x) ) +#define znode_get_block(x) jnode_get_block ( ZJNODE(x) ) +#define znode_created(x) jnode_created ( ZJNODE(x) ) +#define znode_set_created(x) jnode_set_created ( ZJNODE(x) ) +#define znode_convertible(x) jnode_convertible (ZJNODE(x)) +#define znode_set_convertible(x) jnode_set_convertible (ZJNODE(x)) + +#define znode_is_dirty(x) jnode_is_dirty ( ZJNODE(x) ) +#define znode_check_dirty(x) jnode_check_dirty ( ZJNODE(x) ) +#define znode_make_clean(x) jnode_make_clean ( ZJNODE(x) ) +#define znode_set_block(x, b) jnode_set_block ( ZJNODE(x), (b) ) + +#define spin_lock_znode(x) spin_lock_jnode ( ZJNODE(x) ) +#define spin_unlock_znode(x) spin_unlock_jnode ( ZJNODE(x) ) +#define spin_trylock_znode(x) spin_trylock_jnode ( ZJNODE(x) ) +#define spin_znode_is_locked(x) spin_jnode_is_locked ( ZJNODE(x) ) +#define spin_znode_is_not_locked(x) spin_jnode_is_not_locked ( ZJNODE(x) ) + +#if REISER4_DEBUG +extern int znode_x_count_is_protected(const znode * node); +extern int znode_invariant(znode * node); +#endif + +/* acquire reference to @node */ +static inline znode *zref(znode * node) +{ + /* change of x_count from 0 to 1 is protected by tree spin-lock */ + return JZNODE(jref(ZJNODE(node))); +} + +/* release reference to @node */ +static inline void zput(znode * node) +{ + assert("nikita-3564", znode_invariant(node)); + jput(ZJNODE(node)); +} + +/* get the level field for a znode */ +static inline tree_level znode_get_level(const znode * node) +{ + return node->level; +} + +/* get the level field for a jnode */ +static inline tree_level jnode_get_level(const jnode * node) +{ + if (jnode_is_znode(node)) + return znode_get_level(JZNODE(node)); + else + /* unformatted nodes are all at the LEAF_LEVEL and for + "semi-formatted" nodes like bitmaps, level doesn't matter. */ + return LEAF_LEVEL; +} + +/* true if jnode is on leaf level */ +static inline int jnode_is_leaf(const jnode * node) +{ + if (jnode_is_znode(node)) + return (znode_get_level(JZNODE(node)) == LEAF_LEVEL); + if (jnode_get_type(node) == JNODE_UNFORMATTED_BLOCK) + return 1; + return 0; +} + +/* return znode's tree */ +static inline reiser4_tree *znode_get_tree(const znode * node) +{ + assert("nikita-2692", node != NULL); + return jnode_get_tree(ZJNODE(node)); +} + +/* resolve race with zput */ +static inline znode *znode_rip_check(reiser4_tree * tree, znode * node) +{ + jnode *j; + + j = jnode_rip_sync(tree, ZJNODE(node)); + if (likely(j != NULL)) + node = JZNODE(j); + else + node = NULL; + return node; +} + +#if defined(REISER4_DEBUG) +int znode_is_loaded(const znode * node /* znode to query */ ); +#endif + +extern __u64 znode_build_version(reiser4_tree * tree); + +/* Data-handles. A data handle object manages pairing calls to zload() and zrelse(). We + must load the data for a node in many places. We could do this by simply calling + zload() everywhere, the difficulty arises when we must release the loaded data by + calling zrelse. In a function with many possible error/return paths, it requires extra + work to figure out which exit paths must call zrelse and those which do not. The data + handle automatically calls zrelse for every zload that it is responsible for. In that + sense, it acts much like a lock_handle. +*/ +typedef struct load_count { + znode *node; + int d_ref; +} load_count; + +extern void init_load_count(load_count * lc); /* Initialize a load_count set the current node to NULL. 
*/ +extern void done_load_count(load_count * dh); /* Finalize a load_count: call zrelse() if necessary */ +extern int incr_load_count_znode(load_count * dh, znode * node); /* Set the argument znode to the current node, call zload(). */ +extern int incr_load_count_jnode(load_count * dh, jnode * node); /* If the argument jnode is formatted, do the same as + * incr_load_count_znode, otherwise do nothing (unformatted nodes + * don't require zload/zrelse treatment). */ +extern void move_load_count(load_count * new, load_count * old); /* Move the contents of a load_count. Old handle is released. */ +extern void copy_load_count(load_count * new, load_count * old); /* Copy the contents of a load_count. Old handle remains held. */ + +/* Variable initializers for load_count. */ +#define INIT_LOAD_COUNT ( load_count * ){ .node = NULL, .d_ref = 0 } +#define INIT_LOAD_COUNT_NODE( n ) ( load_count ){ .node = ( n ), .d_ref = 0 } +/* A convenience macro for use in assertions or debug-only code, where loaded + data is only required to perform the debugging check. This macro + encapsulates an expression inside a pair of calls to zload()/zrelse(). */ +#define WITH_DATA( node, exp ) \ +({ \ + long __with_dh_result; \ + znode *__with_dh_node; \ + \ + __with_dh_node = ( node ); \ + __with_dh_result = zload( __with_dh_node ); \ + if( __with_dh_result == 0 ) { \ + __with_dh_result = ( long )( exp ); \ + zrelse( __with_dh_node ); \ + } \ + __with_dh_result; \ +}) + +/* Same as above, but accepts a return value in case zload fails. */ +#define WITH_DATA_RET( node, ret, exp ) \ +({ \ + int __with_dh_result; \ + znode *__with_dh_node; \ + \ + __with_dh_node = ( node ); \ + __with_dh_result = zload( __with_dh_node ); \ + if( __with_dh_result == 0 ) { \ + __with_dh_result = ( int )( exp ); \ + zrelse( __with_dh_node ); \ + } else \ + __with_dh_result = ( ret ); \ + __with_dh_result; \ +}) + +#define WITH_COORD(coord, exp) \ +({ \ + coord_t *__coord; \ + \ + __coord = (coord); \ + coord_clear_iplug(__coord); \ + WITH_DATA(__coord->node, exp); \ +}) + +#if REISER4_DEBUG +#define STORE_COUNTERS \ + reiser4_lock_cnt_info __entry_counters = \ + *reiser4_lock_counters() +#define CHECK_COUNTERS \ +ON_DEBUG_CONTEXT( \ +({ \ + __entry_counters.x_refs = reiser4_lock_counters() -> x_refs; \ + __entry_counters.t_refs = reiser4_lock_counters() -> t_refs; \ + __entry_counters.d_refs = reiser4_lock_counters() -> d_refs; \ + assert("nikita-2159", \ + !memcmp(&__entry_counters, reiser4_lock_counters(), \ + sizeof __entry_counters)); \ +}) ) + +#else +#define STORE_COUNTERS +#define CHECK_COUNTERS noop +#endif + +/* __ZNODE_H__ */ +#endif + +/* Make Linus happy. + Local variables: + c-indentation-style: "K&R" + mode-name: "LC" + c-basic-offset: 8 + tab-width: 8 + fill-column: 120 + End: +*/ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6799ccf5c37e..b64c1f3258e9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -272,6 +272,8 @@ enum positive_aop_returns { struct page; struct address_space; struct writeback_control; +struct wb_writeback_work; +struct bdi_writeback; /* * Write life time hint values. 
@@ -1845,6 +1847,14 @@ struct super_operations { void (*umount_begin) (struct super_block *); void (*umount_end) (struct super_block *, int); + long (*writeback_inodes)(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all); + void (*sync_inodes) (struct super_block *sb, + struct writeback_control *wbc); + int (*show_options)(struct seq_file *, struct dentry *); int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); @@ -2613,6 +2623,13 @@ extern int invalidate_inode_pages2(struct address_space *mapping); extern int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); extern int write_inode_now(struct inode *, int); +extern void writeback_skip_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb); +extern long generic_writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc, + struct wb_writeback_work *work, + bool flush_all); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait_keep_errors(struct address_space *mapping); @@ -2872,6 +2889,8 @@ extern char *file_path(struct file *, char *, int); #include <linux/err.h> /* needed for stackable file system support */ +extern loff_t default_llseek_unlocked(struct file *file, loff_t offset, + int whence); extern loff_t default_llseek(struct file *file, loff_t offset, int whence); extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence); @@ -2954,6 +2973,8 @@ extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); +ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, + loff_t *ppos); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, diff --git a/include/linux/mm.h b/include/linux/mm.h index d436543d0da2..1f3df994812f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1447,6 +1447,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); +int set_page_dirty_notag(struct page *page); void cancel_dirty_page(struct page *page); int clear_page_dirty_for_io(struct page *page); diff --git a/include/linux/sched.h b/include/linux/sched.h index a9a33245887e..b4844adbf46f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1507,6 +1507,7 @@ extern struct pid *cad_pid; /* * Per process flags */ +#define PF_FLUSHER 0x00000001 /* responsible for disk writeback */ #define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* Getting shut down */ #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index e12d92808e98..9d47f389a1a0 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -16,6 +16,12 @@ struct bio; DECLARE_PER_CPU(int, dirty_throttle_leaks); +static inline int is_flush_bd_task(struct task_struct *task) +{ + return task->flags & PF_FLUSHER; +} +#define current_is_flush_bd_task() is_flush_bd_task(current) + /* * The 1/4 region 
under the global dirty thresh is for smooth dirty throttling: * @@ -179,6 +185,26 @@ static inline void wb_domain_size_changed(struct wb_domain *dom) spin_unlock(&dom->lock); } +/* + * Passed into wb_writeback(), essentially a subset of writeback_control + */ +struct wb_writeback_work { + long nr_pages; + struct super_block *sb; + unsigned long *older_than_this; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages:1; + unsigned int for_kupdate:1; + unsigned int range_cyclic:1; + unsigned int for_background:1; + unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ + unsigned int auto_free:1; /* free on completion */ + enum wb_reason reason; /* why was writeback initiated? */ + + struct list_head list; /* pending work list */ + struct wb_completion *done; /* set if the caller waits */ +}; + /* * fs/fs-writeback.c */ diff --git a/mm/filemap.c b/mm/filemap.c index 6529ee3f29a3..86413adbdae6 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1690,6 +1690,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, return ret; } +EXPORT_SYMBOL(find_get_pages_range); /** * find_get_pages_contig - gang contiguous pagecache lookup diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0daa3446c4d7..0b87cc9658ed 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2661,6 +2661,35 @@ void account_page_redirty(struct page *page) } EXPORT_SYMBOL(account_page_redirty); +/* + * set_page_dirty_notag() -- similar to __set_page_dirty_nobuffers() + * except it doesn't tag the page dirty in the page-cache radix tree. + * This means that the address space using this cannot use the regular + * filemap ->writepages() helpers and must provide its own means of + * tracking and finding non-tagged dirty pages. + * + * NOTE: furthermore, this version also doesn't handle truncate races. 
+ */ +int set_page_dirty_notag(struct page *page) +{ + struct address_space *mapping = page->mapping; + + lock_page_memcg(page); + if (!TestSetPageDirty(page)) { + unsigned long flags; + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); + local_irq_save(flags); + account_page_dirtied(page, mapping); + local_irq_restore(flags); + unlock_page_memcg(page); + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return 1; + } + unlock_page_memcg(page); + return 0; +} +EXPORT_SYMBOL(set_page_dirty_notag); + /* * When a writepage implementation decides that it doesn't want to write this * page for some reason, it should redirty the locked page via diff --git a/mm/vmscan.c b/mm/vmscan.c index 3ccf3b12ab4f..b74602c8a71e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3170,7 +3170,11 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, pg_data_t *last_pgdat; struct zoneref *z; struct zone *zone; + void *saved; retry: + saved = current->journal_info; /* save journal info */ + current->journal_info = NULL; + delayacct_freepages_start(); if (global_reclaim(sc)) @@ -3206,6 +3210,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, } delayacct_freepages_end(); + /* restore journal info */ + current->journal_info = saved; if (sc->nr_reclaimed) return sc->nr_reclaimed; From c915ff6833506bafd33d6de523fea33ef243441c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 20:56:58 +0300 Subject: [PATCH 290/452] configs: add cruel config Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel.conf | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 kernel/configs/cruel.conf diff --git a/kernel/configs/cruel.conf b/kernel/configs/cruel.conf new file mode 100644 index 000000000000..990328d291a3 --- /dev/null +++ b/kernel/configs/cruel.conf @@ -0,0 +1,34 @@ +# CONFIG_FIVE_PA_FEATURE is not set +# CONFIG_SEC_RESTRICT_ROOTING is not set +# CONFIG_SECURITY_DEFEX is not set +# CONFIG_PROCA is not set +# CONFIG_SECURITY_DSMS is not set +# CONFIG_KPERFMON is not set +# CONFIG_MALI_KUTF is not set +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_LOCALVERSION="-Cruel" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_MALI_GATOR_SUPPORT=y +CONFIG_SECURITY_SELINUX_SWITCH=y +# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set +# CONFIG_DTC is not set +# CONFIG_ALWAYS_ENFORCE is not set +# CONFIG_ALWAYS_PERMIT is not set +# CONFIG_RD_GZIP is not set +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_CRAMFS is not set +# CONFIG_MQ_IOSCHED_DEADLINE is not set +# CONFIG_MQ_IOSCHED_KYBER is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_CC_WERROR is not set +# CONFIG_EXYNOS_NPU_PUBLISH_NPU_BUILD_VER is not set +# CONFIG_VISION_UNITTEST is not set +# CONFIG_CRYPTO_TEST is not set From f6a18370e0f514931a2de8e44d65ce22ae38689f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 20:57:32 +0300 Subject: [PATCH 291/452] configs: add magisk config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel+magisk.conf | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 kernel/configs/cruel+magisk.conf diff --git a/kernel/configs/cruel+magisk.conf b/kernel/configs/cruel+magisk.conf new 
file mode 100644 index 000000000000..cdea053dc42d --- /dev/null +++ b/kernel/configs/cruel+magisk.conf @@ -0,0 +1,14 @@ +# CONFIG_INITRAMFS_SKIP is not set +CONFIG_INITRAMFS_FORCE=y +CONFIG_INITRAMFS_SOURCE="usr/magisk/initramfs_list" +CONFIG_INITRAMFS_ROOT_UID=0 +CONFIG_INITRAMFS_ROOT_GID=0 +CONFIG_INITRAMFS_COMPRESSION_NONE=y +CONFIG_INITRAMFS_COMPRESSION="" +# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set +# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set +# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set +# CONFIG_INITRAMFS_COMPRESSION_XZ is not set +# CONFIG_INITRAMFS_COMPRESSION_LZO is not set +# CONFIG_INITRAMFS_COMPRESSION_LZ4 is not set +CONFIG_PROC_MAGISK_HIDE_MOUNT=y From c1953e40e34c0be9ca170543452c519223cf3f03 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 20:57:48 +0300 Subject: [PATCH 292/452] configs: add wireguard config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-wireguard.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-wireguard.conf diff --git a/kernel/configs/cruel-wireguard.conf b/kernel/configs/cruel-wireguard.conf new file mode 100644 index 000000000000..fb47f867aed7 --- /dev/null +++ b/kernel/configs/cruel-wireguard.conf @@ -0,0 +1,5 @@ +CONFIG_WIREGUARD=y +# CONFIG_WIREGUARD_DEBUG is not set +CONFIG_NET_UDP_TUNNEL=y +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set From 13599ac3005fe34d66f92a4fc8090b2acf1273fe Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 9 Feb 2020 23:00:36 +0300 Subject: [PATCH 293/452] configs: add nohardening config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel+nohardening.conf | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 kernel/configs/cruel+nohardening.conf diff --git a/kernel/configs/cruel+nohardening.conf b/kernel/configs/cruel+nohardening.conf new file mode 100644 index 000000000000..f500404a35d2 --- /dev/null +++ b/kernel/configs/cruel+nohardening.conf @@ -0,0 +1,6 @@ +# CONFIG_LOD_SEC is not set +# CONFIG_UH is not set +# CONFIG_UH_LKMAUTH is not set +# CONFIG_UH_LKM_BLOCK is not set +# CONFIG_RKP_CFP is not set +# CONFIG_RKP_CFP_JOPP is not set From e309a08074ef7c019d92afb93a96f94ad8986870 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 24 Jan 2020 18:29:05 +0300 Subject: [PATCH 294/452] configs: add 1000hz 300hz 100hz 50hz 25hz config presets Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-1000hz.conf | 3 +++ kernel/configs/cruel-100hz.conf | 3 +++ kernel/configs/cruel-25hz.conf | 3 +++ kernel/configs/cruel-300hz.conf | 3 +++ kernel/configs/cruel-50hz.conf | 3 +++ 5 files changed, 15 insertions(+) create mode 100644 kernel/configs/cruel-1000hz.conf create mode 100644 kernel/configs/cruel-100hz.conf create mode 100644 kernel/configs/cruel-25hz.conf create mode 100644 kernel/configs/cruel-300hz.conf create mode 100644 kernel/configs/cruel-50hz.conf diff --git a/kernel/configs/cruel-1000hz.conf b/kernel/configs/cruel-1000hz.conf new file mode 100644 index 000000000000..23b4b5cac3aa --- /dev/null +++ b/kernel/configs/cruel-1000hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 diff --git a/kernel/configs/cruel-100hz.conf b/kernel/configs/cruel-100hz.conf new file mode 100644 index 000000000000..04f36746f90a --- /dev/null +++ b/kernel/configs/cruel-100hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_100=y +CONFIG_HZ=100 diff --git 
a/kernel/configs/cruel-25hz.conf b/kernel/configs/cruel-25hz.conf new file mode 100644 index 000000000000..c93bc310d298 --- /dev/null +++ b/kernel/configs/cruel-25hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_25=y +CONFIG_HZ=25 diff --git a/kernel/configs/cruel-300hz.conf b/kernel/configs/cruel-300hz.conf new file mode 100644 index 000000000000..ecc39ed471c2 --- /dev/null +++ b/kernel/configs/cruel-300hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_300=y +CONFIG_HZ=300 diff --git a/kernel/configs/cruel-50hz.conf b/kernel/configs/cruel-50hz.conf new file mode 100644 index 000000000000..030d291776ee --- /dev/null +++ b/kernel/configs/cruel-50hz.conf @@ -0,0 +1,3 @@ +# CONFIG_HZ_250 is not set +CONFIG_HZ_50=y +CONFIG_HZ=50 From e9c66da44c494fe920b72b999c82a834f9ccc3ff Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 24 Jan 2020 18:29:29 +0300 Subject: [PATCH 295/452] configs: add nohardening2 config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-nohardening2.conf | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 kernel/configs/cruel-nohardening2.conf diff --git a/kernel/configs/cruel-nohardening2.conf b/kernel/configs/cruel-nohardening2.conf new file mode 100644 index 000000000000..7e86529ef234 --- /dev/null +++ b/kernel/configs/cruel-nohardening2.conf @@ -0,0 +1,10 @@ +CONFIG_COMPAT_BRK=y +# CONFIG_VMAP_STACK is not set +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +# CONFIG_ARM64_SSBD is not set +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_CC_STACKPROTECTOR_NONE=y +# CONFIG_CC_STACKPROTECTOR_STRONG is not set +# CONFIG_REFCOUNT_FULL is not set From 5fa8d301c67ebab4bacc23ea188d686fa33d950b Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 6 Feb 2020 00:31:52 +0300 Subject: [PATCH 296/452] configs: add iptables ttl/hop config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-ttl.conf | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 kernel/configs/cruel-ttl.conf diff --git a/kernel/configs/cruel-ttl.conf b/kernel/configs/cruel-ttl.conf new file mode 100644 index 000000000000..82ac544db198 --- /dev/null +++ b/kernel/configs/cruel-ttl.conf @@ -0,0 +1,4 @@ +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_TARGET_HL=y From 4b70d881213f6b4fd52384b66a2c61674dbb5a58 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 9 Feb 2020 01:27:58 +0300 Subject: [PATCH 297/452] configs: add cifs config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-cifs.conf | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 kernel/configs/cruel-cifs.conf diff --git a/kernel/configs/cruel-cifs.conf b/kernel/configs/cruel-cifs.conf new file mode 100644 index 000000000000..ce2b95933bd4 --- /dev/null +++ b/kernel/configs/cruel-cifs.conf @@ -0,0 +1,16 @@ +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_CIFS=y +CONFIG_CIFS_STATS=y +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +# CONFIG_CIFS_DEBUG is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_SMB311 is not set +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_MD4=y From 8bbe4592ad0b34b86dbbfbadd9396336c45e1367 
Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 12 Feb 2020 14:33:56 +0300 Subject: [PATCH 298/452] configs: add nodebug config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-nodebug.conf | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 kernel/configs/cruel-nodebug.conf diff --git a/kernel/configs/cruel-nodebug.conf b/kernel/configs/cruel-nodebug.conf new file mode 100644 index 000000000000..71fa3d6c46a7 --- /dev/null +++ b/kernel/configs/cruel-nodebug.conf @@ -0,0 +1,25 @@ +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_DEBUG_INFO is not set +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_BLOCK_SUPPORT_STLOG is not set +# CONFIG_BT_DEBUGFS is not set +# CONFIG_USB_DEBUG_DETAILED_LOG is not set +# CONFIG_MMC_SUPPORT_STLOG is not set +# CONFIG_ION_DEBUG_EVENT_RECORD is not set +# CONFIG_SEC_DEBUG_TSP_LOG is not set +CONFIG_MCPS_DEBUG_PRINTK=4 +# CONFIG_MCPS_DEBUG is not set +# CONFIG_EXYNOS_CORESIGHT is not set +# CONFIG_EXYNOS_DEBUG_TEST is not set +# CONFIG_SEC_BOOTSTAT is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_SDFAT_DEBUG is not set +# CONFIG_SDFAT_DBG_MSG is not set +# CONFIG_PMUCAL_DBG is not set From 46f29d576ffeabea205858ca28e11a25c33b25a5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 28 Feb 2020 02:16:07 +0300 Subject: [PATCH 299/452] configs: add cpu schedulers config presets Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-sched_conservative.conf | 2 ++ kernel/configs/cruel-sched_ondemand.conf | 2 ++ kernel/configs/cruel-sched_performance.conf | 1 + kernel/configs/cruel-sched_powersave.conf | 1 + kernel/configs/cruel-sched_userspace.conf | 1 + 5 files changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-sched_conservative.conf create mode 100644 kernel/configs/cruel-sched_ondemand.conf create mode 100644 kernel/configs/cruel-sched_performance.conf create mode 100644 kernel/configs/cruel-sched_powersave.conf create mode 100644 kernel/configs/cruel-sched_userspace.conf diff --git a/kernel/configs/cruel-sched_conservative.conf b/kernel/configs/cruel-sched_conservative.conf new file mode 100644 index 000000000000..55255be2eac5 --- /dev/null +++ b/kernel/configs/cruel-sched_conservative.conf @@ -0,0 +1,2 @@ +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y diff --git a/kernel/configs/cruel-sched_ondemand.conf b/kernel/configs/cruel-sched_ondemand.conf new file mode 100644 index 000000000000..4c5675beabdd --- /dev/null +++ b/kernel/configs/cruel-sched_ondemand.conf @@ -0,0 +1,2 @@ +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y diff --git a/kernel/configs/cruel-sched_performance.conf b/kernel/configs/cruel-sched_performance.conf new file mode 100644 index 000000000000..2bf548fc409a --- /dev/null +++ b/kernel/configs/cruel-sched_performance.conf @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y diff --git a/kernel/configs/cruel-sched_powersave.conf b/kernel/configs/cruel-sched_powersave.conf new file mode 100644 index 000000000000..a22c379626bb --- /dev/null +++ b/kernel/configs/cruel-sched_powersave.conf @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_POWERSAVE=y diff --git a/kernel/configs/cruel-sched_userspace.conf b/kernel/configs/cruel-sched_userspace.conf new file 
mode 100644 index 000000000000..8c1bc6848c91 --- /dev/null +++ b/kernel/configs/cruel-sched_userspace.conf @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_USERSPACE=y From 7ff09187ccc0caf61c182068899c0bceb38eba0d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 6 Mar 2020 14:03:46 +0300 Subject: [PATCH 300/452] configs: add boeffla_wl_blocker config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-boeffla_wl_blocker.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-boeffla_wl_blocker.conf diff --git a/kernel/configs/cruel-boeffla_wl_blocker.conf b/kernel/configs/cruel-boeffla_wl_blocker.conf new file mode 100644 index 000000000000..857797270df1 --- /dev/null +++ b/kernel/configs/cruel-boeffla_wl_blocker.conf @@ -0,0 +1 @@ +CONFIG_BOEFFLA_WL_BLOCKER=y From 29b77cac942f58695d3fd3d6435f729249b0c822 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 11 Mar 2020 00:20:41 +0300 Subject: [PATCH 301/452] configs: add size config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-size.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-size.conf diff --git a/kernel/configs/cruel-size.conf b/kernel/configs/cruel-size.conf new file mode 100644 index 000000000000..3c8db587fa3d --- /dev/null +++ b/kernel/configs/cruel-size.conf @@ -0,0 +1,3 @@ +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_03 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y From e603630bbc5ad1d1a7b14cbe1ac6d995febd1a39 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 17 Jun 2020 01:23:52 +0300 Subject: [PATCH 302/452] configs: add performance config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-performance.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-performance.conf diff --git a/kernel/configs/cruel-performance.conf b/kernel/configs/cruel-performance.conf new file mode 100644 index 000000000000..a1362236d27d --- /dev/null +++ b/kernel/configs/cruel-performance.conf @@ -0,0 +1,3 @@ +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y From 31b7a017ce0c2fd7e9d52a5d14c87155dca83092 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 11 Mar 2020 00:33:57 +0300 Subject: [PATCH 303/452] configs: add nomodules config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-nomodules.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-nomodules.conf diff --git a/kernel/configs/cruel-nomodules.conf b/kernel/configs/cruel-nomodules.conf new file mode 100644 index 000000000000..d85fc9557b48 --- /dev/null +++ b/kernel/configs/cruel-nomodules.conf @@ -0,0 +1,2 @@ +# CONFIG_MODULES is not set +# CONFIG_KALLSYMS is not set From 8b4bcb59a06bb740cd1f816c551300102623ca7f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 11 Mar 2020 00:28:51 +0300 Subject: [PATCH 304/452] configs: add noksm config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-noksm.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-noksm.conf diff --git a/kernel/configs/cruel-noksm.conf b/kernel/configs/cruel-noksm.conf new file mode 100644 index 000000000000..9671029ed687 --- /dev/null +++ b/kernel/configs/cruel-noksm.conf @@ -0,0 +1 @@ +# 
CONFIG_KSM is not set From 822608819088ef223e3a9ce1b76ed1e54f9ceab3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 11 Mar 2020 11:19:55 +0300 Subject: [PATCH 305/452] configs: add mass_storage config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-mass_storage.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-mass_storage.conf diff --git a/kernel/configs/cruel-mass_storage.conf b/kernel/configs/cruel-mass_storage.conf new file mode 100644 index 000000000000..1c1f2a38b948 --- /dev/null +++ b/kernel/configs/cruel-mass_storage.conf @@ -0,0 +1,2 @@ +CONFIG_USB_F_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y From 4845aa15e8553b064058b7aa77837c64e3268275 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 11 Mar 2020 17:57:36 +0300 Subject: [PATCH 306/452] configs: add noaudit config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-noaudit.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-noaudit.conf diff --git a/kernel/configs/cruel-noaudit.conf b/kernel/configs/cruel-noaudit.conf new file mode 100644 index 000000000000..29121d528fd5 --- /dev/null +++ b/kernel/configs/cruel-noaudit.conf @@ -0,0 +1 @@ +# CONFIG_AUDIT is not set From 29320ebd8e9e2118910cedac324608295a95b2bd Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 18 Mar 2020 08:50:37 +0300 Subject: [PATCH 307/452] configs: add always_enforce config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-always_enforce.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-always_enforce.conf diff --git a/kernel/configs/cruel-always_enforce.conf b/kernel/configs/cruel-always_enforce.conf new file mode 100644 index 000000000000..8708edc51348 --- /dev/null +++ b/kernel/configs/cruel-always_enforce.conf @@ -0,0 +1,3 @@ +# CONFIG_SECURITY_SELINUX_SWITCH is not set +CONFIG_ALWAYS_ENFORCE=y +# CONFIG_ALWAYS_PERMIT is not set From 6f726fce159ce958cd9b7b86d7c7a86c74edcafc Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 18 Mar 2020 08:51:09 +0300 Subject: [PATCH 308/452] configs: add always_permit config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-always_permit.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-always_permit.conf diff --git a/kernel/configs/cruel-always_permit.conf b/kernel/configs/cruel-always_permit.conf new file mode 100644 index 000000000000..ca7b9c6277d0 --- /dev/null +++ b/kernel/configs/cruel-always_permit.conf @@ -0,0 +1,3 @@ +# CONFIG_SECURITY_SELINUX_SWITCH is not set +# CONFIG_ALWAYS_ENFORCE is not set +CONFIG_ALWAYS_PERMIT=y From 18e538fb6ff17a04543327aca60b72226c38ae00 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Mar 2020 00:45:20 +0300 Subject: [PATCH 309/452] configs: add sdfat config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-sdfat.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-sdfat.conf diff --git a/kernel/configs/cruel-sdfat.conf b/kernel/configs/cruel-sdfat.conf new file mode 100644 index 000000000000..dba44f81a750 --- /dev/null +++ b/kernel/configs/cruel-sdfat.conf @@ -0,0 +1,3 @@ +# CONFIG_VFAT_FS is not set +CONFIG_SDFAT_USE_FOR_EXFAT=y +CONFIG_SDFAT_USE_FOR_VFAT=y From 82154495b05ecc5c39e4a0d0eaace1036074100a Mon Sep 17 00:00:00 2001 
From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Mar 2020 01:24:52 +0300 Subject: [PATCH 310/452] configs: add ntfs config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-ntfs.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 kernel/configs/cruel-ntfs.conf diff --git a/kernel/configs/cruel-ntfs.conf b/kernel/configs/cruel-ntfs.conf new file mode 100644 index 000000000000..fa7071455a34 --- /dev/null +++ b/kernel/configs/cruel-ntfs.conf @@ -0,0 +1,3 @@ +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y From 706f1e087193929002d4fd7506f8f84a347cc01d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 22 Mar 2020 10:29:34 +0300 Subject: [PATCH 311/452] configs: add morosound config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-morosound.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-morosound.conf diff --git a/kernel/configs/cruel-morosound.conf b/kernel/configs/cruel-morosound.conf new file mode 100644 index 000000000000..0fc41b8faba9 --- /dev/null +++ b/kernel/configs/cruel-morosound.conf @@ -0,0 +1 @@ +CONFIG_MORO_SOUND=y From 5939703bf8119a270ec2d4677c91d9f7626aa198 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 26 Feb 2020 19:27:37 +0300 Subject: [PATCH 312/452] configs: add io_bfq config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_bfq.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-io_bfq.conf diff --git a/kernel/configs/cruel-io_bfq.conf b/kernel/configs/cruel-io_bfq.conf new file mode 100644 index 000000000000..3bcadd8221fb --- /dev/null +++ b/kernel/configs/cruel-io_bfq.conf @@ -0,0 +1,2 @@ +CONFIG_IOSCHED_BFQ=y +CONFIG_SCSI_MQ_DEFAULT=y From e22beb4db10bc02e9535b5d9b199434a413c7bc3 Mon Sep 17 00:00:00 2001 From: Angheloaia Victor <extremegrief@pop-os.localdomain> Date: Thu, 26 Mar 2020 21:33:44 +0200 Subject: [PATCH 313/452] configs: add io_maple config preset --- kernel/configs/cruel-io_maple.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_maple.conf diff --git a/kernel/configs/cruel-io_maple.conf b/kernel/configs/cruel-io_maple.conf new file mode 100644 index 000000000000..d65920d99764 --- /dev/null +++ b/kernel/configs/cruel-io_maple.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_MAPLE=y +CONFIG_DEFAULT_MAPLE=y +CONFIG_DEFAULT_IOSCHED="maple" From 2bffc223626973dadaa514c95968ff736d7cc191 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 5 Apr 2020 16:55:37 +0300 Subject: [PATCH 314/452] configs: add io_fiops config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_fiops.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_fiops.conf diff --git a/kernel/configs/cruel-io_fiops.conf b/kernel/configs/cruel-io_fiops.conf new file mode 100644 index 000000000000..30100ef51662 --- /dev/null +++ b/kernel/configs/cruel-io_fiops.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_FIOPS=y 
+CONFIG_DEFAULT_FIOPS=y +CONFIG_DEFAULT_IOSCHED="fiops" From 0786215c95a7c91da5883027c88bbddda518ca67 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 6 Apr 2020 10:30:07 +0300 Subject: [PATCH 315/452] configs: add io_sio config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_sio.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_sio.conf diff --git a/kernel/configs/cruel-io_sio.conf b/kernel/configs/cruel-io_sio.conf new file mode 100644 index 000000000000..e72f3ffb79bb --- /dev/null +++ b/kernel/configs/cruel-io_sio.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_SIO=y +CONFIG_DEFAULT_SIO=y +CONFIG_DEFAULT_IOSCHED="sio" From eea90b9f31bf0ee830eefd5741c58a9d61950b15 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 6 Apr 2020 10:35:42 +0300 Subject: [PATCH 316/452] configs: add io_zen config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_zen.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_zen.conf diff --git a/kernel/configs/cruel-io_zen.conf b/kernel/configs/cruel-io_zen.conf new file mode 100644 index 000000000000..8cbee0c618ce --- /dev/null +++ b/kernel/configs/cruel-io_zen.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +CONFIG_IOSCHED_ZEN=y +CONFIG_DEFAULT_ZEN=y +CONFIG_DEFAULT_IOSCHED="zen" From 1bfb02053f2a1de6580e9ac930b5e84b3bc437c5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Apr 2020 13:25:47 +0300 Subject: [PATCH 317/452] configs: add io_noop config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_noop.conf | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 kernel/configs/cruel-io_noop.conf diff --git a/kernel/configs/cruel-io_noop.conf b/kernel/configs/cruel-io_noop.conf new file mode 100644 index 000000000000..611ce52818fc --- /dev/null +++ b/kernel/configs/cruel-io_noop.conf @@ -0,0 +1,8 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" From b87dce5c03e04abbc98b9d409e36df9a36e9ac3e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 9 Apr 2020 00:59:53 +0300 Subject: [PATCH 318/452] configs: add io_anxiety config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_anxiety.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-io_anxiety.conf diff --git a/kernel/configs/cruel-io_anxiety.conf b/kernel/configs/cruel-io_anxiety.conf new file mode 100644 index 000000000000..5d3f9d30aad5 --- /dev/null +++ b/kernel/configs/cruel-io_anxiety.conf @@ -0,0 +1,9 @@ +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_IOSCHED_ANXIETY=y +CONFIG_DEFAULT_ANXIETY=y 
+CONFIG_DEFAULT_IOSCHED="anxiety" From 5795d4a6b38bffe3e92c7e8b3888c723c0820ce8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 6 Apr 2020 11:18:34 +0300 Subject: [PATCH 319/452] configs: add io_cfq config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-io_cfq.conf | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 kernel/configs/cruel-io_cfq.conf diff --git a/kernel/configs/cruel-io_cfq.conf b/kernel/configs/cruel-io_cfq.conf new file mode 100644 index 000000000000..1034c22eb0da --- /dev/null +++ b/kernel/configs/cruel-io_cfq.conf @@ -0,0 +1,8 @@ +# CONFIG_DEFAULT_ANXIETY is not set +# CONFIG_DEFAULT_FIOPS is not set +# CONFIG_DEFAULT_MAPLE is not set +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_SIO is not set +# CONFIG_DEFAULT_ZEN is not set +CONFIG_DEFAULT_CFQ=y +CONFIG_DEFAULT_IOSCHED="cfq" From 7323b4fed63264bd7c41717b68d46b678e0f9425 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 23 Mar 2020 21:26:54 +0300 Subject: [PATCH 320/452] configs: add tcp_window_64k config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-tcp_window_64k.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-tcp_window_64k.conf diff --git a/kernel/configs/cruel-tcp_window_64k.conf b/kernel/configs/cruel-tcp_window_64k.conf new file mode 100644 index 000000000000..abb2ceab19a4 --- /dev/null +++ b/kernel/configs/cruel-tcp_window_64k.conf @@ -0,0 +1 @@ +CONFIG_LARGE_TCP_INITIAL_BUFFER=y From 91e4457d1d1437bccfd3d37503a692ac21f110bf Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 23 Mar 2020 15:52:56 +0300 Subject: [PATCH 321/452] configs: add tcp_cubic config preset Thanks, @HRTKernel! 
Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-tcp_cubic.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_cubic.conf diff --git a/kernel/configs/cruel-tcp_cubic.conf b/kernel/configs/cruel-tcp_cubic.conf new file mode 100644 index 000000000000..de9045bf0279 --- /dev/null +++ b/kernel/configs/cruel-tcp_cubic.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" From 5e8904e7647cafb0c93fbfa2fcea543a7ae126f8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 16:40:41 +0300 Subject: [PATCH 322/452] configs: add tcp_westwood config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-tcp_westwood.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_westwood.conf diff --git a/kernel/configs/cruel-tcp_westwood.conf b/kernel/configs/cruel-tcp_westwood.conf new file mode 100644 index 000000000000..87a68f70fd06 --- /dev/null +++ b/kernel/configs/cruel-tcp_westwood.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_TCP_CONG_WESTWOOD=y +CONFIG_DEFAULT_WESTWOOD=y +CONFIG_DEFAULT_TCP_CONG="westwood" From 75cc09274836e23540ebee4029f9bb5482c325ad Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 7 Apr 2020 23:05:42 +0300 Subject: [PATCH 323/452] configs: add tcp_htcp config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-tcp_htcp.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_htcp.conf diff --git a/kernel/configs/cruel-tcp_htcp.conf b/kernel/configs/cruel-tcp_htcp.conf new file mode 100644 index 000000000000..20c422940245 --- /dev/null +++ b/kernel/configs/cruel-tcp_htcp.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_TCP_CONG_HTCP=y +CONFIG_DEFAULT_HTCP=y +CONFIG_DEFAULT_TCP_CONG="htcp" From c054d5170e7978f7d7900e3eff3cf7b4f857f0a5 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 7 Apr 2020 23:05:04 +0300 Subject: [PATCH 324/452] configs: add tcp_bic config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-tcp_bic.conf | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 kernel/configs/cruel-tcp_bic.conf diff --git a/kernel/configs/cruel-tcp_bic.conf b/kernel/configs/cruel-tcp_bic.conf new file mode 100644 index 000000000000..f93b8bfdb873 --- /dev/null +++ b/kernel/configs/cruel-tcp_bic.conf @@ -0,0 +1,6 @@ +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_BBR is not set +CONFIG_DEFAULT_BIC=y +CONFIG_DEFAULT_TCP_CONG="bic" From 1ee07fc0ffa061ab4a0147a825b96ec9ae6b7651 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Apr 2020 13:03:16 +0300 Subject: [PATCH 325/452] configs: add tcp_bbr config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-tcp_bbr.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-tcp_bbr.conf diff --git a/kernel/configs/cruel-tcp_bbr.conf 
b/kernel/configs/cruel-tcp_bbr.conf new file mode 100644 index 000000000000..297c6ebfb177 --- /dev/null +++ b/kernel/configs/cruel-tcp_bbr.conf @@ -0,0 +1,7 @@ +# CONFIG_DEFAULT_BIC is not set +# CONFIG_DEFAULT_CUBIC is not set +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +CONFIG_TCP_CONG_BBR=y +CONFIG_DEFAULT_BBR=y +CONFIG_DEFAULT_TCP_CONG="bbr" From 402e4c85b93a45e0a2cb79612248ae6567e27565 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 26 Mar 2020 15:49:10 +0300 Subject: [PATCH 326/452] configs: add noswap config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-noswap.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-noswap.conf diff --git a/kernel/configs/cruel-noswap.conf b/kernel/configs/cruel-noswap.conf new file mode 100644 index 000000000000..63b4815455fa --- /dev/null +++ b/kernel/configs/cruel-noswap.conf @@ -0,0 +1 @@ +# CONFIG_SWAP is not set From 9debcee8284a73758a0603a8cf552ae116d14c74 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 26 Mar 2020 15:51:28 +0300 Subject: [PATCH 327/452] configs: add nozram config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-nozram.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-nozram.conf diff --git a/kernel/configs/cruel-nozram.conf b/kernel/configs/cruel-nozram.conf new file mode 100644 index 000000000000..00f4887473d3 --- /dev/null +++ b/kernel/configs/cruel-nozram.conf @@ -0,0 +1 @@ +# CONFIG_ZRAM is not set From 5ee055466b7284efe2516d3865b62129d710ab0d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 12 Apr 2020 09:21:49 +0300 Subject: [PATCH 328/452] configs: add noatime config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-noatime.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 kernel/configs/cruel-noatime.conf diff --git a/kernel/configs/cruel-noatime.conf b/kernel/configs/cruel-noatime.conf new file mode 100644 index 000000000000..e3337606de23 --- /dev/null +++ b/kernel/configs/cruel-noatime.conf @@ -0,0 +1,2 @@ +CONFIG_DEFAULT_MNT_NOATIME=y +# CONFIG_DEFAULT_MNT_RELATIME is not set From d48815a8b192c0b7a3a5b64c278f21632fd11004 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Mar 2020 21:08:48 +0300 Subject: [PATCH 329/452] configs: add kexec config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-kexec.conf | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 kernel/configs/cruel-kexec.conf diff --git a/kernel/configs/cruel-kexec.conf b/kernel/configs/cruel-kexec.conf new file mode 100644 index 000000000000..ee65c9d712a9 --- /dev/null +++ b/kernel/configs/cruel-kexec.conf @@ -0,0 +1,4 @@ +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC=y +CONFIG_PROC_KCORE=y From 5c2d6c4226a143c76fc82ef61a0c5dc605783988 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 26 Apr 2020 17:38:01 +0300 Subject: [PATCH 330/452] configs: add kali config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-kali.conf | 122 +++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 kernel/configs/cruel-kali.conf diff --git a/kernel/configs/cruel-kali.conf b/kernel/configs/cruel-kali.conf new file mode 100644 index 000000000000..150a300ccab1 --- /dev/null +++ b/kernel/configs/cruel-kali.conf @@ -0,0 +1,122 @@ 
+CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +CONFIG_SYSVIPC_COMPAT=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_BT_INTEL=y +CONFIG_BT_BCM=y +CONFIG_BT_RTL=y +CONFIG_BT_HCIBTUSB=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_ATH3K is not set +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +CONFIG_NL80211_TESTMODE=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=y +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_MINSTREL_VHT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_EEPROM_93CX6=y +CONFIG_USB_NET_RNDIS_HOST=y +# CONFIG_ADM8211 is not set +CONFIG_ATH_COMMON=y +# CONFIG_ATH5K is not set +CONFIG_ATH9K_HW=y +CONFIG_ATH9K_COMMON=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +# CONFIG_ATH9K is not set +CONFIG_ATH9K_HTC=y +# CONFIG_ATH9K_HTC_DEBUGFS is not set +CONFIG_CARL9170=y +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +# CONFIG_CARL9170_HWRNG is not set +CONFIG_ATH6KL=y +# CONFIG_ATH6KL_SDIO is not set +CONFIG_ATH6KL_USB=y +# CONFIG_ATH6KL_DEBUG is not set +# CONFIG_ATH6KL_TRACING is not set +# CONFIG_AR5523 is not set +# CONFIG_ATH10K is not set +# CONFIG_WCN36XX is not set +CONFIG_AT76C50X_USB=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +# CONFIG_BRCMSMAC is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +# CONFIG_IWLWIFI is not set +# CONFIG_P54_COMMON is not set +# CONFIG_LIBERTAS_THINFIRM is not set +# CONFIG_MWL8K is not set +# CONFIG_MT7601U is not set +CONFIG_RT2X00=y +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +# CONFIG_RT2800PCI is not set +CONFIG_RT2500USB=y +CONFIG_RT73USB=y +CONFIG_RT2800USB=y +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=y +CONFIG_RT2X00_LIB_USB=y +CONFIG_RT2X00_LIB=y +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +# CONFIG_RTL8180 is not set +CONFIG_RTL8187=y +CONFIG_RTL8187_LEDS=y +CONFIG_RTL_CARDS=y +# CONFIG_RTL8192CE is not set +# CONFIG_RTL8192SE is not set +# CONFIG_RTL8192DE is not set +# CONFIG_RTL8723AE is not set +# CONFIG_RTL8723BE is not set +# CONFIG_RTL8188EE is not set +# CONFIG_RTL8192EE is not set +# CONFIG_RTL8821AE is not set +# CONFIG_RTL8192CU is not set +CONFIG_RTL8XXXU=y +CONFIG_RTL8XXXU_UNTESTED=y +# CONFIG_RSI_91X is not set +# CONFIG_CW1200 is not set +# CONFIG_WL1251 is not set +# CONFIG_WL12XX is not set +# CONFIG_WL18XX is not set +# CONFIG_WLCORE is not set +CONFIG_USB_ZD1201=y +CONFIG_ZD1211RW=y +# CONFIG_ZD1211RW_DEBUG is not set +# CONFIG_MAC80211_HWSIM is not set +CONFIG_USB_NET_RNDIS_WLAN=y +CONFIG_USB_F_SERIAL=y +CONFIG_USB_F_HID=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_F_HID=y +# CONFIG_R8822BE is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set +CONFIG_CRYPTO_CCM=y +CONFIG_CRC_ITU_T=y From 29d56b9ba81fe88d95b948df66f438751b7da8b4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 4 Aug 2020 10:39:02 +0300 Subject: [PATCH 331/452] configs: add usb_serial config preset 
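The cruel-*.conf files introduced in these patches (the TCP congestion-control presets and the kali fragment above, and the usb_serial fragment below) are ordinary Kconfig fragments; they carry no kbuild hook of their own and are presumably consumed by the cruelbuild script added later in this series. As a minimal sketch, assuming an already-configured tree, one optional fragment could also be merged by hand with the in-tree merge_config.sh helper (the fragment name and make flags here are illustrative only, not the series' documented workflow):

    # sketch: fold one optional preset into the current .config
    ./scripts/kconfig/merge_config.sh -m .config kernel/configs/cruel-tcp_bbr.conf
    # resolve any newly exposed dependencies non-interactively (arch/arm64 tree)
    make ARCH=arm64 olddefconfig
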
Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-usb_serial.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-usb_serial.conf diff --git a/kernel/configs/cruel-usb_serial.conf b/kernel/configs/cruel-usb_serial.conf new file mode 100644 index 000000000000..2d57ea529a29 --- /dev/null +++ b/kernel/configs/cruel-usb_serial.conf @@ -0,0 +1,5 @@ +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y + +# natively support nodemcu/arduino serial console +CONFIG_USB_SERIAL_CP210X=y From 4de3cc6b06e5977daf76a79f7d3f9e26ceef52cd Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 8 Sep 2020 14:12:47 +0300 Subject: [PATCH 332/452] configs: add faultinjection config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-faultinjection.conf | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 kernel/configs/cruel-faultinjection.conf diff --git a/kernel/configs/cruel-faultinjection.conf b/kernel/configs/cruel-faultinjection.conf new file mode 100644 index 000000000000..5079fed6c2e5 --- /dev/null +++ b/kernel/configs/cruel-faultinjection.conf @@ -0,0 +1,9 @@ +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_MMC_REQUEST is not set +CONFIG_FAIL_FUTEX=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set From e583f87211d293df584a1bee822470e72fd2ec1f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 8 Sep 2020 14:13:09 +0300 Subject: [PATCH 333/452] configs: add debug config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-debug.conf | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 kernel/configs/cruel-debug.conf diff --git a/kernel/configs/cruel-debug.conf b/kernel/configs/cruel-debug.conf new file mode 100644 index 000000000000..2e4b02f6f42c --- /dev/null +++ b/kernel/configs/cruel-debug.conf @@ -0,0 +1,18 @@ +CONFIG_SEC_DEBUG_SPINBUG_PANIC=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_VMACACHE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=140 +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=y +CONFIG_LOCKDEP=y +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_TRACE_IRQFLAGS=y +CONFIG_PROVE_RCU=y +CONFIG_RCU_CPU_STALL_TIMEOUT=100 +CONFIG_DEBUG_SNAPSHOT_SPINLOCK=y +CONFIG_FORTIFY_SOURCE=y From 72b433978bac14abf42fdd5d1b647029e68ae222 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 22 Oct 2020 18:07:10 +0300 Subject: [PATCH 334/452] configs: add gcov config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-gcov.conf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 kernel/configs/cruel-gcov.conf diff --git a/kernel/configs/cruel-gcov.conf b/kernel/configs/cruel-gcov.conf new file mode 100644 index 000000000000..dd732654c0fc --- /dev/null +++ b/kernel/configs/cruel-gcov.conf @@ -0,0 +1,7 @@ +CONFIG_CONSTRUCTORS=y +CONFIG_GCOV_KERNEL=y +CONFIG_GCOV_PROFILE_ALL=y +CONFIG_GCOV_FORMAT_AUTODETECT=y +# CONFIG_GCOV_FORMAT_3_4 is not set +# CONFIG_GCOV_FORMAT_4_7 is not set +CONFIG_DEBUG_FS=y From 16bc51378a6f1cb71c9ba85788c339ac114c0750 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 10 Sep 2020 02:03:26 +0300 Subject: [PATCH 335/452] configs: add lto 
config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-lto.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-lto.conf diff --git a/kernel/configs/cruel-lto.conf b/kernel/configs/cruel-lto.conf new file mode 100644 index 000000000000..6c5a1661d842 --- /dev/null +++ b/kernel/configs/cruel-lto.conf @@ -0,0 +1,5 @@ +CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y +CONFIG_LTO=y +# CONFIG_LTO_NONE is not set +CONFIG_LTO_CLANG=y +# CONFIG_CFI_CLANG is not set From bef91969ad51fad9abad42d71fe5c337f17b46a0 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 2 Oct 2020 21:50:51 +0300 Subject: [PATCH 336/452] configs: add dtb config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-dtb.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-dtb.conf diff --git a/kernel/configs/cruel-dtb.conf b/kernel/configs/cruel-dtb.conf new file mode 100644 index 000000000000..64dbfdd901d7 --- /dev/null +++ b/kernel/configs/cruel-dtb.conf @@ -0,0 +1 @@ +CONFIG_DTC=y From 1bb9e25cc2c61225d3af47687ab3abc63ead3922 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 2 Oct 2020 22:02:41 +0300 Subject: [PATCH 337/452] configs: add kvm config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-kvm.conf | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 kernel/configs/cruel-kvm.conf diff --git a/kernel/configs/cruel-kvm.conf b/kernel/configs/cruel-kvm.conf new file mode 100644 index 000000000000..3acf5655b916 --- /dev/null +++ b/kernel/configs/cruel-kvm.conf @@ -0,0 +1,21 @@ +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_MMU_NOTIFIER=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_ARM64_VHE=y +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set From 448d00445b7591a30e5ca8cee79f0ef006a6269e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 6 Oct 2020 18:18:05 +0300 Subject: [PATCH 338/452] configs: add simple_lmk config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-simple_lmk.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-simple_lmk.conf diff --git a/kernel/configs/cruel-simple_lmk.conf b/kernel/configs/cruel-simple_lmk.conf new file mode 100644 index 000000000000..41a4ca779222 --- /dev/null +++ b/kernel/configs/cruel-simple_lmk.conf @@ -0,0 +1,5 @@ +# CONFIG_MEMCG is not set +# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set +CONFIG_ANDROID_SIMPLE_LMK=y +CONFIG_ANDROID_SIMPLE_LMK_MINFREE=128 +CONFIG_ANDROID_SIMPLE_LMK_TIMEOUT_MSEC=200 From 4841c9a50245d619da44ec826d8707fff24f42ff Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 18:09:56 +0300 Subject: [PATCH 339/452] configs: add reiser4 config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-reiser4.conf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 kernel/configs/cruel-reiser4.conf diff --git a/kernel/configs/cruel-reiser4.conf b/kernel/configs/cruel-reiser4.conf new file 
mode 100644 index 000000000000..42be06276c1a --- /dev/null +++ b/kernel/configs/cruel-reiser4.conf @@ -0,0 +1,5 @@ +CONFIG_REISER4_FS=y +# CONFIG_REISER4_DEBUG is not set +CONFIG_XXHASH=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y From 7841223f4e0ff08b2b227b4696439ece88cebb51 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Nov 2020 18:01:22 +0300 Subject: [PATCH 340/452] configs: add force_dex_wqhd config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-force_dex_wqhd.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-force_dex_wqhd.conf diff --git a/kernel/configs/cruel-force_dex_wqhd.conf b/kernel/configs/cruel-force_dex_wqhd.conf new file mode 100644 index 000000000000..da789cb14223 --- /dev/null +++ b/kernel/configs/cruel-force_dex_wqhd.conf @@ -0,0 +1 @@ +CONFIG_DISPLAYPORT_DEX_FORCE_WQHD=y From 2ac84af802b115771a819f05af924ffa737d187d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 23 Nov 2020 20:23:40 +0300 Subject: [PATCH 341/452] configs: add polly config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-polly.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-polly.conf diff --git a/kernel/configs/cruel-polly.conf b/kernel/configs/cruel-polly.conf new file mode 100644 index 000000000000..3d5d7eaedf35 --- /dev/null +++ b/kernel/configs/cruel-polly.conf @@ -0,0 +1 @@ +CONFIG_LLVM_POLLY=y From 4546e2141ec0e1dfbedd3fc74007dd0b05033c13 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 25 Nov 2020 16:55:00 +0300 Subject: [PATCH 342/452] configs: add graphite config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-graphite.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-graphite.conf diff --git a/kernel/configs/cruel-graphite.conf b/kernel/configs/cruel-graphite.conf new file mode 100644 index 000000000000..8ccf5d33da69 --- /dev/null +++ b/kernel/configs/cruel-graphite.conf @@ -0,0 +1 @@ +CONFIG_GCC_GRAPHITE=y From e897f59198f07e74aac74d0cdc632e255dda1a55 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 25 Dec 2020 15:13:22 +0300 Subject: [PATCH 343/452] configs: add fp_boost config preset Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/cruel-fp_boost.conf | 1 + 1 file changed, 1 insertion(+) create mode 100644 kernel/configs/cruel-fp_boost.conf diff --git a/kernel/configs/cruel-fp_boost.conf b/kernel/configs/cruel-fp_boost.conf new file mode 100644 index 000000000000..1712a2f8e2d0 --- /dev/null +++ b/kernel/configs/cruel-fp_boost.conf @@ -0,0 +1 @@ +CONFIG_FINGERPRINT_BOOST=y From 82b4d1b0ef47607a988df984692e2128a96c1800 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:43:31 +0300 Subject: [PATCH 344/452] configs: enable boeffla_wl_blocker by default Signed-off-by: Denis Efremov <efremov@linux.com> --- ...ruel-boeffla_wl_blocker.conf => cruel+boeffla_wl_blocker.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-boeffla_wl_blocker.conf => cruel+boeffla_wl_blocker.conf} (100%) diff --git a/kernel/configs/cruel-boeffla_wl_blocker.conf b/kernel/configs/cruel+boeffla_wl_blocker.conf similarity index 100% rename from kernel/configs/cruel-boeffla_wl_blocker.conf rename to kernel/configs/cruel+boeffla_wl_blocker.conf From 8a774d4703e140e719bdbba17eeb6ce2baf5d660 Mon Sep 17 00:00:00 2001 From: 
Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 12:16:20 +0300 Subject: [PATCH 345/452] configs: enable morosound by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-morosound.conf => cruel+morosound.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-morosound.conf => cruel+morosound.conf} (100%) diff --git a/kernel/configs/cruel-morosound.conf b/kernel/configs/cruel+morosound.conf similarity index 100% rename from kernel/configs/cruel-morosound.conf rename to kernel/configs/cruel+morosound.conf From fa529c0d94dff93ae891be09b14627a33fc0bda3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:44:05 +0300 Subject: [PATCH 346/452] configs: enable nodebug by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-nodebug.conf => cruel+nodebug.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-nodebug.conf => cruel+nodebug.conf} (100%) diff --git a/kernel/configs/cruel-nodebug.conf b/kernel/configs/cruel+nodebug.conf similarity index 100% rename from kernel/configs/cruel-nodebug.conf rename to kernel/configs/cruel+nodebug.conf From cccbf26e0816b9091a106a5aab45ec124334750e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:45:02 +0300 Subject: [PATCH 347/452] configs: enable wireguard by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-wireguard.conf => cruel+wireguard.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-wireguard.conf => cruel+wireguard.conf} (100%) diff --git a/kernel/configs/cruel-wireguard.conf b/kernel/configs/cruel+wireguard.conf similarity index 100% rename from kernel/configs/cruel-wireguard.conf rename to kernel/configs/cruel+wireguard.conf From 00b6b1648b360d2e9d0ab3e5c10dc3376d958272 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:45:26 +0300 Subject: [PATCH 348/452] configs: enable cifs by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-cifs.conf => cruel+cifs.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-cifs.conf => cruel+cifs.conf} (100%) diff --git a/kernel/configs/cruel-cifs.conf b/kernel/configs/cruel+cifs.conf similarity index 100% rename from kernel/configs/cruel-cifs.conf rename to kernel/configs/cruel+cifs.conf From 2a5db808dc3afc45bb8fe6733b3eb00fde8e9e6b Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:46:12 +0300 Subject: [PATCH 349/452] configs: enable ntfs by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-ntfs.conf => cruel+ntfs.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-ntfs.conf => cruel+ntfs.conf} (100%) diff --git a/kernel/configs/cruel-ntfs.conf b/kernel/configs/cruel+ntfs.conf similarity index 100% rename from kernel/configs/cruel-ntfs.conf rename to kernel/configs/cruel+ntfs.conf From 4b403cdc445df99f76fe28c75897b483f360f8a8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:48:06 +0300 Subject: [PATCH 350/452] configs: enable ttl by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-ttl.conf => cruel+ttl.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-ttl.conf => cruel+ttl.conf} (100%) diff --git 
a/kernel/configs/cruel-ttl.conf b/kernel/configs/cruel+ttl.conf similarity index 100% rename from kernel/configs/cruel-ttl.conf rename to kernel/configs/cruel+ttl.conf From 134f8477f9626f0e0a0e14fe743d1e12820c273a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 14 Oct 2020 13:48:25 +0300 Subject: [PATCH 351/452] configs: enable usb_serial by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-usb_serial.conf => cruel+usb_serial.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-usb_serial.conf => cruel+usb_serial.conf} (100%) diff --git a/kernel/configs/cruel-usb_serial.conf b/kernel/configs/cruel+usb_serial.conf similarity index 100% rename from kernel/configs/cruel-usb_serial.conf rename to kernel/configs/cruel+usb_serial.conf From 3fe5e2b5dfd3c8f79b3042832cb62e919ee0fab3 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 10:38:15 +0300 Subject: [PATCH 352/452] configs: enable sdfat by default Signed-off-by: Denis Efremov <efremov@linux.com> --- kernel/configs/{cruel-sdfat.conf => cruel+sdfat.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-sdfat.conf => cruel+sdfat.conf} (100%) diff --git a/kernel/configs/cruel-sdfat.conf b/kernel/configs/cruel+sdfat.conf similarity index 100% rename from kernel/configs/cruel-sdfat.conf rename to kernel/configs/cruel+sdfat.conf From 76292d1e72dba42a2cd03e35b79cc3614a440c88 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 10:40:41 +0300 Subject: [PATCH 353/452] configs: enable sched_performance by default Signed-off-by: Denis Efremov <efremov@linux.com> --- ...{cruel-sched_performance.conf => cruel+sched_performance.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-sched_performance.conf => cruel+sched_performance.conf} (100%) diff --git a/kernel/configs/cruel-sched_performance.conf b/kernel/configs/cruel+sched_performance.conf similarity index 100% rename from kernel/configs/cruel-sched_performance.conf rename to kernel/configs/cruel+sched_performance.conf From f582f541a7f2856bb746ecf3571ae3848ed55247 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 26 Oct 2020 10:41:23 +0300 Subject: [PATCH 354/452] configs: enable sched_powersave by default Signed-off-by: Denis Efremov <efremov@linux.com> --- .../{cruel-sched_powersave.conf => cruel+sched_powersave.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-sched_powersave.conf => cruel+sched_powersave.conf} (100%) diff --git a/kernel/configs/cruel-sched_powersave.conf b/kernel/configs/cruel+sched_powersave.conf similarity index 100% rename from kernel/configs/cruel-sched_powersave.conf rename to kernel/configs/cruel+sched_powersave.conf From bdd90abbdbf523e0db6a27d1d9dfbfa1dab4b437 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Nov 2020 18:01:53 +0300 Subject: [PATCH 355/452] configs: enable force_dex_wqhd by default Signed-off-by: Denis Efremov <efremov@linux.com> --- .../{cruel-force_dex_wqhd.conf => cruel+force_dex_wqhd.conf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename kernel/configs/{cruel-force_dex_wqhd.conf => cruel+force_dex_wqhd.conf} (100%) diff --git a/kernel/configs/cruel-force_dex_wqhd.conf b/kernel/configs/cruel+force_dex_wqhd.conf similarity index 100% rename from kernel/configs/cruel-force_dex_wqhd.conf rename to 
kernel/configs/cruel+force_dex_wqhd.conf From d6e9cde1358e58febaed5e2e1732316b44ed288d Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 28 Feb 2020 19:25:44 +0300 Subject: [PATCH 356/452] samsung: integrate different models in a single tree Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/Kconfig.platforms | 51 ++++++++++++++++++++++++++++++++++ arch/arm64/boot/dts/Makefile | 54 ++++++++++++++++++++++++++++++++---- 2 files changed, 99 insertions(+), 6 deletions(-) diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 39d7ccbe8fc9..276cb267bee2 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -321,6 +321,57 @@ config SOC_EXYNOS9820 select PINCTRL select SAMSUNG_DMADEV +choice + prompt "Samsung EXYNOS982[05] Model (only for device-trees)" + depends on SOC_EXYNOS9820 + default MODEL_NONE + + help + Select the phone model. + + config MODEL_NONE + bool "None" + config MODEL_G970F + bool "G970F" + select DTC + config MODEL_G970N + bool "G970N" + select DTC + config MODEL_G973F + bool "G973F" + select DTC + config MODEL_G973N + bool "G973N" + select DTC + config MODEL_G975F + bool "G975F" + select DTC + config MODEL_G975N + bool "G975N" + select DTC + config MODEL_G977B + bool "G977B" + select DTC + config MODEL_G977N + bool "G977N" + select DTC + config MODEL_N970F + bool "N970F" + select DTC + config MODEL_N971N + bool "N971N" + select DTC + config MODEL_N975F + bool "N975F" + select DTC + config MODEL_N976B + bool "N976B" + select DTC + config MODEL_N976N + bool "N976N" + select DTC +endchoice + config SOC_EXYNOS9820_EVT0 default n bool "Samsung EXYNOS9820 EVT0" diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index a469363ab745..99ac941a815f 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -1,11 +1,53 @@ # SPDX-License-Identifier: GPL-2.0 -dts-dirs += exynos -subdir-y := $(dts-dirs) +ifdef CONFIG_MODEL_G970F +include arch/arm64/boot/dts/G970F.mk +endif -always := $(DTB_LIST) +ifdef CONFIG_MODEL_G970N +include arch/arm64/boot/dts/G970N.mk +endif -targets += dtbs -dtbs: $(addprefix $(obj)/, $(DTB_LIST)) +ifdef CONFIG_MODEL_G973F +include arch/arm64/boot/dts/G973F.mk +endif -clean-files := exynos/*.dtb *.dtb* +ifdef CONFIG_MODEL_G973N +include arch/arm64/boot/dts/G973N.mk +endif + +ifdef CONFIG_MODEL_G975F +include arch/arm64/boot/dts/G975F.mk +endif + +ifdef CONFIG_MODEL_G975N +include arch/arm64/boot/dts/G975N.mk +endif + +ifdef CONFIG_MODEL_G977B +include arch/arm64/boot/dts/G977B.mk +endif + +ifdef CONFIG_MODEL_G977N +include arch/arm64/boot/dts/G977N.mk +endif + +ifdef CONFIG_MODEL_N970F +include arch/arm64/boot/dts/N970F.mk +endif + +ifdef CONFIG_MODEL_N971N +include arch/arm64/boot/dts/N971N.mk +endif + +ifdef CONFIG_MODEL_N975F +include arch/arm64/boot/dts/N975F.mk +endif + +ifdef CONFIG_MODEL_N976B +include arch/arm64/boot/dts/N976B.mk +endif + +ifdef CONFIG_MODEL_N976N +include arch/arm64/boot/dts/N976N.mk +endif From 1aa4c62d19edd4c2740536ce7a5b329707fc6ae4 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 23 Jan 2020 18:53:00 +0300 Subject: [PATCH 357/452] cruelbuild: add python build script Signed-off-by: Denis Efremov <efremov@linux.com> --- .gitignore | 7 + Makefile | 3 +- arch/arm64/boot/.gitignore | 2 + cruel/.gitignore | 1 + cruel/build.mkbootimg.G970F | 11 + cruel/build.mkbootimg.G970N | 11 + cruel/build.mkbootimg.G973F | 11 + cruel/build.mkbootimg.G973N | 11 + cruel/build.mkbootimg.G975F | 11 + 
cruel/build.mkbootimg.G975N | 11 + cruel/build.mkbootimg.G977B | 11 + cruel/build.mkbootimg.G977N | 11 + cruel/build.mkbootimg.N970F | 11 + cruel/build.mkbootimg.N971N | 11 + cruel/build.mkbootimg.N975F | 11 + cruel/build.mkbootimg.N976B | 11 + cruel/build.mkbootimg.N976N | 11 + cruel/clone_header | Bin 0 -> 435248 bytes cruel/dtb.G970F | 2 + cruel/dtb.G970N | 2 + cruel/dtb.G973F | 2 + cruel/dtb.G973N | 2 + cruel/dtb.G975F | 2 + cruel/dtb.G975N | 2 + cruel/dtb.G977B | 2 + cruel/dtb.G977N | 2 + cruel/dtb.N970F | 2 + cruel/dtb.N971N | 2 + cruel/dtb.N975F | 2 + cruel/dtb.N976B | 2 + cruel/dtb.N976N | 2 + cruel/dtbo.G970F | 27 + cruel/dtbo.G970N | 19 + cruel/dtbo.G973F | 35 + cruel/dtbo.G973N | 23 + cruel/dtbo.G975F | 39 + cruel/dtbo.G975N | 27 + cruel/dtbo.G977B | 33 + cruel/dtbo.G977N | 33 + cruel/dtbo.N970F | 19 + cruel/dtbo.N971N | 19 + cruel/dtbo.N975F | 39 + cruel/dtbo.N976B | 39 + cruel/dtbo.N976N | 35 + cruel/unxz | Bin 0 -> 627688 bytes cruelbuild | 1329 ++++++++++++++++++++++++ kernel/configs/cruel+samsung.conf | 0 kernel/configs/cruel-empty_vbmeta.conf | 0 kernel/configs/cruel-fake_config.conf | 0 49 files changed, 1897 insertions(+), 1 deletion(-) create mode 100644 cruel/.gitignore create mode 100644 cruel/build.mkbootimg.G970F create mode 100644 cruel/build.mkbootimg.G970N create mode 100644 cruel/build.mkbootimg.G973F create mode 100644 cruel/build.mkbootimg.G973N create mode 100644 cruel/build.mkbootimg.G975F create mode 100644 cruel/build.mkbootimg.G975N create mode 100644 cruel/build.mkbootimg.G977B create mode 100644 cruel/build.mkbootimg.G977N create mode 100644 cruel/build.mkbootimg.N970F create mode 100644 cruel/build.mkbootimg.N971N create mode 100644 cruel/build.mkbootimg.N975F create mode 100644 cruel/build.mkbootimg.N976B create mode 100644 cruel/build.mkbootimg.N976N create mode 100755 cruel/clone_header create mode 100644 cruel/dtb.G970F create mode 100644 cruel/dtb.G970N create mode 100644 cruel/dtb.G973F create mode 100644 cruel/dtb.G973N create mode 100644 cruel/dtb.G975F create mode 100644 cruel/dtb.G975N create mode 100644 cruel/dtb.G977B create mode 100644 cruel/dtb.G977N create mode 100644 cruel/dtb.N970F create mode 100644 cruel/dtb.N971N create mode 100644 cruel/dtb.N975F create mode 100644 cruel/dtb.N976B create mode 100644 cruel/dtb.N976N create mode 100644 cruel/dtbo.G970F create mode 100644 cruel/dtbo.G970N create mode 100644 cruel/dtbo.G973F create mode 100644 cruel/dtbo.G973N create mode 100644 cruel/dtbo.G975F create mode 100644 cruel/dtbo.G975N create mode 100644 cruel/dtbo.G977B create mode 100644 cruel/dtbo.G977N create mode 100644 cruel/dtbo.N970F create mode 100644 cruel/dtbo.N971N create mode 100644 cruel/dtbo.N975F create mode 100644 cruel/dtbo.N976B create mode 100644 cruel/dtbo.N976N create mode 100755 cruel/unxz create mode 100755 cruelbuild create mode 100644 kernel/configs/cruel+samsung.conf create mode 100644 kernel/configs/cruel-empty_vbmeta.conf create mode 100644 kernel/configs/cruel-fake_config.conf diff --git a/.gitignore b/.gitignore index 880734d33f8e..cc3604353dba 100644 --- a/.gitignore +++ b/.gitignore @@ -39,6 +39,7 @@ *.symtypes *.tar *.xz +*.zip Module.symvers modules.builtin @@ -131,3 +132,9 @@ kernel/configs/android-*.cfg *.reverse.dts __pycache__/ *.pyc + +/*.img +/config.json +/config.info +/config.G* +/config.N* diff --git a/Makefile b/Makefile index 9190284c413b..692bafceb978 100644 --- a/Makefile +++ b/Makefile @@ -1456,7 +1456,8 @@ CLEAN_DIRS += $(MODVERDIR) # Directories & files removed with 'make mrproper' 
MRPROPER_DIRS += include/config usr/include include/generated \ arch/*/include/generated .tmp_objdiff -MRPROPER_FILES += .config .config.old .version .old_version \ +MRPROPER_FILES += *.img *.zip *.tar.xz config.json config.info config.[GN]* \ + .config .config.old .version .old_version \ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ signing_key.pem signing_key.priv signing_key.x509 \ x509.genkey extra_certificates signing_key.x509.keyid \ diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore index 34e35209fc2e..be177135a3b6 100644 --- a/arch/arm64/boot/.gitignore +++ b/arch/arm64/boot/.gitignore @@ -2,3 +2,5 @@ Image Image-dtb Image.gz Image.gz-dtb +Image-G* +Image-N* diff --git a/cruel/.gitignore b/cruel/.gitignore new file mode 100644 index 000000000000..718773c4fa1b --- /dev/null +++ b/cruel/.gitignore @@ -0,0 +1 @@ +META-INF/ diff --git a/cruel/build.mkbootimg.G970F b/cruel/build.mkbootimg.G970F new file mode 100644 index 000000000000..ea5fd3d155bb --- /dev/null +++ b/cruel/build.mkbootimg.G970F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28A014KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G970N b/cruel/build.mkbootimg.G970N new file mode 100644 index 000000000000..f2954dbe3aa2 --- /dev/null +++ b/cruel/build.mkbootimg.G970N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28C005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G973F b/cruel/build.mkbootimg.G973F new file mode 100644 index 000000000000..d8374870f111 --- /dev/null +++ b/cruel/build.mkbootimg.G973F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28B009KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G973N b/cruel/build.mkbootimg.G973N new file mode 100644 index 000000000000..6c8bdef91f7f --- /dev/null +++ b/cruel/build.mkbootimg.G973N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28D005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G975F b/cruel/build.mkbootimg.G975F new file mode 100644 index 000000000000..5c67877067ba --- /dev/null +++ b/cruel/build.mkbootimg.G975F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI17C008KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G975N b/cruel/build.mkbootimg.G975N new file mode 100644 index 000000000000..9703db832e5a --- /dev/null +++ b/cruel/build.mkbootimg.G975N @@ -0,0 +1,11 @@ +header_version=1 
+os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRI28E005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G977B b/cruel/build.mkbootimg.G977B new file mode 100644 index 000000000000..7d73e8183f13 --- /dev/null +++ b/cruel/build.mkbootimg.G977B @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSC04B005KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.G977N b/cruel/build.mkbootimg.G977N new file mode 100644 index 000000000000..baa7aa52aa08 --- /dev/null +++ b/cruel/build.mkbootimg.G977N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPRK21D004KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N970F b/cruel/build.mkbootimg.N970F new file mode 100644 index 000000000000..f15f5dde22b8 --- /dev/null +++ b/cruel/build.mkbootimg.N970F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSD26B006KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N971N b/cruel/build.mkbootimg.N971N new file mode 100644 index 000000000000..0a1e8f9569fc --- /dev/null +++ b/cruel/build.mkbootimg.N971N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSD23A001KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N975F b/cruel/build.mkbootimg.N975F new file mode 100644 index 000000000000..3452a84326b0 --- /dev/null +++ b/cruel/build.mkbootimg.N975F @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSC14B006KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N976B b/cruel/build.mkbootimg.N976B new file mode 100644 index 000000000000..542868855bb1 --- /dev/null +++ b/cruel/build.mkbootimg.N976B @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSC14C006KU +pagesize=2048 +cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/build.mkbootimg.N976N b/cruel/build.mkbootimg.N976N new file mode 100644 index 000000000000..52d971c78c15 --- /dev/null +++ b/cruel/build.mkbootimg.N976N @@ -0,0 +1,11 @@ +header_version=1 +os_version=12.0.0 +os_patch_level=2022-10 +board=SRPSD23C001KU +pagesize=2048 
+cmdline=androidboot.selinux=permissive androidboot.selinux=permissive loop.max_part=7 +base=0x10000000 +kernel_offset=0x00008000 +ramdisk_offset=0x00000000 +second_offset=0x00000000 +tags_offset=0x00000100 diff --git a/cruel/clone_header b/cruel/clone_header new file mode 100755 index 0000000000000000000000000000000000000000..4f0676a586327fbe1b5e0e79b5ab0b79e2e47bbe GIT binary patch literal 435248 zcmeFa4Om=foxlG;AfYKuX-iw$(t6rLLs~+nEp2I}Eon$fDoMtKTH2<C2{UA9hGEJK z38_@sl~t;&qK%45Eoxl(X<SipMP)78s9B{JS8Q=NtLUQQ8n>wEVvY8Hf1l@^VP-fq z*|+O|U3+=b;Xdc{yuZJlnZ9vL_15&XH050B>hnrJSyyVQ!Zw(DYNcaERj4wxn!o3% z93^T7p9<TwLRtFGp(ouKILxx`Nf_d8@v`h&!{JMGER;in(wC^2a%UNHWyjND&!yT_ zIZSk?Q&3^2qv9IvpR#$p)I2&|;;z$n*J-=6S$ny0*Hv|;xv}H9iYJ}EWx_L-7ynk+ zx4V84^S_e2c>HV!b}lHd?XLG><5BO!mcDV=f0fZ4I(h$E+}SBJ7S8amxR+1FF5gJH zrCmB4)lW(}bi$MJ@8Toz-W2w?wlw+G72LPpaJ?V=m#>>|x}og)(p&z^?j?S?d{*$e zYsWq6rroRGb5+^x#h=X0c>gnRdNj8EVA}a^p5N^|;ImrxJ{)L>SuKrLTX=uC<zU!~ z9f|~0qpv9x@LRDKE70B$^o92YEX)F}(O^?VEH`3o`2*30)}~0TrFEm#=(h^}R#Q}4 z^ILllS%p!{(_R>AFSZW)qIeS_9OLC)<5`j|Rc?oWyQ;ara@#Joqs5B0H3UsaEv=Tn zsTB`eS`Vo$?STfXv8A=y7qbGbtu3u;tHdVkYYq_M7Aw>e-Y3p|;TC^D?TiFkeX*vN zuoZ5J;bkP!(kk&lh_(8{QF*iiVSl8hDI8Oq{QlNJG-~-mtpT6^kku5n+M)qfv&5z( z)v?sj5)SLcslw<+Nq6C+ZPxA9?%mt&-%_`IQ?1(G5>^jAaOl1Vl`pIU?Xf^OR!8PH zg;e>&fo5MQ)Y5SKMr(_tMM7+D3pe{BMeD?`)vDRoUKeg_u8RfzQ59|swMBygZlWRI z-l%%L^0fxSzGz8!Z&Ns`9&J<Mx@e>+T<2?eL`7r1ShU2p;3*+tO@4w_t$}^fR6}uI z;wcIHb!?U~ZD?yX!RoxmpN5tLf!0HH(WXZOR$<#}MZsOGS|Wk)Mymmj$mWAhv7oiH zZu71!n`$=ftgG4X-MX!Mi?z2cMtNAxQbIM9S+g&E$ci3n-rEvtYOq30;r&#Za6?O5 zI2LFP_zBAAS3BEc>suPvQx;T5GQH6{;0v_{6a^P(<yJMf#RBb;Ib@fvu60quZwL~l zSV^qG*ANV-=)piBvLyM&ux{_67|x@lE9|FyTd7r4nueC<2vulrh<4$R`bb<e<ztyP zB88-?E9)w^+<DJka&yn_Ep^p9H&xc%xoy+#gof1Y+aIv*ZEcDLysa%U(-^`n;e#kX zyq1D*3OB`?d?9i(Ui~AjEe%viwJQ*9X=`neM(6WWU-nxETiQZY;|2;vRR<0PLe>pd z&=;a^t8HO&qsbp{yGlZAX=n-Q<mj3wL6WO#UqEVOYfI!%VtSgwjd;>1@j0Zt+f_}l zO>K&_5~~BML_JJXRgVT*Ta>OWK7x!WUH_=JQkhj@A(_(b^B-u6N`5BS!>Cw_*R&w% z3AENxkD}5@r50`9R9(Gub6w4@O`Er<%D{mpk}c)nv!XF7Li1vBO!MEN&1$#o*jB51 z0s1ZaFx_*g-Gpsi+qG3Ee0!jop2Ujys7OJ2r=YLZe^83sco+^Gv~xpxKqry9S(_Si zcUzds_UNOB>imHQ-=VtZsMD<XwYD5o8={Az8}<iU!-3F-=H?BipOJPLs|&X63rMgV zXtfl9Uo}M93fl@BbPcDEOX?A*wvs1%eGU8Zw=L|a6Ofi*QRd=Xh&E$7w5S!ajatH? zL#vgvj;4Jo($>09iaF+M3QH?$3)jUPON7uU-H1+yiZ|`Lx;pF5z&>iD(>0On)c$C! zsUd1%>Uds4)oiQ_HSKK+A8ZQyl{}XC8e&ZcsJ(5?djqX9P?%V)*KN<Dj##>{XbAbD zQZowM?EdS4!svq=SEtq#J8n9Uo5K4xs>Eix(dqg;B~H7)CrZxh9{2&1b<qc{@@Q*A zQ#1PQR=J-<=v$Rs>s*_<Kd`rLUx{w;YS)(CJF8z;SG99@P2J|5JGQF2I#axL4bm3t zXhrm{YTJgLb`9D_=eN%nvZ8^&ezn<W1|MI8G-axBI1upDH%6&;fGlkFF|O?h#16K! 
z?zfm(L?Y<+u%c@KwTy8{GL&!vF|~o7BNFn(>d18lxY(gOY}5J0qH4457o@$=ezh;6 z%YbP)maVG_qgSb#wm{VHJEV5|Vrsjum2T3mLv;;QGFpw(!O;ze{Nc4UKxy1!!T`5k zw*)$F9oJ~Y*PwIW6iQvlCl$}G4Iwk+>2CR;uPLUx<Ip~ddt+VH*BqfMdQ|#&Q%yIj z2;<Z0$5yNGfd<vk>}NQjWwz6d>g*@Aw}tfSvLuy(5S=wONd7%&wMG2WBdu1Oc2w@# zxs9HA$L_lAJ1e(H=e)aW=e>2C?x}s9bg<2ohV%gBDpMKe7pCK25<qW6jd!|(?QNl0 zQ?Mms`C_p^b3}UB_?Shhv_{o#DnWo*RD7<XryJq*QLQJ;L7XWA4T03}qcEdt!vUrr zEv+s^8Ex7Z-VmYVYSMjUTR1*_QH5KXQG|CqsP612^mi0SmFM~!ZY;g&=3B~cHLb{L zD@-5ku8PiCcgXgm?i>6}S(K#<#J883kiIhop>LOTJzCX0$70tPYl-<nbyBi*%t2(n z;@qe7{B87S&C(y)^M;njMk=#ac(vzxPpCa!Vppn4U(C0!)z@tDbsIG#VIcj_&Pdy2 zS{v}6;XKqHd^T5Y+5DP1mv*jGaF0D#*24u8U$v=qUt2RhGL1PJOPqzus3oJ34D*3j zwbM+unLM^K1jPc3>jKMX&pXXr8~=1yL7#{7hL+Y=a*o=W(qY`&%1oD9>~CsoHq&<I zpN0O7mUOOEU@D<Bbh{m^#z-rje<OoJtU+zsy?NWVc!Sp+Xq|LrN#jp^MoNQfZ;BPI zTg`}n(03@RbbnsgvNzOppsA%Tsylxf3-&cMn0veF>n2XW*Ve3Ce|&06ccBLgyK9Il ziReb_^}2es(b5|N`c}K&=WlC{IK6#X#!+8Xy|&F4j*)H|Pwa8S^hK$Ih#9BjwVg>; zV+%t~ye-QRPcKeuPg&GZuElgaN$Zw)T_XN%bcctQu6NWnnPfN7Ew$=GK0tR-)k3YH z(EZdzr^puv3tczVdKGQ(F{fSIyp~>de;_Io<Y*vF#PlS3?X_!Z1R+)<x*JyXHOw9A zbap6TTdYL|A}w^{As^EcU6eZW%mB=KK=PZ>pYAu*(z3rT;&c#ZMk6z1Kb?^<*=Mu9 zNSW=JLMREx%g-v3wU3IbP#aS==4ma5sIbPGrMm(0oTZV>wwjpOGk;;($zaTMpXoZY zg<><u3$t$1Yinz<kGK0*-4A#Jt<6nQ-5-+AO=PY;Lu7oTJ2jK!y=~D$3B4{IfK0FB zJ??AU=&@vWtjA%e1KtpbHEf6m7;W0xm+zDHnnZ2z2M%m7z59kx%Rc`>wVNs7y~O73 zwotrTEjNxf6BAhhSi7#fH$IlyQzbp3O2cQBMhn)<3o{k*>1I(v&loK+v&Y5|_wmq7 zBW??uvbW3KhzxI1BBp6D(8yAE>C)34-X-vw-IWZ#dZFkn3C)zUWU+clRcW^;{JPr6 zaPCswngh%Q{He_{WeiCfDcqZ~Q`=;r+8TI-`3v(MXXT*$vAuQDk1mcUX7a|wKCI}Q z8dSoRj+Q0;bbQxCuOzLth0(ReY&Argo0AQydfT0w&B~5-yk3;-^ww<Kxns9tf#Tg% zQx#vQQ6E^QP*EFWdLg0bYe{oBGrrmtE2*w6KbLFNeKixbwpN*`u>@n!U{l+^wrEVX z)zOyjY@$Cr<h3V(s<AZ?uva2cwPV93<;=1cXZ?E89`Nl~W|Xg!1q(fQv-Au5oY|Nu zocM&{&bFqIf4z)+76mQi3b_{}gG_%B3r5^aH*D4)cHe8cUF+AcU#&bP9##T+Q$cn- z!Y%9NK4Hi|gS9giol#0R7u`79*e39|G&8eh1A>l$Ih^ibS_4d|=vMWjJHAr3Yg59O zmfB<{d(s-HC$c3WeTZx@nazsC$$Jx%|J_W20@jYLyUjSlrkS64ZSwY0q=lYK7W@7- z*{ia4ZC$^=DHKvHI3IY>9;Zc5oM^L`CYutv`Z!b8c$>F-m?%3N{yGXp-?W7Stwl0; zy}r=@*gCxvWBaYMXG=%Y0#!E(S)(?PRLwwLv#*u)O;Q(e&*J2hjhZMEVY7Ue@pm;X z%XBcs_R6z_u{7k|CyeKM*C4Uy*~%_h8w+o;R`(?hv5=H~tfjeWZ;?z2imj585+)vD zJ*kvNX=agTsws<)jj5Fn=RVc|K02d_R3;V;GWVx3@7lEMzB=7*b<3{Wx$`x<>$dFJ zbZ0fI1A7leR!-4F;f5fyE7B&rDa_oQ8JJ1AY;G|d(<?1@GpSH9pX@5e_YWI=%}q=W zv|sTpM>a*J<Cl#XwcE}@yXDGaPcPGC&oZjEwl=A|ec?7)7Vc!%li^Vk$x4%2RTm4% zG*5P1oX$_T9W(J_(Z^0$!jxQ3SmXUOmA|p6eQ|DO=4PiUk-dkcG$;hSmo{^^aPl6T zZDytmo)UX|VXrS5SW-Z;S41DKcQNd(NOsZeI;x`kn<DF}&`d^UW0ze&dcoah=ZA%O zozn%gmna*>DJ@K<2ee$j+T2QGTW|Vh-Fz1pZR7#FieXmcvQHV-r4(V~F`%e^C8V-7 z)DStubcGoWT`F6Tg*9q-K=*P9Wzfv7iZXL#yFavK3T0+wx>>2k2@?A;p-@!4&g^qJ zv)PnU(9D7#P!Fo@deBUrK;ga9H%KomOG9=NNQg4q5w05<<SbK9>A3XHoAR(lpf{|= z3#lFKBDTe3Br{uRQMRNb!4@jDwYXI%3$=FU%&iH%6jHq2GW}bp_ZTDzrmkY9n~enH zXPZ%P{}-)q)D?NXJq2oMT_<~xvcy+ryV2gKO{|ieWNB&@=`K(<IvO{|TH6BaWDmc| z-s%+9YzJFKY`R$3JrGz|vf7#DJ`&R2o848va+{_&>sH-h%zW1#z@+u)>TOS*?Ab5X zm|CnCAZ>a9V^-Ens=M9{DRKJrB{LzrHP{uzQ^HVaZ!Rs)W@K@^tH#>umlmP!Vh`$d zrXQo!^f8HD&zS_<OG#=kTL*e`h|!6$p_R1jsx7<o@$%5yi4h;m6E-1aCuDD1BaMU3 zh2F1WgG`$?_@p~zYS0)+xMhj4ucRSDn~()qVWGXn#SYOysZe2SlYK-+U#90SySCq4 zDitFZHMv+vu13|;4Ti*mXC<=Pdyi=a7S)6WPtiKvtHx_)iM89zMcDe)eUFTvg>96) zS;iy{Vy3aS2G+-S+GTjxb7UET8iLGo<itgWl_ZDuhOsk4CV8?sW)EC42d3jFx=Js= z;v<&Snxvsf3(332lu%>I;=JoIXCNi`)c(|7F{j9;1F>gVDQ2eLIrDeDTjtt9I9qHo z_cCW63G--b-hXYWtT$bf4+7(hW_uZ}{4H(FhSr<qH<gb*GVxeZjuf4msu~&5Lo$wg zckPrjB$<M5s;=C*ebcrb(s{{<pU`2k=_>s?=Rpl}aF94#J7gawhFA~<7=$?FD6!Xl zYQr+Sst2NBd;5g5kXF5Kx;3y@_D!iSvMpM~o+v$_J~!RSjDRV7SSnYzjj4h@T%=V= zx0O_{=!qLG#v8Fo1B{xb{{8@4_=)YnsoDoOSf*C%s&4HK9O9sg!Hpr01-PFPI?+{P 
z-`KQ|39+8KuU{{ISdrkNXcO}oYq5dpZH5-V-(Ha8BPpU(uo1P2!Yx*ijjUFCol1=F zkp7j5uXeSC-B$;?db{+srlK;!m;=d8_P#P(SL9-`WvaUU;4~rqNHk<tC)U;#%e+f_ z;Mmmd)_#cZCW~F_Fca^6>;l@cUvemw)Zgodvr)ypS8XxYU2<%*c+|<Lny?>hnF9sO zS&T~7nZrUhjhV|fFl=#%*czyFb}#E>!c3`~8iQZXP7~9{gY-kP<E?iCrE2KIOpBGj zk|T&s)toGDVGSy6VT+tKsvX;QoAF@l&RsRzw%(_<aY#|P)hetZ`R=O)y3_b-Azp6_ z^@>g<^(xLZO@`Q}eI=&4>tR?=BlT(Ec3*o_v+P3Z^C@%O!+s@01AA-aeQIy!jCtlb z`TnNJCe9z^<mi9CuUXO$DcKOHYmr$KIjGNV>vrB#<GrWGIeFAaJj^?#@;B?FLUpI? zs?Ztf!$?^z%5j05NQ)hcl4D8P(>41~QThdv!>I)mFX@FCFJeKCA<YSos+Ps1?1b$N z1Y|8ut-&+BY^`&S0c6mwb4Kt(Y}_@r&^awR?FsWn!96uw*O$pl1~SglE4Au%gVS#G zD#$qz*3+bjuZaPkiFv}Ifp2evKhU@@*!1xJP;<B?@<?km)^^}v`=Lk8V$q~LDkqyP z{WdTVt5#CYZi9V}DNQ}$G|<@?vUd{9Mu6UhkmZRUaTRNSYYY9C+d-q=7qDxbc<rnu zonU06Sxy?HJK((t9n8{+mpOP!e6c|v%{Gz_d!J9bXT?^5Oo62+H`Ubb;^1iKcC+j* zrkD=s^DgOa^o|pK2t5k>eR_*tZDUBa#}j!ug@H(pH(KeHsW6E%=*{d1a3Jh-u6B6( zWd?O8XS$870L=hs-fh7vIXuwAntf8GN|=vPUs!+XsjuZL5pUuL1PN;)hQEe@oG-i7 zD#fy9$EF=>({|alluE-IQ&EL=H{9wwR8$x#mPG15rybN1hn(%cki?!m(lyPDY;5qv z52s8$W13?EW1$u?>sGVLuixC@^^CCTNH}OPosL;eCC$6d?qtf8CuN)3)il1X6_y@- zN!Mh*o)F((WIW-ZTCbs`Df_Iw30;AbnaH)u%kgO-By)52zU_CSq$=oRQ7aT^)LnpS zL!xiqy-hleHVzTk`-snF<43@B8~Uh#&WA;oUWn>>gbA&1+s4A}8w+<^g%R0u4x3|K zHl7ceT_T;Kl&3wq+F7Xc!pTpkdCHX3IZ=^&-Ms9Xrk&f5n1dE)#VUE0aK2ARp3u#c zo3<-^tJYZ`nT4&g^udnglU|RSQ(}9jW=80QR^}{ZHZQJLNfT>M*+&U5n_`7S(d_W; zpu45zZQZtGTg`n*Gay;P7nWShb_su0J3FIw&fGwcUrV+~y>ce3S2c;%N9kpuJu{J0 zX?49GKJMGGY5TU#b@y)B^cs5$=|<8ZrRH^By79L%vbCCJON?C{_Ucp(>+V*D_=p}y zoNhJo1Y((q=;9%fyy?MPK&|vLdgfMG*sy{3ZumS9AMwrhf2{S8)Y+u0iJ$E`hyK18 zEqA}{@|f&NXtbN|++4YZ`2@MCtD%0)%e-->kLK)iUHk0}IRe!=ySQGrhyQrK>o&He zyN(YJGCRQZkq~F5%=@V>|BJ68I%^T;0y}qYyKCDHGm)y%FHJ~I)rU#D*acdzb6l@c z%rLlQ`?!JKJM%UoyLfg#APdXA^zTxc0*jSG`jW-td>J<A%Jrsjki$Z?{$7h0m3hx7 zAlveG&NATHIT`Og89JPogPf_YJpq=klox)a1%&9y6W{&Ok`<;!$csB?{MO9soqF>h zAJ?ycfPK{m^^Sd!Jp*7qZp-mW@DdZ6&Fc6m!20!zJAJZfmnWCQKSmPs`dt&%;Sg(d zHU?KK=LDWki?NV&m^a?o9+mfU>-5Qwy+5g62Gi#|q`Tb=h<ezk#5dhjxvfU+sNStN zg?F)eALYE4!`lY?MIQQd4m!-r$?lMJ7p0qRyoDdsQ+e~&8Cx*+YaevHPFC2y(&Q5s z4ZO{1w!G}2*BN$CJK`pco=U$EYOg`fq+Ivm_C9ao*u3?Ob}W)>n~Vv%7fjr<vAg(i zP}(V5KkEzq^ut=EzmIRN(90!upfVK5+M$H;S0);i8T;zY2AA{$&f<V>RKMhEhvz9V z2C2R#oC>m3ah?==_VQwKO*Lg2*r#94;HAiYVSPuNlp)EPpZi#$AEa=Y0N$XR&+3G3 z$L;Vb@vY5-lc)G=VS2DI<D9(aXR#UC+Q`<};vTddDDmQ`K6JQ|S*Ct*Q!fn6akw)f zH50@w&Rf`xGGF!5=4FtQK7=A*UqohlualQy7spk-8_S$t-uyGONU5J}ESr~}WItHH zDZR^H%vkI?ByX*$)|L{bUrrzt-Y;0}iS_%hvi)uMIl6PB&5+q-UsM%I`&!3L=RiP? 
z_VrAC$xdI?WjtcslygOM{;cQfOBWs+=!Rvx*sg#2sAD68lWgVllA3H7aR$KRD{-P` z2K2k-eQbS>tCx7v0kHB*IT@<dulQ}dt4{ASZF4{UG3ScB<z`;{v1jufI7+5ajT+m+ z4XTn}Lcg=F^Hp9QJ0vdy>aJh5TYZg0)#WvGwyWA=dIFd<_i5y1GuE8)9zH7$Zre7R z*Z1pMH`3_Yn~5!M^=^!M7|3N^(aI?(hZii{q$nx_4YL2wTfctEX#HBCnQWVVP#JE_ zvWZi`cIUm3MvjNgu{gm}seBx##t)b6SNNo{>iv=UJWJ2UmD3F*El6E^hQ(1W#n|Z) z?4Dy8Q|H}yj+5m)IT#2<^kBS=ZG7hd>8>p`QuK9la^x(EqoI}<$G7AbYxqbLuMX?J z!+EPuck9W+b-d$uJLglcp&`k49$EwP-3nSC6IS-$^$P;tox8WyR^LYz+qG-U<{ERn zRA*k=kgR2^#LOP`Jln}KC4;%MByt9JXR4hrIk0<5vP*i~czNuIzmBVC+{qL6l+28; zycpo^(M$bzcd}%Em&zt9AHBuS!IgfiJ;3bUEOMQOBE?{@isU6(+H@%3lYLa3sqrn7 z*1&<5{bZMUO^zlnF9NZ6Hg(!=3x{+UuV<}{hP-4cMZv08zqv{;wRmrzd9a8pHE#{* z{!(6$m6t6Trk%mWEMsI(nYRt-*Vjgq#ut`ttf%?DiHtKGOmkKjA8>SVyyK)(#cYRT zW4^ya>Nt8KQo11$r#tnz3)^7w!X3kcyhOkYrtH?rmWzH9+HPKTW^Ny8;WXJ)0hY4* zn+tMkY?fB`{zskO9x<-%`(-8u&KZ2l(Yvf3IImf<R@R$HDHX_RtMLVNOElr_eonXe zj{}<Zg`OMOm*pi-`W4Q#<1g-T;Gl;WGQ$iw9B#|IYs-CA$?3$Mt<V}??Dy@X8(!vH zJJOPwnc3gik+v$;%2|o6UoSnR=_-kV{Vf)DTiA&)?>JD2bk4Iq9$9C%LGwx;h2~ez za@pR`i01;^6=pV*I7gA`)w2EVI(woM?*vjZS{HGAHsx*@558H>@TAh)c}mS#e4QG7 zwQ37DvA@O4S?!|MwKSWyC36_FqM<^{>sDJC*!1?3<YSY(yDbwc6261iHuN}WJd!<6 zXTOtnZ<?6BA+osPu{fo0`hLD7ASaLdB?r?F%loy?A~orK7k04OQuj66Y0@)~gf*wL zuOV-N$T=qCnVxr=epIoJ3$d}UcO=ZgxNcoEE56sEoIOcr!zA^E*G>AYF?A|x?{+#b zTBnwY-5lwgojI_)%$HpE8OHe*OuU@*&Y-M97Z;TAam$%~EKVx5V_Bd^)t#;Mkh~(m zaHe**Hp@4u>|t%2{oMq4uZVGpLYJ*@=i3Lm4`V3g?12tWx@`Le<3{-cir#|OZ%H<8 zX!LKOWOz-*?7l9(51!b!Fx7B5;9^PF>`OkZ+7<A($s0(C$GC|HTVdHHSINWhO(oZt zc-DK?@4NZ>K;vG&?-tLkW&Xg;je(m>8*aS$2LFvb-%wh1bLowqG6a9AGWADEeV4Y! zNYQAQVPUjH|Cd^%@jVNt8Onb8M!o~Zy0DS2zR4FKTAgmp^>{VDD4@Q|x}c>|wp^uP z7k9Xt`OY7wak^pY$%ix_-csZ3BiUb;g?8e%ni}<qsWU1iEOF%JT1%>(nT*HR9kfq- zk|)Pt^2HQ+n<!qB=ntJQ({akl+h2#AK_<w%!clcrJh$r1`$bOX)iHqWm#=%t3NWR* z%Unk4x5cbPcMQ%#o@!-Z_BW25$7HTk=U<q8*%f(Lo?CF%CY68X`PX@_x#{AK=X_DU zum6b;f9k2vyytVh&#KRUVd&|vj(+X$j(@UF-M*>puDdJmef9k}tydRalJlyo@(V90 zIw#rJrBBZHkS`cN=X+v$P2Xi7ceF*@DN5(dZ(KB!yR<v!x}SNtyW;Ji{@nEC_qToV zhhJw%p(C|!yunO3_Q&ma#_tktUVdY*^W>5%FLSBRbB&DzbI!uO881Ed<VEWbmrD#K zu|oMyuu#6@EOfpnE%$O=!bk4q8|R-rUjMeDw|@POob%4VAos$HE*3qT&mRsw<-MTt zyLZU*BY5-(eAm-2zkJc_e)hpnt@vvB4fo%5kC?yvrh&sRzx<rGCsA|4xalXKx=E>N zt;_e5%{BK%F+YosareVJUd8>_`S>=sG<@tW|9FR__2rjewDWgC^FqwCh=Z7Hr|HSJ za``fv*@knD%nq@!>+{#im@SJ<-V|Y;C6c16*I&n?+RDiZ=T3a%&e=0c672^WH<^=C zcNzJeMV@hH+o<Q;Mr<mtl`RBc^Ip!^<$DmRhW?g7)cNMr!9>pzZQ_Yrc#Oz$l(bL3 zg=i)C99^qad>5@quJ}{@raLG3%7|aKzDZwcE|;-zr@qo%E-|+#kN<zKwDc7jnJZUi zowfSx>~q$f>r~#nOD?@E|MDwdWnFpI)dhvutSwr1?RCZLOExT?Ky7?=`E9q~QL+4i z=L35le5kH|dGptIJobh+cK*d*zNzc4-u#yC$KU$4p1*$kJ9^*wt|tz^`#taNd*Ay% z(EsEIpBnhkhd*-Uqkr?U!T<O1PaOT^-+pT7)1Ue5@aO*S^CMsQ;+IDM{>x8~edVjq z9Q)eWzj6E@zWJ^3XTSa2#CQJjyOZC0{)MUU|I<I8_?I92aQa6-{@0nG{PbtDKmWI1 zoc!g#zc}}+U;k$Qx4-+3h2OvQhf{z2)5|L3!gDUX@Iu9Z6KZGL|NcMa8p<TC-3fc~ zZf9DW`mxr(!e8T3YRlQ`*0hWG$iFL9liHEKBfUlc-I4x`<}SX`z9aps>O%e=bg5wC zV)36ZZAbc#(*Bfof7+YVI=MIhI11wKUi+y8&T6wC#BDe>#l0u}W!wE5)XQma!R;gJ z&FO7t)uxxFzm2Ec(jQkp*DfEyCogfjH0@H-B!3;=rd6e`LY2NP^`Eip{2jyAOKL`q z>3@!Ti}vxmw!iuKeXS1RI{nwNB^39k)K6^p8@Rg_<sLpo>60oa?bX~}o_3QmF6Hk- z>hjAk&*eXJsrB{^5QD|GVV}+}3iinl1Ixv0@<^qO7czD{#z%Ed-*V`gO`9(cXMXN; zBTsDo?eFjS`HG!o+i$qs^ZTlUe|dlT)6qBn%gzhiMjxMg?`Qt8zx>9J4Sw(H-)+0~ zwvWwM{_)(8-tgpmd!Ebs;sb@7TaNmRRvuq1UMe+rhhag5;eCb`?=b9s#_-_<&Cafp zO^R#c{07ag7v8HGc=lPLRX31+C!9S{b&K%Bhc9i06+h|sd>Hm#acuN?SbEKG_h)Z* z(xV2Z{_5V+1Dn<H9s8QU@`=srws+Rg)c$C5MbGEjFaOkqmEISB{QS(Nuc>_3_qMNm zZmzR3toSlGo-4IFO}IKu@f_6u>eZ7<-L!%HyK>D{E7qKsd-0{0TUTGRu6V-@H<i8m z_Dz*{-M!trTfOeSJ#~8n!TsS!V(Q?dk9EH3&5s|JtLN}L-lh68zN!9a{(Us=&)WFi zH#ufn`Y$=WH*4EpQg7D(9J4F5`h0><#^iCWygfk|_g`1vvE84`@AO=YauHv7^Qafp 
z->U+i-c8O;XgA{iN=2S+T(kPg)lWWoy*+eYa><kClB`zBeptjB=XN=B(wWALXa%#Q zOlCx@R2D6DHN8?c^QJZGT>0Ug)f-=%acNy{b*;Lh;G^Qv<Hk3)zsCGLG5Up@xzXb0 zXVwma=c%mot-LIC`3K^D23N#y{}VT<?!^;nzw-M&(%58<w}y{I@B%(E$C5UFDW7~k zGB3N5&((ac;ZwxtT0ZOfP&%A{({lKz%++MzDzzf#!n_sA^`~aUmdY+TKTlojx#E%) zSNyO3(}AV@S)~%gOffv8N5{Rwr!#V})KZ$F@1_d!?-QCY@b@MC+dX1gS0}hGU7nP> zND7PVgiMyXX6<?Z5h)<9gG>YE`syu)KYfj1x&3QIulft~{NoQBPW^-7&fginvxtt0 z>u2?b?|#bg#Eju7`-iVQT_!?9KQR1sg~@<@AGb3iUsfAR4Zhp(Z5Nw}KldTSzS5Q2 z{HzJXJ7Q7;mAdyz^NU*hUod><drU%q_>dV2_Ib|M_R7wdnyS=8FB<;z#5wwT>bp{V zmAZA(tc%wCz+_PEuXK}B7q5|8tyER1S$+JhL29^CC;Knb-1)<cH8ZZ05sj<UWb6$; zxJ*C3ZHxJVtcB0XuRJLg*kEaX>C;zg{%g6k45gl#DA25EmR6$FZRbl%QL6vxb(*)e zU8nh{8>Hp%yHX1!nl)edXg>9U8#LS6b!$>Xdv4axg;iymgBv$$o?BY3d0px4n%~}7 zp?UnSJ2iiPf2F31ZPlE5SCwYP=kL}`|53H(&(Gbd`Sj-3YHsh?t@*$g?$P}8OZRGC zSaHARj>q?CUiJJ#n)h7e(>!{pLGu^S1vH;83TkfXdRX&IKMHAX+|;5O`S2r}muJQ_ zGn)=*{_NRy&9~h6dd(y6)3XciJN5Gejen`xKHjC7cf(sWUwY5unm>N&ZJO_R@a>vk z`*N>l<rPn8PP|^m7TVPd@72upykB$ooBK6C{{0VX{>!=#X@2^^M>PNK>5ppe$Q;x> zvi0MdpLx?!&5F^#)x2nKNb`?Zd{*=6s?TYzZT`Hb>iL4^MIZf=<}D+CuX+8~p4OcF z)>ky=zV(dey<hv9=0AP@8=5;mVjAvS-|{W}T<?EY^Aoo|r@496cQn(U{YTBOzI9Tw zq2hV-{L2?KpMKZ(HOn{rvu6G?Cp0T}{6O>l6F<~kb@z`nV_*ES=CfDMXsXViX#QdH zr<z07tmXv`KiACb|2NIgf9n^Tv%fm2`LQ+suKBAgU)22ib#t0)i+`ng$<@Es{7}ws zG`}%#*!SGLezrdJTg`CO?=%k={fFk@4-CiNwxFMXx8e7i`@Uu9dFUnm9Gf+~_u)V2 zXEkHkch4#PyyI!Zomc)*KR@?+!xhgNUVYi0wEl3tVct`Q;TH^t&U#s!uf5su@&^s4 z-(dL62MjM6HoWb)VfU2b13xjm_(j93e`omfmkgi(qv8E*0m=0Xo7|?!v-wE|>j8V$ z`9Zeb*(zPF9xw(ETDF0s+x8vx0d4)k)%et%min)l*;04fN^O$5s*&fnt0z|e@BdS- zp&V9g<ND&=6DxnFKBx5+X-}+tMBhm+J<j0s_)7C{@orIJ?t_iImFC~#-J-(sAyYo) z-{Re((v==q`9J@Uk%yQWe<dYy=RBMHEf-#J;WZmdFFxynqH8ZnSKG}vou0lb{aowS zYSpUSwy#>H)}MXZWvjMxUA!vumUGTJ>%6PZI!o#89kvP6)A^iz_IU-DRI0N?L=(H_ zy5f#o?=*gD2j^nGYL(bINB%ISUc3lMWhP(8ZPN^!S&r=-`^Ir^(=m69o!_%GwH-IQ z)-V1Br<LMgr~T~E@!sRub8c*ZQvIuN?AZ};o*g>QypBER#xY2VU%g|`4!6^OcId<} z;@ER;mhsPV=j4lJyL0F`8?bd{$FI|VcIbpVX6r1*^i^R$J9PY;wsmF4uhV{Z==kU4 zrxU*%+nz(mzY6EZYb;dQ_8dCKoz9KNSg>q+4jtn$=O$t-RM_?$I>zz(YDdVaZw?)M zolb)6_*u4p4jp@Rk>;}FXM5`;Kb{?(j=hMn?)c*{?w`|8Y>%8rhjDxKM*8v^EEjHk z^WS6aIq7s5_s>kr`C-)4_~$sZ19Y;<Vcb6%QsnX)IL;i#?PVKtr~WzVbQrg{#<pk2 z&+*4$+@2jTrvMy1mGOm_9X}^LhmL){D3+i-CVVHI4&(OnljG<3<1lW|vhCS)^i=8v zw^+5~=Y;3bvG2sop`)i#ndL;hd>wxrI`-|@ogPj;Teej@e^iD2Y{y&G+s_f3oi+z- zj@g{H>9m*%J4T&0$DA12F^)G-yJ5u}n3Lm917Wz-ms7!=8sk(@rvf?^%t=_T?Jm!z zQ^1z}Tx8QpxRb#Zw*Kb~urs3G=AUbToe`Zj2W*bnoVNMrCSbSqKlcIu_c}q-y+)i~ z*gPs_*>&Gzv%;p=X1&db%}$#GHpgsE+x&AI`g0pHRpHN_$p5`g#FT8O-PQ+ej@g{H z`R6|1&wYTY3jYWDfVA}V6)Q3_GBa1MT(v4I>nyI-TxWA-bDd*fYwYV>`?6mE%Q-LQ zIzQ#QAmv(oX~f*|`q3+17oFz1_%v7EX|7BDo7biP&FixN=9T~7yjHusyN8sMopyd& zTCNygv}VzYy0Vy8C~vtV=dKr?KmB#NSh-r;Q}Xv!&p&@<_9d3nt0pmQKYjl~ck_4g z&Zbz&SjN3M1yri;CU^I5wcU4I>u%m4o^xH3XKAMXdD3=2wM_U?+ksW;?mn36;WE!@ zm#uiU{6C6zvi&g65#R{-1%Id16zm$o(bc?h1oL1&^g!<y_`nQ~27@pccEXBL&eLH8 zj=^>~1?S)bEaC_;uRtmPm{Mi1@2j|nSsWSm!ZJ7nJD?m-A3Kg)IKWY2P9bmQeiJ{S z;z+Rq=EGVz3ESb(ar}fMa0nK0<TwG#;VeAK(PPdv#Cw8tz)n~T$6-74e3!6b?j(M| z2%LhU?-Ax&@&o3<F<1s8994SZ363g*&@;tp6s&*)Fz=u62cCjcV*bzM0~~~TY{OKa zz%Cqx^|1I~h!YIL0T}s#QpaJ&56K58N1b`=@DG;3tZBlB`7j8}U?;4GeQ@wc#1+oM zY3Tj2(l5h|!=h`kGei1eKa9Wu*bOJ)AUpxb;0&CCC*d5NgIU+%$4~gc&Y$8ptoj-L zz{9W?_RiuS&i|Y^z@xt){xs-gCrLNV{Uzy!MX((nh66B*qu1l`6kLD<9Kq(Tr(VpF zhp?EV)_R!rTf%@da0to~>lEyV3vdDEm7p(>H*gr%!VwsOW3U_M{sH%J98SRYQ=}0N z!JG|BSsbaB!E)$@wJ-=fVJAEc`(PJGt7CBfWzq{rnaE{%l&a*-QV;BdURa)<rXtV_ zyI~g`ggtN!W~@w8b1(;HT~9io2NtcuJ*<F17|cpjoiGObU?&`c3vdD!oP|HI8s_l2 zMj!OZYa6`m=Y^#(0)wy@&cPvAdp7Q2J)DJQ+04goBp=R6Qx+_MWw01}VHpg<O4td# zx%dNX;c=LAVVatTGkIw$uN0mC*IXIQ<;Bx_SbZ7!0H>@pH3BED#6HZvnt0!YIV^%h 
zunJ}r5FYG+y)d^BcW@X^!}@E|)B@~-IX6=lunZ2YB|NyNC{4A)A=n42*Wn(PU5k6@ zh3XdK3G-ktEQ3SP3nyR@&cRNYa~=Lb4;+GCcpOIHH0*^7a0up<;T{&jIamR6iqljr z^uP%8!fqIW1F#p4z#%vRC*UlcgS<woa@G@1=z$*Sg;g*D>tQc!heNOzPQXDp2ghJe zNt&919ykZRFl(c<5y~6(!ZJ7ny>J2s;T-IQIU7hn^uQtLg~wq8PQzZf0Eb}Ct8ovD z;2f-gIUdpvJum{jup36;0PKY$a0pJo2{;Sqpeo1x^`swqpa*(k6^y`o*bCd?5bT8$ za1hSHF_?1$>4zRT2fZ-sHrzuC_QEna1if$q2H_m+ggG~oe&~Ti&<l^l2%Lt!Z~+d% zoZE2^i{KorfH|e4A9`Q}dSN$=zya6`N8k{gfD>>Q&Ovnt?r$Rf&;vct3#(uR*27-d z4u@bboPdLH4vxW`n@K<Pz&YrJSrxd47VL#(a0q(g1PsDC*a>rPA^p$;hoBc8hY>gp zd*K2cf;pRT4~yU&tbjRXq#t@<1bSgNjKBfd3rFA(oPZN>7S2I+C+=@0{m=tF&<m?z z1lGe|*bawaFPwmba1M^aoQ<R(df*)N!mQ1>hZgLGWpD_3;RFoAIoJtvUQPO;2M$3m zJPspp8ur2kI0SPlaSw~&9ISvjd<&`;dSC>4VK<Dx0oV&i;1HaE6L1#JLA3?<w~>D6 zfgb3ERWJhUVJ~clL$DW4z(F_%$6(Iwq#t_V9Q4Aht+<C4?1g1;2zucJ48l3s33K?i zR3G%fA?St2VFXUYUbp~<V9s5*hedD>R=}JJ(hof_0==*sM&JPKg(GkXPQVE`3+JG! z!u=-F4?WNWy|4;KU_I=G?QjV8!U;GC=inI3xs&um51fNun6(Y}(1N|N3=TmroPa?% z2RmWTX3`Hma0q(gaTtNquoo`CA((SF?qLy}gB38RlJr9ljKC_`Ezeu<4-Uc!I1T4u z-d6m14f8%&1bbi=?Ak#(Vfjwt14FMRebCy4JD9hde5z&~t|43)yNCRNd9NcKupAD+ zd3YRV+)MnS7pm>d$)E*`Y6%Z^!wAg2pL~Uf;RtMh0CzBd5AJp_?mkF7;VI~av4?O6 z`(Q8ZsmC1*`X~ojzZZ8qu@8%&)j)W#$WOk*8rTgh0^|?$!sD>Ck$M4-!z?fM_E8_; zWRU!Yy-nl~tZJtGp%um*R4urJUYPY-<_FM%gAwux9)q=T=n?Ws%->JAu>B*X6V5@k zi}}w-i7zbr8`23EhKVPf_#ENEs=p&VnDu$w!Q2tT+fBTn1<Succ(Ch><d5j1lp~z@ zGUW)1pC*4`{#OXEhWz>}<q0ES!yO!l_0a!y;tAVfADsFI;le%Nq8`BF=dky;#B+l9 z!NYI>PJ9P@aA=D93io`Uc-=#M|C#i_$zNh0`d`F8T!3S+{nw-i4#5RD4s%{deT7AE z9##nFNf*rcE&jj-I0UN~$bVQ5)xFdYm<KywB3xMU2g(gr!CrWLO}ZKr{oHgl4gFBn zQr}@7EXYY$WpLqwbQP56i_%peJW_~#I0v)tV}5>3x+;RPwdtw~_7tV72%N4+S0k{X zGF{EW!!YZ9%(tW~5A<$LSGDjkjKHzG(p4`ku1Z&fV!o4bU=Lh?GrQ82^#JLtAsjdg z>tWYD>8cxc!vWX>N8rG{gbVxcBRzX~h921e0Pf@&w!;P32dxLm57-N*;V|Ua_-Svr zhh-1p9#+?-D=+MaK{yAy;dFhv8iF0ZbTtKY_u~E`+7rx!5$J)vunP9WdUyo3!x7jE zkHbMY1;=1P1Mz{yZ~+GW_*X}Gfpk>?z59p<9D?2O<bKi*2U~Clhv5R8fqC`xGyKnw zJ+Kzm!a>*$3;3dMFYJPYa2$@oDL4gZ;T+6sr5t^P2Q4@fB_41rMm*pG?3U+)!~^CY zA|7xOs=d^=N2xDx<n`nO^mgD59*5mx{uucPtv3)ZoQGKr)Xz5(9`wA4{D-|T2y@>; z`N65ji7%{rE9DM{VUC~n*+V^sbAL@egV}E<9M}N|p?XKUIu7^1Sr~yTfH};AC*Dmw z;S8*Y)gL6EU_Bg!5jX~Gp29sGgjtQ`11y3yA0oc+$VbyvCp-xUVCmoB4%WgcIP*!u z-AA~iln?A3Cp?((ZQ={-pCe!3i3#!(&cOv3{4U`Hd7dO3*#11_33Go!{NTh-@gJV} zIsP|M?_b1!cobH_QCJU;!FD(fd*Sc`@q-yJkuKPMiu`z(^!yR`aQIKShq-Ens)zEv za67C|TcHMEI~;*3eTABam8(~%oc-88dxa{4eQQ>zAgsw<q55DD4#5~a4(Bgkp%&oe zB`cH_BA@eDs4CbF>*2(!R;XUMaP<l`2D{g-P;;>0X6!dpPhc4=hh7+iLD&g9;Rx)5 z)3@Lro`O>_uMB@+Da;BJPWcMufk$pzp=#mi9i$V^!#=1gh&RlJ$6*beh4oOikS>@9 zW6%S~?<78OY|9E2fg{^U2lU^)f;LMzZYLgaVF&Ib#Ot-VgK9V7!+F>)&o#J%?e~z+ zaPmIFg~2`K^CPs+2T4Cv50PIm8@9uI*b58bAe^cvJXr1{zu`2rT50dF49-C>T!2AX zyqEXFVFV7sVR#%)25=8+8%b}JcCrumu!66!)xs*s&j_g??1kNM2oAytI0t88`@_UP zM!rK2%-B!(um(n8Pc!)ry>BI4I0I*4*V}N{rc`|o?%>J4#vMHRcG3;U-$A{Eo?glu z_QGkH^-l8n0P%qqtbW%DRRM=#J?wmf^uiuE0Q=wwJOU@+5S)dhP#vTl!8|w)J#Z3M z3EzW%qJNP1z`m#O4-UW)coa^+VK@tqeHj1RiQh-?4|)gj5Bi7j51xWSnDJ@qGt7p4 zunLaA;Fs|Sc0qNBav8%toc{{(fPK%9F4zwT-~b$fN8ki3`!?<#CEq7js3O??UGftS zO;QeU{CkumEPerZFl&nZgcXpla;upi5FR`Ut6=57;tqDhUO4+R$^mx%ocae(ous{W zke(N*H}LRpR;YS7@muV}>ILk>!Ix;CaQGDE{TSwdqP$>tT80Y3Q?MHjtjJJ9a0X7m z{)`MY2j^hU8>r8)2##cCs46%H>)~<O4kutQJOKyc0vv<cD>Kv-wBQ`{z^pftZfHR- zEQ7Vs3s1s!SiB1V;N0pAH3Cn;2{@mfq2!k?rq^VsyiU@89`4~RtcTs_XQ)omFUU{> zFbj^rY&ZdP;4I9A>M!WOU>+=h9;ov02WG=|7`v48!0K0#4medn{Qr`2E+qc2>l*R{ zo`gX-zm|Bzfg;?&{B;@XIJDq2EPxBJ2<E(r=W7WUp4dRRaHtgbu<Rzv16JHjxUd5* zz$3R1ZWrYQD_})MhKfM%U8EP*!Vze_hVWn+T!1I<qx}Ag{Dc*->jB~mEA~+SqQgNr z`VjGjQ*av2!v&aAM||H*d|?r+si(Z)C=9|pAO666H~=j;0t?^-EP}JJ6sosSzhDuZ zguU<t9E1yS4641P8)m~fSl&RoyI~{w4Lw212M)jic(RFbVA;dihm|nvaq<OPa1Shl 
z10nJm&NUNX7;Gn=@C2NKlZS}kTX}|ga0YtdTnFx;|FI0!38&xytbPOWg=26UcE6Eu z--bW12%dlya0b@G+D_sP$6+6=_zS{^!*B`~{U!Mg`(a)W`Ti#24ZC11T!8H`)P;LE z4M$-8U*TS!;R5u(8TWrpxUdZFc?<60By5MZ-MEKSa0GfE$2~j_7huI(asPJG1<PRR z+i(w$!gg5PgL~285G;qs;V7Ji6@N`Wyo3G%7D3P3DK}UK>tO|KhgGl_df_0fg=4TD zPQf6YgAth3OBW9<*a^#E?K_AU9ER;Mr<ZuaQ8*%eC*>mgyC~Opk}uGL^RNO|9H#!l z^7jxAnEhVd!4q&AcD;{q-$niY0QCS4_Y)rMdXjno^FK(uMTZlx=qch0`(e%#^fLpv zgWb>z)rV-WFa~?!_(up2F8mGQ!R(Kb|F9S49mf42>4bf-79NEWSp5HR4@==7EQe#T z5>CNtI0tKB*1IWxXhA<LgEb$g9AM=qXa}(LDCGzXK8bsn`?uuRd+0|$Mft&b=!G*w z<TIT7H1!`I{|x00J3ddmVQ7SK-b;92puA!A7l|({{}SZ_J)^`E7W_T+1m=F3eCi{d zr->JwhxKq~jCjGxuMjVI{Hw$Zjy^-Y-bX$hBVMroYs3roe4Ti~j&Bez7&=b8VEsRk z53uH&<mdZI@3$y-SUgTRu;^LBfu3&@j_A)3j_4DF^8w=d9m0XuKN1ei|1ROcyh*|l zevfd3&l8UD1;XivQ-mY@KJ`oZPn4JNpDAzQ3CdgeFO<)d@CVcr;SVWi;WYJ2_#?_! z_+#pq@L$Pi;SBlwLHHB$S@=`(S@<*ZSvX5R3x7^N!(ynOqW%3F>4BBMAU)7~lJvkm zza%{{`0u0##$F^nuyc;|43NLSB0aG0*Q5s?`3>oTL-V8uj{cVPz~jFoJs+a|{0HfQ za|@&gp87rMfjKV`KbZFi;s>o$#19tzk$Ui9%HdDc1L4b*k5FZ*Zed!c8WN^wswr5y zB2%f4ke-Z8RV2*JR9@lAO!mm&s!TN?%*s^9g=b~5XGT3-ovHGU(C*L9R29PPOcfNK zlc{=zYckb{@Z3x_EzHSOSsx|8&dXFDSap7;suf<4sXB$ZnQBmYVWyf8@?!)GLcZH? z{SEa$FH==P?<JWkBD^$H^$9P_RAa*YOf@UKJX7U-3|^6`%3#f_GF3gSwMf74O42X9 ziu4Pw&SY{23rN4Pko3bn*N}c#zn1h1i%7q49qAWdOZxvGypHq>i%CE9uP6O5SVH=R z8%V#<L;61suP6P&8%V$KM$!*MrKBH5ZX*4{n@PX$7SjI-SVsDVw~~J0M$!*suO|Jl zy`1z5ZzKIjsW)}R3wGC&emLAlykPAC(hse6;stwOPrN=!ed-|n@W^9?1Fdh6U+~m7 zNe7(!7WoBdo+ZEjmigdw#0$RCm1`TOJ-SP!bzG5lS@x^6lFwiHl+r(YM5)fcJm-Vy zPiDwHR{@`LK9!i)r*l(ln?JGgaMrt5zbE^>Yx;6lU7^2D<;S>#el{CAmxSBFCz7H| zm~`dJkG==S!+j!4p0t0%Z~=3+f8&?<H_qoU{cKKLHSwI`xkvh8v0sxI&&3#t`)tg6 z>34N{Z9ha8cZ29A2F5_*S&rU^-Y$|i!CxN5d=KVxn3svUKRN%zyaV$r`s?x)JS4h* zcQ$(_^!Jz-^iu|szx4_JJNf&*^WUG_pNFAI=Q#F@=>H;8NJ;)t9@>_P7u&=tBK?bK zPPy9oEAh%DyiUw3mob<49KpN_e^T>P+}B_}PX9BJ<eqxLv*c$C^X?<`H{#yDMDIZ# zqCc@55sH2UeFR<nb}qSRJ4}tDkBAsguPG%lKY@7-{mqn^r=(N-W!s85NOHXtcezp? 
z=n<f*lFLopm16G2T<)E#fSYRcYV;xeP0kl-4>{VZ_#eaGB>jypxfK7!pTn4sW8NVy z;_){5Cz4#^&nWgR`k!hM;`U7X#h()rAIv=xAGh}TzFeJ#j10=-DE)@`V_)KbK6(xP z$sW<-`D@}K{+DCkEB#NZxztb5xFnDK*z?m5nVPC)9sdqnFBd5#AH-ch=AF1BYVk|- zVf4ID(H|$&ABiv192FXJ&ku3O@UI3i*CkQCCg;hF52lOGu&VmdIhKiE;vd76s{M?e zFY)v*)-SqqmH%1lgSd($*AsES2XhbR?PBh@?_Vq*x^vZy`H@6(=|AP!zQms+*vlAp zua9DW4D*^-FrUHv2<GMDXG%U7AZ4s%?Spxiq(7;DTr5YiSB$-Z&zb&ti9Nf1i@h4` zW&fS&kC(G2@$SIh5cZ0>PmNbDQa|RSm^*bztcgBsyOVq8lKXM=qu3X}olEo?7yA;= zQ?`BSr<b-5T}!lI1*=$VexCjcYpL;+uqrW^PADkmF742dc@^dzV(!!fXPgv`OY)!x zd-4vb^yAC+_aC@O_xO^oQS6s}k@Dl-xkR5tFG$fPo+r_B&`ZT~N<76~HWg3YCDuRD z3lMT$^m6nJ7rh33{tL<RlJG<5)97W~r-m=((S!K{=CzXkW%5b<8OC1GmzaNZpJb0m zF`vYI0`qDykEheLE6L}1%uiu{O3YpRMaESz<C1t4oJClp^kJ^`gn17AsQ6ory^JqY z70#gi#NQ6=_dHEsCiYX?UBY;A1bZE0Bo%8(>1CcE@jQn4QOun<i#7581o|L)NQ7nl z7ke42sT<gfiM?g=UD8*Ky|S;6e`0UXGV>8J<C1dNgZ)$3KO(}Szq$vMxOQOP`Bma6 z=JqAw97eBv#vVtPoA=o5K<ppG{;{v&&nxA-Jx`K&&0|0E4LjU;{h(5CNq*#>O<Jj3 zmEzyh`q3{vgZNX9y{dn(^DXX=sb@O<nD=A82fGRBXMUWm-FILvzrW@Y^SFBx4{_g* z`7Gu=N#@3X$-hy|YrdJ>&P1O?uRu3GiDQ$lljt6FnV-3qUw^KSS8g`O<MzBNvEKBX zc$Z>6kNHvY*u}gC^Ui0NZ|5=0`!J7)`^9=;)(xU@NjeBlO=C~;!ns5rMxSxf$I)le ziyX^P?9ZStpv%2;NjaZFpGTh)ahZ6Cy@GSd>u(cZsSj@T-p;pb?8&?DRGs8}p}fVP z5avfQ?_Oe_JU={)z3lJch1g4qm&yNO%quWI;$kjooW%SX=85T*JUod$hF&V}QtF%7 z&0d3B%oFQPF?s>I_}9*S@=V618ti9(j}O+JOQ#=Qq0fq#SZ@;MixSVn*pu%7c*LGd zJclu#z&uaP?Mvc6ZtJ{u9KS@LL7#p;IX<GFLZ3|0CH&lTDMR#j@x{r1d)_AIrI=eU z*z*>re-OJ|VqSxJG3G@gIO&voE-{Z`UW$3O2#)`HK48lCFy{4`2gN*Pz9#O6F(1Ud zRLoucpTvCBF;B@qaX*jw1m@l1J|2FSJel^NL))El4`1@F6!T8ZjSKBe*R^W&2s;18 zFA2|&?)ko3xtsB;3-blcGvcmH-Ib1Z20b;LVf0D##QG)i8b?2#qD%dqK_3;J`Olfu z-`w-)pQe|FTa4a~K7u#SC1p^FUi2g4bO!CiaVPoGfxBZfspE%9t+bbZ%=3Oi+{KYo z&d$71_E(U_-ZAVQ#a@}z`&4^Ka!Ed%#NOyn8BbUboGu^4-~96l|0HF@{bD^~{GmT{ zF7dY#d$ZUpm++nTkQh(dr?4FeW7wbnr9Gdvb;*w&^!$IPzKG^hZ;xVLk9kd^x!iL} ze8(|AjCs2V%jARDo5$V^_WHzLy#BAWTeyhEzx)gE=0(;#A}r&dq^A;l<JdbY_Uuda zJ?Qmw_Bd$k5`GN56WuG?V)#k<D)#!ZH;6r*-ev4bILEM8_A9%dh@V`NpC`~yplkQ> z{9K%$oWeZk*MuVuQv8?j^YOd~bBcD^^h-FE*z^9z?vGRYD~X?<KUJ6~wqtSEfnJ6# zbttvG#T~=yqQ7E?OY~v>)H>m5)#&5s^)C7hdeB8bg&uLyb1$S0x#-2{oi2JMdbf+d z2ff!tkD>Ru=soBIF8UGlK^J`#eaJ<hL?3a{Poj^x=ouH$Z@TFD=o2n_Df*O)UX4EO zqWjTjUGxt0IT!sf`htso6kQe{d3Ko#C_;4%J<CNufu7@{&!gwL=-C%DuDj?3=tVAi zIl9M1uR$+!(L?AJE_xSwm5bhw?sd_J(Q94warAl@eFi<~qMt&KxahfLaJ!3MjNa*@ zSE6^j=zGw6UGx}wpNrmuKH#DsK_7I{N708|^hxv)7yTsqn2Vlq3H9Ga&qtqd(M!>% zT=Z)6X&2p(KI@`)pwGGJhtU^Y^rPtNw=VS`UB0VH^5R!6cPG$uT=aSLJQqFtQtH2p zUVvWYqL-t4T=W|BG8a9BUg4s5p;x)+{pem7eHgvgMIT46chP6igD(0h^oWa|dl~iL zMK4D0bkQr(yIu4>=)Ep_486}q??E4M(T|`Hy6B_mLoWIx`iP5u5`D}?&tNb<?xN?T zPq^r%=u<9wHTtxR?nj?>(L2!RT=c`}3oiOmboINX_52ummWzG@J;z0#N6&N7voELq zyXXbzMJ{?dy2nMYK`(RBL+BMQdKY??i{6j!b<u~>YhCnl^m-S420iGapF)qg=($%= z|6TNA^iCJO6203+--F)kqQ}ttT=X9F0T=xU`k;$Giaz9`Poj^w=qJ&~T=Wc%ypOx+ z`REfadMWypi(ZXB?V|h9XI=CT^f?#(F#3XveiU8(hfDoO&vMaEpy#;g^XPdldbUOV zchL*b<-7l><4ZZZ$3?F}FLTjD=oKz{7kZV8-jD8e(TCA%UG#DEdKY~LJ?NsJLXWuU zxmQyEUG!q~P8Yorz1v0KgWl_+$I$y+^d9s97yStOpo>0=KIEcLqK~-fC(*}T^o*;h z|1Nqy`h<&KiazC{SEEn6=zjEB7rg_0&P6|rzTl!CMVHNp)OvUfJ<CNufu7@{&!gwL z=-F3O|6TL~boq^oJc4&FnP18O>ciur*PxfV=ppn97rhI;%0=%-_qyoA=(R5TIC{N{ zK7$@~(NCdAT=ZNP|Lrb%F?y$qUWwlAqVGZPb<tz!eJ*+r`hbgm1bxs&A4MN>(I?SI zT=bLZV=j6|A@$!y&qtqd(M!>%T=Z)6X&2p(KI@`)pwGGJhtU^Y^rPtN_e<;HG4w1K z{RDcBi$0H@=b~p{L;ZKr3($*P^m25Mi(Z3X=AwttD_rz0^ePv<AKmMs52M$*=;P@1 zF8U05&_zFm9&yog*HZso^kVc*7rhd_+eP1l-s_^r(ED8U9`peh{RsM?i$01z<f2ca zkEG~SeRa~-XQdNM-Oo%oZ_X_u{V(wmdnx;ZG6gTid;;?_F^}&nn%zAyufbgY_bP7Y z(r?BvSAR&}?-zGH=qGU(;eIh(<FD)y9>u&IcMGd{IL*1R_;&*P87uhiJNJwJnSEZ# zk5icUWA6TbRF1gPdy@t0<Q<$eH7tVL`QQ7_m#s>DiN7`2FIvfWwMB8Wk8QEnfj#-{ 
zA^pC`GWH}LN3hq=%Q?N`pG$ch!#tbc9y*e2uJ^OW{S4;4m`j+c=SiYvT+6;W<|+BK z<UF<*dj+fc?z#A5U*dlydOmu!XfE}^k9ig5?JnjLei!BgnAeMWQu>nLKRSxNsngh# z_)KCi=j=4qmBvFnK4#xr!kfoDmd$tMxnHgwh`s#l@C$o0uJ+9Ni%viGde2#s|9U@P z{PAO+yT*>slJgOhn_{mAdy#X~T;3O0%m)c?7<)&sr}NXvuY~i-Nz6}SE`G-IQ|`G$ zo5!60C8(13K$oN+NiGQ|pGG%+UYdD7!wJVerxbt6G4DR#uII`9i5%VQOYHfvS9?L4 zIcG?=7k^Js!t0Unu;&(UF+Ym=1m^lZwUlxd^Kr~aa?{Ltf|Jkaa*3ZOF+Ya+NfDCc z5s$C5<J|S6>%yh=ycoR`eMa0b=107pRbyUxQJQ{#In`Xk4`Dunc~7$Yq;@9u4r4F> z;xyIiRzCK*pQLvbdof-f8WsOi&Rs~VI)Qo4CA8}mJh=5QaxV9QJaMhdJG+E&DWBiK z5FwtwS@NXKi!t|J&TmnOkd%Ms+`1a`oGbWE2{Di7k8v;g8p6B+^K3D<FG)uidMSE! zQhQx=cLekLSEcDVIPN5$M$!G~x;*3YHSOH7C;2dgy&@}3j~}NSH^hGK2HIB@;hn|9 z8H_WP*ze`HVBFfH#M6)YEao+mKBr#B&qov5KlXC@4Vf9NCFk$r`yr(m4P$QtdlPGT zaNaAA`zP<CNxAZ#U}lwMufF8wB<_NH?edM6k2%j3^K1|EzlPL)+T^E{UoqzGn2&Mq zT#~*@amUMXbd}4-TbDofG6HGlJxJRFUH<4n^rNDsl%tf#5zOZ?r$}7#&rYwn<&yYJ zVn3VTrpZp{VM%_AeKY=^!rokv-%fc2dlK)0>zQBgxBF$D<&tp9(W}wtBwk7Rx!7Ne zdq4Jao9+6|vs~i71HFph$jOrSnbe+EXI5p4Ez|CfV6W&AJN$V5EVj#InENsB69-B0 zFz;8)V15E~iHmbdzMev#La%o%H#5FD_9ec>H;@le#xaSnTfSyi<%+*G*z+HtelBOv z@l^ce(}n#~2J52|kLCI)+f6AP#{SS>F>gAbhcm4IlJ=9htL$SQCE?hY)W3`y=}X?X zq~AU)IV|P{n0xq5qE4wN=@m&YdW*l6nD_Sc8&=#qm*{)YhtS0j=Mp`JK8!B+&L#If zw*5q1^5+QpQS2)T|1|xPxShm)#|M+gFDbv1=rQzp?o;cDNOG0($>mSQQ}kD75T14< zZunH=ZqJ97rpu3Bo1!DB4*t}j>vp<ax!dn~I^jq<hH+Q)5xXBsZZ}Ee_zCQdVb8P_ z<AdbqJo*rN=LI~RCO>ug--JDWQ_8J>7xPNYYcZF&rH;e7NPf)sV4fHcao2%fjXomo zoOUhuToTWI%x5q^llO|m{y6r#zr_6K75X#rTdqnzr?B7k_vFL5JVcgxpVy2dmu7Cu z&%FDJ%-2|%)mLS1FUZ_+P3F!boJpE1Z>If>r_TSA$6-lt2z%4m8|OZrUKzJ|6!RX; zd!9|n58GVAIf{9~xB2ZV*ZwL88zx<o*voy6`kROFpG}t}ic9h{{}#rzpQWiv>Br8X zo>XJM^xyamF7Y6-ALdcwAHuu~^93<auD9{;2Z_DI*sJ+PnmQ)-lKa{CxFGdv6nk^n zs}y_oCHXjso_~^hAo-LsFBEt4m>2yr&Ab<GyQ4{{>@w;kx_sZ^bm_I7>2_a<yC92+ zaz})cjy>o-=(Uo6iTRQ6zNgsh!rthA@Y{0|-z0n42<DP_AI09O1v`IjUECi-ulYT{ zttMKcd)tmc+@HkWJoYkD^o(1nmnpg~fAn$mTxn0I>7T@YHTH-24YgB}j?>te_;g^u z<B!Sl5q}P&2hq>epJUiBeL2}5sRt*}E%aLM7xP70n<<}DnAc)nw4^;P=DYY)un~I< zKFRgk_#@_(m=9tuWwF?<jk%=LkNHu|tHj*Cbo$XxpeNQF@$WFYN=sKP)m-cEFy`}^ zdu_i2;%*$hI6d9G@0L>UC2l7%ug82&93;0Z^L<rhv6uU5+Q*7?_w{PR_ngYH*N?qE z@h`F7CDd=7e(Ysu@Ec`f&%Si}(R0xg(=YmA^fDLwN6{<L7fZ*~>tpDp=qATa0OHRH z^a6CbcP_b~N4L;(91)71O+@libjjBO^c?hRv7eH!;;s_&a?BIM7k7Kmi_yzn>X+nm z2j*3o{I=K<^TqkI*gJx~zLnHJH+u=|;c@IuuS!?-xLK@+W_*!$brN$ci}+;luxx*x zC$4op$iI#FGdYp(S-RF2`}>oX*c&@Hxm@ybvIl(_-6I}4<9ovQm^&~(hIx^g+n1#G zF#06A&hNy2#IXd$|55BMU{ALL7ynOSUX+ubwBFL?kKTh`pWvS^|Jw-{J+++{pl6(y zoIbH%j=q5X)Ou8dK8LQ;ZHFQDL+DxfquYzEOFFvHyV0|xd{gR|E`Q7;=iBB>+N}v) z{29ldm*1o;6Tjp6kx>4a2Qfc>2J>18H~$X)@Y|Mp-M&owmT)VvS9~$QiO0QL{Qa1Z zV;&OoSMa9?dpUV_dq~L#3Fj#0!AtD?b1@&sd>Zri#Bl6=K}pX^%*!rKF?aS2#0{6& z%dMb1us6-U`#5I4?^cezip$c~q}W?hF3IahUH;gg!G1*S+n2P<t_tc>e!3c$cD8K0 zT>ReSF!rZjm0W+tpK)}{N;m7%)A%FlJca$()#>V()Q2VWF%y5WSFnk4xh8e|qh2NS z^J1?Wd$rhGTF%*+noI1(u-A{hnACT-e4}3}#i$>9vumlZV(&ER!G^vhpC@tGzAim! z-6{3wBzgzB&i{D(Fuk^zXWvO4V%{Y_#myJz=f#+Nu1z=l(@uQEE|-K`jd=)jxpywP z_oD~V7juF3g5H6?2fcnJ4~hMp<g4`S3Fj)}&r$66c#_8%@#h$N2YOJFat8j)V}H18 zX*{zxGk-%jJqmtt>GDS(M4u7i720n#_WZZn^ADH&3t`@exmWy2F_&<9Fh7p@9x3l- z>Z7E07<;ES;!g$-?&(e1*PX$B{vCF|9uIf1+%qbr4|6w{d@sN}gt^4gx#Yeay&YY? 
zx9lDd(@*%ZcVa93mh4wL^~ULUOgWqVN{l6ahp}II59K2TaT@y)k7L*`3fS$*H9yRL z-%0Gn8twdD((X*DOZd54DDQoYn^F$0{w05(r4sw|!Q}cR{_H`YLvK$%y+4wU9_*Lx z=Qmy5${%yN#NIIW`a-X4Pr^Tey@h7xv1gd?*;}c<9n5!LDc$ydjKrfH`*V-cu2=Ao zT3?aml6d&BH`z%)o5{l&<mX}R2j86BU+DIaz6U+AeTsezy$0RXXap|NPoP((=n|iK z^iuR$ac~CptKcsB&bQOv+{(pX4~W0j*vsvu|2%{8miTpGKlm=jJ?@=L^uy@+PtY$) zc=jdwQFQ;|rTQ`SqwnT7cpb}7%IAcwmx@12>bvx>_V0gGaW3_qrS9`>y<7ZUrr(ov zS7I;kz3Hkli-$AF&lvWLpGr5s5AzE3rXPEg*wg#Xr}0<fF^>JBk5GT5o}I?N<mV~u z4-Y1{ThVj3F%F<-bMIW@e=+(I^iHYo%hhB1JWcX{5BBFj!8}Uxf0_IfdtKN&`bpa9 zvh~K4(^2eML&^1A;&lu?2VL*8EEg~A$tCvZv0wIC)*Tr<xZ97v&mi^-?q=NoJiq<? zO8IJkzh3OuV83^i^knmJ2IbI&{Zq%2^IOWHAAQb6A4Z=+*YkYr$tCv3(KG&GsXsI5 z3n}&`-%p|EVSnlVhduwW{VjM6_P&`spOWy((eqvO8gwf~m+(U91?cj-Hm_744r8zH z+xEPAnR+AhwNdO<K1aKidgC&#pTNBQJMQZtiT^3gYcQ8$axT$xt69dR=n`Hry7))4 zNeIu(55-+I=Kd6SqWjTn(X+&zeTm+&OgM+_aB7p%wKxwQ#ys}jbTz+>`K0*EZ=Ekl zJB{bdVm#+DxBe+T>HC_+Sk2x}e}LX2?oLy0r98{ApYsFK<u)#0E|>W0$KD+FGS1N7 z!`MIeBkaFIzpcw3djmfvA0!}`be_OG_OHq1EAgI352EXR7uWW0=56^q7#C)kFG+np zgZ!w*{>*H;Vu)I%UorEr81}rspdOv3U+l+T<fJ`cUZx#LJvxTHKI~<4?_8pvK<`0M zoiEIzccCZNM_v9q$uIO_iT`QREBRlJ{e^!|H{Vx|$ItjG`Md}7@)y(1ds(kA{&rz+ z1bd0`k@)wc54z~X=zS@=#Ah768@-hK)O`psKZ$ui=9OZeQeVV8+si)I5_1zP$)94( zXE3+UpdO2Vd$2$GYkNJJl0RbJfqB_)?E0HD4>0eO^kbe)BcI1^^8C`AYmZ|7|FQS> z@ljRR;`cst0+~qy1oA=%(IgNx8K|NLNGh1gi#7y`it(jZO?XiQp;aulC?Eqw4J5S; zMp4QwLD6PLa*H)6x#bp6>WxAzRJ2}8OAypRu!8cE(O{nMIx|CNj!A~!?|DAYU!Biq zJ~Ok{+H0@9_S$Q&z4qQ`TEPeFP5AByz883!l<Of+aw-fy{-)l`OTT3M@x{X5|C0Wm z0$iQ$DK77}Op$LsbmeE5uchqye(<$I@O9vy1#k3Y5BMhVo50fy(>&sP&d7f72f(vr zHQ~GSs?wjG&>im}E|*F#z}`nslk-n{ujiYlfIkJk9(Ja9^ZdT7(SGRWChER(Wx?YD zDx>}2Gx?8)+7SQX(~s!634C1=&wQ79LhPoyeL{Bty3z6<5yGbzI;k(>X6EH&y~lAZ z_zdtB;LG{mJ#TmSR}uII@IB?-r2H)KYPhbPe02AB_k30bz8w56!Ji)|;Qh}D+6X=( zg=hNrP+r=(3;Y!DdjxN8r$0X~c6%JU6VP>Aqc3iXC}mvt08at_VGr<Z;Cp(2PXWH8 z2ly=DO~8E`6Cr)|E&{%>2Y4Oux*p)0fL8%m;(z?Fcs_RlpPR~mOhg8t^ci0d0H1PM zZ+eL}h^2hQE$9V0vwSJQTY;PPl?{A<5A;)j?*blcl5ZC9ZFW6mpH}K!1iTG+vBcH> zm%!_QCymine2_q5N1K4h=kQ++LTj=UDYplFE%=~fMqGRZ_#zWr_;muG3w(m`3-}rN z&P)75%B{>N;L`^1BaDBL`lmp@;5t1d?<jm`0bc}szl_5$e1yIZ`m`VD#`90Z(Hr&; zeQmL>Huf~m{p)j)?>KbU68*dTzqiSn=)3R~;H@UO$d?U#4{&)-*@ehA3;Hpo=tcZ- zFXL0%Q43wht-A4i(cb8!-<pIU|LJ0^TY~KXoZ$C^&$)y8Lj0%C-$*^Dz-Rv$e`dy; z^4Td&P;>9>!B3yh&-$JhRs>zzUHmr(ynC{vS>SWPZ|n~~O#d795B;W@p>`~CZvws! 
z`0Ww==qYYx)oQdKx;^*#$D3*Xm;O5i{sj1P;d24<Wc-Ns&1Kw)zWguYGXZ%0Pjt0N z#(Nk(LO&P!;}6iTUgP7?dj#vC-}NB>aUyaC&yzv=7rwio8~qE$@dd=KM*E@9c^J7b zkUrxM{O&?s-FX4@qW@CpH!s$G=em0EEAzpxTjC!NJ@sQy-e3BCBlOiv`7ar%znAeN z_3nW#iDc1yzMr?lpz}k=!OsP6_=%qMNipFQ|6}~}V}APy$RG54<ZSRUk0X!N)5G{H z1-}t|so>9-?_Ph-2mc}XU_UMO)&g$<zC+5H_{zB01is>l&~gIb1^o6N;0J&g_W(Zy zJO_A;)Z>2%pO`zbBj6%S;3e=3;A4Qx_rOcOPw>;5;ibScq4&+Oz7j$+7kF9^@G9U* zJ;2uij|CpAC$Wbn;0k!1unw_@?iNXX2cVk^okQsSFDZWtcpdOvGHyf4cgK%XKIJaj z&3{LAbM?g)*}yY^a}=r9@+qM1gzgl{1dd{v=zMYRBJew&)K$iJ?AjN{uLHkpmEPxh zaH(e(bWZ-Gs-Ev5cH@s9kAPSFXVoIXpSMS!R;8U0cOxr!aWjFJ(SG2?J;1YpXZHY~ z0z9n;_$=TtJ-`<M??CQg{f<U}I^f5F9}t1U_*Ic}7xd5aA6jNU2Y}ZCuMqwA>}SqD zmrUx7DTCLu{O8mK)GPE8pr896tW)~Ri~9FVM9#U;w><Cf2Y$;-?6C^?=ok1;tUl&r z>E}(*WpDJa1J3&gUq1?;J>VCC?<*fAa-4$h&KLEN{Fm^LDQ8{;-kY6_20!Jc@7hTb z@SGmtcLL8a!KI%0z+-`T^4)B&M*o994t}fXwXgHtpoM-H^z%0nUrN7)p_lqkL7&8b zp;hy}dpvZ<NAc4cPv8wbp4YRi$GZK4@R<Nz`S1Pla|oSp{N4$E5%}-KX(m2ZLieiQ zPn{p1K6#{l8^Nywf2VYHnDHR=`=M|71O8XmLt*H}9wKJ2etqM+el`X8v%p(=@{>Vv zbrEzqZ}DGr@Cv*{jywHud0yv*$WaUZHV!*WHwIo}*BgO10I$>d(bK%mK9=;y9_Zpf z(0%uzO#LDF<KQ=g-^_P&zY9KQCg)lB&$>NQkEy)qX*BrbAAWa!ECPO_2l$=9PxSzw z54;1o@C>}9owa^?!w>Y-PH3b|6Lg*b>-PhG21fgVPx%Nt5!iY8yZ!ue@YUdh^=tJ1 zEXGd{@D$+XJ;1YpOFd>jQ-J3HkMYYS9~r!70Ur&#LICFZHpq@@p_>a`@Jr;|2)wih z_%`4ZOmNY|e&89vbA*v8{uh3y{N;R_5h9;nWA5?EDLeu%q0az527YGx3Bc2U2e()3 zz7%*oaHAi?%sUy-R|);!X}2`J7ySs|P0$_xhwi(#8D?Ay-viKZKFEJ7^1a(0Lj9QV zjkp(kgl>x1eQ)vx-4o1)KK>B&!uPzt>bA2|@DA`Xg7?2fk8^=%_W-W~o@s)M{Of?H z057ueqnGg@bi1I-Im~}hiu`8%0g{)r^Eh<nN66D%z`9**Amu*%&c96S31P-d>YV_- z6MTmN%=HG@^(^T2eDPiTSp<BW2`+Nh0pA2XM(iO>KZ~5Zps)V2XE_Cb1pI+M@DU8m zG5n|Id}z=2r+nun<ukxn^@0~Y6oLN`e2Wz5MGwMfE_4e{`R%5Maajv~BY3Ou@xR1w zHUh6R!G+&8;8nnTlluVp5BtD(f**al_wp&Ti39q;PXNEa5B#0rGtczSe-Zdaec;!D z-`fX%8~BW~z4Jc+enB7jPVjsBz^BZ?zjc`M*8YAG{Z9bz>;r!%_*U>$QYPRhg32!f zU*zqb|2ps+!7u2g{5J5Nec%s(Fa5gr`a8jI1@G*Ie@Z2B;y1n5KLPv$ec<l|-_i$u z5%{#u-s@in-U+@p``!kAKltACa{&D4b3OByan%XF7Q9v3?|%tAelB)tf=hpo20phJ zeuAF@eh>KG`g<<;G5jZQZ}QfHukHmea%}?N3f?U5F5vQCzh?LW;5&MNp8~$Q2YAd+ z$mjI{&j7xv2lxcw&K}^Uz#r%VJ{Nd-5AZ7BH=5w0&vn2Zz(3@Bs9*Kv54M5t0Dq_8 zP5xBM9{``-)jPfu`~%=ON%_F|49b_K{1h7julkr*rJf?__N#~<@7)M~7I>@HJH871 z1n{dw4pTd1e%T0q6L=#&>1BQqx;@bCwM6*tP4y8koPus+1phH9_4PrQazAS67oM&N zx>L|Kh@Sf3Hy673krBRiOpkGv?|eiZbgkA1-~LA*{I)??8s(R-54{|LPX1H+S!u7C zPGG!5e=!7#bEEsFlm5(x?f`VX`K40uV`6&8&j()xzBfNmC;a;;-z5A8^j>~H_-gPy z$tUeRCH%n~=ZpH#Z^}G$5*wjLOB~iyf3e>neyIpL`LAvH9(akK?v(Pts{|MrUw#6? 
zF9QESANY0P_xFOAy0(GOiT`fB`+;WzcS>ERdZpYc@O9wL{xar4`lSbW2Jrbkz$XBo z)dRc~cxeytxxkBhfL8$*d4uJVep&}S1NaGPhuLn|#0u1~+g;EdzW}=9&^0B5=NJDo z`X(_v-2~{0hK8q`1)coIdboDfLbv|{=$fEgH!M8A1JKRlKi$LC8!?}`eRxC<=WNJ# z`Sm;+U5|#Y1G=UNe)KdR0?!@s_sgBoFG>qve-(5^>EY=%K^HS3Jl%fi_Fn*9Cv@vZ z_T6r=!;FX6cgYA(HwC)vOTx=HAG*#9pj!vsu1mx7+XY?iW#Q?LLs!b7-*EO2|8v%T zS>f9|0lKyepqm9<lRbRBwa_gZ6`rmMx}wqH=?*{_^S$tN5x>A*E`V+{bn7k;ua{Ej zDy|4mw+OoIF@4J?^TbBzI-zUh`+0vJ6sPS1zw1ik9>Mo9pB;zp*{krgVd9i{CgufK zhvzo|x+&S=>1IKfHa0w6Ep(?YfUXI;mT}?v9e{4#`0#WQ4>J$v;MBtCWi)ifx#8(b zp-cOIc)CT<bzA`5M(Fll6Q185=o;j5bC`CVg3dWHJYC8onA5f4>58BmlNX+DE_7B$ zc)B|1S}%ZZ8+2Rq!}B`=-Kv7{bTJDVw}s*9vY~Sng{Qj{x}@vE(^Wxt;sWS4LDzD9 zcz*k#tN%fGx=!d86o;qFSj75tQr~p(;+mk#ED4QAGl0(mo&h{2j9u13XPq3Lt_iy1 z7eIFay1i4v^NU!_{Pe@{bfckrc4~OKQs@@l5T0%kbmcdOr`rf!(M{p$_CS|8Ej-;R z=wfaTPnYs2c6kAGMbPap4bN{bbX#x1KEl{R9dsLS4NtcXx>dLJU9Z^X5$NXM{$0E5 z1pWZ<aPwit672s+;pwJ8H|CD;bn~H${c(7@b<mxFE}TE#1zn3=(g>s1<IvUL6`n5M ziC?%oJlzE7ZY+!F{hkWC&)_`^y3ys~>1v^~P7hyi6LhT?Kz9JTtuw;&i&%;s%?wXB z8oG*E;ps}DbKDcYy^Ekrxi>uBM(8@A3)jDUpxb+2c)C;2HB^MBOIb#oFgrY55p=iD z2~Rf{x-pgE+gk@+%-ryF+n{TME}UMDK)2&3;pt+Q<KKT8o-P}@2ks9~cPDhk4}_<y zf-Y@d`1WpsuJZ!u_CxpKgW>D#gl_ZC!qa6`kx!c+o^A?s<qw6Yn-5*i&%@KLgD&<L z;q|f$y5ko>cO1Hw1>x(Be+)nQaCo{2&@Ff*Jl!nlZd@3it`@q?Md9h1pi_&(>*WA+ z2QGjv;&J@sqv7iv4c)3G;ps}Dt8j*=TLhhBX?VJg&?PMkPqzoU6VUZ~ZxXd-@ID3I zp5@``Ql7w1R)w#(2s-Cu;pyf=cl+bv>FS^x^F(;MZO~b(!_ysst`)j)dWoqfr@SIO zT{d*jt_)xAozOi{6P~ULy5idKbeo_{`(=2#{m^w>09_|^d!G#7-i#IOKm95^-4y7m zSM@EQ<Wc8CSMG{X^1nD2a=v3D^mVUB`1a|}pX>7dkAd*p1wM_Wto%Re1)YC{KIQEQ zwGr~5b3w+FqJ&@kO8g)A7L6YlkmoIeK4*7?;%Y-r`6u}sXX6~nT<DzdMkpu0gX%qh z&#e}GD|q9aUw1nV`~-?@0$;i}LM@uakBSR?Ua8c@OWJju@~ejRQwK!O3piJkQp3LF zMg6{;7w4X01}r9kkG&YY=;=FoH)*x6-Ls%OklIgW2wh-5CMbVb1-><{hkBllI)86c z)-m$Z9+{>h7F?_ik4ZItiySGucct;XNP*-JjQtPGZ?wVz7d`UNfB1<2OZg(+&!%gt zoX@~Z%HPTRIPeJ~Uw3&+Ne~0kUlngPlIOqo=d*C&nE&Yin|?(DE-H8`qE6Qvwf*nq zcNY-8O}sZ}`Jd}u;FAmbS>03+{QLE94g6aJ|JK02HSli@{C}W<R$aN6#^Sr)RQt{? zw745AYO}RpzSqrXmM!1w;j?5wzE^4Y{U+_Aq^YgCbK<dR<$WG_L2SO4W5ljQdaCE# za^)IG*yS*$vD6QC=w%;R2_LNan(GZ7g;bz~PI+2&t0(&*75^r%nZP)c7~i#Aqwaq0 zbA_6#P1ik#bc;tFQTGe)kD>1xsl2Va)`+Z)$XX}#9c32xncH;tsT(5PfwI;rN5?73 zex+-kHeEGa7ni?honKvJO;@hg7nIxfn4{xpSJxJ4^C9YK8>GAoQ&iorc;D7f)wL~m zxVP|_tt~SKyR4eS+whU{z6`uoPw;$i4){oiw_G`=C4wU*T*pyOPd&fcE~(>jPkYbK zNb<Ka$@4KfIjj%$G|yIb^_m)2FOTnXCb|BH-)S*wobWsZ{$oASvkzD%d|F4K|GOjI zgbVQoefeNRw?fkbO~dobWj%MWg}&LFb;#}#9+~IXw<IImW3-_RS&@J9mc)Fo(T~IO zz1uD7<k6w)lT$Ty@|f|N&|a+B>;kj$S*)q?uTkcM<a}>Sl*9Wsi-W$-`{08a%5~6^ z(B5iM-u$kvQfW7RTgUNTqs<ffo*JP}?&k50)=1~H+dS*0C6-!$z2?bHl1Yi*^09Jl zw<NZ|!DHncERN<>ZD{3m_%u+5JJI1C0YCNU`Kytk#(FN#W#zl3sonf%Zr#bQuAwWC zBeNfUfIW@PS1ysM;c-Vt3;nmEfA6@Z@ZaDMrPPgf>?hUp?0o4%NAsZ?hr8U8_rYY* zB|5YWF7!IPx;Arx3q2=#uDwjvy-2$Rr!+^i*s#$LY04$vzZ!IL$GL?XcB)()-}ZRB zy4F}@3cNP#;ce=)B4?f!*G_0zYU`KZ;YQY9!Vg=-?o&OJXfuBs)@+gRa%ma7^4(uW z7IX;j|J$LQnx?<s*L%hQ<Wkx@?Qg2zr`jA}4zxN}Pw%fv2S%tZY~k!{JXiRh&bZ0o z-Pli^tWeRrXMd=Ef3J#o=Px&||EQmqb*$Cq_`3DmuF~&m9j_Hw9bdMxtSC6K_`QM3 zx%%DoH`dNnm#?hi{h}J^`5)EK^I6P}k7n{d9=qeCD*2waXKfYlnVk#XD{n2@ZM65q z;??QArS4;c2JV*nZSWJBvMzaNt;gnAEwUA;fi)%Cz?zxL=BeV{IH08F1MDNv<}!ym zIRbs_@s6Af-UZlXd3F9H<!MEalpA&%Vbu4-&cgSe<S*&|r4QWwF1+EpTVR25q-o|! 
znWePzUGz{)*@2q#ucDJT;h&YZcy(6#;=iu=wyP<iuV7t?o_1IrUuzv*rBdcG_@JxR z%g%N+sWi1Oowr^wqa%ZNPv!E@bd|E&`>W`tc<(juTd{q$Gup7bu0nm^>6?}J)D6-2 zx)}F4U3b;bRQsG(d<&mDv8(BP-@tbV-^J#u`Rz<_PGcQdC${QHbWY3VyZF@v<s8B> ze{52GY6Necx25s)DAoMc)v9^-a8*~$xUg#DU9IC)b62YJmOtuvO7(LzF6HwuzWxpB zv0m()hCkkqUw7^B*V`JY8oiOBvfSgBWmu3biGEr7lEaN0!S(l4ck3X0PrC1Fvy36% zSksI-<YRpNVLifgMDOSMMDOnrSyLk%ub`Qas_)lJMc3ljfE)AQsT&yE%z-kOmC^^* z_$hQ}$By>$AYfNpr1E}5TNHLgna$}p4#2+{zM%Pkfwx9FnprBxx9V~BRXo;9${%IU zNY!lZ8yGX^78WpH<-5Dy^!byUX&>+=WVWZOh<T*lOSOxNU!`9>BHOuz+v#iN!bh!A zV{-6OR(ryws%{Z}=k%b9I)ddDIqS8K@uw*J5PkI5Z@bnUVt%MW4#sPfwX>`xRf}(D z*;<;~X>Fl@$KqQJSp#yNjx6k0m!jg!^7Qv>zg_TNO+R(hY}&pOTUhcvH7`pW;3|1B z)m06TT41$H$GiT_lK#orl{t;Jfi=sJvmkzit6od}U<Tz1rX;#9qmK3G^m)@c&OA!B zu3m46ZZBB6Vr@EbgFbVR>tf&c%>{L_uHk$)%FZ3{N(#UyT;d{C<D++8;TjV7K6->} zkdOXELG5L(m;n8(OI)@9eB0G7nNQ()qF_@bx1<d|n+rCkx?;}x+9+*X{!hnK<Znt> z(5|dk#=G93U88ANi7lq4igs1eE(h(JMZ2nKSHFKap5l2kTdsqjv@bo<*S=IOy1j_@ z8Td%Gg?TdGC{H^DHow2xlH-GIuA;pH+uu)Zxf+<kn|2E<8`$LmI@&L=I(&9kfFF7g z7~^LPxdUGv=tE$oz{qw8zY|sHMPNLmZA)^1ANmnkJTTF#L5H3MHeXX)$i(<y=u2Sx zmD&;=D2v_%b_$zn;ceJv=^$(~&TeN+$3f@EZDHJ;P#Y&2xRKzbf3o3kW&9Yp4w-l3 z>>o2m4>LxQuTJ{Ms>ORMc&wa^qYlPM9rVN+SLV|{&Dgb75l>vA=E;~c_{8KHe4@ms z;M;=jS;i4EZdRKTT-RTs>KZBEIVic~>-#i!PcdZI@<IC!)mYt!pR&0>`E`{0voc~& zeA>r@w8kStEsdY1>W#PH8ybAR!PBrjx#R4)t~H6V%JsKz4_4o1Q`t{2<`kdwja_{3 z->EZoM|L|ty7W|k=W52g>*z>}EB?g6FSiydSJyx_FIls=at|rj*Eh$xJMJ6eu7|cY za#ABP(~0!zSl8j@y5Gmfd0HbQ8wY2ox&ZAVeNtnG-p{2*D%WA=2F)RJKpZ}>f8*8o zM(ZNSQxXGrMX7mhFDS1AJ$Ch1^YGiHE#H<^lWLo{6kQ(t)><uoLE;nf0V4Y*WDm?A zZobR>VfD`+^?Gh&y*`2WHN3yy*sy%a)>rkot=A+oFBQgZ-MT!kL*@<f+b=BF_sxjt zo-;&$?&WuMh;J01gx+1}7G8&cEF!Kiq;Cq`U*DJS?u@+Nh>xuB6nz-|jXtm$>2q{p zZOla%A}e$Ed08Jd$=a$%F~<$y9m6}iv2C)}{MF4`bFA>aO>6G@oo|ja<`{GtJin#E z>&$KcjcncX!w!6{_}tU1H^k@uoHjm$9Y`E`Ie(3|Bgp3-LbnNipBu2vGq=6jAhs!b z&c@cQyrb|(lkmBD_@ioKH1WG7`8lrL_@XJZXXjMniwvKMFDei6v3JS1^6}WR0>7OJ zk5qUxU_*vKJLL0cqh?p&&!W%s5PxR)TKw5oACE0&IU3^patsbH2XY!?x2>aV3uA7T zs)};Wrf>G5moo8@v?(y3$XpPZPsC@+oG^;=M*HxUnZEDFoNx)>4LR|RBLZ-IqWCz2 z9$!dK*Y_PCNSxvG_xL{I41sShz~_l?%9FWhkSeXfkBe^X$YEt(%VJ(zZ;5Ijv9x;a zLxJ*r%pEJybD*zu{GrT=M$9xX$Cx{!=`Yc>;rmT<hqP1lafmswH#stpL*_`Mynl}5 zyD?|T9Es1~Tjif44Y+@fH0b?vr19N9M?!DR1>%DZdjA{=d~cP%oyK?n90|>;Dt!H8 z%!}fq19r>!+pHcK;1c`&&u_bi(k~|py8COD%;En2+Fa1xU#klI{bkVm`^)&=-CuiU z4wwEC-R$()%4WvEs-A3R40QDCD(P4MT)(%VyTA4Z&GoBft{1swj#pd9yJWr$$nB3k z47o+G22W&_*aI4)4Kha>^nSe>-~D>!yCJtf_V_=M=Y-geT6cvpcLnq-dc7UJn%0{3 zyE~cpm}Atrvg){Vw(7U>)4}V<V0y{NEMUxuf0B7~C;q2h%FDbtSk24l_g8u<eVXr< zIX18cmAuO7;j+%?&Z%_A!I~$pQ#;u<RC%i@Z;gqoQOqgKx$y~Uv99`W54I@rLHuL= zY>Q=IRE({r1ibk7)~i>pHDckRzF7EBKZ%8XzW(%ZhnISdbq?cam38O$S?lkZc!+ti zmDs#B(mC<IVXUDCIlMNtY8rFZE16pL+EmurGmt~p)kj$mT2C&1&w9}qmqs382IUHw zYuGs(BKS{uA0l>NOe}#uUO~>%L&W8otjVmbF|A5@8@?@Tp<P?*zqPhBpxbxhDZcqM z`KK?aUve$zeCWMNs_t2IsnF#!<Xa@CWsHfv!(5k-RCP~-J9Wb!Sx5Nt1Tufi{F$yK zUn6sc#Zxwv98iSH2DdkTTOROu&mF*=Jj%|1)yEpJTY%O3U?<X#_+U2za{!Yv7ZLB& z6B`+2T1OjYiovr!G5CmWrZ>g93R+@~*x@i`#*9#P<>X^zj+eF1y{sk5`>STLg;O`Y zVa!Pm;9t|$G(J1{WLb6o6*1bex{X}I`W<;EM>D@zVjS)wW!-(9ku$LFzK_o;b;+?m zsmoT@vmQp4(yB3!tyQe0B^OXdPN4N_=UNq0R3r1Bc6`z5bcfnEBIXT)&%N+^O!fC{ zPq7};C_5r1|2yz{*4C79NuB2U(){(MDYaYb>qqWJ>Qo|wzs?acJNuMbgMT=h)^lB@ zpQ0O)e*`*+ARiJ*{^Cg5uC-M=wBHw~49`8hQ`O=x3)B<jNFH0;0)1d@kF7PS1V^)j zD-EIt?9?uwz2UDN#u`<R*!R`Xl=s}iLGE*J>aOR1mFwQP=KJoK)?ec`$EY_GqgKfJ zv!8SIG2+daU(oj1h&MMet{RqK+|dTSgqX8(r*e6IJJG%AcN5$Vs$-ljT@B9D)I7I- z@zy`1%ZBB5b^vp^m#1`C$a7UO7KE>R`Q1kBCVg~n;SKJue#ST^CX+U}^}9MUHEYc& z+PfBe4E9YthJ4k}EXrk3%ckLbh*4ARamJ%+mQ5Rhy$Ng-b&z)(y8~JbkNotqX)>lS z0bUL~z(Z&+Bi547vwR9&fJZX43ff>FX^hjlWLFVm;N@?+*1W`9F^)H~g)ZaRiT~jt 
z->wPZUjX+TKYpDbPx-y7YN#t4{I9`DUn%l5M=Ml(V6A@Y>#jBAXIEJx3cXnu<$E(V z<*kL5Sf%-Ma8_05oz1+Gl{*OEs7{J66Z))lA8kA1k$GR*y(&F6#-&;>Cf3zF*_3TW zj&lolHoW&u*U$#3<C1)D7J8AK;+}yj{_LQXjx)DujAL>b3w3P7m#3Ud{gSs88CosQ zY45_T0UuBur^d2I@yw*o`S?D3<GlK4Rr(8j*4cmNxbltu80xu${*ifvzYz50a+yQY z$d4IxA0(ZpyU|Z4^NsMAxkl(ztaI99_$nUFV5d*BryG1z?Kb*I_#P5F3!;~`-SmOt z+Y{|#9}il5bQ?^3S#QQ4WE}&~Qt?&7_Z0rQH=a9bTe?wqqW!L3c*<OVkabcyYa{W4 z!t<b?r@8$iXSz+5=KFaT`+1t%pBCqwMxLNf_%<4IR-!$gHHFAoPu_s_f74v@-EG+M z6uz$@AHR}!)*<C8*%`r}NSvpgcRysC%NUG#Om)otrK97xu9`k)ZX*vn<X5wQlJf<= zr0X>LusI(2&sUyICJ*V_zSMCtojzz>Jox6HeA_klAZxu*__xn>n@95Wjg0N>T2#9O zfA*j7{z8xOoJ?@!?B$(jiLJ~>-c`(pd1qK}Q*ISJ4l%B=U2iUS6c?e(Uc)LYwtjA5 zA#1mMcO1A=Hx#(fMiv=sHz(~!_PY8@oYPp}*OkW`IX7(Cm*0~0sq`N_n`g0)5j=mU z(MBsVh0LKc_iv!DkJ9EjBb2LcN%`+bsU3fv^6>1AEZS<-qH8vhbI+rV<=9mGx$>u| zFP^_dIr%y5sCZ<4M_qrlPxvsFOJz@i9RASYxi8pPwMe@d-!0;+p>0*6{v_aA{zA?S z`6jOxeK5up{c#4q-a*4VI?6P3Vqr`&9v2c<>6(j}f3ww<>(W?DYZqCk8oy6wxh(v) z@Vi>#pbb~KG%NdEej0%pYwsUgTnZY|ho-c~G~%3(yv*T}=U?D>IFrw)c!#(BEA989 zr_;PkG*wfwRJkf|dHPGSA<<QyW^FIgG?xWEP07D|>w4CU`RHT{Hc-2G_phHmSM;8= zVM~;1vOVB<cs8HWu@3Js__gsq!@Cl`mDrC}>~gHbUCZxhv;n(I;9o}ESBd>fZhr34 zvaRdslUD3!y-m&2sHcK@tk9pPj%OlOQw8s{@D!OPS0T13`zl#lVvRMQIFtR7vIKRq zo_;JFrcR1qDWR{0E{eUD+n_rOFWS~Ll=pnbQ;wg{mj3Ew(oE%Yu-+QBylkt)PKPYR zYOWNUq))B7$dXu7(O-E5U(Ozq;K{Xx;s^TiLwxvjGd|IePxj#@-<{Uk)y@B6<^2aT zo?BRiJ;+!oAkUHS_C{W3j1%>L@rq<`=NR!=7yB}j&xzzSpD|)#KV>BEAE>C^#l%V1 zDgB)zDs$yaD#CMVS69<p#6|_YcM}`QUQI#T?zJ;{7qCa;Xe|ucqxp>3M)-ZENA4D$ z<bPIMFZrLf1<LA~$@??fluEr4>si=?8Oghhxx|rH_=xy+!HI2M#vaONX^a2*8G9&w z$@doc58>%hx*-So@_oU%m+m!*>|qceXwGSpn;(F_+oUhOOIBBmV4WiIPapaBpCvFS znesY}`ORW)wsQxqvX3nDqrkG*!?mV6a*O@3x8$CJ*KC=CRXp?1W<^{oYlc?V8f~l@ z%D?T(#@Fmsvyxm(@o^)Ok$G6=tj%rg!3urD2$ee!zbEukhxJ_7*!RIV3}nrt#nt3p zr%uW~YMRbm)X}x(HQ<up(^O)O*s}0!vBcFhV4uM<Ni1e#zNh{Z>6&%7${a>45$B0W zQ+1X24Dyhk@)+k3;tS?kD>>LyyT*75_~o)RHE$;4t=OuD6f?IR=KUkewZM}xZ}>Th zN2DJ4{Ewk3zMs-|mpQeQ&pQjg9>F^APsa6kOU+1Ssd+5Mv3hRy68B8iZ+8LjdsCLU zYqRs*6&a7Z=Zs$Lo}1n2t}<|zlPkn-*xP(OCC@E%)fv|sbSp<sG-zwHw;A*WiZ!Xk z>+ywdp}jPvz@W{{$af2^eKdY4dxF~%ue<xFSlreO%^fvbxkY9fZp=I03}oxaTlNxd zj2)4`nEb;e_98D$akytkj2T~x+-{XQ&d2vrgO6wQweVE#O|(JyiM;6SBzh{nlsW4O zXITe(58mH8Q;qR0_%~GSIB?BR>?qp`uKBk+QnyN3{BYBx&iwbl<vwwuEGM&Kb`JPl zYeiPhZ%?G=1o%t+_R$6I(b<LWrPfE?RWVBp8GP-sxMQ+)_Xg{=?u{`%nQZ7>XN@vC z+u=@EHuP?*xilpoI_1vHD1cWXymq+@9F~)_(fPe88am8J*A@dmXS8n6%+0Ry(fReg z)Vhd%(a`x~gSOh}Lmizjp<g19VS@Xml!@*J`pq-iCrj|$%KXKimsXihoxYeX&i+`U z#JBlwiHpjItiL0Wmk<~+KKlfzMy}$8#O_CjlqtP^Hu@HuEsBw|724DY=}Y>s1%D`g z%3oNQY7?wepI1rauy60up#E4!e*kM<>hFhT%=P@v#kY-H+M2q+XD7~yZ>TZjoofAr z8Q6e|a87ip-%K!YeCL#Dqf_NI2Kp#HMGX<169-HzQxPZAG98aNY3DNh*-XZJfkWF@ zu9j^or;ONd1HRFzt_|VwiMr<f$G*L6{EIPa$o5$B^R%Iyy2`0*=8;EMmn%zSxzZbj zXMu{?H&a<$VqYWYN54N)X|4d@a;2aX`9k?`k7vFQ;u+vKv-RQC!90EZEbz1V<(vSo zi6Q(R$FKcQ+Mkj?q24mpul-H-z&~Z|+@m6Eq`lA6M=z>NSJu-{=61`z&oX1oN4CEJ z-dA5_O@WGRe;&Q?S@Sw`!T;&ncQ(F1N>xv5!?#6Q>mQW$U740S$IaT;Nj(+xr}(>g z{M|9u!o<?9M%KtttdU<XWPb)&E$idPovfW-pWxo~@<hge$2dFd<1*IA9@fVL(4U+2 zF)){h_3=o~jQISi)jkbd4!2JBuuh)D`Ij$g-`+U(X^_{7tf^Z1oK|EMzq|oCjT{qx zTI8L8?FalbcFI|(h<)5k!tW2}TncLi_Lq**mSgNMS^aIvq8%lNSi_IJ*8TFRiAJtq z!}3QPWuHstrs)z}md3c!%M)GC7i#;0_r8Mny*4bL*!a(jRNaBIU2D4D)R>bwv$e24 z`{i2Dez}D`v5Px2{26DOn%l@HW=62KA#a!#v120h-A`MWFQ1U7)JgWDT-xCEYnKq8 zw~Ztxz;~Uw{7rHOtRb4(q8;7}i)xm0R}!1GF5fvp%9r<dPL#Plkb~HOuFfsIo|x)7 z_SSv5#6{>cpr<T}MaWH#qQCMqZQe1)qWzpN&r$NqisZ?Q`^Qx-Ui?%{MP*0HJ;_6| zw5Z8;Y<g1q+iQt+?JG2!$5t__Lt^LWXnSD(lYKDGvyn#)%z?i3%>VoPvQ`G_TYLfa zVJmWmz+)pWlRT1*^;bZqp9Gbcbx8o;rEB}lxq|L}P03T@pJUiBB(9SAY$tWxVd5ow z`}xf8D=g{+>(vvP%z0T^`QDQ6<$Kq&E;nK`)+dv-73fiU$7w6qmdiQ{`%DMEgR@xj 
zJwNFC^q}ucgT8MF`o7cuU1B}P2J2(x6}ur8VvdOO6k<OOL9yRX!#)bJj{;+TO51(s zxrV5^o^mF#rzf$W<SQ<s%|n^5B@R58XqndluY)hKzLd{nYTiz4i9GA7g5$a5UnQTF zKpd55|AP2$4Yn&VSr-TD4y=i*Wo^mYUi;slcL>eLl#3Wlt_)w$lT7ehMGnWF8gH3* z_KgUa$g!RJMUL&rBeB1cJ7k??d*-oearl*gonc?vFZ*`C>_Tglp{&d)0of0w`f_iF z^#uD4WLK=oQ#<vO#pt@_s_JCq?X@jmee_U=tfLR{`LUnRzF1Xv0$5-S67R0U&+Jw8 z^!>9bvPVa}n`URt5ErmXp%MIS?5aidv!VOUDEX=Lu*39^<ix};%6Zb4f^Y#C^EmlZ zec#za<vnwo4gE*qt2jUPrp0x}AJ@NxeyT_0=Su&|db*AH);WUp19og3<~s*Td^l}1 zyvRj)@>$=fb`Cm;eB`E5?Uw?7`cjR4m$RISo((o5kLY@iPa}pwzin}jX6u0XA*q8M z^JFb#tOXo###$haJsKzL33PkHdL?IUS?k;KSx*mkco|b(=Q8I6=dz^}WM9gl8Jq8w zy6Q$KuNAuLN0s|=@}5q7BYoN=vN}g-xxb*V&Mllwn@h0wN$#_SlAGoH0CA?lC)ZRL ze4NWvqjR8hqOmrnZDNniH%48O`(TXOrOj=fk>lHf`qi42pIgj+gG%%34cUdh9(jEG zRR^mB{Tu9mf@490hx~mTJY?J%`XA8EN8&$OZ%I7p=I>{*qs`du31e>~Cdsvfb{_`s z#cqAEVyZza?KQ9O#&#a8-eOVXq@GLZe;L=$SjfZ2IHyS+5@X63I6~WH-{@0*S3)QK z>7?Hu;*7`B-$wG392NfzdY0b_{0^+U=pTDvZ@=^Y|KbcoFXv1O)xIA~j-EX+vB$Aa z%|*M|E3$aftNXbm-z|N8iZg6-#^lM<#y(B<--amHht&BjZ8!tWi~Qf`Y^zWHM8Et= zA@W}nB)?JCFaP&BBQ3v^_}!)_*^|%R{$6pR8f+cB$cQ0$RH`eNbz>f%t@<!*T+S;e zd!AsAE3fkVIoosf_gm0is&1{Mzf0FcC;V>zCUX37Y{X|v36vRzEhX8@@%KWLhVK3b z9X@T`pJ;=eqaOC1HVh4ELsU>3jI#bV45JP5dnms}mI~z@{{}kJsiWmuNAnvNtFcD* z`Q@R845IVkvnSh$c>?>9A!kqSf6G|g&c-i`EtMsa=QEuFmOZ@}@n!6xG3T;&k+>#u zeX_&_f%S_b{*m&*pV8Q{z|8y%8pe(>PABGjHO=Au3-i<xKmPzvvt5e~<tMG0Ci{z; zR{!9#4Cge-`Co;PQC8nsl@Ex4#E%VP4KKb}kH8nlivN<eRU+pkQ|%+jVY60|{lir< zHyC5A>3jY$CNv42RK^+pSmL{B$eEk3>ao>0`+#`a*X$S%Y%lS^UMupK<mY=Qfp5QT z*i}L&c0JOt?NoTh*=3ALyfT@00_7QJ#{O)D%oX@U<y4KzsqwZnsub2*OMh^#B4O>c zmS1jvP{yt`Mm1NHk037Ww6cz~(C??<De_!Hu7ovo{Ag?ppVFK<mY4)P_2J_~@;A~K zmqORI+(KMyWv;LhPg~fVH}+8MiO9BtwzUms-y}kfZA*1D7t3d%BX<(@v(G(eCv)&2 z_OmltD^>En4jtgf_tss@J`H-1y(x3snS+fzq7R5yrS0|15mu|C*;cZ0+EJT_wSbK+ zI@xb`G|#5pZ_{ppi7#48j<Xosz%SF5N%;F>H}V^PJ~)T2v2QLm(Z;wHKgOKP{=c&S ziG3f@q3G%aagK~v(VvfB_kQr#_x*u9<#+Sg($6HYnys|$sBW=mxir_fhaX)Xh0VTg zNv_FbUqR+d$w5o37^RYHzGA(HjoQy*uaoq|==Be1^KM~%Vx#XyqBE{Z500jM@c`yQ zcpcMi_82XyW<6(wTG*qAQc*SREk@r?8Lu8;mp*URlkiQt;b+R(>+;RV;;Z8E<%#yx z%G{hAm?tHE55`>q>=Aspz|1+>ukZVlG4^}Q;miE7ft>Yo=peT+-`jyKXLQ{-55^<H z#c$hXKFKV~-<ql=R=UwcMHK#td9H%J3pqFOCNQZRIX#J|y4G-|HJcpy_=<s!<_6+| zimTAUxr5bN#JQ5MXS|lS;m_h|yPSD=R#)RX@Ogr-p5fD1l$KN@x%UQQ53ePu#;RMR z+i2ssLDsF~ZWceDh^~BfCZH>k<=~v`963LgM2@pJep!ai7~|?Yx(ej4!`0^{ev-PR zT}kKGSAWI4AoE05p=Dp^{W^1H1a{Vsn593q9LX4D9O@R=hG^Avc#vv7!o8zUQdKka z?%odyMCbkO1pe%CD9_jUJ<ei#TjP{>XFr)clRdZ4zdTOd!<cGXq$lrQ#F@-Y;;|O| zTV|n8w{J6sM7PU1%g}&s<I%0mPg4F*j0xsI&m#7`I7c79AX1Hc9eqEw#Iah+Rh-xP zP($aeQA?XFLu=@xX!PH_ZlHDRY398SbpFh=fbNr#GtTpxp^K|?&S1M8#0;n7@Egc9 z6um^~YKYHH<DpGv?o6@Y%~&N?Y?Ar@kx1qGhBJ1VoHx&uw#0R}#X}opUtiiJ=WYfe z_g4D#5H|AhAV>3I*3?JvU7x%v@+5d{@ZEtgZ@U6Jf)DqJs=IwaG5zoZ`r~@~<vRMO z$lZB=p_~3Pd`wU4l<COGyuLRRA9j{L`igNWbCS$Y(ofaM+k(xBT^~h<0s9Y@`w((V zJV%_lrlmjrR?0A^2g*p>W*B29)h>M{{ibNMj5jASM#~U?nS$;zebvKRpEWIvIinu` zm<uP{PRdBU-P796d^TTvYy$Z?IoB_K;|lERFfj4AVhe%TQO2b|?zBtXDd+KX86Q^4 zcCUG?p67w}r29l-$g}uX(X)*6kM*I{7h#teG8ucTxY&C0!8zk|8m?DczryEt+gO}u z2>9)DuFmPHJ?2;=fqKqRk8kb}Uy?{Yx?O`ursJh)bC|og&l#6fawyM*F*Q55H&S5R zG0{D`cG`-V`Uh8BrEhf>I43MESUO=@!LkWYT`TJq-`$8n?A)bW_I-Vyj@?CIfBmpS z@=Ocy3FLQvMBJQ73>Rpdtk=c9@Vj=&GmZJGYmLlH5?8kBL-E6+3t7*^8!<;A@mw1I z)k@4Md-yx?mD7!wgqV|ctIUx*@OSlE!#K%LX!w&O@JS}t97?PwFf-3<nTKUfkVziG z3Z1NVM8>1MnXlV5_?=t$1KM#t?YWM26)~O|OZYd5J@yVHmsdzG?*`8HER4nPMYxU( zwYon|we@(mNAjD;D0^IYu9keI<TWMl7emfZ_If4f*Kk<vd->%FZuhGb-NccNa=!C0 zdaBz0J=crNwSCeqS?_GnCpK>DADXY+!1y}1@J4s%4L2~}edl_9i;Njg&ed5O9+W)0 zrETHrRWT7$^;pN3k&kblT8|#{>km%h4EpYo&!$biq&DrRMSNb*=ggGLrdIKOF=fou zw|LJ>d2#CMl$WN?>{#;Nm4CcyYCm#U^7%r>n`<w*);jeCHFD((+)ogGW?q4^S6=-4 
zsaO8-<kXSSzsa6qZ*>S?c?;c4#a^dV?VHKI=Mv@k(h*TuqtyDf(g)$@A+ftG`jecp zC!e*N*nhh9BF44Va}>M#khA1F2CF8?CA2QbMrO+W)i_TVb5d8Aa=#~geE50n>ts7; zkk{e|*|TJ>;qKvm>_w92l<(K@J-Gaz0_9j+e2;T6AJgB5M~eTpdX`|1@S9h|9@rUt zyW&$~_hp7XCwkIv%yM;|Bqr+Y%6^)C?X%R8z<k)rcyRJv)^zt1Cp-JA>~*w_yG2`` zWq(`scTS6x{ZZc-@;C<2wr`dD;0%>}kTHF5h|2x&R*U<YDCIp%y^J%Dm-xAudFC7D zEatx^8)qlvd!DA+M`9Z`;yU@Ar8#o*+-jbaF>#bOSXt{^ktqs#a_h!EfW(j`(3L@V zh&Ej_UAYvwsE>)mAEFNb*!CnNmy&s(`u~^u)!&sn!>`l#|9_C@k0J6Tdd{#1<&!7T zvqj|jO1TrbXS_gUL{E?7yQSaWxL5oL`9f?)=8<4uk#6{kzUSUuw7X01x3BYO%KP<w zgYYM@_?5x<mm&C%IQ(RU>*HTp-AC5g+@G$G`i>8|9b2~2-(sthUs#E(a-PPzRB<1- z{HfV<Du_*#CohY$Sone^b24)zF8H9oay`Tun%6IO<cM#b$$GJ#^Fges?5|rnfAl2z zMPjMr@M`m)J*4wpwM!Fgghuux+}Lm6?BR3N<^Elc+w(%MyFp*OHPg~DPR>&M=B7l? zYv|K~?&>+yvBi>D!}*=T>p4fo8Iw|>4?VM(ymbIEa>Mf5{bv@F$QQ+tzp%CMU~Q3% zO_jmxHH#ycac1YvMBN#3e#Af7uNm!0v|j=2R>=d>CZS=^pzayQPC4f|<jhM0G7Rk} z<%in&AJRI}$Ln8rWjngMvfra`-TIG=vyUJ1J)Sv0bR5Vv<kPRr`Sx=QZ>FE7(O);w zZ#Oaz+~Dps&QJC;=7Jy6x50S<#`EU%mN?_i@!vQ%=Ip1&mjhFyN4b__I|CWNGH12I z-=GC9wzWs@z_3Puc9qJo8ML~e)+uwItnYy*qHp7SsgVot{E>1FVn*7uSEXHTlsiq# zB6XxuM?Ua$;H<sqC-xio1a=~Q{}3bZV&G5sWOx}~zBsA-u5)MEHrgI~PO+;ncHgbo zWDE0}#4D%S`|{Z)=T2jlcbmjp{na=gmp85)|CFBVoc2EPeJQc)dU6mC;oF|Zhj507 z?KofVOx9KF@Q+*INnYl`vx)1I$Kv0*E>;ibV-GXoQ-ItBi?Us<_*2cvxit3eQdR6c z@*qB4T4M~H@RBp3<(#p=mN*ur;(IE85}zsM#osM~w!(;U<2=@j@Ttb$N2*=gDEo#I z7i>VEw4J+8-FpO`WxI^Er^qyrHM>#gxh^BWD|h4n#Fz+P-^Y_*;_xf;jz;`z<Y9AD z@te|D%eX5qaq*)ky4KWV*L9p7IrXhP1A)8?ZFma3mGfo{0P~&4mHWd}C8q^ka$13N z;pVtD-s<q~H|n)We|aS5R!7^ifk|xpS9mzPBF8!5<6Cpad45Pf>MU&t#CsCsn)6_B z@O_nj6?wjZ$IsyTD*Yz11oB_z_KU5QGUrs&KG`c2J<PsGd~CdD9=VnI=ph>((q`E^ ztYF@90xS2^C~Qe+?-g3)6&;ns3%y7#ZnNaFh@rg48Jp6t!Fu^o9On^Yd^x_(Ue5f9 zecy(Cw^6sOhq~>1ct<AoI-7fVD>#c*@u;oZjqj0lo7_Jr_TwrZjjDbJIP382TY*g< zB0h-wZ|J9jvOlNX+u)0NlV|W&unwz$X7;0H)l0thmCdtp_Kx$WoU@#l$(h{5e(RI( z!7h@Bm1WJ~AO|k_DPJBu(H@)w&j#*>R(t^a9Hm={0R&cpoH7n(p-agZ7DQy8A4`|} z$HEJYwVq_z06ZW|JhBwPU-qlMD@%NcEKm65kohE^c|bm2LmtU7iA<O9nSlSAj!ZI! 
zWb8JvHj(^~&~k4gUf|;;|ACImu?4f9(iy)2*%FX#xL-Dj`9k#+x>r!{mrYS#`T-s% zPmvFnes?ebu`%YSGcIKw@QwS>vw)rcvw-9{nq~j&ooH3pX}aqzwiJ8@u3`B_9pntX zZN$qJGaR|ZUSoUfleZ<uihn8)8PI{uACHn-$IhAWg2q4owp^+4Vw>bSa-}URXoF|@ zU9>@+Y^6<--;>;=uN}ViTc>|KuL~o$*our<u@{k_HH0?;KPEDXA71a5^Lbh4`{gV} z&T{f+;+M?hvcHT=`Y|A{jB&Rit8eYqlZ>*rFXvC^A#3n_D{?9HX|CgQ{I1jy?1#*2 z<2>e>%)!oSS*!!aR>lslPOkmJXV=14Y^M;N_9r$tgk7>OJ#X9lpp)@cZ>+sS*TX-@ z9=<_V!xvqbPrQtu-<ej!`Q%*g8V?a4n(9>ZtQqPi<9RO$?A>3sJ~>UZ8uv=9JGc`{ zEGIcf>w@GV)_&Sesl&*JLnCEnj+W0hWGoLl6S#MPiZ{1WY)#rCx%@(6a@zn$bMRfO z#M!gAwnQb?BycB7{C+CyM%wjCJ+YbSq=-2;tzpHqs#7CeJLY&en<M*;@vP4iJQ?VC z{v8(gL&@yh(%0{tdGtN{$v78WGFa{V$$vWBPm=GE9Oqo>tsSfe&(&gTYN_+jgO&H- zbk--FBd-Lu`yA_)!75kuIUgUDHJE(R2Icw!JxY81c`o9yP+j-s!visjv_BB1G%O$9 zVTn*)#aX9)%p+;!31uw(o-yPgW=ZHr%z`hHn56+&AZDq+Ml+eSBtPE4TVfadkyqju z(TCC!YUUC@2_5s+v?TI=XDv26`)%!8o>#6HSQp)<tMN18ZH{C9BKrxw#4+>ayf|l( zTLW=S59PP}%NI%<18whdjL)a-JRisO?9XHj|J`42y+4)_|F$|x)-l{WB)1!UZzXUR zMeOBc@{+O_EoV_=TuPkt9P=LMf|@zYf2H_Z@lBE!bR)+KbQnc$Bkq!@UzZQe?*P9= zek*?aY%SH3h+fNy5hNav*cJWlHTJsh9PQuhiu2rq50W^k0p5Xg#NzJ)vD#_YbAhuY z%ppCVC9!fAAHPg&W}K-p;v`u|$XR#Un-V{DmNmo&^k<jF+P)0=tC3%E9(@@yD&i;k z9f+UgoVv2Hj%GfSSTG=q+`9;zEiuPwB15$ir-{w_?gWUPDJ{Ncjo-euAVVE8Y)6KX zoVyE*i9md2=4tpy?45_F)JZ&EPaDNft$fNe7UX-XO`V)cdt^>dK>xlv6KY=c<5|nO z#E#ed>l8lIIXnLp{&>j{&NN*7T_0R4IP6j0W*H6N$k_%t8)1#mI!0-s`?UeQ$R3~> zf&AjnWxPsk`DrY%T#7H23zrW<ug$W*Ai2kn2Pt<O@%8LzM>BR<xePn$MAzK2HR9r* z>DHP~eYj^ReqWxy^9}aC6P-xDXOzSt>?^dACz9XQ(Av`Tv$)4{<yp&$WU=urmg?jU z<Z<Scr~ZmuQ?~36k>{2?h!?-HmNHiCshm&AO)mQ!--G|mjby!S8CF?OKNSD{;jK@B zpFvK&OeIxj($7+F1!vR)a$E2zV)No7&kk43k?^dzG(SsZIExI1Ou)Awm*lcevmZme z-PFc9m$|4pnR^~bh{=wQ#HS*c)#hl<(Gn;BntFv-TYu&u&cEOBYISnmOKXzL^!Un3 z=7n4I_~`8xo>}nH)5${@YjG8S|Air|$X8uiv9$%=uJG%22D+uq#$M%f@b#TdkQ{H4 zr-*iw>l&L`xge*3GG9}l)Dgwrp^SCT$PdOYjd@VyKYLqY);EmfMDDDtqs<+(<7<A$ z4|Oy*p+6h@y%Hy^(QP$HKdL|`Tg_}_dK=y)$iqBTDe{QUEbw=L7yTzJc0BbQd1%F+ zUIRG`p1o748@f&OTtPp^vlmlJA9C)(!+D=(a@R5k*rk801|MHP#!5fJ1K&?SCK>%$ z@zH{<((V!XzcMw{=*LWQ+0u`b{5lHm$D<=1?)FiIS(8}HFeaL<oY{-wQ|`VWC6`89 znq+VC7;Tk)F4n4(Gr^0`JH}qo3dXcm_Ps^6hacK%d@}b6trNQv+Gu!XUY4IF^$5HY z9%3sc+KObUe^O`HmXFb|9olmX`_63{xdhHgY4GfN)51ALD{D~p;}>#<Yq^E<ews)2 zv}JD4*w>bP+Lh$54(YzV`ccFXX*{1p_UhY+Yh|w<TL{^!m$flt-hLJFI(<hBP(mG( z$V;$?{-!S{FL%>Vf7|u?2>h3<i=>Zzdo;dt3rF!q64&g&wyY73X33Y)_Ic=|v;}>N zFO~U1>Pey=i8-FRRk=Sw4>Hb$*X_(3|KxXY{HYNKi;tR4zsMYQ12juGqZJ%?2E}Rc z@!j3HC0fPHbKcko^;{O?_;NbqaAfSxhWmJ?TLpK3KH>h)C#>^V@Ove{FV*ZTuV-#7 zj9uCwcfDTZ9?&N8;!mV2*L`ZtxI(3jb1V08=Dn~;ZJpU!{@#5mZCqi7S}?Qo#`k`p zl7uE#;>N-%)ggC^_5_tL3MyYj`6_?;BFb0!%R^(7-=NgEk=z?9j8{4E94kCE&3YcT zobi?cz6d;J4E!O^?vE^TY`sTCPkvF2jCyg0_TV9JSE<OF-;p{_?kWYf{g@^=<op9| zWxm?1(uTNJ-J#+G-}n6(do!J<KFWFOW7w7CFa!6ejojrS&U{~Xt$WiI-RG&LPso{_ zkaN{?o?6aQ8+{Y>EFtrG>XqypRUhwKbA+5%8Rz7Su@`B3;M}C_*I?K3jDb~ZU6SiV z;z!vFEKnV<2h#=4LsI`{HD!oP_B73N_ASV^<;jsQBR}_{jT~Wu?As9&SUhs?c)H{R zpY3PBeerml=Of_d$QO{O4cd>XTjWjE^!93OU7pb?XZ-H6;s2-~{+lJ95;@X04|Nq# z*I{VLYpjuW2%bB~MtcN5lRAaR2JitL<oEAB=Pq^m%k4d1ZdR62u8uOnV@G1czsWjE z#kI>E96V-_ZLiv!?RtVTUG&XYoJ|kh$&~x@vZp3`%%dM<Kdp_nZBULz_G(@7Oj3DH z6@Ah8vgvHU#J+NV<STiWMWOF(u{_7rFY}4>GB;_4%tvVJy~w}=1Bg9*XU6-|>!5DE z?me&94E?-bZ-&=*^aFlob&R3c$>5b*W~}d)F;`#W<UX!{4^L+g??v|TZ0zBc#5z_h zV(IDZxn(R>`?y!~@<`dkGxqN;VgK%usrPQnnEKYXOQ*iYeVPpKlK0-q*tPa8o_UZ( zxwr5oQkMO-lYw$>`rzJ_jg<M+DAS+2hcn@0=7pawE#k~_6*5$@KX)WDVz=-$__Jp> z6WBrazuv;Xlt*08obNRD?8GO_IOzM?CCYO;@40EW{Oq?68ut(s&lj1=eiHX{#3$>_ z$A?DBep;$s@m=m6%w|n=jQD`FM!So7mO-kP&`zA=9Y9~t#wQ=kdv3L-cI%f`Ey}nz zno0a%W4@|jY*c`kSV!pOY=RY>l!azr1?Rntd6nmx@a$8Ax4>i_5ZEi0IU{f$G5Gl? 
zKfqq3t;lVx_p9+?`q)Cw{-(;g-p%@TzVF<D;5?1SzJi7Q1)Vu9g1N2xuEq>I=V@3= zF|V)6nw0OY<I}76v&$KpahjvKZmOKyFwSe(pM~!c_Dr}WmHlhto<C^Zm;K%K#49$L z>#lU5k8jZ1K-xP_<~Q;bf`h*K8h%SmB)`Qz75f9Hd1eanpK(Ug`n&$klXUh1xA>kv zmZPU$#Xh@{mzLaH1@Upt!qg!dTJa-R{ki14Mq*3wOVQ6Atc5>eFHwiTe0Ff&lIO0i z`s~JhFXvA-ujSpv{PSz(4J-O8ezkvd+iU%s8F#DJfqSbVf&B~TkRQ{Y6=j9qboLq# zK|2qA(w;NWvbQt%8Ks(sv$w;ua4XqoXnu#itvMh4ykljV?wut2n8-dIdGg7_9Ewt7 z9|xWkWgUNesv39nwd<QD267S~l)U<Qa~a>unUjC0*(UF{L`~i_V0UuumgwOz#CQMF zqwU<ix9=ISC%LwH(C~lhgX}zGacmdygv7`1T5OY#W9y&m117&a;4jHvfIkTC9qO*d zzSe7l+KKl|1y(~`zY4pY{>TFzC-s4x^N8iHT~y_aSALo7P%7H8Xz^1s=gjDsK4(P7 zmE6;ivvV^a8Pg&Ate-PCN<WMJiXOJ}`wL_fIgWkPW%M<7vZLjW%u#eCdVLL=Nwlwx z_DcKnXlEKSkJhY{-$Cx00q-PNz7jROop|?iJ<2X^eVMjbZW%cIbL2x_<GBPS<b$)R zw*|c)qwlKGeJ(cC0IU_>#jjr9Jc+hSj!bdZ>Ko$IRO;G^PTSb4yA@eRiQK@i;8Sqv z$dS(Xe^Kv2(G7L{*<zjCjxLYuF_RkyyqnB3VeQ-fx~zo%5^bQNOM!_l@5e8dKXQM^ z34MV5Bzn)Y*edg1sZG9;ql)Q|E`Dmpoar6qbJ9Cv(WRURp7F>P=XHtih?aH?!`6O; zd|q@m-!EfclbW{zeThAn*c`c0=w|~q&)M0{XE`T1T<*nVYm(Qfr@aeQ?06}|+*q2| zrsjRE_h;=H;VIkMpXYi219yl|@kLqaP0sUHi=Bc`pw2gB9fD4n7ma@6tYoP?Q&i4c z#q&>+oTu@;Ci@BWe>G>=@Jr2qrEi6g@9gAIk5l#m8NWx!{~l)TAnRt|x$xnfmrt_q zr@Zu!lQwZy&AyiUbEsR!ri?QgpGSy&KV{5{oFZSYjH7n;s~B5(ulvRm`OVy7tK_7I z!haZNjUqYY=Wj1E)XQ@rA{@CtW?wk3Oq-`+1F4^LmWTeXrXGCTX5nFFe79Jf&EkXc z;gv14=@|7?JW|mibxRwH(N%Ceq`lIXO!QVyd!!9k+8}&IJ~=OB$fv8Z(yy#PSwHyh zpPjj_2%lSs?=9f0oILB*7Z0Q!W&F_Y>3;d}CGy<p+^k6B{)hMb@E5%ZU+iUUwc=cD zKQ&lv2j9NRdW(_sHO?-!v41W6%JIG6Hd`C7Z=Ov|#ks4^S&g!{uF11y<~g``CAur& zxiJQhALV<8;g`KUuUq!2r9Fm@zGmMDzdu~s=&#FYGcfd*9jFgo$o-Yg+@^IMigHfN z4&r;1-!^y)?NRuPe8X7#HB%4yVng1Yl$Ux8-n6Y4`d7dgYu)`LusP64|NNK8c%kyp z&e$M%q@D7tg-ht$2|Q2DuGHjWrA?kRpd~rWZ5=*|cC};sJO{--d%)YtS&akmPXp{L z@I^B2J;;!TK0b#=e!q<0m;w)ZKE*LTYVs)VN7WAab29$ezTWRUvaqo{ZGhoB1SY=Y zaxJ=M-h!WYh;KiEpGmc7mFkt1$+0RnT8ZzdDDRk8k=F4A^e4r4EV%r<?=a$;!oJVO z8vVcXaap5pzWT00&e!x~tjJkp-=3`GIygHk&oGr|sS~^KEQ9=EuAGM}NFAy8+gTGN zy`13U9Q27hCg*!M(04Kxiglm;73->bIr}cHtVPKiaBnMax5Q8fh;34Mmg`U8A@pCf zMv$|qJT1(?b;ih@#CW^dA$eQwaI#+%>3bG^2f44)<hmq3MDE-E;WFh))@)Id8xub> zmOe`%7bj;uQpi>K#%2of_b|_%HrCPfQLCQf`3H5Ts$uP|jH$KU|KORXSIQhX{M(`9 z+jt)5+lkhisO5KW9YqeROpC6}m-FL#Sw|N6Q`tY*+1XXv0*#GlesJH*$lIj@JA~bw zMfQVy@*IZj3b~)<AAc2`agn@{j2p3W^jdm|c+rS&4rs*N!wi4=6P|@6eoyo&XL~!K z5&a5ZYY*^K<Z?v*H0<mYvbQA0*F^o&v9+Ci6mwD&=Uv7I%fUFwZt(j7`bNg(Ds`ir zftCLr@X0U*8HA6G_D({NlE;#{5Fec_{VQ=KKISIr<C}p8^Un<8zazlEO84`R^NfYJ zL61*qmGRDaD}V>{^G$i!GIn+%>!y5fG5tZ#N%Br!+0zz18#sSC>R}(5y>7}JqMj1n z**qEFFd3QJnaAfmJg0-4k?33UO~#$YO!mIuyCtl=9RYpBd2B(ta3hcCLe3e<a|MgB z?V-r=HoD40S82%D28{wHYXYGYx#!R(;??F7WG@Dn>DQg~L%`=Ld|tTxR`7cDpX7d> z*!vHdXI{nERovnGKY_df>SPQ0@a3FRJ-?KB*^eX9xT@r?0^(2G?yK{Qn)C7_nwd*v ze`_B;*t-Xw{+#ytJ4hWpoVk%0Uvg;w4ZV!9OQ|CcJ9!=c?C-Dg&H0k^O7$?#>#k<b zZ9@k7!pk{>CR<s7cL_PJhpwt#D`zKUt(2(Ae^&U;+n)QE@RdB5?>^yktjnc*oh6~> zXO@KBhRm$1MCNqHrsTzf_cJ;;2lE`yI+L}X`M*Ab_rJv-U4<`bKxVP6sGUX4Sv(t! zdw#3%<Jo1Js+^84?by&e{8l?780)IJ0a@DU`_#qC{Tkni^_yB)_sO#*s~Lx_tot?M zvA?iKV8k@|87b2MT>y6(dJ#V=zn_3k_{hB_Ir9mu@gVcO$YrI!EKWIF!5WkK`Brpc zb^MaE!u1al@1oPV_O`35*UI<a)`D_3E%2<&UD)%P&tzTC-DGl$k|T164~W0WRm*<s zSKz|!Ux-{b<f1HR6O}g+`50Tx<0m?r$0B3f6=LIz1^yR+anam%8J}D7vo8Pdf~+xr zD$E*jvMB4Cr?1Pp^w9NLN10z_zo6p<J?r=t*JT~MtSIYIi^N6)?1z~5+K8LKVjXf= zAIyHY#r_$08oYXc)|@h54Y$9nMMv+^Vxo^~w&*BJRP-HsbV;rrQ<9|HO3qrMO719J z8<$)7TwGG&vvFq&*2LYU4~`z8TcT6+l<0-}(CF`329!)y1519T4=Q;;k1c7u>6dZu z-t=VL7dQPXE_T|gI5Yo8wB(ZKv|%MVT2jev+R&0aX2O3a{Aa>{MkxPpEW^Lc{~q|? 
z1OI#AfA6Ze-N=6&`E3?kv`34I{slbOQb#Uz+)f=Lb8s6vEh*7I)`v%5NjogG<4JvB z$^FPa#u8idLp7izSaxYkSpLWy+=g)c{~4{W43@u<_8j`EtMps$fmCW2M?bpQdh;Lq ztK3Hy$KL$MD3$x|C`a?loU^N3oOJWc166J<G&RtW3(Kv6=H+CS+kTm&c@yV?v(QD2 z&|R!@M+wcP@(GO{nvsrX_XudfyDwL{CD2ZS#yw8uPJ+h$eU*!Uj`myy4jRw3D%TF) zbG>}V-s~w+x$kINw7bC1W2%&+{H^}-cc|Q>thqeX{N?ZVm%rCvevZnGvcyHZIn!du z@Kb;JpZm){0^FiUdmi+cf7D<8aew)hz*R)Fdzrudlm7D0_{*<VxhwR@XwPbY`QP}< zzvM6PQMq&2>vBKuFaMwZ@~`>J{}-Qnzi7|z{pJ7YEB^&?sOK#o582P~yu)|Bf3*7z zKc9E~d_M5=`AFs3xcen}5!nafFrH7J|Mc<meCp%nIi_-tvDR~c?C0|@Kc6!`J}<JC zbAPRJ-^zYGc@q6`u)j9X^Cf-st;$`_`r~ufc>m(8<_SHHwXtp|M{4iXBkkW1=bWW( z9&B{e7K`Tkh8Vo<4-U7?as7t%&$^ts<fu*@g@4W4z;iIKtX^BqKGW>G3%sdI)kz)R zc6`uoa&N?L?VPVDEit|iY<J_E<vDa&f}cGnXEg5`ExKgXY<muI63?Eqwin9YCcMPg z67$p<-{pS&LSmIGIY*#b8V$evBbjd{58U^jRFt-G^=|f+N9SuUau}ZS$CflM`{k0x z%B4Iv26-JRX+NEwlKxZYV|k6Wz%=Sl<$P2tYwQo0*UY(inVV!Tk$s+O<`Y8}eDP4` z5^bf^xF=i4*_z;UH9wTx8T(D_2Zo-bDH>*+qtPknn``4eJZQ15x`2Bj)YEi_#hZDT zh37I8yAba?+;2{_4OY$MIGb$=e6m-?vpAb)v$t%!h)?#FZRva_@_7lL$$Vz<c`-4g z=A6j6A+POw{2l>3m(NT2oY;MbCvaw9+NWJ>db>L+&&-!|e%C>>zJJ6?(Zh1)=muh; z3fAbd9)1{k=aa9NvoWu6mvN0HX}9df`C<z4fc)o*=wk!rDxj6;jVkmmaZ8d)s!7%3 z+Y8BQjo|lftXt3Co5Y@??=Gz573A*AG4@wwZV%?sN||KJu%E~Ll<Ik%yR6Szl6ULI z^Ht=Tq*6A?^9RnenCq9ak_S%at}FY_?IZb(y!ND{5(}l;AEs>)-~|>3Ok(if?w-m% zC+GB<h~4+e_qVZ=D4xs7I&)tX`(P_s2MI6oF}XJ2NBMnJPv_i_twz?4!EvVOmov@A zoo%aT*<H@ri!9~Hwef7Rqr^n419&D2ITHT6NXfl$zaGUl*_+udc9n&$g3n6$^6ol! z=Lc|CT6eW^-v20ZkcYjM=LcQm4tf62PM!m`T<zO9l;?t_`u5C%_rlyG^LRGVwQh~E zDtj~LXJ&H#?ZgLn=VQaHD|l9}-1%x@4Jvjtf_OBa{-urf^r|fK_*PHxAkIi5`v!}n zaR+qli4vFc+(%-vCE#`fb8trQ1-=ve+OMBVy>H6>E1sjYkUJF1BV6JB2SoOa=1Sh4 zy}@?&3Gs!rUAdyU;~>v&ZS>#4+QeRx=eHBwjlc8V!`eVRD|fD<#4k}k^f^Y;xN~(A zc_ZJQs}f?$p5ouY`6#nqBo-FENNgX_i^TW=y;%Huncjn5a)b187vCFt*^>y4L*@KT zJNzqH54T`Psi(Qu!+4ZDs=%tTN!D7Pdi*_ow>OLD#AeA_h2O-Q#@VPjnwlKV9&QZu z$H;xHhex^OF?q&q8}=`7a$Dq%qnT@zXZ=O$!8GjiFTlIB(Um(HV=)&wa(LeDmG4ro z4o=Rmh#iQHifsqJPs0CYosQ)GwIer^_bBF$O#T<6LbFzGKyDcWV*gP-xyhaT&QF^C zw~VL%CKnM8t*m|C|54z-@nmfv&x4U?iNt%hwab|Coi7x4z&G~9FAIB+zLNTP`uVZ; z4&m1TzwO+i?un=Rzj&qc%(&j(f&ZZj#!qM6cbqc@7M^csl`{s+#h2rs-sO3*g`730 zP%*~;$F1bEhBF5JwXBst&_=DKUs%sZden~`U+<v5<U7x~Y;q{I`(E0p($v1s$nP4T zoFn+0eLfZQ!y29u*sfMBHTL?X9&&cOQ`6PScLq3COPw}+veaA2SyPD*vM$-Z_T9lH zHS&LPkMW+lcl}3IdmmfNKK9U%I|H28moZ{JLLbIhWDo2_T|pduDf#Sp&tv!l`M#3R z@)64QQ~Kr~*t+;Xr^S-h+noq`22)(R+E+z6S(6Em#e5I&cQPk0<gVq@GsJEZJd?ns z8*^HMXDZ))wv+0a5%m2wzB3;7s&cu5K<@UKZ@qrEK@;!!v;Q+-6JlczPnEMX;zvXG z{^b1Xdry{iwBf7ddFhg07=D91i;1zw+JDz!*68G`Y}p0gsGI`UuvSkz^GO2xNl*Wf z{YT3V;~7Cti*sVj2#5C#OMc_d{yQeJ?|0$@^e1co>JiGj%%U|e<vDFv`(=}}F4xHS zNJpa+eePJcV#3a4D<>qO*EHIHD>8_#L?6Mq^g#an8v04ry1C3jqF2_sd)2Ofu6>-1 z$pIJGM>p2Mb1!%827VQA$KK)Z_b?aQF9U}TJUnNP^GNw_g&hU%mIM!@hT+b?YC-=S zC(ob|et|L3l3>D<KMclO6HR#H!C-uwx%}6m_)K$o&aecRuO4FJ&$AbU@hxWj*-(5# zvZ*}(Ll{y%&V)Z5imx!^Ple(e%<W^18O*=hT>i^Ye1jQ(G8At$x9>zKzSxZaA{3u# z#{Vl6Utz|79*S=;<Bx~pTg>`x55;H3n)G`t6klP+9}UGf3^bK*GvVV+_|HuE!6y8t zq4;Vu{*zFAg_-}6P<*u+e>fE1kY=j?<4}B?x&IG^;)~7r)=+$#S^k5e_=^6f`u`b< zZ!^pPk5GJZw5j}oP<(|M|MyUQwYh&k3dLud%l~gEzAe&J|A(P?tJQ@6TPVK5jQ=1M zUv0*}ABt}<<M)T+Tg>?PLh+eq`TrV<uQvOSeWCca5vKO-HQ~+U?=PWvtJ%N58;Y+; zG4cO%D8AT?-xG?@yvS63cPPHvT>c#sex#}V+oAXdbNQA~e2W?XKcV<GGybhmywxoK zpF;5sx=FrWq4-R5|GycEFE-=f2*qcb`R@$HTO&;M?+C?Lo5%O|P<)%kRQ`{l_zJUq z{&y(eYL;(XD84w##QzVW_zE-rze4eCX8D^#@tNlGTSM^`>8AQ$55>3fpTi;X&1<3f zV*a-&1ivK|-(bc!h2pJd{k|HCZ!qik_n~;J8UKn2Z`RM|P<(?K|GQ9piy8l)q4+kN zsr`+i_y)86Z3@K~oAED);xo;7Pbj{{T)*3dH<xb+#aEmC<8MuPbNQD-@fBwLi=lX{ zd4Ad$if=LZ-wP&uKa+l+55>2c{lkV(e1o}seJH-g?BCai;>lTs+V^ivcysyx2*p>M z%dZQ?SD5j?4#iu|<9BT+KGSTU&xPWxX8zBH;@iyqw<Z*yX&#@?gyM_M^*<eoCtn+? 
zzq(L-iy8k^DBf!JAFD(0nP$8z6klP+uL{LioAJL2#kZL8Pln>#%=ll1;;rWSvo;i; zX~x%t;)~7k&B{=Gra8V{5sGh!G3mcL6mK=J51t6cSD5jShvHkz{2vR&x0&%(q4<gc zCi#|!;#<u4Wuf?LGk$3(zQK%lhT_}I_$8tE2J`rPG!)-r#xD-Vx0&&aLh%*m{#_V~ zFHSe<=aEo+i+O$Ua46ntUf(PT#kZNu{~{FMV8;La|6%X_<D;&w{QviRCXks-fCLhL z1k5BLH8a6l5J;lXB%l%!v;whOZ8wS7c9MYISZ!q$6Tq$s6RiVSTWk&J_S0lAtyZXH zx4Q)C4-mB?pW5B+4};h`NkF9tBnr;w`FhU`Oom{0_w)Jw_x)oYGxNUhd+)jDo^$TG z=bm%!(DJ;*_U{>5UY1yXcS3pM`t|jp<@+WkjQ_5o<)Os#hN0z-NeT7;erUNbvHWX8 z%kvV;?;KiQmYvZ4s-fl0iS<_wEpJaOuOC{zFR{FCXn82Hymn}LS>pc2H?-W5SpL<a z<#~zaUm057oLK(lq2<2B@;egB6Y;lVXnA`g{og*cJd`M(YKE3O#w5&7_0aO>#PX`4 z<@*xb|GS~(d5QJEG_<@uvHXie%l9Rg-!`;7lvsZ2(DJgx@-GZ6Z%)MDEkn!m66N#F zL(6@M?SFn~xg$}2-IP$CXdkH@TD~u_e0f57qWoAkv^+0SelHzbUY1yX<IwWv#QFW) z&~jg*ef5T+<;{uu?e#;;+Y{y6lA-0ziRBeT%YBLE<wMJx6ZNyjL(AI}%gctA?@KIS zG_>58Sbp8m@_mWpzjkPOdt&)DL(9t&_s0u|mirRR7Yr?LPAo4STJA{1U&+w&yu|Y2 zq2>D$`RB7k%R`Cf^M{r<C+=_N4J~&h^54}7<%#;;RYS`|iT359q2*<X{8c!#JTI}l zU}$-BqW&^Bp*+z(J7;KlC~^L04=pcC<i9J2mggmw&q^pyls_|vmhYRIApbuTDIYFB zu7kTDbZ(oOL++~*a@TX#JRbiE<=JY+&)aP#HS}=qEI;EC{EeRc<N3Q^l(%Opd425E zi{$PZ7bH%^x+CHU@5zTGhq^xQBH)Z@j&gF9;X`fZ@Z$d&&L8zmeXKnbu2jCRTJjX= z?!<fe?c}#&=!sn;j{nW(_K(7IB4^l&O+TL-E|%j%XJ#K#cer!pJj#^M$`{M~Wjd^T zYU+S9lQT4(F<avy2bkS>mOMn~$D+K$KZ%ZKE9c?yXNo$vc?|fJhbYmvBy$<L-Z;nA zSz7EIjWhaZ4PP#2g5!ek;E!|z?IzYs9=_gd#*E2Uz5k$I3H44bRctmfyVK;WMJyw6 zwy~I9#m*{Tmt49Xhl%Me1rGVd)DXL?JgO1j4~OOZ0ggBK;DhM9!0+PPjuX>}-(4Sx z*Evd_#PE7Mag+{Xd7Q-bq!35xB91Z@|B#5ki_TI*!0{4o)pCC5GE07%v4&W2e1BbL z<4;G8DEX;;qR#-|N1Qc__7v|hgLCXO@~a-3M$Qm>elT=}*%2fM=TXMif$zWa?{OET zx1L-Qb+xw_%4fOiv4~&A0(=AXOuj#vri~o?ne}~Fnnfx2%hm9{&QN{e>?mXDyRx)M z{iv<-xyEee7dp-dpC&IkiPBs4%<=czv%GouPJD=O1vy~S^P!!TJ@{XQ@n7pO8lHg$ z%4WeEYa{stMWb%wFuQr~-Wl=P57GZj>M57Ie#`F<U!)0D(~14ye>?B$H-2`+gt@(= zXhU&R?k@bb;F*d1R_u|=DmdR3JVp7g$-uF+%40OF2NsVx`GD_0ZQ&f?R-39@`f-^2 zvBbDB4%JJc-T?7Di>ar)rOwM-lUGpJnNZic&0Ojo(K1djwE%ZBJ`C4GtDS?cfZs6E zp1~)i-kIvJHr;Rd4%}1N0bWkhZW=JQGe5QUzQW>dW<yG5^A=xyU7?3IJR@4R+yrcn z_;;Q&8=TfV5xAYeDA-dTV_esp4SD&|esfa;{pxph-M#8pcaMGadt1lxVxvni;Opqk z$LFi@663fdfsSYO>UI?zg;S60{G+v*(UZ{j`w<fg9e+)=lZJL=kqyJ^KAdsVJc z>E~>pXMt9`F<6h!OfE1E_wK_v7reV#@^QF#JKYz&YvlkL?%h|iFL-y*({Rc1Uc0jb z-~5I;?paF>m`fLX_HXf|`?u&WVvQO3T<J|(yR3Zr%USqAjdj<U4a8(rx(}NTF5UT| z-~VbhY~wu_xRo!Qo-5{9^nj25@uM%asqV53W{MX+DhT3l+PBawQfxQ<A5b0s2afXD zkAIx{U{Am7D16gQ4iou$%Ac?pAJks^XyR{EgYtXbZD#a;+Ie8hQG7*^bCoau&^5UY znj6DiS@HvaCetW9Y9AHU-F|KOfeOYp>QwW6lr|rwJY@Hh58tt!xhg85O~sZ~;ES}R zwzjaM)+0Z^z>?a$LI-oN7|?0-a|$@+|M(H{Pd|7Q8AUGBZIi%5vN5xU?@)0hm&`=Q z-%cLDdG-nQtKc#6-Zq4Oo%GY*y>?_$^Om+`a_!i&2K@BfHjTV-y#HNt^2j$$G6erd z@8`*H`+fVw3bn<(Jq_fOtyEbgzGH&-2FjJw=p$m3)US^<w1ECDAErNIo2>pS=r26g zXgKxL@5c2t-g`A|owkjy2$KUSRpTU=f@GNF*T3L9+zf5U=lX3CZKo3d2yHu|?G|X; zVG`#=41{OZNX0)MpUXIRf`8$x9beke)5dDW8xGr6$^~t8sa|-IS%i!)dOBL~81X`h za2z9^G#(Boa5zSO+v+a_4dC~-qmOTkj^+~shkJP+el<8dR#rh1`S>vrD`(MB-YDoC zIywpsrp{{3f<9bj7OkWNmG8?<e4%KGd_op2g&6<)6=qQ{@nwv;h@7(9d)^IK_7OWJ zdMd*wD#)jT`1EI>tL;&`5<Q8oxNpA54!-fPo6t%f{HXns5fg|0U!5`Ze^*-Ef97jK z3w2rR{B3c+e-igwIajPXK>pR@pIAo@N6~i`Il{ot;6iXwOYBV^WAO3qq^y=$7<4f! 
z_oQI;k$Z8UwQk=!z&QK2L1$<0yPh?932So&Yjinl^<v^~bq`MDzB739wW;|08F$YK zEO_v$K<Q^#ck$zjK7ZDF-h5@CW@gi}cX+Nl^TG?stNl50Xy2DX9{v%5Bbyu@e}2N* z@y^Xw9A#&`-)c+;(S4golZMH~zYRZI{6M$o7j<-8c~!@@_4jv5U)gP+-?7b}J+N*4 z)t#kLeP!!I_jPXFYaEQ_joJFp{LVMeD%SZ+yDrpMD)AFtPHz54&ggEzy_fs$Zqt2t zso(@1<-XDQvDyQ%ytB$dr#!v#Yi{Sh(jMY_N{zM4q)VFQQIbFOozWbWd!VsD{b%&v zB>bx7vv&h>uy?QJ8~hqNq*rlH(lv9?b#mW#2G)&gTDG1~&w8`7=ScIsp7nP0FI#75 zg|T`Av0#U(`=ZK;4Okl2&oAQo>7k#Q^h13AoHw(L2IaDZZv)CL;khS!dGFIkQRp^% z#|z9=Pld5Mw8;o8rtI)S`O~?*q05pw#`NGHm)t!+G>x1b$gV!_Ix+ILjh1g+=&}(V zeYcHNKJ>FEuQmpk{E0DIKAhY)@|)z2vgrTNB4f~km$n7(=g|kHzs3B$ACH4sxXB>~ z62C!j56_iXEB5~n{I~o*(MxC2b{@3o=$&H0-1D^E$~)Klo1_kJtn6D?TCw}R1<=~l zBRT{_2{5eVGbaItZzRAFvUnqsSK5T1kk#xRZh#i9hd-8doW2eHc5j3)W<xJ$E^@C^ z*q(mye17;yexUPoGkM?^`VAccw@ppUCb6FgA0g(3@r9?^I%@DMRxY#idUwR<`f}*_ zE$YbcUA}<iB6@s~y@Z3cBbFCeFw-}=uoH4fM(<)u7R(pRue>*dduuPwr!gL|e6``z z;6m<QCvRuS9`)CTPw|Tn^j$WpBXq^+j>GG(>x|@VjO0N%ynaMy7UMp={@PCEOIEJ0 z(9=m44tf@nYahI7pA&vMsi=38aFI?M$(`?!zxyrTg9aFPMsNb-{&&V5nXB6@IcAM} zZ^nh=j^+a1Pj0h|j(dIjki63JX&qQ$T5xH6;T5(HYi?GUMcu?wDCXkZ?5C>H6GfM; z{I+x<)>{WL?ed{5##gtGJxnt)pv=krs{AI-F~du|YY+2ftsQGOWQ`}so#s;azpR7j zLX<hlPpP{w+_iHHi8~tXMb9E<<pB@v5Q~011fBHK9`^=R_Hsvn+SBvAwdBd+E&%az zCV2~pKX^YkG=;reX0ZNT+nLd}=U8n!%*+F7Z!I);p5CON7@Yy|-UA+#OL)L-41N>8 zQH%Ft{FM)XO-t%H3{8r^7V!B}B*(oKPtx`u=VhN{67X30LEoay9^O?BOSQj({p1C6 zxqcWLj?U%hnajjIyJ8q5Th>`~k`cU&IbqKltYe)dHfOLbHZS0*j<wPb?lc$Gtdp_1 zP`*LYm+<8VU)CB*{#a`bt({ZoqaEQ(YiQ!0{(lnpunJjYt$D;f;FBNN4nJ9U&m1-a zufj(iM~s7=?8$Zi%#XQ$Cf6)s-7|tut(ShOm}2sM5d%>UKU}!B4UK_tg0q-^Z}c7{ zc*&5y^r*39Uf?y_C}6%4^AyX@@#w#YA0OA02LFu<W-c-|xANa{>4L$1<i3^P{>y*B zCUS=nxSctTkGB$C>uy5bizk_10?x(rm$LV=au@HK9GAOzyp_B7BydVMPlYCaJK7xR z#?M)EGMU)qJLz})r8fMj&)@f?yN+g;kvCEL$q)B(k9^a7*;wLjB$hw<2z=Gi5y+3) zB|B3Od>L<NDH#v_GJe@*^t+hf)nm!S#Qz~};iK*Na_QhW@|F_M!@Y~5AERG;@WqO& z`BEu&+Zei^#d@wiiS@iT60;#%+e+U#;7aqilzuc1T4%n<?|7IJ=OPwseiO9ZL|mC- zsUk5FnZa7{BizQ;)^5!aGFI_iTj9MJy=v`<-4FHOY5zwF?YI9I+K-PFXd;Hr$Z-e0 zb)A*-fP0&#TK8Nyz*95vDdbmBo+m5c$2jr_)G<#f+^2(nyA=F3eEoJ?U?n>C2h2yT zt)D@QJvXmiHjmhQ=l#mrf_`&+%|_9z;$@^;r8tZ(@16+(*<`S14LTjhAUT!T>)Sfp z7IS~WT6<^Hq^9{zrrFtK8l4+1HwLSUjKPw%w$75ZCLg1-<XaETYrP!audxN(dnPpa z*P2C#&xR{waNHOjlk>~h=-!4w-3O8J<s|M;Fdmg|r*ZqBr47Ja`)S(2RwLMykFxP& zv!l^#bTxjQ+~FH<bmag;4zT0^Uk-2;1B;J3PVTiA-78km*t33~vFE{g?k{2Iq|6OG z`fmJ`G06R#$foo6O$=wl{RVIeN95J@zVTvY9^ODs&Sj7FJK`9%&s5%(-!cyE?X{oW z3I2~kNA1iDxt5puh$SiuHx_99YvR7RamF~W@?VOMhqJW~mpkDe;Mht3<IP90p&A)G zfdTu^ju+x!*aZyAqvg;Y&}P;w$2g-)@Jw7`3p@-wdM<caGgFFljRw}3%5m&3PjHu& z(+>TQFzy;>C)ckzu0nPL<*blx!-4&>jd4207+rqm(veRtXV%&XKV^_#p!f~wrQv$) z%*35kw9)6+U(E4a`*+3}KSt$PkjzP3lN@Q>Z)%@7H^YjPA#XJJW#4AObT@0z95Z7G zOg>^kcLS4eG%z7Id|q;*kjpu`CY^_gvYo)R8<;l7U}9V`9*@OG`Isx@;aK5K^fv(9 ziVHkVo`S^KK-ppUvmS+_>*!(fg)44FISF#Hg`(@vK1_ZU?O9%&D}8=kaOIm3o4rB) z7IY$S5^IRoV8dbTxCdG;i^@vbV#Vjm3DE=W)^p@!?3rYw^$Zyqdv;z%a_<YWMr(OQ z|8ocLL{2&k@-Fa6;p3=r20VvT0_AIy19vmdx~J?NwO(WJ>*T*zyY=F^Ri%SIcmw^a zsNGDPTaEmbz9Jc(hfXZHc^LY*>v!knW)JiwT2cP#ec+~-{kitchk;k)kJqtve^)GT zWesgiK=z8q1aloaug)ZN7I7K*j4zbCx(6^4%exi0BJ(}YYXh!$nO+eGTlY|z9*oKK z|J&TF=mW3)cXF@hvnTNYR|#t?bCMC$G1QLqPosTWAou8%0b<7<SC0C`JBwm>0feA4 z@k=xIFUJhUl^H?j+R~yH&X<Jizxx#HhU8$Wa*)aX)3U`!pYVJad0qxZ>t*J|0_8*U zk@I|ClCjeRJRiS-&dRy~Y+V`fg7)9!{~p|I#Zvy)c(ES&a4}wd1)53ZMddsaJnApT zdxEK5`Um_RlP#N{YP;39prpuW6wcK>IgeCC#%RDRJ-l1{IyuM;XTRjuL(wwEK3M%( za;q~w;UlvlA~X7-$q3J8dS!!9uK%|NBK#>k#LvNlBdQOGc0||8mt?IU4df)zTy^c8 zum9ESn&vscJ@~b7p9JpX>A%#Xf8D`#{!A^F!~aXb+4xyw@IS!8%D-Nyr^n`2?X&J% z`PcU$BQ+;GhM5zCz9h4?28a&+FdZC$+vbnm1v*Rkugc6(=x2%ZWYG%ajUVfN+Lx_j zI9VD$pR!3?Hm;k;VjDPb=Xz@l_r*=%zBu=V_r=BHjdfQ`4(nVf8tbhXZ`rpxrd`EZ 
zmMsuuAK=(S9Q3pz%hna!+sMu({KdojUEr0?OZG0|N3ugUuYP2<=2G%H{`~;&w_7$2 z-9h8*UwdvYD;dY~jx#qeJ7*7z^6;Y;4|mTC^s-+&O>S4t_b$jK6ysy%K^E`I))8V4 zn+Wep`qy4l=ZO(`73XVo?!E}TU*>&__O!>9?1h#i^K&imUV}~Md_JGofUR_RIrOYG zhSqU`U5p`KFD)fMWS%8&tUOK@uJ&<OD86)<6X*J%%YEc%i{x<{7n}z?Dk}@S(S61R zzlqJgoA=-T?NB+RGaun!Jnx)Y(p3vDIgtm>ndofF5rJL{FE`Y(*1yb{+Ps{XL!-4` zWRmHu$G)n33=tbHx<Upv-Obs?ShX2HpO*lS^gZDu7Ei0+(qpawr<(s0(CU!6?3ow% z26$KpY*rp@<YU_O3uJc$XO~_PsOR1AdhEG3j|1n}bMMRK?0CfAjO`b_KLf2@&l((J zkK4<fYros6bsn16ephr~&VKh9ctZ3q8?WrU&m#kYJt&$_9P{rl9iHYteGwY^-@G4B z^XF;kCdPve_-1Ut>mxScVrWP@g98{%V7NzouCd3BTlVTY=>yPsko~ziV&Dbo^V(x< zwSTsAyZw^FeAYkqEyrsH7oGKFwq^BUW7S@UJ<68t3vCCnO?G*(H|)lKy_GR;T|cjL ztNn?2eeg{GLSwKG8|-1XaaQN=vUT@7J@gFz1av1`5j0ffv-tV%EPmbw-JHGeChXdk z=f-5n4#b`t8$Nq|@`;l}d%={s<P&!WKVbc~Sv;-04cwV~AGsT3i~9sQb}nZFvZ@~$ zHY8^Qa_eF_8)UbTy<&j*7y>i&fA+qmz<eVxM{+j&k$hZ<deTDH2cN~O=k2ND1?fn! zwW^Y~Wj~16)N~)xxL`WAgs3bG#`De%*1H+#aYOGef>)jeUlF<YT~A;;I2vyMvNy%p zq5F%pFVDOG#V5+x$G-KyBQepn&PXoLX4Wd%YNQ7=qnj!x%e@cSo*2N6*bFbrmI&;Y zjBqNCDSMx?<bi@_<c<&7q`S@WFX}>OV)w`lzDcgQ32VQzMfXyF<M-jmn|a<soi}U~ z`W@y3zsH<Vfeytp&b>L4r+;#|P4KIpBO>dA%EeZFHavSAu)M+Zox1xQ+0#6=WU$HF zZ&>!obNdb1G_~)MywZ4_4*Uc-3tUW|KJssSH7<vJ`~kyxh?LP<r`_Q6ylxQNAI0RK z#^@Lf&*cu%E{!w9J?zM=;H|**i-@ce{$I0XcE-3s1^AEYmFO@<?r)h5=tq_A{ibz4 zqx+BKuEp2LSr$3Bb~ZOH)A_#xxu*MpbPjI$IPgsHdCXC>TvJL48v|y;QQA3x9@2<E zMh$z%a@FJfTYII($4tw1(#To)TE?Hh+P%EqoHVwUG3(wX@;7_YF}t)j+`+u>r=D~b zC(o4gLizl)FZ^gs*1(5fH91?ev0mAcIZ5zY#Q!FbyO{P(Mt4T{jji)O?u4iXFN;}s zw8s1i`qdg!K_4+)R{Oex^ebJc1sa%pQ%Oe=x^Z5rqhQ}?qadrKdD&yw=adVnn)2N* z7WD6$^?25VM%g$c_nQBOeTmkhTFP1LDxe+zY4$7Hf9gDuc^d4+SK|$v)4$W^JiuJ{ zH)H!O8ILZ-9T;=i4;3N9e@i}#x#R@>=2NAfDtlgC4Roh^yKT;XC%?6hNx!XSe@l+Y z10ix3YJR+{-B;d%40M|l@e4@tVpFJea7T;Dvv<p&(Xr^j)TyIgmodR#{g&C`#NVMn zx#57Z9vJfmKUbjfF%Ir0b@cCt#s{pvC-&F!J2KXZ!R;C=?+Krb)$h}qD*N_8c=l>= zbUS>q`^BmK<YK9Of%-8V{a^@=GAmwS-p}s^#s{U#b5`SJ|JbnMRJf8n92paunwAZR zpIE)!wzT6<F&FB?+6z$E<%0jfw-a7V#P>G#A2-pia*50D=qBhwK13ItAMTdC2buB_ zd7DL>iStwnUr0vE&ix8G4jsn$3i;<??>K(dh~_NioEJ_-gSUhKGH7Nua^eMYRBAuc zi>wgcOf+pSH@*tk(<|fY>c%0so?zh`hquR1;}6kIzUN!1llx)B58`>A=kQEpdl}q@ zZMOQCqH`FVyLm(AU;=c?{6%S0ZS6T7$+;fGZw#;J<^>y$_$1yI5|vZ%=y^SIplShU z-=!TNC0{p?XhRl%O0R9VO)S`F&zYNNOPgE!Y4V(+Bd|x=;j{K?j{a)o$rNZ;Wvu_! 
zzO)WJwj^mU=uGh;E4lwrc~IfGODwyL@Z7<CmD#u}(sUH~z`28YbHc-_<Hq+;I*Z^R z&2z~ZWUR%fDf|~rLl4%z$nkOVT-8@OcdT;nlB2?heKFpy%-uW(PoiVQ-v{I<DJjzU zLi8`&NQrHr0KGimVGq~CJwTj6b%m}j>GF)q7+dTtX*kK5GV9&!40zSI0r?LczCW4P zxvO}~_rJ`JNj#IUUIzRi`Tj@jpIgCI_3LIw27Z&(ya%7mR)4HTy3?Rx^T^Fvnk$Ey z!99M_cKKgR+macQOAgZ=!CTNojz;brI=N8ivXMK7WaCP_mqhx8#?y|zrn7}}`UidX zFc$4Cwt?fsJ4pV8Gk4u*lH0Ul+3`1qo(05c!tDS~Gp!(G%PDy4_46`1GLEcSrNRyL zV2D<<$Bo<92f&HSSX&2ucd;LbHbUAL&y}oVjv4Qj)Yo1_^Zd?-+%u{?#^6MDW5?<# zV;i3`Jq|P5!inUdFM1F0KVoyyd$NIc!&8QAWoG&;qi2R?k6uk%D)%4{QdXypEjIJ& z>cDvocu-$2Q(mL?rbg_Lk_9!u6SH4%UtbqFZF$%26&*G)mkx9T(YN@^fsUX&c+j*p zm!jWzIu!oYxB3M~l_$wltG2{@@q4uR{aJfdSE`NQPkEsa*-<1DqHCuox^|8M2U~&b z{<wL8&N9SvK4cu{44k(*4^-{(^zVy~`w`(7zb}=`{|El<a^Xv(^`;yhzK(vh9|L!T zUZeZ<#cQ2@?akb4TdcDk?d1;BmT(Ka*1QSVCBPvYLgIey%-8-Ixih?e5Z*iG+@0Ra z*LDV4wB!(eXNwq<Xh*h2-J$1;;$}bnsBh7X#>tv!?O$X1m&#)M*I%=@6Mm#uWCEMd zZgidCt{lCy*e)H`nIb<>llu~s%W(gQfr70gOFH}P69YE(Ji+yu1D39}I(zI}>(|bI z%g^T_KAZS_o6i$`9$z~@e6@YBZ~dgA_RFSvdXh(W`p`3blCPs3N93H{K5jtrd#gR^ z;9K@&&itkXgZ4CXzRm3D*qgR%_xiEB+U=PGJ;|dxcQB{2H*Yn7h4-g|Tgi@{wzgZ_ z?Mn-Ng)`?y#vieJ?`Aw5<cjM3-H473&J$0Qi}zGGxg(5!N_aUoVPNPbzxz@AMS7o3 z4rqMgX%jje=u>@1k^}vWHB^DFk!b`z$KG%WzB=XjCF|VW_plj|&+TY*zxQ%I+ynpY zSe^UV1<3u&nYqq?!oQ}U^=8-6%h=alZ7}Z9ouQkJ!86$5k>$Y_d|@?b#Zmuc2ehVV zE%+xpI5XJ5opI&nl>TdGn3V@?lPaqA<o6%VH!BA!N(Qy(EV8G#`jX8-Xk)OCzWV8_ z1OD(aZggMW>k{agVb0c_bKIM0`7ei0j4TTOF1ct5eJN-BmI)<=AJ`{&Pl9*lod1Ax z!ZYXxp(<?XZey%tE%u69v*8qF?d9YnXHQvgjtR8jKU=rjcdy5p;?FVNZ-np-ZTa5! zxL1yQ3yo**YWd!_yV|(xW96sJ$_*(Qu5xr}mzjmns;mCx=_a|#Q(Wr1a*wgJVz05Z z5n5Z1u3fbW-(=S9=ipVr^v~l<3ahr`Q=P?G0q?o-J9eO39@BYdF?>qD8>0O>>34(G zZ%X~kQ_V_0{npd(4Ek*}Q|o8&Ozqt*zd*G`pHI{0De^3<UvnT_sdF!lLI2mHUyyfo z@N|{rL578wUgpntihWD+4V^vM{@bC$!<G*VzHU#G$MIj7=Z74fAKKHsS2~T&(9LVd z@e>!F==@o<B0O6-rY-Y>aOHr*$e3vxKkF{~Znl$G5}XukoSd)pF+M-z(|Vf1`239T z1Msrj=BjU<Y*s##lIdz?jAf>yqK!O!N3+bz=IoNf$H)b3wG$nqd{9Cst}D`5eD6bF z^67*Yn$W#w+KgGtRZhFgg_#1ZF3OAX!Ir=IOxvVc71sA9vljF1118nIF1`-(--SP% ze7mQZ-{0ahH)N%o>0S=zsh=_HYU_K#tRm}s@~i^BH71Rv7aY3-$WGb8$W1>Kx7=^G zH-g(!;535Yl+Hgy@|Vuc$uuifUwDr3;&EuhBm3ULIsWgHt22YXpw}J!H{&CDFyC+K zQdg7=>OXtMu6_89^zq+`?>D+z!(!9udIep&?@IhfSi5;1;F<0cNbhIP#*^n;c%1u} z@LXtp8GG#doG~VJG`?b{Eqj*Ayx=vSc0@PrZzs^rUi-PRMaGrfd6F^ykg-MRCi4Qi zk^HE6D_mI*&4hsYV(^c`uU7DD{PANr2wd&9I2wbOVl*bYkI`70=*wYNw!%Y0=qnle za+MTqhQ7YfJyJ0m6Ypxz-8%-~RB*=H??+xwP@A7PzkB8X6`5a+J%R^k=N+;69XZ_m zW_nx5x0iQtw*N7g5tu^19`wsTYks|c*<b~S#vX%Zljh?S@O3?Qgv9w)d+~II4v^jl z{vFJ}_*Zt33ESFk{fY83e?r@`=_a-v-|lht-Wo?AFvjDrC*i;BxxLU<2-*Ju_f(uj zu83ZpJ6p0mjQKsvdX|HGY$zT+j@rzCXL@F!>LKDST=3@@{G#!V9yPjY*}L{Dyr*7Z zKLVfbG+VRe*M4fv4KDK45B9M>pwnUFO7|vlm-TVvmF6&H;Wy8}G<#x!r_|VSnsUK0 z)&{=mTfc8|Mf^5o`;;t=Jr}PFHo?0O_$NKrlBGV>MnCsB9%l`T?;~JLEZ83{*WDNU znQy%-|5gi^mqdNqOQSq{iP3dzCu;<>r}tt!i|&1PH-7$Q^l4-^XTA0Hk%1#eMh4zq zI1*iBlDFrOarWorJIwhj=Q3@jUA>1NT&B5^oR&RaV`g1W@0pg;A^jMB9~){jyCiEy zFz$=NGU})2VR6joT<6&R8=dGCk|T!MJwFDM2~0iffeE<Y2ChE?m+m0xsc)LUVgk01 zW5AU_hcS5m%pJ{_GtZsO^Cs|8V4E`Q1KypGl2ahrQ#J+r5<G&f3mH6RR{zz+1yFtr zpNIFEuYg%y&|u~jEH!fqW}0OM>E?|EOZR`qs+<1GEd_bMGj_DBxqp*``{Cc@PD!<i zA4criz*GN?c8~uu((cTCw-hwLcCL+G@Tk>ph`EmTHNc&0>1DQo7d*__XBgur@z(V} zx{$Zv&q1}}_&EGC?>|f&>&9OKUGG2#>(<TN0$HN_Ue>Rfv@M*8zARb>H#eWUY_7%` zK8^m95y9Uxl<V2PY-5M;=QudS&))xBos-~FZJsuA3Pi*3-?I+lD0(xE9UkzH0V1P^ zeQL%v*zSeHiIH|+;`x?0joGTJy*)PHz=!sUMOQIy!6RMm6#e{H=Pv!{5gZ40-%{W> za1Nil8lyPQV@-_Vcp-i7pMIW)i|EIqCFDhnPKKk8Kv$%XgFnBe!1v0z_I9n0(FgYJ zSRb-^_i*<C_v!@J5d*4mN~Zi3zX9FlBU-g^#~$Xfu>*7O=KW8a>s8;q_*{!t{^@Vj zANBa~^<^U#y=?v)^*bLMzP{=%rvCtYKJCTy`$J@?c&zbnw7H7gZWGrAtuLYh?zb+I z9ru%Pa%>p-=llvg(8R!7>}j5#6450-1jq7qiRnny2aHPH5fx8+AF#HG=Dq+e^s=6@ 
zNAKE?Zq(0jFS=0`vb#>Y(LWiL{pc38@EOkoJhRq(wTFB^J{}+B%q00pj}I<nud2Qs zXR3(PF?L)bI{AdPa@A88;zRZmwcP`5Ip6WVh;E>>pPq%rAbZ%I82dBk!gFE%zj5ct zz)feu1>3OYs{g0S6R-Xy(=|TvnZ_8yLpEa-JQiPHL>IjN$qQi;k7#}+Yn-gB;vLaJ zFZ13B4~YiUPfY&A<j!mC&)R|c80+j&;5kJ+lh!m(68k{>Ae+z?ik-xG@m%G(zu<g> ze9axiA32D1+W)b;usIyI>QAE{dzj;TH`bow>w4d@{j}qI-}|d`&Hg7W-^jBI4|%tx zOfDZWWtRJ4^Y}}&5#kJRA37WU2I>3wFQ1BDx7G$@Fon3W6sK{eL-kHZ`kTvsQSkf@ z8n4<DiKlh&-YMG2!zS_%+*4omxY-Z_C;7zEofvWPwekAR=hsH*4Do9<@?vo48a>`x zql?p7qdC|3(I>Qf`NpB`4!7<O+dgZp10BTpDQoSf(yljIFM81h(1o$x8WoeC`%aeD z;9@hSKDPGCZl!(nsTWGR;&r*uzm#^pk1m58nB5-TC#fyz4dONYfa^`2lb{Q+M|hO| z^rJKQpr;m%f`-aZP6^a!W(2C%n+>h^qy{=yT@t8UyJ5@rwXIu<i_C^z@O5%gWWAWc z+V*+VzvUmGwO;n?gUHl6=CRy9d6vde^#ik^kA5cDCeGrn-HMRQ=#t+D>)~BlCU?S) zGG^wB-t5l$e#TSA8B2=I7@M`GJw<KJp{)<;Pr8c!6IWE3M_*p(6#H~VJ>z?ZkApk; zTAuS~X+M4PNODo{JIO_YF`xI3O^(>o`d)5b_R`DG+?BG%`^1v7jfcw5-o8n3rsZa; z3z}>w+vb`qT5ONR*c}XUF0>N*{NOV57@or$$5r-A^z_g;^jw~ww5NH=uYEu;+4xq> zgh{^bPjg<TIe8yhAw5pKPOj_>jZHj;?ZJBryyX9~ElV=3FL_HAHX&;b{t!IgYPww5 zg}Nkr)qjU=@+{9K)a6`oEqMBX{+-Z%Z+~);U~V376!u~-sv<7b&z-HkziZ1<U+QBq zXNDd#%O$y^w)+c=!K>8%iR+5`ez&M7)6A-O?Aekfz1YDSjNoxPjdUkxtj*ZUKd?FK zdfSYyt<a42?~c*jq4#TWy1ull@5IOe_nu;lgkO^l>8SzcCcXF)#jB#nV-KpwhTw1^ zH;?FU@^Qi6;~N&UOLc$Wm;;`km7ORi4@1C|=Q8GehOvjB?bF1$owgIF3LQ*h&Wjmi z1$*N%>J={n=EHdbhhTt?K1H30oC|U`xYR0Nc)lE8fyx-%qXjFva3ZYv%y%>UxEQR@ z#>2Y#LRkO8en_y(#&(3?>f3@JJ9(SU)=&+c-_kgu`0{d|W#P$8@hA4}$M!I2pX~MJ zpYQ+Cxc;j*Mf(4U^Y?gsIcYxzgZeJx?Co$A7PY7MPjfGn=C16o+~*+teiWW9*%>;N zToi+??@+Qem*ug!B<3d(#(r=NUeDpj7Z2Yf7ve`aD51_Z(RtDTZs@lp4rV=D3yo;5 zd)x3`WRBbLMec`p+pZ%fo4M_yj^gr!-^~k*rELo$&oBQk%qc!ZL+14AIGDSeFN8T1 zH>bMOE{2~CtmhY-J69$&e#v>fOpWVf>$fiKV>jbFFwUslJ<RxaGQQpa7vr<X_1(Sp z!oDvyzThW}?*MZcN<TlozSHM<c<Tcf_Ay~p$y|GMzFtTznHyRpn}M;j8k)OjlBEwm z8{_w$Md(C}O1eH|-F9;~2<IP_l4GsR|2Ib(1u^_f#*EK5I);-m2Syo{A3{5_f#;1s z4`(-Uo-8OCJOV7*Tf32Mqj*<qQ|}m^Q;e_BdN}*J<}A@%1$hyA;nhB5TQm3=PmT}S zkrpG<s-iNj7hdNMwaWIm`Tk#V_{jOjh4^@z{`X<`jkRsr_0g^K;IUZSec-5^HP{8c zS1?u=GRl&n$rFpzXFd4v#mUhY`uxCE(xvgm<Y~!Wk-l}V5X0}X=sbw#HXIFCzHnku zK@5&E<k7{(s5y_})W<$DCKK!9#@c<SF()RAZl0f3a4Gf()e)a+E?$_IR=`?+PCngy zX3^X=@RWmcFK`KVy|4dze~7FE2V*WW-~G@?`=s+QT^9#aPTht0{{?-;WTfy={&u+X z-{ScE1?*>n`5&UV+QqkUD7hZPaUXQi4_&kn^CADa@zaeC)>eNVv_Wo<hTiCUC%kDa zo;W#~9S`4^FPtL>{b@Z?J^4)tFQ1Krq0*jlP9_RpmRw}d6O)UodrmI$Tyils$D>EE zH=?{EO2dLv=QNUwEpcnzNqqiebYxUt2&3di3@4I{vG)D&31^h-zg*-gi110Q4;S+k z-^U-~`sn$h!QDa;I+c&L#-*{x`moM%fjfrx4*EHYj+1&Zyt791_Jw!G=J|JdX$9<M zGA7c-H~GDW-?1|Be)9u6QfBMS0($7j2j>5awPtJFc&l%^kS;GKi$7_t*$#c>VdIGL z<3n+Mlq|cjk34WH+Lmq<8-EDh5Whfgi2INwcVaU3Z|QRV^v#*jxxP7vI!80rH(c1a zWNs*JVD1%B81&!A|NJ;uG#4>=-gwdD&$IszV2b0lg>f)-FTN0_FVnYtnN~z$!lrr- zFMk*JTd;Bi<J=GMV{u<=%#<E-?Ej%2GARzurfV*Q^M6bm`1i+X!-(sny7a<6a-ukp zZnfAMv0H1sTmE5qLj3=``OiF^J6GA4@Lx8FVtgp#-_f)3VV>Q@vvYI^j<d8kVLjGb zTxECESCYp@JjptEPKJHbS}YkOxtSM711}QG8<S;Qi!L0u<gM!HyePI-NneS<CD~BP zyj!w?`x?*5TxcLBb5-x0%;ow2L>K*T9Gum&FNE`dLIVe&(e@X^JO7O}O>jSMq!nl_ z5xkElr_Gg~oxypug=6$?^!aBc`{MUeqKDY{ta9|xSotyV9g`QIw2xvRa*huFE^eGT zm!H=e{wL$Bm;8%sdl79**NM${HFl31k@Z@;HP-$3Ex6g^mO)oKPdPe0LRXW_NiJtx zJ984+>A5U2A8JQ?_#LN*()SAqJg}Gleeei6oc7%QKO~gDm+(IgK8W#{Y*6C6>S3OJ zooANqANrQv-(hyI{u=XR+414^$g`5wk4K+rZHPVF7JXKR%#J<NSwI>7<NE(am4AXQ zbQJ!!7utrqSAULr`$k3DxL56siTr+$-_7{Ws_iHLM%%Bj|GBX3lGT#Wp9E9q*x~Da z<!{t`IDPnf_^W<mf2-1lt=APh!*^W7W_$UV;p%l4InW(tr_H-O{Ik`ApL}lnFng6@ zaMY#q=1;=W_2kwXuHGeoquwa=pkeA2p_6%k{WoCk9L8UPHNdL7?{Cz-KI$t_2Jbj? 
zBWIM<{hK%*h5Wi0yxi}aupV`<-X^^@ZXKhK)%%Bec4(Mq{Rz(+6aM2fHKgtDB>dl+ z@IRRFU%AfWVe3x#{}TV@Z`2&;FK69z%QM+|<YN*mm#tL(Wj?!M;ZV<&v(d#n(x+L& z&dKJ}#0AG>Cw7@Rl6QLFhhHOd^R7Aewp(>}IAT8WE!Y!zXNTLxZ_W{W_$~h<`M)=@ z?i~JR$>63x(;hxZ>~{w{?2%kwhmqNeN5-d+JUJKoJ)im7udH|l^{X=!!P>)k%ZW7_ z<W9pY_R*%@Mm|p){)%?u8Ip)+7(qP4Nc?b<JI?Mk&*jM~!j^r4*cinFM)MwFFT)39 z8n!un?cmpCI@>DxeR?4LBXW2q8(nYUr?Ky^RfYTJkSh~kxV6Ls$S-_9d-go+t}!39 zeNzqPT1(5n&v$RR)8#LT`kl%D>zI8~a1i@uKlS8)CO@!7?DfwSbDyfyW%+@nV%w4* zSgM}kzoPc*@wJ-4rw!Y+d{~O{Lpou#H7b~kA6m6|mUATeMHP3D%L^X$^1kw4o+3`( z@lm+af|F;KKN+x#eq(h~;{2kt2e{8ZVU}p=KKmuJ_F3N<vwmfLPoAY1n0@%>XdE#g zun>A!A2xRRfGKZ7%s=N0Fg4<%^Gu;pxiV$k5FedXo;ABm3a`Yr9`nzUpIgL-JTjJe zAFo<`K(00?d&isK*`i#1_u0qK60fR1_0yY-FEMygj9DW-eST<-v$ufaj%O<#4L`U> z+r;`7=Jnneznk?P`33F2vQWNbRh%uvzmJ}1y<cUYSihK<j}6qhW$$;hQubd(9#yAT zxEBqHKAM0*xm6$F+lefT;oSjF`oT#nIC(cZ?`MG3#k{x5|IeOUfAk8Y@?qB$S0}!4 zwaoi&bdM!G+n-WW^hI-&OEhP-7oGRWoR1D_->bIz7_(?WV{Wsj3>m{|;*~V!HpZYa ze;4?j`{xzDkzxdv18WuUyTo%*9Gzl5PbjB?>RG?RDfNYea&X{};vnC`0cR7uTS_}2 zV31CJHSz87IQ$*$sIKtV4nO@KnB&WT6IZ^E@<SHgO_?=$RCU4Cqst0z8*^j9zoh3C zOwPzDnC$w@+*m*HZTwqY8}glbG1|s`_>4?WEi1Th^o<3-Ps=N~dR$JyZ5j9k#9$qN zO967DQt~kNEWX|LxOVrUH(I_G)WtX7djB?jQ8ecGI#0yaX$F=y-c5XGW86DF{FlFT zk^Y<G-c!6+EAKh~5T5gGz9;j4IGFE?)@i0);k?m4<*r`viqBNm{nWe9K4sR`w(5f0 z?BFE{yo>}dPVjQF;2d7=OTA^t9DXGZj&{aWcM*8M824TX_$+x8#dSQ6mc_l7$9tc@ zNPA^*@A-Id$wl7#Y}|Xzytm*Y?~(WB94-35N#RA_^Txeb#(ST>$a^_)?>Tty(u=$| zKJL8`_|sZE+}x$cy_d&(I_HkZTT)zk8S|^XX?*z^Vn$-TmPfhxRy>;*<<+I^^W4+0 zVZrYXD~9d<O^NxilnYDo6XRJ|^I4zehtb4;oh9B*e{SsU{pfG_5IowBzj_D0>YR0? z%f~wv+;={g5s)nXaduPJoy3U#nD~U>pr`$WHeN+%`!@gIt(`GY&-owz)%~s5+^W&d z-UZfIYwHW&Y%m(~SeM3p98P<-_RhlhoN4|Z*3QvpQ`e8rhUd~&dKlaM7<@Zs5Oa0~ zZN6)c_V*%#tatDMiGODlV@-geD+<Fd<-my5SB`bT{A1NGjr8%8Y@<s#ZdLzP{Ni54 zFOD*6EZz9UJ;L+u+GpmyLf!Z}EAh?#32}0QYcFN7-|~r+Z@YZ4p`D$h!BhB`#K%HA z!nfo)bK0eH=}RH>keI$yLtl%~Z^ABY{pd^5|JbAU*GP8bLwxqWMd&!!q3c|W&T|dA z&qDH}F6clP3Mf{EwPN#ghpE4w80z)PzZ8j~4#ma1C&s&XO#3Xc?(>yHvvb?}2Rf~s zE1dhA_^%k1se#<j%nc}~!;wu+@}{PAyu+Dse9qKu>*sgA%su8WF-CO7gQv$Sr_a#X z(pXLgE3ONjYY*`f=wjaDGiJjxjBN+DT=|PCF6s#QAohlsKzHy&qtP)VBXW<rXjHMa zSMZ*1EV>(I;H#@=Iq~S!`2l{R4sw4g{~$D6Nq%(2Tg!JoxRdrLz(!$PW`BtOLU#G3 zM(R1!iN~<x_s;&JHP!}a`@|pweds9aSHE+hktWvY8?K7n4c{$W!8v#pr<`HIYXGn6 zAQvll*X0x@0fXYDaiZId&uJuoPjp^QYhE*nQ9iE&pB{(KyWX_s^Rvw5aCx)xh_e}Q zMD9FS+;#>!#eL)=YO@*j$nDB|`K^2l-vmEv8T)Yje>rz%D|a)oiNRdp2#++LdjbA6 z&}DA7HQ}!{f!Ou&!SI3b<1WUaa^YYbG#V=(plmDuYk2>AZ-pOM{dWG>@?SP<avBl4 z?es6e7vkZU!;jxh?xC}B@z}BZ55f!W2SZmHXU}{s8Jc!L+fHaa1zLBZ&y%CyZV%i> zu7lbMC4-JBTW@ffUwQI`J;~DPPeVueUU{dStv=Lyg)#VjXbZbt5I?sr+4DTetcimq z8*BORuz&T*DTCGcea-W_J*jt<GVYeo8Dm5E*b>X>Z3ey;^8Og2{V91?9zn$pI&!PC zOBj34LNjo9xfw7nX}{sfNbX3$219<6p!1`<5ADZJ?fCee*WYE#j^C6#pL2H1(#tjX zACu4K{!0Qj_+Riy=F|S8>Pzu&Q~9qP8STl$-!VS<xryiZtWpf%5^zufF3Q2lVsKLi zzc1<tU0HG#oLMx|JFVP<t;prRHf^SKJkJA7V^bEVrF%?AMa#7PSt*yjl%@D^(V=Hu z(^x%s@;t{}k?njj{JKFLnZx+Xllag0KP}kIuRiGz|ED!(JNZuGW9waX*2t^LE`4s% z*=axIx8wF~n{j(~siP*_$Xk(}lUJSXFz(HE<Wy&y=Iz<NH@9UOIW^fu_`LKbw`CQ5 zT{#C+QnJu{t8Qa$xeWORo#y=hAI=!y_F8`5nrti$lOtbtSK%ncZ+x7(dhI!0U~}oL zHVr?Z(2tsL{ZHmNgda$Fn$eYJ%W@sV?_9X$45Q*0_*YrTo>hTt?RxW5_@1o)PL_KT zek|$6(o;Vo#xm1r$lueP)yVe@`s(Lw^%(oUcggAYzMEVC;1L|W@9505;)+sOvtn^Y zf=zcXgccgars~d(D7^f4Am0_c79y_e66Uj;*bzVL!BO6ae;Q8ysx9j%?`iLQl6Ms2 zK92V|m)gl5(|T{QeS)i)v8tar*o6#y=~btf_gT+3lMj5y?yDHn<84{{ZHD4<y~d+= zWcMfrEW;=$W9`ld{)#ci*jw!-1?7!N%NNs@3C&Kut^Q1q7-z-aT$OFi?gd9&;4n7# zwz@OkHA&;F_nbA^Dfs^7Gk5j(lnw^TvtPc(99Pf!UJi{1!^v}R$A{emY`M_EAaHwl z*TcKz?xf|p=_O}t)!(F=Z1uN+7}DX|dI`I%+FB89Yk^~h)mHAr+q3(MOS)`ncV?^Z zZtzyYcye-X&mO^dAO5>l_)NKE3*&dLqdHr#Xde2&C-d%|1`Q@Xx*~fTZLY|>Bis4i 
z@ay%^IQuUDrO<ad<MaH+>z}^1>P-JaVn3l>d)mtE?L1#G@yprMslTG|j_lmRFK2u3 zr;=}HPR@$#{rFWSVVjanxu&otyNPE>*pKChw}Iz^C7-tLP<?*aQ#Y4(Ts}{s>H}xs zTGo{DhIq}$uswQLc5dG7**Q6PWOs6|FmRb{H_E5#4t|J@t(bS7a@>)9DedLc7VA_{ zV^^E`<dK(5%7f<&zOgd|)&J%6|4+Q%8SQ%|zFU{q<`+sA)4S#AtywF<7xx&{E0=`M zX|E9v1H;SQ?Qnac-~rdPDId%IZ&ektH$7Oqhy0!BY=Tj+eGF{tN@}cke+d7^!@iBO z&~oPKYWy~6$43lzF1)_mW;85e4KKhKH}?L{u>%Fhqx1>vk_E&-1U=Yv^Y9H3-Xm+K zQHXp<e}r~>nJ*{v)QfC*iFX{Yq^0LDAK2I{`iKp$f_JzpAZr9UczV#Is(%~aK`d3* zpu42&fGx%KAGS#@?FDVnXBoJ%fh)l>RWhyU_H67E)_j+NE6sP)M58hY4X))*fnL_b z8tTT{lb+U(@1FWRJdJZp{OpSDoc;4%WlpLuf9`uaW2h@jZ_7HByy4bj@+%OF*~OaC zrSq&1V<}_3kuLao;5A+g?>G!Br>03i#P=E7MD=gN<X*wInCChCKZWc(MPJ$%So{iK z&xQw8eyzq#dG{;f9m2^kRG;5!?-YBqHyMZa60Gaqt9ZBR<?s&Svp<<LU+`1RIJ6#0 zW;Z&G2Ca$x$V)x1Wew?@(3aJ9nd>faouYU0I1}sV*$mTQ{Y*(&ZKissAgfP}U@gpO zvhr#>91g#cS<<x*Seordg=4LeE?rN!T)><!oLH0X6aO&};@1U*E3#e8f$%RHuf}#= z4$aH2SM=J0Ph|-_qxGegJ&F(iU;WPEx8Avhwnamtn-A>;(hbOcXa*0muPWl4V<$TI zZrh}O>1Wc*4)DB(_xJO?n7Vqu8CvVNLucv6*c2<jjI)CLwRe&0FRPd}BhPLu8)Us$ zK^sM^DOy{-4fr{x8x4c@4OzX+kB>E3>z2v6lIFc%{V_+}-LY9?*E|8kPCNTG{m->e zvEJKm!Q~9zWW8qut9OTYgph~Ivt0&W&(OwU#@R!h;vaY?CuN|3*e@@8O)Ee5N#;}c zd$hu5?*W$+Srg+~%QyOLRiXRO$Pi?ya%j}R(+>E^!$&*@T@^X`SnDZrVn4i}hd(uY ziwxPQ@Z%3Y0IpbP(%-ZhMbcx4)!gw7bU)3_Z_b7%#Ndg+(ux1|Z;|DkQFQe|XY&2c zwL7fw`tf5D{ZG<((S1)bzCq>y-`<@+r!B$GekypFGWFdug7@erv)f3YLtb*nWPCTU zQ#&SL6NF~#>1!Wwy3Hx|DV!;lqIapDauGfW+yfRpxPuzYy=TKapQew<J5#K89uqw< zH(Q?%?-2abOESkrcvSDHomSq7;rk40ka)G5+(D1h)*xfi7%1;L30>upOCtxrtdH26 z8PA1x<V|Q|-Di9y+`X`nIINQo9I~;tPTwDXJQtpo-pxkN!i9AE>Ys#nC~iP=ewuI1 zA#||02mL6bqhn{TO6vFm_|qK_k#{qLEBW8l3GIL@^>-2&$0N^)sr8D6CZYGqMxpWC z!gz#V^(WkSZ&#mF=+6vZey*=6RxYcP(~QB-p6g3{1p24#8+kSeF4ULyZ^Ca8xcg=Y zFc8Ck2AsmHv*$6!5%eowILuh{sdonbT{N!oWWxJ@0ay53$yTL1LmEopiFv8WeCWsH z8YoCCBgfS2HuRnpWW{g)>Mr~O@2dUSe6bJTob$u*4*Cg-)<V-FWdyCu2e7|~xN7B? 
z6)rM)xBF@4KKgE~4Qvsc4W0{6!qF~dZXWsZg;Vs<!N}ZA41Sfl6Af(L7I|0n_E(;{ z$qTTOaqLVs<~Ai8bNZMUwH39CS-dM+67N3q7xtaV8ObR3D0r3s+br5}S}-03KU<%q zecHaCoG9WEoo$HDe;4gbyeK%7f9#oGhAV%LoY8x+=j0!>XjL$19FmtB%g*R~;w$Xh z7GCREtI*LicK;=EZ<r;Im<z`*t@xS-$DYTs<eRVZT=-f2Oy<A7{~0`q7WPH;2=|_d z9x;P*%~?NskHx#p^TYg(;W3kOG(CPEhuh+CcvF%wHz&!MqyDx5n{a83Rb%1qj(yO; zN#<B*9<p6Kk$J_)yu;wL%5+#VFE1+Z%8+gMGUhyFo$B4gZ;el5(|?U2*0$PQ6)n%C zJoa2Pr?Rh5w)Bq|<{HKB;brZ9&A7L8@T*slm-JKYJCMI+%I~+w9mx2PuMd2HJ)woY za~=C;=W6eO&UCd0$jm*I)sBy$W6cAb96vAu#+o%7FP|H+iJ<@O93OoT9sGU0JNxo$ z2lYNS@7GkP+{_KAp5K;oAf;vDp^8y{f8*Mu<J@al**rSMpLh9)!A+kxJ;3*@;9LJ0 zV>9{CkhSOY(Z%vO$LB(Ur*=3Tkz7PNXL2!~NIsEqLFIouJ4bmO&*%T<T;|!$|4?eR z`zdhXOKIv_tbGIHS6`0fYYus9eFJr@BUjeu4aDkJeTIFVVafKp4EpdT83oNrjsnLT z+j7>bt}=@+99FIt_k80n=bE(TDadK*C9;Rtu9~vE&YW_kYmH;M{E3w-&c|~vd1_p1 zl9t0?ca=w<RglleNxR6LyF9$3oB=J!A<r6fc?;{80~%p%L{3Hei{*%MP%c<BU$HU= zdPD5@a5kLr=tq`448FUWTw5MKbt&BKGzr>+58dYY`uZ<9pZF$rjfzRcuCPvj6aNnK z>JSg)u<Se^U^mER{(a=JV;`S>JNZbunHS&qCgK_+YfR08^J`4&ugTNGyPB^9@Pgtm zHq9|M%Lc97(&PA^!#wS@<|#Ay^G_$vQ|1NpL_Ef3*4y-!l=1!(%wqwt5`O}Yl$ZK^ z?!B0;$9aF+a>u;1<<cn~Y0X)hE8Po}x@O|?eCEo_Tsglqae14{(_45xWqED-hAbCl zdZu}Er2DfxJWE}Z&HL_IF76VFyzkG-;W>AD75I$z3k>E!c2OhANY{5f{YH7xqWN`< z6JC5R<LtsgVlFK{be^>jYM+p&=kSz8tMF0_`R6=m7jE*9gQXZgY6g!jPjS8oA8B1P zI0w8IoC^Qd;t|=B0*lUKJ3JfXu^>1TkKIjQqM7DbZJr44WLUf-SU<eTeLfEEd+_jK zUr|5t*|l2=o^9IlA@e;6y`pz&sb`Ko%SDc~`kddkWc`VpI-)THf1kUt<7+SdGvte3 ze8hY?YPgr{F43B^-?6r3i^Cjuz;Tmt(01~Hjp9k!@7kc5V&t?RIoq3(<xg=<@;g?L z$AG%!eEMlO_1cnMFQbFG&6NJl*k-4)=hL%Ra`R)431Slqe!FhfuD`Ecwd+-LjDPf1 zsZTsocloYPY_DH4NBP6_|0;BT1pQvm-(}4ZtoboGmx5oj#L6YZy_QS0k94FP2XzmF z^1DZ9ZRkBiA4cyP8obCIL*zL0HjgaXDEzPn%$BXob1*ZI=XChZJypBZru2bycuMc| zq#8TE`+#lSE_92Vk+qU%C!Zn?6khr52xI4Wrb;&+AAIj}?7TcbNnY&OeE0E8^py#H zJ;t1OAm`)hD>IJ1I)<UIjs*JZoE@=2v*$TSUt8noYb^A&8~U0<{(Fo5b}l-*b%A;2 z<Wt5VxI+eW=FU4`04Bj*wAUTL78RU0!kCME%TW2hY8=3cJlN61J7?h$y@Nhu<&l!^ zb~Ctvw=7+0oN}T@WsB%b_`4rC$6D=4Pn-~};Qb!@ucl4uSLj58I@1xYZKFS}39+@n zQ|}w7)ml)WH}EI=J|XxT&2i{{vjx}AkHS0OpswWBaPvPHy`x9vVaBcdQ!YjePWZ{< zlbljd;r%1V6_^uUr=X88e5C!D1N*vuOO6D!UxKg1-&;o;JKkJpG(2`Y_fEkdIyam2 z>$a?^ryn?^y`}i)H@_ke8+x!4nNA$-amC$D13!6d8aHl0-rocd>HffS$_~>u@hAOT z)hF_te3bP&;iv69UkQ)b(N73jdtfZ<7<Y?8UzMrIIN3Z~O;^PY@W`vV2eMu<M|nH1 zGj?4M4_3ijo!q%OjW&8yjP%`-tE_jO?AvsfQlFCQUkMDWkOOVzsDA(2hqu%t2kO6U z9IQXP;E?P_#4{Yn=2EHhR?1r`#}_ToiJVwv8|B|^X7)Q7Q!8~^dDhBZMXP|d75?6B zruI(*&Xvd3Y}~+J=T2LSzjFb4Cf_g7@10K>f%ojG!R~K0cD|Qu9DjAK5!m{z;?7RK zwKv5-#==KMbS&E$7d|1v&r+>3o1mq0bQfLI{&c0Wc{F2-U(1|P`Z}+3T)3891D%Qf z;_p)!s4DG<(NYR@;$htmG500NU+qgIt0hOhjLm7LkRQwRmm{z8U#Z%)g!zzv&mP{> zyp|)g%7Fcx-hi#xnA3c#+DObX9Fg-)U77S_LR*gNPwrzI_>b=+CVLLdh{&FCg9`^w zAP;IKdj{=;H-jIC?A7!yc{-bQq#6Dl&zd5=<#Xh^6;B{ncC<g>Ue1|Y*UQjgGoJ&@ zt@6?J@mUESYOWt4H|y8P%ke7u3o`z|OJ>S}V6A7D2O78&yX%|GWBJ<ZLwk|&t<XJj zss2{*wu+Bv<aTI8>x9~R)Hd3mMtNFRN!N4cX#a1lHb(njZ7>3AXEeI(OSJXrI?t|m z?PG%LX|tC1J?0p{>=f2-+7%Dc|3U9Bt9FTA@N=5&JGWj8&9$s`eq5X!rj5utPv5b1 zzII*QIuGAfKxe{H)7fxD&VFyv+3Gks{46v_4#L4rXQJh=P>vk#K^DnA=44J&(SPz; zbNYyxFN0=AqrZ-tpSJu>_;ghI!&&d58$F9|^c(b_m(h)WjBey+zB<z%;rW#1N3rMr zgfcyQ1l_1J{o7fsR$2D)N6?Laf^HOfKl1z)bfbsSjU+d)FI)bA@;@EL|4uWtq7B`U zbE~dY=xG(O>6v7RbNVY;Ezn`z+NxdJ+v(YUz4LI@F2U~Sp5LwfAC3I+p#wRn_ssNt z*g^GPs#o%%oOM;PJ(5>?e;f1G3$3|$1}%BFQ@4ydqfD1|o}*{}o3Kgoyqx~8Cx2t! 
z^we3s_EBEvn(F6su;q()LSrsDb23?ZM3slr#yy!vK<i742b34vp>xA1FZ{pK^A604 z=y^@#pczij`wa6hJjM~eBCCl|khE`Rp!Lo>|OS&bfY01e)Ks0AH21^uJSY1}3J z^|8Ns$>HG4#kO2=f92qYb>prk<nR7uqhK**9?HroyFv57o~Htu5gi?aR_fWa{1|$8 z23lFgo(I`|SF8C*)^^jM^)U2tCwrbo_B@^BSUSM7A44N6X|ItzPb>cqm=9;Qvgc`K zPqPGjBl17J&z|aa5yLEB#tm8AV<njA*Gs=H`aMd&+Cx1|zkd2%Lcdk?TTfdXfM+HB zUQfSO^s7FCJbRdaE9jT=4Bp$ov(_v>{npcOKQ?h}zMOZsyqQtj(B6b|sO9oOk)EwR znU~-4Lvca7NA0eP8M!ahUy22zXZnGx7JNI3;Uk^bct*5jsh;e)71*F#7>AzMVW(|D zChSA6D0?MsX$N+JcGku`*3;>{U$#GO>4uj)emyJ4j-6LKdAXA@FJbHz@TvA4<?Kha z-@G0^T?r0q!9fLls<l>S*Tbip=OvWsSw8sCISlt>Q<l9vA6zVfPb2R~o(JL6I{4HH zypn009i?MWuJ8dzWQ~f*uan8LH7-0OK8U{`&i(gB!|kj)=s);qTKhY%@~5{57w9uy z{Lagfk+O$;9i5`qT9?Lo50VF&b#2GEYm8mmlblFiFrc;*@01cxRndp^AZ$mLPNTaG zhihMV(MQ_P^eX>y6Z<0Ol{<F0+hU1P*bMy>M~Q9H2zr(8?-HXBSv_Yta4=r{SYl&c zXu+cQ@N3*De;(({RlBr@a^&vMa^{$x-zlD`>gSx9o1C2^3+}Vy56syYYq#_~*2w7I z+3hU@FPW?h1<2v{2Oiiop6>y2(C_2p#QvYc`Yrh`-0O_5(z!pYUHc%`DeY_e@T=}e zS3Zhv`w05U*LdHZWB=`|b5fpoh5gLCrJp|Vs_8t?Mqcs#tXFqlYwX&O%+p?`mbiTF zVc6&Q_cBkh{Y~td_BlT0B(}fdp0~%F?3{aB{knrXGw8prWY-MZsxV#tI{NdM<{zjx z9S6d;iTytM%cno?N~!d{G{^6JxOA6t*8KPS9eTG;EPsAz%;YlzRVFyLfn)G{A9>{M zW<Y)f^2<<uSSL9(9Gv0QGSAXY@Pl|w^Az)W5I@Q9AyL-~C0~2y%gn1mU3?NNogcsV zy4hWwJr!B6bIF=WThA(gNzAX}81Ku^!g?0<+X#s^Y)4;DvGu&Jx;D=D7Funr$Sw!Y z-rY@O9ZNTC2{!Wm#oMzNFXe0p8*O1mTh>ePx0|y|2l9Es-VIrzah>I0pY{swPqB~D zIgx?f5An>&eb(HMciaVR-B%d_Taj^4y8ml?!;kNqi~k($>5Qm&l5u<=dW!Z6S`R8X zd#6rO*(KK;@?e8qLOz|m<L<(;>m^%Km*?#v*Ugvi-+29+>W%pa-Gvoj%2-~$YT|Oo zB}vP@+uVg4R^4)_oiXJdb{FR58VB>Aa~FC8?n2R&+V`MSRXkjMNPSfN;EqE}&famz zMf(;1y7G{V_WYOMzp-`xl0)^hr<_x%^yi|j^2?WOl<ib<XEAi;F^ySH+W2?;=Gqy{ z{yLv^kNtl1sfnB~y9>Y-vEws1gWC`9<l`HCfLK!3HCO+(-n=or{!8Pv<}{Q;6HUO> z$T>|9IAG1Oay@u?zbcorN9@1(*lHW+^PKfgc;(rSm%v%Y@v1@(|EqvQ<t6Zj&M!8w zM#btJrft>fWiNWMI$AIK@PUZcDHV-Thke0C>+Gk0=^|PS^XY#BysQ0%=sJaYE9XA; z@(+^>oIKO|B;M58WWCF}(qUuYkZz=RKs%BxF}da@myzabZ-oDgTIy;GozZ9OEO|VB zSpJYa=1%m3163*TpbH+vN9sQA2*)RAtNr`)UReLdffyfZ9~=6(@sJNbL`Qkle0cAC z(Q{97)BGbkM_?=$=gpz_vFjf82e5JXa38mcJ~f-O8+5EW`mZ}<+E3h8=$K@TZ9g%& zP<?H+|9+nN+}`;;_NMu)ubZnQeGTDV#f~TPuB9If*6*oq_xy33z3BV|{h+e<<G(-D z9)4)7aBYoe8}HEn7kEcw+G^i2uj9zA_lAm_$9js4v2UUm%RlPh$G2sPcirDK8rt!j zov^n#OZ<E#dwKD4PCPH)V;pDhD!ab{FW(d4<v+fN?Sg*xu}^NHzH|xwFTUwKPxnl2 zn%{TDbsZKj!`tFp^ys2vSB&T=gU5O%bkC3R_mayU=Xktq%@+^NfbW*T-xi<4<9YD0 zczVWj?!x?S5jyDwrk*kMaoNI-8IlR~F=Nd&&%2<RzAJ4VJ+AKgqNflt-wUsK;9c>t z1?xD~BNmZo;=_2JZe~0e@buRsJe?ZH)7yvP>HnXf|GWHrAy0{)b+3%w`0<lA)~2&| z#pi$J$v+dfbQ#YMvz{KYkHpTE9JC=*e>Cv-herOq<dB2!q%)0&dcxm+{VD#34Ws{u z{KrS7a<O3skvBWtoNwxXw_y+FV3Q0Jo3$1?sxn6QOZF8T_I{J^-9}Qqfy}B#E<c1{ zLMiog@k^T^I;VZLy^VJCoc4B*mS!V&l<ZKM`cNBt)dy`X<oT~CA4z@vcfkAl)*7h( z_duVr@y70@X~Ku&u>G>^oQb*Fxr}eC;h6gt|IM^d^K5GLS=!vUOk>J6J~?@jXHDeT zuhD-;tX|Gs8+FziJS&`<okLretPD3c{`i+az}((s2+sz%HNmU!d_=MkUG`F*P3M_O zo#ZHf%_wd)_IB{w!SknhM*9{%4SW>lFs9-tjtw&@SdA@s1Y;Erg>$#z?*B7;gA|^% z@J#RMnS5!g%n^0Kw9I7PTQJ2~Ud40#mgeN}TWgHluvv2y#!o~0aTn{`o8YgV{tr|C zFl$Srk<`zC@a1s#3&)1hc54POxy_ON%{&J`v)h<s_}zMk{>B<fZCNhf(fixbWlH$g zzML{kt`(cf{Tf3JccUDLo2PSlyYF8+gtxcBTTYZ-bHUw-<z~m9d9HgZ9N_U`@bwUQ zdk~!c1zPTyZ!AB|_oIxjliv^Wy%_kQp=IE(K8H2`9plEIc8)4J_|6A=H`Tnq=1}L^ z;)C+R?E#ktw&t%wdq>i`|5N^cEBIbvn1Pyk=wI&}$91N<mGfUuDt?oOS@BNJ{w>{? 
zc5m6kce0Tj6#aQVFqWD|)9b%h+1ZCS+70`GPSZ{f?xxqrMEfZbU7z{dp*wkI7$tlR zYrRS4zv|*A`?}hS{lCF_k3Avdy^gWIXCDE-jtFjGj0ZJFaL~%{U-7$~@xH})t4y2h z$@t&h_|sO#>pA($O;0o4R>nJ#wx^mSz@xEaYI^s7)<QQy#=5{X1J^Rv-O;f=%~*qs zb%ima!jt>bmLt>mZmBh#!6W<@eA^i-cXPkqn|ol(FB$7ohV4MBX~RYq8LRqOz*wi7 zOE=;p{`zich*SA2_!YmQrq@*$|72^dvHuJBU*Q-X{ERKF9DV5EGiIv45#4u0HZ+T^ zn7v#j{~OBqPUY;YH8(5Z|3dw}E6LCA@5uL8o9;Jykz+@o5yq7<^$h!>`%5<F^0|ZG zd)L^WIJCxm;?Y#|iGRG`_C(sc<$kx#-mksvH14?gm0|Z^aTc9)P07ZuKTxtU<KyZ> z)2K6(I{kdVHOhQq)w(qQf~KYZD(GJIa;aC3UG{z2e}{f%Qm=mP0{@{k$xpnxChdu7 z>ummYg~?CcO^iTk({g{YF}go+f7%nOJC(XKZ8rZ@-u3YAJJXY&P`@+RmHOXlTHwE9 zo#_{UzE4@KuQZ$Kf5bM@zkt5pH0}Khnv(s!m$hwKV7UBy%q0Kk@X0EMZ$D)l<A0my zVQgf3jU>I3Rb{yPJLj0eetwI$-{SjC+|lx}iScgM0wc-0+jRHWUt%o(OLE(mudZF> zm+W}Qu=#)Wa%r#^G<#mzyJ_!idvL#f>(A~mN;a-fS{Ph$#u@aSEj=WidC~ZLe>T-z z`qy{TTDC~$<fg5+z8_5c$1U3_kIm1Y*~`6UkHjGUnf0g2VI1!UCsnC@<6BjgWE`)~ zFe;^Ah^E9FKj1g^qTn^7N&*j_DGs#J{xN()Y8)jS*P>%1@-`x#XB8Va9?yYBij8XP z8@ax-CbeW^ahh=){;RA>GXiS&&(OY&?;pbVH96Q~nN!h@XL<{?zV&CG!HSL7q?ZI< zWF2ZlUShkd%pJ9N(^PVWHd4<)e{S=}<2^igo7KmwY1<9{4%^%QQ{Unf(Y^lb`)%5J z&{(%c{%^7WhsQ<A9!y)eWgR}>hGFwc#vzL`o(ey-ae<L?pz1aA_%p;fRh==8x6(%v zake#=zPRN<!&WhM`iomWmHUe=cjWwHi}10BHS}%fF4ss3Y9CQ$n8#UHm*!621K&2i z-UO^k<XTkSOR4uDvQPL`UDaDl{b}HHJ@~Xo@!1%~XJZtfPlL~C;IlD`&uPQpa~k+; zG)%^B4=%WL-If=P@BU146PwFR=~MVAP2ICea4k5q@K6akrSnGIGS@IJnROXu9@8Gw zylej?T&uqDEjkgtjVQji@s4D+aGGm2y`G!YX33~?|1-b`?W<k&FIoIfI7}F2(?<23 z!+IoG-ogJ(@V<?Feal$-Q_-mAOl2{+BkzCp$z;}~ML#cj{-MGfpZ}3j`h1V^lPAIL z?5!^LKG;*XqA#CVUdrBSuN7~u^-cPLbc46GCy1_X*1bzS)0$_nuDwHl*CEpvAn&L1 zU1cQq=kgo9sIr?mWz0K^d1e4JG++56=5G=BVV*5L=@-0n2lOl*=~Gc1su&%^1XoY< zuExrqaK{fR{}%MG|1SWqzGFI*?#}w~t0RdENoEh|U=QeI94XkoZGl^em0=I?zLR(o zqifOG(D8@J&p#sAv3~LZaRO&USF;yn9L`jHP1v71*q<v#-m}KN937=g&#~d)J8OEq zJS!hjmQrRq%0uB~kHg@uz4^x2t5&Bi58=}%AE3}zQ#|`dHIKa+pHB5_80-((XKLS9 z4&2qsBY<xr3Py(ZbJ{!p5WS;Y`$7I|-xg}b7RcU_xUM3#(Zk*<ws$O|jc)WJALExS zXy#in9zEFg-kN3xjyzB7H+#9a@oWAwzOa9>b3Ya5cvI0keeAv6xg|3-H?j};*aPBk zHrTQLwgESCS7&RnySq-^Tn0WD5odE<N9+zS!4lhtHUNj4edqz&k}b<)Pn#t;xJ#q! 
zkLXEdJnH}tdUmtj>H2q`y>f^+tG&cGEmVAyleSan!^NDXGH2}B?;FjW*#o-E)JM!; z`}#=(cN_S$jxO2Q@{O#q_#HQ}PImc-FL1Ia_IxiZ5M(XQuk{rw=fH=or6sJXC&=SE zj(UUk5#H1GB=1S)b0Bh06FEPPVB<f^=HmALg%~Np@4&9Cb8R=F9R}yxI%8f&eyFCU zoDpJs^sE^%PWxin8&eK={7!7692$1H>V3QNYx8ztr{v7r;cvv}L$+PnHL#H#aAH%= z`__U@9?p&jCpatibJmAVw+jI_hcmc?MeB`pAGX`@3Sx7xyJs?=q42lI$_BBIamM1V z?i8(j9DY<~F*~2ieHIP5gDc1*xR3fdwA~Nf4r3BIZ5<UM+LiyphtOtM684RSlXi7G zjoJ8Ydi7j)EPRgVp%vsb$LHE%PO|EdpQ0<o{&w5*y1#mSaN;2Mr}B;y8IVF;S3h)c zl#ln7lIM@{T|-RNI%1)gpo?Hjf4&Ubzk>NUh?{zs@~4TFx(3@^zv-wjuyZx#RcHU= zTZ_)tJ=c_F5I1ESw`Z6CHf?DxKJN?heK+uFUqP%#4)0ZQMo`1If2qTj^Kx}|oq5et z7e4Z`r`GW9V$QxauA?_c;;i!M$B9qMvE;TaPebWot2ruVrO8>|7j(CUQJM1}E6=>g z+#5N*>{ZY^=Y2~R`-9Hi;9b*^();w5tQq!!xgP5D+xdh?c(m{P5MN>8#dkUkpK?be zcs@};{0DY~zYwP;9$=21wdU<yEL&FaB)I#CoJ)y)gr6Vb;e5sw#{cp3`p2`vSrMC# z?yWjKHsVL`I5{TWec0VFh^`YJud_(*v-;6U&r$Lr_f3O$Wfz1GWOwy+W(79H@q+lk zl0SPEovpFmo-OzkN9JJ9lS8ZlJjm~RvyD;2{zA*bVH)Gqd8A^^+{}|f4rV<+wm#X@ zL###*?*$o84}0lfGN$-8BkeVfRZPb=-a(%2{5kK$+H1k?l!wjm*we`#;aYVS&(+KO z>SHr?h+f#?8^axEl-2QhcTtJ_{~A2XL28?bZ=#cXCO2fcss91-2R-;&e>%mO^FF@m zT`uyI5DP{g(8~9KpF@>7p#ppXiGA8l`^}uAnZ!{Uxi#6-S%=*CRQ0pQ$OoTuw+0tE ziA-Y0>}fUGHsZcYp+(W3gLo;?J$5s{DcS;mF7RY}e8ZN1W~5@t++G{8D1G)cuS0&V z(BTr{nz$;*D_*~ySS$I}`gqrvxFUN!bZinsRYJT+Gd|z)sTRzN<;a_06g~*NiUG3& z-!+BR#9Gy4{}bbEU-$)`<>MQZQs2(|eZ+woz^!>rqD}FWL41^hHe+p#z}_I<F|cK2 zpua6*EW(#?cvb4a+|~Ryh{ZBIyhA_NvmY0o%l8+b1agi<VzQ*~sos{9fw}BwEL-ox z^rQ2r?Z7gMc$MwI_aJaw0}O4<*@DzNvXPPXHSl~7xYu|T>n2;W@S}Qu+8}4}Me8c= zWg7ERLJZll|HIw8$46D2`~Q2-aM?2xF1Z2;GKr|28N4A0Nfb{epfx0T;bN_q(*!*| z%>`|P){3A>p!Ni#Eu$1{>^TYA)@0C%6_jXu0BujXsD)~2wf2ypwz+tLf|v+6-}h(F zY=}|mIltfQ_j-L_zdz<RGkdSgv!3;=XFcn=E!udWyd?4u&IuEL3B2zNg4cm{H}7p) z%{MwShxbnp^WPA@!Nb>CLmFENzqeix#|d!{_B~_HxO-S{uTV#Qb>rjjJRcb3PhAWw ze)JLY(|LvQSlCYAr=CUpNPyox;6-wo)<AHiu}AL$=tI-kd!FUFaCsdzt$myqli*7Z z-Ka15EEuofa_l8!>L*9W<Ha<#^HcltP2$DGL&eYR`}q>_Vxl3zL9U*t-4}GEz7R^& zdZ9h?B;<;xA0Em5j4rDzm$Dnde<iURv&l7Igl&*K2lm{@M#t|y5DyA7PWXFS8s%eN zqgHj4OWs~j;NFjK*yDV^E1!7wPS4$|yiIBgeri2+Unca?(fu?w;%UI}6!E8w?eNp9 zGO^qFdM_f^F+LD|(~J(adoAl-dAZC9#5F=6Ina{kN^@y4X3@M^$QntknE-RB^|B4P zb+;9APTI%7F+jULlfxY`{37~@i6rkpNW4zC)fl4acARn_1D|Be0&?DUQof$t&fQZk z3#$HuF6`qjkFRfjIH<h*fv2&P$#0*1r3fCfo^Qnz7lNN#!KvV@<2g3`1v`n;E2nIi z`bQ@6vHm(ok~f$B;<<Iqka8Y&k~>bZ=IVPU|10?4NxYNxoSP1#pU1Di&&_yfr$FO_ zCI^sVzsm0@>#f`zThqY!MW-7*MqW-pafua^9`B9W>)kVvcme)r^Zzf@iOT^#?l6wG z^Bww>tW$3po96IdG}X<0RHCD9XjVK!v?V=Z5%5ezW`7>}oV+W2$hHeQ-C5B>o-1Z1 z-~P^s=JDMPFIPNwk=rN~&w};^^Tr9}8)!o=RV<3d{jzPyr1HhXrf$Qu5SdKl>tsy` z)+>SU5OXRRE4XuFGG$%x5sg<dxU*?{^7x8L?clx&nwbpExZyE5@YWD?A{y~3mX6$Y z!k5;taFzpK*?1(D+rqvO?uz3|?{*`%sz1@h(;5#jcR{;~V=H4`rvQ)U!eCDnJxOkD zUvaH1Pj~&3<moGrZ7)a0orA1fhRj<^-1_YH?pegGznF+yr@pfmMYq~}yI5!BZ|jHw z&*S{ox)k0N%XRoMWSgI^%)Iqy4`v>NhI%<uK)1|^?)hVVQ%}#Uu}R9eO78}kUnl<X zM#iDLDlEfOW)Z75f!Mwrcd8wO)&bti=ufylN}bz<Z^p$+*{J<Sc<*s%g>-(^bMPMG zUI)7_c!+8(baF1Oq~6D~M5oLX=QhQ@=)d;lHt9KOUvXT}UGzQ5hKFI#x65bp%(7#k z#CMSI7tEwD?U(vbeENcN#Rc)rIhweFcpOkKx!ST1$K!zB;67Bfue;m#(zebik|jiA z;<Mr*U7U$t+8bLi8@PmX;<F-G^II`asw;ozKhS3VL1MP3t8s(>xmp8)B|tn-ylgyH zZYO2+TsZjHuRG{iLm$UBElad7TxtK9fL_q%$8VJv(B~c}28cUy#tnuKA5~%@<85i& z)#O-d1aG?U*@+iiuXsV$8FL#=Vw}t=bH=$GUHsRmFFxtuS-hxcnlZ49eA~U?A(fcH zkKEqqA>w*9ugU}3iyrGRbT$|Vs~K0gCqDPkJ@JP&9+K5w6>X`V!ENZC2(>SMAw0j9 z3=c}c@s`%lUzwvsJKs#q&CA3U3qQ!t+^4SbWPwAYV*xT-1@VH18Q=SVXMW#_ZLGw% zU-WE*@rj*vGM;1n^G?iQW^@Prh(~cRXte$<;tYr2LG4`#?6i|O2W3U)0E^^X`4cIQ z?=ZL`u65v_d0x>K+c?0OG^g_S6V62+mxG&Qj72zCf7zXJJjHQ7QS@Tt$v>p8sVDkU z93e4-^fQZRc1#|4BS&HM&A-GpHl}KS4tLCd(MbLndA$;Sjq>wJ7wGgciF)yIOpN{+ z9F=euXj2TR8-8IL<g!i+aW7o1_<`j5601MoGVMEXRHkq{F?jgp)rxm(e=0?WB<E`M 
z!316OK+EU`YS@Po@uTszF<NWmPqB@fH_?Rnzvw30jvvgj@iSay{utXR{0v?zG2q;~ zml#BFC%iokt%_bWR?*Dr_u<39Bpztf1?Mi&jQAq5=7R0`!j^E3wf7=?r2<3N(zV#- zbyic|!F7o9%I#q-e-r$zM*dNm!SC@W&+P^VKRy`-`$H#vc7eCwKu>=7{Vw9(wPzpZ zxyB<Jn1;L|zo?yzNipk+ag2{Oj;CMqJWSkrojVoXVj3}c(2)D#=4Q^1T03!i8y|gK zeZZ#_x2QZ%Gl20Bv3*;K?bA4f7s0Nzg?=op?yuxKWc`b0O0H9ko@Ac|yi=Kc_}Sy& zRlMz4zBRYV8i|-f&ad=Wzw_i;yf4webjE7$X_qk_dv>1ssi&UPPb&RLPwT++BidG( z3id+#+=!g7*u}x+&!xQJaPWOL?=^?rj7@seesHQbis`4HwGhCTqp>)BXxzuvhuc4b zE=30e_?UHnQhp+KyWAXS%iTfNn=Na@qe~L9^<LtTwGIP^{I>l2d;ZJEPh(VHBkA9P z%b~4q)|qHabK6CZDdAr0=SAvj9P+ix{9SDADE3whT~LTPi9TD77**rtT;}nNYMO&j z;gqq)V(*SnQ!BV};t!(uV!AkU$j6xYfLEU|ePyG+Wkh~gwP^DPIWK4S;<LGPn}738 z{=b4=>37(aWD_Z+o??P_;3ubCPO9s(Mm3ExlA4~x7yK4<O)+zHUth!biCGwRB4*)c z_M0ulECg6fg7IwX;%7`QPEWKNpSW`7YYO*Ng?Qd%8cpA#Ex&7Y-{|GvZ+^w}?Rw?w z#-3M*W7rKGV_inm7{)smzYo7_jP0xU3U%a*UPJ$N>YujJM=kg{F%G{IjK-KIy(=}x zG<|^m<N)-a>>^%c`L-E3%U_t0j34<2#5x$u8)lfxm(MuhPL7(Kp|;RgE3`N#5$muB zSQYC~Wm@>6&wSOBW}qi9a%Iy-AJC!i*o(%V!G6yR&?7b?k99N8l7qC?8RG)ULGlH) zfXk!#Gv(iC<l&bT;oQ@A>vHUf^d;I*Ih)_65}U$Vsr4Z1)=2fK-5zK{ded#dN!(4y z@y!GdJ0DTx6?AdKjJciY^Zb8+r?jO<D%oT4Lp%`t&1dyBl-ppO9kA<4_b!|KD)hGK zaW>#9dB8d|EfV~2Mg(4jov5+d(SO_ZzMEv%+e$g(@HG23&sP~6g3j*^#@mxm{vLMP z$?~o7-*!7=2mPLrcCw6d&dIuIXPo@~W;{Qa-Pez&JI_}cKS@3LyTEJzwqa{aah~rs zzB|GBy~-#VROay$Fsw2b#bLJVW}kpzm9cElSiU+4&hrPA*?t0s4aR*ZaI(ReaRUBT zM(2q#yNwNT9LC3TqW#@Q;|K@N-9|;u$=~D8b$)Na4tDbQ*!XW7=3frN^SdYNt}?zF zA1{6FHdYV9!!HNT*V%*WzH|a7yNy2$n(H?QmDwD}t=+GYYr|8*8n7D5`>o_l20ZYB zBJ#Px-{Sg(hN?o=PHiJHg^}LUtTkkq<oM&8wYSkihpzXX_*dwe&OrJuaT%+ug(I++ z83*0wiss+4PV8q5BO>KIbD7JVvDKvgqrAk|0$X4leps{@TsH#yD6o+eFVFmLa~o$u z@!>;HkuQ*YkuCJyTR0o0VFSB?wvV9Cm*2Ja+c0rC(#=0ZOwUK?`ni`P>L$O5EmQDZ zc@`vNRTA%GRxRDETzhTcz^utFC`Jw_<h`xqb*1epPt};vqq)<X_7k@@pY<mhg*Ero zHl9zV?@r~*<J}zI>3Kc-nO(-n>_g{a*X^>)Oo%St99x@9TP^O4zDD-sN9ec3HLh<d z{N$K;B{_=OpZ4e+%ia=w#=cA0Z|jOaLblmQS%dw;hJDF*Y}gxVV<G2{)x1|Mp94=I z0Z%J%roqk+@J_gppXY$~Sp3KJTW#sTaA}!YP2zL<EjuqT?$Ns{$|(M`k~*3rC;y`K zTydZKQ|Dit_i}9Q+swsV%oDcA2Xwd5P``PbX12v?CLNjq?v08?l{}SWpApleoMYCa zbINV`#H`&;-?x!xMRbTgD~<6KiYD}blEyWJRt)+b&+`^ncI|S}{P_BLVaj^-Y-TwA zY@+hE#%1ajS4pjly~~<ZKd+u=o?*rwFSh4<GmL4~)j|2(9$bC*i22g-HM*=2xG8ja zzLOW=vOzG&cQf{)Np5bFbH9u8zZ*TkF!TY#v2~H3dxaSZNI%>aUrVX1C1TnO8E0-D z?QBN|v7md#$<+k)4;)|mW$Io^8<)`5EZUrjE}<CvT$~4MpFlP|Xtk$DuEB<!ZZ<R@ zcGow5B)ckhKp(g?XuFMAgY8B^KC<G*M%uR5IQEim;H@MFa~tcfGJz-MFWIl>Bf^E1 zJTKw7^q7^jfv!C_!2cfF4&t|bm^Ic;yRu;%=H6AERb<l&d9qT<O?T}&=3s*EO+-&| z9q%MZhDJ?HDQ`%;GaBX{My|e-ccRH)@@XkW+uXI%=M@^Q^A7X8na@|bFWT=O*OxVI zZSzuW$;xjz@ve$jr{kZZdRjLtV-*j^Tv>fTc4stIGI#j)XpfHW#K*O*`a1>i-8|3m z%#<S2Ra=i-b*Adkj^YJw1kP;m{S%FwahCgxgH0~WdlPNUW_?uCeo1=RriaR}XHG-r z2{k|ye)3#teW%b?H}N|9{}y$WhoPt#KC9nz(^I_0!}xf4YnY1#k|B^|HU^Mk?0IlM z)U5fwhrTy%i1R+>(-(j1f#>{;-}U0nXJgOqG9qgx8EN9lgP;AuAAff2o6kWbCbZ&$ zW<H&Vw37O=b0=)uQLCZ2P`=RJoU3x-of&)@;Q=M+ZuY?gD$m6pOTV6}#A|TAlpVw~ zl`|=ElEPb-aRA>E^fIiUv5dc-x(oRj#yI%sgy{Lt#`7(RuYAP$S+>UDw!)@Luk_Ex zz@I#}e`f9D;7{|^^cT*E7Itphub_?#d0O`ifA80^wc`0(frYslu%Lwr>}zX{{%6jj z-zCINuN~I^Yye(c<sP?7@|gwy+(FqH?r}}x!FuL_@A^&GK@aqpNxXjv^Ge?8Ud@R$ zyd-LQ_$2em;Zw|~p(IMsR7B%KhZ~)8rtL4jku%c38+?Alya;a2Lu+%7#5P^Z+-dzv zPd1a^m7e%ou1o=c;FA1|7buQ20NyLXr|SNIx)beneliDYfB8)2u((}12j?z;%9-K9 zW1NB7j_2oVUNm=38{}hOdyE*&M&RVUyU|}Yqu^a&i@i8}&#`OL;2-Jmo(y=;@W|Ed zzir5|#@og>55cFUFKb(ITEEtGE4ggj+!G5L`7IxbwiT{Dt?rASXm!uv(>!B~`?UVf zamE04zkyD2{%m+Uw$@`H7ZCRpEFKr>M29b*h}eAM+Ofw&3r}=Ds(n-b3DU6`Pp}uQ zR{!lQ{@K=-N{&9h^ee!4b^Ebv7Qokr+cL}+a;rLV!l$C?*ox76B=2+skCo0j9XjdP znB1BDS}$#%L%$`+|AMn5n{Vui-Lt%ro@uU#*9`x{Iun`yQL{b3??cFGitXxda<$+3 zNLD0PuAG}A+S|w>kufe@uy2j>osExfyGXg$u;H>MH1~VC-&fE3Sp)c>6cqI!d!X+s 
z<*tO9JF8CIl~4-b>p9E&aQ-&-U&<e5&VnPug-zf;iFX8sMvB&%wJV7k)IP0uKIqGS z{*+l;HM}Krx|tEW)_ka0aRhr^e!E}s?!B%|`?vBx?{!VIf4^z}&ai*KZvUQO|5hG> zy{<I-_pAJ#K5$JwW5{1KY;68lxN~y)i1YI^MuhW^4A0K*`I7FvBDbAYkZ<2b&i_!3 zkz5WRsD_{2jGsmy`G~5JDS3Y4F7iCyN$2C#|Gc^{vu3}bZlU<{sp`tE{{?lI{O}*t z9R>gTyt?q`f$vdQx|LGKL*Bn8;c)POe6&9G|L^>7Cx`dY=UaIGW9Ic4Z^iU44O8rS z!@Pjex_BAS_99p0F~0%k*T2N9U5HIG;0qTPj3Lhm?<3eIyHm-3__lFn=Zf?vI#*=z z$>cME&v-uL_+;=&dm{Ftw-BEh$@m>NQ1^`slI9oE_A}@n8kL`ddAiT$w`tMy-bBB; zln8wIhKId;pNm-gI`X+t&VuhDCk!ZdF@O%d5uQDhF~nHcg8BAg71N0^hSrS>wBI}9 z!4D>+_Zeo^1s1g0P5BIW#q?gg?6?cCVTJ@#n#Mcaoa?V5CnUZO4j$s`C;bBbFJTSX z_05XuZzTG%c(&ISf3J6I?yShy{dHFT@_Bx9dUfj(?%EhK&JX_#FS*soTVs?=SHI5K zoN<TFH0G9*Z|p|)0gYW_4l-uWO||4!>$n$LLGtfgv{Ug(EcZTODQY0zo98X5?D6~1 zf5R(Em-r)%_+!kjT2^3rJWYn_-y3ro2M1DG-dK}jOwUZ`PWctcYRpCcw8rMnBX_@g z=VW8G@lE6zbkChr`1p;1pJ=|xJLo??@}L!F4{2E2{1N<VKR!lXd<&Mp6MNKJYiyKW z{K!9FmEXzpE@Z+ZVJrWc>x}%d>F5wxo8YTsJ8^5h{PwU;i<p0W&O2`Y7i>+e(-P>K z=MkO_^ps0~%-&!czj_V-BEfz%qow&PqmBF{ldsCZbGB?aS<%aZSvt+mi^vmLYHTV2 zA0A`%Ji$C0n%?X3gams}5wK5P*{n0!$H1pO`xxIJQ_ntkKzGhu6uGndI`O5|oalaQ z;3)Jw!5rk7wT(Q-_h|Qh_xo*sm!_&J+xIIBetXc977<JDg?BVFx4ryF*RngfBRdj$ zz4wk@Xk-G<r(a(=eaG#|DbvlA3v@m>l3YK%k2B8B3rEjiiHuNfdYh2X`f|7zYww+7 zQasSE_As5D`e@UhU)rofCMkQp?+)l=PZ2t-KFU0L4KiJ<Y2J?57LDE8Sd(8M8a#^3 z*zC?~VvKX8hte97O-J*0m1wCymb)m?z8AXQ;Tn5E9rIHLzwf2Jpt&Y<CvB>a*+%2^ z(&F&k>Es!e%>EHN(Py~Z;=-&UG=+@t&gsO0f#dKEIu9Gz){N+X(@*m_Y%Anu`3vtW zt~WOR(Up-Rnv)H}V7}L6UzL9<ntAVv&(e(OB}N<g01X_W56xdM`>FWJZ02d@RB{co zmNnmcrgx=0?*TVE&o^@K1y3KLmlPdfU+<d@uC(UFd$w{vOgVU)lURRla8`d=oL?#C zb^BT1m38Zy_SrSx{|5NI^dxU{*0|0DTH{{&(>lMGeggEVxv_a8ed!FX^}Utf+QWxo zTWEwvBnyf*MZfyLcflR`A92?1L@(_zvMwm<GVSw@WSx&F^9gGdKdtDIKw^Ee4(Oxs z$eCZw-w#a@sIvOVnSuPhXBOl?b7pq_nlsPOzx~W`{#VXqy_Jm3-&;EVl=E@<oX^h3 zpf%j_CHRQ^k_wO`j*@e~_eJ9up}V{fheuggeDNNl{Uz;*?dTdVB(LoR-S>1__<<hg zZ9H=Hg;jw9>kxYQP2@Uc-}GQlY9rr8(YH;TAIe8!5qJyeZg=oij@(`NhsNd#d?imu zZkI2_&W*9P<AF&rQ1HuKKXtcpx04GRp^rU#UE?m80nKQifcNS?i75J@j_<gwzS(Cm zf6y3vW|P*De6&O>%b0K7?arL#lJ9pDId{@Bf~ThE%aPlL^1zAEvv?J<Tu5|%k$CYb z_~^^iKifu@+5qOGqLajW5Pv_hCML*VEGwGN7)Ad+@I+9`Kq+*1FLWs0hwM>EF7N1b zoa1Hxh`CHV7AmK|?p5;BU=zWP!8%r%try4Fd3S;*7jaH*<*q_(>fr6ud;SIBss6n9 zx){7pJ?u@rtVzyKO|lWc!Q5#4G1j%@TJi7}>`4oe9XjyyyN0<MNSqf1ub=hO%{#62 zK+g7N&(?qYebCtYcg|hC>+pN|^T)BZb#8Kv=Er$F@tTo4p{LG6X2ip}TKgJ$BhCt$ z(b-iO6=;oWUY)fWt3&plWeof`?f5O@sZQ3h^l%k<=+=O19sfJ)!W}j2dpw`IHSzx& z+6SRsYez)^IL$ks`8-M=Il$lIBhFf~5cS?6em46|BM;l>Kow_(Zk`2}<Zter!hQr^ zve7vlwdWzLuZy-99=hw*kI;R5#2HSytDoeskLMa24?%+;jSLSgJzDqbQSIsAPdrw% zEqn@(o5<N;2%S4~gB{x56L!6hoN_pp`{#3wP2Kd{27XUTd)c3*J?3Jdhq@m2bKzwh z^7ym(^(<%HzlPtOs=fEVs6FHY!>qsfcZ@-N>nYl~{f+acpQ^2w$wNGJZP~EdZE-&~ z_PHL;Q{A^p=9pNv2|ugoG4?eZKjVCYJ&cc527T(8=zNoFoX=t&WN$iE-{*Wm-@XJ+ z>akZJiCmMvhPX@eeh>JWXUU!TSq%8A@(a3wk^LyS8~E-4rrXala+T+-^QHKHa4t4M z@gVf5Ww)bi(%un3rz(2g<jU~fL7h{<H;S|1P~1odFFvPzTfP(f;SX=Y-*WzlPULiB zV=pk-`CQ=#gWK*KB-d3T*PVl`q_a#V?MbfV8E}RNGWdqSBoj>J>t$S50;A|rK82D4 z#PgJUUv&CBIR`$#Hy6LHzKYEFdX+9>2J2PpRO>aqRwqVh*A$*utCBNLtk+q_z#-aj z)@nU#O6yhY?LX-Q{*i}IPsb+LMBg#ywglZc@+RkLe{?BpW1`vItT`Ew4utnwD}qsb zbP{?T@&4GKh++9SmV0e_e7ziFt%*Jc<7cScivr>^xF5V`w0}w$Vg+yadQ6W`G&&4i zts(#JsoK36KY^ihI*8thIT5``H<j79FdaUbpnvg#ca^gRIir_v-DeeKzO=6?uf5hm z!{5k#`nTAoTfoP4`|wKv57{+K3nXWVZwX&@BaNq|qy9Vl<={4s@n3Cd{g~uY{^*_9 zrW<KzaG65-8T`Kq`X2lr`Rh~vkMdvg4>}@S4m$E|-SnGivzccdL+S$4r|JfVw52oU z;J)^O>%sq@9P(ag`BTlqIp_gSF%L`j)Bh>!Bh#Fse&O$jz~3_jzSbe*KPY&iY1xw- z(E~~c*oY3W8y>BF{E#169h&pw2V#tExvqOMbdrvK%(*L+@u1^6<vpR;2~wnY&|QXo zeB(cOa2M;$AZG}Cj9e^fQO(tL(2Ckuf6`BgpGZH1%(UCM#2rCKU(N3W#-FA{SbMAg zo7|0Q>xthPa-UWY_i1sSkC50f;yJMI?~l)tt=|=?K{p-!(m%C7e%{IZw7TI5YSX?~ 
zi~F=5nPmcx6+P8`TK&86M~%kTI`(kvGplAUD@F%vpCeWq#3P@ugM(Y?!ec9pJ;#@l z{|s0H<g&AJMn<gT`>Y_px5MWHUHAkLPi&Y)dyR=ks(g%{a;-1=H`_9m&*(r`IdBGc zto?3uzWAY0C)E04`DV4#jW3ant7EAVbZl!?AAP*;rPys`kBnS3sGd_t{Z<}}Z3-Jj z<Txm9*F1M(cL82o&-ssRMbc*$5!cB@THwtXxDkF#EY1ey7?LhUb{AxdP|e&NWL7g8 z8^fJIjIAFz1ip}bExhaqeqG|9(87sxXbp0KWK-P<cFdg`;+?JUd0|}q`62g2?jH>o zzVQNQ@X5q8v!}}Mb0zwXYGSXP`+)ZS+^Cf;bsg*Guh7zVSJnm7`0wW|)WNrWY*zu# z_OsCS{LrwUS<p1+`3?8olM@LZkBz-|NP81F3s<oB?|X(b2Im6#<93fZ;X@iSo{z0n z+XjAy!bSNM?mb}q`=NREnY4W`Ag5Aa_GrC(YczQSo-=A+7n}p+)MTF-?_S%i{~ffg zy<`#lZ0DGGdk#+2=A+08OK7Jq%cy;qcI1;$$=N38xhN6@Zt2Xn(yyKeFAhWk_A{%d zfqh1GwjQEC_-{SmKc+9i^9XtRHAc<Rk9bzeJduaHHh>Pp#>=<KvkaaG<3)J&-@`eH zeMa*=S>t@btbK{T_hp#1vSI9-Y1UTo4UK_=#IrT%6T!Fe$2!PWnH>IWZ`HFBV!o#F z82}fz1IxajvET6iB6x~;mfdfMS!-gC<CzV!Y=Qe80ms<AujiTQYzg20N`0pvd=*1$ zo{O#3{0NT~+V2_r3ye|pA>3>H*HTAi?;2Eg-`$J@f4K1RSf1jwwI6rz5l!MN&@qQP z*YY2I`Z%?vcWUd){MWah&EmKEcK!>7f8yDX5@j#qx7N!v)|GS<htOSVTop&k?R}N_ z+tq&j_g1i{cEbz&exty##TuMrzsMfDy&XT)FUYy9b4fihM*-{-tZQuEX4HcnXC*$b z-N-Gnv$f-Ycf!ubeo^Tfn_n_2d%9(gxAP9~*n>QngpG`Mbs?j}S~4-E?h!L+kyB5$ zCD}q{S32B}{Rul!C3k$NZ1?eT)4#+2A5Jn-j<D|6*!ha<I|fF`Ki}$WgdbG?-o*ES zyQ`Vkd}w?oxQhFnj*H$uiSrHlmZkR_k9|)*i;7v;55A|%)<xTQApZuJ`sWAHkxAav zGtY2i;2dD88qL18Br`=Y&PIl<1P0qC2ffNSEkxd{ax=F^@?jP2$Tq7oRq(CK>9hrG zy^M=|-5ub{Zo3Nj)c%5-M;53rojt30SEVyFcrC%#P`W;&zJ6Y|;AhO8iMk%1c@K|? zsQz~B-#c94CHR*X0^1U`p}u*yN3f(F^cM(sm2n=LTuJ-g%W2<^-%XAF2e=mA1;b#w z+(SIqeQ3JxY-pZ0C#SCV)(<YSZhmi?=jOgC-kW<S`))q=%_Qu9qp;zQ#)dlv+t^s_ zck#O{9se284*GX{Uys!dYd5(2(Snw_du_mXpuC{!O!zjsG1kF?x^`@|;I%HH(^`L; zkt_eqI&?U#dj14{nE12E&r$M!E_l?L)n|kY%O;XzWgF*6)~z*+7-jVO;_VCJLnixj z&Z=NU_X6&}l{jR#8F`zWfR`t67iE&M*f<wn#JzLb>{<TP*!M!lg8h?i-{BCvdZR^s z^XsJ@osXK4H*|+O?;iUbdoS`y=Vb00<oR3h28?Q}U!7uXD(4I<7`0amrh3i+$X21M zZH)Cy@-@M?$Ww;R+#h<2&m4Gcc@FncL1R<SNy@6BTzN6^#8X$2|Ha!RKgstPXN~fg z@%v`pZwVT;%HOVfmF!(LXD4Od#B<r^lmkues{QxDeO*WVJ`ly0>W(|bJ?`f|Ea7%E z?dyFG_}@+22k2)P{p`dSV{q9Jx^w0&V$U1rDvhg^I#bR-n*jZZ_id+dJ#Szf^ue7? zqnZ|ZjL5r!-4iZ+g7)=3Oud(Qw~GIoN5Qk`H^$r+a3jCvN0{q(-J^ih1rEnXN7J`r zcHigsyFB}r=9c{;2VTFPT(dQ%D^y0l_dT?CFaiGo+7Z35&n^)CtmFH9+Smg;b{XJV zsQEYpeK&d^!PP~XspwGjehPQIJ7eV8o`t|ylpQa724&u+jOtIp_vGuE58A0@+-Go? z_od9I-shY>JgX@!T#&QXnES7k*%ye@`FElJ2<gG!esSfzqSNu0+<nQNk*y7OyxI99 zBG-M0FY0pkA^E*(94>O_RTwu->im*%yw5ZaKwq^r#BIuMRczdwSsunWenVqsIW(ad zmrCL|&4%lboo*5*N<6J%mQ}ZWhEbd8N-9(SKjjs0atcW2-vD2I1KnCVGJs+?bpPWb z@VoH!JCCUi)jyPGY-&ru^giPgoHLKc^4ge#?o=c1JZMIA&z*?_52hNM{ykAv<y;@f z@{T|sm1oA~tL@M%_K#J;(@u|6bHA_?CofwGd8ZEK{A7Z?Uf=U#13;eDIfUmMaxOW~ zey;O^Xz?B1Gsk&)FWFGfX#0Ti`enxj^ZYT!rlr8ac&64cKb0?ExXa`&LxcOw?DwOM zO}FyC4O^FGy2|jgsJZ__Y~Y5sOtH0d++JUR*zp!{^d{}@V(qRZKkioWyzts88#ng7 zoRm>}D{r2-cq{9&=nuncqu`V^eL#0Ewlo;ETJtx7+orpQ&F9>cuUw1DqpLOD4qt2H zp2eIc)t_loczcQalyvXr8PG8Iqj5(q=N0yeR(Smtf93vJ{#T$6X`2ZSvW$VI4=y_{ zJV@T#$>&%Ry0PC}cKn>K%Z^{M=d$DIG-}K0p7TnV)6d5mTQSK)4DiQ~HDA~0_W8WT z^d44h+em0`^045C<BS2ty*vc2r00_C?pw13ocVm+SK{Y(ZS}FPFNKeLS3fyVF`o`j zJi#RVn7lQ@&1~pSb#;f4`c&V8=jz+?1%tO<#dvhbl*Xm~dyf5`9xb)MGomGYuYQbp z63rPtXv`mRknDTxft|o$`vjuLXhD896?>bsaFX_Lvq(N1bJ?e^V!yhQed`MLugmdM zo73Jixr}|Sl>Ke?%^zG;ax?p#eeNMgPTA!r=KJPjq8mHrIx*j6cMVG|J48&#@sU%f z6yQsr<C))5Y)qXqs(#)gXoVaDt6tf5dSrbfXYwoPy4E8{t!J*cL5EsfgWHqOWQaNT zKJesX;q6iAte&~l+zW^Gy!TYMf_INk&xHG>JZrk^es3TDqtn9Ey-U*Om$#d>CD@zF z&*5A4sn?C#SQ+J~vi5Ijo%aBH6zgeK&h8X@J*7fZ+56+`%#xmb9Xx@3(B3;c6MN?h zGxAmTOn(~uGze`^o)ff!<oTZL38o&7ZCVE$T_agr#5(`W6!g{1<zKkBTyyzn`+I!! 
zb^ALb`bWM+yDfYxCQbB<?6a{gqrRgHy<jJF?A*Dw{R!G&UbX*rv46kc&Hu^RbSD}c z@r9dr8}uN*t8D&v*AVB%zU<`=S|9pa>EW(7Gkw+IYZtN3x|g<mV9eAi;&<o351Pm; zTVs05%4^3?<&MoVbgv79|59?9_r_d7`D5z8eoJ<0T2Y^Z4Yo}8(N5uWZ_dulzV)sk z`n!%iGs{<ET0Y&axWzTGrtKF$&h(pEHMfz&Qhaq6ZO7%&%+RkBFmKJkZUUb4{AQko z`7iyXaQA)ThH*CEz0teQMIH#^e4ry6uldLy!ok)wyMD)q*oaTO2j7p=mT+26zRU*Y zkf5&i2F*o)y5a{m&%lT7LB^&33qMtEd~_1!4inQS+*VL7o%z*&!KQmce#Lv+zD^9< z8_4c)8GL+nAF^``@9L9@bxd>ODaS`y|Em=XZdz57+QEfu?6OJPhjdS|_>uM_tySf@ zaRsVav)ELVDkhybaorU<^IUma^olurNYa<veI+?Rjy*Xq3;t4vEl=nA!_qO^Jjvse zT&A`0CiE)4RPQ0KIH3EF|MfpStliL(^!epK98;?`H5uM(xeh$g!kNNAUhL!9T*f*B z+oI$e>$TR!b?gg|&_}nq#D=Tq8`ZzC(pxfnF6#cfseP9G3mU`e@SB;8p_4mO>+iqu zM1Mha@ZP<x*vi>Y$P4np{R<~0QAcn-{RJ=zwl_Hop9?Hr*1YQWFs2*ciRFdtaeHgJ z-o~fHykk<BSmMsYib?nu<OMG`QqaqW+TM!g8re5b8pc`9$i8LL{cn+{8a#HFp~quC zYh@0E)1<L?Pip)ymM5G#|NWO6i@OfTs8>Ddyvr9(dbfDtq(^~caGCByu{_a33?H?t ze;?cQFgi#5Z%TZNCL5*4L>{uXoxW=}vF8hyR35wW_4vN2dDj@HQTG9r11{qCLnY`p z)Q=4Vx(uC{Jj?;SCM9TIk_<1enap#=X*Dw5?TVH25I=)1!-?noCiE}bG`LsQq05hX zCcT#8a~BE^q9Jg_ddu~Gvc=w~y5QM8z~SXQGy{3YN1uz}<@e1kD=Q^$z1Dq_=)p{? zDV^GKop^LHbm4VvNb&QVb`@_wHM|4gdFoog8-qVyN`LlVcQ7uOJh(Hq>1xK@OJB@u z$O8=AmudU23J*Qw7?bI#?nGX&&lu<w>Nx9*mM^5N?)et(6pt>P>+D(yoVv49u$Sa) z=Uz3-+YQgx*-~S5@F*M#HsSF`#(M+pdDs*5pEJIluQ{7s9v1QF>SG(v<PTkipD3`} z?awBUyWKwYDBi5HInSfFAF{uTHom<rwkeIVf{VExV6!ea>^(z#<ZAA>7f+gRe~*vO zwZGG&SK8kh(aZUsxGko8B9B|4zx`f$P8|8dADr`d!DnubEx@mI+zs1eYj2U9eI`08 z@Wwf+_UG6yIvHa*^7%_<lCO*S^exan3?;dS=T{n|d<Wg5Lk*^pf3C;ldxcy}MPD{* zcRWS@gtIskP@Z#3?Ya11Naj=7ZJ&&sev9Syy+~Q@*DCiu{e-lhpmm*RZaRBRmiCVK zDDUCBoU_9@oO|?s$~j}Q_FZb!s$3QQkh?E5nlpuDY9Hr`1IWv-(5`dl`8BqN!FQrL zaR@)<EV=+1tsDUzzt%>csI>XPxmH>5u8LQ4z?1S&_>lvQ`xZ`e?&*^bdT?8nNk$63 z)*+pHrNgxtgRLW6IQcU<dyuCF9yG$!8|lw7%?lbw7#)M#4H&@juWj>!b6vhh{KNyp zD7VDN8t`z37x&M8av5nC6#H%4%<cDj{^0S;^5tW}`q*u-KI&bZHI7}DFPlZ?PfyBu z_Iu=d`~A_&^1FCHcr2c@dOPM(c5V-TZC+yM%2|u+m}fsRdeC>@P0T?h{Gy8c`9$wN zXj}RcgZW$b7BL9YHz9)v7e<&k?m=?M&#*B_X9Ztf+ejO_M-Saga^=oIlVpS{@?3c* z7#-4C&gQuXJE!nt>$$jVQMe~&FM1TXKku$V%VLc#<Gx&SdT9)rXTO<V(`)bBsnL7s zS2~(`eCvHZ@_ysp!&6(hXDVZOW9AQ2$fG^NsJ-n*BXT>il(DaE#|OBQ++Mmr&RQ7v zFL&;c8~1f%kCnJbuFEeST3U3}<KznXgcwH7Bt`Z;SZUG8&_w9Nx(6z~Ui&UO?$VCd zoVW+e+t&(wD|b$BvWQU>j$Z`Fo7lHJ+#|zX?`6R=cV>Es8x<Zt@2<ga`sF^;yngT^ zU-0*9jN|W38;v|Z20hbQ^i0Xf=_$zTsmSf0_7hhQJ&S$jL-(+#@-Tmdy`jyW(Z6L@ z`sP@jr@aJPlkd2E$1<hA_<_+Oodfp;N90%EJ>M88!%yf`dZ;q;qg+Cr!Fni@v)$V| zoAX>}vX9jcK1O}W_O=d$J*EUdSw|05%-+<w0^gBY_&>t$_PdAUk1;I7UbNajk-Zpr zD@3Q`joP=}si7QjuJ(`7?qOsv&N*rNzn#9?D68kU>D)7)J(hL(UGUvD>*~$uUoX)6 z{bk{S7kJ<G1IcCS(X;8NlV=0_k-Jq6oVB~Z68CLK&+quQAHlc1zkELUp2xXpF6W}F zn1?Gl7hQ4l`!#5~6MpZ4MSeRPD|aeOpA~uw8L$%m>xchJ$A$h$dFgDsk91u4A=)}F zUqZ*FxRc7MjhSU`Ph7{vn55$hLVGt$r;^Zdsa~aYT<0ZaE#<j*h;&=BFR1-P*o++= zm-0Y4dMo7>IUl^ur+r7q^-cKf2kHktyJL`!OJ&2ui4}w{L=(P2I<8&#y5Li{M{E4I z^li^C^CKNs4fm?qIxe+2M8~y=cRye(qLINmF4@8!Va>#KTob_KnCKbweJ<@E;`h7Q zuT-aPOKj~o(RIas7`JPzM^30QCxrSwh~<8ZaU4jDLwdcTx-RV<-$EB<mqBk_3*NL( zwXrV>e(Ac#Cfe`~(sl7{Ppx!ar<0S8d`$*rI_XFCwKq9BGwHgJl_SN>tKLsNm%|Ub zuBq7JZlr8^Lf3`9ch6X3?5<99U4H4hejmPb?AAzT%rt7pj97JD2l87gW7Qb2n?xtD zeuU>JG~5f1J_c_<Ke8B}zkqv*YVB`iy$1fzH;0v-s0$3_RzMf!4VC;YM$Vb^=q)Ma zQJ^iYheN#o5IR}?*Vv{!=1{g5ohcqf4iK&tOI-v^74DlR1<Bvmb1QPqmFPZ9W4Cl| zUUY3mv$%il>3cKZHObi*il3*dUe0aGLD%Lo2kF|V*Uh?nD|JjzvDGiZhl`5vXXpEI zba^ev6J_uZ<^7w3PDFB{_0$;=>-9yOJ8yDDKE#jqL-<4YHw<#SB!$*-xA&11oZW#% z`P#a!3?G+2@z0)i?J-`z{aE)mlmjLe{G~_ZvXSNUbKZX&*+uQe`AsT*Iv#jr2016W zmmV0OBfnyJOq_4LT_-*{F)BHr_%E>qsrdeP)At?VrXP6TqU|H_E48P7#q%6K)(@=j z(Eq!@_*2@fAofnYs2>>Brt*EN-G(<}3y2%H=X&5kEKfcX;9#oy*+p)G%9qc!b#qQ1 zYG3*m>6+SX{oI7I7SpOx`8k}^;^&IOUF7dOajtOc+ImmU7Y>YqUp(ubS?CObRpURx 
zvkx^!!AyP)!R+ASLu^KZx$8Cb;=t_aEgRn>&jIrlY?vGgy@%d%N#%(+8T2W;ZN23R z`6vy8KMiN!afg1%{JqVZZtX+A0>8HPF4ALC&XOJz+I*Ta_0nVVo@X`>sszWn%S-gD z`l4U?WA<_ukWC%kP})1_4TR@+SA_8ah~FXg&Mf&5h!3VyCw{Ml^PaJ@?_`7=x(>S2 z%H%)9nWa{_(gNtZG-uk=E3U0PCR!6s318yZ(qHzr;CqA)v&bA)TZYV8I@e5Dc6`j- zQto`~Z817lur|?q?v~y&J2`w@ZCJ(Z3m2o0qpcuk@DOM4pzc*8|ElWGp#E&?chOG| zZOAv*FPP#w)ZLx5q4wI)qv;;!AbPX_@zHJ4nI`mT%6%c&XPf=gyU7#hWi0V=pqr$A zHo3Pn4nO0#k?{$KL2#(?Sm^N>&(zNw&m3nw4gc@QvmU-F+}Puxy}{$*_h-f<cvZIZ zY1Z+Ju82dkqT{o5Mk+G~jISFTdmg<y*bC1IniJ7KdCTNW)$=5|+~|)yX`y59BiB7i zjuy&zDAVoshI-bg2mAQni~LL8^0GzfuHd!FEu3RtX3WBm=!f&@Kvi1(Jbc(Y+M&g~ zM^4J*y{T6Qdqi)v0o^7Cka=%-K9+lL2|60;d5Dj9>XqT^+{f9Wm+~#9uci<FR!u)M z=v(max)>RWI)eAm^#<~FQs^$^6^*g;pOJN*j@Rvj-@Pk-cRhV-EKhUBrES66g`cK) zQzvVM*iP(VSs~>lUBnvSN-Tk5;SJ>MhE8;C{MY!~?%w1jPB_#Z^93sz1MxZ?J@n65 zQdsM|&-oR(na2@N+JH=gPT9I^_<a0|qSI!PqXRv;7d@5_{dp4l^S;M^c%ASdy!9#0 z5Z~LwoPiJhjaa>L^&R12#Tcd%>o76o=r;$Dr+?7K{4jRIL#{<&tN04z<^NH^%Q?i8 zUf*F-H$dJJ7qq{E@`ql%Iw<{TZWlOOK>3@{8!(=(^S&HJ9_?5|p1;a?UcfTxLxX0j zZ@y{zdcmJ`Yu%4Fat~n!c}u-D^4koO^ANqLZy9+6(PNgS@w>&HR@3<OYV???HFv(y zm?=GC6EwA&KEvc7?s8|@zM%ia|E*U61OElHY@c6a9NJsuE7}8WN7+ZN$L7%J&MG4> z+ew`y{L9TeADfS?N|{dN-CH%E{BIy8sFU|yZnLZm8twwllX^~a7|^fgv~v!6PFL2+ zHt|z@pLfy;>U{-mevLNuf8pSA*a_aI+)?@voGOQdf13TDvDi7V@Yh@pd|PK@3r<t+ zW^yjTn}~^75I|?sILpZGqMZv<C2LQJ7Nr^;<&^J+K1+~iq;pDw&b!g+^g;WFXct{r zX@I#CZ?C$*$UO>AK7tI|jC>GauagWbnOg7vSvZ{9pRYbzbEQw6z+Nm}Y7Mdq?V^k9 zMh`jKT4d`{4bJ`+@`^|8kRKyxHfC-`AIaXlKrrn@Us!^^@Rh>KNz$8TXLIggJoS86 zvIlL=Su`oiZ^_p)>Dz%h4kK}KBaA%ZQTlMn9`9a;9-n<VK%auQ3;Gc3(v>Mb=}pE| z$NE0^wDv0keAJF)p?ho}_xL!spZ4=BzNGe`U-{_w(T3>1XCyKN&#Z-p;$Ay;;&-xo zxiObB!a<!k@!MR`O&R6n+R1#}FWWoiwg2ha6zm=7^9BO515E~P{Ba+?KFon&M)xq6 zyr?za^NGnCh~;Wb4&H^k!SEjW-PJ+C_AxwZ06k6bO-9EA)<h3;O`i*v$;X0zpqJ29 z(35ypxPbNWq<9p$tKr3}FJ9I9pli>%N0Hsg!}~mRV&oee*S&Z(`5ujE7xzhW_Os_$ zYf8`9!!|w1xU>c|_bcC1o=Ne?tk6;Ve1r38<+%EeN}ttaq=!3_S$7FoqOT=j>6~V4 z{4x26E1^N>e#KqD4X>J;cGukxT9`JB3E>VGbt~OzO%}g}4|JpQV+b7sZ|JvcF4um{ zoOgpi=})?tNAYFF!Sujq)t<(vSWd;@Y5lAlp|htQCuOgns<>{m=B3Z+e?R?vUjJ_< z`mctD$?KV`KE><AYXyh;Jm&s!rkw*1oJbE`1fGQd7;~j^_8b7WeoLPz9I4#fHokx_ z6rV5cBRUTx>dhTg&(5(Zo~AOIi!*^s<DASlHt#&qmUzd&6!t`T9%u91?#I}h&qHR? 
z{%U6U%2_Mh82h$h^(15E&6Bdxof@8-C*cRUK<o59zBki{p5M>!Z1nni{x8JJEJH>R zTspf5{%Xe9$^S#dO$iQD@DM8y`)|JC7svGtoJaW{NF_Gc7w&k0-`c-A8GH4v*rwz7 zb8n}8t@oF^V+-DZ2V8<IrT^D;$MW=U6wf6aSlH0?9GS69Jmf;k%^>e@BYD;BJ%xFA z8JdXG-^4O!AAj@R*ji`L?t^|L159BI>m)PF28k?;4hKKc;p`vo&<FUjzm5EAJP^xm z1Wxf4$r#Ebsrd`ISx=Ic6Sm5A;8ShH<^4=#+|1}X$ke^85!F#!lAkqi(zU#XzF6<{ zU(a{*A6_{1W!e$nk*+*@m@%e@@fgMCvF@dD8t-ErTdcQ6VDKZuaG!0c`j_w^>PXhH z&ke_2!7}6^$wrcwc0P-3h4<~8FTAYJz|G_pVgK#@V@qa<>7Q0KeBxq_uf7(W0e;u| zE#1(cTa>3JHB_Rr4|eX2-->Pe9&@EW{|k5r)7r8OBM;k8nru+oKZ5YxKyqWIast`U z_#fnXfW1KLx0iLGe#%)p_2{OgH=M(FiR6+|PF~Cj(bHHP2KORs?-4I*=fC!|_c*VK z-)GZ)GxmdJ_-1O1y{t<Qe6|c7=9C>%h*kCV-N>2YY;vWwpo_(Ry`X?PI&1Z?UrIMD zIF^ky@~U|*y3wA3jk!bX&H|@Lk(X5F2JAxESJDJ4JY|9Q&!J;kr#+4~G*;oB7?6%4 z_BQ!|il=Fx(_G4S*S(ZDltiCh@Dk>9Zuh^X2Y*EDmT)Wnr@n0&`)F*_6_hPa=1f6b zL6dkmY#G(qe`UX+kExx|y<`?=A3O%1Qa(KKAhoZtp*Js6AN7eo)OL1%Y*RkvG;bQ` zk97{DJU)&!O^iqNdcb{vyI~tS{|L5Y?7@?$^Tuf6YH7>nujFU4(1G_b2Tnd+or6-z zp?e=|-fyJV)Wc82JGF+yGu5a3V<Zzw78cEm_C@p9xqVZ3Un0CbmL6OWZ0g(Lz2vHn z7Q)NIT6@4)#2!C|-`WFzAU?#{I@!C#XWsreHco5JqA%W;{i!d`o9(gad18+o0N=uw zcv&TD=w))Ph@b7!H#E3I-|(^D>YM(5!?()!z#Fs=XfN(YZ`4y~k1<8_Z8US1M0aHH zPV$a)Upv6@PV6dLqrYs9<vjs>J09Zx1jey}HZ_O%OAKf}Y`(*|IS=5|34Py1+&%QI zwx8zC=_cB3OTo7d`*G#+#^xS(9(ELutq*sRpD?alPmfOj8L(1ceImOo-nxdIfiIMA z?#3<};6C-r7yO%VSQE>;7g$8&+kjIvXyXXFQqD=mz%(LjXr4XfW)ux-uDz@|Y>|Ca z(6hfly#VX2ksQn94cx<A&lT(%)8O{BPucW#IF{E$ySJdX`#jrO(U;=3v(AL=>|Fe) zWjj0H9k-p;V>=t|u};{|wsR+fbd4%M)ONO2wliOr@6Us5XV|`M+u2nK+u73d#(v6n zCcl2!&h*}Q&e#*Svr76Iz;=f1E%H7(ls<IF%Ht^8nWIbH051PguD2VoAvBH?e>0<9 z^Nq-1?$A=MH^boE%^B}w_wZ0Z{Fr?Sx!V=J0v;H~?;_s~olTw@lr6?I$CU**TNS|@ zyGOc$??Dr>z34l@(;eiv4UB<q@O9uGx#S@J4WiX`=vTXth3qpGb9R)oqU4wUWFvPQ z=bpH{mm2*gYj^8d#cUc;l~Jx2<&wCVvgNEfJ(De;Tu^qd(*S3*3jTXIr&g5)n}YDY zJl0^_>Dx1lOsl$>cd||DnbzYS@Q}(I!-b88m9J-#i6rBwAK7dwiN_I~k_E;4_3Y@# zmdwBP#@6CbF#X%#oH##|@_IgzSUU87HXjnaJ=mnu;UNw`YIAwYV%V#Wx*IdU!8-FB zSv7+7PS#^3_WVrF-0EL#{f_p#Q^Os0eetVQqeFW}U}U%>E3ww=+~aEW+&+u(uA99` zIa{@F2>v?kV8~z{lJ^40K6(cZYA&Zw@j&S{R3E!{N9^BYYgMO2@Uq{kuKJJr?2N-_ zHZ{7Pa<5~DZ)6=?W_n0%%MW=n?JpT=%>9^g)G-h1nNN*Dyl_1D6Yfgf<3kqk1?q;U z%4VqjNW3<f^E!9js%#czJ>Cf8F5$V_eu(mV_h0yE3wHRoO$X^o#3n%J-HCF7K{8?7 z?>IGDN6r+{w38dJi}7OL!@v8~x$)G8<OShxlZ$gBeSEJ&cO!i|f2ng1lJpyv0p3TV zA3-+Vh<vDc3=7-uM)-|oCS9O+ovB9T4q(Fe`l;Nc=<~~*JazP8``lI2-{;v!OR|mR z!>mK{C?cP`klV12&NgaoA0*~Yd^q^q$h@kFk1e+D3O;P#ZGL>=gYuQL@oe9t<gxEj zBLDCL19<Qa*eE$-3;KNPO;>O|ZR|bEh&)Tq2K-72WZ!VX7ne{LST_=5U0Y1uEw0p> zUCi+q#=80}Y_^osvuf(U_Cjvct*-PkXr$1oU#0rY9r=B0l^ZdKHjXcyg)ibvY?{UG zAC0`EU-sdc+uTF0r{f>Oe-2;i!k?ad+rf*I4@>^^UEosoY2p4)@P;loHX3Ne;=Zoe z_}?=*9K09%bPXTIUN+nGm09T?pY|*3oTT}E!0j>PI#~Qgd>;IfbT^Tm`y!#6mHtQ1 z<=50UXwFQ=XK?3*1N(=ZJGy<?1>NyHe@8d#jwjY1JUNJ8eibqhGH*c+w%4-Z=G-|A zMsi)1QG4%H{FYh6o#=U;{8_BIfo$eNcyMCsu3(=FlBZVtt!I0%iS-@b$2dZq{nsOJ zamS^d!}m(|!a8$I-=c;eG;??6uKTcU-$yRq`+;K%IfmzuODl*D<_gNc!d|(Lvae9~ z70Ui@6gENj=_|<x93#hW6+HVD>MOVKE5x@d*KvzEx=;RJ(rYzJmPM|$E{W&;^&8$k z7kKe=zob>ave`0?UDj#X{j~Q(mjypjjNmrg{;&Ef|0v^@AAoWZzl2WDFq4|*kT3bl zRwHsB@S!h{$Pc9Rp1LPKXdtgKwlm|Nq9-hVOXr~+#M_7~=%Njs=L6(<)}Hb>aEJzj z<af^I+q*r`R0^MwoKS_&PPrMZCXOXkAsT>o^;>wggdh5aH`@2qhF&h;97TR`=EIqz ze-sDUZyKw{|64IJ;_<u9l)KG(k^GC$t^5s3;E$W(hv-oUdWc;?PZJH|lOG};O*B)! z%{bV~_>yX>3w+o{3X7*Sp*urA7LS*}HX6Mbdh+ppR}8s@GS_q0MmaGI+H0#%3m1&y z-OIe=ZmnJEj8X5lF?UC)TkQ`QSiBFwSLus5?yRWfEPV%f=cj*o-U6$pvOwqA9`tA8 z5BtIM_4p?*GP8Dl>`N*;>h?u_v>n3cuf16OMeU3C>A!p#oaf57Sc)CDd>ip7%t;M? 
z0n(2Rw%O+JTz1bI@@dWCTWitLMfCyO>HL>IZFE^p!7k*vx`pI?0&bPjd+on9?xdzg zjQ{0gWA2!mMFsCtu7z?c(?<J}pK5vk^Sj1oy<(o{d*ZIF`D1Eo3$`%M`^eGu9_6=u z-KbUl7opEF@bCA{(PcWTK4E@8^Ev!z&%tLxcxfXqcQ<pW^^Gq#a>97_mQ&{dJ)5$P z$mZz$b0v?5vEfM`-<D-8cqYrpyOJ?<y9+rBWcmC#oF(k_#+@VBjb!sNu+_FPHxIml z4+FV9lF*5Xr?rv~-<Iz=liKn<?e0a_RY!ZG+wW06J6pLpjp#9aW8^!a=iS(f>f96h z#22b##C1U@^|W8q5RVrz@ENLRex35(vg(3T?s6|>9feIJglwIx`T=ldAYWsP*d?8$ z2fovT>@dFewE5?f3+P9Z72HPIcyeyuuk)OH9DDuvP)dwFft;2ma)Z`ycS0sm{9@JQ zZRp(b^hVz%`B3qTvKg!HFzQJTTRQULOv!+Ikry?dIgGP&JU-E!;WSnwY#w`-Icmh_ zQi&|_+lNouZN7^9@E|mbPG>;@=MM*_9QG;sB&)uy|8b8mn+R`^Zua)!MxMrEz)K2{ zVbF6g&K&trW`3%XLfn3cIZD+W5eJkP)Ev=w1#+_X=b{zFo^Ymb;_U24X0?7x-e%?k z9{H-yN9n}zz5hsT?G|Ff+yASjxd{KF<&0PS0iKa2ShObe?ZA2&e&CtzOkX2sKgDf~ zg2weOOyBnn8tWa%cwc3#!ozs(5>Xz#fO~w6Vl<VHFBAN0{TA}Mh`gSXsdwSO6YnQ8 z)b>eit=^;m2o(ZD0l$w@ZXUk{-ye~Cdfem6yvU8)@wJ+L=8jCy1msToD_{)?_XX-- zK4+qP`V^hE>KpoP;JXNTiuH}`SOa`Hz@+|H(sw84J^48V@WJY3|0q4-dqoT+JV1OC zKe0ae;R1{MmY*diNBhEF;1ciZmJE!(OnHviz|+<4N3X}$&KiWH@yL5)crV|S66Ut? z&y|~lId34B#%UFK+uMgvcQa%9Ds4VY+ZVaX<H)&2awohmq~Fgc*0{zA-1ayt^vyWe z0qeuu8Q;jci8anTH}c23Cte^Ly%QXLfRCu)&A=x072rIA{G$Fxavw#uWH0E*4}MJW zgKQ-o##~}TYl>M@lBu2bqq&kyt@RUNPR6G<W-5<BKhO8d&zN@oW=72-;3|NI4cgm| zEkO09122YtES&+&@nyDsfO7FO$+&0~960nAeK)oMKeFTk%Knb_zsfiJj{F!`^{@{H z?<pQI+Alj^cGHS^-pMQGH9h+1;+Ni9F)vhWESZu#V)NN^N8B+bdD!Oc3C8iW=MK9= za?g=#euYd(et@OOek+W~5!%`Ny<%)et|;;@K7mGb`wQVh)<hn0>}#Xop#<5e64*WP zo^tdG!Q{qf^gdDl%l^&xvB#F7|M@=q<WBsPUV&ETXkUQGt$<#%2Ws!>Mb?;ePPpJc z_NkUGv$lb`dTCU+Q01=R-Uj(EDHiP@A1`@e$1v7+p~W1Jx9J7;@3-8^A?<zKAHm(v zTsgsB6M(n;m3Du_p3@2ZH{LdB{?|tu8-HE>^@6vt<7}zNuS<J?iyfOeEHaY%T=3F5 z{NY|Au6i=(inoc$`YYpphtFek!ka(Lej#&{%N^Q6*|*O4b><%oqvNmWjPZr&a5J7x z@Q=<Rxh~tNtxfp&3Z5R~?BR8L#3zfEgf>rw79<<!nZ|TB<2XRfVGDeGG=5C`?lEfR z$G8vtjJAwD+wgHSk!N$1KdsZOE!KbHOp6%9==$$B|0n%s1Mjd!6Q`X)`_J(o*)vyT zQk<s7^e%WlhcctV9dc*c0mk$JpM#f&H)o&z0$cc~P|Qr~JLjxdGNH+i1MV?V^a%F2 zG=|ymhcf1`=J@)17n$B&E#&okfoFZl{?ix>XWHB?KVf~)&&!q(xu12V{wI^0K)K?E zGp=`7w`~)R6#q}*JB+WMGWFmhm-_m?fbTx^+aC0ry>8-Rq~pYY6CEdcW{eK(Zh2== zW(s%O)|sh&=Yh8!&`OP0cY&bm<+tD&Lz(*z-u1vO_|KM_qnc{GqnbXjW!h1;Oe=X- za_v@R+E)@XZHqaoua){|FR9+lygapt{g-^I3+ni;LoVIHZ@~o|dGfLKAfvYHyp7&% z9q+_D#A{pmcFvSXu8H3nJv6_&?6^Tbl9_Wi)HiEQr*CLzmaXf~KgIoP4dTdkj&l4M z?_<BJhL=Y14}L<w@rNx#X1=$oYV*(eeADG=LMDh<IU(QJoO6BK$*pY_m-@Uj`8K`2 zYTlPpz8pMt=NU_^;<57&Of{Ccq51ph=LtUC30qdP<LoAtd4zf7PQNC}<<g&9IcNJu z(PoKhENLsA<0~0%EI}V_w`cMEE!wF>uH}q<!oOL(vXZgMzxfiz6^4&~k$>|otvU~< zM!!Y5M%wry|7Pii<j1VC4xXJH^E&r?9wIJ~_yXIn$i%L|I6RfD^CaJT!>x;T520cu zf_J5*zQ}xAX6rl;dnR!)oP)mcAL4`6=n<w+em3V<<lf)w?BX{|YU_C~-*#ih!b$dd zd5B)ZxmRVsX>6Ezeh9uqtS;weot1mIN65n8-2?waHx(+HpnOAq>2MYhBe|h8=d4H* z`7Gv;_p!#4)Kp=Tx0ADUAjdP!Q_;?T;PH7FdpG(g*}iZ6(MfsAI`E{eGqd`ioVSwm za<8ptB2On{uR7f`-@#MRG*f6NWSC!?<mfxj-4yqeFlqBtIiz*=FJBe+({F=cmP^ma zcO&`)&4~qX@Q_O<bSD0n#MFzwOO_4m`w8^l;<3a+qc2#X+*s1}mEV!Ic$?%7_?+Yr z)>k31N_LJ&!R|q)8rnvCL43cEuS4WUC>?Iv?f1aD%aKESIU~u=F(3WLZSa$Gz)Ls# z)DC1H_-#~t<5J$MPwCgxXFWX0scZKQ9r)NYJj}bsQpRtcJ8F`bXWmYph9u*_@0&;M zZPvL-ZH|L)Yrh>pZn?hBmvt<mck(RDcsP4oV3%mEhg)j{!0rcj`LG9o-2!(1*V%WN zb0@EF<Fp?smIa#3sEJap^pjY=pZhYPg#``lLyF<MobQhwA|E9CjM_DThkY=m8@~D= z;})!quC>jyJ8-r1^SnoP=-WmcJ=7}!R*SmSdqCqu@3iaq($D5nck-xP?5WD3y^Z$& zZ>EVyp@G3PLBQmRc^ym>ZQx)iO%(k<q=`k)#9C-#6mrOOo<BBcKno>xNm<YacSrd{ z4qY5|2SN^A%)V>f!=Y_~Ch?RV|7*G!XVXO)bWu)Tw8kNHQJbI(wX6LSx_CfzQ3p-@ zWM}x!@bG)z5RK@6qic;#BmZdblbCz?n*ZO<6aJ_3ANc?A{2yE!=L`Rz%s(_Bo$#Sl zW5Ce`JGyu6ZL(82xg1>BmC#A8?sRXx7#ZM%?tdNhDt+xC?52*MHo~0<(&c?f4DsMQ zU!-Slr*1d;b?q<gqpNfm$ISXrq@DX0w$fJE*n08ovEh#WoKHG^^%r+PHaz&&H1^q9 z=qb5ZQF|=s6x-*!ggplz%2mOO&-zR}S>4@6`)%%U{%P<5bjX6YqjyxBWVnIuEbe+P 
z_W0}^Hh#`}OUBsyX60z3<25s!FQ4$vky|g;ejEY+4u9pWe!zI@#fv2$+Q0w(#fvLk z^%t)uZmp8@x8l~+MjK;v?sn*6OmW*tTz~c9WbpK2T%UcJ@bm@v_md|{_>Uo5gxNRP zS3{lixkqBM5xI%jZQdVTsJ7>a3&rydc*8%MmnrmjXw0O3Cr6y*l#p`Hz}s5w@6=HL z=Ga<f{Eb#U`@Q+K%|-aBYhKW?AGEsS`eB`OBp-Rq@_wUX>kP}p_Xpk=G+j*<M*YQA z*=D=mTLwO-lBd3!J=!QZ|6Gr8)}$hQ>x2{k@bE#8vE0s$&faC$R~@HqM@O$V8t^^v z<(N-Fe*-sBSF*SjHVTmk2RQp5^xV_Z>=}m7av<DZX?m4we9wDs?=H(6b+12hZV+$w zQO9Q{t#{gSp2@ZyY`E8MBS@PVKO@`|xU1Ck-CKU#I2g2L7w@iMj<F{NTzk;J2RVO8 zpMB#Xcmlwvu?&XMOM8+%q{}JiOjX*@)a(UTPZItMoN*o9eJix}6XZl=!-U9pH&2ha zt$Jc0T=*G#qWArZd0V4<M(`7$Z;QS;TiAU|AK_tKp2PRdLndAi&D#B+cuxOb`nSe% zUZa0!oWiw(V^4OtJ!D!<I<Mj{unYSPcq7)1v3bJ8nbGDB&O#2XPTP7exD6{eauYD_ z1jg73bI<49yYm&+{&v=d?6#uoNtBhoK{oK2E!dg(7Hw<(FX#UcPseYI7>Xx|Eo;1S zCU<2^M-$KOAzhuGDHr!g`_WI4yG!Sp#($m3SjRQZP7V)1XHOYCn>i{x@W>~zjpQ9| zGCkpe3eG%b*kW~NRr~)=-heLp4TDGW14YlJZ^3!Owh0b~?x&2=wk2Nki_f<u+WJvs z#>4S@hOq0K(PGxuR>q+5k6U22XI$lK?}Yyu#+1lS_>WsIV}s|t@Ic*JM*D2`^JykD z!n05h_io_lGOY)CNt920kGu`|p9xo5`}NGPV>26U?;EP`QQuA*U#8DA+OO{o4+ySw zes8*QWW-B*YkKJ$S_|dKW`oYub#HS(@Yp;eBl;Ebhcxo(QU>2``wmjA#S-wGK{@>v zew}lP)2GhMi-<+DdFQWQyx5P-3LhKrLz~91`iq^l&z_Q7#Qsps{H<hOG)50&^rGJ= z{r1EZ&TS#+9Q|FK4{f*40+!8_ipGWyYE5qCnP^;cp~{&ad@%WM+UqV}k2p*F*>>{i zSPRK{1Rt};C!VpdSnJMGJ~6ut@v{Twx6J_)njgG&Oyhg=ZUn|z$l$}^!Ssbb$sgTI zes{+9RE14Har@jSKZLh327Aqn{ZeGjFR|U(&%QUW^lA3uS?rnY#fPuNF1vIX_h6cX zY_s@*7!Q1IUs_MkCHkv}SDElt<+{@Pngh=Vj;R%#Ng79lBOYTF=O)DoD1MT2tB*Z~ zoS(mkmT#L$EEap^es_2PUUVS4ZRNc0{*wC?t8s;=y{9N6eXQV6Jnw0=ZDzNAYV7J) z<6i?j-LVy%ORT1fFB=1?J8pg8SDf8|Md#vZJ$UJiq3gyM(<(`we=E7{P2;=v_;mK4 zMB9JoTYIUq7Pr~ByaXNQto9EjZ_J9X!*6qEoc7Tp*O<IJY1ejmj_{x}x-}l1%-PVf zX{4fSO7UF9UTuucfAI9ng5bi=c`V){dimNjww@-G?K+UJbNtF1(Z>y+v{>`m2rq>G z(i$(sHvw9$PeER>af&}B^@Q(w@U3>vrCi+zW2%9y!X9q(xAH`p2F|Hm%wxw}8_>n= z{}Ee@&*}#F)dA$MXwJs5k(nF56p{Vyushsw3uQm#xysg@EW6<yr|f~ONGr7XmOC}1 z9CX3wWhV)DjG}(|pnh(M*Ih+F8&34omSoqRNSzAm*gOzDgZ6<q-cR}yIC9=N@ZMkk zOMDDIjd$@o;rtN$+fP=b?{Q_UN7h^53CzU5NO=G}>u==0H9Y(j=c?yso8bpTrg!H~ z_*}cu|E%YDQ6%@}+w7bvGbyKgy6Q5q_bfM_ki0dR*Y%hG+O~bq8rFDX{VCV4Lx+ds z^#4>g>jF4-jUOJ~lVN0#-`|L8-8%U@mP6ajQEobT5`I%p;%L>|4t^`W3H*+a{{A`X zvt++Wi(XINByc<;QFmCP?sDewMBS2e5_Qv}FH%=ys&FwLZ~es@8}^lP?V*_i-LABz z@60pWZ@<xO2T!}e(Voh$x!Nl!^8#fC=OoabUunZ_zn$q%!>?|AB^Y5(PFn#zKDgf% zVSZQ5{N?0`32)Mx`p=Ho+U4Ibg|`&9Yi&F08b9FZVR8s`JcVEOn9Tk!x;u}%2liY; zpWUoU_6}rsBf5I|$Vl6YO#AM%m7kP1u4HXA-tB2@$4-|mx+`-3p#(hZUE|%}#+^K$ zjUMrN=t8tG9ll<WTtDwx$~2~g3!j7b!q^XI@!!vx*5zKqy<hby!ZR_JHg7#jU9GXL zT8p&%Bl<&sn`aTD>!#1=Tt<iD^=!VrZAv6-6go<H`yb%*f8w+0ktvZk_<o(wR&2d{ zkq1hSmXd#(oVB!>z4TJu|FcQw0PKq)*3iIi%CwF!2A(1RP?-JiPyDxH@j8N|bp-nZ z_rT@s4n}lG>JH~U`kk$;eYL5+p8p^8wP{FSzjE3FzR&CH=e)PaT6;5oc1Es#8&6$2 zP{Del&jW^Ww$1OgCfvt>o4GSSGzN%kTV*|XS>ejN%NtuhiO&tT(b{5omvA58j=&6T z_g8`M39Je9y{j~5IrSqWnxo0g(SG7>oO$^x?XOPEjX_=@^<%YPR!H3Ebjj@h8~T_w zh&~FTkLwcj@%#|_IQ?_z<JKYc(L97co=?!nUU0L$x=eoCQ@b>WtQXnhjCUtSOtW?I z*dJrBuJ(2OBOIlJqmlGEK7k|gzE9&Q{Zlw9OTd?nz4|r!Z%OPATBASayvf-s*~=NE zhy9|ocG&~u$cp5d-d*^$L^9A#^-)H=1>PN+IX>5xla+^7>-aaE-%`1Y(2br*=T7d9 zgcqhp-+0VkGf%B!oNqozAHd2TY4-g-e%jZXZeMX-zxe$+c<vTtk)d+0?qfVU^2_~_ ze@JxE(a0Kmh;P{^S3mj1Ie$~3gInPfqJgRekEr|{9-()YgLuR*fK#}X|J!tE;2P>z z!Xq9ULIW?jewg49sV8{E->K{Hh)UZ1&JY?9+=F?<ganUx4Vn;LtcFL3SNxHW*6qvC zf;EUobl!QAN3bSB_3((_Ql=grL9P^=NBoZe+dnzUBkDiJBhsQ<o%i@|h)1YR_4SMY zL0^v#>1(6YR)R+i?dzv{&sePg0gw3j<C8ohgSp%Oz-5J1@QAuE;1Qz#%fR<IXi7Zd zAJKnPVs1Lo`8zyfyEo1w{x|dy8blxY(1&~nL?7}a8N7$={~Y?L9zq{?455!7CFnzY zM%yWP#7O)cTHz7@2uEq)=#vLxYdJI7IMUwvX&#aGDIPI10iS`~@-+QRhHPbBZiGjz z-aj!C&r6-k`c8`;W$i)h_S$}jKAg4PiOuedaxN1*+kstk-0gfzpUhdTuoJn>z=vV5 zj)=Aj+t||uuZ2&5)^Mdu_dgf69~wE+<^|U7;zH>v+-Y$?U2{hKjAY>3v)ordZv~&b 
ze?}ixJT_i(v3ST}SqAyL%;VXbNk5_7bw<_{?mpl;-x}G;)t6<xnrSqoW=T#_-N7&l zHUqjdwvLQ!h@YibdD4pqs?Y)2XX)XbFB9)m)6Ica&dyHmBxej>-pdwSy3LB{jKleW z97V2!TiFXsl08j6<cIb6ql;%qrs;DhMf*M(nV)pLe0z)Ov(Laa)bSl3wO#MdK^`fk z9yYlPN;yXtA8*+{_{>@fPm?`ZW6?fq85<T${*WEtx`VR`@C1Ry``)rwgH_f2rN~{S z+-F~sY&CIyneM$O++UuM%_ZBY9i2V(ZPA~8vxnQy2e<!tqJ53mL)#iJchvRwFgB+T z&5zyBaKqN)ZB3k|p^*V+?kx5fWX%)*i<sLg_7<ls^NDQ^9Rhoi>IC0g_5gN%+3ReW zB>&pye#3YInQIemFLBxqpjYr*J8r&rRQT`2m8WC_b2)xVl6}h2_cbslLGCbYGy>CQ zD-9MWU(jX*^1<&C)<K8Uj>mXto}LAv;UK<sp0P%u{X2Qsd{3I_{j>T5M<)i|FY#PF zdcJjW_yOi3&oXa)fbsmod#t>q93TH}z~WivExDI@Ry&SvOthi1GP0Jf+q)b1{8PjB z-CUB_k>>}z+2~f2(HD=6p9jm1mpAUht~dKGU*me(bK2^OwIBm~YGfyDV872vx;SzJ zI(UQmaLTB^>Teq<=rC)vw*w<KR{sT(;{>C0jDz8keh0hjl3v<Y9fS1+ttV>?9&G+j zyV6B`gl^!-H7?H7C-o7I{Gad{vULnPTRUURU`!Tc+rggO4&T_G*yk1VI*z^tZxDDj zt}ftfblLnR*DtvfUUw^J6`fb@*v;-F=-7z7{z+_Y1NnjQkxtQhjq`1EF?s{meVN|B z#(T*xJ&E(G<X2=V<|M8wIC9O*_K(nOIQoLd5#c2cZd-u?eQ~njYaC%Ld7t%tD{E7C zN0*Zub})~UpRst&yQ|<$-@?zFy5b`aPl}&AQ=`ZXtN&44kb}p=ao;Oq4&r)I(Vc&I z%Hr|R+d}*VEqC}A?1?2s=6HL4k()|ZLL-XBkgtsJxq$NsV;blw`q8}Ew?4L5GMv`p zYtTs+Wx(;kcJ2n3ZXriFon=J63(hj|u|qa0xt%t3e)7{tJ!=hlu(pdBoJVbZry@T} z#!rj>g>m^en^PCEue?S+Na^M5m=M~#)tLi)_SJt5IW+$#F(GP4_x8+YZ3I~x)1jRz z`~Y>Qi0H#}X-Z1)V(2?3Y~Qtb8}xy1+BgsEve0wu14WYqHOJti$Q_EC{KYcH)55cb zjHBxA`1%x#x04I-Pt@ziZtD+DTD%V2-o`xZu4my;xH$^`Z2U~-jz@RhFRlld7c#D* z$unw1Ct5oytN(({?#n%)W+7|n&bzIW5^$)P&&H9^_wcyy8*)=gQO*q8|B~nPkZ((l zk&B_1dp(3d{DecVq3>PE{1us5_MPo3nXf8W|Fh0KxEQ}P7n+BqiFpV!#$~SX62Y?& zU6Sr)7JO=7&*a1ZN1h4C*gWjHsstYVh?7M}I-|=^*TIkA5zTZm{`+{|LcYGCI%E&@ zDOwG|8w1d2wHsX+{HB_AD?jm-$Zx(}JPZ6fXGqD1TUaxaeIy$?XGi5K;jH{Axo23! zmSv`jA2s0Pr|&h$C_!KgGUr9`Qt`D4Vj>;=vh>PRc<=DEFtW70mqAaZ&{GZcgwIn= z4)`i!?zNw&-2^V?I=IkzKO6aGwEBX!khwZEuBybiW}`C{UtLH48k^RL&TKPmyQubn znec*Q^kH$oum<t$Z1J$vD7m`Vif=2Yr?bb1XTpoIhZ@l!aC$Ed7xZ#phxe}XM&Z!z z=eV~~dyI6DvSn(1%WQt{tC4=($KF<#pf%sH@RB~x|5f-Psk~>Hu|#8%%%(A^O_MrK z`wbe0(>`#iZTTH)&-gWCl1wHU#n}^-BcqFQLusPOFcwS3(A;XSFG<X`=;Y*FODFf~ zxn3zgiXKO6Ybfp{mnw$E!IkK)2)-DA2E^Zo?#IHFvma}`(gSHc!h`0@0{7jijm+1{ z@n~Nb4xPQr*_TV$k4vCyXFom!9HK4xm9=taA52@|+@>v)y(>6~SFtbQUmoW*I@?*x z;_J>@<|(oD$;|0r5;(W)`NY>);~I;-O)(76nRr?y{7yW|f*vJ*_>e#FS4_wszLKT{ zZD3REfQQ?2sImSRzdcFeCEfNuZ`u4S$UbM&MUt^ZKBbaDf5|tzZ&l8>&W>CPjjHam zJah0KmxG{taJ3a&omgXx4<5I;0T{KHdKq7NPN1fXGR{1xFYV=)<Q;T)qPt${&hPRz z68~f0@ngettOaz(=S%$$o+En=-If%Y1Z)GqHS`{~Zth|0aT}4|$>clZ9=7%DMcAPu zrCr>6F!`U_{||M49v@|S_3`8PBm`!%pg<M~XcDZNOhBu$#cG+LtzorbtX6Fapmjn( zi~CL>RVNH6qbLe(3GlQ{2C-O4Yq}Y<^$DOBq+0D}z}hARt*Af-;hFFIb1%uAF)V%h z{C=<R_m8|LbI)~O%Q@#d*V(RfHWYW)VN?Az*?7e34eGEJe77K?T5QLw!}eq&^#C6I zJRcOzw?Xsm(3)wZARod*v=fhW=-!eCvd9ya*MrAxr`@sWK8Wrr+6M-c&Go+Q*%`wp z)HyR5zS0N&l7juVFY(*t%a=UKJxtA|2>sU&KDon)jH3ICM)d0gYIEfq>Vtgn9`e2T zy5`id9(f5_=~KQJCU^e)YUU&Ro^0aUua>=4cB5?OZu$~TyX*Jud5aD-ZkJz%?}_(w zln=y-bEwOx9;m#UZLBS3uTB2Jer8?0-u=ane5AE)1<u=mvtmUA>nrVbC9|$QUF#YY zz13dVC#=iMx`s1f+2nLbMC<B_k3r^|WV5`{!3XOw<w#qGS!=GVXB?g5`_-Bbu!a`a z^tQbw)j50vy_G%F<j}Knq4~Z*%>#JQ-k+}_&NT0q4q_U|izf{U+%xgHaPD~c_aeq3 z#(uzfY;wi?kDkF9;H}z??C-lTEu?<7BQxmSl8)!d^d9!jiDXU5R$lm&<SU&s$Y)iv z;FlHf7VdFm?nCa8Ja6*vFJfclzN795$>w?ybTyV*gHw=&Laf!juG+JXy{<{DYbNWO z<yu!2HD8yY7dY#xeV}_?&ohr*$U#Bmpgd1{M;SHv*bA|;oX($X++)G7jB(07>o>NZ zenaq<Uq0Xs)x%qyvr^B0;n;TYJAD5UtDZw>LZD;_J~h#W<7+5n|19S2xI_PrUHe_$ z(SB_PX72u|g*aO97=rImc7YIa&C(@8#Bo3Uz|2r#vT=UM)_)eC9%#~-4LpC*Wk(bL z)Y^1@4(9CKlV>#D#(1J3>|?R+^HckAa1TDh-fnt<wtI+S()_p^UmgM{<B{Lnp|$bJ zdABHj*6e?1;GV+gmgLs6U#Mr@q;-@L(=Wf(qTxF8rB~(9Ke;^NwbX1j+nfwc%a}uq zJ*M@IJ4u)>IRQ+^^@QmH!Bl5_2>5yL&Ku~!wKtgR9CgDq--f5=cwBf6GMcWp=T!~e zwgFe|jb`L$(SKf(=e*t2q4fbXC#DR0M$>w5ue<!<yGgQw_UMW3!dF0Rk_BZi5g*YQ 
zdRKVY{idy~Rk22v4CC|b8TQ+9AADgve4!MbL-A(b`%>WzK7T4S@5nLIzr8Ly$iKrY z#3!zWUsQ5FB=U+^m`?}rkj|<zE=#<EI5Fkt60Z=Sm<*hP+OPH+bpE^7P{<l&<8`k= zHh*Ug4YZd&6f*fhT1O#ysY399!tBnUoBhHcli`WZe*cxd-%q#4aqmy?F>38HYrhBW z{jPITG+@#PI2caOBJKagJ->(ddfW3Y34307CA8=9&o<o+Z5B$Ww)gz(;oOZp$vuB3 z<DKlD|0QjmJ>LqQa947H{N&EQZew37KH5#AYLg7j%<8e%sfl~E=UzV%Jf||Y_L0`5 zy?(OreB=c1boRRq&jo^~_B%Qn`~CER4orK4XX1YUf(^@)+3!JnUWUEjMc>+sqGRp% zwe9RP!P4H}+Ot9ShW5DjSr8rZM0;I(+}!K*tG(V^%!F{SJ=Nmr94o(E8ajj4j!ZDU zlJmlpmv29PpyUIM&23oOtp<YY(N8cxo$;KVJ<fQ^;?iNwe(p8~I{ip)ufslb+xf_U z(0(oZ5q{I<w-@b54_?Tp_HLQo#_``5L0?z$Y!)=2IH3E{6+`s#gniD+4kA3(v;Pio zo;38I@Irmk%yH5`8j{F|!n)l4<^tW@wa>sD&cMw}d&+I{Gh4Eo&L=AdKyq7*cG{z} zfR$vh0_Gd!3=R>;VUGKH=?nD}jy>*{gmIrd$#MT|kL$rcp>fY*+=qE4IWx%q+6Dh} z&Y3ebC*YL0=C!UfN;>nMyzkZ>y4T!OXa1EvwrF5CedtsALMG$rf1MfE_cE7cLp5Y9 zIU|Yj<QI0s@<*<ICFk@OW263eF_!d_UTpyI)*@2}!Dov;G$CP4-F&vEEkLjn9_!3; zd)iR6HrW9Z@7U?AQ|!y*Hw9w&mp9|5Ti&>&G^Tr^vbVH7OU^`oBaU)2vDvZEO=dmQ z2GzdKipMusq?+*>E3_XS+k<n@>{zvHEg9LWIhxOXw!AvapEVfWb|T|u%hy5vWg}1& z`7j2b<o~hbzY@EXIkcTsTBLJAviDxrX~iBXekeOShkCjB<XjW2B*qfzcg53bj4kqs zDW(v*ldeXL-f}ZW?>j@gV)RstmU^MlSIFU8$k;*7Qso#heOm0J_ye8>awNq%KJol? z+XQ20)s;_t+?Is>{{HQ&y+<$I)jr%)G<JLnIhV-+My>JTyrVd$ROaserf;?8vzOYh zS<^iY<bI1O*Zl>|zXH3H=BRO$vjM)eW01#R(ghEE%2pnSZkv4P%+iLg8nEcOM{f2| z8zPAs5uev}#T#EHHkX=#ii0&_`~)!G%l~B$PHJlBbM(buiG9YW;9T}#YSY4Gf5{Vl zfj#{C5YLCPo<kFg%ovJf=Gll`@dR~FHnUb@Fcx4-DGz;p*y>#3yuU#V<U2`5qwbns z&HEO<vP^s(0=CuIXatuRtaU~6lgHzK%ZYx6IR%sFmXswKW68y{b4`-5|C%|*&W+e- z)#u&N-s4B(o6GP=D0X%?w)8e=(1Nqo|IB0~_BY1RbHOo-b!jbc9*#HaJ9$;2Ue?Sy zrx&u$UBnF3a}MY?gSMPy=9)8!70JSvCf@l|`cu5~6LG7Lyo*!1;P^0WxPo`IZxZ$R z!#7T#R!3=rboqUsYOfqS4~L%^R_3tWTi{XM`Ia*LdTY{%7iFL38+ko^w(fSqZ5wSh zek%D~^O=j=zw9A~+wHGaU)Y6qES7y$F_QU7kviy5exZGvw9dS!><ez+P0#VH_{2is zrt?d7=-(4(eg%6aV2|zibdpYU#<tq&c_4B8!Y|q5D@JWlbh|wV`H~9ndujFi$jg7n zcN8a1UGbLCFZ}gI;(Luqk9Ayhn%U1UdH<6o#7)9Wkv*^lS-wQ+{fq8PS>1{qM7F*b z?xA|`Cw>XO-in^5-$h4~_dU>Uyr$pIBR39VKjmUO8j9^`7`CI+8jfDlugi9XERXNW zW7>=AYle1q^@;p0yXA|01N+O!m9Z7P7A+&EJT&b+ddaRa<kS&=D@h&~85@7M<<3-N z^Yx~`C=Yn522GaukpHv-(@z)8&oVY=W?42U*_}?g%~)~AL*LpJ8kmhgI}p45q1lh! 
zcE^-m<H<F#g_<#5XokBqn~e-|2lI)16!SJMeC#IH1^jjxts^3LL$}rN9qW5><ofHd zS@WBB?TAR~nSlo64YRiOHt=dbUo?U{E$B6Re!tOh=KyM75O3i0_dE9Plb!v>=>D<n zTMaeWn(<K0#IjvSenqeG;Ttb<AH0m5F!hHQ?qdJ#^bTnLlQ+wlQ48GDGq?}ReVJ5z z<3;B!&D{X3(r-Wg3e|ruE7(fzrnDtXziCYOp2`2nZnbBw$<6lo(%f)7bRgcCpQQDT zi<H{yv%Zgwj9`6*{N`Od?sMxipD!BMV}0hDcl*g1^RDTO^<jUN4Cce;FL_dUJH)<w z3EWOtUMiUu+>yh!$XKEC#hJ%K;8F{{?R}X1ZQzGk`trSDd@Jmg_J@sFJNPUbkR2`t zX94iV<<?jhetv%n_KT0<H-aZ{m;KQ@GVoa8Dc*1n{2|8oWw)}XK^6@N_a63!a3A7K z7w!Xmnz%puon7)}ICCg~CQo(_#{a|{y{A>rFlqRS(pcf$!Or&Co{0bcnJtsJ_u{`l ztwnx!H_jJ*q}*ZRhu>u1T*v;omVI<h!;u@m(ZIek?|)>|ryF-UZSlqkaB6>4zPzjs z!9}vIzqC(8ca26&FAmo}7>Ly#if<-XWcmmEzJX^qEegbL_VkG?E$9>32pp#YyS>05 zpY<={|2j|e#TBL17Yn8hz_jk4@kZv`V;m2AHF2EE&i`zj0CiV?D!i|>@s7OdU(_cu z*|nCfv3O(1)uy1AHtpoMty7ydRzG_M7urqQ(kD{o>Z|flyz%ygcAnnaF{b>J9Y5y2 zjA%w}JSp6-?}o9DeKgzE#vCu9O%dbWqw#){Ft5^-$Oczm4bbrA3GK`=6Wal=Z)?n- zyXLhhC9>Vs*Tj$FjTgDvnDa_#Q^|Pc8t+AGUZt#0Fq^a`CDP&QtNnv`W6{a9`&Ya% zK)bcXvK2g48rwr(Rvn8xc(iE;_s_hA`$T_;n{`-^*X~N%RlgT+96^5t%thzSksE)> z-te$Tdd7;xFG=6&<V?^RpZ(o$?aI1s$}XKVzz=^!-^d~UmeOaNeGc5oIWXRn(@|>k zfi{y5WQD68KEN3u9Y^iHBz|yNDLg5sqX2vH-pQpFPrA$lPwMO9Nm=2J$(~q_&6Aps zcJn0U#!W?@aS_eM<V_jg@L-cS^^JU+d3E!qoCMyK^?BZ8&&7ky>SX8Q@TLZEQ)Kfd z-<&SqwB6xN;+>uRl`<!97r)Hu*loipNUljEBP*QGr}!ms8)NbFLD50O(a+8JUh6!o zfG5?HH_+mxIUUl2kIhSg9sJyZT|TfAtUCESJz*}M9=y97J|1vp@@^YGi!|Oz;!5!0 zOwt{$X#Jd?>)0KOmqTO30rb=<j@<Y=Ge*$j!#O4&UW-hT$RFWdC&`DidhlVvLVUQH z{r+qALHp&U@OzEZH?l}M3rc(Pd*5MaoW3XE_q~tP3NL#xVVqf<e~IH%w*P1281Kg$ zFH9Jxw72tX?|(K<1$ELcVI1pRVXy1#N;p?0eq!;`y;oWLTQ;7=HkH5yeJaOkv#6Ig zoVAT9t~P6XY16=YlBYK2`IOTkUaNMA=TpOn@kZ?KIeTYXbJ{C>_tp*`e?>yOS-rJ` z&;N~hTX%n@y|shR4$;m!?<{yFo_AZ@<BhqD*>PhjXJSr=Ii9EInb_VDZzL|YtBpDL zgf>3b*rc)Se%5L}32lm4-!9GDJU3nND(!i07C}!3)b8d4cqO(&p6Mrex!RfYPi)5; zRm<7ku2l0&Y{xn`Xbt8$Y{P3&!a3X;Z+u;C?sUN`p-tr;c#o@%39p1UwSSK{&Udvj z;g!&)0eXL!Hr9C#zh%z7o#zdIjyE=_-9igqR=d8RJNFy@8gKmN$+UYn-uTs%X}3Gx zIFokAJNK{pj<F)}TD%c`)zz-==j4c~w5!+=Z+!d&?Jl8RHMN_G>o{(ICA4e(Ej0{i zhd#5(%USK6njiKMPc41de1?u=pyTL!qFqvP5a=<Rq)*xQJAcPPOK-HEkBz)_wW(Ka za_d#nbA00Q*y;*--|N<`t~46f93F4RJ0Ij+%0v!v<{@2cyeHF&#ZAz+y5~PCoQ;fu z?;ZO?7CKj^Uw3pSoMq~xUGptqo{nu&{#hrk%x%|@&pzMqur^P$k@m8~NpD%pnkt81 z(tw`H9q!DI;OWZ6hED-GK+l42=$>qM?5ev@XP&;P1>w}G!^4Hl^$Pwpmh?knQ=&fR zs&Vu^ex|YF@joetNOym|`8XUHW&I@(%fZhG3_|Kt_J=po(c?+kRg0P9n!gz_>~Uru zEZHvcZ)_$;G5Qv<arhWB58>|&B?U^}O&V17_Ct^7;?GzR<UR7NmDJ;#kpBKDa9zQB z1<0tf@mwW87xV4SuHA-Ty@+<k{`+@1_LJIFqh!-(@s-qhDyha^SIwNBVov1ZB<5MQ z9Sw3050>qHRO<a!eusAE70Bq*e=omd=k0ipIRzdxjtJh=+vqTmkMtb9q&$32uygW* zMyK<e{JB~`ewgyMr0mH_%%!e>c3ALz5g*m_(=G^a=A6=9tK;3#Y($Q1{6T4~lf7~w zbUT*6di)WRzXxNhxn}*ih<vk$7LSX}A%^R}Z#GuwcRg!mY?Ib+m`2S)&lS3l&~`zH z>&M>7-plh%n!RI)vAN}eTd*mrt_HqObooqZ!}J3|2e&~7`rZB8*T7i`^B6s6TGNZv z&03G_;_OM)p;nHQz6pDB|5e6{iDv|C`W@6!MjO#@8#Y_PtZkanP)L7@ldGY>vEa{p zN=}&l*HVOCPqESZot)~C_=Liam*Bb1?+ucbX{R*=2BCY9yI*!7b4{#AwoB1?3BSFY zWK>OHTw>u_Yff12iKfoid*qbSd-^UoIPVqP?<HaLJw%Mg-@uvXg6*j40C$m`In@xK ztFg0~Yc{r+O#U<n!P0NTQqLS3K2y)$(w&QJ=(uNGlKXC+X>VaZ&Kjv#>+j2)u#IoJ z_dS0kfM3p;Pb<3Ri}11oS|4x>#@`L)Q5W3wy)MM}ioGZpx9;<LPsug=Y=&>GgU&3Q zk$6Cse-Lvsu(f0FzUAPc>Q%azaT~EF;*atPXq|$Q_@4fEU@be4?B*YS!B}x8v?G{0 z@YMX@9w<K3J>P0*)|@YJ4Kers?R@{qc+itM-|SPLT$Rck9h!XkQ~C1pqCWy}@Q^W@ zQ`g*G^K<C_Mf!5y6@H%Q{gu26ZzF$zb@og<(DTeGISKl9pD}|3L!E!NJ?~g&jQ5mG z6UGNE9PQ}FQKmo2JmsTgPRn&3)-ulw=IOpm<^2P8SL}M;`)`T0%jU~@E8d~~EW8w= z9}6$F^tF(+y6yBm`g!}<{RsC?KX<Ah`|NP;VtF|q#E*SmLwC<i9-j9X(Y-hNJ~gUz z&MW4Pf+oa8_QCFuf{l)t$b0)y&!9UlQfCXbHDjC!AIVN$NM2?5<BrC4rLj7A-nx&i z+zWN&qz*n37}qw$7~RD04gAgJd?{I38k5aiKAgqbYgUzYF5ES<PhkHfe0AiZDxXCC z=}FkaM_0}r_u>zd7qk@<pJO~cV<x%!??~zwX`|*<8+)U2pf_Cj=Ax#tr93;88r0-@ 
zDW2X>w(04UGQ8W%v5i&|N3>Ubm>A;^20+)_N}Ke%Z2OhlJWW&a*MC|_J&9{?@2tdk zNlw8q`Kw}P945AEoh!Qw%{v$HhMsjeVnd0=OinUwmXDvxemc}|%qa4kdx;@%)-Z}S ztYHl;XA+;&zqw@WwYPODE-`ur-?@7;m-UQiE(epBteTif4p)1fWm(E^pXuK=a7ehF zTBC(i7riiAb%9PZMoq?VgWqs^6>GeWHR6<LYB1MGEv3_ejeJF=b4%(^H+@&jy^H3S z1oOBvL{3p>?OX9R&c~jTSpbe$xAOQX4}uR{yx?4x56rQ{7h#7l!VbR#x*3TbUU{q1 zu)|L}Z&~gX@H7%TymE3@7HnHp<Vjz$5j%X*nf#9(K8hWl`_*Z}q3`2(`3LMZ(Lm7; z!9VjJ?@8)N2A9t~##&~K2+zI!yG@l_21J%n13Wk|J)C~=gjm(i0g*D+x-=to`wHG+ z4^j8liVc5eKlx+%j(;&H_<AY+9P!NTXb`+6Rlw8t#~XJrrWgOIEmK9Y-!$ED3Nkn6 z?QZD!eRNX&Mog~a^zbKT`QMz)8FWUVq;Rk?O0<3}{kQYp0zT`Qzfn&;Z@u>o%Lmhn zecBkZc*a%8hONXD2Y_ik@Y4RyGsvfvZ<H8cAP-Z{V$+ATHf-e&k$>!8`^oQ`Z;W>L zn|zJ2Li;B8kGLswYF}RtUsHeQxNuYmo&8_nXcRaSjruy`<=+Kf2k@(jPv6b|4&124 z?I*6mPfm>$f{ALf>-QME13s|B3r_VO{ER)5)G-I&P8hsyn^7E|*toc<Y)fjyN8R%6 z1JlA)-z<$y*_j%_j{yz)Od7@>-;@Sk$8(?L1N=Y^4RfbP{#TQR_5W6AxD8zCUG1A~ z(D6QEiCV}L=ir|^&rOSG8Y>8(Z<LIb>cYSFisI}JG4J>KbGDK%xE+6zL+7-8tT@Bi zDLS86uzl5DPxWIH&#NVeX;Czkl#1MFM3X$eB^t|&KZgbcj{v&kXvT8WT0JzgjWJsB zg@2yb6ep7DztE<&7Gi7EkF)NwJ@HYBxrow-^Q?Y%e3ZF=Hj+~*#gf03d!q1cFYyE3 z=&9gF`o86R)m_OP<msGf3iRgTIX;!;@+*!(_7SeQ<hj{_ndMhB?@Igo+Bhyp&;4<Y z<u8?<;5CqC;EC-vU#$3JynG(91ELlAWPL6;`?N0wUvgI!Qh&nn=h5adYJBgMKd%s; z(GGkI&r|)i!e|3I#000|<VkbjyVQX%wh`UyNTpu~9%cWEk6KHe0IgSX2kv#Z;OCXk z!#R^L{iI7y;EWy{IrmCaH(lOSHyz~GO`$oL-*2kXV2*nnzn|dxdHEpEzYaUGTRvFC z`M8d`RTF1o97&H9pKGj`4xIVE!|?WtoXz*Ce2>>85xdcc*o_qY0DU=+`*9w7Igfq# znv)_<EdMXqf7h%pjlIBU8+DHdbH0jK48bpujbDH}W1Dry@(OA~29g6!zxc??5kn2h z&Gqn^7V0>zTW3VdP8|~FE>f%#AHYn`y;9`Xdd?r>>0^=?jEwAXfc@f?<a9_PxQ* zHTba$@mrd7$(bIq{n&;0u?O>eJ!f14XPl*vn|G%d{4=?BmGpAO)9Buv;&2pqw()dq z9Ow}BXK^pxGnhNx+2Q)LJPmESgGxO?^DIU_IC}3y;7|rUE(0!K1wLOPmf}+Uht^sA z2|jbJvzm3b%E!GFUOG^+1$r!Ji)eWz^zzYSVnKjYE%5-dgNtU}V>P2^Os3t2C*^nY zb<AR&8&)A>Ajd_YG-6R;GX>aG0GDT0nfb3`lZS989^0sN1qVL(!^-93UW2ac<^EnM z!N+~PJyGPz43A?!JPW;aoe^{JS8&!yS2nP9>g;*p>+l)mw!jyyvq!)6cJ?H(F9aLK zseKLj1kQDxJ@k7${fZ_+>@6E-{!Q!^^>-(8P|Y&!vpdlH-LR_ogt~}7P)u-Mw1#%k z9|Z6p`8qtqjPmFn%Wl7sv*4Mh1F;ddpY~a3ZaaS3n->LQ-|^t1#cx>+EHB3okV>3# zlurX6?Y03!!t&FCpBQxzV#dCBV|@ZVj&eR~j?Kgt2_CoE@Mu2U*sSN`tvZwIs1xR# z!!MF=p?<hg^6T1en4|#{+H{}Cz$8!SI`9%5wS5pDrSrH7n3(kR0sd|7_W6u{)bmiD z@7h&Kbw=bG_EK3gwSL)4$exzpjc3|RW%4DlPOm=MZ)xl$FE#Wsp|!LX)Cq+*s~-CS z<i3TxtC-ZP)qz;m8OUlYjaYCH`;>lEZ*%TyeA>+I%48#!2JHpucTe)LXc_(_&$U&Z z+*L!?@`RNaGn0Iu@ZDS9qqb2qv2G?mn?6^0ghtov41caP_T<3ia3}jx@f_aEOJ69u zd(I2&yDIj*WQ_s7WbVTe_oq8}7ray08FI;#*mj!cL*Lp<ZrZ5cLq0a}wP%CTAeaaj zoz(9YT-G8dglryDHo)RB2A|?F@Fje0R(#O7q0|MYj}6STs!_6r5v@XASZJhG&33Kj zATc~;)YWy?vzN789WkbdXN)uBVS<eln(jvSD7yO&b6wTw1X|aUf$$sFHWAt6SZkYt zUo2$G8Ce|5+(+jB5NlQ(*tfrFtdRWt(gY*YhnhNira7+RxtIMW`q{+qlUcvydC4%s z?OODY2G&o##~67OHs_-U5*K5xU+>7rcOPS!>uD<0p4j2z>{s8Sok_?7IxDsAS%tpU zio+CN)w`05qd&6t;Vj094o@*-44i$q4}Y_2s|4}SV4LWe1ulZ`^@;e|C-biGhr>@( z!<D&ALe`qU-iSn}`@_=~1!9-8SC_I^MSuO-lWoLQg!l}Q!?>Qk8ep$>(IorTp~-sW z<ZGGJ68LF5aeCqprO5xa@YG6pY6zYh1{OhJu^0H|L(3ufDt3(+dnHy0eFyNl15fjP z=+ey9@6b<+>C?{c@@Wfp@@fAk<H@JZeg~eJX1%Uj=%fHyHW?fX#?Br7$G?R<0iNfx zzGqn98hdTeu(m0zZ3AoDz}jwNZ6EMnHF$oOwM}GQ^H>*Wl|4>Y^itxH&ZQr4zRCM* z3%|m(a63sjeb&Y)u#3)4F=IU(oKAT&-Z%@KRuID{*&=hKvEsr-rfjiEvc;)<AH(<H zk8PZDo(9iIiA-|gJj`5#Tk#R$kvrBoweWM{f%_#LyLBH5o?gT|;Tc8YxzNenPvV=4 zq^H5tYo`~5M?xp(Kqpnu$x>({1NvxzP6FUG$o~ekqW(IP2S@Xv7vXmav=YF7q_`LN zcx&N3|IT<C!x_&#R+xD>W6fo(R(x`bzuLgs7J{#}@U9K4OEue`MIU{Qb<Jm8OIg=X zSl7PkBf`sB7m5EOud}W?aQGe8Qp{Pkolnt!2V+#T9^rL9V>IIfS|XhN%);4PcvW<r z<v(<A_RN3B8$YEsco4sz_MzGY=cGjDx^U9LUR^D?u%8_~=pKKFJw4HdhrRfBRk!Ly z?feJhjiRLu^k=S}zBbU;6xZ6PQsY|tHToOZ-1k_%NN4V^;%oYy=8jLWYwqh)$Q$0x zL(={hZ=9z&LAO2TK075+>6&{g`0$-XyGhXJP|cnGy5>$_^IUVEk6ff!n&>O8xexDa 
z#tb=g-}lRS<CB^@@jzX3e=j8xb<Mr_?RewGniKfzG52*Tky_W>{qVLwX`h^+-Q2zL z#(lKgK)bHF)7Mhh+?OHW9HmY4b=TZy_cddboVlm}EZ%sd=KkNVx#KtA=9>HDH{y-` zHFxmWWA1xWA`Py&Pr@H7zwU|JEx{-IYt5bhy5>$_&$#Bk5xao=zxX^iDJIH_hf>U{ z6%RGE5}oh|(5UAAdu#5C@F4`}^o{Iw&AsZ?c;ma86ZmuHu6rRNaJbbq_ql(GH{N#= z?V`}&Bbqz?Y3>Ckz4wj0>YDp&@SZiFo4dsao%weJjTPdHZJfX2k>ZbS&&3;m$a-qQ zmowK=?CBwJ*6x~X>L25c(>2!<w43x!ym5}^N;}PU7JY4?uYIn$&c~;Hn`^Fn`UPxQ zXLiB5nYlJM#v4bSpxw`CSN)3R(>@m7Hqxeo{c;t3ovdCeU9_6I(%6PK$u=yxPxj&5 z4=np|GxAOlUBjDcj7F~86vQ@MMXXjW@ykyWvs^}gsQci%PY&{hTkuaUMV7uDT~ha$ zJ))U`)NAKF#Xg8`IwWlHDY-Gv6Zkj-nYxYGZQcC{_79Y_q5lQZMdz9CD_8Q;Rrk>M z*XjE;yYCn2cQ^9jeEQu;zZuj)tfSxNLB{3``hAu@HzKF6qR;tH24ZSI3w!H)&iIj> z@j=db&8a`{>b;vd-(O8KVi}TKK8rV2ePe#-P5lG0_o!L6-Gd#hzd=rKW2|}3BYWS0 z?EUr(?AIP--?vJe9ND{$SfMW2`$K39*?SbSccLwOBrtKy-COv6B3;=W8+jV}0$JKy zH9<PR<PObexiNk2j4|P>vB7FiWw&42H7A30Yff_&Unsd5TkW6VHQjdDEYl8Khb_}R zr)qq2@2-lkd}%^IQ%<(*Pg&77Y3D_LQhp8e+)bv972WIMa?9@7%-JBl?tf-jdT#UR z<J(xv9_qHSKDm#4qoYinuB+cD{fhR=_QgjPu&+!Rdb%~%H<^QILiqJ?x89+%!r>O3 zwH8=(CixOMgIq&-QOSMOfQ|*Bi%x0_97s+_ze(>{G=%q|k2b%tc`mZ!WN1UQ+K$|_ z58g5V*?|pjLL+6+h<Jzgf%ZT(XTYp)+|*eGy^vhSq!oD1<`>c54kY*Y2cU_qifu%H zYiIv7@Lmx(%N~$r>TiDV<wJj)bocEqj8{G~WJN#tN(EQj=(9b!$Y1_WQK!!rXyUnP zOHK6|P3a$U51X8w7G8));Mt?BZQnnQ2JOKEtc|+p>@jE`|3PqImbnML)G(DyZ)Etw zbvYJ%f;rOPWRuT~w&e7rg8;VX2k5ucvLA2q+B7I%?jC5cMZP80CclM4k3YknoO;EW z@Hf!kOnO9@D`I_l=%=l~S@f7n3}=x`kJCQ!;uqyN)92&EAG_}#*kkZFQqL70P7Q9J zQM)oHh#c5Sjo`zq@iPa1=-xqF)}4#2JNap3R`@{?{mp}aEJN09SOjlCCaeIjK3mS6 zh<sSc|IJCw7l#Jqg6~}LJv3T}Z1`vJorY{E*|+*n@y5gOROyXhFm(#S3)?jI_5*yL z!oKjaFYtFomorD<KzR@af5C7b^rk$CTMCTwdC(bmCA#>K4_d2cPL=SX?Tnc&K7{-+ z6aH1nzE$nl0{kt4!5-?fzef%q{2$SZYpuQLWxSi{pPC)fcE)^{F}I*2YMfWG1I~xv zRbkJcZ;vydakes!^cjQLGu00>=@PwFbe(G3tc#${D)iQatZ(f(sjKtRYorffZsS0* z*erYiiE!V}dGYVTeQ?)46Wq1W1b6p73+#+Hp2fIxS!X+E=uGKt=*pF>vC`)0bD{Av z))-`cZL~qpH2avyI;F4IA^Qn0dLLRe&ueF2gs_`B`=SCF#L^|X%Y;qI%@>>Bh?oBY zItU_<Q@gBVE&DY%+>-4bx~qaGh>k=zqNRDXch6h7;WY2|BXN@kPBiaU<P7CD^RZ|C zPM_U+t(8CQZ_r*LwCAVIJod&K*0z@OaUT0&Bs}xx86(3VvL9|D-k18F)4#)h$l%#? 
z?1wt`!@t-AY0yW6eUMJ=ie_kWpS>r957Ej^;G+(m^%sm`?sHS$%`|Nr(s$*zuwata zv77ZJ!X%76K(r;Cif=l6^*LZu$()5R>8yf{Lr+zX&ib*XvuYnF>a1@4QF~bZgyF6E z%-g}gbjX14kB(5Sc|L35j#?Py!P%B><KQj~?sbQyLN<Q%@^iQgRDQpymoJxIPN39^ zukqbI=iS6SC(aoiscR4XCv!Ho#T#|biRa~sf53A!)(G%=ldb!1BpzA1@6EvH^SbYl z@TuUkurK?;#%UXQS*wAMcufYdX+@^IlriQ()1sX%oLTdr>8UQ-nZ~*Pr7qZUM~ya# zu&Y8Yf0#Cv^yA=jt=if9!@*}1fA8t)>t_}o%)ZuHdYXd==|r-voT#t8#0F=nFLOTv zAA7thF8GYZ&Yw=3dEmWkec*k*dwuXh@<Z5jve%a|rz-ZLcwZ&`^jIH#Rnk}2`rzIF z5RdqcYkhkxU9M|=+pU=96ZN&1J1&1!Unb4k>tnom?)9N(|A991fM?hGnENvK`oPWh zt~uH3OPEs?FwfEY=%>f}=&RPfK5QJ%s;^gE>$CN}uJsYK`XX&k)Yo40&>yNVlNRjt zG2T-5`tT2bk2dpwXV?0e`$qTrz>Q*VtvT82OPEs?Fqf`cNk2W-XTzZjf9R@rsIS*u z>$7#luJ!%eifMNDRe)V8n9$c=e6xy=wrC-(qttHmjC+0P_1Dv89`Mxq)Mg%Yf6cu< zaC2?foU*Kb66RC|%zsRqO8Rluw_D@caOheeeEiF_Lx)`6t^4aN8HPM#`uY4rZ!lK4 z&#>yp;QdE>_FZc3HA~NT`ko7qJx}vAX@k7bcAHmS>)nVfRY;q8z*p;4n|Z9|1J`<A zhwtU}I>+3uIevUSXEW!CbCz(vEZ5o6`Y3iM^!!Ttci>>!E^K&p&6RlWBPY>r8?uMm zS@e+Bu~s@QeeH9tDTBLXpU~!bFnxvjR<A}jaJAc$?AQb3XSHDeE7}Ft#~a@~LA#&Q z&Um_KJIjtETz12$o_5VE+5hx+GI3*{B=^A&myB+d<iwAe_pDXdM)zV1$qh-I*zyv5 z+4;yFr?zuG%14IZt*oCh+PeR&I2|)~0>AN~XespMj!D=}Eb#H-w2HWo-g>0pm<rti zcgHf2FQA0_59prDo%o)%uMvNE#L_!E(RpTG&bx~5VNUWTDz3*%T#x*$@+r~=eZtBM zU5{^BYmkl+<L^M5@|2o7XjlI}^vzE2)S5iR^euX0mT$3qnz7<{@XHEYRygBqVs`KC zmKE~&{uREHJ7DLbp~@xQGk>8|pUIt9!;aT8;f?$<`sb|k5H^`s>@h!MZuvHx+laxl zYFk)#q+7mitZ2z*FW503TTJ+N=PEcfv^yW+8{`-W9JOHUey1AP?s>8&Y$L$7+J-H$ z5%?O94_n1gIIx`#Y&CDidL82L2fe`dr}Tf&gl&!q+n^2Gqf=Rr3%0KkZ~yT<-LP%p z`-glNJs-X?nfQXw$3cFB94Ups;}dGwD=*{$Xxa_mgJyh@2Op;qErX_s&seVbi~#g- zEpx52VI4GSdN6y+$}J+@>wWg>KcMR`0(Xt!z`ZxRK78XP4Nm;wt-!w7rfc_m&`^0z z0_@KRHiiv*ayi8spts}EwemeVu>Tm}k=C&rnmx?l_+DUt1^vfN*ymb4^G=Jde*o-% z>w^7!VEwoY_LY2pl<%e<KwN1Vxsw!=dQo317vr(;;oODA&bVB7aN<x6=B7B9x^&CV zNM66u$f`RO>zl<L32*k|85Pt=`;_~S(t%5WxjwJvJNIaJ6c8t*@2^g=p5MT_{>b}J zk>`|{C;y?Oago6<pA~6jo^SOv2E04mSWw5g*cgwGih;|+;I&k7=ETig@kxWCKgN$; z(vRFFXBZLTM!zp2cM1BhAK%BRsa3&oMZVN<z9;W3jeWbT{aV`p1NiBAwd=WZS+?5G z?{_^f;JIvaY9DbupG92DAMEE<uIFp9&Ar}R|A&dMv!CDRYG1%}`MlMC9<T|y_=N21 ziG0{iuWmm32zy_6YW>*awJA0|iYJNB>bKrvSuTebWlvTd<@f&y4@=;$&(W`Je8G>p z<0y}UOZ6w))32#3=J42U#MyszcQ+q+h41_LuDY8CuSzD~h`NFIk|XvKPvltic0F@Y z-xb)QHEum|(GHDW2Ti$Y?*R8{sE4!TEof-V|Ge9>g}Cy@zQ%qMjYJH1te%N31lt#Y zx$|2o_gVDpx2(-gqvLEE-9$X0ey`<RbmL>UX$$VY+t8J7)pg#Du<?*jJ{;Lv*Rx;6 z7fytS530NI@TFexaJh|#`a|7#SZ(9s1@LgK3lBly{)h_?*YkbJzln#W<KW>l;Pb!5 z!=PcG$HN>851Slb@%Cg3{#V*~@Sh|es<C%Ea>q94Uv%W)q4Xs25O(3=oqt&S!QJl% z^lQ?k3lAkOJjAG<#eUc%`$S*hJ_5M&ojSDrB1wEN6dpJikKCL@%w6wiqw<Hfai_?e zg^Wy&<T?x9n~Yg0u_st#<4dKnmBc+Nm(`&?XQ^J?ra$oaHat{!MW{VHATP~WU<Anl z$DKRslEqekoBfm=C~2Y1dC5lRz0B#}4OtOAtG~e*br4(Fy=g{7IkTuCJaBxwXF=$C z?j@ZTc&yeiPThM?@Ggg^w#>bC9A{<ZUTWMrZK-u#KA3*rNg5PxOCA%wD49C|pUfP` zc%$;3BVP~xH?`L@ViQL5|IXW!hiqR{Z0zSOFJF^m>@Q${iI=Qn4#wBMKtAZ~Xw_Z7 zkMsY<Y-4|ZF86B&8vEDssdkIi?$spP4T;X89ro+dg*@8|ED8(!C6kiyTWNi%#;CUB zlIZQsN#m`BcMB&IkYBk~W{xwR_nV1pRKLc3ndUR;T$6c@pJ3FQp;m2Z{r-~1y?&(G z<`Ru-z4wvvwLx9)U99K*j8UOM#Oa=A>~G^PKp31=HTlukjZ^2|-3?FrTyC!A@28pX zpQCq2n{%<AcUsR2d0xkT1i_+tss)SEX`hG1>9nh!>Vm~0e@Q=J@i??|>np_F124rY zS3nno)Mq+l^;Iq|>v?~k<@e_qcN&~$KAr`NjL~JRy$Tu={EX|*iA?<hdY%imTewfq z06#Lv+SYTd&O7OY^JO^n+y;NGC^W_hp0l)Ht}Bcb4E9r>aG2kUY0dJ#&wTD0!Jct& z`a6G#)31BJd+dJmX_rRd&0jcizq60kuWQY((r;aXF*-oM2WBB_4?l6gQxf_;3ao^$ z0B5;qcMG3dvzvB*$~gF4P1-HHz+dt`%OB=fu1oV>?y>0I((^w5?t0#>{-QCu7TgxS zM*bk?TIexK!t|m4&F4FL?lGDk;u&LaTE}za)nsUGNc3^uZysih(z9E8`8~|<`TVZE zQf2=>Tl?nKbdy$VH1|}CCaXtT^Yo1V{5<cbT>$@~IiEMX=Q+#j^P@eQQ?<1hZ=!7l za{W7e9<k(>rrnq5j)~|q&;QReIG2&1Th8WQ12zV)XUKN>J4(+s%9}6b4k-KI_+e_( zB)jjIG~dE=%=uoWzE8LImihchf%$wo&*4#{e|3y8zy3F3_!+YhUz&T&wfJz0+MamB 
z_vvpw_VzI%TL-`NaUOP)`qPMA04LseN|p02cD4<>B=W5qJI0t}j$*7<U@P9Th5LfS z%}avAb?2JDkI-C!gJ_@*{vjGDz(<uxKf~27--hj9yJ4Hkm}|xw`>WxrYvvgH75l-v zA0?4%(6}zc9Agmg2DrQY0H3|VdZ`WT!r=+)W*&KcJp0#w(I{7cciZc8)7FtY*gr$b z-6}Xho5c2(yl?T29qWXXx(m!Xe5`iBLF+e;h%U3i^RVbXo>z0fa?L5me%0e!bFH!8 zz{bAjGT?xJK<$SMo|$9KcUoyro^cai##t7;7V((~ueWH|e5MVr-QDo|4RbU3%RhSZ zt!=c==U!Qbi%0&BXWW14!6Td1?@)6c;%Oh5JS}~+$<ylS+l0eb;smIHrhBJd-%IhG z2u2>_jN8q<8e7fpEzofu=h##HKNG(2B%i|HOIq_Yea$tm*O<_4|6|~9n2o;>cyQCL zXDIi-zd*dG#{IymFV3Cnj(Wus+s{O|x}T&mXNxZDto`|r;9;MYtaFFC&P|!-I=@YO z(QCfLv+yZXe`&|9uO~$o(}(10@txc0gY$ZnaH!|T6rH(4qBrxrF2@*M$@6;T>Y}Nc z5noe^{3Dz4O8q5WXTfxT$qdHYLHpk5cJ5!T{YoD_){P$7>waw~N0a!$f77QvzsTp6 z-1#tNzmG~|-$bXD4*wTqpHQ)9dR`NEo9Ih1%!jczn7RcrQ5}2C-KGp(qlont7PPG5 z^9|po|N6Mr(;^<Rm$jFFiY;&z@V@2~(cCb9E8kz@-bw{Av)5IXj6=_j%;fW*oT<b} z_%CC;J@n`0Q?Oen*x9^1A6Qg#mcPU^e(OPl?lX10zvr1MnKFo+B)L*@Wc^R#qwDGa z?qk4qkPTaK(gU`oz(YJRFU7Ne4!&pY@muLfI2*#J_=I$6Jy*RebFUrjx!3-6yCpZd z_u5{bv9EjVwfA|J&st~Gx9ogne41y=_s#v&w4G;(@Ohhe0?1^u8DBaRbh+u)*OMcH zp+lFyrwy?}CS97a{kwHeZsmC^vP&0TzMpK;Wfwg{n<kAym(S3*_UIZuL)uI5Wbr=T z*-o6(^Yq)yy|u;6;c<Q!jz{!@+Xl^@d5PaC-nO2-F^GN9H9q5R1$TwaW2rMP=d8v> z-tKDe%+dU$E&8!F$C!0a-OqRRnZ_K<HjDfvckyfw&up4nZtnNr=0vJ^-Uaug7TguX z6VjSJ6U=k3()pfZeZRr^4&T+>X7Sy`8$3wk7Q%C`;~BZZ_qUSUKw}!MNfFt8%>VOG zk8C1uXW0dFOIn_aj|#mqw?sKMW<6!f^qY*k&W&{72YwxRne*^Xu<mOgUE?*s0~7dc zNf6tCK}~F9`IjRLZUeXIDfGYGxccJAQ}o{gEWHK9&T#&o&v*F1Sl!83kZ0zA9sAOF zbX3C_I~TdIj?b@WOJ~iCo=prpHtI3bqvYR^4(HUOk^KNV^Y>=cIdb!aE;}UeZ-TFW zq&s#6nZqNq=}Y${U%Sj$p?<LskM+_<_#QN?f75tRzu016?#`2W!-vRa#4woaP+l7D z#jB2>-r@ek^2vhr4Jon7_op?z1-vIy+iVdqJ96_c&`UkoY?HVf*3)M56k`lNWkO8& z^1?5Q)^g7SIOcoEHL-`<&eT%E#<af;KG~Z5I5|bl7^V_0Ff`|Z&2R`d!x%c~h2T@U zP<XyvwnRN|g1-OE_rpBDmphij?x>lOb5qTXAh{is&!D`^2ACCH^!ND6Ys4Fqhjg?8 zSEtQw*n6Hx8Wz^Qg2C^@H^6IkQck!JunWC4HCFq^R5M4%c=8*E-pYu9yYkTMQ)7?6 zkzqa$y`B+!@~2Z{2Z5c%6C{gdMnB>{L5rWbN`8}kcQ84$Dw}-8e+72B%QUXcvN!Zt zPmFs_z$pZb_}k$HUIwt*0IYmHU{$*+NQ{e@dTF#94K8;~;LPbQkA`SW@h?Yiex?4| zCyY#uJw#(Ok9e}|Y#!`v%AZ4B%$JRb?kq0y8jarc<`UBe$X?lo40YR|xI;vo6TE+i za<bH815}=sGqAnZk|QOXJPg~uJ*|m2qo$d>Gntr8qxRB!f+>^Iyq+bws*&*$XI%)H zNV%f1*Ogxj?K}N61J@SbPt4U4vh%O7_p9pA`!x^uGlOT)=yK(D@RLjFO7OmyTonhn zkEpy1MH#7Pj)ze^*V_AMYTxO86M4k>KS+G4ndb$(&mvA$IFzpY#1+c_LQZySV5_}5 z*A8$q^AEhqxQ*BnH)A*a^nbA5*|{hlg4U|(&-y+#vf!)u%lPeB_g>B467IdW*mQoZ ze7EM9*e^}ooB3`fCsCV=+$f6qb>{N&d-0V!z_mr2)Y)X8Yt3p?zdt@IIDA4(>u!Ud z^2n`ZppRQH$c&Z)gYVk!il+PXT`@Pj>v#GmPej9{g!PX8U#!>34{`un&9=@GbDtkK z2NnF%!M$*ef1%~T=U|fQUjs~Zcgvac1Hh!MIMCp{dxyCfj_rH868l;`cJ^j)qFl}L zcVyUWww@J7I=^D9c$?n`uO}}vxqk3jEI+h(M8r#eANW}e8_wo7>?iu(MlI(j;fcE* znA~(L@`Z4nIqNI2pK@=Fb8NvZ_5yi2I;!!dmwU~;E*;WicF_)bYk3Sj-@0c+WW}Ch zlU^sW9@Rv_?^Ui`H7)d8kBlHZUSj_iL|-H~Q4?oOllRPba?9Wwabhr$@%gi#6U*7= zYU9PHf;>4u{p_S4_ULH!{{}WoJ$sdB70|i%#|x}q>vq;lp4=Ym{WCCj;CDLj!I%7k zspD&#MT}c-=bYnfTS%^@R^amM!^m6Yv2prJqpv%elk>eF-><NFYd&YzC-CLw2hy7G z)$G{K-yZ&Q*^6cTW*Fy0WJ8`{d^u8fWB;bbx5QVz1AH0Bl;@0=5s|f=--Tx&J0MT~ z?$-E9-8n?I9P5l1X1r?JTi=T#cevo7GefxN_wp{>lj}*b@s-5g1FHe#1uv-tu7c-! z_RR|T>k9a*=qDr|OHQa}aCa)>2B@b}dgHXF@kholSdZR?{ISWnFFEo{eAAj^FvS@4 zN8sG{qe)GYO(trM*Nrvr$a=EY<kphcV3Xapp0h^n>N#t2fJffZ@eA5!_itK{OwPMI zTH=dl7~XRuTlm!Tb-dR$ds>t7BNWoFk9XRBl-g8?JYnu5&Sb5j@aURQF>^JLe>8tz zhA}{E@Oo-<LyUhKYs^R9ap1R_vrX^-7Ron%i{K&pS6?&Lma*J)!u_%G5!ATTp3%6< zxuSUNZT!ETze$Vcmb}Ji)vUQCuj=;{pTA+}+>)gW=9YMUseW%pn&0d3QST<*ukq@4 z`b*lVeOk+Yd>1+?+&!}7So8T3^Ks@qpLvgh&P;sa)30W&TUsq&R6e<*Odpufv}NG? 
zv}MRP%NO$|vVr;DzzOC(QpIY2nRt1vSv>qTe&_7<S0{NQS1``=|L8a7H{ez9ETS$I zHn1_0VWpdzG*I3ra-ogaw!QJuq7kR9+6mA3-qL%DQcBZ`7A?v}9;uo@PH@$!Y6pjI z{14~3^nq&V(D9Ab*!!s>X8EWN0|)u2s!!l6yi|T><}6>~W5A{weaPMKkLXu$YCG8N zqxzWrBe^5Q_+4;2NR4RXheXrzll+RD1$)@5qEq=vUf}!R_+DrP9y9I=z99d#cVP`W zubJP;%%y1j_G+tE`$gB8pYcoNQ%Od4?St%^g6!HCIf$G~_mb;@{6eNrMKKb{<EixJ zH&VmD`d4X#ct7JbP2}$dVg;J8Cu+XrFiI;ks0YNn%aBcru-l|QT0<SNRAkGXXyG@4 zod=Tp5xd}t1`YaPJ|TTFpL%^7(_&kVfSC)ah3{v5(Ugs1zN+uec#<3)UXMXP+0lC9 zJoEUi5MTeE8YA-F93%4gOE_cry^#4XLSAz6>zsk_Og5ZaY}89=pLW@UGoDCF3oGA} z*Q0r5MMJ!kMt>pc9_Jb15d11Mkau{`oHuPl+_8{fv-T8gej$8Jp<KRG7gK%VvxtRG z=R5E)zXPMtaK00#VtyxHCUh3x$tPic=Z;creQ87O8TR)x`28GyKimF(wqeG4)t+mA zKi7z@N}3kieXg-0*T^;D@HhTnS{#T?PBy~WLv~z4{*9%cA>r_OX<;w$s9l#9nN-s! z5-uj@|CE}?MsW8hgTF^gro?{2U)3Vxxqo^3hksI%9(#0bAg2Ex9c#oMy?9D&6Mv^B zof6)3ae8d|mjdSh%fDpAmX`7xe@kh5YEpW5Qz^fX55!*goU%Qg?@PxUv1h(w{r=2X z(qmH|40JBLAP~#TPkwXh=>DshUSPyj$J&RU@|19ju0kD|=$cE6*we&BbnbS}(;-oC zdeZtw=x0-7A@U4}_KF%yV}0S#UJp3ux$gV7p?iSm&3myGh2f{^KJIl@Zg^oT^LykH zVz{v<E*#+7|4d2x>ZQ!ZgAIziEKM_?Fc!Q_JVUswdd5=KvL^@Dw0q}u5}zDdL%$p8 z<5Bzl*DlUjy^MFMw^+_Oo0H!!uyOb)r5i(Mq(*ArON}g9pBf3zN#%?f96iHReeneT zX7Ok6mv^SoG=#PLme_DvY{5k^dENyR;Itxi*0fmd@M)$TW$I*S^|y4g)2GEAAKu@5 z9y+~$tRT5`;~8mzjT*1ICVbJNnk5%4uBpAKrY3aJnx9bT41VN0_^^eC4<9!1pz&6^ z#;Y-LFcTecX7_k+<=HymOmx6JYrNO<62^O!@g8BkWsJA<qBZ|Anszguccs?ng?<M| z@9!EXCt9g-u3=qYcdhFhdtEbF*VpZJ&FCL%?Hky*r@!FZ2e|eDuF&`#<puaWT-TiI zX?j`X-um4cE6s6pqBFb3&5lmixZ9@3YG0Y&y}oS&to6M#J@)u31FZGEG$7W@xG$OG z5<kp+-6h;N4$nzp+?>zhbIr}3rXOnD%O8YyX}#Ie(yno{q9Zl#g;&OE$6VPx?uA*_ zxaVIPdwfimHSYO5W88J-xP2M7FXQ%Q+`f#P{kd^PM-L0H#Sg*f*k5Nn1YWbEnnOX? z_}S46jUT-_R{Nu?yT^|Xw#Hv~b?osU4YtN#H`ulAevIFb@%u5pm0PDbI=)fk`yRH& z_lT~n@jb5bKbXMy!>^0go_<~T_``=;<L6!%d;Iia*7&)@T;qEg-^=)3#`m)RtX|iD z8RI{}8BjOD6Zt#;S0J0#O)w%m7_(vcrCm8l>(8>}(t7wdvR9=3_2dTW)wO5%BF2NI z8<Um=Hiq8wMLg`!(0ZQfbB-^P%;#|a#sh1$H%iIx&p6)7ni;;jr8AzuXRh-~^OIlZ z$AYzNV_A}Q$marI(j61$_h<Rt``N=Y4$fKp*b4BJGk-C#^O)<~OuwzfMvjA@Rrf93 zxVV2{V=!6uRdT}lDV~kPGmMRS%LX;oPT-7=OpK*HG0_}jqQ@8ZMSQW^uk!rqiQFmT z`xAVh#2NVgi81r{r};jO-`Co|f1mF&`2C0W?`!$~b$)NKfBzxhui^KN_U{dR{}#VL zXaByD@3RB36~Nl;|2bdm@9)M}zBN4QIW8|p&^^nYbv<C?MS9a*@KW2A+ZY?u>BMhz zlJkV~R(J`$p46~KGCk+FX&dB>l|4za-_d)^kXbH6cKIqY%vX?QE+r=u@rmTAk_<GP zT6Bj_8P=I-<MASwj7B#4J28}s>s7vG*#l;uhdlPm)v-d(3KM5TCYZKQFY-EP%cdas zE;?;OY>Dv6`5;@*q=E^tG~d?TN#HfO);H8Q5MM)4!)oty9@!ARWhus3x9#Lmze_tc zH|^0oy7p*x^iFU;t_v<X(Jug(AbYg3Ymbum+1{f;_ULW)9u2ZbMN5AgX{`7MYp`Ob zt^74sjHlu|^P>MmuAY`9+mG_iUIN@E0JqWx&F7wXsZ|TFm~HYZ>s@ynY^hPT`{y@^ ziM&L1g5G6Dr-u1KJ3iH_Eo;SD&m`BT?q<g&3vn+>@KiodCwJF{<nHo;7wJ}WfJ^Lo zawffOMEV+w;UPKfr9tHFQoYDb*24Yw=Z%uX=<Nrwy~dM`lJBv8^vun=7yESIKqOx> z0J7s8)^xmn^uGF0A7T2?yYLLNAJvOv9CIEiu6YO^$|3d-<{`KqTyHdRKeegge8p)D ziAw)jGcd5<XM|?dF0LgX?T2r0AGV{OoFe|%!d;WdaXE<`my^hGDS2cnIWA|Cn=YL^ zbyeiJEaLa6<hYzkj>{$F17tthIWD(%<+v>Jq^1=`s1=K@sJz<BS6!Ly3m45wvuYu+ zcbs}7$kUrE*_Vkml6=*`J3eJsCGbxC&5Tn=Ue#Wkg}u19z2@vQC(l?Ldpt4ESgqu( zgj!nRlc=SYOD(PY*!Q)AiBIA)m-<ZdtIQ{_U>SOXNk31ZSKcsVS0Oc^1nay3f&Jb^ zwX2j*RP%0Q-ag<hcsjWjmoT3ZJ#vaJ&4thG9O23-ii~d8_S)7}+Y6mJ&1=J3KBqSB ztrljYM=VOC{@BbG#^5uxB}%MTnU{Oez_!TX3_dz#mkryl`Y+o5A=dRT*5h8w!c&<) z>q%uTMXV`Uy~Ly)$*^V6PU=a}j@II)oif&<{HWeDx0`EGepGayv{ZX7ds)j~>e2f% zQ^P^Sw|x@pnap~sSkL79(_5ZlJ>!9^Ij2R|d_>EP^Z!#?F3RkkmIYVARIu$0u1kRH zt2W(y`2RBa7J8EZzX)I8Y}Hf~-6q0U=Z5(9-?@J<oHO+d@Zy1Q9;@zZCuhSaoVz+3 z4sh<Sp$4k%;TZCT6BpzqE~rH@LDW~oE?6%67mmlwCrz0sJ9;y5MUv?xJ6s!Z%MLlF z?7$h&Ej#=zsUeTJ_ops2V)$dqy=9>pPw(+W+B|i)cour8y*MQLrBY&=hsd8jBwEh@ zPbDQGdkvNyDmseq-Ll(Ym+Y1qUGY^THV(Ub_3Wfb(pIZZV+;O3>#S0aF6%Cf^4Cr_ z^PXo%>xcu8-8ZqukyAI-i@Yp*hZ~m9Lc@u5FdUc{0&neuE3t{)!QUS94!@7HNwI+0 
zen+kufS;!ho3T@W!l^x>^U=qcIwNPBGOKg;TXlP#e$?K`R1M1G=LYRzGeVagjqOKr zmCw{?Ejeop^OYQM6j)ez%#OC>8xEq^R(9#N$U3%O8$_?Y&DLv!=(TtEofc~%7eZ(q zegxTFxIZkLlyXuj_BS*;CGym|l*mf%F0LBhH?o?$ZP@n-MIk<eci$n;-$mGZBr{#a z9F{!%tzF9orgCRLBRXQ)gwF88vmXoHIc1mf8x-JAXc)p-!}+N@_W?fhID15UioH36 z{Z9RCbFJlP$5=}~F|>(m(LMNOPb%j}TE`;jbJ;3#x=^2DDLD?7Fel~tRKBmJ<Nzr4 z7_q6`{ns5D-MiR_9J~~nQ}f)-x%&a%N3d5O<sP-}$A3tjD&3=A4xG|?7Fk4I2K#<| zDt$jke;MZecyIUv{?8}pWp!!w#TsW?3OOXm!==0^`>=U+jl<shfbUb><1n9JsU7yR z9_{!(&(-c(a?z;%V0yAWXZl*o?;o(&oH;+j9RS6`?n~-!^NiUhr>hP3{>}P>Q(XPL zYPQMkYBP<wucV)Ot~UG3HbcAGoWnipM`#1w&74ttu;tbK{<g;)`5Bqnzl?mI%8_UJ z0Li8J7xX;NpH!RYr6I%L$KDB&qgnZghZa=NxSf3@dJJM`6n!c#<RJ8>-^5p1rVU-4 zSCSvgo0=c{9{q8LC-UxN#{P;k@&EFh4_z=v`dV^m#$Z16&iP&Rl@mQ3`ZC-3D#*uP z5lc%7&7dxYRR`6VAM<%aGlpR2n~5(|{fMuADw`=bIP*PEh1J%>9PDSg1r@PX*w1|4 z&<x}pb4>irvD=tm8F<urTe}b$$K%~zyU>zxBvV`aKzBWJqmt`?ctredaI~*8Uufmy zFI%=+<b?=&6}9XnQ{rbmiFx^ap&7`5%qKT`5WDPfZ2ZJ)ne~u;!1FVBi+8GTb@qTj zM1C2y8$ugB50C>&bGd{y1W#%Wy5Bj}Tt_Q<fb_t6OAlP`we`UIzQ!2ofXJT9(E*p6 zIw1Ps$>@PCeT*@C*rVMr%Zom)e6h^&7tArAn9|`|19bQ^zMH&}_UnM(zoQ3UO{|*6 zNz?_U=Q?`etJKQXI7j|$;cJyM9{76b82HLRK<piK)B^s5%NG9sFZNFxv@Ki+54lAZ zv0t>sH>akCW(@VDhTXsU_%1xP+r0Q5_U1MJBxi^XPZO4BTCh~CL<{s$#(b2=WG1pe zA@tY^&nWwGX#=@qWBT+BFk;tqcpAjp@#RFvs}1mcg>{`A-3sRx?aulC6V9`3oL`)P z^AZOK;O@+S6X$;>e(OXyzud<8LL29&IpYCi-!X8`-MG);{4sF8M*A52nf4f4N3`rQ z$Ip@dKM(6J&*x)qy6+Av4obEdPZU3VY|@$7#!R1$Y^X+TPqKK9e1wv%ggfHPz*{qM zQ>w-7{=EfV-2L5Ye*^8+wvKoe+3zIh8Q5|DLOg?fN9zBl$V)BQpW2~q>5*HYJGVW= ziGSH<uIKo7lf3MWRCsx)PyOm__JAo5!Q*9zIh5Rod`rpP)lM>PP>1<0p8qp&WXVQ@ zqw6jP2J}&Oxp%eC=K0b!#ovAK`vChyJU>-?kasiL1LEstm#41wxp+Nt>~iH-D@x7{ z7o9h$g#8xt-hIyt=|x*}M|io<#2JF%zFawyG@j0aRB|UBOiuS}Y+^2=*h$R3uRS+) z^^xT4?c&Y*sNGX0S^x(=`W%1ew?D1}Ua19EP3aEaJJsaPj!kMXxXbZBZs5;PwrV@k zh8RxZG<8IH67P|FYyh&$fF;DHb&xkYJ!5isnb{vU6YM7E()M5E@&CY+vF#?tURFS^ zpA>T3uKe~-i#!?ONuIQ5^Lbi7HeTjcPt27!V_4X@yQEX`!Q+#&J7#f@G)6wIgV4Y3 zfGMWL$EUu(gMOiRhA+H-m*Gj;?&00@@UI@$IpW}AB<F7l=kLW8v3<!y{L~>~-1M;Y zX5rR1GN0e``8_|j?oaWR#OgJD%3La;vC!WFu>xo=Nbca;z5(J9JypaLlp7hIa5FNd z7vHK+@c`uZW=>5T!EtY~A?_GRohwg5!}-wl0rseunryN$N*?jjhvd|J=&Jc>eDqxG zidC{J+H#ESezLihG53Yg)v@i277nG;WS?TmzE!Nvi@pDL`p_8t7^ChmV_@H}l6^lx z$LJcvw)Y3I_uppQ`-9l~o%2y+9PMw7alExp{*&VPlJRBt#s1q5JFpkKrVsmOD!%MA zV)~N0?#^;gXgN8n(Tm6hajp?7yziFS_s*U`emM_&(5j16%bk_Ow9y&V2Hb;Vs6!yU zeHlK<XVCyYId|srothFcokjYskZ0Zua%YY~_WQCCTg_Pa{-iW^*GsbH4~~Aqy{~F; zD_u9ZCn>`JnRl8xZB8^-Fl4Wlp_2xyf}QbVBlame(jnx?yp+KH`ZG*>>2ldiz1VlW z)G0cG{-JsYZD&&-L+{?~U40lCESG&C`*sO?WIZ_fj99%-2YDiUxI^5}v|$c|e_Fir zea=2=AVlMXJSDMYPsu^-pr79CX#f@_WrbGV_&^G=kl;{#9j_0)r}y=4iT!Q``@4{N zw4LE8;V;sD0e{#^+s+O&sV~`f&*t}5?yhJ~mR&X1lvCmt@D4WB7{6`jj+o%qnrm#< zy6drJnl?92ZrHK83Eu6<CWrC43+{qR(VN#j)(VUTo74e;CeJKvxzJTnWoon?`Q@C8 zu-6hB;6tWY9T1nTuFK}}Dd(H!(8{^|HSi=F63tYYHi$vd3+bb~Hsg@b*Jk`jQp26- zt3iAx;4TupM!Es9@bJnazKcgyLSve5%T4vGCn9s=dydVt`DihGuNc{UA9y+&nu)?c z!q6!GhNgv{)T(-DbT4Pt<Ykw2ra_-o&}T8UIE6m2bH*z9o6P@}JS&1eCo-OB^pDWu zoqV<r=bR(29(KSec7#mnG4j2%pBpgGz6$OQ-Nkdp_m2cu#ZyOytC-VLcr^Lf29WQa zxuyF@W=skHgy&VB=0BZ7yDIqf>)5V|4cMl$=2>|3NNDpzcyyH~JvxaPg6z?0tNqO9 zkHq--Sih+oaCS|+*|(Z~*5qAw%`VR0Di1ly?lSkv;OOP=5WCnr&bsWYqAl5TESk!R zYCk;?vt-?OdEdd^_Ai|ncfr{v-gX9OV_R^+r4HM``M^c?(_+@!KCL)h#QG-z7tVkI z+&KWIss4`slfv_PUIbip#>o!7v|#J1rJP-Y%RJz+@0>t`b#|pjvq$<?hd9I9pilRB z((x_d&187nBzW9JcwHGh?=pN|U&RNKZ2G*!Yu-M?+Aqz2i8ty#f%vkO$7WFU_wa3Z zzucQCf9k>bO6{M+S6tGdz4H~`kqupYP4Z7YWB*^;(RUHc<i(!9lTZ8?V`NtxZUka$ zsjGA7JMkR-Uz%1u<5T`WUVTjbOp>4chiv&tvQ-^)?VKSykuemTbOm`riIG{Z7#T0M z&d8JDk5d<=g*!|-&n2G~BID#BpGp6g4Nr1dc^9n=js~C=wN<;`<S^;8lEZww=S9yM z1zo%Mxbv>y>@BFUcoh3pZ5){?4LX@jjJfvWcc3k29ecnj@~fZvzbh5Td%J2iICDV` 
z@k3Xo8Qy7{caeAsvWG7tG{dNDzv$|F@9s3dyL*~Z*>F+y+wqk@1Kt)rs8-Y9sAQj@ z?k5ykzRs2mW0ZJNA!pHS&LZJN?IYibuly99+AY6$nX4neG{7fJ`GvUiMEPX|``3|2 zOrNJIk7P%WFdu4s{GZ4pAE!%J$c|n{pOQ!JFy#^QHt`*Q-2aI@@{T>mFnix?Kc^wH ziXUp<OJ)^6)V@#8m}JVViTv<2_I-cgd(OiCt8c2hsWVY_o$E=BzG=&@2h5rXInjZv zK{)8`e#2+|zS^n%W8!e6jl+-Qqm;W)^fQ3?$!hZRf5dmimdhtH0h)L@$r$L+*RlHD z_GQk4t9}i?@gPfaN212MAF-uh&-)R3xdWlQ0u&O|{fOoD(aGlBh#KOz?|`;+KjN*^ zE!}=w(y{MHB)RTK)Zx1uMBZhmFJu9KGkV2V-Y-%;Th4{{3u^IY`6jFXBF;m}Y>E}G zWG+S2yd1?d>K|1p_Mnn^6rIN$rWS@r6DLm~H+;g>CD{jFzv8Td+zY7ZTzB7txZ<+J zdk~gw+_9mXaHn70x5?yw%WTGR>U28xcEz-+{gmC^_Yjg4_iNun2onG4+(V#0^BzJo z_7=@geuVRxgBz~1`Azp|oZs7sNA2<Zr}6SP`Q1Gi^83Ooz5Zg>sXCV8Bh9o?KCc*d z%Fda|oGCqXdO10#R3Gv{avzh=DsP&2s_HeG_I7amDLJVY;@__UKZ~G+wsQiHl|dtM z>U4SWTbi&(c5Sokbd5u9{R3zI66)hjq{gIbHeW@Z$y%Q=DolONdB`cn=s!!T)AuC& zak9s%r-|ROY8iaS><{1XaD1eV>Bd;|JNye8D5*^+=eqcry%*}a-yBM)7c`PNnfa<t zv1FXJ^eNt<dlw50&wk!7UvKUY(TDCc<wX0*MuH6J+)b;X%~iDjuY`6+Lc1-Db@blu z+-03JFKLJk>ds##p8Zssa<Uoz71*Q``+E=AjEC>L$FAngR!op=BwnMMeQeDWnV|_9 zTv_eZ{=34mmsq|i)tMaZR}6RC72N0JencDg4dpoN0nY>d#z<ceb=5Y^jH=$yWa5;1 z%;8|XJco82jM?M;Px^7bWCJ&9O7pu9Ugy2vx7uujZNWNEvZKQ*gPliiim!Yc8kEfG z?z@h<Uboi7SIQPIU!w9M9qW#r6LX^e@8M5&5W%Gk8KP+PgjhP~`5|m9L-4B?Lm$^5 z-{c`<`unA>cH&b6gLY({E79L4v;X^K8)G)$7pU4YGO`3aKzeoEj7py`oTjzVrno9? z^$6rsWNqrarf&Bkr%t**ef4-xTGiyEb9PnTaQ|ar?EmvP7bG)JI$E(SioF8+z_z)_ zd!Z!alogx6T#GLEt@a}i$ez>8-jV;`gjuC;HF89g;@7pOv_3Qb2pcJKW0S-0f_lf1 z1+c%$_Nv(C@${#A1hxIqZ;(aw-GI(RKeFtn`jj5K;tfxO9~h5k9M#xy`jU*2$2-{3 zk2!W5wC%L_onY(=?(g(A_Lmvki>*m<(egRyeoa@O*#8aWDcSpOAMTRBkk{nf(B7H+ z7WY<deL!(6E*-90A298PqDd<+C9yuSdk*SM&58b*_bq)OEBfE#Tx0aGwPp8eYs+T7 zqNTzFTU*!sr&dVE+v&sCqrIT@{T_PPr+xOoTe6~ikt@31InS~y@m@r6Lca4XJCawt z$F@fnaGv#cW;^+{yte!{0r^dGwANX`UJIcg`=GCh%whFn<r(X@^T>@u&?9o;{X;qD zhjGrIM!vCr5oce?7At;VUo*6mb9D3V_!E~<E2ADCVj8@;miibA(Gk|d554Gjh3K!f z)X`8*e$Haag0XCB7G$#i)-#OA0sfN7+osr;XBNxPN3CP-K0Wi0U9)PjY8<A9C)3w5 z`l-&y2yb{uvNSqv3idqi`HNq)uomqJo$20@fybQtZ+fnMrTt!bm2b8DXb!DZfNNxk zQ6{XfLXL+wh`uH|c-ONYc&9ceHr7Ph{sQLWW9`C`=4Ij1tjmR*o+-PuA05Y}uk~+o zCx$b<0U0G|+x=RAOQmi1s|1(FvioK5oABOHb8V;eFz%&-_iuuy>%iBw;O&}*BR777 z8kGHP-EUCz31Tjrfzijn*O9q5ci8|lqj{otY=BX0fE!#kz;(#W>$_}#gF5Q%Si^c^ z4WGAT4eQNVL!C9I{m7q{Dw&%YPSz-yd$TEXXGZ%nMgSXN6dT|Mmkm%hq;>8wY#U%b zHo)g?8(_U@1MD8-h|d~hy2fz9qM7=Zdl$u5Zdej9>(xj<-Y~&cN9(~r>^1V1%Lo1p z{_jT@1!Bwbf4^SPCnEZ~8N8*z+qd%>!1o=7*6K3S%v|Ea=~QSfpWN5Ff4|`|dkpyL zhQ)ol>s2vEhVo9R%_>)$m-=+qg`&*|_`a`Dn{}=>Nv`@!nk&BVNoo^ywVB|mb3~hs z_`XZk=BKVUi&MJm3DIU5zVC5r^9xs-mr}ax_|RrPa>lu|nZ^F;nKM~7I4f`O>*Q_A zCztb?w2QwCpV)514&Rsy%?*Y2hCzeyiF;2+C+HWshWnvq;6^rsg~)~VHMyN_;NxI& z3NoXWt5jz#Hb#?|h%ej)A1NmG!Z#=_Jc7OCC+?!qjJu$2CGQrcK=<$x#a&=WH2KJ2 z_=xgiC?-hbHPl>V?u&fRsP#5~>4d*@F!y%mol&FMiqwup<Zj7$*s7P4UIX44a~}OG zwj%vuL$%D!x@oP%GN_g)xp_X43|aCh_5$S07m;J@IqMo!vl-iKQB~!*q`NCS+mo`w z<FU`Spx?p^x_-;yH`y6fd$o)+SO3o>R}66|=Kr!i)Wa_&AGZ(->&V4p$iunDeipK2 z<$RvKn`Bf;_jxfXvqNi#$Ck(W|2^@CWRnl>aPvXwB3GcROD}SGj`*^}bGGuHcus3? zz6N-VVwBwQU1Z9z-vZ~~1oziL1J`1Ag7<7EmYujG=%b7O+^+a6_Js7!25QclaKYwP zM67G-aq3)8@??c45a;T^v=$vlai!>V9c{p{kl07BY~S?dwfjo5YdV+NHM7g8>AaVk z&cw!s#}fDG;J}AIw1#+G!R`|3X~y|Kna|&L$?m34Dmocku6x}T#Gz+ER}LRF?^M{n zl-K@%T_H(vXgy)oj9mQNYK!-;^O`l7{laOjd9OS(`hA`WpKJN9+6+tJ!(kU6UJQ?{ zaq-B|G45qJ@`KKr=jl`S0WWhBAAZ8*!&%Y);(Hr<h2|LAA0NF0ejIl3;~Ls6c8_(8 zI~eY<p0daKU{`#l;_?r%UmQ4ZZHt#HXG?lm=Xz?#7U(Sf602^<DOXUp1K!jBeXDLq z2H!Wqlb=&e5<FRZayk2O1U$K!eYubSE8xp@#3X5ds%A$k`R50#|HrNVce5wbYx<bA z9i0Bpf%Z;Oo7Jv1FZVI)BRFmPv(Njg&3adxWUI!2(}wsdvmRdh23MO)tUUcro7dsT zx|f;$ysJ%3ika))Y4a@nxI=Az>1y+GikTnYY4ZsDcn@vh$2)rRW6|Xh`9u73r-I`@ z#v9+J-SKjc#9!{a(~QGA1>G<!5WAH=Z+)axveJ(F<lN38_?hlub|&}rA4YGF*|C_? 
z>y;O34SKv6o)qe1`I&-qgX2CUwqh*0b`JJF;adEwjQiIq$+98Icaw=6JA}DP$IOe~ zhU{Y`8)J^(FFpbfI1Im^86UsDMY@XpzVw#1q};IfG`ab=W5-z{UssUY&E`Ib&uZ-1 z)bl=q-*qOj(Jzxb&P#0czu1!p8S@L|P!kUgQn$w8q4)3($x=4o#CqrR_d0fgjj{#U z<F>K3=!^?ZyTg&>TtD@Hch-`dYs0`{z1kg8O}kQR)kyMl&A#`(&a1PGu`TG4!7SB? z3Y2JWX6;{>jiCX3`L%&Y`CGY0<7eU@@G-@(c*VEjJt62;?`bW+vVS+v1ZrD_PO@%& zJNs5~>gp%YlhZMQ`AIH**Y0l(=M8y`%0rz!`KEB`gAe1o+{yj#1%>FP?Vjq#3gMmY zChzQH^3FeEd)&t!89xZP!>cs!8b1G0U~GPRy`=*b4(h1`%qkG>$iX_iTL-A-f9U{8 zs*4N01PiUTWkk=l*20?;?+J9A<UIino5phb=FHyV{UK*I@`PaWUXf8g`Eg@<lp5d@ z8y7c~QA4-ZFI-ax@0+EuDb)1xBOg{_XS#!TmSAT>r@@YxRyA@Y_`dd@&Z|e-_#SED zn|Y6}Pr&v3Vhf*NPcs@90{>QU0*nXBcGT8Sx`NU4>|yo<bzOI}$KI<6G`wFUU!)m- z<bR7e<F*=d0ARNj&5CwKv!Y$uCPa64?dvy2v4r*5W$K|X$7lLJ_NFJXwV~^0rZJA% z4({k_gVEkEQd|dmVF<gvY%sFF$WF6*E;)>t@7KV!W4C#dXH)p@OSn66yz^T&ha~o} z`qEx~iTc~}E$BXuL!-&m4x1PsA1Qk6w#Rf%W6$f@QxjTMX7ZZ*cI6Er7gBq?yiXTj zQQZakgR`P<0YAawAimIe(vXfl=Rdv5nr~)wTP60f&lc_)-!HJg5!+V_HpxQmBY4Q2 z@Pqgx<X=!tHPHt)p;&T`9~vnMkt^GLMqJGE)Jg8dJ|Ml$szHAD3$ig+BM<qo>-&<i zb74zy>L4c4z*O3`VNVp#ZAiA{QV;t<bA%^HdehgHlSsCE@YywA@w~ic<Sanuad>O? zk@!l<vy!jJC;K`=k||xXP;YyunDwV~rv;cUD8t|A^`zN%TKGT37+A|a5cJ|S!Db@5 z)Yrh3n@;5ak?p{u6Z{C!1Ucq&RA+oP{&$D|6w6Zs4SJKQ>A>%zPyHqum7aJpzx{&e zt<Zanzdlop75h2EM%3s$^qI8|M<AO|K}RmGK^BLnxOM+x<a73qsrxtom2(n$>OOZw zA3Ads$B-4>fR9cz@&gyF>LnxpAHa(9DNT5(#5Vao&cQb5JZr5HISaVu;L9}WsL|v- zP4Ks>m*Dm0Gw7|;p|}3s^!5pL1*0=A;4UZUN8<!)3f}d?)$k+H9Jz!|nu`t`WYxKZ zR~?J)=Ix7@n=ofS>sj}&u&X~|^XEhxO(`@g{@j9}#<9^<@W1%8>Pk*zExKnlni^W# zv)YG_{aZL?FZ#dNY41boCS5AL($3*^;&Zp*XK&sfr_T@H5U&%j_d>6sf8$&m&bg+3 zlNrAV+l+8pK~0)M#?x9gH`QZG6~BW=58l<y?-UE_*qE<}FFZjz(cS_6ZXi#(Xwk7d z$*y{`cBfWqS1VRf^NOJ-q|lFv-(Q!;X5D)iytKQ1oauYh{Mjq@;LyXlgAc*4S{r8( zzwKqOa5m@s;{N!`;;^Nsm%_7(k(K3BJm(7PfIo=7Pb{tM7bE!YwqIzEoQ(Y<Kn<l) zjH5lnJ+2(bR^q|-oM<Z<Nj<&)#8%QuZLb#?qZrwIq4qWVt{B;TihDiC%cp7{wmn5{ z674Cnl}LvuMt)a6_!`TLk>BUJ`jP$Of57YUY%a2utk77;vAM`rg1`2-HW!VxSZ$7D zbD0Wl+^sgpvAK*q5O4erZ5$b2bR_?VWB|wJvS1STbl6wlrd=_*iPkS2rx@L2seAp@ zAN&pLFW%s4gKqMStIbqu+%>8Vx=B|*=q9ha+KdEe&!`Q$Nmm<mlYOo>8PtDVPaEcK z-jlHIM+~#&ealvK3G;5<6mNW*cH<55RFO|e=UNCGiqZ5HGv7=bdpM7~UZyS8&j0M> zey(JV(#`dbbk$nkD<oHES0BIrU-t29tB=l^zrt>jnCH)`38gzy;x9GWM?%<+byt`3 zJ|fwq4B9J7kb#PIPqf5XP_^5bzQj{KqvL{b?)Zz11x4H`op@etu4JT1oVU7n={<An zs<ag2g1gwmc|(&I)Y|JL=VPX75qR4ko}qi+vd=f|z;7qpbnC-reVG3uemVGqnX$@M zPVSBDj^Gch+#A6s{~z++JU*)W-v2){!!nZvgaCnnCIOdZSS-qxD$PLDW&xBKYiq4R zXmyeRirQ9dl>ll(0+yjzD%=*(-ZnFUqJTBF6_B<!fE#dYTiad(xMUGz5o8dY@AGxe zoNx#U>b>{ZU;F+3F^@T)vwYU~{&|1i@6zaZmRYekf<I1={t-J4yn9{nlLzavbH+z? 
z9u~eAE#dyXpL_{^oEmXazvlLo<LvkG(%+f#$DcVfsr}?E;s<%XUVQ3GtFYVjz_+eS z=UKH5ZJtebKJz{pk2S=(Q6KSaZJ#`S*VcakUrYQijR<f(B5pVLH+QjUJmcSZn7P>( zFS}PQDE4@@e_`!!-i|Gp*y|e<Hzt#KpYr!%F6)3R{i4R(#vKM{;xd2oh0t8qMlnDu z-+W~34(8J6Z;W1YEIGR3L~?Wj@apAE8g;y0d7^(^C2NrR)nTEj#ER87E8N|pE4p-x z{+92ESv^|*lyi7zATT<JpZfF0`NTo%%QJi<^=y3nzP^8rXO*cu!*^ZJ6pvKj=vh6_ zR`ww_7%=VvH{^q>_UFzuLVv?wX*2qz`3#uXc|h~oUL(XBFKOsyjLuigvv<CHN^oNK zpKmkA3=BPCkN3g!Q$M8Nl_|8p-}?SxVy$|K@zVPK-+G4j#@~O4cZp3sS}`cp*3-yP zD|=8LP-T`SQ)Q?;)++x}&(EWLi&cJ7&sCmnl~?F_cgicR@^U>_d5cwEs^{G(@3qQ5 z(sPwP^Ud~tsOQO)FSN=vdam--R`~~d&Yr*bHmf{O&sF|`Ri3Tq#HaJtTIKuoT;*+6 z`MY|aK)Kl}->v5=Csv#7-=XI|%0sMjg`TTC(JJ4r=U&P)t@1QISNVrlxm3@)QeJ13 zzpm#h@3hJ{>UkH+hpqDUdaiOxjoJRSdQQv;?+B}0q~|K%V3ohD=j3nqe#a{3>AA{F zt#YoOdni9|m9NlqmH%RuN9(zp@(HVaxt^=se}UQlrFu^6I`5TMd9a?V{0*yoiJlX` z*ZZJV9z=Nx@z@?~#pgbesn`fVl&pkIWc$F_{xaAp{jwMR_`%Dfv*oKr%oICDj^wKw zfyW@U%!-%NCn8;5@lrOP!bhfqxPgz^-;6c)#@YKPXFA{LePsXExO#JlJtaJx&-;IY z-nB!UBICd>`G;q|+&?rmjw7NoFAy7KGrn`;Q*FnziP+04o^a(n<~V2k<Gt27<5dsp zoN=FsVnbNxjQd2s$n|69Ib-={rbp83{+|^i#yV$wK_~FpanAUu+bmlCqq{uBygkz& z=}7krj-R?8f2}+tBs$`I=IUsc<6JfS{Lf5*56P}Rm49p=2Mge};<?!yua35TZq@-G z{SnXab`MiL9FvxPn9sWRG(s+D<{@1V<eC^+2XuWI*InR^x<*%YLBkt$UCebtW$`<@ zp2~F+@{g|X;<{U9@mso{%k_Dc#edWFVy>}O{6*JKa1Eb&L)Xu8J)p99x2|8}I-|1q zbzSe`I;*ny6<zP;`U{oC+jV`I>oLTt*EN2`u0Un+?{wXh>+33ux9EBx*HbEsU)1$w zT;Ew){A*p0=em;k=ejQDdSzwtMqN+kdUIv*v%0>E>xRnWr*%D-YhzCF8eK2uIxwd= ztm`MZuAEb>n155A<$Cj+;wN?elHQ+Nyi(V@^v>MkpXz$A?$0Y;rt8DHKd+d;7xdrF z{fCN|=(?w_A1)5*dZ4Z!DSnjeQ}>SxPq|Fx9~2+yI{x_le52Qg*S^Spodp--Ig)!a z;gy2z;lQ@O5x=<=9w5B^@Y<JZ2M@<yCtk~YS-KX_;rb%3&!`Xn=&bK=8SoqUqFvXY zDVz1f5AA!Ze^&-|iZ4;;dDNN7d5lso_r#mIm)N!)c$EAR^h`3upfA!6d`di4c%7~? ze9A8OR~bHKms3@SPub-ZmElu%xx32nDZ8AgGJMJ|cU2iaWtTlF!>8<WEQd0D$}WGb zGJMJ|f21;e$}S&K89rr~KTsJyWtUr2hELh$_f&>Y+2wsI!>8=>TPnk+?DAh#hELh$ zKdB6#vdg<whELh$*Hng2+2tK7!>8=>f2a(fvdddlhELh$O)A5u?DDTwhELh$U#SeA zvdimLhELh$r&Wee+2z$L!>8=>FI9$5+2toyhELh$6)MB0?DBHTCfvcR?0bT{$8y@h zZyUIJRP{LEs#;|STs@+)1Fq((?0~BWRd&GD_f&Sk)pt~Oz|~zUJK$=j$_}_HSJ?qq zx2f!atEnnG;OZ8Y9dLD%$_}{tipmbS;;_&ea5YI~2V51Z?0~ClRCd7Cc$FP+^(B=Z zaP>u%9dI>DWd~ecrm_RBhN<j;E5FJPxVl(n2V4zQ*#TD<sO*5NG?g82)kkFqT=i7h z0ax8rcEDAF$_}{dqOt>~+$uw}Y#2LCY)<<u>Cmn`gT~p<T6kvb6QWs*(<#Sw2$dgI zIj%RTT&*&6*Z%&m`X2gim;a<PblWcPrfkAIblbisn1AKcGhrS*y)GBM$@^#G+Cx)C zPt#qlmQ~0ERS%X{Avg7?deB!DM*j=Gq5YIzEnD=gr}Vos^+V}h(zD9&XATleJ(qKf zin)mnIl?z?TvWb?TjcLJ9X&DI6|H9P$~)9$o=fu0m{&ZXauN35ilO78iQL!Or6k@9 z@+;yx$i2kuaZxwAVGA)%csH6b#}`T@Zf6p<wS?iWXu?R|$#F%K#=3}=f&Z}T!`7L| zefgIp@lHA4=~+2@$qB>9nSLMAd+d1%$q&qd_X|~a;Qf4+9eDq+$_~7rqp}0<A5htW z_xGvn!255j?7;h3Dm(CghRP1SFH_lp_uo?4f%mtn?7;hPsO-S|uc_?7`x{ht@RP|Z zJMey@$_~6Y{p1{Yf3==F@P3@i4!j?$vIFmPRCeG!fep{V`{61(@P4Su4!oBw*opTS zsqDb}OqCsYf4<5Nyzi&71MhpQ?7;gTDm(B#S!D;_`&4$|eP@*&cz<lzX}lkf4a14| zAMnhH_m5pb*@5>Vl^uA$P-O?+zoqXTc>h<G9eDpI%I)F(&SA(?*tC?Zsu3Ja&oB;U zX-`CZZk_WZULU_6{4)8C<2Q#N_OGU{_&VcjQ_VkdKZ>wt73GreqT%u?)3)brv#;FV z9#kT2eA*rq{+Efe6B+C~=>OAlecHJQh$Dp^$%-STa|rokbWYQ<<>6m@=oR@f4Km1) zl^%JCxk0B~Wa^aMvvo@I-Z)EVG-Gn2*DgiA-orbxnf;pYuq(|<;G9_Hc<cUi@p~Wr zazz@pr&+IHBfPhl&QzsD?(1hPDL{YJo~LMHcIM!ai~nlm)4}*RkMD8y+G)K5YrS*G z`{L?%Xo7Lu&t~xZA-{F}3a{)tse<1ce90H`{}jLO4|ZQ({y@U=xvtxPw&Q{1<@@*_ ze}3Od)A$7*YM68@XS-aS2gu;sc>d5WUh3(5>Z`lE7@b?f_-I^WNMDohGdaYdpXhTN z)z?Y%RWU-kntQF*b7#yd#R_=|n-l&$R-S_RS&2$~Ef$bFO?7bAH+m)Y@E>VF-_*QS zFh9$gL$mDZANdaDe?ynC$^#;^DHAKvdY?U|Qp(n|w8))Y@1dRszMH1|=pm)_p>nP} zR659WXd3*-{U7)VFb*#<yZXD*cDRz#YNj4}V0XdP*wWHO<Iq0wsBZmh3b<#?afb$G z8asgj{~n%KPvZQmV}1Odgchw)^X1jc%ypXSfj9l*)=Kc_W4#n#<|5WC*Tb59;qhhj zC*ixp{`4M?_^xqRW9o{ui+PqUA8S`%e<OAXd)=IC_7&!;R%cY>Yqf>=9l$X*-!J34 
zcMUf1tMT2t3g5jv&WxVUpJc=PNn&GY&f&95w6}MzSRE+=H@R|*h|5sCv=r0N;eEb; zOlwV9d%uS%SACH&bT$sHhW5xuZ4dX(MSHN9KU2Y3Ssyk7ogwz~&-c#^b|vnpx`w^= zcYe65FtcV^OZK8&;l<&@E$_d0IQ;N`9o})rYlpKFS`M$S+<Vx|8Cusx+m|8Zv^-(v zuErKZytd7>&7P#$za3oHYp%L#jysO=vGY6flRtOdvjY5Iv)K1nT(kU@<8ts%9I3g< z0`~%jD-`u~AuoI9$j6>e{^QUr*_RY^;Js90XRLJPH2l&P?zdey)jN``{5+iF??;?6 z`SZxXPO*)=UnN%SW%)JoeRv5t*t=EZP&@|hAN4i&z=sXsTWnv?eL^0qp7`CbbcKy% z#@*A1${+K)-H3(prJJM2IHM^VDgNVwr_=)T&A?b=SA)Dpz8bZuoKlMU!M;s2-__qw zyo5S0{W!DT{*m)Cj3vv6De@fq&L2Z(-oQs9$llg$Vr&c`ZWR3~38v5&`VoxB?79B~ zS6H%~#;5bj9}xrVaZkUROZwusdnspLyvUFA2mdC2)yb}-RiCrn&iIe)gGR{L>=&8X zK0fX`dLQfPd`GdIX7-QM{doMQ#uHsfHKyV}bE50$T6|;KGg*X8`1#^E9q&47FL-%+ z?$aZgR~bu2v6rYEm1dk2`lUGx-jx?cRz4#(sP1_)O&OYaTB)t^3(Wh-v*!E6@wfL- z@?X1WcX)hj?AF#ZEe+uK5MTyd-U#7a-yzPk?8NXOj&h~u9vBxt5AJe@n^=e0*eM08 zO`juf&w1cLNAVo52foeN4D=!D=?r{#@_z`79|gwcQ|O_O{qO~}`q?{jV6Z_P?!0&$ z?$nmrct1tA)X!RCHfnBvk$RdxS$mF(0j#rlt^1{sG|sy9WX@H$?6SK4w7NT){O-)U z_MNi$*_dtm?>IiA)8-|eY1|0qzHmoe>>aW{7M-vC!&BefT^cHAX<E7>t@)A_Y4{>n zXwMV7>Z~37r(bm2VeIWCvitp=_>r0PIh#0q)%d+?&(ZN+k-n)WpMc~#y)V1Dbh+13 zOkbTvUhHMrZO(($(uQ(H`-lhOLkCj4lI-))chGzMNk%jiJe1E#W)CBporLcOI#KYJ zK=dABL6~rI3$Yn#Pwhw^Ltc2@NWRA0i`6;2*n0*%<m_B`AF;3Z_T5J4ok7HG{-m<F zD|TVYUh)q*&KyZbh@~5K6X7ieK3vi_gWbj*Z;JhNm+zjHhuNDtSpHNXob|21`XGCk z+LO&cWrTXY^S#B7xf6C&xidm#uHHL>U5L5j%Gg=y{{FL>$9sjw$M@`l2}X1&XAytG z^Vfhe_AEyIj`gtOK=cJ}QvJ&5d6moBn;68rH>L;bQi<1H^`M#mDY{~>E2ruvZ&>;7 z*7Zd88EAxJ<I@k#frqf~w#eAHb!_FaIb-Xox9YGbs=4v@m|*58+9O;OExFp1C+ncE zCJhsvlRVf7o3KSQto&-)^RGJQiT<Ac=zd%OuqZUoKF_6?+vm#F(s(6wWB>OT6MyE} z?Znsg?*IO-ufvnF$GXikWLnok?MLIESH`&s`RZw(`v^SgL(XTwll+Q7C43bwrwa~K zq5I^b?a&91J?{ef-N)uEX5SmX`)u}V63DCKs|F90OZs3Td(oWZ?J@g&&#df==BE{W zt?YQh$xS`Y_rev6<)?4H7a}edW$|If(&$YL=Uim7mJ}nW#be}%jx7b2Pau;E$M>a> z^TlI?+?`f3r^@e#AEnzoLmRobld*9o@7zrL|Mnz#iSV^q{s&?=F|H}ZVafktU`%Zv zV`P5!Ku!U<2AP{4_%i3<gFp_dw1^uzngxA-n>OB|4UgG|Vu_@iwC7z<U%zw(eb1R) z=}PAQlIpmXeyWZ`z~A%uRRJIC1RwRBwJp&83b2ule`x`+{*)8S1+I$!ssDNG@kmeG zguhf6-!YA$4|E0DXVw<z#b3rtuId+gy56Gso8bw|{l=1QtvL-+Tk0>iXny?#7A>v+ zFHg9B@3<U%qu~)&JK40OdQSZZdvEdfz@3s>{InU1dA=f%^A$n-9A@L^;Kk2jK6^fm z=zX(U^9uYN<l~S%ye3V44y*CeCEiwb0lp4d_&QX9gS}^y;|m<|5r=>Ec=yf(;IAB< zP5`ETPrJee7u~klH{<5@jj^8=d+%9!7kobA#aBECY&P)|9Awfb^|`&c<IWiWe?le% zAM2!lHUl4p*nF@%?cB~;eDu{rl5292b(;7WPG*sdQFFQnpT~Oe{l<@C8`6NuJtnOX zU%(fa>n7x<?Z@ENE+4+>X*Kyb<k!@LuOC6De?^Qf&1>*zY~)(xDz7nj7jt=dJlBov zQElQsz<S&HqmCeJ3jQ+ir7`-=EiMPf8aQ*$zU+ot`mJZ^k~!WCBgeQ9zu?rlqMd%6 z=9db7_XsY5Vfe;KXT5#UkxY1QQvRqk!Ki;Ku&4U+>BGQ#%y?kr2u})K%DUh1RkM6h zIim(so=jQ#b`*K@cxfl}gU-<XF6<ML2WqR!JOe7Y^|B_};OG%(g?M`-v>}&sUhE^6 zz~}uo-N-<<{2K4)fOi@6`?tR*{{nppC}t;luBy5lBOBS1ZHni9!xx`xV?;f3nnkQA z=Af5NBk>Je($H0UwH0%01nV-zQNDw61MxWR=Vg>_e5)efhw`OLhwsLGsD3Z>!}8sb zuB-D3$|um<#O2M%Cz6-5c*piB)^q$dcqc{v$L4#L<bkM9`}Fr%qfyc=)aJSB(_Gl} zLvvx#4`2|yw#PypBl5KdalTSz(kW|BzQmZdHmx{A{EW7`UbH`agnVA&<xbd{1WcN1 zLu?Si<nNg$Crp}rpdFa(4oqro6;J3JtZ!SGR6Itlb^SrXr0^--|69hR`rxCf50FDJ zLq06PWHtR?cjIZ8e1!5E%5C@Mv%IoVTjvOm!KWlUFh9hM!cR-*?-;j9Ux_gxpKrxQ z)4T>JWz`tt+@X2IzASIfL$~Qw6L$C6QE?zInsh2RG?d>0bcwb6h{v>}(ABG^hS<5~ z&7Nq*0c-)#??UKzAux)~`CtWWPyzi$jvHAD{YLIADTfZg!$yd&RhaZ5p+<boEB?k$ z^dkuE2tqsB?$z6QA)uLO`I%Ki7lyRSQ}9`G6uiV7E4Gqidz>|o<j~dGz(H3@ZgnA} z8<B5*H4vT({WNuR@ZuzAcO)l$1^U^xztE?U!S{kC``UGCg;|y^<^tywPvj)A;7;7` zMov6CrbrN3B{eRqoYWadWR>|Ix6T2DMg&|T@zhcB|D`W)qmN2Y&IuTyH<8H>LIW*5 zqF>~1S;msc#aD&%^Eh|Ox23MWE#Wx-3&a0(FTB?i)|sM4Y*sdW<bRnq<1mu`Q_n1L zt0WAZTmhV<tny5j?a2$gZ01bG-Cc~~_YX2c(~$pakp<HjyZSYZe&|fXlb!OP|2cc} zHgDYv4Stuodk?t(5jutFu-<>k(eJ0|_vt)DeOu1TLqwln5MO|=Wg`=3Aamk}7+rO2 z%h(sLK|ep77tO%dw-PzD067Kts;RYPQB&3cFV2uPe88&sooGufunPScQ7brQerkcq 
zoAU6<XWgW)rtxp`j!OdJLU?rxvUwu1c@UYr#gn>Yh-*O25%lb(@cTknpB;PMz8y{O zKFDzH9TxngN#2KM_K6g%Hp2PX4wBK)i1oQ4UwuV4l5DRz`8%{j^r3BRloPj)$2K)C z3>=v>%g*KD<qW5{%^A@4Y|cfj@2A<EnX^Q#eF3>1tHG1lR%7Q8<PST>(e=pA(wj{g z6JHehAj&pE+=Qho8A~H$@j;V(Lj&u5UgQ?uZHn8GRx!`Qn<J9nw;C4RC3a{>`ZEk2 z`;g#7@$oHrT=7bzGl{QW9)}&td+ET=X?d^T+4A1I$a~N^)#YXG>!EXv%y&I71C5E+ zv+kPz_VN$h_xo6CCU^#X_R{|r$N#DOH<1(42#tpZDz=em`U9LvfnOh*4~>+qCL_5E z?Rv2_ZZY*nr>>lTGB+e!z1y_gNQUSnSd$-_d~S1sz+@Kmj=tqMbx)@+)7|J+!GU*} z@tG_itZ`xawulzJMqPR)`K|%qW7T0li*1$e)ERoz1$xvK8OMu^<3q+FKIlAmNP3dw zPU1SQlWe0{a*dY|qve-(=GZi{7JIAuCqGW+F-P_|^;_))xwqF-l}4`jb>1iM%;{vt z<B?w%{wO@p$0y3uV?~<m>AyoBV?W`U2I5^vzx^ZUBHwcNZb7anvE+o5$QAHM-Wk5- zJfq~D8#ps&z-tGg>i~m$&=ujGi;VM9Lb`Vfe^}_(L2rSnbLM1qH%5Gyz8%BPccEwp zdw~zF%L;8SkKCm-+Y4UqqhHSgk7{>w{M=-^t50(y@Y2A!6T2V%?0!HWx3=lWrZ)Zf zYny)5Cm184L+iZ#u8B6eQ)-~+JKl1qv<yO~{|Gvwxn)jAd+F;&pRqAs{?s`>J-^cV z`kXbt)0tl{{>SK6hqiK_LbObCpt(3b2cl7ZBAS;wnFryr<on*P)ap8S+K&5J6HAuv z7x^>K&9z_-=C+xG2jg>~b<lb&YcmJ6$m;d(emh*QlpTAH$2P2F9)1_E?^e#0&umj) zd7JtkvFdx>UfX(Ngnf%|6vNHblP<CJBw&IyQ*5|G;JN_#&IiuF47^_h9laVlYRP^_ z8ME`dDcF_LW+DH>ua68CO+fCSN50)S?W>)4!SSCs#y`GIf4@Y34d&H`#hcpn7q}cL zx)`2!%W>&*7G1n1d}Q$H*jFcrDfk8Il@4;0J~(v`d!Mg%-ucIYrA6L<zcVKvKUed5 z)txy$>^{Or;hTwP_#1k9%s25&cq{vRqXomy2*1*C9jHXF7G6$uj4P?lxV&w~bzz%v zWwEZJKeguAzG05-R@T6lyN_yo9&65JOLV^1z?}b<F&`=U?~hsIRSesX#=N?NF<;~u z^J}#4q$@jQlXI-)dtQ^Sw8Gy1bj+2E`8nFuyop}h{A4cWboDEKX2j+l_VbspJEWfD zxqR5WxAWYDk<1BojmSbxJ+Qk>{xbGBU&R#GNwSb&F*h#1tBp@)5tE6sWF*yjg0lJ< z%X95Kik|-AwI29ICwQPMWW!|xbiI*%=iSVeXnGajd0myu;D?JOb7uAH65U378KT!! z=ZCIkj`L+NjMM6T;$GTtWbfA*>{*XwuFUr)jyLm6_%kYt{geCN=f!5555JV{$y}@8 zK<#b&GjFzN)Jk9*n1{x*FT|d`$0Pe|FaMLwyY!_D?y0<jGCt0WGBcDjRcDY>BL8(g zay2@RdB5`X{XcWxu<tjVzQ2q6E1BCy=C+JAL{D1eo7}aoATz1XJoC%_bjph5rFXJ< z#}mIU;9Bq6xM7cd95m6EOOCQn!Q7j&Q<nPg@M$*ZX7YIS7bD_kPF`Y8#P2OTU`j-M zKVYQ4p*@k`piAnR;{6-&&vCS^@#>v#l*Ce<^)^5=L^qy3$2ZsU&C%}XA9u#Xl>J|O z0e{pwKw}z!A#6yd+^@F2jn3Jb*jt+pr;oAqDUJPT*E~~pX{)2D52cHYS)yOiyOL7; z4-EbvV~;F+xO7a`Mc5mA8?zX9$;LMK=AozL&M{^M^uLyWU?O)T_Cx-|$SYZl{}{X? zcMGr%f82IKV2po@F}XC;m^GDmO2I=Pa6Hg8H?1_onDvJHv1Qq;+g8@Q9(oJEt~0Rx zDfYrqes?Dr8*kvb>Q+uXyY2w~2>rF*+@p^>?faGFK<dr?+*@6y?wq^FH7k?YGEMZ! 
zKQu6=P<{Cec60DRcHnYi6SSmuf3{_I<>69SVl(F~%swX2m+|Ujz?e14_1H4?yMTTx zZf-XBchH~d)V1Jzqvwi?kio#iiY>;@+3=y4-~~nO56{NNy_GR5|4y?fF>(YwV0vel z_06}8S;!|P6@RzBnFrmLtTO%K)6c8){2MN_&UuVi^f=JD`pr%5K4vbb@0>Mf$oHIY z@=?znJj;iEP^USc(vKvMRk?q>ENIH-XZrLDwo}l@Pe`X{u6}3gES->Rli~B-;P>6( z`{%*`dqAgp!pA#>f>X%D3NPO`s8cwYVVJqHBpb<g=k1Gs&mdRW>!pldlf^lxZ1x0= z(AKfGFQE2j2PJtYojl?S^`oy1-7Ck2i664}3UXYhR*ua#kblXKEt(p9%NX$%@rcGV zC&BVjW5&~t=2h?7@9kj@kzM@gng0F!bpGuo-fP01enRi12F4sGhoxwR&RI_ZhJAC4 z$+@gW<8Y&70yJyEIPxwV#(6dOmEBwDORNdF(%%@*x#E$0v!6N9^=Ph}<JXG4_#W5$ zrx|r$=eh^;4zH<exFQhUgMK+4n8`q=J3)D0>gKV~&PYCc0_k<whh<YDS45Ng)-{lm zAlS?X@BET0-H}g<&#p_lu519ROLeL)U)VJ{cN};H4*!-I6^+Pk>JPl4uHHC4s+qDc z)98^4{1mvz_3(S*xep3N)!%&R8_z=Ak3#?JG*5yv_YGAyHBJlEHQ|GVF6(ckKD*yj z$ZG+P_H?%0c)3yHr@q09h((#XEp4!|@-FbH++d71F-Fx}KwmY-*^hxYJ-UP<j(OhF z%{xhR?d@mu^fI1w##6<b6!Ct=GvsCF-SYc__h$PNYBG3FV?5Bca@Sz&e8Y^*hc(uU zwHJii8}HEDtns!PYxH5}N<7*%%qS_OkIm{M>yZ15Yt}g8z}%E*^*f7mBRg4R^~HP2 zm|RgnjE{Eu)8TyI+J5^7-*Z3P1DR;u<BuO6{>2fYkD2dHZsV>9F*N)s>>+4R0T@oa zq3X?Rfuq<Fch2+&USGZd+tR|d-Iot7pD||GDfYfDG7e>9i)`^EMHUtf8`B6L3?^1| zjJkmLqB82s?qSuZy61QjYYLv3pB6NZ|4OvG5}eA!kFGCskZq8c;roFf;RDoi^zy1@ z@P^?H?9tJ$Lm#2{{;?l<20TV_RX=0Yrk=(j_m$cUCMFEHIWT0DJlaWqQ>WLk{-*dE z=J?v3v;E9LH|Agh<J0`DWiDRL8PV=KE?jr^JZhi$HMh}A?N4PKwa^jK2hoO)>5Jf` zqjeej<Fo5K#2Qd%o#sG%(X0KcIV-Gr08X?f&CJ8K!1^M7^}~oM;WmnEh8d&Mdl-ja z`ZjgO=VR&`XjSKP_hI4Uv-<!ZMqA{20c<w`;|qZ6Y+(GWv?mz&7+#W?99UdaRx!r^ zKwz==)K$C9wWF^!75*_5tRpb6(O$<no`f2$rMJrui?yCv@2B6}0F7x~Q$O}5#_e?( z#X*03UB7>NU7v`63yj0LuKC}Huj`f$)|Gw_qyIzWxcO>w?{?R8#uM$2pSW}9T)T<$ z&;CyM<6`bI<{uy3m-V}j^{-0S-d93Iv9Hp@;5zn)bHT*g@Y(emz=KT-oHTF#pJOG+ z^Swkzp>aFi&_vN)(e}X3SP6T0ie2WHJ}vm%3w&0wH=w+uQ`~)O+;e`iYy|sbqUAwP zYJ&8JZSKA`a~RtM>RiZg<a}d;@J!zd&yEpW312E~9l$&C)o9wUJ@(*YVi-=^&bQb> zYW8&BJXZTPipj5if@b{LyYSvB-YEn|&HFuMH#ACa9nmmzVAiT`b9h!{TztW5_LXuD zrH?7PGO3%3?`t@hdFzNhGc$Q-u>HKVku#X*HK(k2LH4^lKIu9t$Hg9#^Zie{j@G+_ zPx8!X<h%tP)&6tkTyQl$j;)LhJUb+u-8{!A(b(p~o9_dU?fbV<?#g<K@0?J4sW{Io zdpTCph~81gT%T)QXDmN^UBR=uI1gDQ9wPox2#*5q7A=@F6uBWiQhKXvR*kD>neZ+N zoSRWLeasM7w?yF_G<WC1iXmgz2i-WwlT<T?xx3fn+o5lSYrZZu#hG4XXO^ofEz2-4 z`{+!*Z-?O2#!Z{XRwCor`1o!1okU}^s84G@9^YV_#y&CADEXepw56XDu6~A&(H5?@ zeCIS=efvBAcf!@?JKBdUPVILHSF@N;Xg2w_(<3%r7YvREF3W(yY<EIJu5c6h%uEAT z$q^)4{{->ZMC<W2*`f0%1N#~!zh{j<4d+M4*G4o~IO}y;xIZV|D47V(KPtRWx7KHF zeC?Z{VZxVYV84{M+~*r5@6%QTxZVt|iw11N$5Q(??3>i+42xZtaDNN<T@lB9Xuuh` zFS;-R++G8`eGS~75Xb!m4%{DNKL_`R@O(iW_w9FI2KU*QZ^eD{eQ;mz4gvSS=D__q z*NSdQPxP5|aoV3^C7Z$Df5owEnSAD0w(g6S41&g;ixw^XZhN$-vhMq*VyU9tEnV^P zJ8IE%(Qnb~3fA;}U_fi>LRMZ7UrW~XjJ4duT25rG*0PrOv6d6#Yq`*|mP765tmROi zFO09H{q8HQ<$aE|G~Z_}_3lvC^1k?5%7+UZ4gTU5{M&RKx)_p;LO5a3@obZh>%QnX zI+UrmY5i^8%@rb6=}Bw+g5?0?5IkEvtZz-Z$Aan41<N}>1(ung4q<su`WTy!odwIx zjm^h|kDHkTCvMvFP#B*F;j1+desdml|IB$vXq|^GRk4z5nFsmNW+Thsw^VbqO?fBf zbHP{gx6Xzy$+U4<`*q3Mwww*k^NY9nA9!VXA$mp8zb7r<fn0f<eKvHR9=YfiHAPut zyj_jSqCxfO79V@OMFq%L*{=TR2=h(ZSa4wTFY<(JXo9w7BTw=(dDy@~vcbgVpc{;m z4aj#+IysQ<=OY_=(OYt<SMt#u5AjfjFJIPz{an0E^m3cKe=EHl7dXw&j^=jE&z?X| z$^magCxu_K``9v*Xk~?~Z;|9C!CC>n8UKv(5?)*F%1ePU9rLwc$87=L4(tFEOU{N9 z$wd1t*?o~@cggKqH^E4PFR=LKvhp#5UCD{W^*kgW2XK1lf{MXoh?`a-y^`3UJC3uL zxgC3xY=P1%`{SSDW$y$TD6U%yZY<rhE!=30rH9zE&(sWu?30h4-Ue=LnR>RzmZ_I7 zEA0SYwxGY+a`m^-+djKo-Tz-kuHJk}`*31#zazPNIB+7~t9<X`J3GiVE;(WWxUt{C zbJoUr4!q|qo-+xa@)WrKI5c%qoaa31;5ozW=kT0iJbyIKbL@9th37o(;5p{|@EpB6 z44(72gXfIHE+kviG4x=KY2UN4lBMuYjiH4xgyA3CIb)(RSbWnLnZ)})<R|_i9@!E9 zl$`XB;-8z`_@EL4){=!1tg#>q{o!9!7IJ0VW3lAAyxLgFmsuBE&fAAffIb(J{wH0} zfaYucrk>XITAz*Mp5{f*r#(V0B6!}vqI|e&;F)vs<eRaQH(B5R>72}46f2qEMlP8A z@tJbN%r@m#`^a}=__Vqrd-_+7)fpPjRYVdd!()Fieyx?mu{XX!*s0<1y<L4xdqwSK 
zmJO_uHIofYw!kdn;Fh7MHj#H;`p{fraN0b6F8awKaLvi%ZCdTLe|(6&yse%(hc;|F zJrY^M)PdPkX>0#@iF2{&Xq@r#MQoYUmM7<YiM_t4^3i!syvk!=NOpRg@Bb^=N;1eJ z(4MVCxdS?)>AzvDHa+w~)BhFdp?o-QC4YsJ9?m;}9f+|=9+sWWmWlt!+B#+A2Tz<? z{(y2Te}L{t=9a$F9^G+16WtMS(Yy|-jFq^+v19N+l|{d#!-#ITSC8|VlYZreV<nUS z7wF*D2Vx~h!0G=n?RYL$QfKz(%yB7Rm&WC8Lzgu!`&piYE<d>^R?;)RHuk!(fq2Hc zT+)UHUDJkUz1gNbg?`~%zf<#Fe_7SC+2vVd8u4@VvUh05ydFpY<U@Aq{$lncC$lek z9s85lvQIe)f24`{BXvSPDShMP+wp;!#~$Xj=dowWzGYsg(7qpe!|yEjh2OgoU$m~q z+P&RfYu`?FuYEV&vsQjdW%%4Q4|0dz?^L;XbsqcS_$W2u!;p0Ft;2<@*dKAZL$&*i zovT-084j->8^+$e_DRmHt@4aryWu$b7SHlWnjAiP?RDf&vikG}eLC4D-({>T*5a+k zPTTLp_AQcMQU>Q8^5vU^KLYW&U(cS?8$U<coC4aXJ2b*owd__Su(shzBZLnE@wjxZ z!&n=lFMGHrAB9SH?i)@Yp*_GcZHDAKYWoYEBzL6d>?!rHjsJn!zr0R6_uz59??*o7 zFCqM1Y5Vh6!T2Z9-&fV&E|tV!H$v|-pFZYqZJrTY&6x1z4K3Kie%mF<;cbj#|K&;H zJ)UdUHo|jS7-I!v%w=p1gIKS59&=3O&y3vTn!jw1r|a6AG&Y`T&st-%=G^j=Y8@ME z-8#0oAH;VB4<3Bc@V7a2KmIn1-}XgIzsEgGzBc$3mf#cpI(_kHXD69^l!Nj0H}`^# zmBrellz+li@STk0xy4i1=N!ki`jV@A(2lYE8|*_}q5sNCLLc#u&MjMgLpruSl`|@f zFV{b3_J;F6g*bJW@*m*)p}LPx@L>KoQ{G!h?0K$7;NO$Ue<i*N7wR43`JMw+=A8_c zAE+$8fd35osr<2f2FVUnSu9^t!#KD&HGWTZ_6*`1jh(5d-lx88w9(ysZ(UF2qY2Sg zBL8~V$A9IGoVVmZPkvm;1@h-!#kHG%!>B6O8rXg|nc$~lz#fs04gO4mtD>UQ{vGR} z8NK3b(Kcr8SHQt~>N5S0R`xc12YyRC8ThNqU*%1%fq#DhAIjeBFKtX4WcrrN-+MRD zkWV&v^VwhTf&U4#%9wzyiF0<s4V{5I`fbyfY1Z9+ev~GE41ISjK90*7tJ-d2Uc`$8 zOW+Uw#(B`0@z5E3#^d98`!-++->qXex_NfC?^XtT2rl|(Jk&Lm={-Lr*t+g+e4UV) z<X6-HE@*w^zY~iY;R#m1o(U+10ralH!+t#e5BO7zAkL+~5BnaP|MX`J`Rat;=(*x1 z`<*QOr@Z)~`0yXVUNcMkzt8~F_h8{OW_})gh1lnJ^(lIb^9I?ySEP+_^-4S@8xL({ zeG(g~Hl-VlU=Gxd{yqGw?WLQ575e!c{jYVbvo$}xn4hbdgFNPkcn@)(s(;h`%yaKN zk|$il7dp?~ifavwTfU!})9D-fTGQQux^n7WjUT7-c=|XqBifer3^oE+V#^lR;ne*F z;Ujs##i<EDqFxiG=6!}XytL7TuR}9ulp1MA^l1(4%yuQUxJ)>;>Xn@}>jCd_>fdeG zUt!h{|CpySSp1_AnkIfA9^r+iiARVh6hhC!gT54AJ$P(bd}Y<V2iJ;T3XXw;gJcm3 zW&Unx=m}uPNkg6dXVtv1YahWk?;&u}>D%rCzg(`Bcik(05cY2?k%i@lXY`p{Jesu; zJmEvPA;6l+_w*jlM>bQ3d`*CFKR$es>n7lTH`R(&Gy5x?a`?mu4}Lzzab}~J_=)A; zmSUd$7QVLiK??x4Dfs&c2L%h``L>EPZ$a?wu>^SYz`%WBbnf|_k=C`&B*Dj{wY*bB zdnvT@nA;Vq<Q$M2x+z?o0}ax6w7z-t7g($l-ZlWgf<>Oqs^OUzdQ<g=5tjU}y;QY- zTb=ywEFUKMCfmMl!nw>bgF=h(lX1>r1O3!I^?GRB^EbRe?!fq5t)yLW#hj<rns??^ z^}qR|V7`}M{TRS~@uqFiNdr9exr{o^mwkS{z1ZI-&rTj6{m9eBuiVAtHWts$Ifnng z{P~Wfb5-HrXuh{<9PwuZb=AL$E%o6?h5yhYwb$U8zbuFyO*q<!Pq6texhSo(Ksw`1 z?AnQ=$w&HDJ-ORC6GQIB=-2CBIAV??QpvZXZ-R-Eqvm*6=hL*zj?FC}&;7*pXf){| zdLjLz4rrrwz39VOYKGw<M{1|YQ;zFK$MuCr^?tfvuw(x-4)aXE&j3D%2a?r~eJcKQ zdmB;R(;iiBKO;JOq5F7wXU@VBkEDQi3h1}qnf-tfE<NQAC;G_U!LNcGoD28~cUy>k z@ctz_Z=29k;YuocpYndnoUtpKjsFsRwrMq8kB?5Eo}%-K(MCH()ElInf4)Jki-d^m zyxYjpt$z4vw;WkZIgbl?E?c*3`(=FB!haLKHPgEo8}wc?xG;hL3DmcO`f}-``Z<(7 zPU5$gKEBFtldD_He)@HQe&y27wXSYO2Pl6)`Bhi<qD}Pk71zSF{l@Xpe(IS>KZnv@ z^2ykSFy;SZJqq|A%KeGlC$7PU2xUM0Q*2?$9M*iZ2f@7O^DL2fi{fJ~r<}mM8fO!+ zLqyLt9*xVVd1gF=`A_Gb)*%}nn?(OL4$ZU1vBH(yV$ZF3qUP5glg6`xF|2VdNL%AN zKH5inLufyVx->Ucl;5YVEc&pTx$RB+@6(qet|fP5@jQw5hs58XPua)2@pqVGBU(oP z8nN>=Gyl%MZX<V^{P7yOFIZCF{q$R~Qb@lAE8-o^%y)}9|K1kPkQdo-BbX_r@8z7; zj?j1Y#YbNfsH;Hj(tllRT~xpN<prKOe`L<J#%i|9H$L7M%$jxZuEq<_w!W)t#XTe@ zc!|FkZS>}>8sF(&fosLGtO0>&eJ^7}B4ZtVfw7^S|3d03rHyppYBoQu%Zv`!C9%VG zS&(vkG-vIj(`nDSE<yUJc@^wOXxp4~+O1%1R<JhZJeSXE3wTh?_?kV*k@qQ2B)&lr z@9pSmY?#5nU~&`n3;xwl!T%ln1pgoKd&||m#fEvUhv5DL*5n{-@`3BTqPJLs1FlEY z4uAti)H9QQ5;rVL{=5y_DGLV#$CJ1}llzlsZwKWf`X?O7x-c)?L>|hjOlU?gYfakH z6>#8xm98W*r>x;Q=}H0f<fJQ0nA48v%KOkglm77k&p=mX!xKzawZt}tkt?$LS}>i@ z+zaki4!SMBY~e&uv7Wdu8WiMTI6a-U5)G)KoXA>fE}Dr=kRZJ0`+T13zlxYn+2Dlg z)%Sum!CV5cwLMN}MW0IfB@ojk8QAKG#)=noL}Q17<61M(*ktMw+}2XId4Y7Ce9Fl@ zchXwX&*}J7OBb4e3=!npYWk+}i1ui`?Ed-5EAtUBIX^y6*~o;FAMEkzUvwpVixJAg 
z-!_}^i+1SWrXQb1$IHm4(w2_<+R}06?Y~RMCwvMW7wwiTCtBAL?S_V&lXhn_Pog(1 z_$dBw(C_NR&rZKZbNB3ym6$Xa8YVca=HDrIh~|o({kP=~@2As;ZNR;gJ_MN4j_AX5 zWE9bdX4d(ifj)c|-e<=7t+#Tu3a>hpOUQlne;YiDCJK)~>OSCjTl<Fh*blJ#STXUX zXDIf-?&sopd5Gb>5}%hBiBVVkMrky>%g)QwtEKi0D=$y&tEJHmyR5uCwXgC|{*_<x zjn27WOgw;<;GWL?T5&OmLlTcWmPQ+v&GfYV|54Ygj;o*QM^B!wB3_;J1*eXncP1Z< zr5eNmd;A0R0RE*P=-MgY`{MGw_Z;%QzYU(X%J<j+6_2@sv-`6{6Ix|^WV8xmB$U+r zqw$HpOyake{%i_OYL(&PEoPjxSD=x8VA__~Ux~}>Z$tZsqQ^|)=al`$lNNpw8&$wJ z?aA_g=DW7Cd?IyB;OEqJB(v`LC^qWz;!{~0{!j)_PhxyS`7L0KD?$@nWqjoOip|(X z7P!wX&)2~JWn&Z_$Nzm}KdzJLug34x8Kf_~d@45TT>Ju_a!!5`M%I6qIkI_$`tPUj ziTn!be`%<wRnAAoH~U{r|JCN_k?H;PUGwhL8>BOAr~jXY=GnSIBRHLoPMdzp2unvO zV|>}r@9e&od@5RAjvO#f{o^_I1XEuykPBQ6o%yriRAEQ5EpyWmPBn1ue|3*f=TS{< zWZ1{g!J`&{+ZtB_|LyW9`t$!6PBlv3MXoErr&c;-BLC(5+d5WmTz45lA7}7u4<`aX zEO-gWM-OJrq)Rz<)-q(oCR11PMaug~m-0oVBT1K9%bazjOC_TR>b)BDuXc4Q`f>-q zSAp@jLf5wHQRq-69eNcy<kXp7jq6OJL!v=<@N?>4(!Cyq4t1nYi4Ju{cUB)bH{IDr zE<nkJLuhjbzqT}B!m-$>GUjdwbLZ3z2AjG8^G1HdZ}Pp<KJot=-NHs$68Ri-OE5jZ zi5#iSTSv4j@)>ED>dJmEwo!XgwhktIj>L5^*@vWq`Juhy8y(^D6l~K&J!kSq=~`jd z-j;8Zu+?attEu0%t%+vXZ6uts^r((_=}^Yh5igy<x9D<b*<Ose?WG!AYioPSkK0~W zcVv5EydBYn268^H>GkP0mhIrW)5cQEJa<GFX3!te1!7bF)6fOkS$xFUs~_00ou!cZ zbLvr_MZVJ>Qb#mrF!L&%{ap6jGUm#uNBxv}wc)I-{WgjD6iiFs`)A=%wynjBZDc-p z(Vnd(pSXqJU|goH1zVSGYmvOw9<5!|5*rm{-a4|wY{#zC)(%s94m*r(?{2ko$PRN3 z8W4bvB}2!wb{*MU&SN~u^uIk>c_KVYGVcGx7L)J~u*DRxW^HxX>^8D?fow6%m1t&r za>W$pYRIR^+LEsoH?1T2n)7Psl&?R{9^>oC9z&lylCg!i9rB#dX^$Bfx5s$F&jj#u zdmBBm)gD8CI+AJ6MMqYD2K}+AjXh@EIq1lOxIHF;|9=)b@>%o~)Ascrv66pB&J%uh zWRuy}j!owO2DrBEGHv&^<!{Bl@uH)|sQ!>xvzb$Rg%q<#b`j;%{ZH~GY;Kd6)821Z z&P8{<_T7m?;x>9V6vUQhQrDCx(n6UlyM+d_&pcH57I;4UHcxbRxhHzhn_cR@eOLb+ zd;i+5%lPv(b+X48!93G`Bwr=lPpLL9kQ447sEb^RkHtLV__~aq?0eKTU+N08S4>Pe z<>tZDFz%mn@{SkqhjZZ<cXk~0Se|cZK7KZhKk$a-S9jdg6+4ubKSFWy6dO>ny0u?k zOFWK@sm6xyJb^C}`}g>*L@#;Pj**!fNnvbFt+Dn~S{mcA_8alDkgs>+tL|`?hkajc z=#54AZ#-~Uv}+4-nmoi`zp+brqdTqU`0ZW8CoUzIbYJaPTX~q3bL8*IhVmb4|C_T= z#_QSaeS6tc%1ZW{d)`gdXZnmnL%qaG^b-3o-POBgpL_;71@3El^O`#OE6@+(clj!? zvEnz89;y9a^{)J^Cl33Jb-NPq^~$>IwZjEte}6de*w(SlcP!o&Jbv(ScCSN+BaJ^l z?ESuXx$pbF<>j@1JRJF1(_!DKTh|BYK3-hA>V><q$(h6XtnDt&3`8Ce?b3OJNaVMN zU7bqrXg=}B!x;lIX8N0Bv_t+C^6cn+FTV`(>y%gjbeBPXoyc`hn(uGV{ge6r_Q#L6 z`@Y^)y{c1nsZQEX4C8B;z!`V%`GLjRbFK(^FG^T$xLlzN@cUUpjJ}P)&2w7=IWGV^ zmQAo%<mZZG>?W5{9&>hd=3>R_u5ugE9(+>|3^yOSCfvBy$Z7n5TrRnsnPQ&9x_`GT z+&s-4dV8Nc^v+gy=-nsXq4(xlxmorSM_c(aJ}fn&Ck7jJO~7dLAR~-F$C4A*8+9v( z7$f#v8;IVDE}%8Ox*IX_uO}w}wh?R1dqo1=+sB%zz26I$cu)1puK`~YVos+=Zh}tN zrJ8Y1*z*nKkgscmcT8|}BmFE%1is)A;A$(ZO>f3`gJ%XdmcGO>?7Oue{uO;&D#x3? zU6DVKJ5SH?&4~JvfwS?(hGyC?S|12M^y)p#jW<#V%-#xY%{607rbn*ES0jTMS^7@? 
zBKkI+co|cGFZKCj>dXh`=MxvgKQiB6=n5>(cip`nIWAgunK4@9+sn817_E4wX_2YK zqege>)jyUSy5qHb79Vr<uK7!sz|LGx@0RiCRSm3H?K;JLC1=W)>9fNp_SOQmofZMN z@q4@)-;}HH$I1i$$3q9k;TQV{)4%hJ;L%N2jE~mG<}Kcd-B|J2b2*Rk1!A)|=7Beg zmrG6@FY(!Z#Ai<@K0EtZW*+*c%jc)16QA8n%-}?PAoGV;rIlyaFWZq2Fypi5UsS)$ zM|}21;AxwOc$>r|$-g$M#>f0*bq**WRg0UL1KbZOF0uCO8d(Qt%+<s;ap9|>!_K(y zg~Wtc+ZDuZDkBd-HaU2x@ACZngU7wJVaIaK|I*oc1^kRBm+|1MHMxd*$;p(K%XqRG zPucMK@p`u{gI??mDwhHA<`by9;M6s{8@u^x5^o@;Eo~>zcGf%J+g(JA+UA$iLKOpy zjYapB9XI`?IS0WRMl+tTXiei4zy`6}!ki0UvM;u=_LJP5b{~VB={wih=eHSq0&Sm; ziEquh9TUG9|DSs1oW7fLE|^SUK1!j7nse60oOAi1RE<6}7Bn`k;<9)we&<@-vG|>H zZq3VfbDi_=C)YxJ&P(wPsU-$bz9W8a67^P5FF8}q82_4ct>p^ld;xP_F?>N<5@T9G zj)y|#yxNseU3k%sW%<l`E<W~(|KkH5vw_FY)5n*AM{MQQ<-kS(bu}?Btj*+tYqN=) zjZcR;Fa27g>@#a_{LD=v@F=@)BWDB)8H4&$NV^jlLv?%%PPl1r4D(HR+_8-ME-$#h z{CG}$ZgM@omf&FK{Nvf?c}a~Yn|cSw;jto)U&k)5X@y7fU8HHfmH%Kcu|@3s2cxLl z2Rt?dj}w4L3m58_2@VC5#1^j}K;ANJ2kpY-TkZiB+x{La`8VjyhhGmwd!ciR{$B=N zfqsn;kGO!mB%j99#XrPfZJr`N;vMHR`N;f4$vgcb4?K+@+Ks2%=;vso^Z&4oJL7G9 zlQvG22BP14Ew=I3oU1A$ub1X`4EY|?(>=3liP0cDJjwhV+|((2Xb^rclg88-t6ZVt zC_EB-mgI~1j7PDf)5)vTh<{2G{weq{R;+}k2eGrNE}cuNXWo!4qVw@<l<eVnw+O%6 zQIa?G?q=dPcxMxv2iUULtC00NxalhTfL_Jg2BDqGnWA-ThG$iSPrzv?*Fi&WlOEz6 z(;&|*5AnBHt6pB$tY^hjHsEKS?+K2s_1rtUaWuX}=i!$Y%RN+uoxdtZE<*C#_?VMM z;-ttg^GDba%IW`V@|?X-j<eO|ID4NQXRFC^_A)un29x7#4LQzc5cjHZc;fP~%U4v6 zFJ}d9W+BgJA#aMm)t;O^N@Zk+9JL!f8QXX}@7QZ_C2JsfE7%?@30@j_7Wr&f6ukPF zwdl(@<r`s-&3?CuJ>$knSDNe7jGw>v=Yg=~IzMN@v-vM{^{pmXWQ}--e7UmtZ~B4! z0DF_WF413ddF8TPe2Q*LS+cB*_`kaU(ywAARqd=r+ck)w2bZ_A2Er$eBmZM!@Bw2^ zx*jCX&o4N$(uka5$`3EbHp+Ke^1!N1v64@CMobyyid|QM{F?N@ykhyM?PuQ0lgUkk z?`Bb#%AWMSW<P6CbaWy*Zc<HzwTZAU5!U1t*2K@6tYuBgM<y&^NWQO4(5gAWX7vNf z^$TOM5%S|L&Gq~-n|M%7oWT}fD8!FF3mD61zVf>oQO?qh(z@-tUb3L%Y~s+n?s-=6 z<OD~(k-xy}oUGr*M&x(J=kzzZqw{|m2=`*%FXw0bl7^9~jNf$S4555W74tbI^29Z< zrGI6tHhkNBFUWe{!u<x1Cv=S8Pl+#X>k+-d-G-mWMjqr0Q2y4wA*Y<M@5NY2GIQNb zu9FP@_1p{Xwt3pTZLyN6+*d9X@rrG)#!3o!r;2;WxTn}ldUk@goA|$hvg%sK|IwRb zOXp_<!o<t=3%05{;foZ#b8!`RV;A#yPTgr<mz{Pz?~gp4(HA?RCwYj=Wcl<e&boA_ zDd5-_pEN?>md*rke)B1tzNSR}aGo=dTEDaNsJ-hB*F%r!&qibRUD02^7hAd+`JA<3 zKeKX|{5k#S&s<;4J+EsD>!930X~@dGBE8X>^qx0*_hRre8ocZ4IbQhRzu}|3A|-H$ za}Xu(AzQzDxy}aZJLRoX%<i;!9+F-W7x9Q2FAwC1mIXOmP{p_NQvxC3Ujjez`jhnG zC1O&_FG}@wRM)%E-u=X{RbA>=naMK*d%Yq~o^g^kHBSfEa)t`|-ahM<iQF5T*X5it zEjnrF1ivo8mObAOGT$pdjE(#SwvPK)@6WoX$5>D70A`GK`Dm-(TF=eoT6!EEc)huv z_@B3~XYWWc>v=jS9KITH9av{2ME`5ai}%89dC@nq1=g15!8iJ}Y|}Xp#;bJ;pu5+y zPT=4O;it7W!mYmM9FbRy_36j@^d?VC8o3|R{lZrq=9hTkU18?bo=dL*e`D<$;pLWJ zd+*4<Gq-C0i_qs})~lBH)5){D$JAHU4taeNj3uJS(k*nhNBvNpYDcu9y3M+blRm<C zpJi>Ltj&r`j3xHmr80McW3iFzS(~1$O?!SaBbECAnaqwAT8l20a4L7FU=Lfsx@_`( zHL|DUl+`98Q$>)eHnoxQHz9AXMc$OGSuuQJn&eGv&pYu=sIEcYEJvn_Fki@>ld~pf z5nn{>c3PKc#0SNPObBl_Wzo+gyRrT|p}(ds(T8@v+pSMc12!YU|JRT|8b^WW=)ghN zC!IJkE16gQH}GHei*eyBa_Q;c*TaY|+2sn$Z@T`Iyxrb!J+L?^{cTm6NjLrQoWjgE zmT`_{yUzvQ$Lm`Kj#epOCblQldn;wxp@P7nclZrEg4m<l`uoTRm9?dB=tb5?v~)NB z<IXdB-N5fF{LYC>p6)x}8*N1Gb6lK%ASY>O&O3B6@hBg7Jqdp4+_`<uu?jjj1>d7N z?}D??tXkxm#$5i1f!KJ3S*|0#Xw=*IgW~&4U#EVNM}Wsl)=c@Ur*Lmpck(m*tTbA- z?6l8R@Mo6KRPe{8(R-FzK2yOTr$?L6$&?5CdweJP1v^@_34Pq=Z_5AJ#Cffz%i)#4 z_}k<Veg|BXy{!p3=Uw=w@~<>v;{X<WqF>}(_%Svu*70NLg=pc6Yp|t&|27>9pr3z5 zF%9|lV{Bbn%3bivxaiI7?Er5kjc&X&5dCPdfj>y!n#NCZL*h+Y@NQsnr)14}$drD* zD?9OEo$}u4ob+H<YGMU&`}MC?99Ik!@@JHg>$HdN&(8GBa&ZRkDPTr@dYnF8*~i%U zf?2lCh1_@y9(+AGehGQq*h`D8cjv^Obm!EH*9gzHU@xKV3=`jbMTkAOWF^mntk>J? 
zu>)ZPk`GH4Y$6`>?5T3t^^eFt!Z|N<PJhKW-Dq?6iLck~<D0kn#?8C(d6Ep$4gR(g zyO6H+O}NoImlQL<Pl?R=TWslp(N~2lXtQzs_?+e^$LECc1FoRWHNw>`vvy}AQxT5{ zSWAw0r{BDO<}Zz~cVt4O?Dw}VPUig7f{SijjO-gpCKsI7m4w`r90_`isPe=<YTBSQ zAN?Z>;hznijZxEj&Z@hJ`E|mC^1a@~IOM}8*b)7En>jV%5Z^t);cLiHL45bh9KL(O zxbI#N-@SX{zI#D@_nN3L7eAp~{DjJ%18(u<6HapuWkVtRAm#n>d1id3FA4K=e{ZV{ z44C#RFaAR6Lx0Kx>Bk8A7KCm?%8Q#kS$=f==7s^pWB`tX=LbjsC%!_<;bG0dS(9vZ zz_Q@~IC*rM2b(t5o^NB%duN(E)C~MLd$954VdEPl8=nO)R&Lb(5!nY#8y|CcaT4b) z`x_(P&Lf`<{cJmL!lBj5RfNszd#!Vw7P$(()c!oDMTk9zJkT%l{%ZIGeft<#Ay@jw z@AEylvq-ojyb+(%GbgS!5L4xT#k7g{`DdJc5?*41+|U-Tg>!wuw<&S>CU$o#eD4A8 zPgZ={^x+bGTQ*MR9uhrBGhtlvp4$0@@BV_nPi`E}8{*$u@|F1a0KQ9$Tn~-g%pRfc z>)QjszW8hXUt>%6Uw%!v92l>{u35o4F2HU`?y%i!uAaGmMk4-8+{cz!mn8TGo>v3& zRb#yo;#WnyBYlwqVEw+Qjc_7xUhhf-#_xJo@GUskILUvwA@)h`p?dgc82zFd9i;IS z&Ml&6G^2Y+PgI`k?dTgxaeYzwt|Q8K?MmA5A@WNNdZYBhHLj$hvTWb-`RIk&7rnep zXP$)@-!}20cjSULc;Wkp<3(>1FKz~Y?uCC11!mTwf9YJ%>g2?lnQ^^Kx!9zWy@KBL zGJ02-F;$~qmAaBm{c8RL3H8+u{i+=Os@apULw3h<hkg}Azp~)1f8_h%LxA|mf^8cH zg*T!f(mDQ!ymvi)5+4%&0na(YTj6Qr-aAG$!;5r3pZiPp#x^!=^@cv?H|ZkmGQ4vh z-@biq$GG-7d6VA8#@rDu*7H1W^UkU73jSOVeL&v}cFh`X(sp32QT!mzmzTe7@nx@g zmiU1|{MNYacnmx>aZ0vxY$<}@3T)@Hn=7}WY~{_DVE2cY$$ownKk<gPcqmy^J_#9& z+sSuUs+}U@se@w|DPLnzpiXmzo%{^jx?mh$Ji0yFqj|4k%<rT7$!4N=gYeL@PadS+ ze&9)JB0R1lYfe>K@WhwuUW-6;23;Ng0Q#8E9<Ol5iwxMvJT-wY+2DyE{Aj|Sg1oik zLuiulVhuPYy?zb&Bwc<@Tz4N5*WG7=Hx(n3mTz|@6xDz?I;*e!^&!CK0$?*4I23#y zppF_~v;r8NfNh`<xI938(}B?$uH+ioZDg~Nz2?=pz2+%!v?eEU`BT8|Tbz%SZD92S z-Rd88z;0n2c4Zfp&rJcgMQrFbzRm`IhQ_E8@LM|0UdFi(IJ024pJEe2YwvkBAKfAe z<|5tRq`k_|!MncBft>lSF{6E}0^u*RzWw-#FYRG&e<qoZy;G|!TS33b((%w^zWen~ za>zSmq%FuU%8eKEbU(Mgx78hr@~ghkSn@}3!9_gP_Rip1xkO!&*_<Oj3ay<3-3u_5 z@1oC2Uth_(c{!8AzR)`D3weQG?G1f|uEG8!ed!%h4x-J-J9<y&7b}79Kkc*hap$-B zoMj18Zz^Noj7`<cnI2%qtdBGQW?k5Tth#u1A$92-?|kZ0PHHO;Xlmpl@MRozOyy^Q zC-t;VUQ2TE^ljN}&QI^iH0l{m%q8{9ZnF~IPJLTv_l-8PxTly8e<G)--eV7Jow2!J zNPSfQvaok4=EG<0$LT$;-Vx-?C8GB~HtBtz$P?g9rQ~tspi1s7ga%e22hDGlgUCG* zmxHR1gC2{^K~<(4l+r2(RmbI^DyJNjcBUM3cRO-Wic9oYF=0-hZ_xSXzlCF|?ZJlh z%01{Iw(Umnk-@%Q5ImOL9Rxo$zYV~I4IhGyKpZZfhlZPQ(Fy&HJxO~Vr}NE)jy0S& za^h~Sqt;OH^daB<30zZ55Utl$@%3`H;X+3X{vr@I=tm~M8%2LRlgrF8-`BL6@8SO> z^Ige&Yn<p9ODy|FO5}NHhTzx<KS7=|*EwEqmI=qg$tgNV$Q*A5PxWmn@8~;U5A07o zt7g6`o(+WGj_=Egx7d6|_#?jJ#Fr^1kLn#+s<u=QYtiKPC1kM|8${2vamWWQltBxs z9Jtt-x`l(n#kZjqs(YpIh<d!8?_8WdCvd0|d^BMI+Q7bPD-5VF!ei(677TD+Tl2se zx6n7m*+?}O72Egh{$gi0``h%%wYwA0Q|GyfP2rkl_Su?K<reOPJdtt+yi2wa%)d?y zCc)O)y%wEPdvlIrSBKtcF1H<xZIte)^>C4+x(*sjoHp`9QzlN@9?Il}-}&LS<fFR3 zGqF3)%-IGV**F58wd4tMpA*mF=3j#Q+DBOs7{fe_%#wT#KTC&yd6URbqBthxK=&pY zIR)evehF9&itfkrwr#-{X75?s`RFsK!^~|x%?MqIZl8f)uW|?<LC#t)9Sz;60lnRT z)+!HXE#t9sT2Hyy2+d|sA~?{<IT=vjluI}zQb<1y<yLdleG7FTna3Ujea}A;!)BK5 zkI=u@n1hNNkj+oUURNIBd*6vI{aFs*<J+U$*7s0eM)~v1Q%~B@h|g1PXJe%1hIuh_ zyK8R70TcIs&bc{?TwUpyo9UlpZZ<mR#uJ|#*4D}?&b+5O=B742H`D0fF6QP!Yi`UO z%fp$A8<`8{%FO#cl=AhI+vc5WkLGUeY|?=<Xzpgskzgn;Yl;`kzX!Q(gW&&b#P$%* z2w%(mfp7)1BZ=po9CUXOxLyg)>iwm>pA@IJjeFv}+fjdJ8+!Yn&}1jAZK*wzx4i*9 z`$zLO+gD6H?ijJUF0PHGc9ic`^4d{sZn9;|mkXaZGv0{&xvHRZKKw>}%yB>FAV>Z% z_+@N1Ws8)Eo4EpCqCRMv<Z<mkEN9Ld`4$}KoHlYJvX1t+<il&q&%D!Y$}plyy(6NX zwhSY>Sw(%=(UD>LwM;Q-a!TYfvmK+SGj`DN(sAKv*Q<}q{utY8;QQt#&lLLUv_MX6 zN}%o!hS4*Y7q__$V$acI)M2Z}?+u^0t%^xo`o=kJlk<E#Z^rMfDiz-+eBM6xbn%~H z4@dTW&DlrH7kgC9m6dNzGC>vg41BAQca4py*c3!pZ;a0ev?r=(SpzM&ludWuAuArw z%1mQJ{l$UsTKc|*zMFd|Gj3iy%$16~edpp0)LTPcvSC%Aw`>2dB{rvcEjs@l%-2lh zkD0*rK~Iu@|K08|`NMZ7Uo&%k#FY?9a?M&?AX}E}Zfst%W!=Spas*pp<OA#iioM`P z|KIMCZqq$-5L-_9B<ujJ-`zcp=ncTYZ;<o14Ge?_)2AWyX9@SjUu9n?q(A6ay&Cd7 zp_Ba9Why7|>39E=^>3!HcX$GeKlY^gXZ~vD?gN{Q@T*tPT)ZE>;ehL&^*d;P82!W# 
zBQk`3R=B#6H|;JnrikvXb)`noJtB?2>Kxt=e?gCm?B)NRL9Wod9#`l>;LGB51Efd# zu~$X+|Ef!v#JEe|*=K~_Wxbv6Xb)2F4d?ku^v&O}2YzA@_9p%t(^(_^_eZboZm(4z zcqqPFtYIp4C@(rCcC<zJoA}VbWo2A;_!7Cw4W4Ozq??H5Y-e2)iH|VQlUgIYkYu#0 zp*NNvSHH*<U|#hRLlv0sABXwgk#AGiYJ3ly@G<;==Lz^97U72&#?P=2|HHxfA(r81 zDE~q~{)ZL#A%^iel&|5R&of57{9xkpR~}4QegHbV_TdSU3d-e_H{-Wh_;68VHL<e_ z_?5f7+ku7c<M2y-8oxw0|C#tEK8<f;fd7H`5q^Mw;<y2sGcUzI(M>rUf5HI1g;}Pb zV)x|<{3pt9@qzB9zvA?`zv3GF6)U)>Z(hb{v4HDa@n_VxFXOwIiJzi;6V-<0^O&?; z?RXzZGV2-4Gx;+5slS4Is;7W|yAFMC`!}i%#*w%by`>vIj;g0GevTi_y&|OVRhPH4 z+wwA3LZW;gNAaxT7sQnE^fA}Z%QyA6cujxEH@WYm>EDse;?P%a1qN)IeiHfsUnU0O z|F=4eXu0&3_4G|KxI~xi{emssBm)y0Zr-{5`3_LWfw9-E{~&K%7{90JE6@(k`9(g! zHf6@}`TK2)7l3PPx%YB{!E+<}XlzCOBz_;H1fuU>kr$rvVqkIiFI~5OKQ!c(Q!DBf zw__pyuK-`K0$(2hZ?8T)u@&AL;_$Yk9e7*gfVVXcczZt%Z$sLJw<HI=S#UBV4sUBs zc(dT-6%*c0!^$h|!dr42-jdsax8yjyC7%=C9s}OG$KkCHFtyPEZ}ng4-U@Fccy<+Q zCwNi3QsXc=9QlrTXC};z<(YF$D-Xp=-XMmX<a8JG<Yf2$$B&n~@Y8gk5koMxwad=0 z!l%5{D;tKrA6+}IzsaYRvt9ervQ7Cik>~Xf?Xvf3;M2fPuSj>D{RbXifo9c0vm(HU z^rvnaf#@&dbZb1cYZ||Y_;rScZH9IcKLA~|SEPXNkC6|#7yAZ3hF`n`FMfBL5gKID zX6*-vhRUv)?%&NG&pUZMAJq1Fiurs8&n<gWZ<Fr6Aivx0$aKJ(@BD^cKH#kpm~BKZ zQM_0Co@_3g>HqiMvSg43;8$awPdqEx3N=UJd0)5kKeIo^++jO99gk2l#mTr#p?Vw9 zA8MJ0GJX-}CJ}#%B*rfO3@<fvtW3wZ!Z$RqehqCAw~Saf$UOLl$v;eSaI=P2Et|hE zusAE^B_|KO)wp-G&T*M%fp|9?zL#JuT3r6&ChX=3)#A%~uNi#P7&W%=i-Gk8j5%N9 z=ilj*Zu_IR^+gxmmmj+5{ZkKmmp}cWZ+Qg%PyxSqO@8UzE92g3_=Wt`8}T(aea>Bp z+wm_l{nUk@1LOSQUll)p?BEBf;NTk${A|4GG(Q;0vzz}N9nIall^@jJWZ~vlxUY2u zZw@-o74(tLik;<;@qLW<dC%q(o4NNV;I<yS*0xr=7IBK>cCC8sTK^fhYt^^0Yi*C) zwdy~oT}v{Mc!ZyR=nRPy-d8rpN?w}G{U?pkF@D#i8%t8r&FtLP$n=Yh%Ko8D@Z`V+ z=!vaoShkqH;FZkd9_ehASA;U(?jLfyD~pvMdpb#ml`s9|9d3NbJ@}A!f<|?QMv=e$ z{;tH$b%nkI%-L}bt@9$T$O&lDbHE<NFZ5ozQTJ|*5tdJhd|SrQhY#5U<9x`-doM6X zRbk(<Y{ID#+1rGF1KI1If?cs88JXKSy*4MQ*NaSRpZky=Az#cYWGMN*BwT&o^`+SF z_aL+3mzm>*j*&y=uBOx#X(=vWVLZmAzdanY&w5nN_AZ}4+qXRH;rxi?Gs$F<#Uy`i z@ARWxic4Caw-J2qv)xZ#6k@5?fZH`X_n|W%I`5JFaKU!(!};5F=EFYgp>rNO<Dv5% zdam;wddGh5KXvPFWck7b{_|blLdiSTtc%Wb_~0dlI@7_Pl;oT6&tpqdI?G2_enSHy zmwg0(VGbmh%2%ffS#~AwAn(@gh1S_N_|Et!?1P?b-*Or<D0(3Dy?<mRFf|1kNcCj` z*Qb4$($4l_YIcV&M+UX$-0p+zcciv!(8ZK<w+b6->)PVG;AU-`c@{Vrv)9!;%k%%> zx-M#OT`y<N?tU-Ux~{*$C!_7U`a4`#?JM_+?1xv|>$-_|?R8zvxtUK}*OYx-&$+Jf z;3YrV+jX?!NZYvFcrZ2s-P<fz0Yh4M@iy$(IpXuD?*B6M;u`1%XNXF#f{x^I1_eFc zW5w>x!d{~@AU<GYFn(RL@#9V4Y>NC^*rzV3z~2hLmYo&sv(3b>YX^Q^Z{gP^U-<=` zBWS@_{w@5vUd68~S!W1_Kbp2S^W|m3fH&gpRIg!Ax(0u%)%2}QzW);~-~Vj<y40@D zL3qja?f-B3`pXw=zDws%EPsDrQbNt(h1VP}!cI^`UZe{Avl0#V@bJ%??&AC}^e@7> z5zaVlFNdBtBg2*A`%#MT$6%gUpnq53^D&qhF)B~znz(Zt=JdA8)Qen!Y=O^5CfB}i z=81Q0$i|+VeIENGl>JFYG&9+Hmf3^%lc1sB^hAA}C(Ih2U*ipW%yWeW_;&pkJ|TZt zyWchJ1<9@#+v<YuyI3zb>*itoI<bz}fbWO)&vS(eAG&3C)_DmviS(u5p{a}aVJiXd zBD3)k+QYhe*;lNBewW=dVRx@&U(E=vtKhS}>t-KrAU{Z>?5W5vzlLspAKLh9Xy<RB zkKcozd<+f!8+7s~@U0i%BUeC2{|3EGg%0+ecLhEum1ECE!)uX$!pJ|S>;nz2g@y;A z;kD54J)X^D72nzGO51KA|0vg&eV*?0`Mmy|&+BjM4bJm+r=Jht`GB*Y+xn_#cR%QM z*0j{+eW2IU@Ba3rg^L;7tH(B>XTIb-otuf~x>8yS(K8b|v)2Ug@?{!3#p|<}TQB#s zxYpcx>t-)j&KzXl64iMLb=q?}2m5^w@Nh^aXUc%76|BP+-kb1{&Po$UqSA;KvR*~B zH-j}<lAK&q^bjyY-4kec#F5xWE2eU9aP}J3`fAqvD%L&^7#I)E67w8>FWXvCRe*R@ ztTXi&1v-U>0MnDM8&dPi!^H4E72B`?JexrIhjRkaMXpYvS2H_>)HZv|(L{K{4*mnK z&0{O*#|HMdE5}uiRX--uj}M6-qgbCsi{foCA4Qd&LdpYUw~>sUTl>95KXJUXxl?Gm z<DLD$@ymK=rQ;o=b7+C%omYq_wN>xbI^GF%4y|##vo;#r_=4V9=Xj^GbLeHqI}6c4 zp3ytcJKovcIrP5cof({^uH~Io$eEeQV$$iQFG&Y{l$guX<9ZQyWd-lVwszfl^7b_5 zzCUX)fVDUuyt@Fr>l(UI=XKdLv3(|`3ubV>^D*gVd2RJBOUF_>Goam0oM{ICZ2ckt z9=6pnq+i&2gmjC4w)3u!LmPD7Rd%2pXoC0lgjQU0ffvZQBcy{Hw|ker-wC@O*Iuqq z<6N(^aIP0H=)}3k)q${dG^c)M?p3-n;%6SL`Q<F7xlbow`yl$Aa5L}(@|U@{W$C1n 
zQ%~K03v2le*7Ro9_Uqx3x8DR#bv1FSVVV&-%I`*C0bh;OS0}}mc4XHtV6VB6GwMSg zzGb)nyri0m(22nhPbJPYXF+JE5FF3`_JrL9&|P91lRJL?GU!Qh*nk({XPV+MMoDKL z480fzy?Bf7w&2$?3|jFNv|=c<VhHsvjMuY`dWJ$P#CskENA0?{f$Ne5r1RSMvbk5t zSsnVYQ?h&^zdU?PD&QB{=Oxw*p|9oeixu!)`SfSehIoXJHqw>rgTAIK?m05M*Oih0 zJ!C$;{<gZjy+1IN{%@kLE%;Vs(f@q<KZO1drmgAJEm~(l>xR&Oj4jb1{kP(qr$-b^ zZWy%eh#8mN9obJijh;^aWB4j-k4p10j+~K?j_(w@l=%u3Ut05pM*`7x?D2j8jr$?x zz0kOQ(753PJB1eXOsaW_HfFLunt$yB_&A?8449~d-n|Q4G(hV#kBjKX#mwDM+W(OD zM6)%HOvaJlhNdpWAE!N<TErN;Q<oQ-TF?I!+6Xe<m7I$gP1Sg3(9Zd^^XOu0o#(KJ z_UPPBA>}u29S<~4YpDLI&u^RaknERD=N;&jy|x7hVjJc&XA_~#>Z^SJ^V`s7(Ny{K zzDqrV<42cS?YemP(U&@f?r^kwkp1Vqst-D@`t(kI8#+D_+WNlgdBRbTyL0G=j(XO? zyIxm4D;@Q;q4mkc6?;?lL>%>u?;LvCQO})+VjJb_J`sM<+MhQ3;1$}cS3S=<>Y3X) z^qQldFMJT&xJLEBGg|9u%QI-}Io0!mqn?*KhdyxBbC9?)%Ty11#jJ<5%WuP1Ceqf= zc}IL@5V-bpbUf*K;xRf0blj8dkFuWlokTuN!`9~M>wnwR&#!gHUp=C8n_BmWse`yw zCj6_ehaC01%XbTS{%JB{z84vVycx(WJJWr}p=sTTiA7yQf#)lMlP|KiSFkQ)nA05Q zV>DwK#duTj0q+K#T`;GAC?DLB9bg*v5u0XQ8mAe8?U-;K8i)KIl^uXHeuo-8Rm-}` zhDQ6!TXlDrK*;XLTgXe(*w_6kIPe8%MY<cf<@-zHG(xfQz5w00G)^Dx1Yf+}yfuQk zqxg`s4;MWEZffsTYp@3xl??xZse5;eybA8GgBH9#`3p5Y+}Kzr53PBMIz@jDfq&hF zoAmQ(>J@$(ZVNAGQr7zHryh-6@Vn4st@9jYo{44~10u?GG0|~<hIt>KI_}SK++S$k z?;rUq_mds>*P8eHMkFr{b=-f&yx%V(|AzhaTVqqZ`_22Qk!{@9zS<s7611tC>92ag zV}z%fFl0a{0%6wnb*<T}y!&Y~<EPn?&VrpD!20vE7YDDrZapylx*r%C3JhHthoLXV zVQ366Bp;SOz!3Y*&prg)ds%DM$N7iQ8xB~A0SiyCpJKz<R$y$p$(NF9E@qu8;7fwB zw~WLV*^mTd;&FnrD)BmCteLT10vyG7XAW>Q2-ta&c#9VUPvV)6@J%quTXQk+G}vWC z=dsT63H%%19p#)k`>N3UWY*n<zT_r{dt%Ew?91K?Zp)tjF7WW;<Uuw6@q<A09qi(J zcvt(AI@iA+c=>WPwqaT)WAdQu2G!h6KkhM*ziGFTHg_9F^bz1~_n=Op+uRn<nG&cR ztA6}|et4nHl4Yw|Z?&uS&NJKW71^izz=wT*nt6Xf<nP=scif+6-tQmz6ZZ=p_n$QH zr$u&h-|x7;)x6&?vV;2xj{Ez}`+Xz7=e~S(l~ZIdW0kDBf1VNE)=#twIKwAL^B@?| z+B)Z=3R?UK-+!8oLHUPH-q8s?rZaL^7v!(5$YEY&YAYX6?JviLBkUj47=b9~k8ASD zJ5&~CZx=d*z0o|cELo<)<>L&F5t2P3=<-%$Q{0iy{WA9eWEWq{ROVB1)!o2f#4|4E z`6~i-bz6zogAMKkeg>Qqg^qM0mx2-L?Mm6P@0YIdCf4jNeldQ~vS<o1)#kB2$T2;U z)p~Y5zp{8AYuP|t(Rb0q<?F>+O8<MnLnA&AP53{szg(xippSs36RE^qfyN)_{f~i{ z4?PL~-(nxs*}-?w<y!FlI?i7G3C7z@UYid+{lVovktrU0sMtSx&OKlU=aX9dz&X=m z{39Y4(--w2eg=y6`$RaGlVkRQ_K))HN!n?lU#d%ag6iEVk*rR}M(L={^eO#G&nzGC zpUqxmc0oc!I$s8GujfZt;|t?$xWOU$Y3<{i|3lrG$46COjsMO}Aa@cLAql%C0hc7; z0unH4%OqGEHmQJht0jQd1l({zaVdeg1meOVilQx`wq^!VRHDY(SHQM!5VcqotF1NQ z(m-55whS)w`=00CJGpb2BrutL-p}uk`<c1-KKDH5oacGYbDr~@a~^?v5^EO%az{ux zFY!Lo7Nf|eF^2Icb_TIQ8nP-)*^pU(R{`T)`MwX|ioIOojvDyhNBb0mUl^W>Z-c<x z$$pIWT*}CLTi&nWlJlnaO)RF*{G7gXE`8`{tcA}Z_mW}13Rj4KiVNR3%|Fhj<Bmm^ zG|(O^kgM6u86rz`bV6&+s%Os9HEblB??wIl6?O98%lf_yc?XYP^zuy376QInABdi7 z>4-A_O+fBUK-V2#j$Ur+x&mKxzwzc=Cc2-@AEJ+a&hMvboG(@>Ia!%Q!D+n6RQNIR z7bjPJeU~qE3Vu~<n9C+`zjnSa^gMIhH>}+S{$hAC1lb~Wtf!7IxtIBDrKxAid?tQE z<IxQj&EPm5c|E}npLM(7@@bkcumOC=qxVT!k+tKI>sQ;#zQg)`zm!>NE2DMt-)AfH zEb>g|>G9RJGRzBWY-R38w-wno9$hR_S9vG@+qN>-pks<|I37JKQigfuD_fcKSig&G zTZJ4F-Aek~r^GhM`u+FhX&3p{+Pb}YAJ>c}X5GHtT(>*c<!k?cUYE~B-;;H@?8nKv zyy1%Gb$S04&Fk{|EA+=Yi|;sP8~A@T>vGG^Vfg*X{<E<T7r!6T1%?shQR3D{mde<E z&KH=8UxVj2Jtq^puw?Ae;^52yzj$#bbNLwTs(G^K%CCJ-`hD3iN4`z`J@FsFwC}ai z{@C7nvmR}GEq^)vLe@+F8XG$<!D@GDZ^=O>dr*>pOmdEmCEr*M*Hzd<$B}m|m+Kj9 zp)P!E%CTF1iCuFb{|oVxc?SDt9eY#L`Jaui%vEB$VUH?%z!T>^gY71VeJb&h5&s|Y z>k)q&@xu{c8<%{8&rSe4Y!*I20qn5ZckVb({Bd?-hZTRGT<ovngCl-G;(IfTJutC# zmhip69im|JO!mgG<u2q`U|V_3#Ft0l$=;X16IinUC9s-psws~R0oGt(-D}H_R>uGN zCM*k%1w+0c_?dRC*l_32@3a8}&y)Tn`@nsG(;IkVlitT(Z`j?ZG2>F~zP#VaoYP?L z|IVR}jXjd|AoI7;A9>f@AKSLgD%%pUWp8eJ1YQPb&6{>7-bswlN@T8Xmb=JS?i^dW zqpjDX<D3Y*^x$c6(&3Dm5L>VP?7UpoX3S}M!qb6!?$}z#KC*RQp!iG6Ie1UxOI{RW zDmdR%Y@EN(S2pU}29G|DJZ0aCeVe`6P#JcSy!lg`Bv)9@{LCi%IeN0rjJ4;`d@imQ 
zY(US^)&6KvD2)EB<HwTONt?~S@su0M%`k`h!zXFA4JS#Clk{K%|G)bkKEK4du|}Mo zO+2*vKBFikwzuBsCH0&kD>)>#bN?PZyk&{z*XK>GBwoKquX%bU_S<}ct>6Kxo-;Y+ zzw2>)$|#3!c)mW*TN$97z)|o90gwOCy_A?{1^X@jm*RJ_oqOSt=#-@?T4)2$ZyZP& z^*lZJ68AoGXqOK3)v}JARZYLG#-_Lv-A(k(Li&?)-DTP6o%oaz`>*QhhTn?~27c^Q zUL;qgRp-Nkn^|9g7=SqgwOV_5;)|y2L#FNRejIIP=sg-^Q0&oJnrls#rme~1>_YlO za2~LPKKXw;|L64+eX209H@T?E^6lL>5g4X_?-cRL1vdkptTD-(mW*BsTn)GyaA(?J zl4A(?`1k|&%EuVT#G%RAtpa0bvcaSI@CY1`*G1@YvWH>lQ}Uhk*%JAVHr>vB8UHgi z&Yk4{-sBbiF5un{YrXh~!z=rk5W78ea~88JID<Y>qW00#f)nv;E{o*DOg}ug3%Nk- zeO_R^__*j08u1n_@dO^j1L29qYvIueHl7T$@noRElYEQT6Ku4e!hgmw`~RB1KmW1u z4Truh+T&;V7vR^-*jTKOVGq{jD=2np`F1|6VhlU^IS{^Fbh6=_B5mu^*vsNqL$8(J z9Da54ndrK?T+)ZFyjdE#rL1yZ-iZ!xm6vbLXPd2O0z<#U^;p!scLO@LQLj~p=H0Yh z;L5s8e&wv*{b}qo+0N><`klzN?StSi@+1TJ>-pxl!#FqgNnaqv_voatH#~iPPIk&; zvIke{#+Iyf0sI`Kzp;K1dc-DR*{b)muCUI*mHKbu8++OHjJ@}Y;WcqFBD<>jR`LA; zgYTvEr5wH!-x)m#-*;j-^)BlRGRbSz@B#L5O)F3|Rml0n9{P84oX%mgQtI$(v$Ke? zk(@8cqbcMMpY0;XYCAE6#nj`L^$@?%R`5~}ZSDH&e_ng#a^nn9^5(BAM>b_@X)5N| z-AB%DO$*+AdU%=G@JiT!dYJD{>cyOw1&p5BEpxhQ1=TiLlf@pxBhWjoGrCzfUxCEt zum)T#a<i1OxzL?Ue`8&z=$4p7DP!kJrR`mr`1l;FrLk_FEj~UGzPP-3LPI*=mr{mY zP{T{RHS^6*->GfTcOLXf8&m>MX#6I_kZCt`rk{~}&Q4!;C@ivu{nO^Wo3=QnEn3!4 zCEHVoUlzGhj{RH9S}<wVD#i_cvYz!oHFe25<WPa0r4?w$6NlSdc%7NI8C;}I#CF>k zyz!5c7^W?nZ<zla=Ad%=i*Y~Hf9j9m%{$#S%%5_CFMywd-^&{9O>|xLeEU$9FGfA* zd{ueoJ>lHYF8co{)@0M@A0k7&Cv?Uxk<zH)BO&8J-pjgO{^tO17da!&gwLv8lfHkT z*W2kUv*^RV&>DRY+UVQ<DD)9SX3^KhLEk}i^n}vKJkb(;vaZd94;C(CkeAk2*zn)* z6d&`U{e7YR{a4{B*YRG-R^rDRJ_s-CHU!#!2&?$9&$v(FUjJV6Ys8Zuw~9HSmGP|f z9*wcg*e&gfU4i~WZnu6r@!3QE%v_0%AaV@bvzmLC;d?B7@@ez;W%HL<kfl23MoX@e zS@h{r;Mm7S8T^+v^kECKXr+HmvBrHRa?u(W_A#)5e$rO|A8YmxpTw)t|F^FMNBY0m zpIYl5skZ)cG5Be={&A=9+}8iEO?3Z2Hn-G2L~h#Ke*<IF;@SKI;VH7ubIH#cyJigG zm9+VNgPZ%n4DJ_lFYUgcct*P%x8|mm$YbWz#kn%x*B|T8E*b8ZzA$P-m)cJrp+4lI ziXE`YTiNTg%cK3oy;$<726!uxB`$R6?D|vvo!}>NxB2o+#+CFd@AhE@{{^=6Cm*(^ z())d9V5rKMc1K?=P;;57i>^m6%!WoaMoc-d1zkjai~U>L`AV~$SDNiC?_$Y0KYdH^ zkoiSuBj!0G2T~mEyq~pxLgm0wwDZqtXW>cZLGjsBdT!pa$lX_HZ)<#ixh=e`Cv9uB zeFOL1+GuOxm9%Yt;TLm)@JXYcG}<ZiPVI~)>X30F@vYX}AoFDDK&_w%-dSxc{CfcY z`Isk!pEsIqS&p4g@W#I`zmz<WZRsAR%%P%tIOouLjy^!_Ol0ia`<lH^j5GU$Mot2u za|bpd8T)PZi7vK2af$Q^+t|NL#+z;IU-y65CpI#Ugtx`}oBPBg$Rdl+JNJdByg>i> z<+;s$Vg`L;VP3O5KucYA7x%I9;aPq^Zu0FvG|)vZP1(cE2%ZBEO5lOWoztLy;S-|+ zvfis4sD&0X=U4VK)_;}1^#m%hi!o-_9mIaQlXd7Q@(`|<`H$xk*KiOYj0?lznj6m3 z0&`fGTII#oUW(qckaA_{vaA`@JA9EtH}FjGc#!Aw;eptB#V=Hz-68j^KkfDf>&!{# z84PJzF2BTE%6JkxLN}AHmC$wQyYRArNmsQ=SG7r3HFQ;jgXFfOzp31LCHz(r?*gqM z`4#<V=_JD!U1+KXS1G?8J^Xe48?w?^C(2n26CaIU=jzP6G8XwxjdvM~8H~kJ#*ogl zYGkT5+mKt&FJpc}CR=h~*Za|HDxEczeJw2irG8l(zp^siyA$m%y1nzdoY+^7UOtJr zAib^qc+2K2@x5N{o=FlvBx}clWrZs5iSSqGnW}jki)sJYCBF^7b?yw==X!UiCu1|Y z(iTbHa=kx(iNMLdk9=Fa)A4Q1DJwlAhkWaqJ$!q1pc`}nC&?o(>!V^~a6akj+f$d$ znufe<0rJ?%zRfOAPEhp8h6?sS6r9_)a-Pthx(svebF#7aQMx>9@P4i&$#L1UAx$eF z=6~J3WUWTz5HYxm4-O-@0PChjeX#j_<O?nBtF2S9n=1;55yu|HI%hQgI|C>yc{v`> zRO^e{MZBBJclhD-)E1s|YiXyksXFJs)AwiXV0~1HeJT^5%yRyBO+z;_VKuN`UJsoM zxzD}BH5q+tmgK^RN8<!OYnNJqQ4SBwz$cgU0*3Q!1-6^@GOwJMQ#F#flHQT?Ez<F^ z(es|eS4_!R#_u$DvS0dv?AJ{sucy%UAY<-5&fNMe&9E1V{%_=J%?OUHWzHq<h_sEw zq~@MbM9epF64YzqQw$yYqKZh4mIB73$V+`eh04()ISVUKJTJ6x;CaeMB>t$CCw2?1 zgx1RA&I>&_u$y{bd0aR0T~lU$1?wT5eEx>tG07-Ze!3vGrTnh!t6@va$L~16m=`-b z<wCb3vl}#Rx1MWjU)F5J&|lqE8?hHy!o$-4_(Iv>uH$1>)>-)Jniaq&O6%&Xg7@om z%~#7fK41EWSJP@I@NT8-OQDCzdPUaYF6edI#hgB@(oeCeHCD6!t>erC)fe$G*YPp; z;zyp+6(5#kwb?oNYVUNFFOgU;a_JGf+>0C>`Xb5QI(?_C_cDX_x_#(DpPVD|FD*EM zb}eU*O3qVc{jSz+J6OlbK7|hMQs${w!^_0?!#WE9KZC<^jHZCkVDK3QK4ZvPx(j^B z3%`7qCzp0kA^wCh${5`ES$N7|VD0<yle(q&(K04?7cDMSa!uBc|I0c|bo%!=J6wFc 
zrG9yStJ(kLo&E*${J^4+^h0FAI+2ehpBw91@mC2FA951kR=a(HRq6Ot{$|qFfyjX4 zPs4`vZMf!lyla3bia+(?DM}Yo{h2(ceA|cj(Rhh(lAgB`+!{Q+m7h|9ltDHXrj}3Y zb{gv;kNe%0;*TsDhmml%$T!L7B)U_M=2LRcl5v9Pncz9q<56d5R3JwU`eiI-uy>x6 z$Cy}J&pz`f#EA`LyiO&4SmYY}@3ykNf^#Ux*cd7AE9h%eIhRAmSS54HPVlhDk)<=< zoqf9)TZ+}UccL>%->#3*x9{T}{Y&jXMQpxmeRX(Dg%wXBxGY5G2`&f0MP#JJOf}%s zBr$Evc`jq}{_XTp^e&zFb^89gO7wuI#8(Cxo$N~5OfKhjuXpzCVZNWDXhQB+%b6g^ z{S%P;I(`8=DlB;<d85;nUxM-p@MyJ5zCrH<26?7~_Hr-399~v@<)p2HXbXe?5<8k6 z{O}dSm)*iu_;v$6d+X^7Vmnh~%v@_|P2nk>%`tW({e~REH7`@Hr7^Y{8Tl;lMHl%m zw!QuMVEl$S)c<0$Gjb_wd2jK))tnKdlOy@Qg&!hgiFZ>uBc!iMn;T~(xgs%YLc<8& z$sWNl;ddtd&hmvM2CjiT1OjK5^p}SULt~lu>aLiywdoD5V94CkCj5j7QXi-w=TB-C z&p5vWn;-KbIqLFcF9ZFdXTw%>e&Xjwdb(`>eyis1MVnYLbjM>q-SMgBPexY|x@FGz zT>1KC2ESx{d_RMD<5aCS>;cB-s;z1SR?4h9Rh}XF)($Hc96O8)fk7UgbrSn0F+lRY zRqv;af600Im)&ZeIwJaAzhJ+w!pnpwrL?aV_g+Fjzi)JS*?wTYirsBgvTsigaU)h= z%t<!-VitW7TVf!@+^pkQG7}#ReN11;R~{-(GvZUq@Wq%XwpnDo=&(+IW1TkAL-UsC zHb0EqQO$lB|75PLEW++ZPQ6^o+d(eKAbUIw>@5`|FPOVS2f$rqsV4D&Nya#GyR^;! zIVHSI?6(cHv&?-`PHgJvVc6x3b}@2R^a@_b7!cms`LLb14)U_)g-0j1nhQ_0os3EN z-kCPH+HMg2fE=y<o%9c@zR&0f_${dX;X9us=PTp&zMXQOkQS77$U=^PqQ;BMe~26$ zFOb(<_@B$#Me@90=Uw_F@5fh&zl2x$w~W7s90JIZaTUawz;`Roq+H@m?h&29h%=Eh z=Y(G}cZiN7{F1RduF9ukN;aT7jT>#oluR~aN=|0Is{Eh1kBTXwo>(y@D;+T<l>+}k z13vbSDdQ^4coGY~>6h|bDfgIBZaZ>++#6<0iCIo!N)AYwCv9asW=x4$M*N?@k}|7p zWk#DZC1x3kDUq|8Ia|tzqmjBQ%$O3hjKq|PFXy;F*~+|O#*~<4B&I~>ad6$8{YYWR z@P`mzh<?g1r3o9~7~U(tlxzFqyA(U7<P!GXis_fN%!!hpTXJ$sE}Tml7qZ^m&ixMN zR`DAa-*_XJkQVI6+Q+ABBQia%Jvx2PgHA89PV5fSwo?8B)}pe;!1lkc2|W9-_9#XE zuc8l#{O`*=Fv`=ramRv3m*2qtk&N#e-pM*6hjVhI-yJ{~tVAAIV}2xKK72p+PtG+_ z<0`+1yuiKlWLz<~=BsDV7l!`Xvwu}ca-E@%=1eSnC3o&^uZYfGjvPRT?2${q_c8}G zkRv(&!<?qNlxE#>Q;Jr5FMXx1o3Vc^K5}(DJD$ETyt3pJ^Y)YhjPDo0+5UVx{aEO8 z`iUGO=HvToFOI~0KvRS+Sx;r&r_HWI&KZ2n2)^(-^QEo667<8L7loIJz9{Dj<-ixa z?z)ii+l5$2(OpGv%;jvQ!N|%k#7(l!nq8dDxmKJFbnj)_`Esrm-}YR1!is*qUEWmT z=|b|uoovag+QN{`Pu@<(90GqsW7iahGP6AIPa7<;$fLH&cuu;*mF6BT^2NvnLH?V( zzVIoBb&{OBVBxBxOC$nU^xah9(@t!w{lT?2a!<8+jmiaG8)m)r1pK%9M#$<L$Xu&$ zG%eB2e{<3qtYK)knxfj=xpQB+_}vt8U~$$DcuN0xvuB?w=_6w<D-7KX9qH&ZlK&!j z_pi6s^)%*|z}?2&B7Q+Kx8xwJ0&^th#9M_;Mb;}J7k+)R7gg^eKA%f+&3yyBw-y-k zvk=|!t-Fmlz0C{QqXD+a&o#gi`MLS-X8Bo;tYV(QK9U~%7CqG7H_E>ZPnio`_ib~V zM0XZ>|5L`4#JG8AqdMquZAek=#Q4nnj6QuVdXTi!UnsMAktu87-DYB5hR~PZnPkY| ztMPFb8T=Oab{Q<VI%V)`@OH}J^`@-aOdN{L#Ufuf6NfV1R)@s(Z4x||+sY6#a+R%& z$Y9Y4H$P!3Lp;d6wlX4vWlq@~w3Q*&;~85Sk-?%9ZYGYS86QIiTQrLdmj1o@Ra+Ti zG`_UK5gGhAWuj$p^ZH!;10N<g-+JOSmQYS~MWw@_FUlH6^j+}@b)hTD^PJm_H9TW2 zvVPB@J#v!HV{Pj;ZCma}v27JUWt<Bkzj7`_QKc*JUJYmD5bv9l)J>Ha+tw)lzr%B9 z9zJ`0;)^b8C97W1Z^f4JB>GK*tS4Cai%q1SzAC?2th-Cib1CFk>c8O~S6~SJyp(r# zd&990McW(ngHC$``IT(;hKFqShDg28tLlXpvFa7uQz`a_%FaS}T4Oe6G_Byj?tBhE z<hiVYE?kP=3*$)iFUy}n#*F0ETL8br7h^BEB`yDy$;^|bvcIBfTRw$XI~dF8acZq; zU-L;@qF0A*Ux!^s%A0nHRrGnOXD|NXmLJM{<X#MWWdGj9{=N8wc*v`v`K@y^q^|v{ zF8sZ1b%_l@t=G-|lSZ8~@7{yoiQr|;!=FNr;BqmzNPg-vY%G<gjinO#w9vG%<igjM zY%FifJZPtbvUXc8_!}{d@bZ}7tK671;v;0N$3z#f+dR^PSH5JNiDB_&C3D&m_#ieJ z$tm6-eJo4cBmG`aLeKA19MrvBJ-D*D3c2QUVc*^<XPIHQS)_7s>*I=D`nXZp{pvYi zP;94qQ?zX-*8G@C9c|5z_@|@_JdL?Ve(nBG8`#Uu){f^ai}Ie*E~4*jI=~(U{&BK) zwcB}1HoE-tMv&|4W3dsjUch&%WWfVxmn^uS>pre~xvIDpaV_Mk<hti<JB<1R*m$`1 z%hmnZCcD2-eq~Xu=xl){*shMrsLK6CZk1k0u5#plIyR$9x8WzZ8s19Vzs{V`IcnpM z_ZFv?vR`QEZp4FlGV*emUn+Yr|F0QM?xT#Vl%Zn_2G70el`+hXGjlifQ#Mp-qoFC} zQqa-`Rvmq%8oRfY_fmcoJ|}~CCS`J&KUfElx=%}&ardbwi8&}O$XO0cE5T(Ub<rnl zOBXR8U-0u<;^BKtWj=05CLjAEcda!>t#guFU9WqR-=!V=a=vk?n{{^v<B)sNZDr3y z<$A;>SKuoQU5;Lt?n)Z>*td6VEgi+W_m10_aBkq{Ov<kts12WsT@(4fMRe?UnE$^= zuR4fcS&hyYA~vdD+3ig@Jk^LUGUF6q?KM3`r_2mKK1h4F%rl^BRJQhf!`)g_7BNls 
zygRvkoSZi)pQgoIWH2r(ze9_6$Y9=Dey0}CP`>F}JVW{J(&8D)cefVLP`()qPbi># zruKdN0xca;|6=@<SQNNk-wcjdND4e_abUazZchSl&f>s$FY<A(z+1T_Fy3gsUqRq) zSsEBGRmZn%Szx@odAktwjx7(2C;WiGbFB!BC-Z(t;JH@@#uNRYm^U6<5E#$MPLA_> z6z$5$PO4^U>(|ImKE6kbS7hLqtMAp~{mLNDb!N5}uQ{06i?BnHw$0JvwHx3C->=2{ zl|g%qnybaT!=Rt+|9}?nE$+WXz2!X^xIT==bBRCF(lZ**rTtioXEdI(&eP%<jpwH2 zYw?W6b8`x`ct+#7rSr9TM&r4XLM@)KXVGtLSrk}*M)q8$^Fv1VT)t4t*T|k74{Gt= zG_aRri?nz~^;=n_#WSkk%3>{^QT<jf*5Vn}Z{-p#o>Bc)F4f{0)$e=Dw0K7KTeVz^ zXQY>^6<Rzay;QB#;u-0sYLynxNH0~ZwRlE)sam7OGt$ckYqfYrdT~9Z#WT{2Yn>L) zNH4C3wRlE)ajn<l8R^CKh!)RCFRn+mct(2pc!L(tNH3=z)8ZNF<<#R^JR`lFdP0k5 zq?c10wRlE)IaQ*?Gt$ecC$)G+dii{l7SBj8)umcIBfV5_*5VoIrTQr?o{?UvpVs0T z>81J^EuN8Hs<&wIjPz2sRf}h&7x%MTJR`lhw`uW=^y1#G#WT{2`zKmFBfYqv)8ZNF z#l1s|XQY=iHZ7i!UYcYro{?Ufp4Z|T>7{9>7SBj8O}n&sMtW)bsTR*jFHJww;u-0s zdAAnNNH1+KXz`5n()M#Lo{?VKUew|l>7{Lt7SBj8ZF{wNMtW)cg%;09FPC4^;u+~h zD%0W_=|$SF#WT{2bU=$|q!;O+7SBj8(jhILkzS;iwRlE)3I3%P&qyy}uP{6@E|sio zJQ~H*M4mcQ#W+>WPgojX$>BU*`{&8Zl;!i=EJ`|UGD{t|GQ51AN_-#g`u7;Tgr`+c zv-d+hE#dUm^R$e^%SF0{;aBneFYxy=o{p+w{9U&)c}sYjcar7%U=jOX&f%LmA4Pcj zZxPG4@-~KFIgh<p|7jR~mCCm<_)4BHpQm}BGx)KY41W%X&*Etjr&q<(@_#aX2d8&x zFMF@z?@^py^%o3Y^chP}arhh#FX8wuk)NkE{JV>P&*%B$dHyC2ujKe=IQ}vIef^JF zc`LcRMf`mMPgn9ZpQkono-&?F|6=m);_oH*F}-AQ`o}okySRR~a6Bvjp2y+qxt!bi z_d`5g$>CHdH~!twzzq%D(7+81+|a-c4cySc4GrATzzq#tT?0{Ucz<=I8((i|;D!cn zXy8B4fJv7}+#@1V{dXPujo1In8W8p4jT3TtMEzY$y!cmGoUQtoEG;Q#sg<WC{Cf%i z?s%AeKgv@R7|?Gbeu5{ZLLPc187^DnDe?O!*4zC)aDeM+7N4qqhw+{BE=vXeG5+r2 zaYX_DZ__XkpmG`gD|+Vo<jUL1@QmnuUpVcz5ogB(=H+_0mcA<gOSwe72t7zT{fl!4 z0?(oQ9#HnH0j*d2=>JW`m;JwdPl)yV;s3wFH(vK|l<uFkirFREcshQ6|E{CseN>vT z`LqDASiqK`Jr^Mrc&qUI{M#KJJKz8W3?Q#BkgW7}?SBpYo|kK5{{Mp^{L}@0o}J1< zHXP5-p`GF9npX0CetNiQlj3`4zW?<h-Tu)>czyx@mY-v9=4mHg@j`U(H(qaO;D!cn zXyAqhZfM|!25xBJh6ZkE;D!cnXyAqhZfM|!25xBJ|1}MC;{tW?ho1agqllB-%kM=I z@OXaiTHLGgv?X4lAnwHw=R3vuO>sU`oWB(3E5-RqaXwO<e-!5%vzISey`p$Qd}2mg zdRk`YnE3exYZfkuU$H!X^_YzGj4`Q&qm$wbmliBv6u)la>NUmqFm~jqky-JHMQhit zTr(~urGQZwxoE|TMN1d1S@F>7!i6IXS1e0eyJE%C!lHuW<ta;-u3L7~%GE0tFDzUe zKQ^l{D?L3eYixQ}W>#j}{LuwtMyD^#EJ#nym_K&xf~+xF>0`$hW;~b_pXilk+O&J` zV0_oCTb?9MD_*{2;p%bm)22-l(xl9PsCemu`773@tX;N}es|nGW5%inub-PVIX8D) zeB$Ig?@5YJ8##95`uNeKQqxDJjvf=AxN`Nv)eDy{gl3W?$*VEQKW5aJbU%&F52Ucn z|6E~NSy=_?Sqm3tEy#FqeriVMsD-JSqq7R;FC0@iCNpbP#^|)M3m%+b7+7JMio&Fk zYl_yaUA?wozBF?AinR+zib^Y9uyFZW4qLSRp^=IfZz^6Oja<EALBZMrY2?D9c@M5G zShjFp(Sp^!H)-VBh3k=VEklBWikBC!oma4Wb-^RjNcHoBcoA?y9tll?xCDDaewKay znl%!c%rX+@hxteM3y0$8jYkH>D}+~azzfQo??3(eOB_C_^M{$^7344;&>!r}syylQ zj|P-tr<Bq^E6;D``R`i9;NpL)c;M+cq?BLCZ(AI%e6cN75^3)smWu!N{PXZZoqyC% z;uX|UKVdmGZ)d6apVoQQ|7PHY@_Xa3@fP+uo}iMny{|SRYgM4W|K{M4FwH_RzE5AW zdMWyl@nqG!Nzy#LN8!iS1q2qS7y0R%`k#<rq|5QKD}ExsI4=;z`BMyE{ZHf*>7)2a zu3F?5=L`~g{%i4n8hMrSb0%3_{w$u~mC8TxMDP{Rop?~^4_nR3AGVt5uTmI>L4LAt zb$+RY>#v06pEE#kVrjhgReA~UB$Ft=RL1hp`31|rInkocp;O2~d?o&)v{;hJ|22J7 a{S&Y1)XQ6icgsG;|1}ki9z{L@{{H}uTN!8o literal 0 HcmV?d00001 diff --git a/cruel/dtb.G970F b/cruel/dtb.G970F new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G970F @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G970N b/cruel/dtb.G970N new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G970N @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G973F b/cruel/dtb.G973F new file mode 100644 index 000000000000..8c5759a06000 --- /dev/null +++ b/cruel/dtb.G973F @@ -0,0 +1,2 @@ +exynos9820.dtb + custom1 = 0xff000000 diff --git a/cruel/dtb.G973N b/cruel/dtb.G973N new file mode 100644 index 000000000000..8c5759a06000 --- 
diff --git a/cruel/dtb.G973N b/cruel/dtb.G973N
new file mode 100644
index 000000000000..8c5759a06000
--- /dev/null
+++ b/cruel/dtb.G973N
@@ -0,0 +1,2 @@
+exynos9820.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.G975F b/cruel/dtb.G975F
new file mode 100644
index 000000000000..8c5759a06000
--- /dev/null
+++ b/cruel/dtb.G975F
@@ -0,0 +1,2 @@
+exynos9820.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.G975N b/cruel/dtb.G975N
new file mode 100644
index 000000000000..8c5759a06000
--- /dev/null
+++ b/cruel/dtb.G975N
@@ -0,0 +1,2 @@
+exynos9820.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.G977B b/cruel/dtb.G977B
new file mode 100644
index 000000000000..8c5759a06000
--- /dev/null
+++ b/cruel/dtb.G977B
@@ -0,0 +1,2 @@
+exynos9820.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.G977N b/cruel/dtb.G977N
new file mode 100644
index 000000000000..8c5759a06000
--- /dev/null
+++ b/cruel/dtb.G977N
@@ -0,0 +1,2 @@
+exynos9820.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.N970F b/cruel/dtb.N970F
new file mode 100644
index 000000000000..3ea505ba1eb1
--- /dev/null
+++ b/cruel/dtb.N970F
@@ -0,0 +1,2 @@
+exynos9825.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.N971N b/cruel/dtb.N971N
new file mode 100644
index 000000000000..3ea505ba1eb1
--- /dev/null
+++ b/cruel/dtb.N971N
@@ -0,0 +1,2 @@
+exynos9825.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.N975F b/cruel/dtb.N975F
new file mode 100644
index 000000000000..3ea505ba1eb1
--- /dev/null
+++ b/cruel/dtb.N975F
@@ -0,0 +1,2 @@
+exynos9825.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.N976B b/cruel/dtb.N976B
new file mode 100644
index 000000000000..3ea505ba1eb1
--- /dev/null
+++ b/cruel/dtb.N976B
@@ -0,0 +1,2 @@
+exynos9825.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtb.N976N b/cruel/dtb.N976N
new file mode 100644
index 000000000000..3ea505ba1eb1
--- /dev/null
+++ b/cruel/dtb.N976N
@@ -0,0 +1,2 @@
+exynos9825.dtb
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G970F b/cruel/dtbo.G970F
new file mode 100644
index 000000000000..8e3447ff2f8d
--- /dev/null
+++ b/cruel/dtbo.G970F
@@ -0,0 +1,27 @@
+exynos9820-beyond0lte_eur_open_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-beyond0lte_eur_open_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-beyond0lte_eur_open_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-beyond0lte_eur_open_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x15000000
+
+exynos9820-beyond0lte_eur_open_22.dtbo
+ custom0 = 0x16000000
+ custom1 = 0x17000000
+
+exynos9820-beyond0lte_eur_open_24.dtbo
+ custom0 = 0x18000000
+ custom1 = 0x18000000
+
+exynos9820-beyond0lte_eur_open_25.dtbo
+ custom0 = 0x19000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G970N b/cruel/dtbo.G970N
new file mode 100644
index 000000000000..6852531ca57c
--- /dev/null
+++ b/cruel/dtbo.G970N
@@ -0,0 +1,19 @@
+exynos9820-beyond0lte_kor_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-beyond0lte_kor_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-beyond0lte_kor_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-beyond0lte_kor_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x18000000
+
+exynos9820-beyond0lte_kor_25.dtbo
+ custom0 = 0x19000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G973F b/cruel/dtbo.G973F
new file mode 100644
index 000000000000..3c36e23cd44e
--- /dev/null
+++ b/cruel/dtbo.G973F
@@ -0,0 +1,35 @@
+exynos9820-beyond1lte_eur_open_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-beyond1lte_eur_open_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-beyond1lte_eur_open_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-beyond1lte_eur_open_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x14000000
+
+exynos9820-beyond1lte_eur_open_21.dtbo
+ custom0 = 0x15000000
+ custom1 = 0x15000000
+
+exynos9820-beyond1lte_eur_open_22.dtbo
+ custom0 = 0x16000000
+ custom1 = 0x16000000
+
+exynos9820-beyond1lte_eur_open_23.dtbo
+ custom0 = 0x17000000
+ custom1 = 0x17000000
+
+exynos9820-beyond1lte_eur_open_24.dtbo
+ custom0 = 0x18000000
+ custom1 = 0x19000000
+
+exynos9820-beyond1lte_eur_open_26.dtbo
+ custom0 = 0x1a000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G973N b/cruel/dtbo.G973N
new file mode 100644
index 000000000000..536816bdb5de
--- /dev/null
+++ b/cruel/dtbo.G973N
@@ -0,0 +1,23 @@
+exynos9820-beyond1lte_kor_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-beyond1lte_kor_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-beyond1lte_kor_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-beyond1lte_kor_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x14000000
+
+exynos9820-beyond1lte_kor_21.dtbo
+ custom0 = 0x15000000
+ custom1 = 0x19000000
+
+exynos9820-beyond1lte_kor_26.dtbo
+ custom0 = 0x1a000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G975F b/cruel/dtbo.G975F
new file mode 100644
index 000000000000..315073f30479
--- /dev/null
+++ b/cruel/dtbo.G975F
@@ -0,0 +1,39 @@
+exynos9820-beyond2lte_eur_open_04.dtbo
+ custom0 = 0x4000000
+ custom1 = 0xf000000
+
+exynos9820-beyond2lte_eur_open_16.dtbo
+ custom0 = 0x10000000
+ custom1 = 0x10000000
+
+exynos9820-beyond2lte_eur_open_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-beyond2lte_eur_open_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-beyond2lte_eur_open_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-beyond2lte_eur_open_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x16000000
+
+exynos9820-beyond2lte_eur_open_23.dtbo
+ custom0 = 0x17000000
+ custom1 = 0x17000000
+
+exynos9820-beyond2lte_eur_open_24.dtbo
+ custom0 = 0x18000000
+ custom1 = 0x18000000
+
+exynos9820-beyond2lte_eur_open_25.dtbo
+ custom0 = 0x19000000
+ custom1 = 0x19000000
+
+exynos9820-beyond2lte_eur_open_26.dtbo
+ custom0 = 0x1a000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G975N b/cruel/dtbo.G975N
new file mode 100644
index 000000000000..049423bd19b8
--- /dev/null
+++ b/cruel/dtbo.G975N
@@ -0,0 +1,27 @@
+exynos9820-beyond2lte_kor_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-beyond2lte_kor_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-beyond2lte_kor_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-beyond2lte_kor_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x17000000
+
+exynos9820-beyond2lte_kor_24.dtbo
+ custom0 = 0x18000000
+ custom1 = 0x18000000
+
+exynos9820-beyond2lte_kor_25.dtbo
+ custom0 = 0x19000000
+ custom1 = 0x19000000
+
+exynos9820-beyond2lte_kor_26.dtbo
+ custom0 = 0x1a000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G977B b/cruel/dtbo.G977B
new file mode 100644
index 000000000000..94076674c375
--- /dev/null
+++ b/cruel/dtbo.G977B
@@ -0,0 +1,33 @@
+exynos9820-beyondx_eur_open_00.dtbo
+
+exynos9820-beyondx_eur_open_01.dtbo
+ custom0 = 0x1000000
+ custom1 = 0x1000000
+
+exynos9820-beyondx_eur_open_02.dtbo
+ custom0 = 0x2000000
+ custom1 = 0x2000000
+
+exynos9820-beyondx_eur_open_03.dtbo
+ custom0 = 0x3000000
+ custom1 = 0x3000000
+
+exynos9820-beyondx_eur_open_04.dtbo
+ custom0 = 0x4000000
+ custom1 = 0x4000000
+
+exynos9820-beyondx_eur_open_05.dtbo
+ custom0 = 0x5000000
+ custom1 = 0x5000000
+
+exynos9820-beyondx_eur_open_06.dtbo
+ custom0 = 0x6000000
+ custom1 = 0x6000000
+
+exynos9820-beyondx_eur_open_07.dtbo
+ custom0 = 0x7000000
+ custom1 = 0x7000000
+
+exynos9820-beyondx_eur_open_08.dtbo
+ custom0 = 0x8000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.G977N b/cruel/dtbo.G977N
new file mode 100644
index 000000000000..32e9ea85994b
--- /dev/null
+++ b/cruel/dtbo.G977N
@@ -0,0 +1,33 @@
+exynos9820-beyondx_kor_00.dtbo
+
+exynos9820-beyondx_kor_01.dtbo
+ custom0 = 0x1000000
+ custom1 = 0x1000000
+
+exynos9820-beyondx_kor_02.dtbo
+ custom0 = 0x2000000
+ custom1 = 0x2000000
+
+exynos9820-beyondx_kor_03.dtbo
+ custom0 = 0x3000000
+ custom1 = 0x3000000
+
+exynos9820-beyondx_kor_04.dtbo
+ custom0 = 0x4000000
+ custom1 = 0x4000000
+
+exynos9820-beyondx_kor_05.dtbo
+ custom0 = 0x5000000
+ custom1 = 0x5000000
+
+exynos9820-beyondx_kor_06.dtbo
+ custom0 = 0x6000000
+ custom1 = 0x6000000
+
+exynos9820-beyondx_kor_07.dtbo
+ custom0 = 0x7000000
+ custom1 = 0x7000000
+
+exynos9820-beyondx_kor_08.dtbo
+ custom0 = 0x8000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.N970F b/cruel/dtbo.N970F
new file mode 100644
index 000000000000..b4d61a7dd5e9
--- /dev/null
+++ b/cruel/dtbo.N970F
@@ -0,0 +1,19 @@
+exynos9820-d1_eur_open_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-d1_eur_open_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x14000000
+
+exynos9820-d1_eur_open_21.dtbo
+ custom0 = 0x15000000
+ custom1 = 0x15000000
+
+exynos9820-d1_eur_open_22.dtbo
+ custom0 = 0x16000000
+ custom1 = 0x16000000
+
+exynos9820-d1_eur_open_23.dtbo
+ custom0 = 0x17000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.N971N b/cruel/dtbo.N971N
new file mode 100644
index 000000000000..4bc1c136137d
--- /dev/null
+++ b/cruel/dtbo.N971N
@@ -0,0 +1,19 @@
+exynos9820-d1x_kor_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-d1x_kor_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x14000000
+
+exynos9820-d1x_kor_21.dtbo
+ custom0 = 0x15000000
+ custom1 = 0x15000000
+
+exynos9820-d1x_kor_22.dtbo
+ custom0 = 0x16000000
+ custom1 = 0x16000000
+
+exynos9820-d1x_kor_23.dtbo
+ custom0 = 0x17000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.N975F b/cruel/dtbo.N975F
new file mode 100644
index 000000000000..97b6e49f1f53
--- /dev/null
+++ b/cruel/dtbo.N975F
@@ -0,0 +1,39 @@
+exynos9820-d2_eur_open_02.dtbo
+ custom0 = 0x2000000
+ custom1 = 0xf000000
+
+exynos9820-d2_eur_open_16.dtbo
+ custom0 = 0x10000000
+ custom1 = 0x10000000
+
+exynos9820-d2_eur_open_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-d2_eur_open_18.dtbo
+ custom0 = 0x12000000
+ custom1 = 0x12000000
+
+exynos9820-d2_eur_open_19.dtbo
+ custom0 = 0x13000000
+ custom1 = 0x13000000
+
+exynos9820-d2_eur_open_20.dtbo
+ custom0 = 0x14000000
+ custom1 = 0x14000000
+
+exynos9820-d2_eur_open_21.dtbo
+ custom0 = 0x15000000
+ custom1 = 0x15000000
+
+exynos9820-d2_eur_open_22.dtbo
+ custom0 = 0x16000000
+ custom1 = 0x16000000
+
+exynos9820-d2_eur_open_23.dtbo
+ custom0 = 0x17000000
+ custom1 = 0x17000000
+
+exynos9820-d2_eur_open_24.dtbo
+ custom0 = 0x18000000
+ custom1 = 0xff000000
diff --git a/cruel/dtbo.N976B b/cruel/dtbo.N976B
new file mode 100644
index 000000000000..a720e48a2efd
--- /dev/null
+++ b/cruel/dtbo.N976B
@@ -0,0 +1,39 @@
+exynos9820-d2x_eur_open_02.dtbo
+ custom0 = 0x2000000
+ custom1 = 0xf000000
+
+exynos9820-d2x_eur_open_16.dtbo
+ custom0 = 0x10000000
+ custom1 = 0x10000000
+
+exynos9820-d2x_eur_open_17.dtbo
+ custom0 = 0x11000000
+ custom1 = 0x11000000
+
+exynos9820-d2x_eur_open_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d2x_eur_open_19.dtbo + custom0 = 0x13000000 + custom1 = 0x13000000 + +exynos9820-d2x_eur_open_20.dtbo + custom0 = 0x14000000 + custom1 = 0x14000000 + +exynos9820-d2x_eur_open_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d2x_eur_open_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d2x_eur_open_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-d2x_eur_open_24.dtbo + custom0 = 0x18000000 + custom1 = 0xff000000 diff --git a/cruel/dtbo.N976N b/cruel/dtbo.N976N new file mode 100644 index 000000000000..273ac9a7c960 --- /dev/null +++ b/cruel/dtbo.N976N @@ -0,0 +1,35 @@ +exynos9820-d2x_kor_02.dtbo + custom0 = 0x2000000 + custom1 = 0xf000000 + +exynos9820-d2x_kor_16.dtbo + custom0 = 0x10000000 + custom1 = 0x10000000 + +exynos9820-d2x_kor_17.dtbo + custom0 = 0x11000000 + custom1 = 0x11000000 + +exynos9820-d2x_kor_18.dtbo + custom0 = 0x12000000 + custom1 = 0x12000000 + +exynos9820-d2x_kor_19.dtbo + custom0 = 0x13000000 + custom1 = 0x14000000 + +exynos9820-d2x_kor_21.dtbo + custom0 = 0x15000000 + custom1 = 0x15000000 + +exynos9820-d2x_kor_22.dtbo + custom0 = 0x16000000 + custom1 = 0x16000000 + +exynos9820-d2x_kor_23.dtbo + custom0 = 0x17000000 + custom1 = 0x17000000 + +exynos9820-d2x_kor_24.dtbo + custom0 = 0x18000000 + custom1 = 0xff000000 diff --git a/cruel/unxz b/cruel/unxz new file mode 100755 index 0000000000000000000000000000000000000000..b8ca34f889b33beb530bee5c54d0a3904a71842a GIT binary patch literal 627688 zcmb@v34D~*x%hwHnJn)lY*`5jFcTKd47i{PAu7!zU<tvE7;E*{`y&Bc1GHKetrV3Y z+6JO+UX2@gC1Bf|8Ku8qNlSY58nm|s_4a}&S8eY#LE270TY@YZAkP2$yh}2qxb%Mh zd_G~`<t)#6p65K<dCqy)Tz$<~U7Dtxe=hYaC3m;Eg61RZf29hVJm$Up7gRG;6911> zBh0di18=Oi)@{)I>AJ+K=lrTs)?4W#iNBmHt@k<e=9({@-$<2JZtu(a)`eX!fjjwY zvO#Xrg64bY_bKyEDd$(p%D)@_wDLB&e)}HXynp+iigSLyG0D7D&aYE%KJ^67FZ<61 zQ&rjhedhQItrjEpuRR<k&^^eX+?U;P+wwaX-=@ZMz5KFixAF78{x`kovKi9~i~ra4 zKda&=Bggmkuhv5Svy}0PTLnh%Qq5g%Z}<`OHzRP(Pvu@2j3+bpn%dXD+7)U|P)GHb z=R`yN`WMfMhWV}3=0uft`?J1rIoEZ$>t`NLRne_HFK?I={gC?IwAsH-8NX{(%|WGh zDCIrS>0WREd^PF&QjAv<)ZTI><y5%Kt%pbLQkk#uY!S~=f2lIhjLhvjetkj+7<ums zpT1oS^(}CPPTuSe^{-9@e$6<^^Am0@{Lr)Xk)U>*qHN)hRC7*(GP>M3;a<knnW}bl zyGMr)a(|rnhnlqJCtbmkU%6G`(@Dy>Qq@=Gs<!kV_vpY3uHmCh|LEY1Gj2T)a;ZY! 
zIF<eyZF-ej7D?WmTgP)1ixr*uG<kYXvP$#1bC>#bRUL7A1IJ>q(fzBh4jsRKHgL`Y z-ty4t+si^{lE2y~W0&^zHTONH@|E$r;5$j}I0;Ppn&4ydE=#GX_nv~jn}O{L?xXI^ za1p;<<5l`rV1UUAOWmqO-W7X;Gm3#v-fiMt5SVf`HN7=SrJV+r_uen+I{~ax?$oET zBIZP<JeT|Laqr^ZY}=g??sjJd4!X0$xk@$fPEbvrMAg>CeU2x&V}@r$$5SS6WQIR$ zY)#Ov#jTF2m1Cf1cKGc~)jUDqRl$zGJ+7LC#+`1}rmmV{{0Hx1uetXXz$=kis$c)7 zS^cMPPXSk{;A~jveYe{CU;ORl?*#mC_D&D@91b72pr00q($=kiQ^u(WG-Ir4tI7k` zoKH%P_RBP*fAxq^|2nsUgvy*!J0?_7q&j3gd%zcC>X0^_@?GxCKsRGQ=*|s)2!9-c zUoYLLn)L)|`mwgG!mX;;D>W)*b=^IFPfDXt^B%Bp6YMwwzea#fzc$#B(y8YA7w4t> zwUoeO`l#rS73zs<{x6p&8m%t+uPSI%iNWX`aM<b!cI-*l8%q;aGy)AifM0YqHr0ER zT2@f4s-FkGrDdgt^t)u$V85%~bE5T(VJ2<VRI43*;9wVJCn$9kzU=67=Y{iq<BW3O zMB_vFJa?>WcHr}-sbw`)s=5bw{k6(C;`W67X?i0trfs0j0C=hcPrn3~d+0~z&7wz` zH?!3aaIw#`v~EV}<7!#iY*j5Zo<N;-!|JQlq%>%q6ZS4Gn6Y4--QWCR)N^m}QT;y8 zro}s-earLE^T>zk5vYFW(;;|EFX6eS0=3uCrV!Q^vH|9W3!*4^MAb%et;p)u*6E z0qqNapA>kRd-ASZsp?ykm9ZFDfZK4M*TiDgi{bmo-`2diz^xytxK>qnRtBRVb*koy zPwv|<^Xw?sot0|GS#Z{|F1L^C=&z9#QRw}(WVH`@l-V$HOrvM*f<BL?q9ydRz&++j z9=|p4z}iISANc**!>U>EF0#za9IONOice#Of)lr@e^l+r1Rr_ulQ*1a<74dHV6=X9 zNncHWto;Mmm?O1wRJBgo2A;+D(|tBp-LdZ0zRjzz=sOKB3jYg#zoe<m-SDqIJlNqK zu9npuj#UfYd_1oJNB<7&f-j+M!7^1c4;nhW@*?9_v%Z*Dew=qMuc)#?UO61cE2&z2 z)fC#V;J09%Dk(?}Mk~rydmVIaxJ>O>%yW-wTkch!r6F(<5MA+3mQmTOj52NgMv?Ia z=!g>`)$ALoqUunrJrO>L2%Q;&XO}9;i-Z3Gfq&)~!@poC{8yeI{x|3&UM8e6SI>SL z8$E!(T2*p}duI-8h2HAobLYU?i-ydBrcncP;8|zReaShn02&VTy;ha{k-i<;&0_4n zqAwo1Z^+nvm!E&^M;QA{@nhYhN;<i(fp%Mmw4tUAwlPK3zjY1|sK~Ju@SK*Q+8o$@ zw4L91fl*+n67{HR4%_`;1DSJN<km>W(*iEPNN#<UdoC}wOdW(J9**Z}vmt&w+f~WK z+}H8koC`Op4tek3^KY-vf8dMze{*R61?TU73cOxN`?cWDn*T>1RjuiU%>A)8AE>KU z^mZEtg$(&UFw|dQ!JrPQJ01Cr{OMQq^;fC-wO3hke5_G^bc`v-TZh2on{<A7mNM?y z^d<DXcnCe~C!D{|lmzC3_V+sS3fggKTkwu5na=%gTL#tr*3xZzxGxy4Ixex_9cFHO z5`!HP=CbHSzouO_m-!X>&G~iVgWzJQE*wi+gZM<Awv0FNx!WF(`i_cb#KYFDN=9-o zeWE`*hH)+Q?nHJfC3Z+&_%3YE&U961KD*R<_FbN}IL~UVXG?ij?>uX=o_$N|j#EW- z;PyxGvcTqp@7=1QO4W@vVcP<1^%<(@pxDc!>Z`7T|D_$76Hib_WQo~#r?i<9o-h53 zu;GEm^6VO()i}?5x#qJuJj3s@;J(3nRxWj&XZ6;znLKM5t%@4x?|!>K(cdeitmj<W zrFn+PuHA$B`%Z^vzNGBB((X%l2TKYvRbjWOBXh$4y#aVKRJ-Wz&ijMW6zDAQTn{{X zrcIs~9>%la2(ig?(3N>%(aW(%wB|qFucBhlJ=qVxK|_6et|9tjxA5ND+0g=Y#$&v* z<{SKRj|IQXxl$W`vG3F^Dk}D1LwwmkT5?*-7RHyo$trtaeA%Oe?fM<OY;hT8+nNeV z+si1Ay&o3aL4L8pOj$0zLjknN<F|s}J;3AF#uyur7y9IsO}Z!8vHrfzRsPAlH}<5c zVv$w;pDLrESKBG|3N&;e_UtXRFZIQa#dbCny!D%(fwzxig{|mS<xw3sP^OMDuVYuQ zr~S%ZV+`HzG5et1HsCew;)40A<7&$Guf7gD`dV!4Yp}KF;g?i~&fYmU6qyz5#~wGt zKY1?)y+fPdI~#ivpD=LBzCXe5yZp^bfxd#{>RjUwX{y-|UB$*f3E$f`ev{bvZ}ZM9 zmyl`fbFSY`H|OMkIQMlK=KUV$zAMxCE!QiUGZ}WfU9`*G65pXo1*cO!BmD9}`7Cp; zbZ~FJe_^tD-|oE6wEEl5z4*LafLnaY{FRdo8KdI^7iL>ymbSk(+PW@g{IPYeeG%|@ z_Ra)wl?cv~z+Eyp)bT$(A$_}M{42EZXu*ypBZD2MDJ$*!vCkG@tKIsEGWut^47Dp! 
z?8=VUuwg5hFLf!Zs3Om@=Tr)QBlWx<t)z?o4<#~Rn73zn)^KyM<2b*&_+vh`_3<a| zpW?Uq)!|0IPZ<ZcCK*xgbKE10c3{yxs?en##9qw^uXU+tw~pVMptjd1sb~xSg|w;b zs!;mWw<se;3ziJa50>cuV8>i=lIp6j!Y*01IBk9IW_|<OxXX&uHsp%!85yOjvFrBw z_qmPN@lU2KuA5QmNo~YOYiobiWd!k$yg91Ztzm=n4E@%ofH8k>uJI~uKe`^7G)5KP z#Tb!+(~<wtU)^68^=hg&D{b@pzba7e{{3aq9&}`Z7L2y?>+=Mo`P6lZzoC^L5We=k zqeW{jQ0@N*+wmHFNg3-Cv{`d9*51E*F7&B@PII8wSE1Y0@c8Tyv^4p9^ZhDhj(tA( zPgKi-398!bQOlHzU&gpAQB`*xD2vvlS+MLGX~ELK?@(B}4rtNfNY!3q*|2T;t|UYC z2cy}*-ZIRp?;-XeW7(Z@{`&PPl%sxY3h)5OI^dB0JCUJ+3xB#*KazI-`YpgYhWfSO zQs%%`nVZa$^3Jaq1rA*KnsEZ3LTxXLj)qr#SytPNGpx1?_#Ha77RJ_<uG(F+@p&{I z2Ti+DY<|MG^vn!);M*=Mg9n%wCGdTr4u9=ds;~q)3J>ew3wBflRncVZ-<81qXMTmJ zW!%9`3$~Cw?mB*l!X|k40oxy_r{IfwUIqq%_X_l#_+i*qUY`o7dbN%BGr5jR+ujP* z;r%*)JJl*JBN&x<2Yy`Ci(Xk2P|d_)0>0_0NOZllyU=deH{;8<yX(_nbQV7OqMBjG zB*rU!eDGHl{ou5UO04C=l~zApm#Lx%F!_BF<M9SO+tsS9?`S0h_$gLJ{|y{6M_TMT zlFb}xxx~Ux&qWsPTKOG{pMg1Yk!pX`#?dAASURVD`LPI&wjxu?JC)(!=_eQ2@K3ej zXG{TS47FNGO1oO+SvEKheVQtI-p1z$U=Y4)0p`!5Qwuohac70&X(c!nJQAY`IJgW< zvEVpG+4=$tmY&ztWhO4^-*b!VaBvv`)(ticAHQVrO1=NHVHnCQ(myfDy^C%r#OBME zIMDzczgS_xQ8UScLHhTD6A!rX#L;s%FfO)XcoP^L8U-hRHXOz8ltoWusA#NGb;NGv z@7HRTLx(8yIUD*Ej4bQB3)n>ddbO-0UC58!F0~_)9*lOwJFUbjx`>ZTOitvd*b@hM zwmT^pZQ!>vB}fdn^njb^Qtnw`b7&KrphDPHQQtbX%}0E<i}p^WFqhn_{RxdWxUb+> z+AwAGSW89|qipYDKC8~*#=Z+wbFYk>cR!+SVw5}TfEQfs)$?`3KW(%T`I#1yXL|1# zBQjn^o&Fh1bm%<x-kTWeVcfcEsuG!D^2w!IiTAtA>#HbVuR1)wq)@~7U{vTpjE}K1 zzifJJ{!|N{m~7F^hutjna`>b*4?7zDC~~;IL{*E<-JM-lBKl2qMLxJ~{XX$6;!mOP zt7t3Z>}gQV!h>1ZK;4v+SZ;oco7l6Lc&azN{Ku;K9b$memdM};zj|%3L}1t5s<<va z*a5u??*W%$CqG1cW|=b0=)tGb@u~dC>{GORm^yv%Lqj6*GIUjKuB!g}eZlBc6I6RQ zI5BOYZ%;H{MkkJC-nXI)^!9)uco2K1mcKgefW_IWW5p(`A8dnZ>Yr3c4f49>;;$1< z&bmy>uH{eU)ndv?nNH#!C;ms3e4BP(07kL-RyA2RUyJCRcLT;|`Yoo8;7stvJg;6w z`JR!gqkwqm9_#^s?PNpnTacwUMzHNg|Mn0^iU9u%^rWthZY&~Z7eOWqKFa9pq)Q!% zU_0&nL9p*#cw3%dX+J0SS1omSyNGA@E|}qcSuG>3Q5_i>jB1*CL&_fip=!p5-JyuL z4&bSC0&_VhJOY?MfIkK2+2Hb*xzc`icp~kepx>Txz(@P7F3&*wL7o)=r`3LT_$09Y zfj)$fgw91{S?d^!Px!;KkVzBzI-Ws$y!(~R1$4_B|2ilGe2Jee1ANGUcv&bi*^z~g zEZB5~EemNs7>N}+bMF{@F8USQW4<?EEo*>(Lt?*mpKHHiGaM}+W!Z1$wXPZ2z-eAz zs*DV-rJZ_wjI;3bL3nWl&tz<r8L&4}tvY+lyGjjNYmj&4D@zTLE#`G*DK?m6$7hB! 
z!2NONiP&%5#Nt2Cc56)-=<ld$-#jDhMwww3ZS<fs)+2`s##!>n2Oo7mr;LLiDq<*J zQ|?7Z2cxonqiLHr%DRWFO$ndhD)cAjcyfg5-IENh(}@Q!T|6VzJ-ShNN#=#XTsvIt zQ_YhMcsY8)of=*My?fxXBg}o7m%d@DFqW=*L)edzY_%_kIJNM%_Z4J`(?%L?e0{Rn z#z}Vy>sG1ZYvbF2?}E}!uIil#t%R>5!q;0Yz8+{Ni*~+!ZaY^FX{QC7O#1f4_x<2x z>_AUg`o4tnheapgFNh2g-W1;bI(U>hvq;*5|MxKO1dal2aDH_I+rT{f$s}VYye+)H z2HA4X{2$Kz2iMF+Gj_ctz8-SwF}t4A_fhB|eF=_54mk5S0a#={*PV^+2%e1<De-?_ zy26+!Z6iCx-ex`9^uv1?Qx4ZBDKGPQ8g;YO=*BMEY+<eM<6q+|&L#F>&ZE)cK6trn zz1Dn?@(P-Zo*Ya40(e7sSZt9xu7!t1PE~L%@R_o#o0uo-D6+QG1+BB`OZpXAou#rH zrN4dj7yPQzU$)ubZ|JX^wv;=&@gO>(lY8Br)rcQdat}C+fJ=RC(3GLzpjznRMb@@p zLkqu%?Dgu%f8yLV=<)m+YpYzB)>j=OUM;d3S~S%l<2z^YK0DZP_f{8kB|CgEvYfbk z;VI_a2lzqYy$PB(Ir4fJeTjZ!eWAA%{Z_`kg+EibH95LX=8fpKe`SqGbla)!<YH;7 zHNC7v=3T^H5Kx**|IS0$TEIM=^734C;xE&+lG}M+&zeoobLF9>n||=t8z+B#Y~J2K z99z05!urs>O{x7=kEPs)uIjI7EXYsUR58ob_icEn?qW-)-;$w9HlzRa{?-i@v-G~X z$*N=`u!?Vuo+y-le)oi_-<kwg?>F(ixqk;3@zI;wCn#eT>#OCjlttg>USMzLS^L4^ z#)Qj}#rP)?d~{nbg<ns^w<K26dO<KG^MLi$XluHPrV>|hd~K0MNt6@WDE@W}w#%0O zSoPKLP2Iv^$r1WkicVjQK3^g*;ai`At{;KNKKF*)Jp7iPL}HETdeNP0MUGHM&vzM~ z{9q_doT!r6Q7OK1a7@|vuhcfbU%?ud4)1%kyd$gEHEjIo^7R|X)1O)I9elCgvYkR} z8K3xMTP|en37<@8UUxKBR0nTJU)>k0>bhSO7Z{$I?#&M_E4#$*`>wK*NMKa4;8525 zL}uQ!Q8m}yA+l6I;zPgRiGG(cn0@Zkj03=V09<;AL8QzdW~43~W_*n`wZ;6=cZbl& zlKGi|ZNn`Zc~7X4HN<XXztBQE@fT%YJM#Hhx@vx$HMX`<l)XFHaRNSe`q<qcE6Q;b zOM#BqQ0;3{gS|TaF1);fwI*{-JN!6wXh|51f%sTw6MA587T&{{boyG8thU!6=T^Jb z5&6AmNf4J_Wj@7w%YMuW)J;^)4cOGIJqIe>YDZ}@_8>I!W?*k1=e%-{{#cLwSc-iE z-&YF`CTKb~h`^E^o&kUO`jRE)*A(I2pFe{9g)Hdr4HO7|D36^KUGu1R&u@^0jxE|r zTZ4R-9ln5ex=th)Tl_J?v=g)A>PtHUpYIH7io|;SZgrX1i(<ciA3aV?b-wf~HbHBe zx#rxqG%>+I){k03Oh?wbO?{0`AZy;{H8#PM;N<hhr(}G+gK!B9V#CJ6mY-zv)-HGi zUeteD78TyC&n7m7JuS4+vCD<u92#C~(@(~$Ue==Oud(W`EVV<%BI5wQwi0X;u{Bz- zWvn=1ZukuI^8~oXhgl}`e%Hsb>N^;N)cLmP8lLHMtv2eQsn~fTe$D4s2ctrlQcXXS zw|4zTzn1q#54QLJrQT2+A}?j#zHO;L!3dp+eJ&2)wfoJp+ppyQBXou{zeP^0$G`Bb zTHLpE00!0jaN@%o^;O^QTL7+lMsLntgB&<aE($pZDJfd&k&n<xA3eF5n4M?I>a>Q9 z6=|C{Mu7J++86vtJgCFE-dm1MCu{xYb8NL=IM>)`ZCs0;_W-i}AhN6jIy~>x!{%yr z%F>?Lg-#pfP+8Z|V7PNldnsH?dw1CF4cLXWHDDJ4PdwfH(5;Am9lB{<*z@4UgDpJw zgP3XO>HL0?op%(OBC^iWZ@xXT>YI3WH#&3mLzW(`u;Ci)KRf&#`mdqPA9h;)jI4i2 zotcAW+tiy=Or5&WF1yz*E4nv8dF0%V8hpEMxBiCkav^_7Ppjrvc_-JM{EA#H=TBr^ z>?un|)sSys^51{M1M|ywioBL`;*a@PWu*tStjk1qloOv7J{CNYIA`j8pV*A_pY`9W zMC7E5UF3fOW$NxwB@Q0CI%CzcjwbJ%XF;Bgu%9_~op&{_#Hy2dcd?DLGuRmt>o;*m ztpB}K$>&iLD{s<YE{lGGEhIT({p*Mu6{+5J=w@JetH-S*=c`F)4cUi}S59pBIbf6V zN7ks&b+=)g+4{SGb$RF$Y$Vz><F;MN!Dt3{&)wh|J9C=Y!uYbwMAt83+`6YCpbzIS zjlX>U%J?G=@6|Gv)>3x2^joA1@F{VDw>*01F?8Sd!^|bG<~b6%aC5FAe`jdFF}HhP z-vXU@vg9D85cf|F=_**#|C-CFz*cW%ZsE%}skxmqPi5n?|9*rK`RfSeri%8A7)eZq z_=#3OQ(}^l>qZ(qKN@K$?jye+NtybYJtIb0_t%ZG?tf(8|K7eIk!HV7v)|kIzqjv4 zq}%V)?f3Tm@9p~$8TR`O^S!_mA@@ye0>-~g19$N;4Uxg0@NUIcxAEeGsyQ2+OU|6& z*~*pjhGot13GBX_4d`8bBUz^tdqDUth(FJoMN{)aWjsbd4HJj~`;4j_EhB)xx?K0I zt<vD#64nXDR;@qg?g%13iJdg%*N-wDf|qOXNraa&`pcpQFo?bTf8jZiM}zY;BfJiq zfOUvW^Vz$8V-5G8z)NrP%pAi)&5)Rfz`o{1cgOT6tv+jjy&hfN;<Dl$GPk76#rFFy z_(kpo28o?`-cu#1w0Q@x2oJ8=%326~qiZu#b|)!Q4`%{vJ!Pbx*>{y@)RF5Yye+n} z7yZi|sB$MZ<Tf()6_1WEocX0?o@;ln%}~`h(pMIJ30?&M5>pa6AZ^JSD|SuCqx2^* zdZx|pC=lB%Ex2s0Ek88i0WWQ8#)XE!RchmL*x=l4Q@@#PTxR3Ufu$3h;a<uMPn&fA zv@FV8H-1N)UnLXEXPuWgLdS8Q&!(K%i!z6nxKaWeJZk!3>Q&;qR6_4C_wv3Ao^aYf zpO29PY?=00&Gossv2JCoE9tL`d8MiPbdjb1M_~)XTa*n54?Aso?6_P7JnW{;(RP~- z{)9Gyzei~EN!nbIOw1N|m1lOxW9T(}&khgcThBWgpJ@;Kv4`OY6PG(KFk=pDM_PR5 zjLCN_R(%P$z+MULU~Zhtr<vjRhLmqP8mkr?SNa~H(+d}xF_+TB?B!zbo%8O}0%IiQ z=V}$E?dUI>ZQ7U<Pgr_!pp6E<Vfw-G?H(;KZlZr@oJIJLGDjTPgCAi_VW0M|o`-y^ zM9$4+-K-*n+%x5FY?ij~6!Lrl>C6qR4SM(!-Z*))i}I3(n-DtnmWChPu(9U?e2BWb 
zo#f*NHbAH2ZdX9)S(&&wcRKf7@KKq`!&!lTw`Pzpys9Ua91%D1(LA;4C-4|_m3+-2 zktHG@1ovW3#Mj+Ox!w2z5tk~GGA-b%lNc&7f>pcm6|yM5Zk;7JyrZy#i3fV8qAT#n zHoK5P*dvjQhK(zh)b&*i(yL8<BLICf!ruX30<Wy)e~n-91x~^{#1qgDvl;g+@Khc; zCHjdRWLY<lt<%sGR&KJBvuxstaX7ik-6Pd>e5aCv1XW1fH>%-hzrlPJA6@wRQ+!nM z9qaKYWX{Pmp-s%KN)j}1LtGVJ9^m=ib}V`izPj-IR~d5-Jim+h$O&6!I6S`>ob80K zaU`bS#(1PHXYLGbr;v6;_DDNJ=gz;;j>sN?S!@r%e+{yf-(^02Ys-TjF6L<-V`;&c zgO*42edI}Uy%HO0oK2U*%t3peG^tS+ne*!T5y*bpUHH{ths^a9+E6)Gy^(gkKgQR4 z%BuIz)T_IKxjVcfMf2pQ7kE^9zDHfIdB$BX?R}HFLA(F{2Q0pLJ-!{^#b*DFw6l1^ zoRHwYXQB$#1u8<3DOyO{`1A*sO=^zkzpdCz7rImOK4e^fvD@7d-|nQTX1njvu7mF_ z@M=81cMn%P#7Bv5S9spKSB484P~`qv!8P%rhsdQCoGbh&flIFI1rG2Z<aZ`;s^J#S zUbSKRaXd^vxx|ENhu|8xYl#hok*9L){iWqEnSH-!jj>E*x6trCV977vDR2l+2`me3 zSn}cFf3f>oNnhYk^mLP&bg8jX`oRv?C8t`}w<((j&NkWQ>*C9IUTpk`@&ov=VNm$& z@og*uKW@fp!nF*4QBzf|E<Hu~c`f*@`9XP~%;S}!Cx-AjHev5>JGS+GU=Y2ti}4NB zI|=Z)=$)bY?#y#@zAvS&GuMXBkrT3Z0FIubz6qnvFKf{g8_+Y4D^={^ex8l{&USk| z_Lysq$EshWtnjs#uhNCqzo5^qo><Y%JU=yZY~RV7HDnQX_G;wtYs9;W2O)FK{SY#q z|6*Js!<vcXw!2g#GSuW5a{aMMGQ;J~mb}qeLn)?hiSKnHe??}l1;-L^!M-h$zMVQn zcAZ*esZ$nT#+0x5)X|u?BD2u5fn}e@o<#N*4g&_^8L^>QpIfFVw*r3a0mlO}pmQ30 znGqfV{e|EASI=T><;dBx(5d9veU^-M?KAO-eIYqG>;(wRIEC&qXC)^`@@>k5A89)X z>=Ab`x}G?J*!a7c9})bB^}_4imvJw!?uG|_$cK|_E!pr|JY1%qml2K(SIy4)gp*71 zfW%_S^*4E4S@8eQdCJ9J)ApUcQzI9}g>H2-rUb^u{*~29_(+<u6&dgzv=VxqfPN<# zm++M6=^5an<VMYSpLco0ME}U|sfBJsaM?347&^?@QY#v&j%Y2*_2F-9EXdE>q-!aS z5n_~UTv<mJ4pY_j%)ec(jK;05j5l6>%%Zu-3LiNs4c`iuWT=eBR@TOz`j#rG$RLIb zE}a<e*){lNXJdu(OlbJ^46Wo8&m?}^1s{fNoQZxFyG3|s7kZ$97^+Uaa^RFcD$sca zvnu+8KPrf^6u5aWvAZGtbxD8tfbsqP!R}AS`#R&VT=w^4t{_g+17DGUo-VZ2rmfGl z^k-)HKl@`(wrHy8CE%6*o#%Ua-lbHL#6qys1J3i;c<wtHD|BS4#M*_Y3ZO;95>+B= zUqTz<YxCJcRpQ|qe<xZP&pV?oGkN+Jcp`)Nu4QL^7201756p%aX2BEX<R_Jd;1N@< zY^UtWn_s33`?K(&g}>D(@xiQl-R*Xpb4z^dlU(=8d{o&-b}@Eo-<hY2VzDQIe|sl% z_NvSy;^ztd#J@d$eHs0~)+%#q1u)OBX_y!f`!_7u1HdCTrQ{cxeE#lb#$=vZbM)M{ zPK+GKek}`b{o7)n%YGeY$|HeyH!=6ekWp(=g1s{LByTc=4F=uXb>abDPq23_vPj~r z9@Y1#&~cbKE^mOmNuz<7y957;<XNUnSWDT`1l4;4-m6DW2@PMcWmG41H&aJqxCg<F zQ&(^#_5U9+jd+|mZ9ay4_@v&F1)a$90U2fL^NjGHhMhZpr{3w?|21UFC&P+uihZ29 zYffL^0xeXEj1rq!>}9ciHDEK#l2_^)9t@edoqf5fn|}ws8(2H~CF`fZ;_qE}em{RA zBMyA!BIDF`egm1;oAaS&tau>PD4;E=zlFc8qId7P+{kaf+$b+BG*-8$=0f4q)#ROi zTD^bsb-`mriR(AEG9M_rS3Q?*lwajHbC)NAJMVMp=H9YC>gmr_m3aSMEvrc0n9E!> z*9s20p4cdLB-i;v@c36L$6Ow4FLOwHnI&Q;9x~gjHrs1WdVHY0yId|q+LZRXXzxR4 z^&VxN@u;sCnqv|ANq=*ACwR-eiTo9CBzQRQ_*)9i@n1>#|IPS!n(%({_<t_tzQp){ zVz&3?#{V>B&lx{samJs_J9GTEe8KpGMdtX=Z2rf`FL`?RnDAQTFGzlRfL=rCe}|O& z663$kZ12mB{}#%gGk(V6jQ@9xr&ZQR*$*Xi7dv!WH*xR}h+~|+^Vi7J!9AklhdI7i z%$>2XK(Wqk>DIs=cqpH7N*e`hRrLRnXN<fw{mtX(8y&mQI~iSuT<MZ|QZbHv+xn_T zVxXOSRguU)slUapSIZpyb6madxOy7(?xUXQlkVTE>Y!cs(YU(*^*MF#kh<s~?LSoY z%S|Hh(Y3%YF(@E2^w~)>_EV<ibyXyCqGn^Tqn19tZ@1sIhgd1kB)4ZR&)J6+SYf|Y zyff=OYt{LV{XDpvIy{T7^DX;b&F`@Dpg}%s@SU{hrf(UOF1bR$)`LwWJSyuGzA|!* zdsWE;*z3CFc$ImLZr*p|gG($Xwg~^fm>f6SsQ-6WB=3UuyVE@D`VH%_>{I#PeU^P1 zBzI;d*P`2WbgJjx>`lK(uQ=^zy+c=9s>D7hWo@K#t&3Ri`i-$zLTEw13RMvIj4jei zeu*CIHuoxIhi|*zgtzy<lIk~#?94wj5*uf9`1v2JX0fxLS)>fXjmX~?;`s%Pv7wZG zMfl^Bkf~;!vsRsFsZ(#4sr#Dh80)+rHOg%B3EpFC?5H62T%bCSNnEkPtD@^^OZ*|T ztpclTBW0yOr>s6j)&|Wod-K0L*=+9t%G6k8s`MLG#{-hXGn@J!N?uHMc>B@VlUo?8 z`Fy4I`~{v_?=u4jk)__=*pm&cnXfNpZ5^LP)-Bh<x8^umW0!I2H?rnR4&d=iR0qy= z;4rpXJ8{K!^pGE$S>l>y3skkN>qtF;W#BzFBJ+&>4fH|#axHP8z+BaFfa}+<3wB%q zK5|(PSVFsR^NiSDrnH&Qd|Jzx^8c7?2ye|xQO)LkL!Nmr`w&`*t$0{ZmwE+h!AE7j zwr*F|fp6xeFWIh&wlQxXrj2b|W*YU^cFuf(wN3bA?-bgr>`pemo&Tnp*S>%?*I;7( z%xg5&A@5u1d)^pV2WxtJm+qNqETx{vnI+V_CXrlt@D!L(Uo{+^wQbslT(JYM;hi^2 zEgP=aSB-#QM-nHyhBjW;9?6ycw?~jOA8Q+OALhCAQMpVvuAQd*^VrAXclAwwgJ(-? 
zbwk>DgfcxM+rR_)b{*fLOlnSj6+BTbeL(kWPmsM^z%Y;Zb9ry#c^Yvz@G*t{1GF=B zS)!S1caf23T)aKWnCEWX*vLHiIe!7_^|*=I-WX_1Wgf|x=Iv4Bc+m&8ir9<0fl1$z zBzpv+3$)xL!GBTJe)O%*oUX-AOJ>}ql<jV{%f2IiRql~m?u9lYqh;)`dR4Uv-+bUB z2cUy@5o~{dSw)|$x9)}y3W(pCW#}&;{S8;WB16Sr_tT#bxSZ!T^u6FA)$8N8Ue<fn zcth6K3sUJ1cs%T>FnQ(O@g}dl<WbEHUn7qaocM`1TK%|;fY_YS)vs+}eHS|txp5^q z)(xzg%h;8S2RY;0tg2s!=fz%yF5#)PQ>kgj$Jn;7FkYE6&^HXNivLQUnq2>t9IG{x z$<_F0RsEk2mGvzpHvNh=@`&K=g=f@s@Mt`e;c48qCm8*<2b@lB*tmI|>ivjys8?8* zD#O1nO;f!Uo1h1{K(<z|0T;VBs%qhP@jsdmCK=@yW2a(2@1@@ru2DvPQdytC93+N# zCGUbylOv+D=1<;j{R0d7B-dBoN8I%rYo1co8Cpi8Q)exH{5PPfNq6ikaMPOyK12ox zUc2Fm>EQT-6l<S?S&qF0o_unKplcYqR_;@h@6j>0O!ZZaQyrHEELp7{w`d}{QTZ=< zje2lM9%JApXd^yiJic2V`||jHbO^pXjlakDRO8Fzd-snmd^hkb_^#kL9^bcnEqt#W zg6~fFvxaw_k9;=1yB_{~e1C63(1<0e?TUAT6FH}%2-#-Sz?74%BUDj5etaXoJbu0f zZqMOI;sJlhkEvfCKNZX^lOOpt`H?+^@%VZ6B5*)1P6hp<JNK?|B^X`kbcr*HZ}x~S zx7GUD(S_(IQ}4}AGCritS?q_C_+;^XEO=c{o9tiuOg?U5{ctEBF9xrr%%e_;Uw%K> z5$LbpUwUou*eB@5uO)8SsIzup;<%gmi>#AvIoA(5PTq=lOL~dKU)ui}`LWfqh4tj8 zavO;2$$V&~9<~AdUHwMRx=h2RJ#PCkIru8s;eVl?DTj9ZY<|xPKdp-$LrmY5cVzu! zTW)T*{Ke21Rjf~Dt|9k>V=Osb7pvYcc^uu)?HA0iy6M4=Ol0*U?vai2>!z!YoBh@t z>s%%0ClC))d1l_`O5Qamsrj2(FYjh<x63>O7c-c{Uo`%c_V^1K|6Pn@w>|!bq~_e+ zjDH7X7acBmXvQBsa8NTIa6P`UbF1SU=7evTdc+g&0cLcn_?vB=ThH|mKiHaCayfl` z!|cQI59*R0Gy9OXtu`}le={dsMLEgSk#^TyA3Qb#U-s|El6{U3YK<k^?5Eu9=WC`P z>hxpsDSou{w+eohJ}rNA40Oy17g0y(xCmc0|GW97P7#0Pkb6vcGV~N$`bS%|M8~`( z`@lTdG%1`XVd^P(cmc7DF7(6#c#>SgYSE$2`+Rgl2KuLtF=bygY`NqO-wlp^%q`)s zcfi|W@_S^S<$RcAw7WE;{(!BUmRNBV(Mw|2d<6YB+dAnm&z7Q-D%>hlu0O`+TYyfg zuT#}OM<-RHgEng;Or3P(c1s34j81wOorGOt>ZCR3q$B90&2c)Z;aiprsNk3J^y>VQ zOA?Msp6Yl@K2D*}Q|KdKvZaq?9_R3`<+j0j>|1xPKI&Ygsz;Oe>(mi_bjp^GT?<vU z;CT-?Jm8u=U+{DC6fsET<$CN`$;CG3^QxdJGfN-BZb;&coUHY^lD8}TIRhCm$cN|7 z_YbzFmk6)_`kC|dy4bYBAI;dk?eP0nSMxc%j!ti04E=;J&+X^@{N7^r^Y{F2wL8e~ zPf%_szYmV#Tt5F`tHtB%&9*<E$EBZm9>145!sE9Ik9SS5d3<!>L-_np@cGhY%ih*K zKkVqq3q}uL7>r&qTyLD~CZ9%22}76mUn8GxCC+ymzs6ZV?YV?~v2w5J^Gv&o*jT}| zV8<We<&(3JqsTh3XR*IZLN}?R<FqUGG3&Iwdt94y=Pt(I!Z$F>!9&@$t)SPETf$jK zK4`XtybT|9>tyc^?QNmlRh%*P%rC}^|F-v+qf}7`^*$h9Xcv72f!WkM4GG3~R^yiu z!+(cz?c}^j4C=pF3n`;rxqgQ0Zv0WXewyoi`}q@GZ?T_0#<k|Q$}~wC`}z^CciCk& za9w6!|B&lebf1TPg8uG{jIFP^_62EIaz(@+?s+aLbR}i#iA}ezyQvSp>+J+?(>JD^ z_|2E|UR{GsrEVAPO5KY;qmI0nIv4Q1rqZe-c#}H$pHWBNOPw*i@2s%uwAlS*enuU6 zFLj3T-Z#gpqripqll&QV<h|615reM3+J?=pb6TFA2M0d8&N1G1&9e1)mPVW*xKnI= z4{MW=Sgg%2=ZtMhH2mAg8w=oztF(rV!Vd?MBnLh_z?^O(#%k`9Z{Lt$95@{?a!Lb6 z-QCP%eA}t;Ob0$-$uDw^&79p6#Fn_lE%D?r;d*!Jfm2DU_tuHZkmpiICvV{vcuRPt zn=%K!kGx}DMfkFU_9WKy3TxUm@SEiH-^iSD+R%`7qv%iY{!7XPSv!&a3zczY<cuKk zx$t@Rs>pO>R@^&{ch#)toR~u1>Y}m|V#RG6iCYvbF!unq2a%6I|2gflm({mGMNN2h zRdhyZLz~6S1ErWhtlxAy;~_sr#&U+dMb-w%f5-}#axG($YcJPbjQIms&eB@=b2WRc zB$k0)P*_aACzo6~qk?`{bB@v)&SsJ|!`d@z_KVCeL!bQ&cxpJ4#et`lG3N;msOzn} za)!b;6g<lq8cxUB|C4o^Kf*@`(QzgY3TBw`a!q6tbd)@_%`V+s3z2-8Jod7f<yvQ$ zxu)jz$`Z5A@uYL>hzyc3-A4WydZbOi$&w}7MW$YF(+z7qd@Vi@vZAdWIk^Q{FSafG z8@PZqfnU5eH}uN`6`@S}Z2%r^*PQ+u*P>@1;7{c&9It$}<F{eA5z}uv6`pD=21c{5 zrx`E$_BlW6X3)&^o3Zg4*l*v;{BHS^c6|%KWKv$GG;N#G)V5bX88hQGa`CYb+GHI- z!=`g!aRZC)4t8YB3r2;<rN4H2-JsR7!<x<)_ZR(lnl_!dzu0=Z8ySt>JBUp*828Tz zAC+>_$Mx{nK%3|4I`4CtrcV8n*`}rIoHns#EPc8#f%yO98|x=Hc4J2PJ<8W3A7=p% zvV&NaH}IeM4Gpp`1ML^t&*eNRdB^@X^PQ|URe*c+Oq=hKVE=k(kPi)98u)*=_I`&3 z!*f(M^S<c-H24iPVBNLt)cc9X1I(-PLo*GH_>1u9uYkj(&z1xunLQXS)KNR98h?i7 z5@VAXn&`(vtXcet@)2Z%mpTJ9B=+HkU)DoYX@_{3q5ZS$r3#izOv7KMy&hn!yp~v+ z=PPC`Li(6MJVJfrB4ZeB1(B&&^ZQ%il<RV?O*pDAHfC{c!j5edfftYeUyI%{Hlg<c z>=cJKUD)OE>pU^+9kILbr;Uhh+lxIM!LC9^nzUH~kG~3S?s~wYjm!^OpIPR>uu}47 z$R$7y)cl|#wD3oBL$?9PH@}8$01w9F;k9(rPi$)j*7XaS&y;g~vYYV7960NTz`5Cm 
zvzk5~A57qE08WR8S{ZMMzvkQ|L!SvQ@yDK6GQyaEytix|**l&U7JQkw`=h{}9o_-l zk3pLiE+x5|CDMk_#9w=fQF+rP1N4xy%hY!-F<L24-d4xrYoD&_S-Nd`%B7D~c{u0p z@JFvKpK|1Zs)_w~?>{pA*URgCDS0XPJ-_Lr?g!IDPd{BXyZ<};*FW8{yumjt&vRdB zQ$VXvpWMG_e=Fsr&%@YV{`csQc~<%I6~t9HY!o;uKek}NZ)*A~Fq9`;YG}mvWIc0~ zGnPv)HRah%_~W;$to&5r-KpD<&F+jq6FRb+d0~#5Ty6G0G$Wte4*VUzXPa_9i?ux| z>v}HJPz@K^ydKZz+2L=2hYV!0&io(1*+mx{m7a?Ya<hA@fz_dBJkOargY`wR4ZCQ= zku$rKo*0lbLQ9daLdSM|vV$&%h87HOQP0|wlNFxL^%UYq<{a(Dhvp0uow>CEnpP5f zlz8?L)=1W9`jW1U^|=e|wIcLn^XgTWzT1gTd7Ic_z39}jKH~!LV_tt_oa`@bUS^m3 zij7C>-DJ+4l-y{$Y*&%No<no}D2N{U<Gt8FjPYlOV^5mppBZbQ+b!Pf3m7-iA2A=Z z%s*4clns>kuuo0a<oj2@5;E)MTxs;Xkr~vDtxMn>)->{45{yU^d&PKucp_^YM?X0B z{4CBx)Lcd-I0#MrO6d71Gecd;YTK4|_iyWN)I(dy;XJ!eML&GZ6Z+Gpl+a(${|85E z+YXI(ZTqv=O}Uwb$}M<S0gvP|I&=3Fu?IPCHGVBEf=w2m7j+7M+{ueFX^|y)idJrp z@S5=TE^PAnytfKsmbu_|G&aXB%A9~l<M++~g!QmrF~`Mb5;};SdmOq-j(y99+0lb9 z&g;|gH}VgSFz&$qU+W5PGoOPO$s>a2xBZOgTPQ1X^KV=uBTEF9{L?WL7KtrOF4&eR ze&#&`xnSlQJMsC}Yk@^_!6Y9m=5`I><_x$weY=}Hj)ai-9Vc&21W)AjtWG8$OEaFE zZrctg$bXq`#+2K1t*k`!6S^wk*T@0Kw?Q97{lnSoJDNQM`2MN3U68_lmx?Ur8L`_S zWz0Rz(Aqo)2cK5<G~+9^Rbs0MZx<u)vYC5tv({qLA+b#MxfOqpvl+j}pY@D*?Qrmb zjoeLJ5x3vmtLge4^8p)GWPcI7DDMRKcd=jd%-!(0?AM%FHn3mwP0Goh&4;nOW#8r_ z%)7_8SM&|tzxfLIp1~eY{Jl2efx6ZBJ;WUrueNelp9b$^Y#tEZ9?t_4!TT!kZo=J5 zo{~H3jd#5ps&>w9S<c>zBH;&lCv<mxlYCm69}Z(bzs=bSX1&^Aw3)fxeBeqs4`)Y@ z>;oqj^BS;hK=%g!eNKNq_JP<4qEGU%)0%I!YyqEMGrs(pD{XrqGyDr|!v^7@b}QFO z`o(A7oB#Gz*npYgH|ci=&#XM6(ePk)_$m$miFQ?s$CT4=!iQ@PUflaC`J3>-(dK`< z!jLwF4+h$cSnchky`Jc^Z!KQ((y`FW{m1G)owa{`&LQ&*RlR@qe(*TIRLfcF(N#6L zJGz3l>dxC%M);?+<;;btz+mErc`EavHs7L)%!fMY((O~#mw(QDc$sxmGdB`GZBACx z9|s5R|8}Ku9rq33RPCA5zYv>OSHF8}7r#O97)fK#PfpEvjq{?#4!(%>3H|V2-tthU zhj-?4y>@)Q{eCzx?$3Ka^<=EN0zSQ+KK*G%l_zcc@>F!|lH~gIMg8C0?=73L?3?J< ziNxB%_dHQmS@`1e#rK9b>Gy><36D1>vlc`-y?^Qc6)ub3lO9RUD<5{1QH#8gT&fRQ zC!Y?@1*Z)sVnwny&fzI@%wK}1M3)IqwV#)#vcltOKb~LOkafZz51>czxsD!yrvC_D zhvU0_9^M`yR{nYKzl|R$ctz%#`{~7x5_~y0Di03g=a4%w@K<=r#Lq|gx!_0E$TF!@ zKpX4D{{D|S{n!X?GPnFaxrWfbd838z4}Z5gcM>tA@{b1R*!#5C0KQxQ5-TdBy`k-h zK5^PP@R{vohj-J?mzg^~;F*~D(Np00c!HYl%%g%Y;HyR0^+WkebW}WFojNaHjW+qJ z1^9NHO)gHMAJc}n%MRjK&ZABYJVCjYXOfG%kXxeH{t5mNIoUuQBa=0hUEo>lsvC(z z<MTI_Jm)bu(;!f5+KFSr*Yf-{_xUDIXNTu<kIZWOvplQ03K=iCaXA68j}N(X0}|`| z3v%n!^(yof`Q5u|Q@7<r*Z;(-`&Z9F$9<Lj!K=}Gv&pxa6*?pO-))_*7ISO+WUoji za7li$)IX82Ay@L0Kfxx@w|EQ>>*n|kreDYT9ONWg`OC<|a1VL+1UbxHWXVUfa~jRE z*YO-5t?dxy%AcBPbWIdpFK2vYhhO9!vZrYU{+|n<uiG6A)gyaqP2al~-+STs7ofG& zFMoHYxlcyQD9W18cFr`e1lRF-$Bf0y$&|ALO+27WoHGM;_8<Gtesw>z*jsu+>lNAi z9COdB%6k{d9Msg2iTo<`imrCQCGsy{iJrOrS&;{#TQu%fPVIPIts0L$UgpliRxMD= z)}x!&>}Gul9;+Lr(!%%<9?rqufDaKsepU7_++XQQNu8^C=D9UbD)X~R#<1ezM58=s z=79W4Hpf#I5*&V#T;3<P<(e6uWjeX6b(2(@_$K0)q|Q|7i;<m`@W>H&>O4QbiT|RM zyZ~}lr~MRUtA{dv`ksi)T#SsZ^Lg@|eiL|S_I>s$<3jj(%?-uN8bsHBq|!t->%15F ztkZ{6$4xtCo#0GE_D<<@jjA<DU%Fdh`-UeiG`*>6&*JBn%N¾Q9V68Y`#|JME~ zwChH87kE6WsnYf;?~NXOE|KXQknK)CleVPfnf**1#LF&pVgKrL@uFY4p{hW&FwPnC z*KmIIR^SiOcimMp(&p0lxv--%1lQjkZ1bncD$n=_t3u>qOMU!PQ$8%FzQDfprpK#< zcJpp(Up~>ZJGWxb()|Lj!0GS5ZGQ?eO=(l;C@`0QW3cVFrR^KmR)v0IwOs&g9?hLf z4#9k9oYJNV(}U!?P+s^_cyj1CCAZk*W#U;*uCC<fHgLX$`FsoZA#3@iVzV<}^l9dv z_$I&X@x*QlvL=O2Yiea*1v0%!><6*y8tlDD>+QWr6YsY6j$+$4iM)!p>$c$^$@-1i z-*Ea%E8B_BwzrYL%KS;j(y~cLDfYCEO*vO<+;};*uk^DPc*K?$Ie3N~U$eePGZJdc zce-jT`rKQ}clv#1{CCwPa*ajK<&XUeGA<+hP09<*(oQ}+Ch&^A(;)Ugv^sUyfc-xJ zulR5hn_dgNroA&HciL>f4>?5p*T%K)BhKmM8asK`u~{zm0JxEt3CPVv<Yy9c6hC3L zZqehL!H#}psO~du@TNLBO9i?(dHA_2@q4im3xP?Vi*3FV+dPx!Vw>YrG;!v#acW|D zC;;9H=LVyTIa_59XRFM`PG5|To^jvYO*!l#xa-rZ$HMq<f<M_)Ux1GhVg1HxV}#`B zSpLYrXd%h#@^Rn2i2Wt>E51kMU&<s8zo`=2e;zUSO8V>g^u|VV=9<<Z$EM?lWZ{P_ 
zaCpPE`PbVv|1;n^*RuHsa4q8zTwegLzwhvdr>x|YZ{=*Nz=x?KpWUQ4GI*sc^Nq(u z-Z}OcG3Gd%zA4_Om-!*`c7<hswVi9zXN3O^9Vc;iD{tN#xPd(@n{E3GpA@_H3R6Bc zpj*VQl~}IWwOc;JuKf<Y^m+ExHs;Fby?5*{*|#IMl#Aa2XpSw_WZO~_A8B%Ipmxg! zYWsUzDl7a$#uje_H4O0|UdNXbA0muDRjQ>dUC;c&=G-eb=LGuwJiB_9E#KnpYHYKn zc>CvoWz)2sYgbDwP7l-on}<H6j#F;X_K`Cd69YF3ZJ@23t>c9*4YtiAIS!WHlM&7$ zj)HI5w3@SaSN%di$MzY)yFuG0E1bf;%vsa+xkluIWBW)9*Rg%F!Y3D~&@JqfSu=&T zNBT75@yH|5=XK;uMvyJSi+<=LGQyLnl|0XWV|nJ-N~TPN?@bv4-<Lu6|6#6X4&i%8 z#^86^GNuB#FbDbYRpi9g$cx$R51AE0ewen1z@uUJi5&bjFvj;;kL@mJXvJnF5-ZAN zpFyG#Nwi}{B3tZO(ZjT1#*20-<6+`tp@~-zGb*uTM)z*ptf{4E8zt_O4lEg=gCq6O zq0yeupS>wu$H$GX+;$M2>0ez<xf_AGDs;9HTX0sQ39AE}*nPsYj;?66b%mUvSV0`< zDe#hQ<3()1cwI38ygY$C5c#m>OzcVFmuBEUKpdzZU;9<!K;78y5(oN-{N$D&uHGik zw-WObJ<;;RncF6Tr?os2oP9@dhJPWjlo1CKScFD$-qkMRKzDr(jbh)oXe4Lz#MTLp zvxvum<MYJh25?MF?z}iAHhK0j!Epw-%M3}J?z7`_S8fw~MDXdUwRCe0c%Li0S1WrI zZ=#KA@PA|I>|+v(8;XDRx%i*)Irx8-F+2D#`vUww{2BQF4bL6?50AtDTJRl@e~tUB z&%ys@8~^cm=sv|h#4kY)Cq624iRX8r&6aidZu>`Q@dE2z@h}K2L>B!2L5p`*eD+*e z5J!vuIsN%_!T%YwsEwmVm}_S)oR1bS0?TK~tE)bT7Iyra^IyMW#;@lCYiyQ#-|5?R zWSR#)O@Uuiv3-Vx8lZ>B*M7HK)?1doHqzpAom^oLe<}Zd_0B10$=$mqcx+{2<HiNp z7X_w$VXg%RTAe*W#5IH_I{7~l=p*u`owX+H>7y+})|6ghO=&vxdIer=wb#C64FZ3w z13i0m1AnHDWeuW|wIq=hvKCQ}O=GPmS?gf`kNVht*ezM?ExU#>&0}npjBze<yCQ_H zAvWkT7`w5FyvA2tv(3296QW~Zbem^nwz4Pd74~HDtSu|CVdH^YC&}Ika~!PgHp$v< zE8~%IDAr`+*HdJDwU}`{0v!5t*7|BB=}B`f<!SoZ$X`k6MaHeyTVoh4_1#a~V~{vR zR``B%49d)}v(FU`uafpy+p+sS?;f!t`n}Wa*UF1@_H?xqlO4d<KwlQV7E#``nSbNP zXBQjR!d*4G{e1WnyqWuc-L8Flkh4ecbb(hZ-X0%okGQo^0egES=1$++73Zli_O>Va zli2Sw;EuhB0nTs;U&)z6x2{VtPW>RkX#UC!<C%GR=3ZLyySlJ(CGK6`lwdsTCa%R^ z;nm!eFS1YC4W_B;56DaU(6w=+Q&!@>*rLsK<g$6!s@@l{i#(k3Svp3UG2Xk#2hoXf z>Dt&vx1sjM*L#S1CpgpcIo3j-BlqDP7!D;G_wioNCPlUoCyg8v8&_nF_aW807Tt9Z zZGS?{DrF*PA4~~$G%^PEJM3M6Zdqz17!Ut+g3*Zov;h8vr}z4YtNE421fvBS-NN(I ztJ#YUK6HFK>NP#QHpx8Kzj5(m^IU&3##v4-nC3lTURO;p&mm#0qjxRw*E;%-huO{E zgYscX#`VCx9k`_*&R1stCugwIj~kv6*bB(Z8=KXTyXG3T9G|9o%_Oxvh<>B3y|VUR ze(y!*xna`wqI)xMG@rF4m@ymcS&H>+cY^VIWY|yWN1i`O9}a%YyAt5Bv4N?;BJ>wJ zKh~Q(-Thd(KhYcPNWPG@LUPlj-awtPiN>Faf02L5IfE)Xj=F0Y2V-w*e>B0^YhCYc zUpT?|59fOM1mkt*`hf|?@3=l+jO^^4<(#Qv*>J7E>slL>wS%_&XOhfZJNd4O$W3`C zxhAsiC-W#Dys#G7w)XuWRz1L<Doi$N7ql#2$X^0u7aLmMtCy0EQtr*Naz9Kr@{cDQ zXAI(E#3e<>iVcN~HTTP9FgA&6Nq-u!d_p|XZ09PSn1S_e$d9PIKSfP9pFij^F0|WM zFQgcgoa^H$M!s|X(QsqDbA8PSBhR^hFwMvrxXv)Loa<n&k?vgg<r*WM>lt2Sm~&m@ zGd#}q+Y^i==X&a7!|hz(d!eD6>xzKU58NiqZBvcY&UMSB#wq9eyM@Ma=lalequ04s zGmO7l*LxNGbHuq$7-mGA>uW|De{!yyM;Zs6>qBYA2hMd@y75P@#oj!?FY;k8@a$c2 zL5`vL9*V@rP58jwTUMPm1-|~%xn|67I@gSOk8{nKf6Mhid+FKSjWl-gzr-M3vC8dL zQ*(`%oa@Xyqr<)yeWZ@(8Tl`aHSUUg#vItIMzO!KhVKVHIL^3(cjx1oAGpY2BnBZe z*_B|eRe#ah1~tRT_vO4a_$#b)UYfvpoPF!gIR|r>4SuUL>8H@m{4S83ACVKrcMC^X zXw9dS@e}a7ynF|67xJQyZv^I&Q|3G){!v}PdB<zbeSNv%m%e-Mclx#v5A34OFL|!` z9!HdR3-|^mdlgHFdll{?*Qm^+c1W2X%0L_P1Nrt1-_eX5UcG-8d(_Kx>wP!x&$vei ze8gOKXOC_in;49~#JXZVvaltYbAFK@?@}g68wKddQ~0`(zuvpQera7F=K-7V_wqhM z+tP;Q%SrAud3c<~XSc_<0$m>VK*`=C^ocxovpy{FN&9jZ&_Tu^F#@UEkTJT^1sqmh zL>{sHG2s?h>484Jhazjq5?6_ua_RHW9_T{f^{@WyGEe@*&OKybRX4Ew;gp@<z<d?B z{>!;PG>-d>@P6*|p|9i>{+0E_ZeW&qAm0ailXnBWq%2-Ke*M33rU&O8j2Qej&)T1u zc@sM%{vt6D#Xi3L`JFROej3$M1y3I(Mj~fLMRnRy52|hXzVSwR%>|}^FZo%Vm-1xY z&&vIr-J2@==lG^c_2R9w{n~K8EvacsKSahlanj@0zuGGN{RuP@xI2fy{j4*_-Ckpp zbA6l7c#3P0-=#jaOk{Tj>$?Ikwn@OhC&}0l_q`m^r*+H;eHZ7l`#H;zoWR8i!IC}9 zt<uM}WxG<j*V!|fs)}^Jqvm06W9ek#p*&AvZ{zEnO^mKTx|Ds5==G+h=<6VO3BYd> zH=URNMy}vPCDhM^{&M!d==Y;3KGji*{9i)3huPOSkG+f=s4ICgQ`t)>`x#%D_Hgd7 z7ZUs_Q>9*CasTRFCO%wa#m}^_Gvlsv;;swguIYE6JpB$_Pl~&y59j$jfgRcmWB=rE z)~-fi2c%N(dF|m`_Q5S<|3ctRtueQe=L7JaiA}Gd-UgmYU7hkNz#uk~;6Y@C&b~>V 
zJ~^LcpsxbP>IquUJ#o*wkV_itYeL5XUM<WweHb6~jjdDrL|^K=H1nGZq9@6j;LJGq z?xJ8vKepnQ47E?zb6<JQJ7WpCs=`;)v+Y=I-zVgZ$a<Hwld+6CuW?Qyc^AZ#48FT* z#E^A&e$SjMwtY-)G&z{RBAzbx^5gu<8r3J}nwzyg6&;ypo-yH^$@U53kn=+Nh`V6N z;G0-yPGloPKX|~3qh}-k<Qm_(`hyENf0tjzyf=S_hkkRy|8ggJ3w&o{_p9(IyjRD1 zedI6Ni(*3;T>jy)p2Yg}56HE}R$}jCPIyUHaK@V{*^LiJ*X+Mm^E8I<t9h~VzTjI6 zv|&d^U*5R!Io9D16L)!{?4rg#&b_}5-v0pJ|BzUD#W-y~xx~u~h|7x~?a$x65k5Uu zd(QpXV{4hG^1gug>@z*q!u=RmOI4qHbXaFRb?oU1An#;<)n)vSspoq`_PYjrC|T1^ zVN7C^I?s0#2OlS8RqH@m;3zb6N$$xuad8{CsAqpFvZgKnZT7P<-UU4Cf^O!smt@~S zPWV=y-NK&ip!Ixj{vR(etoYvOa2~M}Q}$I~XgqQ<_T(6HdUW>P>Yn#D3LaY-%k(wb zn?7)59+}1U2d<1GI=>Q|jz4p*Z$ViontXMi$SAYDuNNR|$-%@A5m|RPZJW=>%2|au z;pN;*K9`(L_bYfV!v3$F{5m;Zaz-EL4VmZkop8GYIg8l8!5Iq9xe2?$+Zb<Fqu2<( ziRb#Cr4uvFvqC=pF?(+(TK=bbR>&ge1a<EDboTxfWJ*y!d#hga^d7#MZ*MUt4}f#i zwk4h_?TSz9g9eL{QTh9<oM}0O!b5(@1lCJS-Puc(zHt1j)a9GGY4Ok2!MCL&;ZNR4 zZox3_k-K|Me`l57sI0YoU-8|$CR*#otWh>c++zahi%heB{0-+FI1FGH@ZKAy9rkBj zXvPpGddO3F3je}xLvsJTzw?-9+pc6Eir}@I_2hxR+v)p}!M=qy&w(ddtB=o}bG{Gm z<j<Xx=a=vqd45}zas2vG#0z!u`8=V&+^nLK?-w7N4NaU$O!q2t&E3?g_{Uq^_ixMR z_c{3CEHUXV?wQ;60FzJaE3T9MDE9^*W4-rC2wZO9p7rHM&cckMUo!<C%r9iueBvB8 zgw7J%23Aw9$(kbiW?B4tVkbJbf%rw%L$giaLcZHAW548)!Mz0QA12PAvI8<VrL5#L zhz)ZAWsqBx&kWa_WwK1ar6y@}?)<LUlPlpLnOE0wE%idgkfiSA_%Zdodyskh;La(g zzo;=MMUVd;xhZ)AGGEupJU%+bSmc|^ytU2-`X~C7HcMzz&b*W}dmMS{$WIxA$kS); zuyQcO_7Zs=#Qx)~@bDH_PUAvX&Kq4B<Rri+FC)iwzMst*#77o_57t(~*jOD`V^eg2 z^LHt~#+7sA)T_S60``9hZ-uCXPAMcmbbBXsXt+Q4SPc0v*OUd`fY3s5hSgA+;G9Fr zcQcMeIRC!JrKg_4=97GkQaO_YJ7Ed@EO{9I?N=C;|9zEFn7KJuay4}1<c(?TH*z)# z^WMtEh%m0VS$Bi(GUg^(2Rp9m#qw<p=1kj%JOj^r%Ns5?7I6;9gPc)n`q7NxdhD$N zaOu=rsp-?j@05B{XHoj&8>QZZHG_4^?K;iwQN?d3<P>Kn`HHKP3yPcdON-z3%qY%G z4Hj1qt0-<BenauwBfegoIjW|(I<2m_IsLoE)!IGhZ(mIrfGI%xPHx8uawnbKj`PCR zGziyN2d*>T^Y_s?ST@ZrTYdV%^Orq3ST@Nnn;E_2{AGRq0oeH_wTaKSPZgiP><xou zIrH2s+kCS0{AKG0%l^qOTYX~A`OCHqmVMtYn|b{D^Ox-!Ec>4hyvJ@nf7wxifwBF@ zfwy<T`O5|e%XT>M9=-kiWxqRE_U8_~f4lSiWm^WzKIg#u@%PSOcJE->ryO`cT2n0F z5cu3Phh~l1=lHp@W-0SqY{PDM0{S-5^n+(`KN?<Wl{h%DYtF{lTSClNWTMO$-;ndU zO39y3`w{xqKATJSqhqghNW60sf436vU2VsEXHa*cd!rril|AXB!!GJ#AGE19**Cy< zf{}X#gR$PW`1rEeG}52Mn2$?2yHANROP|;idqt0*A~rtQkHnR;!x74x{p85Gh@-=| zaBt-~%HB9HKB_nHKIiOpgBP=0{WRl-fil)N+oa6C_%e}c#vIDX{;E3Ui0rRwVI4kx zf7K(%Ok%!m&Ft+F{k{Vjo8M0aKF-G(*xQpC=9~r#z8mD6yUg%3=YA*m(CMN9SR|L+ zg7@d~u=p-FCQ&9fYZURatI>(G(HFCb{g)G?E@RF1x2E3|89C<c=bil+OAYR$4e=}X z>hp<@;)`wdYSrfbcFlMO{pQhv9UJ)mjrhFsEm`t@%<s#3@IBn<1+#3glDwS~Kl}0Y zF|>pK(~12o@8sLE4d6@m$E`t^wAjz}TJ{Y<TZzda2TMHps$_N~_N1yZ_pfnQ(-cD` z2DeGST|abh%b7}^g<JJrM1Ai9zDdDvEp~!QzZZ%OZ#4Gg{d_O<FW8*%{cT}g+APyt zXfR&ZLj-5%k26G)=bRlL<vZX`ySEh?9@@qB$UNhA8;_%RRzXYAy{Gw;bM@OVQ%1>$ zrAFNpW!&w)|HasIqoT2S*hJSi%<P)S-yHsC@mI><mHZX+cX`81Id@9NaV_6b;+r(# zoXgcd*$e4$>y0yr;WxPSH@ugt-pn**Ou0hNWOf@55YrQU$+-Ye9N}EaE6|Z|aDEN* zRrcUV_`X(YzS_>7;_BCz)a{hDfF-e5;Spp-PjN6BnAce4*Mh4g-=g*_if_x!Dhb1{ zm9+`RN1PecLq5KLT=3Dks}}D(JbG4%|DROVlyNG3iI!5i;g%&kkL1lNnVUbiGQhJ+ zE!BGFJ<qd==XrMFd7iz-cYpI2SFX`KOSkaNL+?1Xd>7x1Izic2%GI)kTt|M#_XjUm z(a|UF-ybO_F3n!f4~uQSyG3$uRPPwhiFu#>ntazdI+t%7r%uL?A5s5kN`JC3<-XKS z*YwZbKVsN}Rok@GHy*xk#3p!n-z)frOSNH*Q{_y~{A*6@PX|v&O4q*C>TNmJ`ue_O z`swat<y$IF*WqvL#^(1+=yy@FRvnoFZP(lot>HVA5(A6b_g$Pj(zE&o>{ia|(pY!l z*GC*JA0BtUAKbNOR1~|aJr#bE@9_0(#unpQS5V@V_2>U~EPcg)gRh%D4$g8J5@$Po ziHgR)2fr;H6@6+f`6iSNmZ+iv`mEr*v5GsjlGB&)O|W#nFEv7?=eS3|A>ZySwZBy~ z0b5P@%HgSlw~vY{_^PG=zPkMezAF|qlyW1Zyn!4orx6^yac0q|=xp{q$-Z?j`Jycs zLN_^Et^it5N7u%L^$S&dg#Di$`twgy`*d==W^hL8#K7P4@ges7bWKvl*v5N_ff%m> zdw%Cs<ASC1afK?1z;hvZ)DMpyfxjguO@~JdR@Lq7f%kNHGzEUdU#MJs3w$f{A-}G2 zfu=7lgQui^9XN?l)|A71bH-an9FBK7F*v?ky0mt(s*VEhsy=9keYQEJ?3j14>KI+3 
zqFJlh6aNb97q7j3RrhOqt~zyjFe-9D=Dp{iiM{M<xJujg!>h_ps_ILq+cQ5HJvy13 zwnXf_`6^0)a7PN?15HU!X{_XV<#1(Gp1A)}k$)*#%Ja8jn}4+J>d?pF_+i!etB3pN z?B9lreKQtoe}P}&(WT>*v6MQ@!^Q>hd*znR?@Ml+oU7rq^C#wf3OrP)WtwID+nYD` z(Eo<szF+;C*zaEc&I<=^;&(kio0#F@pI#l3v%)q2M<sA@Zd4lUOYKYe<(!2^KXCYg zL%u^M^DYfI<}&60<0@dhuM>ZmH;yr;rJgg^Dd0rT1=h9HM&RB46J&SbAM8t~FRzyN z{ABP|!FRt-!~b_XIIzc%$hu@2IEaU7!<krn7xGW$m(-sGzZMX`n#&*GS9<;@7ZBH( zGAqP)!~!A4Ej+fAu|Le1-(c+5UNS2r*MH^xBfQ_nyWexq8eDrNwD}LNk%f(w+VIB8 zi!9nUs!n20sYiBCRMl;KUo$l@D^$t8BY8G~cxc7P_wFaQx?N>$-q`yWcj)LV386nf ziw`)J^#RVHkhKSa(U%<DmhVRXjFNb&+Qzzp<O>;kj*9xZcF~S!EZ5}cE2SB8N2}<Y z-_hhOGJM%F;R@)Ho5sGbDSSI@A~ec3<>iqw?362d+@UKblXpbF(w86EDYBxMb|ZJD zMSH%N7VTY;7A-)YM%ScSvY;l_h<q}kqm1>2mF!~-rV<ZNE!*a&zk+#u+xx+;>wKy8 zGlL^LX9neZWc|#*V)ln6s%b6s>Bq*DIPu*4J-P5uU@moJT|ur3=BbWnf#vi(<{`2` zbp0t;W~1m<(Wfu`HdcKC`NsN3_*5x--;jM-7s{FA*mTImh7o0b1$@sq#F@OU=<m=3 zSu@eYK5|Cci=QcVY`xxu9N24q$CK|xr4d_|c26YnO?UW7%E|p*+~3T7<Z|#~KYJ%p zwO7SGyNiCLE&28rHb#^6?XRrxS!@t#<E8|@i!h|Tt`pN7M}9pq&BfI9rzIPQ`(p#& zvo~dw{XJCkns1-=z`rM;r_6Z=9$5pvi~8+%mm2Y76Br~X<PSCs@0jPZ58_BiPdYeS z4}Ul~N}=s;?)?eij$a?=w(<Qa6Mt^rNxA9J$I(--@O@@Z0FBl^ptjY!>SxM$g$~k2 z0d2Gnw2@_g1J!CHD?FJtV(7dp(2qe`x1t<zg}voBj1fH(&J^7OPvlYVIJ{6uxq9$C zv|Nf=E;F1)IniIDqrUPN;wN9i#*_Hv3FyjNT;Thx5gwPdEt`j0Imbd|Lj|yTkeLpC z=70mC@c{3TUnH?lauF>4IUZ|AUW~TO95wmN{7!6E_#VpifFD1)K)w^J^XtP#=myq& z=G4|j+P;Br#ojsJ{8p^aw_@YzExcy(5;&6iEb$_d+uF3v@9%bKeKgh{1_t4WTlm$W zwa{~q^cP1@f;gEbZ}IIj=|jdOJS;RauhEghXAVyb|I6I@s@>1yAII98XkT!%o%5+2 zo>;*CnRuQMd*u8)ksHSox#1St5S}<eKQc~{Lj`;%(3?+e6?t#YlaG^(@S*e1lX~pp z6Rh<eL~f1;E@y0c*0uRf;2h@~x@9?KpijE<obSbr<XUhiFi4wSv?;JiyX=D<{YCTR zjyPQWZ|BE9(T4OX_-K{f3Gy*cU&$JDlA2zDoGn9UdXTB5woDbj-Cmc-)ijB7x|#pp zKt3^lp_xg;Z%j9yT2JirN_c_qrN5KQxuv=PxZZw{+@^n2Z;wWAzl%)H$x!Xj!e0&e zlHzApu*Xuy=NZGEWMX{+r;I~IW*D9^<TL~CVen9a4P7?(%Z>RExqdy=G4TdorsIn; z*NV*hZZjXRY3NydC*WZ@YfttzyicD_PM(|<(n?OA#2D}=&GldBd<Qw(!D&<4le|2M zJIJ|xduT&oXmFYR%UmDQ|GDcJR{z#nCBHWNA3Uq%v-)oVZvum=&mQPo@D-t75BU0r z?J?g!PFDq8`=F<cwSYc-;5vT{b}qIc=i+{*EtrZeXxft4Onz*^+a{>$)X>$Y{ndzl z6>obrI<^&dr`XK0CQypqIf1^D(CMSG=}g{2N7UgXNKB@Sn2gK4XyD8niOD<-{Ysv< zVlu?I+DiUl#$@8oTNxAnKj6=tEA`XK!xEpzS=W-ikU8c#WPfqzg@4;Xev0rgF^=aN z@h_0!dTpv<`bxwDJoyRSCm0I3F23K%zhO(^>-*$8eEi+P9!T$4wLCvr^7)zHz%2d* zaigYo;zrG`VmZ$t^Dp!tK^OEqIX}vJQd{M;0z>EAmPRuMATg?(u<lRYB;RoU5d9_R z0saXd{4+2{E>xjsfYXG@hT{gi-&$<%Q|snO`_{~lMjPixPycX!^vq-PqbGknKf0QH zz)tX2YvND#E!K?}INs#DY+`5O@V2#E@hme}PS#W;t~CtYNx2imvSb`GhJDD(2r&l- zFM^LUnctirbde29H|w&<_O=3WVB%488^rb6Jlg#)ieDq=;zTY?3Yqw39sW6V!7y+W z1P7br#wYFO*lm(4Q|*+WNPJ3k>_J=4mgCF*f7HEsd{p(h_<zn!Le9)dLPAyo8UiAj zVH0IZtTvOdI5}|*QL9}Da(g>TPz%-t7ZSiFVGtNg)i3mxfVE}@QD}*h-rf?RO8`Zw zRa@`vC4kF>pb|yMSa5#t=gb@!klN*S|M~qfuQTVI&-pCR=lML(^Le(<A^bE&UZ_d} zK94C={t6t9zX%*Q+z|O}4{+IeL*&@Yz~&eH2d_lOr^ewSbU<LD1;+%grf%VtR=ysA z!*2Q%W<G?EN?9q_!yH=kCv)dw4oy63n*xpv3AxNU6#J>nVXld1?*q#Zm|vMkH+5R$ znc5$%Ik>a7<GqIKBk!`;et+N$$<?uP9WYb8C4r(=J#ZoAC9jC}&60b-(GeBoTku2< zt>7F6%I^l|F7o~u@w`7$huA2UAD0Bu`TjZbCt~M$Vk&SIxk~uS^8V;k$5`uPN179V z-V9xrcOnNo%6qvNU2|6hxeh4nhAvb=4=SMtdyw<yUf_u|WU`;-Sp)lE+0$Na(gSD# zdnus@@A{OQtDy&@X~WtNvkwi(e%Rb+okI&;(1Kyaxqgj4+vv0K4B?McGd+=M@Qw5F z$s{$2SmPm~OW+sgTzvoXz(AQhzCDev;7a&VM#REPYhBBl?iqb%J>O64udL^8=#1E_ zWZY|@4=!k7@FJysE4m(Qoy*#gwT=yZlbq>h<#ROsta&E9{PVlm%b-Wi1BXnxr+h%5 zm>l588`vv>$D)ssJ`}3C4Y`Y+EVz~S6YG_}B-VR?{MRP0ir3v1)9sr%xP)(wpf1t7 z7ouw~00vrI&tA-Pjq@TkbnU_eC1>NA99#t<ftQBv{_}gUH*;_;W?bSUes-PNrO9TU zF`jn*I<0(@1)<{&N=M|ec)iHuXZjlBb7%URTl%q-dnT4=^4xlJpUE|IWJ+Jehe6iX zA*)_w(lhnupVQy`(m$K^e#6YsdTzaW&*Uf68>73gQho@wj_ts!8=P9nI>-kO?BU~a z@A)A=&t$K0z@*)|p~txY?DTzJXbJZpa4)#X`iUstx&qozz`7ek-v_&eCdPPBj{AMa 
zbAs4{i)c5DjPd!BGvl#fFa3}?l{{O|O1XHPfi<2ma*g1j%&V18GzKeauORfaRrh0O z>h4~D_S{Cvox9wu`|P=ux}{!Zl$BPVPmxokUOBTWOr6E-N4l3>e@bQD-h)0}a-qd? zq{Vp7Txjb{d@p&Tps|}`G&a}dHS?%L=&SWzc+T~a&;8d6ja?}`5`VRhHold$B=q)c z&{vsn>N(oI{%kyolC$*N<`~Y#BWn!Oe{47_S7BrN5xIF|Fp_wJg3xt*7n{%QP!{v| zH2KPyyZ9XEhtlJ7zRtvp+>n!d!HIGc2Zn?OaDPn7o9}Z%HtxH1{Kx3$dfHSr2@d4B zE1I%J*XxeWe}OswAK0_qpXmy^5GAtyQD6!TL^deAQkhpro&*h@_Az2Uy6!KWc@{k) z50C7r9P~lj(H6B7C>d7<!gG|4mDJP7dGcb@3D5DE{n!D2Q-1UHfLf&7P=THOG0F(< z?UXjpM=RNvmNOpV@e>CqtL~-^nI~kfl|q-Syl`S;EFY)T)H7#|;y**3USxnC<Sy-z zwFN)mn=rJX7&&0o*Ij`yx}IX>0nxonALk%DT?P(F&R4;;Tc6R*TyZxZN==xfywLmf z^QJ>ud~N22g6Mj_amabrJpGhs=Gu(ULo4^;yy?LMz@Qu2Cu>qECjMAx2=gI48b5uv z@aNB-QGyk5m^{GMqR#?@W8~TT%JHLjwBT*(0if##8NVf;jKJTyyF!^_;qnRL3*fQX zWUX~Bepg~6eV(>0`>3_Xi{o@#a_Cv#hz)j~l!b1e*kILnW?jeX%PxJ|tnch~eRh2o z&CUxw9<Q(e8LPfCYb;h@Zt25jeP^#RtG+4FYUyKaZ3#`y58Y?g`J7ef89E)SGr#ms zv(B^Wv{k3nHwjvOX1(Tz=1^X2(%a2>&JSJ8Je#m(UpRDKc0h0-mZxNBsE~KaIq|iO z{~Y(Sc4O;3CzQv%&}&&kF<L#?EjTWI<Lm>$k)g<OvR3oN2Qnh~zV(qf+_%NXWX+-2 zxvhD;+#FLZrYUhAEj%*mbv(D?8JNXv>!af^JMppwvoFu<aI^mN&#TaDtIim`&I{#I zK1_Wkz1|k1*Y`4J$sc+yy@p4v6nXs&z0Nb~bux7by|%s+TJ<Z+iX8gmCE4bl#s%G8 z3Jr+CcW5YQq;t)$(KS6y%*vU0$Pab!Oy+5wIfr?ncJ2=#7s=enUSu=(r|0E=%AlXQ z@X(>mNh1A>%}EXOA?t6%LC!IzPlx~WdSrj}4A4=pn!l$^XelxsHkCnQ=Yv*lGLXd# ze3pasUv$|r=Xt7gkIZk&-&mW!m3dE)sT{~uF4m?sr)9!}6Xx{HTFno=6vvs^y8rTg zzF^jU{`r)-92S?yWZgeQ*`5aW>a2Y$_vOvxz6`$^75&6Yjdh@`JA3W+o4S$Q(p7wW zK3S_39%IsD&J6Eie__@GT;7InSZzpdBgwI;#q=O%JHm_4)x#v_{J)R(y3Wn{Pag&U zeSC{teDv?-+x#ANnCmm1Z}U&Y$jd%RzGW_tCg$0ca#o(ry;6>S(*I?iO=;80v1zug zKDFA)uPLxNH@{|y)%SRQ%^-cM00$il!+#itPb&4TJe^yX{1W1ODtZp#n+d)rE91T= zn-YCbim2O%UXZ=~N{K6tttE5~=qL_+7JaG%IR;<4jsxZz8yxx{p2_-}&H5Bs`ZIoS z^9<P|{wy!_2KOgVuS?Fn<X+aT=pv<_ey+QDCi(!AuMSC&bB>e-VlqwvbW?N~L!g_o z{~hY?AHg}_$T`Q6i;%4%-vbWj+C#_U#-<!bpE-Jh$F!}vbCig4u%|<8q;A#xA6Y8@ zlND1ZHkv)B`z!XLW7vnr?0BhugMH|4kb}lzm&HGz+Oqp$2b9>m_P=G575_-F!EOMy z4d{U8mU@C8mU+y6)YGTA4)kN}OP66Q+)dv&Cn_lRF|G1%+sH!HWsqM})>oD4>A3pa zoYjE6Y_00(^{Pt8ZOpr@J8aPrAO1Gf-_Zm9&cdexoRN6Yjyj$T?z{2P@v54$7=N2% zz^-Oew0$%FF6KJ<@nwMl*#1TT{UT>|S#~Blx9cQlOP<ds6<f<b@fj}0ulGLa<MDWG zmC(@QZ+RkC9!{a9`BMi-&gmKl^i<&FXs9c}ez9jsh1euB`W#L8qD^R7eE`2+(a(rp z>n{iKaT$a!!jfsFZJEl%7VuWP`@)%8rjmJhsy!gOltMMH;cymy5F-><4|qb(s+w*) z@*4GsJ!#1xVhM<CDI_+=gN?iJBCf=FJJ28YpilSVhY=!nYXfmV(BjQrwYi{e5INij zdLl<SLvCAvBKaCN8^nKva+T8Ew|Po?dG1$rC-~yr&H1Z`p{cW=P0bf#L&wLq3VFHl zz`%JL_f^Ttpgk8Tlca9{1?amjP`3DQrEcBSO>9=sX{`l5UEo`4!`zaw;KfIuF8ujM z^dZ~S^xp6g^dV}h4$k%ZC{qsXgvVB3qcQkzep?Xx)+*;fK9=@sfp-};=S4DZ?t58F zO+0HMM{*(fvmIMBNnScOa9(dWya7Ls%^vh;Zeo5VW=H(4#jj{AYrshia4#@lrmBID z(eXr8r+M}icJa{dob!=GU*!K$dunL^NTqKW<H~%rydPV6AgX4X=X^YluR6Xrf$`X_ z@2tDC|JC?8AKN(R<3rB*h|)*v?5Q5+NMkKwk27r+*y?-j`0Nj(ov`$kvg|Wz!t^P- zTnWheQt9LzYnC&pc(({&d=pOCwZsl$;yAX^AAQXEx{L?h4Tul<xxUONzxP|yckbM_ zWi55l7imwUJx6kRe;Ms;ryWiGPQSz!7ttTi`ikW<x{y5ngML!&ohA8gG;=L>U8r=l z-X?M0F&oWiTz#_E;^XP2jdpyTS{ciKKZz|OVJz4a%spP4;4=8)qOBa(k_J42&puKu zG$5GVI$z>!9X+;=E_8r74Zf0Q)>jL;xvh1jsX2Sd+vXP=ak8wh6Dd<}BxnA%L7as_ zo!G7B6)|^*eu&MLIv30S2fbp&uarO$`jo&DGxu=zp%LXBO~i5t|1#gV#oo8@KAk@O zkaxn5jsxSn*6V>)?<j$txt@~Qz&^IVvqK9h+fL5ryPSLr9Hs9ZnwW=fy-(n;aSr+Z zWTmg`Q)Dz?C4P5<f&E2Urk{HIeUv@-oR<rD=lO{E7VMqH#JQY%&P(lw(J!6z5+3de zB;wi5qtTj2srz<tB@C`_M|U|KJL=-oHf=5BQ3~G@z303q@d4YZ?3447ZO}m}|0(P2 z)Dk7YK7LCwv6H{$>O}Xsn`;?o9Mr-~eXPAo-mxCGRFxO`8)lc6?jiO^FM`hFOQyrG z@Dpp-UFc59Q=^adMEr7{Abf!9B;L!l)YPrE>sRs~-?Co%q%AV-UK6o5aF%&R@KYUo z;?7tcm;)ZzLb-PsA9OlW`Pb+c>Cd_4e2;*?;M)1>@WQVomeEVxz+CWS*@Tq9hhIc9 zmZPWK=t%d=`f4T4us8`DE_M=N5-V4k5<pHlUCt~+S>jo;%kUxFj-A0F|2c1THvh#o zU6#sufc%#|gurecK0aPxGM8}_LK7vQviMDUsYm2=A6MZo&0M{eO2@V(h5ZsI;X@zm 
zQ9Y4=<Gb%M?q^0Q``*Xb79U_@XEe?`*G*rCAh{HLZcnh`((<637Y~o}6H~UQo-5~F z1?!;!(0Jy;>2FA<-f<N{e4F<8IP2MmpI3uC&s8F6E`@WVV&m>b7Fu?THTDJ*MlyEn zS)n@7g@N}4^w|MiCEl=&@1Vh^pUiHFFI1iN!Y2wD54pUvck#}_|8wWead&xXkoXPH zdFC@1|EBP6we;NYT)Z>s#(CZ)o~tc5t$?rDLt|$lpc~sFHlNwplH2gV2{V=!Yyi#B zDsYT(#r#M95#N%v<lE9%^Wsyx7T*&1Zlo38l2&|6W>-%Q9-zLn{YtVEd<e2bEBL<g zLWvQIoeOTAi*3dAT6qOL*mR_Sn)sDq3ol~qHmB&U%H9%R<4)E_6);wKwum}S|6}5> zrC*w-sk(8Y5^O9`IvT-)TARAjHyoXmp>!OJ*V~p@Z%(XU=xTd5bu@E^L>F<|(yqiP zOPib(l&w(i-zDD|XPC%bm~BDZ3m;U1g(s7Ph1a7~y&gH1_)?`w-KhDHbEu>6q#DGZ zC{m9-zkx9s*h}l_8~C=RDzo#}hD@c}MLe#^f=VX+rO!>^QKi~>tBd#0o=nYQ(x7?5 z$8`P+O{ibQnJL&=>upO{S0cB%p;MzB$kfz<%(2DG|Kc=}s|DV%T$Wbsa;;4XN;~I1 z7n*;u3C}`e(9Zrw=1As8V6A{#A~#ArYb$NBmRDAEiaj<vlt$c}_}?!EjyrfKaFn_P zmeRh^<ixt2KZ?<2D>lrmKQ&%IxO@=0AZ;9EJxl$c5ep&zYr)~f=O4$P%NV4bg=0cf zrOm$%Ea(><=HEL1`H<HCvb5a_4mef(6SI|lhoc{VBJmll?Ox)YYVKhylAECOVr)?K zNdpdAhUU*7IV}jR14FQt2@RApb0pqNc;Pz-B}ZdUNW&+Qd;tM?etVeyCEj-ds|EHP z|E_BGKX=CL=<ESZpU))Zsog)D9z;gn{2OEsp*doI2af{c`zUnOq(jg?7iXhc{-5GE zc@cP2H+*W)9sgGFR(wW9z7wBO`Bt74n{DO!HSBg8V`#xQQe;o?s;1baQ(ul>fNr?| znlhtb{ImoXj$u=S8uiL|x6w}xpGOb%`-TIfxG$v*UrOY!Eycov`9}PmWNphD>ts#0 z@&7$|UN<oJuTms$<rZ|(5vjwB$Hu3!j<uXWR|$5Fqh8w6vZ3M3hqcatVaIN0ysVpD z)ry><vc=6hGFWF~udFZHT_C>over5;My3Wvu77Z?8TP-w)@1%W7u$nUw*&le+Oz!| z(qnd1M~1`i{%%rmS7>@r?2s}K26MJ6lpL&cR|Mxy#s^h=F7OZb^X_@x)wyl(IeT!e z$Wa4__DjxWiP5w6ZIc+26_Y2tb@DAra58H?8ya>3JUg(wy#F}YW8m0qU@q(9Bi2f- zt@Fz5{JPx03*I1pp`Fn2I_MPqRPS=f=$GJO7j(Xk^(H<fB40<LuWt77qv6#;&)kER zjw<{TEPA$KxX?4bAr}}+Us{n33}9@?JjCf4*VFV&_S<vg^vtC$TkVFPiQHAix9p80 zmGrxq|6b@BK82B*NVMIG)jNK3dGJKaG!qXU!>$kN%kUXSPFVyFitlwZ{+$zmi*qIU zGT6(2tLB-6vM*s@)$VNeM9^2Y7fTuBI*tD$`Jc-3$LZHb+CFSg3vHx7qF0c7=SzV@ z2t2-q_Y$KZaT&F;$Kai`32wKG|K8QW=Njr<tETnJ`J7?CG4NNs8acC(?{*+_H89U2 zKOG>)k{{l3HGbKTBL^+xJE^-C`Hh(IZ1LR|nQH`lKk4V;6#7p4A^I1lea<t9$eNZs z9*5!SUi#P!Ez>E}2>p_0(#JmDSMh9vIW9faz;{ANr0>1dQ_pw6pMFTc^<H@<-^m%V z($8*Wxr-=k_Jy@`zCOg@5bJ{v`QzL^NdFDSDtnN>`#ti?`M$UMQ%jyH@wY$1IWAgT z2J;Uur2lE2B+g6%X6#AjOfHehidic{w}fU@^53yiiAW!$E~!JlvFecePJa*1I7Uic z&fZr1VG8-q;UZ@VFm+@ok%419k%7=5k=q^6s5I3RNrP^^{`icog`BN=)V_Q*?MGf` z-ZP4o$X@oM<Dg3!6FiX#^kW4+RUbo_C%_*#!>s)^%57&4pJB`NXDnndo-xq>F?DAg zU--nw@R^UH(F1J*{R4+l7iITCU&s0G=uf{0-*QEnEOMfZaUgjz29_iRmqk;iT#YVn z8MGzUt3-(ZLbgivYgZ_dUhv=!Vr{&c>3)qdYSf<^=VMpXW;*iTb@WAKjgKf}sLNNU zUX6^w7<<80p<~YLmB=03?^T_9(rc7R1K%y5pagcolf>s9y~v)q)Vqv&)A?4vL=QTO zwV*W@>EOeD_y#<nM);A;OC@uMoE-TX_-f5nVKcT=?j@(ti6N33GYMLlj9fAxhz%zs z@-03YD^D!`T0rt!px-2JEw5Mdi=JA7PZ4;~1wM5R0=~AG|8qC*EIH2sF1!Iee9+2f zuC?%c;Y~u*rQCjUNNI8TcmFx^@0-;3Hnj0a(2u*oKOgtST%`_Eeoc$<Iq4@fsJ)eO zx~Ny-`Rmjx&sk?%SUa2O$0n^0{3RE@Gv3~tHu&7n)ZjL^5;SG*^rkD(14OnV*YYgV zoXdWXeTk1Twm`#WPbm8ojeEDM>9b_d0?k{B&O`pwx0-m4c-@ue(2)W3w%dpU#9yH> z*)z|^``EtnOdQ?`;tznm8Po3|FR;r|>2{^@%j8#7Nxp=pCtT%~?zTT9_j~2+O8j57 zc~=Bn6V~1ud?I-(`oG|@Ml~316#kVHYGJ-)eGa272e4{mUe*D(FtAUY7acflTdxEM z{hj%_H(`E&%~^iVJtI89Aa!(6_dz>(8ff2|cge{nYv#9tTXxCAef~Ux|AX8jIRw|* zQ+g|q%MPwkg6|GgB5hd`dzpf7=>T-^$suIL4$VtU1~`LWK9pOl23}@9x;WGR8>7j) zZ5z5LOdbG{m4={C{ZIPTY8&d`PQ1VPlT4efuJYOQ{SVrQ`U@v<#`tI@@?+-z8P<o; zu(kHAUT>Asv4nUc>G$8uCx@<W-fZxo2fe2kovFtzvh?6kFiYt;xWdC2JdyW+$NR_$ zA0QumRecJ3;#$UX2v{ZVJFWF6GPJojKz5h4=3`E0pS9LvdqP=je`MUv@wF!Vgv7Nb zx$CU8)_BfZlXwH!UmWCn`A+81lJ{j#De}I^If>7X2H3l&D*MoLgvu5m2SD>>4S3Oi zX@5xxEJf#`V>dJ$y5GYXHO44B9C~y*x1#7;yZJ_FtCx8YKSv)|C)Z}K8rL?iq7&)j zD)LyEtH{~KT<fv9Imk6;9Kbf~a;%Yh`e*%$xZs#fCvu$7tS;6C{-rDPe~})LoGav} zWG^AKEWg);EqfY^mW6@ufqwzM&j-%HHwt|BBja_lmlqlEPvF{R$PUM;_a5Y@8sOc` ze~qia+oWf|jL|dcugLCKGDd}G|3SU-Y#8z<@L0(_uhbUh1zhFDfiUz-o@wg%Kp5Jk z#cA7d+Ut5kiR@>6b}n9YMd#wh{FYon-l53;;d#MMTPpoZ+xYhX!Y9y$??<kG^H$=^ 
z=}$NPYLzt|jUJ7~r(}o7k^a;Vz`bU0q?l_ANAk?QyyWwE7r*<?|HX%kcfshR)m|Mv z&cF@xw|yNQf5^UI*B{i59_Cr_T;^2H%d2Hv^6i`PZ#`c`O&XpkhxX6<jj5ORVy9@c zX+{e+fc>|ol)m|$lv0U}v|=JF!MR~Kc3#=@ZQA{O+kE70qw5{DqjP@Km9bp&v794` zbxZ7t)V~0mNYrlMcl>5Y@GZp?IeF&*;>lC6{b<4cwv^su*E!*b<j&n`3k=7$;pj{b zpr>d5loII4OAb27f!UKH_nb?b$C+<gNkO4sJ@C^W=Ix5)8C!3me`n}fjE0qdQwtDd z(h){}>d}>moH@TX5B(yxk1%$?7S@j&UAD{N^bceGd~?>ff_1iB=-d#K&gDbr@I~Ju zK1t$(p{Tk3FtkurhxoNx6+guxeuq7mn8mza?;$190#1zJ98PJYmg_8@{g~W7-$EDo zBy%qLkL<*aZ<RGRY8v`I_JGjS*~Bb_%iv*>--|QXEB3QjhrWrg`1UH|_i~j<KV~f^ z*0Ynm(m$h~!+J%qjy3AqJ~80Uf=;4ma4o3cdMImJ@ICr1byzeKSixgC>p<FqzSd%2 zml)e3u3`8j{V|_c5<@2W^&X~v@9(MeKcThpy5zac>%07{dL@^a)GKvu$4=&@|B~Cx zQ8YE!f}H4z?fo-%AtzR{mvSIuiS9x6?S~r|{=5(wwG9|}zlndV#Dboxkp2D*VvneN zTkYtgKN3T}6x`(fAc-rlBqmq(FviVSmHrCYxXy_wulnXyrRT<!SIzqJGpmfmq0gF= z68lEg41;#$nfH4srNzXzdtbIMXl8ABKVVNsY`OOhwIgvprlS9^5O`&IN{<7}X8bnr z@#Q@1rYB^5I7TYLA68;Bl=Fv8y^zEK<c6;IGQT`K0A5MCSy|*PFnBgVndIe+<!>=I zX*02{Lp{+N@;znCdzlXhcEK{S3$izJU>DqlPlVVY<(&&06MNEVc&P*0VF2S<#3>&f zvGyH#UJK2b1)PPiRI!E{@I|Swt1l7#N)PrziLVtLJ-DQKwTF3~Dz<iT>K14U-%h&X z9K5u@7vt~3M}!s$9}&I%sRyc%dky$TCF|bH`oD^N5;Mpj8_S1m;-t!64I3!>vM=*# zbOK-E)4A|z=w%ygG$wofV|=<`<rzNG2>njf3Hx>+%K<0fZtDJ}hnTGafq%AsTVnn2 zg2ejoh9BHb{jKNW0loN*n|zBgS$s=iEo;m71hj(rI)FSSKHMS;hRJhfJ@+ubckf6F zti#5KF0)<W(an3y*5_e<<y{T(y6_m{&P*O7bz5ar_RRl^obtOB<fkKFmZcBX6p<}$ z;-hBDRYGe-#~-E-FI5nC9A6XI220y6MHbyVAovRFz(XFpw#7+-w#!A&8`HO2XUzz0 zYy-dBE{1o~f6<p7L|&5q*W%aS27F%PTY=FqY+&u@z~Vm=U@;6i_#c6Vz*%tk+`hC9 z7yFL1pBt*Z3K+-x3{RhUZrNgRU+n&g{Ue61Mr1wl&$G(Qxr#E@L(_#<CL_<MvqlE8 zRx(&KnZR}sJTmL_o)O#D%C{~h=kIAbD>t@h+=-t9xFB-um*uP&yhM)usy!o{`_po) z$XSW_`w~3v2k1wd*>e?VljCdvx;JRG_E7rFwk*!COgGPK-m;oGx|JAMZ*=|};X&kW zh_0>nl7mD$b^RMH&Dda*p)csFU8=UHOfBeLLLPGS{US9m__KwdpzEwr7^6~~3aoQN z9W#iBz>lB_{?a@gJ|(<?GBQ6+jAto(NrnHF@w}ow?EGHxiWU!4CSgl_eHnR0g}yCA zUalptXxL8v6Xr%MR|9j4UMVO8zoO&Otqy6r^2ki~Lcs8LzU#e9=_rFPHBfdV`9<d@ z`wKQuM;Wn(W68Ohj_vx@36B>X89&hV;k`=7ZhQY^(YIYqdCA|IM()pa(P?0BJ3?Kr zsE?buKhfW~KO_(6GI<{V_ORMNxr*<Ecgl0=leDjs@AJq5*!WrdQfDn!r>CiUH0>X5 z#4ba-F4~ZNH)}4?!=Cjp@{Y_w0rA7+WBS7bj0GNY^d)dNAD@fC$n*~!8WZ^OU0OhB z^jr7`bU`1)FT;s`Z&r)a7p9$NWF&t>eTl{%Ly7M(WKSz{y|uTH@p|d^KfAYZ+&*|s zaT@Zj86OtgOBE^wy#-CHS2vMgLGZLd<aqEU46cA@fm!5B3BqezX;1nsbAIZ9nXHd$ z*2xX5m+M(K*TGw^g}21c3~702ZFTX;uGQGIrpW#vmwkfB<RrU@?4PW5><=N!SCn_O zfXnc@28s3Rno-^nh8~GMB!5g(_5HNh^%Lo1zCVfB6Zx<4-?1oppR!H~5O>}&0>1;R zOc}9jQis@2@<*fuB;TDKy{I>qhdGpA8msT~h52hVwZN||N)CL4@4tNSsJnlw3wx{e zPUW3aG$1f0UcdRCJj>zX`D^66ixS$BcBZn&mUZche*X=DPY?Rrm*J_kjG>+D8|d+T zz@eFIt+h9zKSE1oALKzF)JD0OT%Q$sF^Qc0Ut|UM{|ucC^*Si~_Q9lpoD*Ki+8B<F z<QKv-snf^Rs#EHN7D+C__EprU@LdJDx}*)|ljxQ|txra$k&jNJFeLk_X2w4(OPQE; zfztjVb1OEL$&{BmnvR-$tD~zzJw2{U;?fH<J@YjDDt0X;7MSz$%GAN8|4>&!Idhrc zprE7PzL>My8}rwgIxf}(vd(Ee+4e_~%NHw=w}&SMe7y6*JG0ET)ldd)%X@hzWq!eT zQpT6?y{6~;wZ4bmAg{!{DE$<BkqKL|&Bp8bf}C23<)n<1{h`zc%xLp0n9Z^)ee7!^ z6?w6`q)(y?7I=I@UGiSuNtv1PI<#}@kTDZaAvVAE%j4f3;#=!^D>#Aw)|R*DCk6hz zREhklK<$t|?SGQB08U_Um%YH|U6-mI%wNr)7IU5kJ`7oL*<zQBDW%->i4xgD-pxd& z5cr5}AvhrOZ<P_*VmmZO<QC!Eu4(Z5gV9NEUJAdUAFaRv`X+ngc+Q5b&~wDD{1CqY zb6&Vgot8W#{1Xq1r#g`zhLJzxBtFA;30~8FG3(oEU*=7o$vpDB9ln{p?iqF8FxJ9f ziM=?9PU+7T0|NJcgZ^mBq}HA4z6^34cO#R_f6*HZ<i2SDXBl&!&;OG6yfzCwsaM(% zd8^!fo)cQkbLrm+{Q0GSt?xv)?Eg9EZo#)??=JgxgZ;bkuw92aGjIod;~jND+pcI$ zCHwHMU)xJ%f8Bx)%sG4T*Aw^PqDPZ*`+sRK-M^4|`$XyJ-m&D0UjYm0Pc8jO9Dn%p z*t`f{u462c%cX}i;KdJD#&&@J_mdM?p39otzeDYiXNviZxM1kar_7PSvyE$PK5|VN zET(VH4Nav@!6R}C<`oOgqz(-}4lUoL9mo&3W~W#>l}*~B^gtM&7A>w*iQrTJxqV1T z>~;Is4+!oDcAYkDW2ez{rO=<w#gCz5Y3A2*#r~U=$O_u{Dp^V7&{N1m@4?p(U8O`s zhCEoNM2ctPs~~%3)e*q%k}-OL?>^C?^`j3xi9R%9Pr`3UA%DFZ^1$<cu+S5E68&Q{ 
zv{X~G_ps+T^#eKR2R;K1J?xEr<djj@iT_;={yw>W<y-2iHl77-=m#cn1~v1;d`51A zr;_8%y_xt;fe(HkLNhx=pYa3mF#0v}!~py7W7smGkKrozoAgz=1(~Jn|5bbMdTxI7 zX6Bh3I0`=7-Q>j)eaf*>N<{Wt-Bqk3=;-8W$`r|SBDonpyH4;kJ9HB<6ZcOp?}*KD zcIZjUowyDgV+yvsE0CpFixN9*oteXVxsm<ga5G~Ue!m#LDD_N>*Yo+Q=%nNH_nzV9 zoGp@%|G;2mho<V6SS!8@%R4k;NoBls&|=ZmFMuXFzROvsi^v0S@>b1#?BAVhdYJd- z<(~crpA@}8PG}N)*1xj1cc?@6=+tN6TPSv{%O@;dE%{x-3-vX{oIN0Lm-s1_b7+RC zIsRw9sjg~<j#Q$9Ht<dOjIqcbQ}p#3_0&Pz&>=*gfnJ6GU0WmYaeTWx*fl`8!8cs( zkg**nPg5i1Bn~fPjwvVfOW}o-H4>f)tYzKE_?64p>tBUjLBD+==z(o;ujsvd9z#b} z$e9V)Jx@_SOzs~au&4uG^r^vy|E$z|RD9=u27bxfLKe|BQV+aV-i@Wr`>FGCWNr^< zpuHz$xktxaoyM53K}g<<NFDI5A9sCl?o#M3vI0hqHU6=VHB}EO!C8xx(gxa{OS@yy zzid<W25j}2Jt-{(4UD_)Lbb!0rbIq%iq$PL#X#B>Je9VJq50V8vhPKn@gXN6V>BGV zx5RqimalBdabP2WF1T46qnR&k>Akm3@UIs6O-qi=lfoRa&qsfj>n~xhq`biGX6EW9 z^k`9=qc<1(*=%4pk@1QDT^JaOj1fjJAaL!$#+j`;oYF7p=ejsN9O$l&Av0AXKUDIq zz#tE}2z>r4VO)O(cN>AR$gWb~!B3(!;v3q6AGr0+=7jREQr?==#eLD56_geF_XFfK z`qd}A{J(_<oAd#isGso<(U5mOtJc=EE?4@|m9+noeo0+zoMCYvV-tS<g&#heP_7#r zv)qUIu7L5y&c`SWl`~H2n79u7{u;T2eCUwEobMs+37@s<#;3I%_+?vl2%Q&R_xV6_ zTY=x%z~xsrQ5QO*)<f#P8yPF~-TW`Su7>+2_ObHr+x!<9d>#K~ZG4CO?mYG(+~36i za2|WW6lJ2>R=u`H$_U>m>`*&q^1fE`b~2V8*68QF4_~aUal~o8)PWq+k#)N*Son}V zSU55%n5U)$lts3U$^}hVl1ndg+bm_3w7q_(y3a%3{`ehb6*B)O(QQjR6-On8wII|7 zkHXgAUKcMXdaqrSMb{25%MO)DzXy6E-vFmg_yK?6k52=83vSxss44gP!14QK4YEFb zGt`a|%(pc@7kzLrHp#~%xHkjb5E>$FM2R60+A9A&Jij$ZnI!nN2wL+wIQ+|tm5J!~ z=Cz{V^|21S$mwn3Hv340XTqE0YSG&6z4#`h-;>;xDs;kkX?e%7Q9{c-kv!JD>?7)a z*-~AZy52lPx^_o-hnyjO_`#+@#YN5`PH%JdVa{#V9&!#5xCBQ`34Wb<Z?*sVigot4 zuQ)`0HQ_&Dc!~JYs7d+P7EL^Kt?l81*A^vd*Om{hy<?sIsVjsAbul&>iyi%o&~>3- z$Q@_V_Bv?0l+{MQyhek*7sCU-0B2{xqov>7=>4SM?eV&VK7I%s55(d3S)=R=pf}LL zHsm<r83HHSkIg|Y68Z)Y`BIx!d)}_-Bp>j%XhoMzbGQ9IWs1Np%sFJz&ibh~6OWJ4 z7Wwe!9fziv?VXIaOIubOcJ@z_8%_E%nb?BSjKShzGES?_w%<f2J^35jpj|KjrLAUg zF0rjYC$^OUyB)OAn=t-%Yy1gi)?+I`9?lb-Y|B<AN<P;d_M-PNZ$9Xc(9_w#Q~u+B ze|j%UpF5hU&noj{p^?<FioIz2@RY#X_+E4+Yg?XoLHo{ExA8aXu1l!<k-t&*0_rY6 zCvh*=%jxgkTyI0Zs9mq3&lwW(Pmjr+-DwJNgJ)Bw4mn8Hm=n8!l`9fEfa&*F|8p6m z!>?Tvvm1zB+V^v0rwZzvY}qG7c1g5P3^nteMC0}e;n!lLD90a0+Wd;P1i$?bY!SuS zDDFq@^L+=|f&GC-?pw=dp&<{UFJ|4v>=kt%!HbdU>%eXKM)nC}x4?#aRBRS5$tQ~4 zqT!`j4%{)^6YpbG-O{ji?k}g7){z5uG|%uoi9P%Em!DZ><Qw1b?4^x6?B;i@4Fg}u zLEc+@TO}W_=&S1AR~Km5DtzoQ8i4J;BFA2POH7U}9INz+Ec^LWteLnh3vY7&J>5&9 zy<K#`mJi@+MOihQwREn&TX@;*NAdq+Pt^>c71?7La1?qyW+~sq-w^{6vz2mfa>jU| zk82})UQ-7Qt-6~%Yob1UDBoMU?qTd{{}COq$RN^o8s8??U&Q+Tj6Gi4@~is0ME5ZR zdjWXZh72d=ML#TK5&ro{p2=85zO(uo+b^l16YQz9I8SV3&y%QYy&D|Oq3(pe@g~b& z+<lBZ5Ak}XO{v$~8^`L-33XF<EqjXl7II}T@ic4oU(xUHZBeo<n}1Ggvcyh@WFId+ zonrq#wNnX*zF+3{O3_ihz*yM>cCluCa)ridmfj2c)n(GIeE+3!eO8N2<_|qeXVN|m z8QR&Jv`_3$QiqrOI^N5@)Kv#flKK+$YB8S@IfG;>dcX?wfaT}`f7AAzhsnE{;D0K8 z)uzEm>A&e;MZb%QQIh>uF8z9nHPdYNE!VHpH~G&V<}_WM0$p_jORLNb`qE5a&|d`J zg060h(^cUCW}PLm`b1Vy>h9@3f}HZD^5{ht(avSakhg&&2boLRS0|1|d__Hq_@JK| zpWv80{~mKAXJd!)@i}?tKy;rOj6D+=#Ll^veQZusav*!yfI#jrTOhB~78puS*CCxs z;C{wSzF1<_;^%6AY7Y)E@jWLrP-y=kY=^osuOFFPS(hC69l8%6`u%+N6Lq|Ers2Q8 z`NrU|a%_gsWA{b{xrw=Fu8KD+k;X<PSo|zF@B+RW&nl7sekfL_)O7;*3Jz_i&ILA4 zhn#n_4!SAwQzNwMb;`=~!Ppq(nc()3_%kWni7r+AMx~B-sY{-%$F47ZmN*B~=DOX} z;cOwEi9Tt>cQwuce-;jMH4JwK>#t>hWm7hG6Km0Jv+=X@OWGK%RwAwnGq!WmZ0w8O z>`i4q{Jtlq2a)<C@DUlqVu1(uj!ESmmB@30C#f9@uvnKc&i|rs3xKoO{G+4bSIdb3 z2ZrKnU!2T6-w&maGLAc_uLoJl=PnP{;oDM4za=ku-Gz#I*18EN_UaN3Xu;$;X$yRj zatgR4WhA~{>idbnh573mq}2R7ZA#ts<KUOWp;e4c@a?w`d-`>|V~^L4?`Wnc(3t7z z3omIdxM<PSs}GG(1H?;{8(Zn{K+9_3e=`3v$A#+9-Z}WyN;@8F4n>a;pF^3iCTqTU zj-O!7ee_ex-xu$fhRt8<li0FGf}wAxKik+-$sVuJuJj#7J|m}})1fO<QlSH{u<z+& zd}2@Z!Nc!-!qX9j-)j7CM#ry>d~J1`Lz&{mPe5ZH((CRlX+E*=3E9Wz6Z5(Yew}Db 
zC<A5-m~Wrx1F%mO{|)=pVe}fJS6&RyXasL2GM9n_B1_14WekFEDt#;fcZL-x6Xm^$ zi{ME=>)yhXU~&vkMpK8Eb;3R)@X<CU_~B)?ppUv^F*=K@yO-EEhOzw=BSVEpv##I~ z;mg>!vd+8h12%TslQ(vcYPvE!UWq(-6MMm_@;o=xi%*OX-PnEQ^h10Q@I|Nt=jyLz z-sm5(VSb%_>y6m-7sDeTq^#A?y$SuS1E1@VVH|OsKJwt1wX4N(`s4`c1ACfU_AN56 zzn1T1e(=@o{)^y7Y%TwVZ+nrU`}5*(0?x0~KWTf|$>^kB`Y7wdt+ZBu$XwOYPoeh? zV53pC4xIo1*kbO#h%xu^zHF+e?+D+OT()$zBSqQgKIMHwr#!KYkx*T6iHuw7J%CS) z`0mKqMb|5~t$NA?<7MP|D{W=ptgia-#fqTREA^YU9m=-xPTKFmW*lJ;UCCNTr(ni$ zOFyO0(hth-DJ=K&4Mj)QOn;p8XY}d*xJr8B{gJ#OF8MCrkG49O*;lI{Vfw4W3y<Cu z>rW5+)~E5Sls*;O9ldhqvcSda*Ai^W?0HIk%r8E$pD^b&f>Y8rxtIS|Tia->oOLLE zDLkJzoApyhu6>1X4$^KJageS2F9WY--x~b|XAgp>KJ?Jq_R#_Gdt<l|c$$4wHbxs_ z=V(R0sYJTLOF27Ww~mZPy>jnLMTeHc9GEzp2OS+8`X%G)IaCppXL7IEiQ@pqTX-)2 z$@A~m?8pr9bq=iY{o$N0*4Qlqdj~YU8k(D?M4%~of;Wz$nE#8E>w+&Z$AgxB5uGGB zFVALS$Iy9BKI|<G^sTX+v!n2Tk>?)dxJIENTr0)D2)f~_YbY_-CTm4}V8s8W2mcp2 zw_5Og2KX<yJsi9~h78!8KD~d@;%}RAiglNQ%h1;r`1n5)r+7EIiPLi6@T9Z+fQ453 zppDR)ugs(UZ`5(>ff{6>nehK=<bWHH2RPU4&g<}7=UlI;F?*=wR`AN%EE7B(C+DCS z`d%7k)A?o~-)11wWd=`8#Sa6&vl_}ZPd;Pg*LWu1W{o6Plm5v69Q^q))^2J00$cw; zWy^Cy+j#Drj!hoART;O*d&%=FHhB%d0c`Rs<2LzC|D;WR<^SJo^2lJPZStq#g$`;8 zcKHRs;a2eOp5aO$54(ImcKK55^0$s~{9U_z)p_mmoR!90ce8H(WxM=$^Uq_K&*{x+ z`jKguAJ6=X4@Vnk$cqn$tPd|fH^M{lA^jUZ9yz@uprPh_d^~dT@o0trJOyrbWAlso zd1Qskz8>@QKsLiiQwi<*0vk3sgl?ccI><AxnCDCRMn|t0T$s1U;2ajcbmbGoN1HrN z^2|vdU}xz{&ZOaNh==ml9JaggLw1_6fl`-s-rw<?uLJL|WsO`DJUNFr0dfk6fAeAT z+lk%bm4(hV#)J)PjO|K5_z^mX4GMYDk3t8m{gAZ>>Zh;Tz<4aj4jcR2m=Co0K#6}S z=TU}apCsezVo%qG+_#Q>O`Az4WWN}*qd9=>TRiJxy*G31L}rqGq_tmcK)0wd29XoC z;hR$kzgLzh8x?!gm9kHK8QU3fZNG{A1T++S8mq(fe;yjM@pobqZiAMh1It!;_HF8H z!e*<*>~t%&B8~Wmyg-}f``kA;(1r|ZwcE-*jJzj7rHOV^(bWz!eP0J5xA%d2Zl|)c z+Hr|mT9><ab)grX$oKhO!8c7@AH<)xcmOmeaWA8}EkESwPTRgNMSO&lnU?{~PfD;* zP3b*;vj%N)@EyLwOKgGL1}il|Y-Ubk7QV?EdXT>3Gu+|mR0HpgQiDB9WKSB4Z#ZsG z!WZ3+eaePyFXkKkz>AK+q}fW~$0LRZB(_AsU$+N%m8rSCi*xabL?5fs#$mhD?Z*dZ zE^Dp9k?OC-9_S>;%&bNBd0xKRF!t%{J&QNauX<?1nub%gZ(ObFRSn5k>kSVj&s@fN zvQ;aTj(S(>kkP*nREtk(!_epAWoN%JH%-?$>!XTq^tJTCcrnA(mJ^GYb-}j{;w5?n z)@oMoA!P04%$G5m{S572eV3=>8vG8frCfc{tE=~CDt+W7avR8u*CH<#u^!=X6Pj2L zRhN~QR<S3o!cVKgQRQw(<Cn>=h+jEBblr`rvtc{_R0eo4i|?gv>BH&viCMct+P`di zP};|i6sV+qgKJ&UuGO=Ols<CphKy0k!4K*4LE4qNr0@36h<D@L$`Lb|OGhvD)`&jP z&HKbQc9M_dMxJ}=FY97r8T`2x`MMarb~sgOuSAbt2Ank3kl}|Xu?}SJ+Gq>8R+o2P z{(H$&RLK7>z6~QkIX+&&K9}<FbL<D%@|{-X3Dm{M>YajI$9O9*LuR6XUHFX1chS3S z!7%G<8?mF?SXW-;CfWB4qYYVCUgV&1WR_z7$8<h}O`T5~ZHV0QEnVzPLqi$d|CzO1 zj4ag5br|&|uIYQw%gB1h9un~^S#*VG$zpy>uIL$25%f~WL2x-{2O1ozOkur4517|U zUfM1n&-mcQ%3zVB%HCSam}Ksn!DWH{2z-UbPvF!82J5F19`9vMUB%j(fm|>>$a*tr zo|o}@s1JFw<30K(u{N>1gOaCQa#P0QY<l9l)FaTqTJ{HrMJA%nFxO75?{iJm`3e1# z7&p;F{9nY_j0RQ~9cnzsH}PN5ZRI^ejLk&R1uGH9CG3y59%7E<ned-7)oz|0D`OMh zAh5Kq_6hhW@a(JNuktgX!Qh+3<O>WmbRrt@WE$~gS|j?X#-!k21>2>`AJl;C*XMw@ zrL?yIJxJqld+E!ZG3~?e>iF@6Z@e=|$;1b)VG(5>a3af=p>GBs?`1Bqi!skvG0!uY z=jqJzH0F6K^IU;1Tx_02eidGWp4`3Ifj+eUo~?xn_FQsEwGHr;7N%V|U)Ee6^~oGJ z54vQE^#6ML!=5|RHlVY#EvMgn=1Z4*PpRyQh9k>NL(h$F;E6tPf4fuJXG5NOc-W-C zNHurQ0O*7xE7N}!w7MG``v<>r1PX1ry@e#>E>o4raB@wVamaitxd`^yGWI-g%kV#5 z_dv-DgFR2|E!w@h3m%-NmP~FOq4d?_2l_w?IvsULFLX7q9r=DC>kR$DJg04lpEVf~ zJwhXT1n6EsbP0{n_9={O2YLt5E3~95lN_fOzEPR3v<GSHd0ZVOu2#<AJIT+jWb9c+ ztjAb>FR<_0o9roljlM*R_N*RZFYw<%U)ItWsdE^y(#t7Byhosc{y-P^eGbpLmYADc z>DPYx)k}ZY(jUqHg^u4T-!(XuDTX=(d%2ykW&AqRlzp>^^%D5QpCzu)^ug@)ozDl; zc3ReZls3G~!$0eT*_@4BgC5Vt7+v)Lz*IFj+!6C{em_qM_S-W}n%0V)rjLE}`+1&V zCSyD>r#$Gj5B8VY2J|+eUy^<LeaJK#^~fI8!iNm%vhV;IBTw?1Jmpj-T5~OQau2xj zJh_@4$FKIRxlS&bTn1lh1qQ+=8llbBTnjD={H9W;%(eVip|8i#6Eq_`wQ+5V%TCjP zjo?}%__`ijs<bWbiky4~&wQub-UXj{HK}BBVZ3eGLpS0>VzpgIJtN|{_R1jT2AS7c 
zYD{0XmN*RQi}bJgP_zcUsM9zUoh0kp#(g0?P2O)HKWOtoaz$`|39&F9;wIRCu8i$J zA1D!@P~lhB{<HGkSl%LO+kuRI34I#-Zgi6NZgh(|cHm&aAiF=>o=pG5CN6U#aQVTB zn9cjY*}Iy$DAu&fygTp@`6~OwYl-EF$96oVer5TciRIZBIv%opW%-vA$~*7_;GEsO zATg!*g)@G+3O>bf(+OS`gHM(GOgRm@=gjex2yQIpJW3gtif%F+ek?f91s%6=AO?>@ z_si5Ty!!towEK_1;qk=s{{S2oC6@mO;Ba?Bd4WSK{X94Jvsq*ta6~J0o;UUr{5k3> zniLq8qxpp|xz33L6}m3Zg-)DX)_0y~&F6X6cAjTl=Xn-B&$Ht5JUe$EB{8F6E0*-o z6l8cia)T8YpWn-Q-e>Kjx|s9r$U1Gzbxa0}#mmS(>P?=t#rILI=j@{f18<ErAad7! z<cKKx_HJTHyNM<3M}DngkM=h8-putl-`<1&X(#v4BJojO8MDLXgueJN`jo`;NIUFJ zw@5qIUhYErATrVzuI=Dt8{hl5N*QxM=7{abWX#Bv?M0Lm9quRS(`E0j@cuae<vl*x zTeP?zB|15LC%+zWZO#c8MfriZ>954u?`Dq{h3*KCKL(vx%k!J)UpLP@v@82Fkzai5 z%c95@(Lt2w8~oBD(F&zQWaC!mC~<FgdS7O@?fd+D_TT~q8hai7hH-hO%%0RMvFH7> zl8`whuR#j-Ks9h1=R=5}uQzGx){~rndYpND;2>wn&rt%u;w;n|;PJ0G54BMl()$zU z;RJiM+Vx7{U3=1=-)(gUuA(3LoUJ;P^HSe~Uw+2#i|Bz*zR&Z`{N$X*9^yL7E?_?} zLTI|f+^4;AU;Ta7J}rvAwgH(|;wT)-;?=!6Fo33ei`ZkKADzqo<#AQtbM?qqS3f^Y zX`f}!>peJ3sW}Q?SgsWKSJ(>tN9-A)qjlde$t>_ZF=6<ttKBxIf1aA^|Gu+i@+HHS zeU}s{TUJ7U_Ny5iHp2H8A%~!_$ZoE`zl1Y10`D*f;iA~tsnTB`aF=$bvY*>TIq72^ zeMA=7BD~HwG-ktc)3++svBx=b&+6SnVtupmPI6%`qi>JXx5M`I&<k~Ul!#37F)~RQ znmJx|`qR@&CVy0<?0X?k*&_SHwQBkXkx8U)+h|kh<n3|=58;LG^Tla8RHyE+IjXvV zHS~3m*r25EE$9w9`BwV8i~4!Dua137OTu`uT}|4>cQQ^pd%AllFFaS|yhNBUI~Fx{ zh1b|q{iCs=ZltfT0<Q+@b5r*F*dQ9of3u!_w%|!CSMifVrjwjwHSdwL;N3UK&j7xQ zd@VYdTE=wSa?YzJ|IJBg#dz=^`@|#}^8wSA7duY~p5EsJ|KEMX6MQcPJt#OTcxmC+ z#n4j0Pr=P%+sXooQxM!e7dHhb6X$hFZ?xue#w)T#BlRcF<KaH^&%CR&JN><kRruB} z8Lz;Wvh1(T@eW6xlJVAZ{apM}dH+3N?>Lnb7|of(4G-n7sfXS)es{p;;el_NXAIZP zMsLlTHR2~5M!u~B4u$H4z0L_gSzU;1;x1C4Pb&)&`#R$NsHr2=Skn&jhlwsv%Te~p z9@vXK+ib40w2%dp#BZk}k6`0=+Zm5sWj)?j!5Kuh!Txgl;5`ik#P<L{WqZ!w-II8k z|4Gov0^lL*<?i_W%l_j-VD7aOf~02n$5G~L%5FpE?@GXvH`TQTtNFH@@&a3_>vF5E zIBfsSyCc+9Wlu+@NJpk;pG?`c)U}7YO#EgJ+NtYSzBSLmq>T9b-OY6`*BiM?-i^d@ zlK*B^7&ysTWqm|#1-<CU@RP&t20WI=*M&asf|;(7l4Hu%AY&|tmy2A9o}$mkc&D;9 zeHWJpB{!DHvmz&NrY#xEi(Cc98@ay8d&w^&FoPdt%T?e;4i9&$Sx#|3W4?Eq&*gjZ zZ}l=(Dfg6=8F)dz6|0$}#d0oMd-EHGhsF8aj)eRK(~d-+lKpivco&Y#RhGZWVxHl* z&@Ouq;XM`D0sob~_{2r%W-`D<=)Rn>T?MY!5o6TL+T6g}+`xL&;Rj{leMO+k#QS3K zUe53>R?tr-5lh4M5dAAw1pn>5yO4Q=hFN)gto=JVf8mp{z5KZ{i1@dh&S&xIU{jYX z^hNlv_`zGgBbF{VEVKofwb5=&2KmS0Ec@|G)#5Tp6d7cHLOzSN3zd12)1r?4OAOaH zfvv=@-9&DS^TqtwHpOHK={xkf{aVT^JbRxu<e3NijAq4QEE*ng6^$ZKkFqJA-(r)o z&K=OUy90-ZqmzjDNn*10ucu!xNS=+eb6jjgmT0yqgSvJkmul#HTP{=vg^(+D56=x2 zC#y5NewSQY?6A%3d;vLpH~%w}W_Io6nr5Hb^#`uZN!MG+rNGe4fr(uEI{V#@!jpr7 zlB-JWjY?A!;pL0?C2draJsS&4)Q-YwN<?xhsZEdZ{Lw3vhf_9o(mxON$b9Z+4etLn z`K=hg_|*7-S(tedzF^8+#W6V5;`1p!{0jH~0gSqdH<WSyP5B3#*=f5NdgqgOrlMPk z+0Dd<R#OI;=gDZ~s0fdu|Ha&+I}9VwSa!7@<{?}N9e%{K_3$(J>!6?hEAb|ulYd8I zP9#sG_)+!4w?uCyIWQ{mTj?Qh#Q|*pk{h@90i|X%yeW(<(GYP4;Kv#D#IIO6ES_S# zTYrr_0nQZ5Iq71ThMwmVt0H!VF#5vTwmfq7h`k}-f75m9sxJ0hZ}UvzRGwna+koFQ z_hQqhY&r9?gSnEk)3@>bE%H%#9wOelV1}G|u<3l~rw1>b@ul<ALrcp$PM@FtAi5^L z58p#Ql*MPVubK1Y9N-<gh$n(i;)6q*+lWor29C55gVsjwlpu5KgGUB;OfN+cofQ1t z^isw@(TeZ%KA=o#+daLMG85n6+%|zl6u;$)c}k7I<WCnNgWrU|CizI7=3SwEaPMv3 zS87g@Q{-3R=`i4W0zR)4>-e}04RsBu+PaANp~AuVo7{<g0GKZY-uiuYX1tI`Eqp7A z?$|A~20w0XB=+fxgnl=#CjNHIEabW3yOf$1VpXb;=fp-e26=ATe5HLE?XShBWc_?( zI&A8k(;m9hmg?V&Y<aZq_PD&cXSK`b@ZX@O`LB=5o8RXQJdu<3t7)c8w+_DXZN}=W zzqf=q&p-?F=baamH=EJbS}-_7zL5JUC;fHN-%9!?@@DZJ@c9+^BZw{^c~c*`d$s>| zrQJ{e8l``?<EKskILAD6hfViyRCWJrb#qIOB6DU8-@RHBT?Bo8(NQvaAoB2QK4puX zXFwjn4WciR{;{^RTSgc`ku}fF2ZEhl58I2~qBjgO=gn)7CFyrFG;EL=Bjt%`6%s?D z>?>B#snD;wdS6MN@G{2QQwI+5oy_x2VmZoaOLRMH8;Z|{IkM(t-uZ#08#rO}2w)6| zES2^sE3zi`l0IJ@x_{PYnWFVJeV4hjdpeqb$XroQV!jt)yAWAP<Rr`X*+n^@^gRLI zb3TgJ45dAh1J|yI$$=JpM}HRGlEu4v;O3w&VnbO<Us`CRnX-4%7i=GW8uj*=eR1>- 
zn@wNP(aTyczYRGUna)d_E&R%W3piJBR{MveH79{z%Tt^aF}^aG8qZ}XXUjLBS6VvW zVa`Vz`B}vODxRO!Jeyu((?1`fMEW07f^v5K5oFM1<ZuCInXiz;<?+jmAbKtR1br(Y zP93|Ge}lktv@;lfZUD=aGqj+`&3<?}@$Z~B7SeJ&Pgt@3-&oSJnz%k_P=09gwdyJ< zcLEx8t37{@=n)HZJfAfCp?UaZ$bL9KNm*s;qS0fuRK;L0;#Tk$J4MTNvHNFk;U0TT z%gyokat~gAK7wb!NY=K<`~u(p$2>uSoxn_CttZ0mneJ%K3&2hAtPuRa?OJ4M@U0Si zi^yy6yh!kV;<xY}Lw?V63En~%4~YE6cR}ck<3^>u6?)RdIgR<y9L)|7908xBzll7k zX@t_gnQvt6YQu@6iJw8yc2g{8kqi3mgno-mKN-KG<L9B@?_Z;?5}LLf`0av*&q7Zr zG$2HumQ-RpW1MZ|o)VlE`YpLN4j!5w>>h>80{r(y&<_}_p}o^f+ipuL?IypA<XLD1 z&joj7Esds*Bk&{ab$^ia8rISuu|LS3&d(VQdCWJt5;n>GM*K-<ja2&J$2BiJQr<7P z&HmFor(r|<oQ5&ve*BRBJYP4rpM0r(+Q{bBjhxlcI|}_c`q1sfJz&f0Yr!Uw3m-f1 zHGH7ZkBDrw0sq_XspMIdE4c6+*8?+6`%Si0_D>Q+1+KJ-Yyr&0UMBYk`0h*3sN+k| zs4wx^HemJ=F|d=EE9t{h+B{v~shD2%5$l=M7k}1dJ(K$4&mQ2J)OXV~>~}IZ@U%AM z$5v$M9^!puPDF2V2zy)X+?oPxZ$sqV8s_3mo~80D;i1)>QN=p_CAfwjd7j9+GH1cb z=&OL6BdPx-O>h$YD8@&s5Z$!A4>LDz_Vgj*zCxEGAMp&@XXXfU0<#Ub#(bp0aUZE# z_A25d#lEDb7<oW^D>Zzi#FtR~A}k#>exs|cwkxmfe_4l4I-!%j>?<W6u#Ff~1A1D2 zg)-?FJlFVzCzu9Z+DMFr_)$sT&1KXnHW=d<N^q=+cUnkt*~aOQsUuhh4f+&#^g@>s z^GaX+VtMd5cy;qLN?<5-2^wYQB(B<^o~BE`Vtn4KEV`7NK$nL0J`G*!j?<+K)|Hfd zi*@zm1iIwC%A!kop-*fvy5tpHo7DeuCAe0{)p)KvC^YmWV>%4|6dKfj8S$fW8Y($& zW#4Q)uNOK(Ux^P1xPZk{a#_oo*Dez~v%_CFTiN{6OO={W!HHw|P?~ERyb*aY_+qrC z26;y6_r>u^p7*>Et+|%xBHzA;A8t2iG>Wd%%Q_V~J5jbiOXmIxIlEQ%0hb^{GB)J@ z4xuN~R?CKH4YGDNx|u#%gJJ9(;8R~4*ArQCwr^qR8gd&ctl?DlpKsYR0^M>333R3t zoSMPA&TiH$d(mS2P`b%OB5PLOhwV+PyZ1+-1G#?d8_{ERakd6Hzggj3@aMq)CHewy zB;K~>E#M$Dqklhmf*!q@`LX!M?BcWe#(`01^NrbZ&%8ec4mrr}asd0g@UFsBN}m&2 z)>B_x67#>#3(aT#W$i#C-HGpq;bUn&`-}L)I73C;OPvz8D7hk<!67ZJ^POg7dU9N@ zY94isYw7rFUCX9i=h7VH&*#@XD%sUM>Ux*K{|0_b#}9BV^>aQ1|DWgQzbwV2bCt4Q z`sW{|xum~VKX#q&XKu)Qo_@;v^Y)WAgy)+5ga@1bycheL+0XCL&snZz_VSL$Q;6w^ zx8FP}*Hy{?dVbA1c~nN{xmpI6cP!<813#;r4PWM1>}GCg>G*ucH`uj<yic7&lu06| zF8*41M=k5^fGvLyF}14>u%5b@%cifX9m1nU#_L8W)=asUHTVdz7V6=hE_5r7k-G|d zk)=exB63|3zB?l4!lMJ|fBRg>NL|oJX-DGvq^=_Hh`n-lb9~*%yMJcylybGYY8U<O zg722GcWM^dfHkpG^3~z*E9*mIv0SQW0qeP85&n<CK{HB;2U{ueV3NO1;=^1Qc%G<S zP`5RdJEQd6Ja!w{Csh&`=Eir3JnM$!q}#xr=}XUspXb@d=Xti6SOsFj8i}h^v7ra~ zF6e|0P<9qH-iux}2tV2QXnDt&jpf1MXA9qOWhw#oC+=6EWkP3!7PAL2WkHek<k|Dw zW7GDFo>|V{rv6?PIBX!Uas#<EHV_+W@(|<-BTX692s~7DjG-sVKfpT1K2cLeY~2y^ z0ZN{6;a|l%&ycTro#?-*Z;|xv57SF`>B>aeD<Eigh>T|cMeKZ+Zsg;GqhfN+rV>-{ z;TAfubz8K(?-b|8OFS+5<L%%<8*-Y+(8aWqo3pmy>x@J2;tH<Pw&-w|!jC5N-vEb1 zK7TU4SGa(8vX2mcbp(EsxQ{4PQhT#_FLV%`qfW&wx%%pWjnpOhDDs)qC9<gaSIa)2 zmwM!^@-jv5P23BZ{X-`y@~E{w@DJaU#rlwSk-_)U-+t(-^#3^5o47y5we79xrD68M z$AOjPcfE<L=sYy~Ah;_1VXdQ|y#b$*>#^S?_(R@#b<pyMe3I{^-pO30-g~%yBKLg% z4%cn)g_n3H@w|z;Mbr1^|1fXg82qiZxNfQuy;NeX+-z_*JFcH><JyG2M|g<nCfCDv zgx`wnHiSCP&9j^PVC-Es@1(p99wgtNFGsI(NqL9ZP#s%LnKR<p<T>Lr#IrDR@>lu{ zZI1a2CB~(R-p4$%17Cdc5t2AH@f8)hs|=rf1;1JG$ydRD(LKj}c(OxJC@~$o!Pq9E z-^*jKCF}8z_&!K{tLSHiclFpL$69vplZ#tc*P8N8Zs<>jx=PBl6VthhoTptpmwlJ- zQgmz3MS;;TsbA*QHypVJxo>SQbQHS9emyjYK5ZKii#G^y{+136BffYaGS_bRg8F`W z=PmN?7T&j%9PlC!tQz2XVgs^g2pigl8J?{T#H`MRuHAm6Hgn81WBhk~J7eb9YsMl2 zUL6^C%{X)o-UzminS0e~k<qs_^fxc^?$*>ae(Afn*^rM^efLiGh%)a9edx3LAnlja zzVz2OTyhGY@j125U-)$3h5cox;jZHMt^|Y@3Eat%WWs$m^uh<+eb5^@2j<h1ihjWX zfuHc)MERiZG~dk%)dRnC<pX(tKE5k;3CxA>${Yya^<AhC^MJp`#a>taxXRPAf$w6) z<-6E8EgRI2Q+Z#N@ZRR~!guTV`7XA*&gwIB$vA*T!Z_eZJ@(=5vWwV@F~3t8ccB)W zTW7YXWHfVBlw{7W;e`LlJK;Yvx9+14nR8pDA%lPuPQ~fhFWFvPM_!`we0K?RJAru} zSUKLGQF)2q#e8N~PVj%MeybzH_KgllrOQ9TR@0$7#`qs6X2zvn-?5w=feq@_9cd15 z;nYoUq&rMp=-~VSW7!1@p8y{^K2|f0)VjG_duYc6Uv#Ny{L+m=#n}rTH4YIMqk<O+ z{TIA2`!9VLyy%Aa$~=nCaiYI*qObAL9~dsJxXP88TJFL>(3Sr!uWPWc(v_@U;ry9> zsPo~k{f}|C)_LnrCD}b1c>jdoWM}8CfxO=sQ&qeESJrceQsHup`jauro@X59JjrDB 
zLVZF(AZw1I>)NM}R9|M#b2`TFGTv44o%57T-FrjZ{D+k+-Q{S>Dz<5Qi{j8T)dHQp zmL9ZEa4xZDI_ngNd&K4Yj1jpzjU%>vBV}Zl5v1Il{3TiLCz`8|+47yK_?WrqL(16S z8;37`Vg52TTmL|NEQ`H{zD!NgN2!DLAF2+$#XidEwxu|e)qI0Jo%?Xf?~R|?b9K9# z@0@E(b{@{&ZG5<3ZS^whnsf0R#xB}11~1J@+xpUcscX)-oyHt(3H7E?uU#Lj=5clC zF56h=AtlZEUGhpxf1fFR)p*#Rqu1Ls=Ob#e^O>BT#tT1KQyo&A`XeJ>F<RpNe_`@} zQLkNpWbAK^XSGMDHyuCeWPO~Pub-G>chN`vurkj1teWo32iAP=&ah|dPMcGIy>zef zdd}-cM#0LgJFA|qept;i3^hZ)WaKX6HTQG#m#A6Ji_|3NC1YPVmMC^T!?`lcPzUPY zN3DAu@csOIHkZ`XrjB>Mt`2k#W^OfdNer{6=$fMG-KDP?-8r4csk>B{lHZh7^!3Hn z|CTq^H36K{RJ08vcN;}t-!LDYsPhIj)9D%8X<TB<&=svIi&$Gd!=~t}nx#{x{<>P^ zJfy15|DX-pcW<<(=^Lf5CEJZlvSVXdT(h#;1CGn{4CeUYRX?9URdqNORdHr81|>d* zjjB!0U<~WkR9ykjQ*CMbhw5<WC5p}Y9QCK!(%c#NqBZf|M@qU`)>E>>c$GO@4(vVI zoyO*O{M9n1<zx36fjRB-H!_~Rj^$bDwsiWHsW(BdHv{`!e6v#3oMV*<&TExS=P>G| z9=EiAhkcsMKJ!P_hm`-Ezn8V}5x&TKfv>bP4tyP#yVuw|czM=F=5!o$GLE*Sjg8E= zw6h6(lCp+!iF2GS!#RMy?^d1ePDRzBExK|!W6td~#shEZ>p0;2BJ*U>*Vd7*8Xr;b z_*#E;D{#N_dbI1neJ}51j@JQ44|Dt)Z4}#vIni}EKLY+dccO=O?`3}KQ>MFS((ihk zO&8s?v!v5-g8zc=#DD0AN4{np9?QLBX%_v`_rAQcdfcNwoexja(}BIgH=1pnGgV1< zUPRxUsW-rymUX-j+}xM5&ycl$1bU?b(;L7S&R2CFRMVU{jNNA}V;(0s16cvy&E2}9 zdfB=Vb*Jj7z<nfjXv!GpKFU*{`v!HOu^ISRF#ZYjceXym^;c{~MYMYfb0Ijkvh)>W zWqdto;LxUvers$R18#x$f|r38{MGwzeqnwl?G=MFdA3a8U+k3m6`a|mIE|ys!)C?l ztfXGfchM&R&#{!vEbTHf<Kq$h=)3rBqi-DJqyJKFU)ht@`-W_qznOPqZ3Fdeo~=;F zIx`q6wA_8vmTat~?rvy|tc!W}t6U>k&v$^If-^5dFJ8=f&G>lm3g&R2zT)CP8Y{+e z?^uz=_xjO4HdU{D;1}~>XI$y5*}rmis^h?ibmw7kW`!-!xYIt+xzd*B{8-ix{ZF-} z>PwloSD1@ea$Yf(4PM6jPu1s+`fp?Ic<vp`n4466#kikT-+9^A`A4r)Is|vFheuuq z4f-+PW<2q=fD`@F^U#QWz<!J^)g7=6VeT}&**1i^7|M_TqyEQe9^YkXj%JgN*J?YB zusy}qHSR5=bNv63ucJAO_?s*(?Iz(1k;BjyCG|VTh46>Aac>%jb9S3$Ox#Pm(U>s) z&#ZS{=Zr}3Ay?PN|H+teVIWKDnUH>qp{0GpD6!@1o5%mfd_E!l+s46fA~&cx`Xt+6 zy>HC#<Ik@$N^KYDH;ma4zppSx*;4d<V}2LEpJ6zkyw4>#)yMjsH|~x2^O?p3+W@^l z8KD0=eONl?H{?lQnzd0$)00^{zA-xuBW;>7dhD)PzoByt7x)?9RO9ef16&Phm4*Ty zXWAWl4>{-?_-2RmEu(k5(lL5^OO|`gc0*v(;F@YwOk14wzxFI$>X-3V*>%05_$KNv zXPh?~@7Qhn4q#!ZN&3V5QlTID>le6YD0%vcuX#E)!Y>_m!J~_v2N}l+b%67PE#18l z-t>0K?~J#z-!@JZJe75qeW-DVV-NH-Prv!19mX>@yM8x(eWsG-+&g`F*3DzyHtw?J z=zCpN#tG+BSvTtM7(W3&@3Q6TKb`TTtS#`=+gS^5=>KaxTzso>*Nh)#wJS;bUGDD~ zNl)JIDzRnho1JeO6J`XmCb(}jir}jn_;j-})c9DP=v)A8*vFAl<5>?kC_|k)`PRk! 
z|5VL$3f*}4@;@Tiuoj?g59j{Icya7&#>4p!XUQ6HF>b!or>H~pkAbU0|C8YYjxN_X zj4R=#F8$Bu{RhCw<+{cA6knXrc)m-S;#|a<aKV!`;YEDgbNSmwPwp<`r>f0x4Q|QW ztlFIK!Y9pl&^+nOyH(_~V{Sz*xnt}e1KXs&S^tZ%_k!hF>PHW_J{U5?CF9(iRz>Qd z0)2w}+s0n-^hv(8U@%l2s{c+w{s9&u&$QUCaK6iWS!Y-EM{LRZ93`LknIiMJCo-4Y z!4G5yeW&(2qiEWqtWP=5;ZtOzKImr=_%LVM64r0N{w_2H8s@yqrs|83$>tQ#09TXr zY2bwuzPTuGJFb$8vYgOv7kDT18(%(sIP?~~oZe#_j$CDP&Y|6BmBIQm@YU(?#}CGP zI;Lm;)=;6f&*ndpmC1UqbG~7e!#CzA$@+BY_XPU8Nc)Wm>*>fWAMm~O)#aLL$}dxj zrx^zr!vw~6J8NYEI5tAt&RV`em;O5#=Q7sloZ@n03H=@8*kvr6zAWoy+Ys<KSDy~w zUFNDZPS}R(YwTm4{{wxx8oBCPX!J;Er}C2rUBoH4UjQ$kg=Q{;#!b9<pYd!C@)LcY zIPQ<oreuAUb5&L+bapQ|y8@h26r27EbzQ5Bcdln#*DC4mkKn;4phG8eV(Vk1&B+|* z=}#deOtd*ox$UX3ZyW2CY+csF@0Aq2g8p|gmme@T>OfW~hM#H9Ym_v18qXB?go4bJ z{D+53ndvEHcabeRNA5KiGiPsu>z(jxS*OCUzYCl^5V{aF=2>N+z5`w_^?a-nT8IoY zP#Ng%ga@t%N2Odv>2Bj=%00&#smR%3ROBztdJdWQW9C-m-Bt8ep`U&5p;gdjz6a+g zI6btDOy^#&rW<X@b|aBbo`ZKBq~4LazcniOM)}D?m%va1e($jk-iynm!=zp29~qmv zo&4937sen*r<$@vGwrN`_pZwMqp_-BRn~jpE4f4U8feU6aCN$p?5r8{M?+*^nU_(> z;|A~3kt03;pG1x*Mve&3AK~>Pdv?OJIt7k9W^EPOr$!kFJymosICz6H&{?BqJH2DO zi~xLAc+Pj}pYS0K*gdPH>gaI6vtnd5%_(#8yqfBkdC49yZR-)<OP$|j{V$`=Zg}}M zz@ZO*);+G%5PmOn6R>CNt-OC6{Fwl*d;orA^X)6}^>oGsK7m6{=L+@{B8v$RZ-Q?N zAAg>5!pobeZ<*j9^>tF$a^`6W?^5C2t&BHSNp-_h&ytsfmoH=f=E0|%pk2b(=Z)XT z94n^0By+n0d^wD*<pZH}@R#FvIa~(*Gso>0y1yV-)fLn^OI7=(?ugbLCC=^esHv_a z<11Vu8ywX=9WC&sdb?Bi+jQ0l&+rR9!Z#v&AH{AZ^3zf58Bk4^eyPJX@N%asZMe?< z`m1B>qDxfHzOkcYOTs@oIhdnh??z6Om{iNR-SjD&ss?iYmVZ~?VlzLt=+rv#jhKp_ zN8#BW_r}g)>ppXK(I$;t_w368V($^ZaM78)$(|pZO@uQSIR9AP*qYmPrJTLcy7<v6 zTFI;1N?zS<*D8_4=)GEZ;HN!8Vt}$j-L|r~B=#qa4W;l@Szj}{5KUQJ9Zpe5ae~dK zT-mI#@7Jy*SI37BJ|WM8gFM91YP}wwuS0Kn7yED@dNu5ar}HwK^`U>2{Fy$=c+t^` zE>m=Lmolcr_cP^t%CNsW?Z+j$y;jE44J^cGOJaN^pQf8{<#{bSJgK9N@pkd8LnU8P zri?)evVZiK^%a&Bt|N~JHil;ECGL^jD4d(gH?5Qx`<vL@U25(gmGa^*rO!}Tz4VR} zJb_-VXZei&CvQ^%(fP`P=)$C6Gh-I}lR`{;4>rcOuM^)rj`sgapTJM=Iius;wm$i+ zxCCjVZL$*CMgM}#U6<5dq3n}+iurSuy(RiT@q;_{z?t)J#RdlsnsJ_ArRzA9hwopB z_=Be4lQsa~G>Mx=A4$BmoV$cy@s?Zcz=pHj)?>?P!w>G*4qM=};ou5(vM}-2;q^At z|1ErwD>Ch|kFmWjt`I%+BnV~pG2m=|j~z#zGlq#zGM@0G6{V7o8~y3Trzsakk1x+P z=B-f8?cI*v@Vyb!f+^tMyY?YwJZd4aOX17Wn*nEuTNQiJ!4(z3|Bt$NkB_=K6aPP- znIwGXk{b{p;gSSIXC~Yts7Zw~1kg!_i@8{(-7dFQXEIz&)h<zK5@Oqg4?$vS-OBEl zfZ8TOR9fB5?0&o4cH2M{rFOg9e%s~ZZ9;^I6-}%(zxVT*&)^UcvAf;&_s4mCuIHTR zJm)#j^PJ~A=Q-WxIytm4<0I_vtRq&<;XDWzbQ0NMDSRE9rj-3X+7qicz&R1`L8B3H zEb$S=<|i=`Wy}OEnOze1S!mjo-|#s1I=JXR?wQ+mGIbv3L`V0?dZEb=DMRYK(>lC| z`YR&T(=9PmSl3s{mqUD$>iO!{O!m$Z`=LLwlf9kn&6YjJ#=q2$UZiJ3p94FU!iVrX z5q!y+AsTr3NPL5MZjrM2m%X4pMeHT8@ofy{Nc`;I)8E^PxwL~Ay+)hAWhvZAn`7T3 z{{j5+1^*4d8<;VRywZn#X8W@#Q`#>vs-*oD^tr^hk@iYkgO+$)j52T`c$a<=9Q2zw zxY0Ul5Dp|p(kO5+TrU6zoy5oXfrD;v5HxYn4Zcp%uGkoj;Xw8>t8qAx=NJye7iT;; zV9%z9cwNx`mHF}488<UoyO48+J^}~7<9g}3#VwcaS!PTxzi3QfU5vj!-2d^Iu3=1H zv~EFrhr=%#z*#5!C$?@8@K9Hl@$Q8gW~{>q|HZz?)-^KdJK>pcP)96J1O6}aE*67u zE9J`nMP~VzTi1~Pe?b|)V85!^p=0OL$k~KP?q~l->Zp&pC#b7G>6}T~^D{6v>8Lhi zP2q>|jgOFR@57H}JwyCipE{V?;d0_bHJW%m6Ih2+ZN?sEErf51L%HGG$*fB;PQ*V- z_F4#k52j$N-pShAy-j6KXfV6l-4WtluqdIq(!#k^*~H_k&g;W3tvSSa+^BjB*E}$# z`4P%kz#65qI<I3j|A)TDJGJJ=tJG9|O7-NvMVYtA`epNK@}yKxHvU<ZG5^)YnYa9S z65j`ZJJVnD>RL5LUywfU)v0mcoge4@@!G@`{m6Rnqw;-m^4tsMr}EEQV0dwT&5ze4 zI`xINqF1L>Pwnfa{<YMfTs_sOe-md{EJ&V9IZl01;+%`>iNR?6V=FSsS*$8Ur7ba7 zrJexw82=>B`Ee!Xc&c4}8ubLINBBVC#wmrv@0jbB{*CSdAA_G!R{M>~tcg9T-lUbW z=Bl)Ot?|jGvdTvt&7q$=ngbS1Z?_Kbvll;BwrfIun_~&(SZsP8|2eDiU#cJ07oaaD z@tpj9`0KswWwAK)$vmfodA>uQo%$4>AL*_uOBdhe+43$~pUS(m*Xzn$zrOXn@qW6z zPtm9G-u=s4&+ozJ61hp<rRvJ7b!DEvo_juh=H{|UNZzICIsCWnzv{{&_$LH7clc(W zBYwG8^twZjl|}Hg2(TA?t0lvK1NGj4?~8>tMsAk(s{Rkw5q&TGdRFrXJ3rq%p1j{X 
z)>JlbUv1g<iTx5;j8AarhPKv&OFC@OEWRfB`;qN##@kYrm>%GILz+?=#U;MjDRy1f ziGz(we<k?NPEoc#QU8SwXga*Z`h~vxn1jf_nrih+3^2jF2L1jAF|q_EsbA`D`CIj- z_&U+GE2NBe;6N_;vsHU&=Od~SCtCIZVUyG~*5?lo{CyeUdOA0&6&m9USrvK$9xmfc z`<^;O%7`?nGXmGIG1i3TdGt!&!FTW9=npUZmOng)w(Y4e?ei>J5O$R<2;)l_+QE9g z{M(SH%t+$@hs=NN_^&dS`*&DN`(op@mY8B%f>Ih><kD|AR}{AK|6YqNbOJq4aA?1d zxIK4!!_rRX-2Mgl3~*j|sPdZ|vsljy9RR<96is*Z7lmcK<2&Q=sE#h|b$TN@7;)wG z1*Fi~r3IT#EG@uKAe4(<An`Y|)r^qJnvTTYupg@{vn|Zr<^iWV_nu&7htTUa!BEFs zaOf#>hVMv-?pqRAY!l|X87Jw|9~v-X^YGhwUOGRa%EK8;zKIQ4;@^D$d<wlKD78Z; zD79rmZ}TY!|5#6(wSqW`@F@P9rh+Hcsc&>BM;m{)v|y2%*7Xv+X}_#1s>udEbgQ@U zMdAra8<v9~VzYZ{*dty+xh|e%zt7FwzX_ft&uM(m=g=3AkWb<e45%rg{rtCuvSmM_ z><fI>rX0OTP1EH}?%c{X1s2k5#bWT;BhV&!dSu;8>iUHZ+*?M6R^UhYI?wMhKN7zM z`G(%(37skm`@#Jx;GQioQuik64pF!Cne=H5F*mLF^y%MF%U7yOnZQ1tGB_L3a}W8Y z-7g#YQ;q!buuJ~`F6=uCl#1<^gwSh`ZO?jm!^2t9hN0BY`IMf}nTz2UUz2$F>+mfR z_~!xhgqtYGu2j`etIxW_%zqF0FM=Q1ya;~i?!OEFN&Nc+{zbrWu&(|>8`ivAgv~jn zYq5ztIghFl+?RlR<|U#1=ymP%brb)rqVL5=H>Bb_NgqoLg*BwT&_^BgD*h(oI}_`7 z`~-}Cx8Ub4bqA&Wadq#7Caf~%8OH&}vBp^Z=Om>nAzevd7Ue~(Q_Go6quKM9;1M}3 z{NZ(QFJp<l`VJXKGEQD&9xh|#A!t}=EJVAckMLva@*8b)gqBlgeA`X{v-C^-SAb8G zSlYmsU!NuYB(N2s1MZc+!Z+Av@EW)NIbf9fviV=?n?T<%2Vgx`DXm<Tr<cHsgxBn~ zI74%&FO=fYL*%I$`^AuqX>fFOHvY38S{<Hq@Ejlh4@(Ess+NsK54<n}pJd8?g7_qj z2}=5G_{<8*SUN<jswZ!w7JVn>ES=!33gUOx=#0Lzau06${|-2#I2%Lx){F3l9Pqpj zo}#!LW8iKY_;MvJ2>0U4p7OQYvZmf9c)O)aWT@4nRJbe)I7>*G5%Vput7^I*eV}B$ zTD|~1ViodsQ}FBSdaT5v${aHk--{}VDIPjM@712lPyy@Z4)LL^v}<~CaD0cG@<dON z`K!cIo{b;aYqU|yx8eVFDj4jL{fnXfYI<n(81X}MU;r&W6?7YU?K1Z_>8ze|b1eI2 z<yh)wwT)F~^v)JO9`%vDJe;eq63!`?z?t$Wdmn(cX`rc0aP!}Sxd@my)Xgd$1<d%s z|EXXWc&*Uq+tZ_U2!1zy0)97s5`H%bemBf=bJj`9kI+@XZ&T`K0|(-F`ag|lf%zcw z=RcMn*v~Rt#;2SYBjZ!X(@x@@h#VDvJdvd<*=Hf+dIHzMdd0F^lo@totb249W7&aT zk&O&uZ(4-|y~2siPDZcTq&obY8RzVc3O)VJO<CE<t?}Tx65VVe<2?oWe9G$dtWuM^ zuv6+?szYDRTzfNVH#)@;bcz_?jMB4a=oEJNo5=NL-*SWl8y#WvymUJ}Zf+60gK|ap zSjqV39LrEW_3cVRcgQXdow|uQ#=t4<lo&D6PGg+k3QX9s58tBPv^NEM@X}_=lyaV! ztyK7GhLp?vsF-`nEBQS5@rzvD1~2w9HqXI(kKuDI`-MgBimrgq?C@*2%0HqDv4_Br z>zU-0@ABX6q@(;d&-`yc<6V6FVq-SP90xfjjpZEJH>-hvb2e<4)y_T`IoDEX%)fpP zvF?=V%)LrC<#uJQxn&ZunVkL?6YX7#vNz)^l7h}(TlUD3HS3;(H@#y`^QUEv8OpkC zNiTe;w{mU4bMUK|s<WkxDp}(?V@>v)wx)TO8@vg9TD9oNiE#4IYs*%_t7J~G2|369 zCDuR{^b>rL4ZdYL8+~WtT@mYw@;w0=Iya0R!O^HDg>$hF%uaHKT_p>`*et8+k#Vi{ z<$dq$^<gi{Zr9cqSC;^99(z}bJuv+=ywjhUJ^j>@%5{_2$9l%<^slmIPw!b0TDKdX z%GyN9WZHFv`_tBB|JzGet%IIQrm(+NzMrwC_;<5zb0qVjM|acS64L3))di<2i6h88 z^mkZz@h0xyu3S}sFDW*ES<iGj><_?KY1iyI%Gu?}yV)hqaBNIuK>IO$dl|9oWBT?? 
zQ{UchwS{D!GFab!6B#pD=iY8*E&;t>S?3;u&OPQLog2TZ3W;+?EIea;h)=25UWB%# zp3CXe27S*-sKVY?A!BJL*BEWz2R*vbvtKv$Xba<1<sT1yzgS7o-$vIt&G>9o?OjvQ z8`^+b#_4j}8R3~dScNiPPVl}_v3L2&BfP3uvFXrO^F6^pK|U$ll5XfkLNn~|3yDrN z1bxU3{8C<S%EjeH7_&n}p7Q-I+9x{g53M6R8qsU>sCO@8TkMkYFwEh(fw*C%z#w$x zS2RN(cU1-pR-%`&C;ft6He0b7Ft~wXwmKx#Hg{x)oUbAHlrbj!axd?y&_#7My(?hU zsa<Hd=;Fd(q}>flde?mFJZDYv$o`qR)caI|(%c#d72HM2R+Ef+1t0hr8}){$cUS$$ zj@G%BquP=OI;8JvR4wFV9Awo!o+b8u*~hY%e^#QqeN(k|wJOqXEtHFIx3v4&1c4zn zw8MxOVeOg+487FPdYJw&^*0387R)DQtBFSaV)vM>q#E`2QvVL>e)izV4%eLzbPP~# zz^wZQp7&akLiOmrqJJL8UM#v)O!tiGol(6hs&j50)K5}&d|!#~7vER=p~1*QYI(8L z$^1p||Eah)%ea7h%J7q4@F;B%+I}GaQry*nJC}(&ky`@CR1<eXLyIXx@E|b9$}w<3 zSutE3_+X%7yEVy>X?wv*gmT2r+sakwcNy0#<b}wDh2VmI@a#3qjFqvISe(7^d*K@~ z{eBx|WTD@;Ft7L|9G}Ck8q@71{{2@@7KK}}75uRLT6atR(2nIJ67+=Io#7Ra`k@nV zVFNb1yB_z47cd>qoi|C(4FuNR{XIW(YvS+8yN~9*nxqc#M=F;W+?w;Vtf#K|Y1UJ$ zw{9P$RQ*65b-j$Y*81Tcho&hN!*e9wPl`v`S6lYQ2eZrn&-hy$KI?G%&IQW0PAluK z^O&a;-cfyE%#iYvR*S!A)R>_@MJv(Ztr29_PuFFaA5gMNj(NRRZ?TTLm9>8H8~z4- zN7^O2iRdLVUy*stZPGq)C~|cFzYbJPVGohai$#XsPZBwLIXuPWZHy+RT$u|=*`nji zo&xe=YjK2Y&?AH=%J)`m2W^}|I2+m#8KlAYo}W9cV=HmwZ?4;zRcFz<qUTVIZs)wg ztzP(k0A3V(C*R-UeJyJz0eD6H`%}DU&3{IK@f-jCb>1ty|7UcrdBc=+(JA8F^xj2t zyWto5{`tA(9fBtrCvqR-;V~Ra+XK)*5c&`snGPMWUSbc9u<4a{_PEV=>Dv>OeC8l} z$quFYiTV*8Pu6F4h_AKi3+!1l^8Me?{cYInUfJ$s?t=a%b~PzeY;0m*ld^(IHeG+B ziTyRvvc#6>T50TiGs+ljlbe^pnQ0~0=&P_PhbYVaMp1ZkwWmYi3?(`ArXMtw$vk>B z_3ffQ`DY>Z0dJMqiYxg?Z)?JK#nqOq*KzJa4e*LhdX>y6`FEq_<KGv-kJt&dyt!_% zL5poD4cd3Ac7GrxS-0_DJO2k6BTeW~(xx?xkJZJi3)WI^pm-JYwBdHagFE*~7QT`G z-4l;w2~GxxN6>~|BkgTqPk<9!cizHr+k&)f677-tDzTBTt)9}Hf~|XToShh(amO0I zi;X-~^|4(%8T<Ew>M4CkfN2e|Os<|{l#_~0JZArPV*idW3tPGIPfEu83-)jMo*HNW zp2|P*_U~lu-;1h0wtr8Kvwu5H`*#ZV@A!HIW@(eyww<Q^I~Cn)QCz#E9$7Ci{=xno z!2a#4cK5kxkJ!Hh;8bD)AH?pI?FN7FNyggH;{3)e@k0unw1mI=;lRk<j78SEuHVfV zJZ+uqLEm(Q=nH2K_@e%lu|vv*_L_2ijzw;06+aa5QxW*4(g&LSgC2NlzDMUgh^ocl z_#XOO`g#-o4XGQUBj9-g{u6^~13Di1il+t`?gIv)^ALSmW74M3`DSQy6@4nPi)x_9 z#%fRBX=0)kQ*I^y2Ptzk>m!1L$`N9V*7T-lo6u*8F91z%1Q$Y=lGlsw`xWfKC+-z_ zDr?7@=kO5Lk0?iM)<Ni$a|f%0S8L!$aI*-w*2HKXIGQx2RL15;)?X?G2FjN4BDh&7 zcmWP$Og$YPQ#$a*#_n=tX5|_Feb*AMgpXC8;rb?RcvGG&VS!QB`UD=g2}^)0Yp(_@ zdBCE<XNX%l^ry)0U&G)3Wr4Qs*TiJ~;>~wdzh@op-^yI$80BoWWR-0F!@O7DtGut^ zEo|^Iw`i@e=ny%1S5ktWJIed>H;@S;%T6GRepr5&dt4dqPrtq}vDq2U`o1&V0FUWJ zeu<3zJiOe3T=9`lWJ@x>WGeV<pnujN1LeOX&|WRLKN4KFE+&JHkmn5c*j8vwe0+|f z4~<7&OTD7!rBGKkbxFP3tr?-(`tN3?h}{X@K>m?&d5pdCE%mI$R5`<^WL^8a&T!iO z&Tx9QGi+V&471Ov;zRbk36BuoFbW<d?TjyPCv+$Jf$-^hBG>r;mGw1`N}kuOY3-%K z%x#iaQ(DVh?00b$jqO;eDM#~Hs7LGBw@jS79))`Fsqb3Ezw#gbS}HcC&iP|ITG6TR z0k47&J8MS`@XOa|M+)~V8G8Y6q?!0BWnZKQjudqNZze0-^64kR*Xxy41zKhlzdx{+ zcRYcg<4XD{ve?^yG!WjFU`y2BK+lyuZRohA`9D&Q?xn9EPE(E+16zaQ4PR&RhVLTJ zF=a^TBz->n4~qL*+B3T-yFk`^e^_7E(Ex2uK<1_a+d=w5;OQjA!;_!o{do)~>F@kt zcw0O?0*k=m1_uI%*b(F5xE46R3LJZtBm<6#z~CuTK8nFXV8SZFOR-tE)e^1W4IZRk zH+T?SbTfaD`d6Yiy;ixhK={A#t%v6;+tTqF6x;-X&1y?9;6WCZy7wqYYk+5^iI)I) znFwFI2A{C)N}>VJHH>NZ9N?J~#mkfROFD!H^Z-|V99(wbN}-?P;gUIR47L)pzhr*9 z8`$*8DcgkB?eIo{tsB^IP&Hs<4q(7G0Bj-ld`h1MfQz|Zcp-UYoJpTCzYe?NU~7XG zWNsojwtu%VYbo>3R^(rI<%$9gIv0H^gv~m{TzIMK2sJ>9;_GRlf1pw0+iL7Je(b_a zk)dlSYi<7Kti9*esx$CF4LEwOsh+jhrs`?f9D0%QvKHAJya#?EbHt*m4e+j`<l7>= z(Rim+y<)8`E49>?<?q~*wT1ILYT4(U*0SaCJrj>Uz6V-1${e4n<3}|Vzj(dZmZqn% zp1G_f$1MLF9ch7kI>e_vt%0%^rfn--7~YmvlBd`1+gzqpF6)@aI%YbwG!;8ZFFtPr zm|_nA?<wrnsnAp^>s%$QdC8jU(%{sNM|j?iPA|_3c~1UqU76TVmIfymRHmfp3&Bw( z^RK1Y%5s@k2<|v%A+(=3|84aZeZn8WbAHPn_$4w?bW=s$H>-s+VOKt>Ag^Y4zWbZH zGMV?YRvPXcsAJD;ZP^Xf(ORF@kpga7>u>9j7zgqn_JVZkD3x!}Ls<TkXUPjc?pr$7 z)iZu4I_PEA6m7;_#jU3UbMi{oHZ92^;kkBfe;T$w2e!W<@a&Z1=-aB@@1!l;>Hp9- 
z8?t6w9HHx|OWHGqs|B4+))s>)HhnU*AU1OB3OeKW=qc-DBPKL<5<QPK>`B3@f`z2* z;7I0*z$<Z}^F)sEf3w)w@Xd6bwx)Z|3jNS#fsgp-o)|pwb&KvVu#5g*6{r8NcsvSI zs<}q0(RU8|t9WtdHWx7GkzBz*hn!2KtEqbLk1cKNN${@)<{t1aYa1ao%P(!dXX3`J zr{|`1>_(>!qTlCHP6_2m871TwoMTIzQEAE0{dt+&R!yGc-n2e*TM2vVYOuw^^DCAj z`|Z$oh*XlVXNg~)9oi}h-rupC`KhcE+M&<b^HTIGi$ROo9`W&#{3Yb=fgURJH)ctm zrEaBa7h@Fr;ZU72v40ZxN%Q!b<6y(+(Jfhyo_|!r&(;s^d(Jwnqvc>)$Jx|qoWw@( z;iC<1+Mr=8_i3D^Or0mO4ai#Moz&TAwpDN@Z53Q~v$kzx4i?L+g9{t^<va09I^x?| z0ZhaRsA{AgH&dRh8^@K^A>UlgQ3mHR@WLL(lHi(-cI(Iv4{Z)ouOB=+!I@s1r-%5q zYl){rco1uGAvbihl=6Jga5v*u@T^UAYztuH^spYFqT^}slNRXGg3asn&Fa=?>&yCt zri6}#rsSMPfh9m*H)#gC(Foq1h8FKaz6rit!8Nvxp`tep2j4O#<NM}5`o?jBF$0`} z6MJw?pWuc4{-tfSrG4(mzE<o9Eu0rEIFvJnd#tJL?6(ao`|D;Vr{hD*Tt9GrX~8Py zeDD0DH_SN^p5TZi{b}pazD;VH-93MjzL`1TTIM<K;F@*InB$}?*N)p&KcX*$O{>?c zhR^U{Ds7mCJUO8%{<W0ZQ+ZFpq1=60+Qj`?+q1C$)F}9H+8ocQ4u>E8Tzni)e1Lp_ zUS%E{U_KDSp1d6Wdpvsh97WZ03+||f#{3%gSdoM5zq?=)P>{)@3yuUQl3&fKEjtPC zFG81e<@g=*a5SGF`aHTvz_c%{ME8HqYW1(gz7UABFUY)LC4SR2==)M`t}?-}FU-Y8 z8d#9I%|ZO~AhPWUeES&vZvkc-^d;YfrbVyLVy#E$T%HyBOySuEy~=Yo=eFfQSKZh^ zM3zb014&8x2J})bxME!zI?6lXOKeAp;IP%yO`lZLkQ4S$129F7Dd8Q^Te4ZN$bw`w zS#Jd{@gY0}f7@by6Pc$W^IT$Qpq_i^&kg8vMqNoxeFM5}TIGrhy6wqh-f$Yao4|nG z1$o;FA7-x^@>=Y&(nr!Z`Y0T0^FMJ<yDqd*+9&PI#vXz1asLF)KNr19%8>G6?^nJg z@@WaSLVHN;v&d{uHa;e)*EeSMb`cL4AJjMSDGVeiH+hWlmKM^!)oA!{h>o}W*=26A z%htftL-|cvz5iI$7J&|HC{K75I*Xjic$BePx(i;r8k=pZv7Vl4*e~7ieHp`IJKoKh z?Cs**Lh4~Gmx}H0Ab32+-ti4zCT<?}^s@FM|Ml!DY7<<_*f=&usak{$p$EOlj=aZT zq5T!g-8k94?bJ&A*hF_`?4Mf7J^Hh8KbL#-Rq^+U;5+U@hqf_hDrNlRr`(RL-34B{ z^OXKZ`bXjhwavY)L)sM~=461jNngl(E(86?3%=jPXZOrhrN6cQ3w_=2jTF_^1w9lV zCS{ZFgp0WcHs-^Io!K5*&hr3#%ZgrjYO1&Y2gvPitF0@MzBTOa)HxB_v7@KvVQ&{5 ztj?61oQ-SXEES#OEPJz#A+ls{6Ip8b_*RRq`x-IUyXW$ZF3BA5EcXBEZp!irUx0Q+ z|1E$fq+Y30Y^G9QtUQ_9bTcoA&=xywS;~CrApHDv2>)`c?D?>D`TJ`g)ku?X$lSJb z&e)Ef-%RY#_-}{sWaLl|{|9NuslWL+kJ}CJBYwg7f3tM#=f5{8KOSz*L484deB<Gb z&0n0rdkTNt*t6J*OK4XJKd=yML$bdLS$MdZHj0if&)tlt*Xg5E@S|}&M^@s$UVM!% zXBY@wE`^uDqmLrrD~Rjsa3G5(Gmb;>ndMw}vwv)_s)lyL-&QluM3%*Pd{%H`hc%ej zA^L^z{On*}ft*9%3hlHr*1DnP8b#uwXbxrgBhZXJR8of?#q~7nGZq;`&>FF~JQDBi zgf*pI)&p9RF}tC+Y{r$wxDvjb42`$em-ShY#S>YBSjwD3^eEaud`Np<F6#r})zG6> zt9FBL{7|*|Bg_|nz&zKAk9p*W_>?18y7P$RG4g9h%)%aE(-=1m3$<;_u`2>|sp!}% zRLe#UJU!_P790w$EqID`hbajb{SoBPk>J{Oy~Hh&dH8bnIz57a;PBwuj!2ESzn6LW znGf(o&FRdlyXHt%E<Ef>Wz_YFIp~!87+>)B)|`2cYuU3tenAQ{A?bq6dBZ2zoF7Jx zY*1_^+p#&zUY@6vsLeS5es@{R`r24~#7`^z4fgslMX?8N$<_653y%%0V?E0l55`=J zu^@B69%x_m<kz7QSxb?A=th5f%bMtUQ?Yd&)0DGwdG~v0y@-9}MZD|mt9i7Dbo1)W zZ7J~3HFL*ytoe3gM-l%Dy&qGw9%D^}GQ@t1y}eIt)goi0T;aQKf>(*9E8mg7hL5F> z@=r59V&7$4xfxIKV<bX;Deue=p-0*({pG?&vo=Z9wc;uI=7UXTjh1na-8Xovd(jKr z;B+&6l7g<}f)<GF;b^0;?8x7UjDKXMF+NOOxRlYXH+#iT%2CW1!3OM@4ehNE*$T~z ze2tj$H9bzY#>*TJ{c8`p*EO1QR%Ec~i*9gW<zDoH7>^g-OY|<0v7&=9XK$`a;~XOB zIe*>Cj%Tr@oJsZe?>cxJd6slw+u4%A9P*f&8j2vdi~e&~v&?yl;V-hL7Qipb#(X9O zJq*B?Wv;3nbu?qw3`f9uwyK1z-y5iSmOke!7{@O3VObyB3GWqJm%jI?sa^1bvjJd` z{uO(V%$-}A)5;hXUMKHkV={hx3Qv@B*^AmG{O~mW_YC~-nK*tZ?GT>0R!!B9Q%8h8 zkK|@<>t)OWb59EVkXYl)1+i_Tvph?G3y)5Nu7it+^+EfxS>u$p$^MLd#zDk^EkKFd z7iAtOd_r`SR`AfuoW+MclRXxZyV)1;L{Xa+niD-h`l1!wO4-@SlAI>&gVd4094;BW z3I9@=QxB)#k^k_21wE|gYj;uC46eJdSMFtPDVYDQtnDAE#yWHa9u~M3d5_IRV{Rk( z+pa|C_(#as04y@6+pb{SVvfHEo&3lS;;b^qe}p-HWJkl}o3A<Y_@cm?0_33Q5vTXj z;CP!(EC7A2JymzvC_8v}M|W^JG_5&ojDg05wr!iOiH@nv3l7od&CKu5K=V`K;l1$h z<IuFsDWf(*#qc#d15L<$-i4mtm=>MWX9Z_;*qGPH>xgaS4Ip1d&ezgkiIfkoO-E)K z{9WuPE%>nEQ;Mx4(INl2;O%n$my8>kuXhu#TE4UARu|r{p*OV86}}$5uT+xo!%fml zSdS0kFB6mTKV>|~8nbH9!$i+;#`AV`xw*vT6?rB)#xCaZC*TW{sk{4bp}&MKWQZpt z2RjUWye<1{ZEdP;<4Iqrz!6+t5J|Kdyjv6A9avs)0{$&}!duW7aYpSY;N21UcCXm$ 
zbN6TYuIbD|r*()v-a3A<V}+Vw-(l$R7Kh8!;ZLB$pRlU_Ty%I>oDSbho)g&Z2hib# zU%z0<HFS94)h=)-{JM#L8)xw9&+B@<$*Ym&yTQ4@70<8Fnf$s3e%;94Nx^jy{JEEL z`yRBi!NS;u53l6i9BUG^n%Z?Uy!r&=@~wkP*vI`4#_oy4%x%|s$97!zaAL=U4pUEo zZ|A~C-a07b$956#PKREleBs&hUHEn?<2TCJDbLVrl8kZROW7Ly<D{wANLj+OpQWwK zlg8;A_G0%|68*jT-fE%UBkWPl4XjvqXt8bEVHF;otdzQnU3xKlmp8(@?f6*>uNGb` z^2&v+a`PhUfM@se+zEZBK%){fM*b0>pV{!M4bYz9vkjd$@V|%gnjG|WY=q|JKAU?x zJX-GapcO~Zvu>mLoF(6Aql^prZsS>ah45eD*}{W`_X_V8{wusYo{yaP?LfsBem^ks znCs&Cax(NQddIW$e{cjoM@h*tFG%lIwT-eK5C{@;Juy|6^-XLI##~6+d7O3%9f-^p zdHvc1&LM{e=D=&k=LFeg$nAHms>f=vb=5!vI~l9;pUg$&J@Gk*pM(y427k77eFa(& z9-Rn1jJIWOn-4wAhaO^jYmJi7Bl$(QijZIWP~?=zXYq$;FTH+J`rRzgO&Rh{`aY)9 z#^>|VuZQU$Y4d8@E^ANNFxKs+y_=a^2rca9xf@<4{U4Lt0orZoDO_!4zn>O5px;a2 z$EoyvG4eXv_sR5qYG^0;h{<b>JTciDn+r?7i_CVzkInvF7n8}mJ{lM)w#As;QKhZ2 ze?erwZ*kZt>vM~oVFxy!Sb1VU5*wA+Oa1Uuv5z(ZR|s8j5zh_KU<5z3L-5v9tf#NS zPF6BKylr8+x3C-E0Pzt%MGEDIx2@v**q6=^%vj4mXBa!l$Ur~3=xwtyw>42t_E4qJ zF+vHy%yVysw-DJrV>SP-y&=5qCFJcux>C41k=XBZ)IP~8FbO>W1;3t9xgdPang!tr zWzO)VK4P$aZcU-45%c~a`#``&;B!+7#Xh?O{$Z^7VSkbJw^ghgHn0vjhqYs44Q_-( z7hjCf{9Il3O$Ag<cd-`dLx#(`sH`1suXY)Ghh#0iGD-aI9J+(=7GRo7U9v_fdx>QI zFi6}%*=Hnc%^Ue9dt2mR`Nls+Ing!6=pL8B-#*JTcE_#gS2v2iLwu3MMo>e$*=N@L zN#|U4Pnfg%)DYiC64$Gny=R`LIq+}xWvKR^@%a2Fr(rLaSk>9sH<vq_ol0HVGsGlY zlcd}rcFts{ra#ljnizcj^!l{XkEm(>)Depv$!YYXs#K>qkz2G^OH(K_rDp>+NF#rO zazl#Z?SGjwfP2}K{4!+@^joVM8I!4;OCmax*ynuwBj=JV!+$FIHg91{nzvA5Ax#dr zu$@H9_9=>SmWll~rBKGGmVh0Uxtx?Q@XHw~FQ32j)l|Mu1t&+oJgR&*DP^Y8CUB-` zntsIX4Ijfs+JN8VJ^ce|YlCYFu(IjTU?Y2Wy`x;QWdAaSwjQCa>69z&?WMhGBX0BO zTT=DiYAWG4*`G#vuh8xT&}0htr_p8ia-U2dc?NbJ9@8)H!@QSuY&mOXHhXiW4;=hw zhmP$0D>z>Te%=N@tH9586F*MyBYPKQFXILLpyxWkj}!dhyY9%l3j9c2f}2&~#tCjF zftzHrze3>0F^%)G)#M)OyMwGRSF+~mZ8|VZ;1jsB$s=$oO6{!pHpqC3ogZVPya2jn zHnvxrX45_Ruyis8#n;2hH=8%1*^a+xatnKKsweeHyPQsZJ*p>l*!iA!+jxBy`?W%u zN^>f{rg`SNxSfBVWxZeiSwwv)Hp_H<{uCYm*z)8=b-MiLrz{8mHO`-`-v_RQCo-Q1 zOP+vBDV?ODg8`Sr8QqJvypi~#{BPHIj)T(?2WJX!PBQmUO4*0mGb!aw2F_iX3C+$y zFurKQD7Kg9(i56ffzxrzRQ+w><ll?nwBZv>kdxs8qx6H|Cg4*_9n>j)6tdUyX%jzd zz>nZ>@+hr2H8ZJPa4<Oze{0R}o-|GJPG1aek~0#^r^fxW+Wg1BS!UAoMc^rwb7!W+ z{Szx^x8OA+k#Z(;UYhlya$>L;|D-2G%b`y{v7BUUv>b31508{1WeC2L!P5fjGRg!` zF`O6kPVkN$bEsW&=stWWH2$|UHigd=(}x=0UssfyT)y$x{2I}-g0hxS#oV>S3xAPu z7Txb_@f-%<@89tgVoeas@<GlRJ}!HSi2FKiO=08Dk)_Z_1l)>VuN}99HSgv!&Yba_ zL~jt;tKzqLzFy9gvZALd?IX|~<_-)z7WhVTc`JL*UFcQXuVyvditjBDei`^iW;w9m z5U~#PxaHgV1+TjK&U#gWZMnixvFzo)@a@izzwKAjH>J?u^x+m{NxNMsO@3ff2YaKd zLck~AWbNr?_GE4xXV-OP*JAWo`mb*)eJbN>Qwrz&kmnKp-OPUrg=Xl}MewA<;B^xH zT50+kJ;J}v2c~vNxzY!d>DQO}$3wqLnX&Thly~7D`c=xAO21ad{Udc=_=kQq%Ax;0 zu^jsQCCX`FPc1asrvv+Jc(V>nb6LNh3{Ngmv#)0?lrGABF>Ce>N>wHPUY+R0O}9+Z z#h<=yzPn3&e<st$MbxDereqDYqSKzm<ex(M?fB9+;fF?BszTK94778a^(C<*N?jsD zZj|x3KB4^SOr>=4#22$<oUN6A@p+dr1y8YZDW`P1?0Y32V|eIh`son-p^-WlvJdwW z@dIKX?``1cH2y_y%4sD3E9f8ckCT7I-bp+n{P>mWP4n|~`EMaOkNx|S`R_gW+UxVD z=_T}8Ph44t&F{(f?CBe6<EglBhi+jmflhwt7S1I+QxtB*pGZ@^{olclsNw6X#Qg{} zhSI?!aa)TR6Pyol$H7(^FAYj#nHC6i*nGLVoiXKN-e6O_)yWCo>Qjt`DWb!pC%AWf zzWGITd3gMcDag=NbYJnWh8DMRPNb(QS4q$19ISlyK#qSPyWE;)*H<VOy-4#`$L5){ z@gvPuEG6Ren#;U4jTlw3_H-712<fLZONxVa<(_Ft31OKF)Z{8>Ct%AKJDk{xCIwfq z*HsJ2J^~AJv1XRoBk-3Dt~TLucm$p!$Vq|eEc)r?VJf<6m|kK1CI-_Afr-5c0+Yb8 zfx35tlgZSdV&KE!!49|WY3i3a;cNC=)6tcN9)T`jVXP4sQcttbtHy5S?`19S81zZZ z9b+HJRP40q8peLKMJ7I)n8QomFC`{~>!?%uE1svhhkL`qqht&<NL-Im(eu+K)_?-9 z%jSIqUw+mA982XN?l+EOos0Wc?)7WY_pz_n%u1+o$@6%gq4gTzYDF&#e$NpO()SWS zXBp?7E&He1Svlwl4)lb|+{0Oc+|I0StA}+4m#3yoDXf9+LPM2s@KN>$J?<ceeeJA< zXNufS@LO4D+l*fAVH^<e*x}rV4>4m#_PN(SsVrVdy}K7ay=^ysf-S1DyG127k7^-V zNeQGxQW9whNh8@vc9MhSBqftlNU5YWQaWiUX&7lZDT6eEG?Fxmlu61WjV6sDjU|mE 
zWs~Y=wIq^kq-0V$DT9<r8cWJ0O(eNVlS%oc8%TwuS)@6n`J`gfLQ*N|cG42kmr2V> zUnK=et4UR)`$^TL^>woray}w*Eq(O|12e>)=3#8GUVXMTYBOz@`LXox;IVwUcmDg= zKe~x|kL-mMJhPvty;iNARa;#<Yd7O-(;Uu2#a1Qu&05Nzq&m7*<KHW5)xw{j7QMEX zy_e{|P5Z6Y(8t+LoI5Rf9K08}9DH-+la`a3mFfWFM9#^QIrjqUZ~B|2veopp)X)B* z_7>`Iq5dP(zxzV{&Yoq|KUsBltzm8T5%M4hnqNZJ?V|i$)D0g#i;d~yy2<ZIq|O^i zUnPB4sg^N%QQZ&z5p|R2s_M2;XCWy_`u=6=uK6SCCeKyXolKpxNUKSIf0??m%Y9~? zljo}HPN&W}q$<*m%hb*K&1bHgJXckB26fIS-A{VzGIi^JMBU`Ms=70&vzSy(YP(F` zkNgpJljo}H9!s4IN$W|wFH`q-{)oECb5(WcQfDctj<n}8bwBn;)J>kNs(T`J-cI@^ z>4nSGz3q>vn><%lx0^bbkQzzh%hbL7>gy&JdsiiVZ4-G!=VO1P=z`UL_!`e=OH96I zgQp6gJXN1`y=}5mZTm8*iS!etT0u4`$R-8Zq#&CVWRrqyQjkpwvPnTUDaa-T*`y$w z6l9Zqe?nM6CMw881(~QI6BT5lf=pD9i3&1NK_+qrCMlDYMH)>SLmEpOw|m(Ge>ZE{ zH~wx`Bf7&1{N-h?bfdyqzxWI9AAcx|SQA?t%x|{h$9Qk!eP<SH{#zp!<sId>x1JZ< zm8Lv5v=tq+r6}PeV(eX_i^gJ45d+Yeb7{<RyTOCZ-$c)`DN0zycG%54YU9v^X2FwJ z-7?dgST|E}xC5Ca?-cY01^q!me^AgL6!ZrL{Xs#0P|zO~^atYalI$c#6Z(UK{-B^g z2(CE8X@-UK+$8qMaqDo__A@*;o*#IO_$ui&mW<GH>WRr-i3=rqSqyf8^_=Km*kI&5 z3pvMBU?&#e3nz#><>EbZ{P`Ttc(VNV`(?S{aRs`O?BnB~eq`VaExa3CzQVuD_;1F_ zXk5SJ#I_2cuZgdX7rB{@&bA!CJZ;*P!T|Ab0>sJ+4#m$><qRzLN64C9Hs3<b1saQ4 z%crc3USb2$MjLZmVv6h7iH6F&>KXJpnJ*ku9U)|WI{xkzljb|a0rW-Qm-?_{xJXh@ zg!5x%kEU(LltRvyFUu~NQrJj4Y|MdkzPxW%koqLfR&7%4ti~klrj*e*l<Nr2qS|R` z3jkxo=RT$d!RM`<Q4_Dr#Bhx5bIZz>naxvk*+0g;c4B6;hE0%uAL$B!3vhs)v?7sq zj5r-FL;7Z5u9Y)uoozPJm#tPqUlt!mJNmLlOi0m(JMpWL_4C=>2e{Y9P#*U_<{8>G z#Ogv%o{gR?^9@^$G2bekO?^Ijrrq86TE*JT{3$HHNhhtG#XY2UR`(EW7sEM+1wHx% zdQoH~&)E7Rqq$<o#AeYv@Fk`Hczwd|<MmdOWjC>z!pDcz&iY&qwqMQ*6gba6xS$RD zTRZJBVi;cTUH22WyHELPmuk~({ogP1vGy9IujP4ge!+*dmoaJJA_CnmL$4HkM4%}@ z_^9E&fqTw13ClfwUlFo7#8<~rl5CIq94OdlSv!z1?n+ZS1m{wYfqTvijz1$fb_TGV zA>43;wL|8fjW2F0J3lv}`45a0p~bfP!Zz?cqmr@GQojV>s_b?L=jXl1+1(3SD@~z) z_o}vCDsX2)*Qril_%_-H4`@!uhRoVHYp}M^`(GY4o&6$5M>w_cJ=kW`)3oq$;FrAM zwBmdCv(@mQ${h54)-RsI(}(r*zK5vyHGINGB>TeRYb5Pz=i6UVhrEA=e<ttqwz-|` zXQN*_$NIvDGkxJ--0lniKbtSyIybYU)%wN0mot3fqq)BDuP6G#qZa;6zvcG7>F=E7 z3%|P17yfCfFMM!`FFZ+2?#i}0d)~l4kucr=q1~Fy-sNP@IUMe}BfatQ>`b-l(Ak`3 zp`p{?R@<bn@cZk_0mdoVcqh4iVNbp<?4InyHz=X)6#TXXExb5EN%I@$Av^S^=Z@-V zASUhcTLtz}eII-$v8^vp>CaKKe|gS2vi&{l5d0`ccmg$}3>(&VYl5MRcZMD+kp1ls zE*(|w0+-@zaf-O0MnA(RpoQ>c4ZD{44quJ%G{NEAJ39n^{ABIfLzF!-CihxK_Ox)G zWso{-*vX`h<15vQL(~zXe5vQqUyqtD>l;1R(FQzEgF~^|tj12iF$3EzdnQXVz0C`- z(f{*$=ja>N(ILxt#wTkR#AiIq`2hZg44<P*-TwG4{MLc5I-n~0+j;ZaG}h@I(4_1c zY`54qVoUdHoIzZY`{Lt1Rbsn+vCpxObA#OYA6wLl>9p^C=3enQX_>E92u^|~yqzYz z-M||GfBrk~UFQMTFRgcs9sz8pkW+?_EAX+_=MQESJMP6cQoPQy?tO4+l+D?bf{UEF zmzFK-b2qD_{bJt^K9D^<km2|L4w!u8m%P3|$o%lXUw6ywKVRKBGyi;bw-o&O>dv|0 z&sTR#{-3YzoGJgkx))p+W6sg|lSjuGyxeAtF{i(gF(%`S@lt*YzP1?tBx9%{V}UV_ zWbDXTl5e;2?LodZ@(sH#Ho<hyp1Dgp;(3(fRx1uO=GT~G{;)ab&oJ&zG3Ix{H*C{3 zm7Ru95pP9bwPfwOL-2r`;ZMcDH{OP?Fl!Bq;61&J<6eByo~ggHF9z?2=(+-Pl;7Ar zPa!*k)sxr7!?15+luuR;;*+OLK8e3`-va6k!56o|dxS5ZoR3TgC#+%nD>F(Q@{jPd zO4=D;ws*p(lui4bFKsGYDCL*3r)>Uo;?6jA;^AH3!Q+W}5v|MV-$-4;<D?#`PwH|L z?^Jl4H|Ns2{t6!V|LB8_^g$)<PP<usUhuOAJe&nLu`*&Zqjp@h43SAOnIZOJkzdjm z_UW;{up50*S>q@#F=T~3RK&V>3OK<&?BA5J$RYLo9=R)ZtQv&#meJ9EkIAN3zl&^= zeizx)O`kVrpm!jboHI9-O#(0YP*ewv{!cdge+6}ld=eT6LIZgu&TjL!BB$2kgZ@7C zH-IaVkG1J4ek+N{J{R$ZwVqbyQQh^M9~a#XJ*j}$2+gzEcbfLS?>?>tZYVfswHdPe z8hkFl1P;ZvS_y3nJ^zk!gJxg5-y4{5fmW`fKM#m*Y|zf-`|}V!0*3q^dTD>=(-#j~ z?Z%k8gpO&S$oo&CW5KE8#we}ns>A5Nn4FjKwF13d;A&xvGJiH`O=w1Z>BKj32k&J( zw`VN$ALrdh+Ank@`pp8qUkOKz{TJgX0Ih4#x&zwB2UmB}uYUUVL1-yY#uxK~cpTZO z3*48_Wj*Ov#I2LMq#p+3K(Sv14hlDwouR$N@zR^%C((M7je5t6u5R>AGVLsJY^<KL z?l^o)a3JjzzqeRBrGB9Y#!huNYas&LkASTW{21^Vd9#eXA5unC4|I5<<8Mia_>UgH zm6++!ZY6Xp`qNt0ONCd7ekbE*2l%aIjLZ1*n0lSps#bK-&oVxxuk#Gu(P8Y_5jwoU 
zuWY^x{A#D!x0moMn^qkwU*H4JQQA|Y<1?n8dW<^k9?|3Ug~X<y|4QgTVs{$6DxMGQ zWUVn79PT#wz(RvxtpZ2VrWijEzSYQium+u<S1zI3#U`HH;j2OPvf@vGbJ<6VAwMpG zlYIVUA8ix<Bs|+5S|KqkfJ@@Hg)$a6p5~u;nDpe!<D#Yi6Y3CLtiCcXR!q4%Tog}< z;-cmKOL4*83S-O<=05^^j0YV$ck%ezL?2yArW6r_Cw_blmMO)Yr7L|?`|hRi+(6&R z97*_)=*$9#3*3p$EOaV#q{Zmy9mUY;WBn7G=Lj7&G&QltIgB+uO<%C&UUY6pxn{@` z*8ZT^!8Ej>I&WRHzcgcPNPXB63^=`KqV>e&mdqU{#*Nj>@on26o+L6p#+QV~9nf|F zzO-$<HhK~~sp!HO()=2{fw@n00G{Nbp4c4XLHx``7m|5|!0H2Lfiv>qzzmtgNV^*^ zwA*I1J4n0Tw0i_|<rTS?Z};yhFE*#~<wo1Ayme`t6QC7ahR^SUhX@Z6`T$1emQIfq z{`4<=m-iRO72o6am3^;Y+TSu~yddA`cS9Fq??_A+T5$m{gLI+9i_7+&{FJi6jq~)C zb)l1=B0IZH+37uYX<gTwbSv!_x^)4c^i7ObmH)aFpQ8K3`puRcqt{FOZ3A#!j$Y&Y zEnvcy^ZzcbXEOB&T!Y7WY<vd?(dwt+spwPiR2+llKQ6_S!1AEgc41x|qvs2>3@lfL zlVMkZlVRrgef3hDeD_muVq;8Qpi5{cHg+A*(B*UrY|a<)ybC%7HrRhP$A#ek61mRz z|9cfY*!J^_;n}qMQ+O~j0<Oq|M@Dh8@9?F4^unj~k#_9kKDrXW5q;;==vFgrD93+# zX&>1?1rD*H#AxM8a1>nyt+-5DQ4U@T$9Fyjj>y{|!*Qi?{8{jkv-eUsJ_|nnr{M7Z z^Pdh53w|%3Mjw%sD4j0*$4lX;`m{0JeesyNNCq>02Fu`+*gB(Q=3*Hv{(mvKC-N>P z`wY3q`28LBz$W%cUx}}|PJc`zSEAL=L?h2$ij&VmBUgfB#Z|~0UrZkEx)hE{pE70w zXFtZrm1y;|z_ILypB)bEDs`s=S3#?-oWEq~wNFIh2riA<IYeJv_bK>@hvQ24_$>6% zvOUV%1120|qZhmMmFP6eQ=>L~!zP1X@-})jYexD)^k|1MkB-h~WxR-eBxV!wRZm(c zI;)JA(dZj8R~22cIQkwv(c!U%t)=37Uc)}dO1>F;^N_EORh!CUc6;AgjhG=0U3C3; z9jf;Gm*Q3Q$1B;<g?^&;y35<}V|tVF*rjDp`4n8WzVopSUvRat>0(^vnYa>Lr{F3I zM>2T9&IwM$&MCOM8C<o3E79`>hhpQ5fU61MN_2hE&A*P0ApgbY0)y8RBIr$5!h;1_ zd?h@@<Z#X-m*U}nZ7oCp^VOZx@aL<$<>AYZ(<}9N{5Tc6x*b}R`R<pP>v^CznKwnp zW^`T6w~o0Y^hW%c(rng(C3biT^ya|7H5*@T^wsH|yc0X#UHA!<<o-0PYuBc-O~im~ znD6S6ShljxDdRFWPCMBPD`hn-Vn3iodBwdanf+8sS{`ecCHTR5S)-Kk>$@$=7nP<< zal9V6DZE1TVuM#i;e=P%nde7gJ;44Sk+H%LvWr|j+6bj02Yc2VyyGmn;df;yr2?D$ zBX+Hroj~ldVt;DKC*;8zmAN#&6VzqL$K16(dl=)=&YoXvyEX^whz?m-Hva#^fWgOG zmS5gBa~huoZS|OPsXyMH7SF4eeLdRNc$-1|e6O){lVP90Z$X~~Uya$gMqo?29FJ$O z#k#0{E4udRGxg@$doQgA-<=CK9hqN=ed1~MOuxZAU2MxP({7-}>(Bo~@GM(<mGE%p z$o~dB-q59EOMF|!z9jT;0YB`$y*xZ0%fgM|$jLZ6ZOTJG{U&w_vEOW9ttdL)9L)cH z4j;B{p+Edu@@x4Pqut_D0~JCyV!shujaV|ey4k0K&qGgUe&gdSl}!I$C6jmy#I7)S zw$>%<38z>~@F0Urh+if9)(eSuZD$=p&W%#>@yWJi^gMU{mdBr951-VP!hTE1ck<wn zwzo#6xEqm!-PnWi_a;_qX8VV0m2eJa;ghAGBeu>tY}W5#N9H_NPapATKD3T&XZ>tz zFaA^Khzlcj<#(*UHqM}Je{19-_nX#Hp4Y4+Jtt>#4(xa4w|%(Q8}7^V_MfF)H&`?D z3-J6NxrC2FrzZQ)@G-&9;Pd8~7a1maYRys7MVA&_N$fuHztHfx;CavSxzVvMenHZ% z*Ex4A2|Kv9-uv^Bz~9ZjgNSuxm+bTD6PT3@owF1}b*#U<`ADMs?A|1Iv4@zVv^R^h z?M?#!VVAn~c3>2k`#Dp-mi?@9&Y<AsAm3iFWQzX*wwXlMi(>0J#4<A0b2zJv^&A=Z zLvo^i23Gb5#E*UD&gdA3t(C-l4aA-x^VM#}YVYCvZ#m;(!JVOXVoN24pGVe9lA$HB zCB)B*WNl&re9^5ADG`0)OYktETcKa!>oUG&zwb)owaEB>E6$!5vp?-C|I34^cJW^x zOkV$A9!$!ue|a$NEBecWsrK`Kc`$iD7lkPpHy4bbvz#({!76<67T&pPoy-~Dh8I4# zKH2b*5MJP5jzvs&|E>(bL*@+rb(7YWn7kmSBg*_i<_$i+$=rec;je^F%xYe{WNk-u zy+GE=HIK1YJ%%_bhM!$p&oE?&tVy4<YVE>bYu}gs&(ZZr(W6A)YB)Sl@g#no!XF>J zPTeZ}{p7)0+s^L2#Vx%5%}3_DMJD`#Gu1=}h^!Fb+n5Zf?;4m<!#a0teiHKoiRr8M zi{?i$Kaf0QeuUrfMLr;+#|Y1pwu-!#^A+FYU2IJxwvKUu?{U7~mHfUw3oK$=5qt~p zHRfu-;mZ9P@%s{`Uw}vZRCrpZ|LNf&n709s;N}fz;Y#?aWexF9ji1i~&ocL=`kIWF z2(nY?@v?Ir#>n8g&b7$K7##}@2cBmRL97j#Ye@geccG(B&h3!?6WW(KhcUnSG4Y`I zS9A{1(G1xh)3>jS!Y6wD!>o}@JO{C>NdDu%*@LcA$=R!->j*tmqVN2g_wrv%fBAQw z<-0GbwoKY)+!L2c^d#}=6dg&<bvE9U@2BQJ0qzf)_YK_dGw)lu|A~3u$^Ac>_Yv;@ z(Y)7~FFkMGySU%Oz0?<=Ua3cXEu>!Y+j-jjuYqqrH1Auvf6~0~<bH>F@1hO=$Gi`4 zzs<al@Xz0x_YJ(4^%kk8mHS7z7hlv)?&Y1}#Yf#+VtIK#)V$Y-Q}r$L-p9SnZ6r^C zd*Kao-@yHP^WMe%1Ll1v_fn6PU2VRL@a}8oy@pP@Hg->JtX1Z{i~FE?@8kY1^S*)m zmF9gb_xQ#}Vd4JE=6xskUyS`jU3ZxGF6yc<?|s~tn)ea@Uu52E#N+bE{^#Cj-Us-9 zzIorkeUW+Z<Dc2)eJk&0#{TDhVeEhIWxuq6d+w*3_nq8NGw&nZ%etKWqd~hK^WMe% zb>@8o_t%*Bt=#9D_nq92Gw&nZOH5xW#|Mqcz6H4taGzn`H*g>8+g9!+#*_Th$$hME 
zBizUOR)dbjKS=&@aUUBmE@)fm*G1n($L<J^#Hf@pFFq=*_!Hf}Uuiq#@b*7w)55`I z4Ig+dJWo~n17kJLtNCKz+6*oH)l4m1m8*pxn5cy<x8Kx%??NqnSE&}h?{+P`a)}m} zdB`bZ;>lcOAAE-SSC_;um9=)?!kh5J<~(rreMEpo5kGot4cXWfTCn$tjY4c^@|;Uv z+0WtPoszLVOMKOjzoATRqa8|O!g(98oqw>D{oS{*zk6|8-_k{GAKtyN?W45|+6L+q zj6DNVUd*@dC-5g}zYm-XZiJux1fCK9?iSv440?AL?|wY!-M4u6%%FEa;9VQ<q->4x z_!ReU_Ep&7iTY7||M8nF$vu>Hiao^kKwihBr;FUO|0HE!E%D|weXZ(r$bO#ZuqCBg zp4p~Y*w<rWUyp@-Jr?%$SlHKNVPB7heLWWT^;p=~V_{#9g?&91_VrlU*JEK{kA;0b z7WP0{*aKxrC$R_0!X79Kd!Q`rfwHg%%EBHf3wxj}?18ed2g<@8C<}X_EbM`@j3cr4 zinBbGB$Az!LK;dMLCPYHBaJ6rOY)GWkgg})NctS<^Q0ouEu<3CB2pRY3#2cS{+hIc zbQh_Tw1)IG($`53kiJnj>$!<9XYFQR&KC9sJJXZHPQfE*4$8g~=gYG8pXW&dua1o5 zuo8HBruZbvKJ?T*)0>4(o&&epfhir1QC{NxIQ5iT;yxxP3%#9BJLe4dGXInEdpYNw zxTK*q!Kv#ulZvqoNICL<3jP==`K0BfW`(`%7w5T${ko5kQi_!7GjrIt7M~Y?hLjsf zUnPC_3VAn>w{B2g8+i&zLDKiHkoN$2_YcaOOrBY!)ug|_LSBnneMVI;hB=))v?Ha8 zwBrhSi^w}?P~Hsk%qQJXdg=;!H;}h(P~J@P6qBk+ZCA*9fV}$$<sD0&g{1YQ-B-wK zvCv-2#qG@{PbsO6wC4(Wi^w}?P~M5;xt;V)(hFC}yMerQgYvq`vxL-03SS}b0rKvT z=CwnQ!XN6MR8o7`Cn$c($@o@mUf5Lj2y3(U$x5~T%cLgKPuM^CpMN_``3>nf>2*?s z^fu`o(tD&n(nloa`?Hh;l16fp(n!Neqex>&IizbyF482@G|~)G0cj@5OPWXWkrt3{ zBUO;@B>fd>8R;ve0BIHJ9@2fJb)<(#wWNnh4Wvz^Eu^jApQTQle%{fo!9Sh4LrK=1 zio~2s)u%A7Uxr7%GVxGW@9BqLWzSQ(H~8$#Wu#hC3n^`2V1{kSXk*VLGGLbN#OP+@ znmZ<XU2a}?nAa2LHFvD}-MsEFuP4lF?l|+idEH@NPng%-Z1cN$-C<r&nAhAK^SgQ7 zVO~#|*W6t5yLsJVUQd|U-0|jj^SZ;lo-nVu6U^`Cbx&UO`K)=(pA>zrGOv5g>sj-f zKiT|lUiX;Sv*tB_iuv8V?lG@t&1?Qt^SgQ7V_wgi*ZgVbck{Z(yq-0$`T6E|^SZ~p zo;9!e*PGwX>mKuZ*1YCVH@`=(GUv4ACWp0uZz>bHA##>Ir05;Bvt+-Xy5OdMxuPR& zS$W&|=3&1b7#Y8})n~>Wku@71dmoG0Ln(V)#df+3-C{QHTd_GyoLA!??gQ)%miqwr zwHBG5Sle?Flo#qO(fzPKuCgCi_QG2J6S&w5Tgx6&<oyexJ4(5O_rqT9okHGK70Oo0 zo5TLs$iTpJ=zv?8b8pe+SexB-R`y+wZ*KU3(zcQKJOOiWD)9CP*vs0={?q{bQ8`Pe z|Cj9dZ2(r$U2N>}v!SOAhT(GibYpueHQFlsOC!c!;OHJxANpHtk7-2Ka@kX9;EQ_= zz0cPB2yM6Nf`5fN<tn(Z8Lpg-#j%pT#QU+sp9AOYlQ!_apZ&zNrN4peGP{Y#6m&xH zc!K)9pR}i#e{#T={G$vEJl|>|-hG|5suS4TCjV{Op$)sgW?SF3%}uO2V#xU2iTIl* z)(&;APb_!k+DChS)?8LO=7(R?#wtVIV~91qzMX%Uzv@Z0IpqAASlk)clNR?O+Wx8n zoZz{EtH3GnMRjqt{eyDOVZ2N~w+{7oNFU5*pYLp9(zz5R4BcGP7Y4niO>TA#y*Tcd z;A<!J^}f*$-u~Y~dxE?7z@5<GeSB*GcP_5Az+}mgc}Z>AcymuSy3q^q{Tz#%6i<J# zcM5rP$k#<%ma#uugYKZe=b*pmG{&pDPHiq}X7BEJrR|VMZ9}K;-?_oNdxvG;EQ?oZ zmidT9QFp5e#7FtooZUM&7wwifd&EHvD+!j}7N>Q$rl{48;7?-@GW#v7jI$p!OQDjZ zG|T$KYwWfD@;d{M<uIoRj`y-(82W{#+~;q5m3%L_m6kHbW>+8%hcWmEtQPj_TH0j) z>xXw+nLi{jk4S72f0XwYClN<&NZa{?YV%3XqggQ;nys}oN7xUm0XuZw&mQP@*H=qA zMtd#9O-lgB*7o<o^9Q^im1AlCopp4(>|+&P5QKL~9LP*7b9nH7D9an}VUE;p8Pl~? z&FqmqFJH?4{^QHkF|03TcAX^e>hbE<Xk5i?&qLNRy4yOs=l8Vt?`R8qulrB1rW+x? 
z0O!2*tl(biX+{6+u_#;QjGfT0lA9Yp8c3@oCY#%4*K^eDUv^$Q*L|2h>EOgqY|OlU z8I+M!RM@I0Gmb;6CxGvX(*utYafjG$+3mtVvul(#)~ds@7OV07&|Ti}uPW!d?FlXH ziN)_r{G@5$8{l5%vi+6CZDLR8U*zo%kiU&KYP3n#>qfI)_j~#?LffT{#9h!M%r|MX zo=<xN)=XX6nw_kijVx07rLA@N5j9YbwDmdWRFvgNRI~g7?@8u!*U-l&tRtX>(d`w# z9e7OIc<L5!zu%hamw99W-VBU>@oPVIi_)(W>nT7xBsORf<FwmSROn{^b&tKMFeF#t zagLF*XS3VcCs8PKbszl`A@8wqO1PW<H1rRTz_ig)sIAw^Wu6-OmZi{_u`z1^xKAZn z3J2_#!pH_oq2S<qz}*9$i5)b<F`RWU$~5X3^>ICNwyGno)`)A{Ls@Tcurz;2Sxst| z{tRbx_}SZCdEWPGd!kZ!WRBlaVy*enGr*V3`%TQRpW&O%^Jcy`*_6T{XCrmf*TQo( zcwCPsx~ILBHm_nGZ9DIM+`m1=8<zVBe0h`2$r<Xz08W-O$wzX2jm=R!%U+O4+)5wq z7+}sI?dmU4`lUZy)~qh+-z)cRmi|w<>ee9VG{)2OZ0KI-x!dgLmhNcmGa1956z7#O zYs~9AE8z#6NrFwSq7j>7BXmNn`Tp6#nVgfrxk1E$kn@h;Tl_DKW3^2q{(!^@J3m)x z-hte3^DI2kX7a%k*z$AW&l`lM;DP5KoC7Zz*8bk&1Ff6N#FnD9Z!A-`#=rlT`Tk+v zOTHEGK;fzKkHmnGyj~^ymr_?yc$h-$2gX?dz8{M#su63!lOep#9$HRcXfAI!FH@d5 z-&^G5B%YC*^1Pg9!HvktrSup44LSO8S&~nlYxve+^s(LOV;A>sQ}#Bn_uq(tF^Tj4 za+nW4o#3{?V`K1~e{gTxd8^XAws+t$4gT(uSPG&uB`AA1UG3NBAKY)m(UZ2=7z5df z-m1tzU75s_*K9tImP|?~<&x(3qHFooFZ@U&UV&}>!dDyN)$o&7V(}ui<w~iv%SBuT zX`5!+XJXH`^_k`P-NTPGkK~JwHA&v|!TCS_@8h_W8gVBF|7Wzp(n36J+NaJttJF0< zF7+F-wbtIe<<|ovKS&+Z_x>F!HYQ@SJZNppKvsxMeGi!`y#D+_rTKQN@`CXEFF;qF z@OF*szL;JzGOCwIjFf%IqX~?0g?E1_Rkq5z<LGT#RBmq3>QbBC%p*ikJ%`NfCT_^y z5lVjxvFKVh)Xo%M%)b9{n?)^bfZiIQsm55$6V60x=UrQpT1e337w~;EV4?3nhQ-u1 zIdi%HIIxJEA13hd{@1`G@0Xz$p+|1%9vFD%6e(VhY`s=(YX>H&yAfSd`YZ^X4ZsjU zhdlq_T<}zc44u>V!H{`<g3Cy1=0}~<XD4nw|KZ)1wqFsi;iI)GvewFf3H+bf#+lRT zC|2ZVRL2wDWmJyZT-cl3{5fd!H^6!RLF940+WgPp7aWIctiyXYh|FWJUTbEQz8DW_ zp3uj#E>ihmK@i^125;b>=d;&as{-lK*fBX=4<*ehEXJqbK2{m#N$<N!V#{<=Ch(p= z^wGeK1K=h={#M~Fl+~F<-{#u%&Mc*0c%j&82Y)|2)Z4!g{2*U-&MIsU@ZWy^r_8Vy z{t^%W1bnpWDI*?E?YHY*?HmV=$tQW_f1zh-?-P9k6@N3h&T-!U!%2obH{!-h-oqF2 z*^PX!;U5D3xu_n|djrVTPttou=M~-e&|UC&bYC~Rujsu&^xvqCe5u~s`2&Z$lfBWM z!~hi>oM3HJbl%@wfM<*W&q~gQ5ZnB3fc-=MkvQh-_`kt2x@)6#G-Gj0kFRbk`+!Fi z`*f5cTg1=jSI~O^e4OA+%#*CE62nQqCP&@sA&<*CN^i7gcD2)20qFP;c&KCyii|IY zPYQhs9oaPH1);${d|HkHSB%D@c;+l-@VxT$K*d<#lkxUzY;y*E^bXu;pOsc#l77#l z#PO~=OrMJU7hE5t{8sLdF)llk&<{=x%#byM!<IRPCHNL)*IBB5!+ZH}Fm8kfgZSLb zyf2$r9V&X%zMWn}ew<`oi!;~TUq{asJhsyIL*PvC0)6$Ygs+NzBYloctB5eBrA_f| zvA#7>!8jh7gIo~(LdLA<aZ!E#7RKLv#^F51<6La#McB^gv_*#8YQ#~`p-mr<*Tq%x z9fP+7b}EJM{twC3f4D6wSD(M_V!8U9`TiN+UrDZN3Nk2Z%Y`|l$od-24-<KB$a(PX zQ@n-3tFw0~Rfl*M`52Yq_(me1g-3~AAUc5y-X7q3VQy~DDVbZmApJWypThS|z?cKi zY>>Gmb%>1Cuv5$WOEG!u_=?)_6ZK72(p?f~in)!*U;3>-oB7=`=yh<p@%9Ii5v?h6 z`UFPNS7d$%UwEE*d;fCip_MTajR75{5gYaXf_Duvp16uWBD7;Q`d)2c0=&TYsK}Ta zcsFsEkuSs_C3fiy+HUASW<Jqj;%xw4C7XED7r&cNUoGQ*>5BlmjbhH@(YK7)S=r~U zRazc0uzCBh1=p98r{H9Zh8;<BGPmMh`b*~e+SysnB44!}rT_K>+C*C#hA7Y5upP*L zSS>NCgUqH5#$AQ9sr6c9W>I3{2aKDR4a8I3Ks?nA#8cgnNFtu<2I8r1AfD<5+wOXB z7y-B6Tfv!5oUP&=f*ptQnS=N50Jjn9jN*2fG5+$vp~$9t!Bc?mB3qZm<h-;IJR36I zl&!L6r|{ki&gH$xR->(@*AUw++6Vtm|96U<zgB58`XQxu*52{(f6DFb8+bu6==D;$ zdL`S!@x>qaF=e$4(8tH=<Ky(PwD}2mVW-dzd~yP9I1a9)uTL_E8Z5(?8GX*!XUq>+ z)uZvcko1kr*IS2q!{@9?&}Sn3rnI}k+cL`k2htnxt~V*4`7*wfvwzvny9Mfqo?7P9 z_=L6xu0>}_o?ozi4rh`OV?bgohtv@|dndYdVj3kg|5$U2+Pr(L_fa_?^d({sPvYK< z?inmn3ttXSDtH?|pAyy@S7j&#_2{?%2Ht~=WvNS7Y+Y|FcFuWp_Jr~`J-%y%@_aA# zuA<)4)GP6gWgq$(>is+N^;mCgmopk?t5ZS(%M+XzFqQe^JIKCl%aqUro{ycko@M`V z#WZ!8o(61kHhO^PUf_Bs?p<g9K*bS!D)V`Fg!b&`e1-fHwfP9o_mHobYc8<uRnvP8 z6!T2VRfg%WUAH}Jsgl-{>r<Ow=YFr6+H;2MY~H<Nd`s==HJ;OZ1jZFgg8eMdMeh%} z=?G`4rf@x@rrVF48*)<@vToV(eY2!}TP!1V{q#eRPMyV><!Xw5IWSn%bjNsQyk3-c zY^GXyW@f&Uud`OZXaXts;qhxG4BgKAXJ#HDZf%X4(j&3N&!`Un5h5{eR~<bv|IA@N zE%BZng+}gSF1lBp7Lxj``0(ygosOJw;7*wqs^`p%>_1q~`nVFOSXbt$&Dj>Er;~A) zKhFDTj^*QbjKhlOz=vvabbZy5rY{3l!QFC;st>4%J=1t!#hKb~8F)~7?&6>Kx#xUr 
z14qXcrRTVbBf-rvC9y|fdP^B$Kf&{LoD-U6QS9$nm7At<*zR*&kASOv;EGt<?WM$J ztFzekHQ=iSd_Bc^Aa?=7xAA-VnR1<e5`5)WzBBXRl!+JN>xs1OGjjva%}gs&47w0} zr70=?w9gYu5&XRazOdI;aDImW73S<mu`8@qQ@S<-|7}XT-mE4&jJAT0^gvaCI>KA9 z6S`uZvUD}{p0A|))xh`y&J{U(6nd|teCFsfUWj|gc@yxH2Kex7m3U*!2}IV5?2aGT zjALUC9dOR+vn7a*Wa~MB+%kNJxhCWwGZ;T7!9y+MUvvfG-BONmHTiuj=K{p@dFro- z|JP;C=RSewKX-x8mvXL)@Ok+E|DXAM<D!rGJaedbv_CPQzl-`}viwiY=Y_8epBH{F zwDC()jOP#L?Z=q|4Cd`Cpbg<ww_S$Eu7pp-^Vrwm55i+xxyJKY7te!vYy*7af0M^r z;2XkYkH9a4$8P2dkA3B8c<dZ_hQ;Kux$xIH59cn5@m6^j{*Vj5y&RAA!ZU=&#>+;h zYICfHCvAd1tU)$D&3OS+P1z{&@I`p9$V8EoPY0(Ovhi|!V6fgSaVBGHT)F6)q8CTd z6E5h**k?r7q2GyZ9$PP%!(1Y&Pe*l@xM%)9u#nie@;^2*>~rBD`m*p7<aj!Eq;P{J zst3o)P^pi4v3p^c9Kw0e_m3;r(4AvENOb7<=US5|`QUx<oeTfP>(Zh-ALOjz8qV*W zLmT7Ql$MK5jlSUG8q?`EpubByWxZ(dx=1WWYqYJ=GbUp1YRUI4+AH~%(cV_<6#`QP z*aZGMqpcU`xspD&in1hMye@9cTT;jMq1SI&xokvp0zCRD>b;h99Vs>kl5x8vgEd0x zmU+hSh7=ig8ENYwi}GB~8@Ha<Cd(W_IUoPMku^9Ov((!zb-jPbztF#Gn<K~To-|x+ z-<p4EhEnw~c0c-}{cZfr4mgz?PgCaZIg^@G@X<?U9yE>jN5>MUi~S!Znb|{^?yws1 zd&L%b%9>#8;dvb&(gtEL#Q!7vbL<?DTcjrVmkuYUZ<K#{ES#ZqcVI=qz(>}qIm~(O z@RJR+t%!MWUXsJUpM3HEc*%Dwd#-ENV*_W7@Rek>{NaiJ`uLNH%8d!hN^>G{sP7GY zr637-?hAai;Qqi}1#gYf%I_YRHvQY<MooW9$?%kCDCuu0!yJC>>2lWmt|g(4`>Mv4 zcd5hu?^#K?zsNdwO;^?n=sA1;V675a`j#@IMDoPHNB^(59lwO%EgMz--a~2S=N{77 zdos)qU-aCIOusL14fRhfxI1udftF#!bnOw`6n`*KaUbmzoDE_9K-#{KIzp;)y~u9) z$Bm6A#6KZ5?E+34&ka<FALbeQq=~*bLcgTaFLGvUzVz2s^h+nY@#Xqu9{sXCu3r-9 z7r|o<?T_`#!}QC;^ou_s+As0{NWYZF^~+c3mv2t|x5uBNUlQq;B>F}AV+d`QKKXj! z?t**AY31LcUw%x#+-n)(d0w^q@3joS&@Wx|%l-6AkDB2>XU*_ujQ{tnuV34h^-KEY znAtD)T5c$jJn`?PU%o)U+_z#>`TgHWEC2d8v~uZJ>6ciae4ReIH*no0{gPt#%P{&y z+J3oyNi_N;{i1$xxD<EvT$%%|vbJ!g@htmD<Hz$?hky3*Z19P+Yn#6*yvgJduQP7T zhI7s>{4jp3A4pJclo-TvHe@B|yNWFXdug*fSK{#{^mM~hM29C{vc8&qbx&uE8@fGL ztvZ1&y2j=$d@)fOx?SRP^SqdUo*Ao6KaG4k%X#ugzC3Pd8glEUIr!1#a`wE9v)U}l zj%U6)YWj2Q)5`a-mbOVv_N+OZT_C#hS=O>vv6gk5xrLlN4ov<aFv<Fv7nn+bX(=#i z>2hwm(%yjmEDv2c9<Kd^;1c;dm49Mzy<@_a4lGf)(mZmW>>glx2AG~RVX6YAGr;st z98A)0gK;Z(l|K9ZFlD&$?VY^lM8;bZNqDn-e~)h;;Ons6tDKF^sbhUG(bz{<`)K!& z+DA|IZhthOw7lC*I;FO}+oQDn{B1|ok4`(j_9Lz1*t@oSj=gI?{$KApl>d6ysl5Dd za_!IF-JIDnw72>PLsz6LH=d;~Ia_r37|wO9J@9j@@^3#+R1W{#=u_q>`5Wj{OG3|C za2Lr@Dz0S?^0O^V3y!CGA04K6e^m3=%Gndl%YV_bwBX;LTUzkTeM<{|XC2c1AFnJe z_|CX>{~vSj0v}a%?)|Sl6Ox%s0tvYi&?H<mGa!P1CRUqCqLKuQa%r`mo)Vy~lLWDd zwqA+^&^8cMMx*rLd7Gf8Z8DlxD`?W5Q$SlAjkQwLUf#Zk1gr*vB7%@8koWuBGXrD9 z%iD9__y5o5GoQV)_u6Yc>silwZtGdkdg`gOzx~u}-3tnLogH}U@Yz*Q9l^HVa`v@P zJ?KbYx6R?`lRxvplV9Go{GqwK8Xc~6Z>5-#TOCOwGaad(A2<?x%N-fM-)+b({N2bj z&*AUI&3pfAar55#UgEsvlzFf+BlNrNxrIOYN?PctlUMKBa<XdITgL;tc0G|>`06Ws z)16!R{DkeB&KYJdvf&mS?*b=2zt`jB_e%)f42-*6{-P~{Gs$1{DsbKmoQLsCJO!M$ zIFf+VX~TKT$*=5c+%#|3a)*1}Tj{6aydF3k1t;zOZe+UWNCeJXW@m&Nf%C2Jx#umv zE<Lmb7`FoB^uW}@-^H1t>rV!Dz2-7Sze|FCfwS&N!bNP`Y={SrgmiyVdbYnPV~oG( zFus$+W4`@VW}d(30pNQI_}&7KzXQG_#<Q-;F|2PhKJz2Tui3Ty<osQ4nZ$L^XPc2~ zI|DqvY2v|Sx-Xt~egYnM0pF|0mggm7u1@1!(#ZHRyo0yB^|iE6e7Y%m6FjD8o1*wU zQ*<YDOST?#GHV%hV#UyjJ<o`DD6M0jao#L;?HuyFhjjkZIS$9iXl2JgOe<Dj4?5YI zXeE|T68;Nxf{ee&h8a3}HHJ<^&!Xj5p%a^)86VN}10SL1Fr9>9{K~H7&`Dzqot%OZ z8i|GRL1;wy7mYk{9djc}BQ;~sNF&Xn5%Btt(8!KYppjVmi2n!ZqYnBwy{;EN|6wR! 
zi}Ji*6OUxS+g|)JS@R6<hRPc3DO=VQFfk-+BvY0kLz>#cdg4&IQ;!Z8TSj`~68QHG z$d{$aq7lfJ^{mr8epdFh_119T0pc0%S>c#>?~0N0)~`sMw^Y0v9)1J7oHfp@UUVVJ z74jz7az(Nu+I|)K<(Xtm06L8=C%G33Lu~s=pV7YL$6kB|i(~K=#U*EpMdU}Ejms-y z<n}ktfJ;|5`#aQ^gyx&}aBk!z$!+u~FR>d@xm|#*?L((p5~wL`NIh%h${pydz&{b4 z>xMvW;XBUre7jT6x=Au}{nX@nO9Rz~=TnF9c|+jF!u6Ib@ujmercFjSmz;iSSdu6C zY}P?iP3k>gO`moz`tc0`U*WnHN#NT(uO{FvT$_5<1=WFxh2#$oEe)Js_&>q>P`Co{ zjsH%#*0aBy@t+leZO-R~?XSRgN~XVi8t>6DiI0wn-}5p09!&i>eUnS)Bf6Amw-!2G zX<5rG`g{&!^d;#?(C`z`z34ZUeMvgocj;Lv)4I^-`#67MDKz{CD{<s287U)E9Gs`e znR>O*^PyX^r;UKGF9Xjv1|}6g27UhweQD^t2}Ja@XxrF(^H-j*mhE7iXX#vs6OLuo z+)uLnzxW$C2tQ+Ok#qPaK`(#eS*-pg`nT(%j%Yv5lTJlHeojA9KGKH~K2J;^cG3sV z8a~yBcj?3V^x@mkO>7^|H21L2?YS3i+wKRr+$<WduCei2$~|W$*!>7Ub1r+7eq0&p z$Eop+8N=xK-pd$%aQ&}X6LqwH2b;Qofnq<L#D2sP7h?shlFSyZ!{u{z**-86lHW8B z{1%$JKMvqSmha>k^Dx`=zE||93GQS4V{oK7v=E#VY@c2~GOK}a29B|xxRv_?&aSlU zfVZh!9oPNud~a8Mjj#Wg_@K-0^Y;hvBiQAv?7q#^lgZkngWutZ-=ZbPZy}$B+Ay?% zuX*cO+7w?XUg$i;|MPuLH|K5o2fPbqV<z^Mz(WgomO0Enpfb;9Hf>sdVsL9W{g_{2 z8l!a?+9)AzPU}QhWLu4|KognZXd^x#t(U9MDyy}vqv*eG&VtV3xym3D(w~NZeU~~^ zCOzNmT!jw;9PGq)qAe?Q0c|zK=k{NN?<e|h9&2U+)-qjO<(E<4R3>Xs@MU5bUB>v7 zF<zytVVAIm{abrYcW{k!=Z9s6xK20E6M65UToU7$%-XOs=!iF4<QwrbhDWtNOng%n zxqSAIgI_X^*OlO(U@R4vqVblGqo1Gl9=I9TO3Mnq&G!e1Bj6mNx!Wu^v1}tk1J2<g z`8h?G?>G}fD<_ykRp9&&cbD|nLVsGzSqQ#g;*6VYVoQ<_es9<BYi{Vr#yRAgP`{~( z^92vEK5^q>%0gF@DU%QF=BAlLt<;$XE*;?VyIi}SS@i!)^y^CcH;aB&(*KIq56j45 zy(WBSj^ZfwO$&2kC2I!I(fsia+GdTe*+Ja-3DzW811RVE0^>oBltb0yid#Fud4L$x z?ZCdBHPSm4o1H3;|8A?+0<#%M`}+?g-*b(AUlIAfT;F5o>-+2IPxSjg@VyH^#6EDH z2(7rzbv^t(`feiha+KIht#|Jx4@);~chb&3y4L-p^3(qDp=;f}#El02YFaa?e-ChI zU0d<Q+vy{*T)sT=H@AR~-XqXoWUU?l-Imz3cI{Jy=6ye+KcBYd{=H9u<37F}>Z3%5 zwrMjD+7w;-B^#kh;(Ff|9cnIX-(VtiSPLD#10C*W9<Kt|It!Bg)E8;*sO}FsJ)tIO zFb`b53k|M^2DM%(8sv=23-P}OC$ooUa$spcF{3sOZi5Ej)|wggR|4%7r<z0isN2O@ zMSYO+Sw#JvqA|TIpw1>}tQQ(PLfbR28>+x3b7AWt`q}762{y-<^s7(Xp=G_R68*XG zyE4C<S1;(-{MP$kXWgbc&d*7=(i@`RtfJofvHqawZ|Itm*5x~)8~DwJV|xmMT3gbZ zd#;t$r&t25FGcAoy1w6XYCOa1`)+XdX@1zmm~qs4%Wl@yx3jLkoptp;;nP+8L_NOV zTzntZ;7h(?`F`pXd3I^ohgrq5SRZ^ePHydrrn%k;otub_yr>zvBQ9|38RHB_hkerB z#28GY?kecu0QfLG-#61VZpXjv0tfVkJs?wpUaKhB@)P1;_5Y491Y5=`#v&Yh&<xDE zd8XCWvClNA?o#d*chHgRHOq3ZHXZ5XdH=om*4#;Uyr3OlUd6bnfAaC=K4CadjJPu5 z&0RYt1})3qmKz|xx^7sTcvSASW?(gQCmcs|I)-8K;NUBZtCE7_fnEK7kUSU7-2ak( z3_fQOlW)Pt9PqQ#F@lQ!b4~FN+|HbEn|JS5EFdvZU29E_9Ury`d>4Y-<+SU%+TU>$ zn@)E1`@|$1fey;G$C&uOZsfG!Ux*$&z<l@KcX@yDF#mf!@P6(8{uOIn^~9sqF$Uk` z_jhFXoA8FWka_))7{w2XWe}a}`)bN0A`=U!r?$TC{=UJ3?|H2;U7Yz#%z?XaG4TRr zR!-Nt3lz8IZdgm)LH%Un2Q6nq{RL(q-%SoE_)Xoj>@#rWG~C)XxN#+Us-#bFmejHy z*0<{Urf&6b?f6eKtnSjab4`caeCtPoyN+=P0E6anE^!sB>BqL3f<pO<^*+ED6WeaT zx0s)*%c4zU6Q`@qU(;SWxUZ+qf8kw$CrqoY;4=%`NPePQ!K>uE)(gBP@xhDOPq67i z_K4GNuOp}H1>*MPht4KO?;+}JXI?cEqql?p>><AI3z=4<i`Z|s9ov`HSAQYr;Q^P% z44G=5Ws&!&_H$$sgOU|GpYrHZ$hU0d+nHn3pc8uyJC=9;cgIQ%XFf&e%24_7kHkuS za7CCl1H>{1M92TY+WX)dYv%{oD}L62%y%OD(M#_b#{A~2zcS(}VtCItG#B8l(sOwJ zq>()J^gr_|xaDlKkoUyQU5%Xa_yW3=%Y&SS?p4kYP1Ps%KjVtL@8rGsn7)ga5iLnB zmb>kEtbt;_)7;{m%s=+S4~VgRg7YLpnaHRtayn%1ofIq@F+8{p{k9pJ&PmN^s9@gs zPTaZ6mteN~lFiNqw;@Mp`!V8pC)L&z*1D4#DrvLI%6;7h4SIp07#iP<ek?gSf&XiG zFCMgxt7tXERdJ3laupBS&$VJ<*26B^^QJ=k=(yd}40{C8eVk^7V4yDFTGRiG@WH&X z@xfjK)+Ak?bnLNMJgLvhAJ>Y0?yjzecbF#^66Yg%TR>YbwdG|W+=y_TU-gN>r<MCK z%=a&6ye?zh${4>=cw7m23c_^E{*glg`sn7Dx4Aw?xlHPZ6SIEH*(1AwFF^e0yPv@y z#S@z&>!9L!jODTl@ELCV)8|n=F~ln8&gb06o)OFHk+p98_!Z=JT#MeVcj{{w`aml( zdmVB@a#hd7gBBp);IZ>r3*S%+kEp>G^IEP3uSE{H!*~9o82zLZUMM?2Wdq!=h2HeN zhx-it_bqoi=E1LuR>qqm<d98^qS<Z3Ip>o0_kw3)s-CF;&-y>tavXY;HWRH;7u3{v z3&jIPv)df*zI^EOIq)Uk7Hvl|YYgoNC|eWnFDjsXg_UzbHSI;q)X`q9mBD#{oKwo0 
zTE)@9XY9VymXq&&c3WZoD!Nd<K(*CETh;L0YU+iCkh`PodL{oH=#Ul6clEXTFm@_) z<PYd_uYfD*avtW$aOTLto=L%!b;EUD=b<Wmc_k_ChGWdr0=_-O*++uI&3tJ+G`Mw; z+=Yt{4I;yG*@uvYp5r)F27GR{TXSLI>(Hohfj%GH=E$`B)5Dm`*Bjuy>U1;biX(F_ z#GLcgR2M1-sqpKM%((*a#om)XD=j?d#`Ax*=$z|Xu9|bLTs7xj;Ho*dk88oD!ym3l zNhL=h`UdkZKQix*1M7#*6lCcLY~<9?2k2>%rKWFi27AIfdYF$kE|44VI!A{NQT~{7 z400hS^a6b<W^9OAoIi_x*z=8cqVp~Kes%BQeEaN1=S87_Ba1U}G8*=SzXjk(-zKxy zOnA8xT&-jeQYCnL9KVTp+6?M_+L6`O<ru}D!K{W_x7o>lvj)lXM9Mt+_rdvIaHp|| z@+{H)ON{0EVIzVsyfK*4Ale6ydEoFkebt;6-T4@I>Fk;dtM(4g*E@~Bc!%yQz?))5 zbzjZB^1SGNE%(avqWf0v(e3Q}7q}Px^?V=qnp?W}a_<E%mEhky%0J)-_ZB`p;rv5z zZu7p~gEK^%<Z0>99*F{Mfbu|~zYl*yE&bIP<U+3jz7xHQMi1I!GA3m8!TX`}YVgzL zNF+u-k@&MwTt_v?uSa~kuN1s-_N(|-QEj?K{DSPO48}3nx95eyjlJkRHeaim(f$|o zKjJ(-!v9|7*{ACGH)XTGGQ1C4I{y1Zbo_eyCLcn}DIGt<rZeI0HJ-J^qlZaPiTD(R zJK1)rm!FpTO=r;YccJ4;H|)l*vX%Q5?gLy^Pd<2e_`>=*d_;L}?6`4_uXuYJS2Vr3 zgO^42Y)9gOfeBsxte}^@`t|7Ql1J{UIC~#ZTD@IQoA$(}#i5Tk^DDPHVr>ZR-HO@~ z(x2^pHqdBzZ%sHRx7eA`7rXB!x}WCRbnHUL#uZ=EeuraN@EvU7-PY(n_|t~>(V>r` zLmwyp<^aEg>`(fDeHWTr#q3q+W$)-da(%6Xcl6;yjOH)4^=x9m-0X9zuBk?i&*&RW zFau8FM3GPFO*I!6UMHFpo$~uDxPtF@s9YKHWu@@N_~?HZ^s8@6N1FN4L(5b8pQ(XI zEXc>sWDTm~(28BqUzdBr3!BQN^D@WRpdW}&%YU&h(pH#P=k$p;t3CNWYiOs0_Pd!g z{mvxcF|M1Q7~GgU#@uA6Lof&r_&t3G&_&((Wv#-G@bE5tsg`e}dIRxRw%*_-Cn52e zvs%<Q<fr;o{fHT8<$t?>$dsA1TTLI<;zOj&&T8O{;zY1T%kDigIR6sLu8s6T<6`5A z@>RrT)Klk5>Q)<TqxB<S&gUD!UCuMX9SgI{>baYJr?F$B^QVO0iedfF)B@W|;TOL{ z4fy4ZvM%3*7dKUdJMqxP;=htJz|jzaO+0j)Bd3r4oWieoSrq?K*!4{n^)e2rQP@RK z!j1Ob;Ir_FSIRCBt%<+&g7aMZt~r!#HRXH&A6Zmc(osjw*%J2ryxcjsv8vJ{=IgWV zg=?upz6ka82-gG7F+PpGZ0OiJJE*f0JgHvcO!kQS*G%67kFcMU_7jOU5j}Y)yqJ@V z%#MzeJw}X+V$rqMp}zcuD?Bf>1D-P$y?-@V#nWluso!W{g)8)4nPLJyy5I04evQ&e zHqVRtHSGN$&wZL-<MZtad9~~`&pVS};{jj|MEn~2u-PR0V);*$_XLLMkn#h$nFFop zkk#-<TaQAfOw@Dy+3e5icpW;GUi5ltd{B0WbVC={7i($?wTEsWx}$Q3$-XSd9~9Le zd(j!Awnzi_&G2DAK9h(KW}p|C>!LiFI49{C@Sd$#K~IMNe?*_jF&*`lJmK1TA@MDi z?T<9<BWY$H%5Rd}BLB#$OHAErerqnt9S{G77v~-y)|ys;9ZMU6e>->yM0_Xi<MFK} z=#isntNIbYSNgB~KNXg{OZZE~t`^UdpG9SRc&0d_7i-wN81o$YVf$Q+bW(Kr9{u0W zIrNN0CpKgX-*lo!p7OmcF$1P<{Uugi!zC)yLVUK??{mK<9ue7lJhrUpR7_=^vuF`= zQhv|a@m$0HPm^bYi?5y@WL?CTEt5V7`)F1}$59#cG5?H_-zA&}Askz2LLZVZL3T+! 
zv2&6O2DzZVtl~O=+>zfT7e7TF{A?{_DEn^paGO5_3*Z$YbW-t@wT#D$<X+E%_Eyj* z#UfV&dlfwTVZQsT(Yizi`{mNvFPDiQJ|4Ys*r|Od1@JVrr#5`FnS>8Xeizj@5ubB( ze+W5c`yK^0&b;i>{)RH{t9||fcwj>few&&L`~wqNJJI+0Hu_yH-|eLjPJBHpEoa{v z?gOjKiYoE*)ZpjQSjg8?wI}T7Ir%(t2LFzDRzAL{>amx%`rODhXsHT%SYWyOHnZPT zZK=(bk>@LEr-yjJ0<YETg(u-h=@9<%kt+e<ly7HMJbTP(J3yOi@ABV;eLUi0D&wY1 zG4$9<J5e8x=H|JgLFQv6_S-ec^B;n%+b&j|Xm;0r^kM0(7fxNbiDWHXq!XLX?4@^P z<KxK;J)3GZz5qY&<o{yyF~)nK`r@#UM|z%URWP;4_Yng_0W`f1-)9B#`vumlRp&zC z2>DtxoH_dL;EX)_c_u&5hm0GvcxFG)y=T}nm&u+v=!LQPh#%h#Y@g=8UtfBfKO(dK z%X{W-E&Vv3FGnUMnjg*GKls>9_P(lN{++^i0G~?bzY$A~uB-ohk<;rLm*f*AyF8|4 z?(kKmyY91Go)Jmp0!{P}Y_mMIzh8Avj`ZQ-=<>vXH+WqBjy7wAXFcCo%#+8hB-ZYS zd*l~dZ<VGFXCGpc%XI8OkA<FV>ll+y(~--;$4bU`-A_td>%nKWFUgB-lv?u@)6wAb zcWAtpB%3)+my?G8-t{wh)XV>4M36HCo;ZlE_zH3A2i++R@VpIe`HtYrJEsJ@v6b|m zXHT^Cqz7M4O$buQ$R=`&EWOa#>P2S=)4^#!a#Ze|`i}h;FVD&O$ah-rk{&{tSM0js z@h@*a?;~Yc`{JCJQ)OOh3+B<j=ObkbkUdTCukTo6MlNLD^bR%=M{CFA&T$PR*MV{$ zMDrdH1EIO;A|s+ZulXiqSM8&G<KlS}IV;@V+eQ{x^*O4~TUR%?>(01&=iS|uQ$cRS z+R&H0f$Q+ATge`kO)BxH))QmB(sI=Xev-_Rt$2gGwu-pAg=75#x5x%z446l!Y?))S zweG#Fe{gkfzuuK&hsb`Eee;(R(-`~y)5iH_$__Hl_2dT$d>!5qiC=F*1_Ty`_y0en zJwU`J5Pz8}*{nXgN=&NCKPXw@bnJ}g)rscRNrwiH;!BUl-z&$?tK=q94uyI2BLIE7 zxK=^WwoOGY7Rhh<w2-q~U!onuvo_$<GsE6~7qr9}Zg5#=2fe8U!QSYa$JEKe-m$0G zJdV(xCiYu4`N*4dGS%C3(zG_|*`4vNO^>)+>sGJ3q;7Qszx9_iEhS$OFzj_|eK6hL zBe`8`pc!VyL&QxI2gmbF`+3qJI;b%h8u;jsfv=N3pzm#||Bk<-4*uPA(%ITn$v%e* zO-D29d-eCq*TG)w_wU^GXo}U?bQAu<(TroF*|P5o{*HZaGkqO%sSX*Zbpz&p?O4tt zwk${2>MKmgxA=x}8CZ3NzeBVpIM%KUuS@u8zn*U;=N;rtT@D<=Df09bPKD3y^xJ$m zxb;?*2Oc-D=$nf&SpV0$tG?kH9S8AbeTUrCeh7GIHhssQ=lM;*qgZ^!49sL)RCgZx zSE1K*&D$Qz<nUbI*5B{%_#rYe+TK{&JJYzUtySQzj`f0W#$Ec9>OY@)qR;DqZy~=^ z_&YiX9i@l%-CNoJ^db`+be!F;^*-s3es~CcWJ~?m!P{)c$L@n=kDuC-ef|LN(PQns zzv{Q1Hvy}jtzyli7drJ)ZkKdop4D@WmW%oj4Dh)kYiy#a*!{PHrT#1ACjhss;|z34 z7M03Z65fBS_A=~#=K2JW=IjAK{iSWe5yr6<dW&bVaGeQ`|5O=p*9`uL=F^Gh(HSgr z>^yOYi`W)&4P77C`b+ex<2Nc7)-e3k@%XC~S`{C8B(jz!{f_fa(63BrG%=w)4OVMC zeaW@b`kq5>)B$rr1AD*G_wvRmkHKhsK2g0ayv~}(nQzf=r0Z?MKIpaU$m|;jzrIm* zFbBM4qG5OKbMR;HMDpZJ<C&E>OV3K-e+4tOcINW2=sQsGPyT@bYoB>n`3EMD)A(G< z_cWBWzWJyLa^{My6Mhg`x9iRF4;)ALYeN4jm}xqCu=S5a#}_w{>!Qqb<j-P#jQ!=a zfWvz}?b1#jb@nsIn2WPAZx}PO)XJP?naqZ}Mp%snKfS{1JHOnUWMz81Jg?*z;CGby zG3lrAUf(#EHx^Fz4sRtdd+4a<7jW)FN9H?nWM!jcx%w`dTEFSI=nI{7k8=!jF1w+Z z_AT~B2WW4tGqa(dd>8weW4-kKVscg~ZbG{AarS&h`&EU%+zZZtx%LI*aI|lm8Jk%A zj^!QwgwI&Bx*4zHS>WLd;r-gjIdgCXcu@Vqk?<9*vz~Dgt~Q?-obm6R>!vj#mC-y9 zev83pH}>_3JIbM>nb6Y}&=u$2Tz?sPCdyi&y-(2HDnFj@fH}L6IoqQ7!km!JqPZfu z{b{<}S<HdxTwKXq+!mAXU?FqrRd_(XWF+&xE;8Ts|3diA$DT2FdB*ZjcwWX+cNOL0 zxGv<K;Ig>K4zzPK)WdIMVZE6)R#W%bNLy#luV97VEjEo&7+l8{=VXQYqR(ij^LRMV zl=>fSi)UNYBYo)vN725hy+?Q_|Get?3-ug<=MT-}u&dal$F8Ni*ypMJasm1ve}ggM zd|5rredRw=+s*c#vXAa#3lM|RIycO3SIJ(*Zb4Se|F@<7fo8@pnzJt&N20iM#gWvR z@IK?{^Jr{|Y=@!G2A?xKqxm7-<eYSk*IE=XR{6<mPTmOSs9?`EuG$v_1A3Nxqyg@+ zJvy)_Q&n$u4tf^%0d#?A8`N#nWG!>^0Be}*7sdr^GW{Kkkiq5bE11o`f~#D_8(-(| zSbtlfy@d6_z*zslI%|04l@rX!%KQ<Pl~!_MrInJp9(h{HFS&Y2yR*8c-Kw6%&)0ta zSKuiodDa3eq4p8@*xT&iy3!<fttW<gHf=On!)tqF^Cy{Qds)wC-)F-+&g3pHzIDa) zBy$$zL~D4%QtkKTx7!*~y9&Kfu|3<+-%D7>Sj!o!_$22PTc&oc&v+&K3XlVun_4@q z;Qkw5_ji2r9RI+#<|^g^UyDa;me_5^G**wT^w#{W)SG0|yXsDq?K%%U5x<tY4LuZF z*t0KTZ4S2T)N0}eYQbYO{TPZvwGj)0g$`X!do{E-?lxb0t*_F1!!Js`#AK%ed+Kg< zoHlHezJ!LHZPZ;$-Akz3fCK3eL+h_z$~vpFz_*szj%vn0@u{=G_oV9DcG?XRt6Y0v z+WMSH)f3xmDZ2oB-U)81Z}NB8-vG;}enYua-#`QSqh<v%&p<2Gt(@+)z@gZ`C6O^# zyk8rAdVAVEImwLu63WS6AQ%GF4LwfRSa>2ZLyN$icp7HP<VIi)(7tkvs(t0GXPj6+ zL5E;}-J{o@)4mrU!bIbowGx`rxoYI_?-1RKew07zZKub#(u%Kr$#{snP4vx%-rlWi z%Gp_cW8qbHTz87+@oSf~7duAO?tu1Qh8C}a7DZe7|83}xZyFSL_6KrsTphTfaL`Jz 
z>5kk4_-B)Cx?8|iITdT5J<&>X!Kwn;Eb=Xg&Y6459^wCSG5<TE&50(-hJ8Qz)gL97 z#%SIP-Y@wlb_r%;DSZcl_lfGIg|h=oi9;V@!<*|EZs*Nwi_D#Enmg40B6bPuZF3m| za@3h+iIY|pZ0EV!U&A%p-d<>OyOTU+_&r?Abr<a>7KC|(JKJ<rK$q?UbOP|)j4dvH zlL)`5wA`~6fM0x@`19}wrTaUQul08*zPm<y5r~7XB@TKm{9(jxHSLLc@Dq4ML%yey zbBPk!r<r;${GyhzzY=<_`0C{L3h2~5-ap{E+TX$1h3!d`D!s$ElzQ(225_7@h;9F} zmFzi?uwJowQ<JVX9m&9$kN-1i3He0Hhra}x5|6ltdS_SP&@P_vGT*&3?cSW()#tS* zQ?AKMwrMc&+y0Ik%4-f>N%`~OQ<^tzlo!pO2hAo?W<i8zmFrh~(B0`_d~C)KCEUgG zW90y*4;?Oev3Rohaa3m$PZmEe#;)zv88H!lEF6Cxe$3wD4Q-6k66V4pE3rZJc{O`= z{P5(Z@Z+D~c4K?nZQpLc8veY>O7tACOyzyxczu3KrT9Z?UESObm-2r`a^+O=8sY;< ze4BocaKNjrRL?=~#j_`aw<*;(wvPiJ`QR+S`h0$qY`%RSxjZJDlv&#%y!%T3c{bk` zEKST6?RUEpy8AhFxBiR9dp`G~N$jEdyEQktzUv(Bn`2VCrm_d#&v(TRQ;Y9q{!4sw z@9gs%-gc&Rb%PWB?^we3P0XV;yq8^Y5V;kOHy&QQ3jOO~f|;(oCciH^uS+~qWfxI) z9r#gBkXg*t+0Y}o1!^}#_u{j;$Ow(I;uaD)kF=6C9(}jI#@D_Iei4Z9Xy^X{k5-Om z@z|k!P&5_G2Srn{d{Df&8XD9(_(X7@#9UyGp5lW^;@y-f9>NE|04_u`u`*2ll4@|U zcGcRPb+hvtB=aTH_9N4_oE+Tve=O6$*Z*H-j`)?kPVq)@#H%YuC%I$cTa0UJB0K^9 zo0=HmTUE#d%}WpS5<m5<mEcKy%M-)5mO?9C(8(W?{YB7@=Oy@8%5AmnFT=w|Ft0rD zuOvrO<pz5`4X?b{o<}1p*#nfg9v=7mh`xhP(q8k`Dg4fBuVJj=l><rev)Q*zYEPb0 z=^fEt>g9Zs)R#qvj-=Xy)^N|;3HRg-!q1Z7XTyPgA^cx_>VR{Sk67mEoW&9P8MJX@ z_0o39nuE~q!D;v9+*m!ieK_TqUp7DUz^kG%_y)?0|B3cqru-@XN12sT{s%v+w322e zM)+9|K6LT353c`r>=X-o#ev=8Y#kB9)4bLQ+y1oq82tZHXv7sj)*@HL(=6tmn{n7r z|Fn*;v4Ea!8M25miUa53qml>rm=O(+f%C_~^<?B(!&$dJRSn#iS68>suJ*R4fH%p5 z)UPpioMAVyVq(J*_B^M$%yh?mf5$Fs*sPTfN9}5S!f(>nUVe8`r}h{hgzu(1(rWSb z`L3PPl(W*r*FJ4!b=@-M`#FE9xux)z)wdKjI<h^D)H}UmI_H1p)M6LT$GAx)rpe|Z z$DCPRKlION7*+e-!W<`jN@eJ;$`B(}tNjb&A%iBP_F0}^ACJuE+xO2}RnQC{d5d#U z5A%DyW_jUL)yoT~@_sik?V;>9$pO+Z(gev3<~u@+`cCAv2mIX4^8?s+<URBq!oIkh zb*pGU*I>^&D5JhLQEnwa$;f1As@Jl*T<EEim7*=lN^&gOvT|EQR*JUd+m78YA|CO< z^}n(AjMUQ>GN}XGtD~N)_6PpImaAGPKE}TA=$>%RYvly4=Da@XBeGXqk$K&n!g>?5 zDf=f8dF!&A*c7hXec)HRUm0`U4Q=kHkHygEcH~6@^yh;9+|b_v<7{Yv{_cVPhFeb0 zW6)m!`kPQ4XrEkN&^`kCqwawe=&z6cbROvM2IwyZnUf0rJrDiu#w=J3@7Fq^Vte*d z=P~p);zoSuT%bH=akUTN+n71!uAG}|t}lGB`uf5*9r2#$Id?U_;+%#iM@H>J=<Y50 zH)s-Vx;x^`?7GE&Zo|_xHy1vGjNI+WvguA`WHTxLGE8@gwRz}d#HsjhhNd5crd3}G zbS4=n8tY0itYIej-h{@khL`--oQC^3_>6oOKc?Is%Djz_D`jwS#y0*JZFvfyttUR& zKJmb>k{FZC#9VmbL!U?XiJqc1fE!-)S+Y+&Eh_soZn6Pf5q_2b?%;g+B3`6V=y4q{ z#`!x!T>l6S4c)Ihl&8gggr~)w=4r@=PvU9o!2eL5_6qHjCqTCH2HDEefomLb4HeMU zZ0PDL<c9~i*WXs%E}E;g;<WZuDcyHHwsKg0q@=zK-AVU4A098AVIsVJqV!MbY$-f< zH#~eSYq3|COzs*1P0XQwa=q2A@<8jryoS8JhoJMT;C-doG|rkE3l|utZJ!)O2FR|F zT?}uSGsqk{ztDJB5x<lQjSq*$zhxyu%SpB8!w>&Zael+Q(D}2_wFT{KJyp5w#a|cE zxABhnT5@0c9-1n?I;?i4mC^OBDR<`_CQr%y>Tea^WimZ?LEpcr7~inck#5uX^Wbi` z3G-EWV%L2CISmiO7ayW8#An*{tundH9b&R=`c`@NK(d!6-gh`~Gj-owsQR!)=7@*B z%~;<DegB>JZxGwJ10E`WShB-(?B&_J_-f(T)9-;kqWx3O;auJi@@-;Xr8kK)$i4(m zK~CDX{d&ve*`Kg3rwN{!2z`5??<!!B@8HYSt=xFRu``0Vo59<Zq?Cqb^!M3{sm$l3 z+Bv|q2$?ht+)aR2C>Mt0(>K7~a#GabpZEO`-0e0AwO>Idy*A|swoJN?`MVsMw45>6 zRWa?qCX<j)l1cYPWKzZ#R~7gfPo2$u#Fj~6eEt|*?gppg7hf`_V-YebEVmq?%i$L{ zAQRL_^-H)Dzc>&49l~EP*)<G)f&SjHfN#YwlCalYj<77c5uAqYw!~WK-1ch-ui_iB z+YGkCC&?n=IF?qoa|Zepo;||c4gga*?FEoi1+=|~XY~eO2xD3g4{-5pEzkZtc+yKl z^zoO_$76X?*d|Q0ZNlHvW+MDeKAEUp`APm5!L}fx)5r2bA3UH7UzuzG_R!S!O>4;M z!e=I1RqNmKov2OuV#EhUmp`Z7{ssTh+NoF=cu=hDlrAQ|6|0BImhXn9qdd@4CA&5b z|AX6>ow83QI}?$eedu%xBC=C_E}6E6!{emCErFib!iP4%=YsILpTpbS@HwxQQh6Ty z&6}TE>BYw6|5Pvil3sTedhu05bh~8e{k)3v8h$3-j`<?p?k!~Nph35TW}~{D=1;aG zf%%i*n;FsR8ks-K(dlenf{vBoOz^zLTs*hp+y?O#$=2oQYbzZdo5ndSwd*1Ow1z(+ zFaHeh8FXaZyvHtMrPV5KT(VW=C0mc8KR#IVox+Ezzf-9Cq{C^hNM^pxJZEndXQwBJ z<}g?O4h=@(j>^{GQjRlPeDAP^l*(M$&Hs|GpVzmcyeI#!buiIK{=u+p?W0eR0dF7k zs*7u^Z0%q0oUJzwm8~fs(aBP5olN!uF@m;ijn&Db_Cg|KG*q@`kOwFgntKVlmTZ;$ 
zpXf+wSPISEh@G|=y88xnckgXW+wa3hlk82hQaqiOvvN4}=gN<(bVH|(I`}pAnk!>O zrOx>wwjl8alalHpx#}OF%Qk*4e&g|ZjKyY}jQ%$XpT{J8ACu4nlcC3Y=Er@O13e+x z_E|3~Dd;N2cd-~)BxmLKn8#dnAYZ4sjCU;ex%flm_ZZ|G`91bJNBF*BIlJ!SyKnMc z6S7l&59ITla^~pRE5|nMbvnDQiZdO{`F^D%(e`_!!*iuuPK(qjzlURlnT{_cv==zU z15~z@x^A?b#3Lp87Bd&#;I|U}a}{y``X+x(nymwjpuL0mJoL@9!1?Vv&?_X*MxY1$ zz#7iHORinW9O|jaZx{=WOAm-cmfeLOpt&erU<%`~4Ee=AL*FyVvTfK`%h7Xx%3PG5 zdkcE*R7aL)Ds%ggEf>$M=Vp|CK0UXY{TER^_m+sBOKvq=515QB$hU2&1RtAJLcfXV z0ke7jci`LOObz`Tdcb7lMznvDWs+IN{64Rqo6lP9KP*#HPy2Wd43Q}ZhR77HA!{zg zMaHC&F)6sRpkXy*vbSO^I!1EsB8^K#$G97wxsp9Xw?GF`U1Ry`2Xiz=!HB-m9?>_x z6Qgh34j!kXZ$!uFqc(8Z7Lg5{U3z94ctMN}yb`-Z`o;sczL8j)@ugJ-RoK9?f1~=w z&FF8j`o>alKQ5wg9CA8seM4iU{;H2reM94@F^uXP8n>1FPU#yF+jnV1-_SbBne>f3 z=HMsk8$Q~tp-vw<MI~3+zIx`1dG?+ECY|E5#y?i4c$GE>h>4bLkM(!O`e$S#%V+-> zJVg4c?E{PY?AbRk41O%1eG+oM61mz0eq<vrfFDcFdyw-7;2A3;a$fOCiSSzSQ~AB_ zbqs6x1N`b)c<xSk?$y>XeEK)GzX<Pj+_tFwpp{y=KR>bZ5IpxP_-ti{qcXe(==Qvg z?AIFL_2@L;{_0eI=e2(ueMWYNd}9vy%6agJFCqVbj{J8(%TD-hnj-<<j`NiKmwkOV za1`M`=|`R}NB%d#b6z86pb?%UTlr7W*EDCEtv}C0f0o@W`L8{m(qARFiAm^s#y`EG zADKRY{LgicvgN;B#zHrb$bXfW{Fko&bj^3E`@4mzuN0o6dVY#5yus!<>G*h(L-5|| z*F!H+xTEs_Ey{6zg-<!x9ms##9g_c_*B98s9=HE5e1Qw!3CsT_^yzWnUBbLzJs~3h zPv;vMh@5lt;fS36GwsXGfR-d9j$w~Q^IdD7?w7SzPn<wG=MA_lVnE;<#I@A4EHWM2 zkvE1ovRdf+SN$t@1;#m^?8lZHfJYF+5^@kPHI47F1ME2WD(cCH_i0UDYxS&iZ7t<n z#Y(+#Q8*{{cC{0Zk$b`woEG%%bOg(PYUc%b!Zjy8SU&D_jNF&;munwDv(ANxoJRm7 z82A;jM7PrJVZ}(%b_>^N+uG-C#{>`~O;qmIv9!O9d-c)9|J&%7&ScPe3&f&~e2Cbr zeQ%ng!9Dy=#<va<+oZDzbPmIIbcq(?4iqc3S?!#P`3T1kDIVnk^*<i`aTN?|Teh!a z(tb*xn`zs;Ke$z~PvN*RE3}E{u1H*#`WG9QHS}Br_PA{M=(z|V-@E%^!WI2h#4Utl z6wdtKULPwv9b=`Ks;!Y2E5%zW&T3n{S*AFvh43VedpG^qljdk0{cU1qB6$oxAeLR@ zuXx9L&H?Nhp%^mlpU(;14vt=6A3pTcm&G3ZKY=gK(rGa4xwnX~Reauc@mAx9JUhmo z{FTlzeGe0xO)j2+HQDSV8e3B2$~H4}7LS9rze|1n_+VWl${DMyt}(>I7jq_0?wZ_w z{EzdGGw%Nrda4`i2wFyG@r((Ts(#|C{<V}?O6sm3jxI%f%|^<&MiAQxyce-vz0Zv4 z3LsMj>lx1iwBwwz8YWX8<r=Cv3-CkEb<-Z;0_bur^_QTBtUafs$UPjLf;LrG6?@B; z0Q=t}efRTixbI^^qp9m*{=b9%3a%F71qEvz{Zx$DUiKNi;T#?MJGhMIdSD*eIUc&> z!e`T0u|};k4fWk|_UaIaz4bxx_I|RNv7G1q#BgaWE~bx)FH=qg7kdE&+Ze_~aP`tR zLk<Mt%d&DB-XCT))-y(Hc_!TV6Jw?NqkXLg-X3T|eQia5(;g_%i1gk1v7E&N{A(ST zx2{^`?^sQlG3YjB^z#ks&tpE+^L#BZ{;Sh;X-r&}@zgU9HAcukJN6(KoNDb&aOT7B zzerz>Gq$Q*<11KQ<g+Mox*7t&(<zu^#<u(ru}B}oV-|SKR{j+mhaW<J&4OQc74#E@ z|E^MEA!Fdz`Hh;#f?K#9ppLpPo`(At>Zs!zJ^La15=5silTH4KFg<00=cmE5#(p93 zaoU&gufVH436yE*XPoQ7gYfzqb-5WU^;<aA+No$iJeU6u<JI74(Y0=^aigo8${BQ; zcYVHiwPhEc=20u*Q6Gs{o9rKm#(1^BpXwrf&yMjT#@lJbZ-uI;V`zNWKV7K4asChQ zR32Gue}ASJw14zm2<4_wE(*!FUU*dx@<)5A6hFO(IWvfi0fx42r)jf@F;e-Vc`Wp; z>n?wPR8|t_(&1WBwaayLeS<5Xc<V<^tNU($Kl&*#Y|0tp4E4fWQqxT11b%;wC(ryE ze=Ep%V(Zv+_98Ri|7r%vqXkYfLU((4PwuBG#ygXD>?N{elAHOT`Q7*o-yIt}ntF+G zhW-?vruflRzH265QZw(3e{_(&Ta9_-I}gi<EXrkuygU<acaL?nwtPLIRla$<d|o>B z<=SQPDAV$Y_U7;IY^5&mDqm1_6r5=4yoq<C>BAoS0Q?)a9=&1!Ik>?!YUaQ;2RWFC z?aXDL^nUxA(N}J{eR@W`o3EU_K~Gcm^_-alozTUfm>Zf)uOnx7lPjPbnwS4u{r1yO z<|cEpZtkVrGcONyAxm8B`MxIfuX8i!yB^+n@8dTqW^n7CaQ=X2(fL}?h1hFdyO4TX zXrncgSVrnFzrXo@U}$iiC=R-~$EVTtG&!oZ&vGqr6tMqX^I!%!)dW+^_x=6KDY6zi z{G@lQsDCrx81`+L<^<>`H}tFN8bihP`dh1r!(X+0a3lD6!ckqO{n)P<_Dm~>%(u@n zjTbw>v&QO4e}`nSJr3RBc+<n=&5zBeJwWWfE2(tA4L!U-n=ariw$l3Y=(Bqm@r=Mq zjBI1CO%vhRIXAkX_=Q=ufVxiS)gN?PI}fMnECX@|K4*gSUirdn#BL4b-t1T=I_+d% z*gjyTE}!?GhXq6IhnZMSZhZ0u_)W8&cfp{eTt?k_$*bp9;jby-9=;U9R!`3hk6F5j ztDCDB2vb)#w@6p?hKp1#w{GqX{hwDicbcwd)!eCEtCH&Gp3l|K_vh(3=gdyzx}E?3 zo&Fba-{&@s`CRMBQ$1c~O#8^O`kyj+`hP{;+_Smn0p}R5J>!^<ypvDdupcff@=SGp z2RiO%o@wt*9`!v+8`9g@-ZPTDIO*kyrqRPyW!+rst_h!s`vEYR@A)4P?OjV58^@-8 zu5d_B7Mow^GcG4MYw|Gi6nxw9n!mIReBRDWVV@Cv*GSVCJI>&Hg9|$B6iq>c-S|ji 
z$1QsQ=WLS>FQ4@j>Ps`oFZOC4wQbPR{joM@G#_NI&bE9lA!xzptg~FR?f<8`f)4(7 zME_sw3R*6BUQ8MBMOTk2XecAuVQ~(29%K4d?ITHs25<G>R=^cIbfE9B^$W>p<<qIO zOhe@=eB)n!@IB#Oe)3|=)u()f4)Ahi$=BPP&Lck;bCevI1M63GJ^bK_%XX;`TiIKJ zul=@auAY=|>?3{__W0|}Ez?j7e|1skk{SNi_5A;kmE46b9bABntnD$uvDCk0)q-}` zFqU;=H#f1yY|&1DcF-*vl(Xg`=??sq3sNwPXKO4qw}^3X(A-L7&Tw|bp=RXnf>GqA zqmD}En93zG->#vY&K;z0TeA4w3=b)c)Wh2Eq3p#b=tmzLwPr!Py)TD-hd+alQQr&~ zGTp`Zfp{~Z_p%Loc>j~3?}ta;V`J$3kA}X_jJ)T}PrZML_j$($PuI_xi~kE4hvuIg zNVvRT{Kml?_IpI{pZ-^xXAUjK-g6@JCh%iDv5(wb^Ywf^_;Cgn^-th84&20+K|akd zC3pN<`1XQ2>kxcvenOJ}{!)|BU`#?+KEK>N=WEWC9W4{GceJQ%Ro&$u)R`+jr~=<= zN50p+*p;jwhV*|4{~P0I$mi!w@wZOM_P6T!D(a3cw>WA0Uw_O`@4CqgVaC^8r#`E_ z(iHRLbAtOafBOV}l&MwTKA!K?yE^Wj>HfkA{7R8KRVkO>UI#o?X4ov%F}{vmGLDv; z@{+9Cj@~=&-;fkH8(7{;`2G{Ko9Syw^X@C<BOdp|yx9}h=PkXA>lIuVk2!btUHpDL zX3Fg6`7Ims<JtF*`N{0X*w0gceZlNujEAm|j(={`gfF^gKWfIUdz2g=s&8mr(Ql`o z{fpUCe|>27ILbfAe)UgkL-p#Xa*NIOzn0dSor_b1AOCy)xH870$v#*3Oq7sdtQYq` zXMQp0A_pB~n!vS$embCqar|a-K3w^q%`YxD&KKV>1?zrd;zH-3yFCy7OZj!tj>e-j zuWQp{ex=86-R18t>k=-_yDRR`=l9mTD|V^Ax10%Gs#7+wYr^j~l^R!QF~7H<<6PP` zjK70%8OLt{y1GBF?Dig>>7Dq=xp~x;x8jd)fs0Rr?|I(;nL7Q(8HOXK<8hPx_xIA@ zrtQUtCSh+llKicg8CTck^zmC<ujhJ~nXv!2=G=A7F8>FD>8>evZ$iHP@h<v#7ya#? zR=3F{UDlvJS_i&&f4<-B9Gmp4U61Ox+r)>qjdAU__;zek$;X~ILxX%1RGUNFlgw9p zdB$|9y<zNwSDQvKMA{siwBu7@wy6KJ%5<}a@g_JD{A0mE9AlKgxV(|~n@x9Zw|-G- z#)rPiFW0Piuhjft(_(Y?CUmB(a`XwwF|)&C%e}!e=g#-%8FIaN$OA`yTc<PFOs>{E z^j%$dAYb1=pWMv7pL^+1b9h(B+1@*yR-<H(Ka;g)?j@5lkazMig!S||baf{`N89@o zO~>JB_@+|OgCz6v-NEjQh$F}~166nWA27rGZL7HMU4vhqc3k8Hw~ToenhtJ{wA-Cv z+6srjBE4L&{455R$0D$N8XP|Yj@b6k)OPoXy17?o+Irvy&YwmuARBh%aZa+o$hgf& zn`hOLZ(_WeW9!zOXVgpH(>%-N(>vF8d~#m1O#IAsYtBg5Fl&y^=<TMA-sjOTU6l{v z*l05lJ<mFtTjwi`RVR82G0;BrZJ+jPZ9u+fnQ&}0dcTkTFur-{Ds}AP3P``6fQ<|v zSV-M^e+lo<&!@_FZyYVy8=TL;d0KDq4*yLDzAbcO*+J>PAG<8OY(6w>*Ts3Xn}NO8 z6Si#&&=CWCV;Ez>74@C;P$qVN{W12TM)%cU0G<cS;&vXtY6SF^0)52=-^g;czM1Q8 z)p+h1VYR)H<!E~|*V!grx0-tQJ00v(sk{HNGV<iDIlF%!@Fl&w`#tF`Uds_|*=u$_ z_Shw@txsLrYLPSa4ffgW#WuaG*&X~bHlg~b^}fx{OWVlN(J0-ob<NlNWvi<{C+@hq z_4rj+v41Sio&#=RtO8%qTl(O0r}qZQ7M~AH^?VPV`>+w7FnObcz2p;m3%Y~1Y~h;? z`QN?(`@)c0#dN&MbM)2@Cv{;%`!=BW>-&A^vZvb*?|GsvyZwD5m|y6X(&=+e+_Kfc zXq{F5l<aNM$B8?(fj4wzwIAnUtX#gCIi`{I@Qty1t^}{(Y_|F@Qr!G4%%?vdAKa+C zSq;F^E!*Vyg3eqky-#DDd)(Dok59cN0&6r@S$n6>A&>cjV&`k?Pu{w#f%Q&ojwkl> zTZwMyJ#Toha%*z1o;vJ3PJed?e~%q<Z*^^9n|pY}9_DxHH!ck}Te+`0oaWuqyG&5~ zH<~zSccHykqx=3Qho`S})!I$@Jg58<rS-`{*#`FgpFP3R(fdo1gW1vhCp^K7=>3tT zplk!Xd~;Gzwt;=0ofJ%p-hVBLb8;ikQ<8(Sr|mLdOAcy$?E5Xr!K4%6KB{kP{?q?J zC;d`?y%~wY7wOj|`c+9TN$BjAyXcpne#OzRyXe=j|K)!moio8Nr#|&BpEbparu+U} z%hPum{j=ZW>%&e?7oFZkKlSY9c(b)I`i%bS*{j3CIeG1J^jpu^6uEVBv>g4{v&P6C zV!Ir8&@*`5*74DD;6u;m^DH;|47}*s7Ic=J=ri!6XX!i>UhVq8lb+p-KPV;o41DR? 
ztE>yi=CaFyH$6K7O?C3KY4cx)Lz`(n*8F7qq(|OuwckxjvEPm3oyKQ6^`rBLziUjj z-|cb6;VT&y0=`C#kIPCA!Ed%`9<64MHd0=^tc3BIO+3B}J}7?2+?j8;ku5(-nlF*> z#iL^BS@Xy)b9`j*)={Rhl<$v!!kUYGNP}xe?Cfc9weEeu-TL>36I<U}fbVGtzxpz~ zp(V^4LM>}@`j34$xG|G;<v*R=U4ISx?+cl8$cgEUjZZRam;?U0eR|(Q^p+#>b@wyh zgZsugg6Z(K_A>UG<D<@jzZLbE;0N>4TKlgb+1gCm?u3fA9%v`OaCC6*8uGH+@-Qvb zosiI0mTelNWqX_zZT-$+K{HN!mYqS%FFr^v2WNcSH#x)4g<ULrYA_Kw^(gBs;Bk<( z17z&{&XWu3bFs@-fy><|UG?G%vYo{P1pDjOX<5IEd)xku$oghyQCqX~5`JIccX3<G znvv~KIn&yIOa7JSvb6U0HEI2l@shERu5s>Mgq<3d@4{c}n(J)dES?13gD39z_*%$u z)=D&nKcY_I!yZfU0nKR4!G)`>7F-C{$6{b@83Jp!GZB3xnO_n=Ph0OStK;xIt7DVu zy~gSo{CQX(vGoCP8LNwIJu$fP=r#XAOh4zt-{EK#zugY670*>J3h~_U(8pV#1No6A z;{(_p!*hGUZAOIWMrrftHQ^W_;dwf=?&6mSKdOM|7Q>I~vF*fnD}S6A?1C3vSzT@C zwS{-tJhm!^$NsbCRk)6=YvJ1k)R9XaEzpq7&n^%1v#7r)T8H>qlux&Z>)1LJjuzra zY&dTDY&d|^p1Zx|Z?MPdl~KWm8Jll{zXjrv^eHMDrnL_AY6VItq#ycu3u5WR;- zPKe&a8^=cP;fdmLc6oTA{7UvcJP_R`d=Kx-jNZfZ#G8sGi^%`i3vLCA4>(-NH7|0l z$&uJMc2)hRTcY25itJbio}**sgda*Ks!rEyVO)C{FZt%+nIDrK>ZkD0gG~G+IK~5q zWTee=BQnyykH|>-9(+hf+V>F|Y2Qa=q<tTek@kH=M%wog8EM}~`Wb#6mXY>7y#8+F zqIheRZtZdHOea?Ixt5=iYxDoccnLQ<K7&@KM91iJ%6h>q8E-aZl0W`5ot;tl0GFq9 z4{-c{L-!Coq8l?M+m>BX{0gtvg4b9dr1;F&KS1swW6X&K^=T*E^)JAuqrOM+$Jp{( zGq{yGr|%>;cffz@&<9<To5Ze^@c&`-WPL9g8$H`q>niVKk3@5WOCFxqgUQ)^+&Uy% zq@M3xNu0TkOhM+RXXJ4{!^z#9Mm)%n*U917nV7^`F}#=V+dXaFCR-O~jZ**j;HQf6 z`uAlstO@&K?fKI)24Da7OM8%u-Lhp8>MlK$jO~aI*KIb~xn;?Lv`KkG76FI++lo1I z5trvWYf8{!9bpmQl_6{5kW+e3++C;#-8}(a0({u{%zMzw3-ZX(%ibTCF#|0p7j)9E zt<5K0oh{(@W$F}fexLQH+ax2<rNMdD@g&oD9=J5%l9&qntX_?!xq5W4D$6wbc&Brb zqw55#h(BT+Liusotk+0ZI6_xYM;`0LwrqcVG|$sRB|MWYr}Fvls%(1b5}q~l%r5tv zaJh?k=I5E-{|b52Y?&>*e<lClbL?-<=RNj`{f*{k{(;fKQZvTZ9hBp45q51A_M&8b z6?poV{KoOxn|}J-Wk1h9ps#sc`?>BxzKN!^kKx`C_~6lh<VyfwTTF((!_S(iXb@c6 z_U|XX&l--8JZ24L7rOLv2eC_LH8!jCm0aJ_lLmEf_1v+vt!EcVmvERF+bL@dHZ1Qn zzLR)f#q($#Dm#HWqU+-)K3WIyhmD4^vGc_qv!wCC!{FmTz+*c<!<gP3JEjN52m5%x zm3L1>-mi##zkNb*FYnuUr#>5SGVENF9<|$+4GeBv3*4@_vH@?&Oy$}d_z`81TOBpL zbD6RMTR!KX#XK>GMAs_s{pWDmKE}q<I+kqB4OY-hxdgq(3iU4;+;~x(X*_Yqj+U#B z6T`1I-kv$&1ut4(A`bRr`9QU%Bs#p4_O*s57`j>am<1dy&_jKinSs6<3ex7y%me1h zvS#+k9D|0ARh|_rvBx7zv4*y<z{0Pf@5E2!hque!bh_LZBIU$;V#^uUkM2fKKMHT! 
zPCl8izk)n}jFEU%4{|iNy!8Bq#BJ!zCeg=c*5y@h^N@P6J<@lh(^BVr(XHzJ5WewA zb?%^V`F5W&ed|A=PVxQS=*(5<$(j?Y^UX+|L;G+0U)V2O5~=gMkvfO=KQFCrE;eJM z>TR?6SEetBO(Hm(u_3Vqw=7}Zq8B>8lKX1Lwnj8javo>aPGx;OjdP;XLc@S<34W=h zyG{R+d;I+eoMwymyAFrv-N)YTgUrdn_rZtTfBVm#yf;VtU$?FDAIfK}+E%2_yMf$E zyV3iVQ}u0foNEoG4Sv0iy%K(Wj6b99gU+PTp$vcFEBuNbDYYfQiyvx6@lk9qzS}?@ zJ@5x;ci`#%!HsEg&H-rg6&JLwb+)6|l(eStPJ7CtIrr-5Kd?78Pq1@}G*+eC{ugLD z?CX%NqVc27j@ndm&cyK2Jo+g4B09!qd2%yjt+k?>u_oBNvg#KT&GtWDd0TbAm%h3z zPs8J^4HeVJDrhSGMAZYbEt^;mf17>;Zoj#o*u|ZyN9!6|+haYKb(zzA(B@;TIk{3V z8*ppAOlK7Ftv%nYuN48~vU*@O#MO!a8E`gK7UxbUmj>T7!<(@CLN74qB$G5xrNihf zgoX4gKul#IEE6{5Z$AfKo)$_FKVy!--#6qpho3ok=8rz>3_m-;97Ya=%k_t!_47<} zV0EP33gFyB|Dv#aXxpy0C0zdJteL_S4)wAwBiW#M1o5H*YfKS5Y6CXF2Jhx`f|WbY z5wBY2x7Kco<)`W!YsDj-d9pnvFI=HtCF56f#_c@3T<hBS0iSaQAGxon_1`vL-1?KI zFMOgOFp+gzogtm*YQ6~@HMGDgsF6RgPUB{#TqHS^4vhPU)!h%>2iqRCb|zv=KZee* zk7w(#rQ3Fx;A78R()##gm$tT={tJEi+5>8qg`8)%Cqp-_YrfXcvtVe=nEr7V`Bj0P zSQFn;=6ArhO~Zae3)(qx$5pL|mut;9&i31dod4WUPTq$1z-K>ra{;dlSaYrXnr>jE z|Gv$@DcDu7;Jp@HL}}cD{s-6K8(ky3q!RZM7wp9^u#dj%$1mWD^hG|;${u{7<G$GX z*u+b1d~IHHXPf*J(8UIgpLylnV1CNE!AHuhov}WPSpVwgHJ7$MLR^ItS~v~s*c#z; zQ3Rh;V&G#hF?MXOuZtu;gVY<{&mLX7j{2veU1C!?|C#uNXHEY_-Q(m>cKF`sy3S!W zijK7QY_GMVXNL2+<q|WO8~QhNu}=9%BWv^Gld)@}v}@zK8T|Gy_;+}T&Qrwpj>JbC zaXM|=`zW+V{zo6@du+@`ev3Ege7o?OKz9B(-@h=Xe)wS(^6FNHbe+?=K>HW`#@>q) z%V(&=_S<Ykk5K=#&e6!d=v`w<f9Ok^@5oipwv1)1Hul?S{LvPCyW;7h2l2EzhpS<c z!__6c&Nwc8GOT~A4)F}F1t{-+bPeFN-tEB0bap><xP>`Mu17Oa5T{r{@+8J>zts56 z((W~DE-BXBeH8sDwQ18zcw8@I;Naeg?_T;Ve0WPQae@K*uQ(>Hb9b-Vaf!aKim$s= z`uRtF<>7N*qW=6ZG7dv?j7z6bf0Z*<^89+nAnVi^NM7sQhV;-T=v}y|0vGDH=t5)h z-63NUl@ZZ-;?-IrHpN8dYq4c5+s|C|oiM4z%;U-@*XGPx5uaAOV#NbVf!hAcZ{{dB z_ADQKt0Mn}P1*-S4xNxYFtJ~<lG(elGZN?FLPl!;zVs*I{{KdM<3C4x+tHuJH-!^; zD!d>JPe#nQ23Vud|NG$du8ZR31LXE+;pLOy?2GyKOmKEb`sZeSQhh|`o$BM?{tMs! 
z{u92H{d$ZTs@U_bWW!#AZr%%hN6!85iC<|xb(fi-_{aNPe+$p+l-@-=Zj^8BVGe1o zi;h`yuxYqCLc`0|H_apB<5YK)kM-Dm?6e>Am|~40_U#R<NfCDhKm8~#JMB-l&n59G zf1B2tq&L79w*-guS8FOI#8M-lUs=bm0G-I|o;gtY;LL$0bl`3$d;t4*7BU+88o1C} zx2a08OFYl#`F3L9bYBv4zcA*0Rm}b7nEM@(`(7upEqt%K#a|-)Fmxk)wneyzta<IQ zadDd0(s!GuPXrH9eIcqVNJjn~UV0`T6V3N49y1$Uh{uR8@6L2AYoU$ZKS9USb-!7* z10I3hyP@>mb8Q_;W49OE;XlwNH^OhESH<%GD9;JVhC;u#{_>rEm65D^nKiw`$oMnC z^j*qpZ8kUEvFzB`sX@W9jrIk{HsAq2^P~Es`=Pt%rlBw70()iz_H7Z^Ro4E7Z&$(R zqOkW6k0&^k&xEsu?bx>5<IY3nj6vrG^*QyQ(wD=3y#@7eFsp~_o=80p(--?3urttK zRGy<#JYf!G+3TEtv#iEE^ituO$-;Vnu1~!9eadWoJ=>1Wl3ag}cXiAqS2=rvumu-N zm%fs?-88L5NFT@uh42Bx|CcS#d>}{o)OQ&X{Xly&J7Z|32mH}*c=N~U=6|@fru=bz zBszZtKRVG9=F03~erI-2^Za4-9)lfa>jh=NGfpwZX`#VE(^`YS2Dt2VQoE6j%n9Pc zIfn~9oO4Khneg5O=34HPoIk)B^RJI=x-MF#03F<QmKo{ZfxjLbRcBUcPLwhq{^Yc5 z`tD|)xXI^=T{_iEKIQ3Fy5}fzM{N{fJE={rO-=^~YDZVuC~EtJPdY+c$YTA*o>LJ$ zDU6?V@N@e7;;63lW#MOyW9Q*A@mV)|rQ=i_X*d0r9`Q1BRQn9}Eoa{zcz=xban}m> z!){`5kwJ$Fo~+-b{q^CwThV&BjF^!VS3W?!_T0VXbF{IQHujBUJs;m*FYVOQ&f~PB z{3T(1I$SUEx}ji&>tX8O6|H|f^+)C0cIKEpp2X7RAIT0De<;3@=fkc^i^@dGykdBN z--qPRmhFj7&p04cU7<}e&-S9Hgn3Q)-Agg=qUBq7X4hAh8q6Ah`u^Gp*h<tHwh?lD zzfCmh$N|}+9pG$PH?h>xBV@PgYQtfT46Z~cb6RUQ{nTMaU>nI^Vspr0t={A@7PgUt zGfN%hwN>8aICR`$=(zFdfC<=2R`9FHtpe={p<mvTk-;YEc+m6=>#j{L=uf@qw28+C zH`XVH<5#!A6Vj2jUxS8pHnnU^<<0iTOV7v+J?&C_rS<~3LQY~iKjfLt+vR|gxdX_X z0P@eOzM~yKugzDj+UbQQ>?<tdeHL|o;7IRUz<JV6c&u@zvri%;^p`=?I6xbkN8-JT z8Sb8^IhGOnBk%OQ4u7v*#&C`jetmu0W0x5fDjhJ5)A_&2nb}uQUZ_Wy|4ojpzB}L_ z+ApJXP+gpLG%h?ZLQf<26eB8sh4vh>x3G`7skoH^>$dOOu-8+^LVTpy_d&&jM&bEc z1fFK#OgHHb;)lXTGIb3gv(TFdbg#Pfzi`Q!WDS~|y4PF<#+`asO_>6BcyEiwC@TL; zs9XGPXx{7(t}i2ZacOHbe|8Q0_zlKFbm^e(6~Gef8+FyOhPT36wurunt}<y`c1aa{ zXXR@65xi*}@*;4O*rK(>8rkip`R;=syT+I_aF&{TIedxd@(UK^cjmaKpc@kxjt{;D z`n7fOSQ|cfmAOf>NOjBqyP5CAixvCTDL#KP;i83>?W^@YtajMbnM>@G#!Nl}@0?M= zyuSq6@mpr~mVbX_fO9ZiE1dIk|B}4S1suH<(PwcyiznxvvHfZ~mwA;5*Lyeh=4P8^ zV;T2e>U3=$6*PaTYJZ(FLv7}L*v#bU3-)0%yRn%cFEhbPY-YtAhtB<C>*MF2wwa}G zY{$1Q+gUcV>}B~VKFwx+4IA~sqk|hYP792)uN-(|?FGdi*}Ow`_;1b$61%uj<5|j? 
zpt0pn@g0?0a&C~dg^$JWAXhhR9XTc_n?C=~^7Uuf`I2Q{$q#Hl|C@aLVVjEF7@@!N zd^qq$`DHI<_t3WB&9$6;_269lXT&Rwi+M@isz*A8;K)y6ZGg7+Qii<>@-^A-_v$_U z(fh@;sX8|EK8l0-VW)i=YU^X){VmV_u90tIX<Gc~-uLDIPU~9@E@Np=b(rIM!5{EV zv_Hg++kDH7yezizYw=xd+6T`oCH|K=A1p!Fu)n)xT<|u*AL*-bFFoNfwsO>G5sfj^ zSQIms@+WC-Y&QP3u%06M5U25MyBwc>0GSoFhsD<<Zx3s&3VTBJYoF4``hD|wyWejB z$CrU;88M5|zI@u)Y)&}C*o;3tHl^y@(6QnD;!6|<;R;>Md-)GO>HO;x*dL30X3Ji9 zJT}7<-Z?V|8o3TQhjCV#BXlADuf|7aVjp1(!DD`iym`a68_e{Fku$&I>>XFyO!ncr z<#(Ox2Or1|-*9-+h46@nkUM&JAwJD7@V|rS`rjU#4F$mk!;k}#UHBGVX$c(_$_cnN z9CK3W%)Q_ZXI99~H<G0RV)zR0$Q?P}D!<+3{Fj{KHnU9cS3xg2`%il<r6cL8-1tAB zA8*CY9M}lVoA?bHbUtWAHhy_lLPu{-LdTn|kFl5WmFNHcV)iN2jr=G2v-W|qfqeGn zqfbqTnWn2hM=47lo79Jl=@>mYIGz1ZwN3QxMdB!G{`#H%bq_cPD)>gQF9e<v;z4^7 z&5ZB!JUh{K(Y3ke!hJb2JMa@vt;2_2ne~GX`aZP^`S&HuJ&Q9hXFZ<Wm9u`UXLJ(h zXum^lWqj<umy%z~S>G{Y^gHA^S&x5x5TA7|GVNuDdzNzYl0$jc%gKMpImr9nynB(h z*W-^SZ)~3*8K7qk%!Li;QwwkZcK;#r#Bw$vF@@x!c=DbczcC)i_f|ZxxMorR!kV%D z<l@0TafTM(KBd1${(pF@##wSsc-Y4O2Z2p+?1vBQ8GA4|qabnCK5!(wK24bl`qxW; z)&FApnFq|1$glJwa5p5sm{a>?;^;o)u<|#nuaD99+O5M!zYToAYp*|4Vkb?f-A?en zpRq%BcSzr8;aN9zmuJmXF6AJ4#8iXs@S$u=+S&n5d$H@j0xXKB(DU=T>Rmo?<a4jw z&%1>i+W5W0HA^|R4>*%%wI#ot^A0d<i{MhZKFM=y<5JIr%Nw!Rs&2or|6OM?xlu;= ziXHCSk|)>aOeIxB{+%lxSX8sHzp5s`pPYfqw6448_VZ8SvfOfc)Q?fn+E;;Z0GzR= zX2V###VlJ3zC_QJ<bg;8KbxWXb<n5I9UHBEFwmOz<}}maTTRME`lf{Y;$E}t1NL0# zER6-;1lI3(#yfcW`~vzQIj?*V^`cYWYyFBGG^xS~{MH_)D#lZBq>>-$#x?6Obv=!5 zBf$9U-0A@Iu3SZ`M}9MWsiD@x_L}9PH<-VBv1w$N=v<aPPVIfp_PrYMjnvU6^gQ1t z+Ari=%}vcS%}<@J_9k;ya0<R6&7;2XdG*E5gy)lsXXP$4Lv8jrN8wveAa6mqzxBY- zto0vqHvbd$T=4>HnfANnzKzY#c-Pa8{2{6XU1J75E#<t#rpc)7q)+>q!-3la{c2l% zC<j-u{ZN0^mjdvHZq%qgxafoG6?{9uu?-(*e`{R3fiE`pxqm?!dVDE59%pl-yGuso z?qmMY?}ZWg;3e8CSmZ^guIK(@>K9&AFZ42_-jIV0y-l=J5AIwUcI>&%?3>=JoEy|v z1@GnEE3VX;9jDf1dJ2M-GrXtPWrVxGu}*E*v32IaDERUKdo*`3uIi84bA2#4qa40J zT78VPW%rq9;O|s>;36zz>%GAq?2PAQ>fOix`1Cpi+diH93w)!jwK*NM<06-~{@=m( zYR9gdzLit{N#2)Rr^iRnYAC1i`K4g|yztP^M`GYnKee}N0%Lm#F*K|R&nh?8tcv8$ z9B->@bOrtDgZ5eP^i?OnkmGGjhCe0y*20T4??b?l2R`1WjQ)QNzHudWXpM2dGjY~L z{P^pWf1d+C_BBO#hZh<vCNHq~u%3x`OlIwI*{w_a#oLv$JJ(^=nkU!gq;OQW5$_08 z^)CzL^^?D9*&*?cTPL659bw%?cA*n`3g>x<o~IK%ZwLIm;`08WL-al^ST^t>a{P!h z&L^8szIf$u-HV+_+^Y7^&acLf_V!r8!rKCvgXA1Q9%gU5IEcJ_;<|Y=2R1WynhWL5 zF#VM~O~>Q!p>xQ_Mb8>#(z>+X@HcFpF~slQKpiSSnmH-EZZPtW*ckM+bosL7zhs@E zDW^Cgy#H8v4&KK%E8hldLcTw*Ho+Xrov3pJ#b;(PW@8dcwI-W71{*ItH+6pZsL+2L zC0>Rx;!K#(N#M#gqx!m;Q=70yh+~~9+>bt)bx|)dm@j0y8WPuhy?vtF&J$Q$f*ekC zF6j5VIU|wSqaEb!@1%^IJ${@K+yS4h&F7icv6Zt%w#aMPSm1a=bp8d%o2(oRF6NDB z8obW$!FRJAd%~pn(7{I8{B)4GU-3Qgy5W7w_ftmrRlTk=JU_yI0l}XKogD%8&dl_N z#C!bheb|%8q%G{BoDmBL^JIEUgby$_^X>NU^5O5$e)BL4{|OAL$4!4Lz@yedBn#ou z(>1=b?cgzUG{*CI9$SAwO#R!V^|w!q)Snjm9d+B~;VIpk@65MS>OelvDTbDz#|_?L zGY85UkBR8+{00=CFFf1tBHz(Yhh+W&zUiTzN@rciEO^5#VCqAT*W6aq@8eq+d?S}K ztEjKQf@c%2{+aMjIt}l`F}P?A+Y&y-pevTF9NNwTju*gH0l3No#$L+cTf#@2YnKz; z$mI>_s~wfVUe3FM2<|iv`yxEg!1F%qtfE*u=z3ys>Y(?T$eCTt!NBbB9&++W9_j=i zw2%DbEC$Kt$h#LQS2?E?{nOnwt>m(f;*z)yH#qU8nw{|AfiF|1>?!8Wsd&CKt(!{6 zmrwT!%4?3DLmO$($)BkI&-}tNKiod^ym1;bw-*=~i-B_X54z`-;wN$S-Eu+wCiywY zr`eThDULrA8pLOQM1D)1ZKu4>8KDpOU*9)6G8&dUGP-P-t&BdwoX_0+JM%5p*Q@nd zY=3Nh`FWlA!{YFVg>yrkUK3@_8J_bkY~M~`7HuWAUe@6y2I@8bhmTKR!1*-`!KDux zdXfK4YaDakbhH3VFJqs(!n8ikJ-YsuPVxyTo)Fps_pF_ukNt%;TIrm1@ELv6Lb>R> zbHR)FsQ&)}aZaj7aq)xjH=ecaMz#ws7vrTIio%6qpY=1~!sWkbuFKhc6Z$SVP3!BO zR=-L2_40u;?-d^@nHi0bJbDf1U17gTF8nX(c{u#fpeOuK)(GW$uz5h;q+kg?R}b%W zF59Qc4#DLHF1{V;h9_qNUjaC}=u&^j3}Ea%Uo_5KVBa3|qNDd*u3JmIlmDaCJLR`! 
z-m!-+^M2vM%f09HT;bh^d}04uNB^Tv?}76#_wGOUGVh%Z=|;)E9^_sxdh=oIkiE`S z@(er3_c_ML`MTJgl2@aBk69iM=Zt#xS#D3FBhho6(^FaGOsdRwx+{-45-YDO+mu#R z_AhDKWj{<iR=P25jx*IW*6Hw!agOoa;mq=!=SZ%cZH8Cg=NwV_ZD&el+njsSUY>Jr z+QB*ZrKQZ>kT$gb+pV0+f3ZeY7FyYr*H~GV*Da?0#niu;`WJn)esUgty#8-e|2L`s zo7DfU4QXA#e*pO14!0*{C3?O~ottQ*h&HaJ4Z$4ShvSYho_}(V_DrQ84*GGgb9m*A zz)rrw$}gGZ%2?Rdm(Q#pm}C3!S@r*?$4rfdzm5L%kc0drXVukOXM65k;hOjBcvE!e zij;Z3PBcX)Ib-fIhbgLEkv;FR;ihOkWf~~+Sf(jzpv+@Arl{{+@|=w{MV#-#JIbAH ziYBVeIl5BDN11W{j@Gf1;eG35Q>1gCDk#%>p((1MOzXv_2>$E|UBElaguZBse7p}` zp=-*#P^Bq)(Xu?PrI9*j>pQ;xO62?NOwm4UuF%}b_g{~E|E<XPHKr)hk>+V#6#0H> z<ooYLzQ3L44yPw{Q{?+QBj0~N^8GrVvjMMlb>#bdBi}z1`F@irT1)=i(EX9`e-!!t zCz0<%rl`i5?rD7_^8L>u-#;Ds{+C>x!#ts1M85xZ`1^zCp`qu)b@b+vqy0tho$;R5 zXCw8z9I5B^NIicwMQ*3dBfkh|f_8-d7^&|+!}W#U4A&LfZ;JL~>$UEQ)bmcHo}=M< z$Tc~&^#fD%{PgeVR4^X9<E>?(_ZXv-rf3cJ$J^L=?-2X*F0zLFlkoXC-*IPx??d#Q zW3>Ic!<w_zVFf=#2k-rrzg04B*r<5#WMs)cCwdh8wRkgYO`KO)&zfTO+|mJj&-S{4 z4<6KIxf-+v<$(8A+V{izTH(z)Z^*0n)is5a_%&Fb%8Av!Li8kZO}P5X(7glHC0>ip z(`etrm)&=ut4uXKceL4j`Ad?w@(q4^zNSR$j{CbxOmI?(6~u=Ts=9k++v<B(wviY% z*ap1*F?mZDj>%tIcXx5ydY)OdpG*8?H#WBX)kF7aN;XNB;KvWuOYTHugw|H9b#-=J za~bQqv2p9y5$CHl6hF4E<o0jSIrCc9&b{l;lY-m-H*4ntA60qg|8r)zoJjy7S1xK2 z@G=S7Rs=#+nh8`57p+{Zt!|s3``=DTZHwZCdLaQ@2SU4y;-$F%1+=@H8BD8|RMYNO zkZv2Lwo;U~_F{r<og~nT7sx0$|L^b2oMg@+p#AUW^T}ss&hwu4^1RROectDN-sSO| z{JfXewFU7y*=KXOD~Wn{T<J>&7x}bL(eVQBg#P=v*@1CJM+5l?1BHC%@Hv6cF?^oE zXC9wrd~!!p;A}o8@>$O3nb?eG(`?Q!Cj;m6dos_f_&kTt*@JhQIrr`@e^YUert&)O z!2aB;@6R*V9AZdWEA8dZlv(Ss<+Xo<Pp~S$bK>`B^!Urj;T33+p3AnCgsaKWNDzC8 zyu*9y+0!XELREn`OJ>Ozj4ew{1M7TZGYvk=m;>1`!Y{s2JKpNIZ>3|mWbX`|b-+LD z_=XeEbN5>9St;J5q$0K27fY=6$M%8yopxMXO-%Y<^-E{UziD7c%O~vkgHyY8)|pto zJMuUm#@VzFIYTIaH1|;@bq=tPeGGUi8lP)KbJ@ozXaAy&eGHv<ejGn_{68*<wS6yr z=6M@-k<K^&hWk9Tebn1*n_)5Mn-}4KNXIGeg$MAz#lx&yI>A*5@xV=#lfO&Pbk<M% zf?MHB*-(m6FPBZp?;xKE@V?wM7UXh|+9=BR;-lC?9rd!ui8)R%$9ldTU#aei`zvj2 zIm5U~@!-mPnljI8+=YK})ES{!#2;TkUizpX*x%p{`RX;-nm_0J^U!Je85eM-g#B^; zr^4ewavX(Ct5M_AJZlbe(*4HAnLcknRD6c)+}nymv&1t;S=VUpzw$TE@$vgz>{je8 z>Q!#M;>KqBBwJSJ36+nl>}_mV_UA>TDy#WU$1xVNmogZa;KYt($Nv?KQk;<)zv)ow zp@LDq<je5lrUQA5Q9iSRIQjcG>3bfZ^~5cInK{~v4Jq5b$>%F~;sL#~L$M_JWyW6a z4N0Flp^o8vcj|9~@4v!+!vog~pM~*x#5R<{Gx~ld-}R|A#r0X=ui<+W^KR_8j1Z5e z#n--@?%QANG6nIg?Y?R+bMUTClRQV-YZxkL!M<<R^(UZXozc)5<}uz+Any~bFFpo7 zw1y5$pOeg)H3vJ{iWk7|h4`$mVZUXkZ|7{yL6fg(cIV_!@;P5k+pZBiXYU7p`@x^q z7m>+Ea*fYyYbIXtN0~k<zGSz)j|{anfzO?5)?KoTySO>OwW*Zx|Ir>peta)wvi)_A zkM1I9LiY4X_R%x;^h?I+Gom*V?^p@U*}uWge#!XBjOZ76egV%zFHL$y^lN1PS|>IA z$|P?^$*|CqVD;6+!pZhtVF|fy^xs+NfiG~wts)P6z8jwKzCX_m4|?CPb;Bchp879$ z!#ln3%iQqRiJtc_b;CRJJ@8Mt;bAX)t{dL!9p4-`JmP)7#tm=v!b5Jj<sDzO8(!;$ zf6@&Pdf~I(@URzti5uSPg<tH3cY5)AksBTy>%ni88y@z;XSw06V?6Iydf){f_)HIc zoCiL`4Uc%?)7|i}xBd&=@Q4>a%?)oY^|XJ18=mma|0mq=S}*+LZg|4m|Bt!h;Srwp z&v(NU-u|EGhS%nL-k<7*hrRF$H$38<-*PuR=zTxvh9^dP+CSF~w=56*95+1dg`e$) zN4)T}-0)T}e2N?1>4i^r!-L-bm$~5)uly)=!xNJ|<16vNz5I8k8*X{!`x$O{_;gSG zr@P^`Uic(8Ja~rZ{fTaP#QXjP4}6N}{qb&itM~n4H@wpeFLJ{ZUU;D!Zh8A(;D)#Q zJ^jmf!-L-W&vV0Tz3_2vc+gw_SU21n=4t;lH$39y?=fz8!smH^v>P7w(&s2Q-17D> z*A1`D@zkH=hKIfIfE%9h_TO^DgWmT?y5Zq6Px~X>@Py%k4|l_B$+PCNe}=i?tzNj_ z4Y$1b^||4#Ui_MFxaEZ#9=I1jsWO*+*6M|S;D&d4;m6(ZM8GrtV{Uk>m;T;&!)v|p z_uTNH7oK#(JH74qd*I&pkGkOzuY5e>fqUP7*9{MQ;qSQNmRCQ$?S^-H=kF~Ke7Fa{ zhu!dmS3bPyhPQg(|C<}$>6Pz)b;A?h_YZmC-uDl>;SulqeQtQz3-5KqEieD}xZy!B zeJ0#+%Uk~&Zg|2we}8eqgI@mpvm0LPZU0Yhc&k@_yzYi~df^A$aLX${y4~=g7vAND zhrRIE-0+AO{zo^w(+mHD8=mmOfA5A{Uj6wyH$3Qtzv_n9dhMIvy5T{ueft|Xymgd^ ze}3(TTi*4-em6Yqg}>s4cY5o8*$q#4;rrb1@Mur}e&vRDdf|KB@Q4@wk{jOYh5ynG zPk7<KaKl@@{P&_8-sy$^+zn57;d|Whuy=l6aKmfMJotIu4e#`>4?5j&%e%h$nH!$) 
zzW-A<ywwZ;Z#O*Xt^X%(c&!({+XMINujkzG&heh{?{dQvUieNo+?wEd|5-OY?1lf> z4G((Z|K*0)p6RLoKi%+%_x&Ha;jLcy58d!iFTBGIPk7<axZ$<l_07|6xaEcazzq+2 z;q7jC#0%fyhKIfI?|a~0{5|D{w|eRSNjE&<l}}H&;nr!M`T3q39`VAz>xOrF>u+<z zgWmTacf(t~@a=ARrxzY~!xLV3%nh&g!du<&h!=nV;f4pj^7-H0@UXZ3e{;hvul)Ls z2k!NcJm!XXdf|_H;9mK$%?%HF<@Z)Myw(ff;)X}O^ZT!Ec-ZS-{l9K_#B1OFiyPkR zm2Z!@;Sn!<vl|}v!oTf?N4)mg!)|!17yghN-sy!u=!S>A@CV%RPVe|Px#6u|`2B8p zt#^HVpBo<b!lQ0@#0%f(hFf0z{j(b$^uoX8hIe}T=bLVL!VBNvhDW^Xn-(|R^77w$ z58P|tt#iW@UjK5$4X^d`*S&6d&<p>D8y@l6FJJe-z5dyI-0*~V{@1$UwO;;P<Aw*l z@MaI(D}S2Y@XpB|`M)|1_wF^Hb2_^>?Db}!O=9oo3FFS0>_O$n-yq)j$%OY@lMx?( zviFQi`@P@t-ebHs@^tpk*u#B$isBE_ac$1oB%Kk`xg^dA#=1GXR6{?sN4|_RN?mKy z`_nqJ#y(XW`Fl2X5myt=I;XTP6F2tul);<>R}m+cTI(k!%p#sRz&X<#&Y9*CS4YnG zJ4SOx*dNvT@5Ed)`T}(|a27Gw3_VqF7w77TXUjELKQ(%I=qbhd9ixBmv$v@|dByOa z$Nv7<5lxE^O)+8#|I~Qm<7O;QKHP(hEk-=6&J89wzuxE%v@CD9ewO0!>K;h%L34&H zs%MG?C^UU7)x_!dUT9Y35MQ#4^4cQ}gR_H-rT4;`Dz&4!=Fc>ybN-XKlM>>{N-7G8 z8(p_+MoXW6VnvXB=J(i#=B|%%Q`!5=*=4LyZl{u%vEp%XQ#+0Q-e%4m(1!9Km2kFb zC(lbb&(qCkg7z=qy@Tve>$_rEh)pQ2JDYfBez#Ia--#XJoI^>4&Wh?RKuHJZG1(Iz z&v(Vs3$}p$RQ0E7YVIWZv8b+`9MklroICNtdm3iVpx>%f@7AzKPK<d)gmLJ-9Nz0E zMtz>nmh)cV+}uew@NU5KZeW|asABlKeD!G^{f-bL^CGm`7Ojl7kTdpi_Rky1UtMpO z><{m`WmXKlyi2`f>0>MN)6f{6RlUtzky9Ai9By1btDHK@hp*dwHGQ+P%RFhW2-s!P z{SMGa^*!eS#&w0cA~-eEZbxObPwlQ+e!JRLE|&Ly=ZhV#HagS?;&&>h5(BdO4CAmB z)cGdkiC=u~qUZ+Rd2v<a?HdB4Tbd}>GO4L!J2{)aM9!uy#FB3y#_+~6V`&R}$vlg0 z@EgfS;tR^?<4|S4S$aa*RYfN$+g5Ty*$&ahQ1!le<_Tq!<txru(&G<sSGTcZIeBMB zMa@O?%Aem{KCWf+mh%nHe=x7mO~tV0#q-bp`Na9Akyo<JTtS@i(vog-MJ_pM?C<|& z+WD+=!L8yB^jvWVq6gwu4<CGbi{4$l-pr|hk1FHDQ1#9=t9sc}r~N&8hu_hI{1Zp9 zLv66;Tzn9|iI@@2Eab!$7gJ4KRgZrRXUay!70dN3IpIGR*t7W{d+&<d`1!x(PTB&^ zokorc#Wp@(Xv{juJ*~=h#r?N^k}+F&r=CylzKw4Lp72-1Bc`>TxvC0Lr_M_(AZ|lB zix)JMEAA({upv0hV(xXWX$tKep`VHqexI2Bz#GV@GmV?JO#mk&jSH6XnWzqNrVzQx z_*=={c!|Hb@kV&8&JQpCY}ivhyZy+d$mT60$#d+V*xy3CTc&U)1LYt7eq^HJWF$kl zU$o*g#6|s!fBXW~wT1V84!@=2F%+|0TyZ7vWBy5T-Jz&<!^Hnr(%$()v`4(Y-QEJ) zOHDRb9C_;DthS0OE~c)dzM=)31z0gk<K)b!WSHdFkBEnhK-;a*b^zLDURSIWZF7%! 
z1ls0VlsFta)|xroSl)e+os(fE<7@~2!dWZvVdQyQr86``*2O(xp1L~pUg~nQ3K?JZ zSmwP$#K3#|afmZg+3yBSV)aJ+v)x_-8fbv7dik{JXg23M2$}3fAH~2$9W*hO`=SyV zItq@4&Y`1&(BP<P8zw>@xwST}<irot2kwKnX-Rhyh?WwJ|IGzvRS#z-N}wa+ptpB% zmu@fT0Yp!=!~w?nU%;8m$Dyn38M+cZiLS_%Smg)b#3B`M;2mNETO=dKyMM1LaR2TY zoAsOdDqhFC=(?6IS>L~x_5DYDx90%)M{HekIrl_~zUu<UVepfj3oaUTKOJKT^BDlv zKzujxX;op$s;}W3;=^sdeQm$uy|+PU$M3iToxBj;yZ{|NA6-3<vt+fzev|i|I1_A; zjnCO<q%Vrj`Od}B8qV_X>QmfI*7M*c(PbAvXFOj_JWFcsaAMN((enk|BQZSs`u!F; z?E|qluCdPs4Sfd(@n{E!4RHs@w)7nw+ox8=Vi#T%+qU-3HrXrB`e(<s`Ooa%w)Wz- znvA`&_1-($w(d6eI%n*+-aEVPHS!T}^?&}E6YZ6y#H=kLj&8U;&a>3--Tu8tuTvhU zQQ!m{<&H7L)cIX`oSd9vt>o|PB4)jY`|1?$W13rU(j6cxz=7iTcR^b<e;Leuyc6EP z5;@qj+lcN0cf^NPD0V-{yQSDVWe;BzT{F6A@ml`7)^g|M>yb;k)^eB4T3=h@24ht< z@r~WQ`<&p!yDrUY=iIDzh##|aVQ^-3#%pI9D|Al`d>hqy>GE67T+;KHQI)vP9~*lZ zJ8proDsjINod>LYZaM}raqh6#7yiT<jgdQNC#D$D0J5w1`^3Kmw~bLOSK{2^vEJ)O zC@0|YcP}=Q&-|G&Dn1dJm-yw#*pAHa#N|fP?k{z#zuzSXk?c3cluDMrr*gzUDR+-> zQ_7i^Zp!_p$JnIY*v{{l`K@-9ANv3DPA9Q?*4{C8pSvFO>)c7a=a<7`4+2AezC=zL z`#fq-CA9X~@R<4#q7Q5M|E8x8zxDJXVe`g6*t}uFPt?s>%PXOUE8vfXv7^^v-|kNH z#lxHdIrhaL!80Z>A7!{d@ea<6_>p=3aeF_DZEGRd-{!n?cCNbCwUhdPM7xRC!EKZ7 zdgd(F>%@^WzSI<7Y?&|ACjB#LcPl>7XgeP|KEONae6)r3Ie!8|JJd#O?R5ivrfu#s z7|hYrUsn@5_Qn6U^UXD?&!-K2FJNkS0eKux&ZkZ;Gdpe+K8?>+UY|*J+?^de3ZD`u z8|^)JbS&}lF|qEom$jwyo~6(Bcds4Zb{FIBUVCYq@+B%BGV$0j8wXwFp*#Rywa!UB zHms`We&J#qb&PEL8+ok{P!1ZXfIi~I<Q(`7<4(_2*0^^UoH*{x-FD}nYTRqb?RCcH z<WTIt!L<9O@uhC?#q7Dc!K~UzJ{$5B#NUTM>d~d5%YMGwwh;PO`4$y7UQJAJFKd_x zbY2@EUm4#yyHilX8Llqo%hnyu>n9f7r5yLlt5FpH*EiDnk;vQB(!&{}l7^YHh^0^V zU}teZ;GS~oDM8jJppzczy8+qVL+)ACr{}?Di~CIL?-wr@l79^u^JdvgV~8y-jNdy@ zcVVXP8Ft;4S-3~_HA8cQ^rm=Gqb&;FyTF49jrMbA)_KGpy7+4<{554*tQ(pXf6d{) z9U1EIw=avgB$LoJZz?DF0qX3c?7xwFSoI(I3*)x=E^98=4nf12x%?q{JiIZgx(7qD zWsN;21@Zft6Jo04%h8j>n<s0Xc>zz$(UpF1r@5#{Pdal^XwO9fb5R(-7<}0})GTbV zb!fwkS+))ZU(%u3ajDXg|35L<H!^1BT*XelkND;2cKFG@bE?~j{sKOV5rf^vnq2oz zeV2Qu%FGaY&xk*|R`#XN$&nj{c<lM`!-=}h9glq@I3r&CguEnrkGZz<bNwG;)d%V^ z(z&H<8vz>j<{<eT^;yT}?$a4b)?G`j2;avUho9rSY$nCBONQ%H_{kYNcVEEF*)uhS z9zo|F^atX)Tl9L)G;H8Kookr$+0rX-;D1h#yAk=H6RhX6p3kj(Zsl{^m>c*v?s;@f z**%YqDU;5)XX_Yq*eIUy+&Je<)+swG$p6pXzI#Gn4;{niIc$8kbc`|U6CVozH;h1T z4Yr#4l<j|Ez5j%D_2tR@0`BUi|FS=^k^8i^ZU9!^b~$;m4bFh1pKGmQKaX8z$E}No zwlelo=vwnqE;&be>5u7r&-Uqf?{jg^MP3b!HgN{Rj~^r*56&ISe@(}YJ3MthbnWQO zt&HEHL+J$PUYEb8{@XnD7k-5Lvt!PQ-+#&2d1kbid_~~!C2QGP_B~e?c#1F=oCQ`+ z68mg=K4)E*bJinggfShvb&=K}H(&3Ieu?>b2iw-EYZCXzbzRfESm#6ocPg(EcFW<r z?o&Ly&J2itax9~xVpnlgz83sc$$*7?Lhd$k&SuLzBiYpKZ)=*+G`q<(+nP+HZT<O1 zvaX8zhnjtDp=OhRqb>CHdoS5=KC*S0FIuvzctuOIS=G(CP^TYUMrDsF@a<0}r<+Fd ze&k5Ow}&MKm(8Yehx3e%zmX%cfqTv^d@$<U>bs5D>JQA=>I$P{^#>zk;Uc4>ls=Tw zms0vyO24Y<OPF^8<ZqV!qH`_AuC<pKyY9K9<nu;{U?ucNpR3O?k|i@Fa|ZK+4|VR~ zR{A9zDPQ9L>(leFANe<vwbQRTbEEa6as<4>IJ9QhI<X!6AA*irnHO@JEedm<qBga< zQhKe4T%GwweueJgP1`{yo<A7sobN64qn-AP%#C(V!FKw9UuVbDS$%khJ}BplmCw0< zbK*2BpL-eU&-fdB(QncpJy(Cwg*nw_+%HJK^I12(%{`p~zkN3QrhLD_yyP!Pui?_U zeJ%WwTNtNxn$gk1Tv}7fA&3r5^HTw5VXOB;FDtI#+&xL67i^);o1ZIfvDf8{Gy9B% zlb_kEYpk)Mj#~ftnd$QuM*JP{%X&=b#cx`PZkb^gxcU_4+|sl3DLjTgAveMmp=7=8 z=*5;n*9`UvSUY`smOgEA`ovs1Jno!x3Nu#*v?jcX{&Y`<WcyLh2YKyK`B$Gu#_KGU z^kX-<?R5S@=VvSYCTADO-A*n5two-jDLXws{uI7qtzQgs4Pfh33`4)@UGjMCxCL5< z&o@d|%Eu}`*ZJixjZ4pwlg=~ANc$Nw(s?#0Be_58^|X#o+jS-J%aD_aRO(Iqo^ST@ zZ<!ZF%bTZh_rS-aD;ekVNByye3L|;*M0kg`8pU(BT%HWW8;RmjRjWC6W<By#c8Fy7 zdTc()&2HrKYX=79W*77&{}y}_?*unJth=S>y6LaRpKUuI#V6pLv-$;fIQd`I=Q*so zl-E^fiXI>z#ffs)$5~_B_Kk0)U+MYIulPs$CEM4L>Ca^KZKqqN#~qpeKg~b4m37hI zlYedoYmEo#SBQ1B1+JX4DAe<&>^LJdJ6iTHv!lubH&k9>C$DP)IupM{@M~G;X)R>L z1I;y6>p0IPTo)Wd#!3#>C|{2JI_oxvX%n9BASXdmv|ekD=WfZ8itw4-g`T;Gu#vt6 
z(F^oVc_;I<u3kANq}*a3nHL+84=3Zr7oiz1FDgHW`lI$7-cz4i>96?Nku94Z-EwVs z4tFXTvu4)Qrx<xuHAVwo>7s1IuT1U_3-n2DJ(__r_GJAh$?L)RRJL$MT4wY?lWCqU ztdMVkT*dKEv9BWj+{OFl9@-IIE4PR0C|N<y0nOEmyJzcn{hFprEN~BgZQTFxU?vZK zhW=}8`d5C}K{@Z_O}?FeuKuKvBtL)gtDN<B*#O8tXKq!0guNCUPldym<j^0@^Jdyo zA9U7JWu^mj`tdL@={ETYoHJ-HI(z1m(GYa@k=mRvVxY~tylnz=+Pve#+8k9K#aDd7 zI>R+i&hTlRo#+D5-i{$?&j6>&gCbogJbYGt;+@C`C6(F>`KY>KG<K+$9&Mfgr`dCQ zDfJ7FL+Olcx|L7e_R(FFhu>n*Pj}!nEB2@20C(l)oNz~mlk2#3?HT>0=<@{kUG%Va z(47&wFCsSOBKG2Z(KzcEYZqrqr&QU#I%jPrKb`QG-QNQGE1#YGb;6Hiq<nT|<f{<9 zNH1lVA5HmIWOi)r=WTyS^8n2*WgJUl$F5mC;Lj5c$<JoXs(&z|-($TwRe35{KY8<e zIPxmJ{?{I(Y3KRy_BVmgO>5=!d0q8aXB&;ScR9B?RDZiEZ|fMX(WPVH+4TGj=4q!t zw*PUMm#?6wY<hEO_h;0}TC@ZI?jq#IiiRAcL-Hz&oyD`Le8C0ypJn3-rxto%cvSzg zarr~)8H(=)aE9%DJvgfwLI(_#dC7==jxyQ2N#5QaL0i__`Cjb#>y%7DCR*nBnPGUP zlUz>ed@uR&`!yb5wW$*9&V2TVOwK+V)8FcH%XsahX$-COCvZWiqX9V<Ko$osz$VdI zat9ySSw9CKTPo~39F{gzV4InN#zWAG8%IVJj>bGMURDl)7tNV$$d(K)l}p@3UoRWc zVh<jg;gMh{RF$Fsim)+$W`wchpKqtW`1dp8d?st0vg<U?$VtY@`vc=#ml-GPj1^6c zZCz%Z=kq-REz~nM(Zf3Efjc;=veyskdCzA|1#!vTtNmtyYdi<Zp^go3Dqcz8qarup zz+B5OmQ7!0fJdz(KEe2)zc}^pz>jVFX5ZasA}jD?<r*FDV7H!ZUget*O?=ldHzl+` zgYBO1D|dsnN4iey!3J=py_|RaTdplfw}>wEj*s@Ud2diwoXC5wv7co|zr)x|52qel z1FhM4b+I+>Fin>oG{6JDF{2OLxMI)antbFCdmeWb65lZVL(k$%raq25yAgXfk#!GG zn`~y!T|AE?cO_%B528Jg?_*C0uac`?8vb$b&~kN;d_3Sw@*sPzo~C@ZTpg5CS2G^= zNv>g^WNmt%<W=?|WPe-q(WDQibjj{h-zP8H5WTd$mU`$m-aQ(>%<lW!(5d=k%c->O zV9(vR=ua0g_Aqv=^q-t=Bp-*82cny@A!tT)B0Hy>x<kw<dvdld^-lhlYbB3FPby=u z-ju%d;`K)Ql}%IIs4Fec(|6V!x#m-`?7Qe)bT!r=9Rr=L2Y<)1?iNe!wod7HSB#GG z<tx|pA^h%-qhI6r1$!CeXlR541$OS~gC8Fq>#a=t-Fwhq^0%AA`=6FiVypj?ZCjy} zt!ppgzT`(P>ALsQHu+Ar`_Gs~o}D-G#T@RscVyeLZI>U{eF55E>s>M|T8>P`|FU`e zT;JZFbIp!&e574BIKFy|JvjRD&qlKEM@F*u-27PgL&ouG&{*Q1#gpti+PXS@pOtXM zSn<)bACb9xNch>tKl_U>*?FPKV?5`eMPBGYd^i4Q?Ri0W>?IEJl!FhAQFw61sPSl= z*<*6X)5{*0Yb=$9Jr?yp8!x@kXy06o1;2KQaeNu^Pr^mlW9}HJBztG9IfoqM@jI?& zd`k!Tg)zRw7++$HPk6_;o-vNF$EX}0W04!_JGo9}hYjTKz}D)+20H{D$mX*5V6eMR zmcK)OWBH49KecOK(3QvUSVW(%qR;949o_i)z3bVz@L|~2>x27Qk|A2lhBGpEtG!pJ z9BhNR!p9LWmXSFw{Xjx(^uu6|2WPG8;LEn#tVz)ta5R*kX*c%>a2FST4Kp6R^SMWA z@i+eYkIDgK#2W(X{GbtRZTZTzUW?#EP~Owq@Af^?k6ei$cO;kSyKPSdlp7Vhs&-_5 zrSx<dJLU-a4T(SOXhvR0m%T>*x#H#rHxs)NpVn<`lAc}1JNtdbeU@3=Qf?M6V6DqD z;*N-kqWxsMuee3;S!r1jzXrWue>^okpT6wpc{}%W8QAxc$)RME?H985g9iLU+EbE$ zO17TH6R?QWA{JvFx&QEUS7=<8zi5vU*tc0aQRB^C!#RE|$5zr9tz>HE7W$}h+IMC% z50@b?f0DLqM6c;J{>#2-H-6Y-U-=<%0VaE$KK44@x$#nK$Lv*(ki-<$`h{to)X$tK zHs)mVxwf7?v2%@NE%#?6(Eo|KInkaAebF4^|E#mQi|#Qq7UcW)>4)O|5-q;i&FlTq zR2_c8DI;QvAuB2l&3f%YV?~lV+I6w9^a$m2KZEk)?e~qJrsw_S@C;C{x`%ru#*@=2 zl^WalwT9buKX%A0*)QL6&pA1dRPN$#1<K10T|P2Y)q*S;1*{^M9IM2stutFD>JA3M z6dz?jCpN8N_?C$?&7z9WnGbJPKEeII61&|4_V<bQclizW`%bsN|K9#yWPjIMdcSXi z{arbm_WKI#@5+@sGkHxVW2k&^*x1U?j4&!^j`&1n(TGsxk>RD4J?9elVK#Lfv1V01 z&wf`Qzbi6#3@$f`=^>V;51(fPx|!!up4od8Gk|@cx}EojdiO>0UY+FKs_%TryVx)% zdw10XAM&o&w?nmyt(^P@@AhK*&1XF1pllHivwy$g`K`5W?09OX_xZCt{}<-<2RZdK z&mD&EnYfK0cT(KRGx)G#2J;(aeoI!FE0#?o&roitYSw9KdzHj81qt?HOaEkix@&DA zd!>{3pUD3N{>SrQ%zqL81#PL_Il7Oi3V!YQ2kL%x+NdQ})cpf^R=g)ZChkynLwu=^ zyN>Q4*D7)(z?$tfa($btvE{|1*zS@giYX1Eha>Ro9LA8UG?MD`t;6bPBEO(@V_G5g zJLBnOEP@xAlcrhFYJ&1b{`#5N3zRLMb__mIpGYKC5FE}O9u3&k=Wdzk;33UFh12xA z79OzQH|uA<nrX}8*?wOdF4%)#tFKf%tktxJysR^qZ(W5f^Ne#M<LtWGnDL-dJ5%jC zV{^t`aW?Usg~p2Ok&zm^#$3*rwRa?0x*qu_d|A{}|3PXxahL_w&D;~r^DX(v#n)0x zNtX2H{ADAf5n?*$HQYJN3ItjVc$|HO!^ylYuRfS(%$!(gOs`%`3@CF^N&d^OBX{rX zWS__Q3U|9)$bR-z{@LsPj^<l;v>qRM*b3oiY<_6-QRLGBWOX;6>dTwR%dIPn9q*w- zj(q;A$}XOF?`A(cWL5s)TB9=WZ`cU%Civ>;gim|<9>~Pr%@d#gBYEZE(^}}7=TV*| zd+L~Xe&aX#ua|xH9?jwRi?(e3%owBc$ds!pzcx?#q$b55q0iF0T^Ae4t@DlcTJRAd 
z^h$l62Tkwy1uE3{o@)Aj!Me@l(T~4J|0Y0F$M}4Y_v|}mpgU(Ss{NDtx(b*(o*ktP z`cV~VV-9ARD<V999Q<y$Wn^?c_-!E$&XJ)#mF$TUZ<>Sb;J(FzUVg_YTmGaKt@v&4 z?Y+>*1fI{lE<E%3TgT<iH1npBBdOrXxTcxPH~hjyW0tJ*7q=`obBM*x=_?y&EZzUL z)A9n)u3~r;>)c13_WV+312*V_-}c=OeeA_ge7KJ?k6pvQbE;+W^QmVw_MAv#<t)+Q z`{Zof?4Q)a7#C@;T0FG8KTS)wU<cLrr>1`?Q-2P0{k(7NwAJwE0_1%!^_80sPJDqn z)y6y{GIM@SXwghFCx?BC;-eRal0O)BRpmvKJT!%Ea3S-fxe5J)?m9BaC2z$4NIRR0 zxeN5@-TS5jufL9cIbTtpXzmzrY=D9XORuUt70n#F@<f^uy`-ok0o@*<4b5LK`c(2{ z9`m&B0`^<sWzDyq2{xbSJ>cerPZ-nJgD2gYAv&n`2m0oLEAgCU&(4n%UkTn8X811% z&YCVy%PZa8(0NY!9v<I?C-VFm!tbSnvd!Uf^7Gj|o<n=$^Yye7q)p9@EgNY|_alkF zck*32e3;@_p^=HaBia=G>i7Pow^tG?AMg60ZdSmUG_9Jr=55GU`8AGG=6ddQPcR=x zf*JmV4``$6$l0H*JOE8?NWe#D2P^lVJ*)BuXO~t!c=jhMZ#_Fy`I)ofx7x9l`{$28 z<$PSR@WlDZ;^B^Skt4)IRLsJTc%S{t-rdGi6?f%45*lq?d2$^h{ZgQOk<%|C?s@`u zy;*03?(1RR#&ZwJMT%#ByKa`w4wRuc*$=I1RbI<mOj{l@hmBtXZ^6}}`-S5={Ek(x zL^jv^t(M7lUc!kL-~XerX*_-UD|kj;PA}nI<tLZ@NE>^lCoX|zq$iNQ#n@K19ekVL z>YH~K^9PNgGdU|^=V5LUt!zUcz7MXMv*|~s8138<RZvuZYI?o`yUi;DPlui*t8~^v zbbYa8@hRl!i!)EGW0LBi&+m&)GW;O<KEM+bjOjhlQ6*y({pW(G7RE3iI$RGOO7>xU zR3*4K;)kp&y5aSd&$N99W&Nx7X%DrV`mW}#U6tWBT$|4m8JS$|FYen~M2sWlyt?oj z@YMA2G+$)%DY-GH_rjA6(DhN`+u2(y*7#HKwd`8SaK)T2gT6cPAzs5=C0PS$eClrr z{E`4BUgyrO&4Hcw{;J&A`4+y<-fi3u--jP=wcqILs!YrHUU2+1=&9>%V%^AvDZPfT zi+Hh#@p%mw&ypV2ygIy@T8-^J*GPVm7?Bdoc(MyV)*fd441BHh%l3t==AL8j%G3VT zPOW$Ny;67QLA%!T^|Qd~j88D1@6$#Z{oj(CzKciYYFQ7Ko^8zNDbZOVd_Fu2uBv>d zi#>eZw^oY(@O^t8CfR$n%ig|g-%<7#kK*gozT#WlHB7c`?prL59~~J=-tqqGeeX-B zgFnew(YEj@JhuBL=2k)H&fF|~H?0%i<es*!cT&@T_c5bALA$Ns_ms3(dLr#H7s(#p z4WQ43mtEN7Pg_R&8peGDd2_1z4xOyNx^ynoUor;Ct=FmN)>l71^Hg=c$XNz2Z`pmb z>tap7S(_f#Q@SV7GEZN?Iy5(aOm`6oKgGG~d+5E?Qu(9xOmwd27W2Se7Q_Lc+_pLY zByHzraME;?^9j*wDjzhAnF;2*gn5?TnYftm4VAMJ^bvhDE<yjkPM>Z)$C&=w8e-CZ zp4JB+J5Id3P4|-)-0ClCk?sg`U!Lf--B*-*JMWyTf8@eE0XN#8m7J5_R{l!eG4nd| z?Z>QtcTVQ~0e#}$pZF2vL3Z7JS$16mcHQ~dN?OZ=sZVwt&%~d}B0iBX<JiZ`?PXk_ zrjMdW?D4ql0LeVgZ(-&b1wUf%Ace2A8h)*xnC4gAL2(KEDn1pzrg`=B_`Jrd0bZ4z zF~F~LStk%<;qYn`JSBc@Mn?RYHjp1PIMdS6PHymH%xx{PA&S9KAMSuRPB%Ai)|@11 zS8(x)`Y0Veiaf)T{b#{1Ti;7f|6F03Uyi|RqK|C+c<tV{(I?_Qn`fjy6<=kQ-<lI3 zKf7pj7`R&SA-qH0%oFA`i{6imw(!yHDKRJZEh|J$X6RqC;2?SbWoK~bRI$#1lrvvC zi!mJ;EIw#{BenFlH&X3i1|QeHmfDd752cNF%#xikk9_^Y*XohRlgscMzlFZcuHzWL zRfqV;{7Y)-(Lbl!ucw~uGF7yb{hQb&*Kh1E*Kg&wmi?2+*g;30UOn>$>fFq;4$r&v z$@MPxzq)>FO_|;HYv4Njx6Yts|JGXmRP%ej@@0G0HCgkp>HzJZ@_lTYQ@mgGD^LG* zPBOcHTRr2~nIHM}<=>b8%<%;#kkQiPy1$?cn$zBrF?P$fi7C)YA!`_a`iu;F2}|*D z;kO>#H|npOgFg&E_E&yt$JJ}!PJV|3x~Pv&V(kv^fzOP?b@tga(TV2j$I^w=ulD4h zkUWupNIp;Fr9c!LeUtrtRzPu-n^yOx9@_C+`M~iHuT>0S+CSX$ciulqog4qI_@8%( z|DjzIS!Xrr49FLrsXilpW|dsXhplcSDq6?aXvg;k%({M~dFLgX=Tdl=cz~9AqiJ?S zsTotOtz~FU`^A6Fj>Tndf4CbQl?)FZ4j60fb*g0;_WOFrsoRM?QXS3Y^UW<YpX8im z5_(^1%ZZS|y~*ee>I-~*%jUo^VnKtUSeW%BI%F^B5BFH+==CLLZXbNNRQm+EypwB= z+V0fjJk$EKym`G{M>%y?%;qdIXB@dVV13<j<8V2&pg898GGlKZ^hEyhsA5wU|Es#4 z{sifx#**E~9O}!JKLuK>W8FHxxn(nJ=44<L_6c<8+$*g4|BmS4vpLaw#*d2n@%MJK z2XTNk+CMQbpI}}B;37!d7Huo8KyAzJ3ox$0J74E+tibj_ru~8Ew4X!!)>tE{b+a>0 z;o8A*pfnV#K<=TZ$e(WQ;p`iD3-s-#{s4Q~8q@RW4X3Y8-Fj}*?I%V>XVb@#HqDxQ zhw>LYu@<t~65JK_)BWlF;V$+tO0ZFN4(Y%Y?faHgEaCS9la<4|Fn)`Lzx{gdLqWgD z9_i=b_F?c$>!PCvc%Svjqu8C1e<Ig2{`h(0@J$Rip2VNBbk68d@=@YP<)=3T?2r2b zEj7PO-%YOi|4$D2t!Xy|9u?a-owhsKzjE%^6|H(@fD^m%Ku_wS_fB#aNaOq_W7L_R z7Cu*U<{&bnu0Mi~f0wZph4St3EyRHn@nLf^K8@|$<kOyrpNe@bD7)i!?uIXl&tl!C zG8(VOdhP2*bTVV>0S6_l-JfIK*TdM$8@|@BaVrP9<R7{l-Dm80h_+Ra<k8vGmEie1 z*vqx4RQu`?qC@2wcb^@4kF!H(a_{s|&qX^!#Ug902Io$-Kl3iMdxZUfzvkL!2JQRM zi<g?QqN{wd5ch)_TPvb$zHW-}J#cug;+<pjuvIR^URuYqibHdg^9`f@!bAR~Q$GK& zKZ&s1BpUI7pC;%!d(XqkkM7;W(p)%o%%;sv)Zg^ixw=nl#{+zCUsFO3c>FMb^%I+& 
z_Mca?HyZP;-^|)2`2)rv{C!4pu>hNnGKzndER+nYg}!9x>AQRwP8^ogruMjQ7-1AJ z$1fRscg{xVbYPPuOW;XkSQB~3)ADp1Yw~LDt*>Ej*FldOV}LQ{aBgb;EvM(<2d{uG zI-K&I_{xwWwk)n58#*j`u#;z+p9Eu8IWvGihu@|xZ_@41K69S_uJ#%{@3CG^R@`2) zWWM%&DaSpWoQ+7=(>i6-;?`Gwbdz}BsRP?`N02@uck|R|WL*XDfqu3I9(LXtcsA4= zh3~G5SmTUw;kyHo7<n4PzL0!FHwDLXwuwELzF(&v0{0z?e+Z3gT08|DWzI}+-%m;W z&;0*|w*MRc2EP{&PXzC#bIGf(&R*k##P7bxJb7ch3#ePP(goex=RKhn^+9n82Z#kC z4wLIbj6Ai~`v+qaWh=xxt=!wN`iJHPPFtOv)ilg46P?)Yp?Jlz$D#Z46uY0kzhWJH z+FSkY#q)3ekDH{EoOzL-e;xcE@b^Fc0~;qLH>poYIpg^veQdS+nBG5p-CvyPV?lfu zeN@bc%C4r5Tj-<Hx1soEC;b*)Oe;OtwL06;PD~2)V2#hrKQwGLkB(Z<ujtCMhy}UD zOq$SxemgqFG`=z9{UUSv?AVJZ>+QMOW!q7=Fs~-~uyx7S(NPZnKeW<*_Kn3q=)ta< zYwor0^2<7F>BJ>!zD(o3A={FkIVaOz6FOY?1Sp<X^z#g|TQpLyJ=hVUXu#MAFR{*I zZs8rpC?!+GDSeW1E9V%=*I6GN@Q0G<=qK#^P(D>)--kl^iZdvqxI6Vj>!Wr2HcPiI zo{JoJ?p9H|qE8?FNu<^i!)&$GUq+19^Ecmj)$i=SuvRFEvtC=;m1<hN2fv6_o4@2{ z{B5Rjk3Bxk<9h0Ti_g|99kt8GWm<mg9zZ8{{gCn;_!akP63^o!&VaM3yA3=4igcXt zV8?2YhtJN#CNYhC)<=1PtB4`y9@zUQUtSI_>~)!Ru6XE$gZ8<Aic;U3mE!Yt*K@Dx z@H6du+#}cs$fttHMXZ^TWlh-KikXB*D|Yan@ZAKyRnNyLhwiz+VE-T;?^RN<CsU>w zpX8h9OwFwUUHm8e`_1evJPPh#=b6qE&Ut4{bRu=U;}3QGnX=*|m3=a+?4O;o51$s@ zimc!cTI57Q#cX1{w!$;oW7wb7&ZAlH4zv>*W54?e-l^vu+cv<@B>O0h_Y>o${~5dw z6}upPD4f5I-S>@`Qx8RaMcWS`>jS|#c1!~M`rEJPw>3QUBzZi3IL{2-Ut#9F@B(@; z#y#tS<JHmWFRr|A3-JSUC}*EL;#@@i8l$bAdn~ef+S&h~J1jClgVO&F9oDnPU_PEK zg)ivGp7Fy&dy9-Be2_-`dT^Dt7ski`%DE8cX!-{5B>a9Z3rF19ot}$_$7S$4KK>Wt zTGIFF7Q}zXyQ9GIU72_15A|+jZ06m9_^)|aW2*Nto}8xHs~OuK<Wa0*4tK*8wA{AX zh~0X<8PhpGaI`o41z#*ondc~z9gC{Cgo5G3Rp4hLmf(q-U(JhJ^kXgbc>e)kRCF}w zto*16e~PDmMcm7pZ_S54YGUH;o!oOIzWx(1#Yvu>kPKoi_LW@Lf1`+p7)?CH7<lS5 z-XVU+KP>9p6_~{K(H-YUZ5?9R@es2azx38N+7(Tgg|Yn}GonAi76acq5{my=LwqrJ zk|gk5V)v6Pp=q{!WMRP{<2)|13%-eyZ{neTe1W&)qwIMM+RMkcx#aTLvASAfCpZh- zoxAt#H;mYXLwDZitJC-<VE2_&yp10M9!VZ4;Xd5o7@L$gs0&%}J7Uw+PU2XqUF&nv zSQ#`{ZCHKX#A;08JW$sZ;PhcZrtOD`qv@G4&(*fmZgx98_=VJuJ^MI^!#ykk`yQ6! z_<P8-cd^6YCB_Gv=n3hQBlPcc$lT6v8U3|7mzmK!wUanob(@*In!c-_-Nid+_uvl> zvQ9}%!3JrmX)C9nWoy@7GV5=FSn1m4OQye_6Pvtt%_W~jW@~MBlo*fHn7}h7OMF#5 zAJj#jM@}U&HU@D@1-D>hZRE4A+W$%|<y9tecTVI5Y>;x|{g?90Tr#IhbPX?VN&tU{ z_n!xj4Lu`4JNd+?{E_vKQH<RKu6lo;dZ_SqGuq8}tu6etp)<zM!FR*v8d1YAHu}1n z7aL7eqK}T;3cXFh?>HVCM)y_^i@yVHu-9ARyqk`#Dv3YE`>!E4>yevXl#hHs%vK3@ z3o<Fhy|K)3g*DfVHqfTvf#X{u$LO;~|B=lb%eS2s4M1Z(*jlL%(qpU+kdKPJf|a&y zVXTn7<-^_@w6Tig+4j|qzfId$HTbwc3EtGU>Q_H*p$>1|yO32MweHVk>TZJW%q5}Z zGHgrZ)10$FrebSVRQ+KHUGSZJTZa4$K0ZTq&A6Y{JfW{y1KNG__NfLv^ilhCL8ecN zN!o_4)#pl|(IHz`-`_#!il6skZ{5oCh~ghJek6@u{XTxjRmh7^BR8%@eq4bZS%@EL z0p}Ldv3IIZ<K5PuYX2$n>;ullWy>_l4bgG~w6_lWb!=qack9rtU||p4F3Sr+xB2nY z!ABVQ$}GCgjt$Q~7j5g^5b+N1yL5F78dDwrimX_6Ae){`;$xu4u<x>1<{Z=}$tc-@ zUi-e<z>Xg$ACcCFMtsJ&$i$=QK*>e5Ct91!_(tGUOY5n^0h@j=@=CVDR@=@=+w+bs zFMPibzJ{_9)z0jZ#wOW{-^aENe=L+-c@c61n$jAl;bYi5&7oK+=R*YxvmPG7-B{r{ zd}pmES<p2lZ4;)jB~y<Xu^)2&uO7H;z0;t{)!2sG>kZE_qsl+_4*AYv*gn!XnwQM) z8G+->8?qwjcy6STPrF|1zHaJ&^af+E@LY(@sK1Q5u^szqXX|AAG3-NZ8I#TzC*0nL z{iwM>es5Z~*yaz;6_iwbis$uLhLU%KEAL!l`@UpsEs0)`UT?n?8271U1Lu|JX}vw1 zbJH1k{<vYuGS=6ba{>c**PX1sGt%oL<-6F?0Dc416=t2H@9U{^esgJ5b!#n6+@igf zR^950?!_Cyn)qaOkN7+5R-Ia_ALc#b1wP#&e5gIO^_{<HU7r3f`po7n*$dbc_Bln? 
zn|;_jOC6l(9LLM)JKtZjz*{TzYVF^I{IH;*R%pg4`zzWW{VVLMut6+|v2+RdM>}?_ zZL@AFu;rBYm;Qw___C>W9#v}xr;c6J;ov{(T-*~zX`!7bg>`8BdgR=B@Q%)DB+;2e z`P&Bb(3o-A9pZ(u)<S$v#9odxlK=L5W7EydbqQfM$k?SfV++lqA1a$JR}g<1{%4%T z+Zu7vxD_&^^PzFcJ<&Ek(hB@dD>`{!^i8g}#u|JgImbsu&V$C`p=l+f*&_zN55Ghy z@7>J%XY%<j<M_dtnrB+k7ZJ{hzJrY*+cU{;$%2f&W6fPOAP2Oz|HZG<vH;s|sr_!| z-aO^R(Qk|2H#0}9L7+wXC<@|VWE?sludy#@+*UFEJ#?0A_%d5QE8maX&!0r6JAS@L z*!v!84R7C(*Id{C05radakW2Ms`v%`UM2C^^4t2KCvV=-Iqa{kDLuRudeO6WsgX0o ze9CuR2Cl-s;+7m}?*)8FalX&z`*OZNTjhUcJLOi>N989lc)p+gaQxkzVW{5ES?S_i z87KGG?_G)QmTmhxelYPM^A?}Ktu!hcD<|HeI^a9JlfKlB3$)}yzuU2qwdPOH=csre zcifLUUe~$B%(eYnjl7e4d~|0Y`TNdiPUrI;ccM<4&mL#Z@hzPJW6;kNMz%ZtDA^E} z@x)CfhShiw99y>=dF$v;Iep1FbmzYEhUNY9;k)_ZuXdc(!rF6Y&eubvRZ6d`4EiQM zb@Yivd-@K4+RwA=e>_va#v7n+jh7gx{vO8Wv>_R3w=>+Z_a+=aS++0CA_s?sA46r; zcC~d9-x#skDpQVao9!FZ+Qs(i8Ae+(Yc}d$<<u3#XBhZg@sga;p|{G-{Jc{7Tt{A- zozQU|dl<+Ga&_iSix|O~IyYKgGj3X|5v&BitI&NNPCWtRVfltvm2+QZIXUkFW4TjT z-^UGG5-2dLoO0Q|LB}U!8KG5y*<+SivqSd<$Z=?yH{XXId@6^<LmhWs?h+s0nNz!7 z`ib#5{(jMh))4ZK>)GA(ujGP|9sA+P&YV*Ai^jo!W79tT1;~Rv#C^=WD>t&8`kcCY zQd@`@4Ky}G)5w~QWxp?s{sX$qKt4ES)ZX%2j68f`9CM*h#JQB4CDUZ5s*l<?$?i`M z{Rs>Yt?H$2y<@;v(E2!yAu!xn<<u)1VW@lwP97-bt-|kT`-dbWov{@$CX2B>kNp(G z_Ug>69VAnVX<Pj*r@tCkH~owFh|N-b75%MYeJ$B4UxPi~rt!86z7!v2^N3ItYtcNd zwed~FM`LGV3oVeG3IR{Z$CHs$HP8oopE()y(`V-?h>QrWa&Rj<3193u^)E8QSfzNc zo8irae1?e)$ku(%Io-OK&|6<MR;=e;=^sb;q;0YM_^+{x$P>&O*oW=v$A%q-EjyfY zBUl5E#IIxy<jc$CJ!8$8-s=(Fl?>0jX*~3{47=X)hn~V#t*ti4+w+SpTPxd0XPeK; z;1g%v4#t%1!5=yge<--M{h=>ICtACLWAXy@KRtlcxkmKOSB*_Y{*r0fxwW@ar`BgB zw9y2wm5_(Cd*qghkJ<RP?_EW&6vS_rZTXCO!7}ztUdI0^-KKtMU+OjLv*&;qKDA#q zMsuKZ)|WHx%NYNq;Ner?V=lJ*oEZ4A&-AJu?F-LCzLg`lXF@yJL>-Fr7JURR&C4sF z4Skn|>{z~)(8pgWAAm2b0yp1RJtf$948Oi<%Y@KVcQT$WJX^*%8tzW>r}}v72&3aS zyqE9?D@w}GylES_UCBJ_EP-fJxOpG^m5Ju25JTu+{nT~f^4}R(^^{8*MJM7NmDO+c z&2Gy-p;3DqU%T6?)%x~);d><XjjorzY`B}bE4!pow9WIytT}3rVGHygPt6ei68_UG zUPd>qWB#hmNqrXf)H>OPzW%43dGIlQXD&1kMVWaBQU9I3&?@z38Gb;;{i}bfU(a?C zgYs*h3CHZ?+H=LeL2YK=fCQGQ8TSKs@T2~SW;Ff{JZ~j;f_H5afIda570AXQG`if6 z9}{`AoO;6_<ks$?&N|5~@QePj*T>qA_}Si%wjZ(Wm9)(wdBpi(XsD#(LF|=s`c}@I zS0hU$*Xr{O;tdAZH&cN-GA#rSZCwUE&4->Ep{F|REzL(Yb1!|OdNa6KB%Ts3ly9IE z+hmN|g0}Fvc4%A;nQ_g--z&MgjrKJ*@rc$Jmmrf>Uv___6K#GA79qp(<9}scD7k$U zygE8YG80*x#BUrgr=K~OhGz8+GiK!6RTmKs?RJjmM5JTnKh}C(^Si*7_t=k&BNu(P zIzwx@!$Pb2kfja8XsUc*n6XM@3TDQnI!)ej>TlLKocigP>NbaI4Q$1K!kFk6^3`Ho z>Ai;>lN>FS^U{R9Ud4uy?W?(7n3-#5t(x9@O6T&)_KlpAnQQTu7kAED)xnkMt{S-* zga&Hhb+0}at{i==@%l5k5*{>H7PwF3N0_g{@klQVhmP)Y^l~lw7{A(pK7N}qi?&M8 zms>};X$zd&v}MBI<yo={y|mPkH9Mh4>&`UaS$76%?Y#iz^bZ-FTlRcfjrO?45<`HE zFMVh-&M@*$GRlG;Wq;&if2<y`KXPkZGPIF{-GdCb=TKvnUnDRpv?^iidCQhx<>)z^ zE=C!vehgeT=zsEQ)9IZ>(MzFGy~|o=fJW1H5Ofc&c7m$`9%FpSxSN{kqjYHw<EtwR zHg;3SnFqBcU2e(V!B-)=>(#!;T{)3B{nj(PKgYI!?lf(y{p>li`xYD@J(IpA>6bUo zCy{j~+<P88HszC?1)pu_>DadR?l#5uutw9_y^Y38=SIuk86Q<%jo0rFkb5^L_C}qN zJaC%X_NQWB+h0ojZQIt)Zo3#?!M~vIhoboeG~WZQiSFg2`4;7}eKd}5Mth;zbRT{> zE#row`!MS=(LOj>pz(r#^4+Czlkn$M1kNIk8G7y|=Xi-(a#J7sn$@E338Amed$MI> zHL{>tbHLtkHF;yF@*SHMd15tcZ4|gOH==s>pnoK@B&Qtv(4?IYvs<L6Eoff4S~3e= z-?$9ktA|ea^Zr3}Xga4UvfvX&wCJlw^kQ&yE&rF`)2cL#yoKmhY!cVF)}3?mam{C3 zb&P9S*0>spPhF3V>5OaL-Gk%$KK<B-P7R||OUzMywZ-YY7PZCwPpjRN;jfl<YTqAO z*-E_;==4W-TM>K~GaR|A=ReH&1TD@U<T2m<%zb1|sA>XxYmQ$|^#|dtHU0=^BITni zj63t+voAgOJHc6YeF^46^E`n)M6EF+<inC(8X=$9x9_ft1pUUeNXC|4b0&TO+Klo1 z*(|@bc+qYfx*U%jKEVD(&o9$?y}Iyy4Em_G|2^JClkER?JV)OS;Fp&Fus#=ig*hR< zc0~`gR_!mjsTW$a>z@?*TJXvB6I)p`5_{jFan$1b*Zx)WWa$st=*84etRFI-_;S0< zN5ScT(cfeSr?WmHPB(oBPG`Asx<WXWzKDQ7YgI{}gV&*OTI0d#0~vg(kEg_EzR~gJ zOuuAPvmetT-ca0t=)a`Hyr7$y*#J1xo(M5i9jxm+lmk9NZa?O`LwYA|SDf00u7}oS 
zS7@F7u0Cl)?@D&ar}R8yRV<$Ldji`~&pO3R$c1X;g8WbxehTZ(9ArbFA_tmxbfVUB zR@R#O?~xUf6W1Xx8d#TkWyMeFPapVj@(J0pqREKrOpRoP<irB-6c)c`#vomiJ%%7- zkRLyL4D#taV~9~+wspjo1G#-c&YMKAt%F7VPuuk(AN|Nghu{A*!|(FBshw>8WIm=B zG56y4aE9N@k!PX-hu<f2m&2f6evs!6{%UBR@AU9|cw(C8*{AJT1#JekKFjd@^^?h2 z_7Qph;4eNp&;K`lb9lZBI;qY#R%pN6;p+r^t$n|28dVwP^IzuTb<TUD*Rpy2!|?eW z{S+T*T;lbQiqD5X1fLGSXYjc~_!Pg}F;lC~%yV!$6h6KD{_PAdKMKF!l<Aj|;djxu zcu{mLe&5yup9z;4{ua-M;SKS)_$<tN<HOd=;&GeTsaL!{RP2QKUbc6q*<Z;y>lL}! zpBg(h!J-EAg>8G?a%P_F1GR1D+dhyy=6c2h^iMh--RaWtvg@=Svg=u8<T>@o2HeV8 zf8{hI8ia4x!H>wB8IDX9?PyK4nosdmZKg~FK79c>Qx1Jy&9i0Dfc(#QvF?aa#{(HX zt2MReu@(M%7kv^NGY3C--7ML%G3=yujB87l4rEVeKnE711Iw8|Cztx5yw+Mo`e`r@ z&YzjPW#CG=)63{@7#$oTp2KeU7x>@joV?v#o_2FTV!MCdo0bhGYhAT_4(%R$5qnd7 z0e|g7{yF`gqCSBq@0hV?2ga=R*FoOTwnI<0{`zUAZP7qCb?8&JX94Z#yL85Hhv*9- z2l(#>_rA038|^zhkyWqkoNRx)6KkXT_tBPY+@Wl1@BZi0o-qx|S@*g!nrVBh-R_`& zKz6Riru|RpHtiAUnK!OcFvY{%|7GG7*w4Wzd$avlmnWTEY)9GO5RBN{9pN#=DSz4C z$Fuiz3sb2b^~4L8l=@#0Z#rvQC$=$pl6wWR?f{6dkZ)%z=ltov?yQ=}926<<HApu~ z&FE*}OFi^$zE^Tq=QjF~IH#sucB1U?UfxN^BNgNGEsDSJ8acYS<3e<!=X0^qoftd8 z6=SD1g5Bx(rg~^l`%3o^KW4}6-7{ezZcq7K*k6jDPn|*94x?AK&t>nGP<QHXGnUFD zH_+SWQ!mUh_BLMqz*`CSlxpwV^N7XWGd+{d@=DHkaPLt^z~8iZGVdrxB!|9RpAS5u z{`7K2t#FYU%O7VXF9g1sw(9X`sE=xgnD2@RW6$q4s|N7Fo-Z*BE?=+ib4X@#;VbXf zv9Fl;9ewy8{rDk=;g95Qw>$8~t~H~QlLg?sYoxumYvVYTBnK7k)ZP{9U?1j*8C9X= zBYX<awRe%T`gODF0pfZR$nJ-DemdhhI@yfst^z;(Y{$lUfY|jNjCD&Ur*z~qr#~{0 zGZCL7&f>T1bt)F&v%H_4FXc!qihqIevOb+Iyi87yD_-iaW?!c`{wjS6zg|;S>oY2~ zk40YQ!`HGG-iF^#b*_T;?mwQ|QHw39J=bo0v<YZ1jpuazoMlU1N*j7EJQgx8jpdd1 zQtkTPgWtzu%#3qUka6zg3~ehqK`?HQQhbO#<^sl9$R3T>w-cyOalH?)x2tj`RfBju z46QEV9r?eG-!Yi~tG{k8^DzfMa7~Q)iPpU%UO74Z<HWUQ`{Bdzq*Z1{t>dqCI=SJk zOwK^XIOlN=B1F5{zIh*eWa+(P-4&9KRSOv<`<vOrT?4(_d(J<Z5ZwyhY0p_}mk)?5 zKu*QACy?D{E&EkXqlsZfPLxtd_P%^}Inh>vvKl`*-8YqH`lLN}$sUvQ7S8^)o~bTZ zf9{!>=}&(AcbUE@X8yx)5zNBH6P%e?0<WHck29RMPl1n@Pj}%%@f;35luzo<>;e6W zJ^vKvEEEIUg^l3&>s;g5bcS6|ocAyNIkB9qSFnRRRF~shY`$~&BVEJ;%7;~}+<AYP zZ?Dt4SaS+jqGPQ+O!y|XcI4h;Uz`9xPlTT*!Oy3|&u5TNaAefk%V%A|o{CvPj8|ND zlnm6#T0;J~XGeq%*AkOe4lR|hx;)tmPg%$3?wiToXp*m0{)?lPsn<GhBX;l~?R^m4 zCsB;uP$)TA@xf$!|0!NtXzVB`G-k?Y{to{KHxt+NjW6tr<Q1_`8%o~tjqC4QdF#S` z+@aXFi@U@uXofuJJB-l<Ee4<1YDueO^?hGvT((TxIV`#xzG*`4rGNXQ7d>R3{UG*l zSTyJCPz>8A8W?UQU(niyzFNz;BZju@=XV*g&*rD&;hg$TqrM0GN6u9Ig!c2DyKr`% zW-R?4aWt0k*IzwLoNVn_d=l@i-Ut8hwZ`py)haX=uLJL+Mw7#Wvu!!-`IcX>Vd8dh zHHv$ZN+uX9YPna>;_TOk&l`)Zv-ysXeBIewCKj22+-y7^eb<itc)~C)ine5r&!~#} zobm0d8hU)&s$Ao<$GnR(zQ<p)_ZS2C6=e?w@MX)k)w~^r?<O<1a~58n6rG87ny1<^ zq;VTB^y7JC>N@DH_uf#lj``vI$I{*w<xxVn-D@O!n9uTYMJ-dAvk>#u*J30K*;B6o z_o26l0Tn*MyXM|HKXhO4G^?c;nUv)Ba_AtKpQZuLy$Nq<?jzU~n)?u+_S_%;>OT4C zoj#O7lOMGY#s{f(>kQ(=M8of1o(!%E_xD^+Y}H+Low{SE;;!6I?I&l?dGnh(lPG!o zdHCiZ;GgT@qiebU=yT+@8)@&!6;WsQ+!ZHM?NRX5L(W2tMR<{&T~Ra4zF*?JMN?bW zeVsgSM^ih9fmn11_;#LsxjB^lia9L0p=?;R4Ln{6?s~yNDdS(y_pRp6+4VI|vxU<b zINi$mVfyQ8$AnkC?KJfN&)NySlWKoT^Zrm~-m!fvnukRfK<my}ppEuOR++LP%JjUS zYTv3dThjIP3NMtKziU{uF{`eIqp9{=Jmt)x%F(9w-TM9my)2qh8FM)Ig%0*B03ThS zRmN`5Q>L8uzNYrR=jm6?@aXoex?<4qr#<EDHofJ**FUMvr?dLiJUqH5tFC!(rrIyb zDr5J{Q>KCTma4sH)BUPpe8SoMUBkJXba0G4hf?k3AEn&iQtctil@ZHQ_Eqj%psus1 zNBpiB0qom~OTLY5_j0No+x(Q}rctix^;G+LDn}jCGbg>fZ0@MNP7J$re9=8$*jKo6 z;XdgZ@P{rK5k1O(19c`cdf+znK($%iSCf$g30n>nwqWlK=m4#^RPJKQgUf4>NyU9- z_^x{w)TCw7<t8#|M3zh{Z0TEICW|vNY3K1lnY0caT5ev<UiOHzY#ME~jJIXei0EAU zH7J{kJ+i6rMA?+-i;4g2qxL0BHofG?roasYvT2Van<P8?`9G6BSp)L2*v?;K>q=t! 
zjnRcIrF=?Wg14Dzd5#S?1-+>F;mqA`^~j|6U*)bbTb>v9X-$4|Sqkou=ML^l!JTl` z&;R=myZhpn-Glfs(c8A{&fuq6?R~_#5`NH0oS!R+U$?<Mj_zb?X=?5$_O}P)SF-P@ zOUvP6TMlo-rtr!~WY<TO!-Xz6EL=zq?}Xp~3qI)ibPe)e?Tm;vlRLuOPT)PKoe>`* z?}u)u3t6U|gW2sYL;rc(Y3TW%wPTP^;v!EwHACsI-v3!U^?ymV&!wHTUV+!8yF7Yj z-rH$e+Ivl!zvXZ8mT3SltmlfIGR;GjLD#kq&nmNRh%zzsNr}qX`l+~2vR381`YHC; zR6BbB#l3auKJ{w8hbo7RU*ajZY^ZX``9G3_BD=ntp~^vLM=6)qcWHci_1*J5srHGq z+4sd7bYgLz-Ja>zi9LO(cH%7u%GiDPlnF4#4z-o3XPf%tDO1k)_Nm{t-ps;RjazSW zcUk+pD)$u+zP#nIXGRKNS>^2hd&@D#`#km4sDIvajB~rjVC&%wzM4IHxGUBE3zfMo z3tyfx4F`}tS!HZ|dCIK&Q>y*OtTHyfJY{0g`@NJ&>vQBSeI81m$9|V;=UhZ_@9H$Z z(&a{+p!Z{cNVV%8<_|6RTB`lBk5aBX)m}%rQ`ra8S$l+DOtm+=%8fX|j;Nqq{Y$C# z`#+>yDdn1WrP_&C$ZGe*`idym^^;V4CFNMpY_!nT))l2KCUL@A@7m8;;~1=Q^xNwr z)w-5-%to!JGXBGg8Qd+ISvT^$D*E{S_PTMSm9?(YIwv3*kFU^fBYSOiwGrF$UbTJ3 zU2)k3*db0owAQLN3(_&Bp7qvX|EISUVPg;nfX_odr-F(D*kMzG_Ih-nzq(`9@r`P) z*g2n)?blFDMX4eG9R3B$%MYk^%WaIQVe;G<>&z(q=?kBUzlpKnt551#_*Xi+I~bFh zU8mE}z!haJIaf?>3DVak{Hrak4~e;m_nynV(2o6^@AtoI*ghI3#&f9s76(U#zYisg z+4BSk5!EUG!z--OQ@*0cHS}@IAIW#d9*Sa?<vV4sh4TU>@%_YnvENA!F>+P=l;bwP z@i*VNe<FJ;E5e$m9AaeIC(-)-9q_t|_sVQLw;+BE=Zw<pxxurwj*mToU$mTZ#^Jm6 zIsOy!R#rWsb25B3;V*2WPn=Qg=<yd5%gY_Y^qVt)+6S1Pb5}YS3wPL8ghme=(+eCw zbKmRqDfD$icSlB*x7)y`)pOP*OIyAc?qBeAqu+sPZ&~AKPiSd^yVrg6W$T!t7UB2W z7mQ8czw*MC9q6fFA}8NJZDRk)a%j)M#&5s=^5nzNJW01U;&<KH%HD|V@A3F*uH9V~ z)jsjjN2{VY5S#Q<_TB~8vo6~1q`dWCS9Vx;nM)LBpSUo>k!Wjy_32WZW^bKf?C8Av zCVZOg#S!Obv6e4@Hnb=81?XTUbRc*(>?-E$x70i1M^|<{n`5ka4BN%wNn(X7QX`Gz zRnRW<DPNAwlRvx9*feiSC_}&beYKPk{U-1k3ulQdjaZQS6#uoB`jmgsI<2?`{_A9~ ztX(m`f-B!&YEH24Ib^N_kI7c1oW@YbeN^n(>JH%wdrXW+{!-C6Ies_2<}(`S&@M6F zjJ3E$?}?`J^&YVZdQZQF2j{&}nfHA7oR1PO@<--OeZhCs_^$7PiB6x^5_hV$3+ZbS zzL*03)d%4+H-k$(b7**kp6%CJf+GA9?z1JzA@%;q%WQwC?Qfw!&KTe0tl%33<QBvK zamQ|7G{hdE)1NNZmd_%~-qrZPV>tC%q{J*t$55{(?gM{OIF-(wVVyS7u5)MFXY@qc zXJp@9RPoL^#M|TR$KQU_k^H93;@y?Ry+}T4PeS7qjwJW=-NCi|K=RxFlJ{?ec7#(0 zpXxuGY{;3x{z4z={({#CegFUMFZpISK~Hvni^e{<Ifp*hgCFUM(cDR^e*Zi3h76gh zJ`MCetA7sNZ>28hUCqyN@)kYByU4Z*=lR3bDf&+Tu8N-a5$HQx$K=z$F#WSK{=Adv z80)kG8^=dZn4^M<7W%3EQm0S*=;vtqneCUiUdHc=U(dShreuTsW$0VU4)L?*CCK_% z^Rk?}R%@(T=iglQ{O08KXzrbQKC5~%y2J7JTj&SL<A7!8%!4h%OMWl9x8i>$ub%Y0 z;_s4^bWZKWGXsOZZT1z44bF9wUj#pS&@yHqAD?J{;c{|J5obhB5RHxesyo8p3e8GP zFlJN%$4~gga`a2p(aV$ad20`84Y{*x$=6ypEOdB2dxaM3$@#;Kz4P&f&uCaa>)C(x zuSiTKR>-(_aUFN^-Rc_|O^}~30q-^BSuMd=nma0McsACUUU4XtoVsYFeAA2OkGA$K z#W&hOOjEDqFtM|T$XgwLp=RKIriK^Hjw^^Acqd3soa=7sZ(#3K=kmkcA1$9{5C76D z-9h^-0kXjz2bCm;o^y_X`!6?j9UVDyIypow;u)MVOlJ&R7(?gT#0rhsSyg%6O67f8 z5ud_u&Xg=?Jk{)FAMvl>Jg>l*?u@gR{D(>GmlyITwDgc`Hh4wzGc(5F7eB+8zJPrj z_AM7RGRBpRkxhw?m_5co<2TP_jH2hVIaRG^a`sr?m(8gPmv9z_{E)(p_RgPYuk%Ly zDFtQB5#v_QE#+ni;ENZYYfHg7e)w|y@a6d7*F!hw<A+y{xm^74^Dp@3#D&b$`S{_h z@xwQiy|B659JRg;KYaPwe8&$T#}7Zl%*oXp2Hr|7Jp`|b2Fjmg{^@tM>Fe_|myh4a zSQekxviz2NIvRG3i>}Ypy?vuvM$Mj+Y}`98T1%fdjLzA!iFe?kj;ZjB;^`khOb%s! z6N6kF{zXkk+HNX}hnd%pJ&jC1oN9lGHZAsVGd5Km|4qkb<FL8Wx82a_pIMU%ru*HA z8Du}Hu)_QyI%o>{g~xN&kuo<^e-H1i;Iowe8;Q%3-Mm-$v-BQJ7yfBu!kWb!{u>$~ z$3qC5wt_G50O!i8tWu-OIE$QV#cS+6q)R%LLr}6hKVAi|gi4JW+4at%Uhz%%FDcvR z6kp%-lXP1j%bKGgboRe7N7I=j(P*GAwe%kFmCgrN5?{r42XC)~7vfba>QA@x6va2O z_7Xga>_84YVKJwA&ti^BnAaQF+i785Usyb~WnTN5j@n)1J3q~sz9%oYrSbDM$%T7! zqU>Wp!vULy@yB=MGOs$9dWd}>hlV*TGC;%n{ya3SoQ!%`eDeZye2}=NPVQZE=D(9? 
z-~EBHiNNsotLS4+*8GcC6!WK?<Ui590s;0~*iUlkoU->#9ZlYP=zLz;p3S{x(|z;E z&)@4Wk4Jnt*o{WqX9m`*EyYzov?h&@5NpR7w3SV3t<cPN<(X$M{zO_+d|pAtKXs&O zt&=z((V8>v+5@TSiuoYfY=`r#wL3N4=AVD#4qx(#%brn=nBch<@e5Y`@FZiC*86%c zno-QeyZmp3o=l#vLKbLWaVEBp=8D6MI~<&)_Z185IevqB)R(2KbCW+w@0V(wU>Vpl zS#q)d*Quq?zLM7I+M^C+;W+@Fza;!}J~l}1RcAksGWX?xU+w1wkr_SUH+Vs0bB{k5 zkCA6Yc$!R3LkGXr4u0{C=)6J>^*Z>d{ab4Kwt_Ss6!(xl?i(1h_B<S&{4MVP&9)PU zZ-mDr8(nMD{DHNp4a;7eUSY)2<xjrvC%m3$ACy$AJ<Dw$?1ASWrEmD&pD=DOkB09x zHa!NO?cYX4^dI@Xh2M%D87gLEP!Ff?F3#4)R=}7s6Mo!}F5ZcZ36Kj(a;OP;Cz;ZD zx-lat8O;Be(8U(}cGAUb?k;>}f%Wp{`RL+>BaIo8*&E*AD~xYAUcYbQ7^7YKnSJ8{ zz2@lV@OkKv&)w2*+4|YC^>YyZ_3CG2)d~9fYGg&WepX)Q^txTNTM&Qir_7N@Uyr8l zSJ2mKomFrWU9B96?syKxdC0z5FPcP8&p=Q6*gJQ|cpmwYHAd-OM|V48tj*|djZ?b2 z0^J=(cQ>HBrAv=+Co(bJ(;XZ{*fY`ie~x{kv)Ieg-3{pO9CY_L(cKG?U2W*IcE<lD z=-JU{htS;(&}2EfJAl5<#>Mx*g;#(7oE+U=x%bs}V-tRu_T%t_*4z)gmD;hA|0KUx z<9peO&V)t_96gW!V(Gm9MLi$;TWaYasb4fEonU43!RzSx?dXH$(g*1ITJ(JRIAeOF z^Z|N)A$s1?2e<G}wm$e0b&BuhllJQQ>5R9XeYjn;nJs&z=NFRuzrJ`V*_+n$BcDmj z-dg7FU3g>}^-Is!@tLiUC!yEHLk2oedj7uMX+2+h=Bf4k9Og_sC_S%x@=DKe>-kSo zuXr-@`k<a)|6IDQV)~%@Ywt_#*u?+3%=~rrrj~vqd;Z{u|IPV(K9O3gSP*i-*}2wl z;Cp&c)AmWB^9n2KCK{WBuXow66x^9>z2nUHZtQr?ceeg-7&Wy1_s;i1`k?vN-l|up zNN?@u9)Ydgbsr;Eq&$muPo=wCvvjxOQKes6o6_Ux`hBYX0>+V0|Fd+M?p%?5`9s<( z9a4v<>F5@0P1%bUIy~V|-&qGAza&2HL1#;kpGQ5;vw1wLz#lUoJsv=hYo5jHW_jQd z1Kj!;_iel{d>-{(96j;@dWH56Q1{?m$X+e1I5r_YhNfSq+Q;lxZawY+$;3B#b@j{S z?a=yrC|%vm7}fVKd~$*0N5`vW4%$QxUKeNZ+VpCw{dbH79gG=%QM3(Rl+nTbu8N-Z zEn5e_bP65(l69gEru@luu#Kzq8hzuSAEl(?QR1aCYjnFWz3}7X^bMW6Q9AhEuN#}5 zd(PIu8x3^uT7LhS->J1aM?Z{s!QsTIj38cRBylSiKHdQFUcP~I^cOLo*Y3GIN$mf| z1i944v&NOon84nA5qtBTN5<D`#+T#~3+E4YO!-?n4<d1VJ6ffe#~L#pebk87j-AlL zIoD(Z`|x$FJ(ahsl{Fye>+F0s#^|D!5H_o&xGiFLitY&aZy}Z}NbC?geWP@FBoj*( zB$jME@K$tH3|*DcC-_i}6>p%k<QrE^t<H2S7FV&;ZD-m(mmRI=)R4PxJo!V4T3XNH zE@0(1Ag7zX9)`D%-$72fPuGHv%fZWK;OA1}DL+LFPx@T_+r+JDoIQWVKdd;j4XkJK zw7$oe%=%9D<U{bqGH?vv#n(Zz+KZOmnBCS+e0>WjxBbBo`)`51WsGzCX4d)mQsNI9 z$vC)K2yW`Z%j28L5yc+zf(hj6MF+}X=irB!l%<L@Q$9wEv*8g>oY|@D(sHw)WfnH+ zccB;b!bW@jcY_gq06ic%Xt3WXy)X*d-G%QY^og`y5NxRR-+!`}6K)i@-T{6>+%=P3 z?=#dZnuuWEIyS(tUFo)NqYu+<c~KC506%&*uIk@TE&Uxijg*^uE#>0>8X|@x&}W`* zER7#X$4a%Ke;@yLD0yBcR_eRZ+#X`3zS0~@e$^ySirAqhaQSKQn8UfHIG+Y_QZI~~ z(4tr=;TQWQX&g+ow|elgD1(ok#1{)6o%C7w*m*8_w84ee6s`0fzuv%F_*oymsL4jv zZCN;32~H^E!pY6jx!_B5lsJ@{uJz5cJBgDJJsl#(jC11wBW;hp18w`Z8qsUvrCL8Z zdf+AeKk3*Po{5)g70<yqEq%gox$u%jzTg6AEq7ChJ3oBf=c9Y(bK<LcS7%fj9|<KJ zr(k<NWF*7+@G14^uD9in5LZUuu0~$xLVIEAJ>Wk*UQ0}ed0k^a=b-U-m@R=(-1|;` zeAYfUz3$Jg*h##5SU$81=2Qho+jjXzYxL#G2lM<b{qSXgep#Qcd8T~T4bQ-LjqttJ z`{M$B&RP@uuk+m({x;Yj$@1&yUa%XXZ}Cz#Z8RN7EtUOrV7n0$PBa(&<Z%>Uwy{=^ zWMoY3IK{gZwHSO##vqr7cS*<S&SG8IOC3|;fyO^yX6Hd|WX-<X$ZfnnYb-~Iqs6Au zI>R1MFJt*^)L7KAc$OWL8g8G{v5NJ7`Knv(aW&zW)40~>A#WJlJp2PE8{0x+QzG_S zzmTOd`-6Pf-7<;=fAT40IQ@H$JWj*zGV}FJbl1ys3w{&*JOO+G<CjlOegw_!Hr6#U z#$V2SlA~uwDZW@@Q|<BV9mUz)Nn7@K{&0Cxx?6f%b&GbS$7N4w+{=RKaB@CLuIgR+ zrsDse=EG&Q6Q4ZXj`MT)@E|tSe)eF)#7M9|(6@}a2){ne&QJHHB^50#-wq}B(3knF z8E$-xdxRHNv|QO7N`4w%-2ktO{>H$Q3E~<fe1`lxXSc$uA$WCwCgE3yCR_1UT}PkR zBTscFhvWnIYb;%dOl?4>Mv$p3;35nzdcj{Qv=BkAvbT`5pzj9gJA}OrKJE9ROFJ)@ zLqDB%Tv^dTT$ymExU&CAdpqGH_#J!}#Fa;Q9duHLKgrJ=3&;5G@LLqb|M-34%9!UH z8Q<fKZ%bxuk2AJ~jBPt(+s@d&%-9a`UK8{DUB)(#aeaw#HQ{4Ow^JA|T5D|jIQ7_b zUX`9-&9&xszUK71nK=b_@#}}%@s`e<E_@}`zKl6_Y>Tf_*Y=%*w#AqD{U7`e|3_xd z(Wl`l!|i-+&YZW<7tO8Yh~|;}9mVUA=b8u3oA!0<d^9q>9C^wuh%L*ZljU!xc3=-L z+JH=7x2U}3eCXt3&`Be7vH_YH4SjS%Cn4rE%y$D?QGI>n;4Fn+G{5Vil@Ku;iapJ4 zZyU1b?`cnMIPGP()j~g<wwBXY7jZF)f!@y8Rx@AQkX_pu*LjTVyZD4(WL!5gt__Uq zdyMPgqVrldGA<GVM}NV%wlarbWh_(CReSgp{rAyE6XVgm-bfoeiNjd0Is1Rqy?K08 
z)&2i}XNHiOEGUp2Y7!`#4B*OQ{9u`Y)ewZ%i2Fjo)=mg&-4|2>xJ(ErgD47W0@gMe z#G(>E###;7+5l=1t5w_DfLkEAuxAj=_xZYaZgLY@?C1CSeILKyAM==d?mhRMbKmEE z-rISf_xnX_&Xz%|BFilw9(&H7`Ad8KZt~Iu@x9SHRGr{eap7r<!=95?*6LEhh4pOD zgJQ&lSkvPi^RNvctJ8l#J^$YJdg0Ov+B3hMwpP&AxsGq2K#WnX*T}1mv2U^bf$Xur ziND96HFo@RI>!D~9C<)H$Da6cd;N5c3B2tx_Nj4Z{8<}^5||I)52#lRe)iMYX|H4K zv^Cu^_G^%f<j)#;!!h;&Jx#w|d+gi)&|d$D#*R;J$JjrK3r8GdAN)ak{h1mQ^VenU z%j3e8)U(I#hqisBb#jz?)3&wOZ>Qc0>UE5rw&pp;J|Fq!Aax>dJH|e>r|Bnbk3H$d z_WECG?0<2L9pAO}j<Jv5gkGkxGk;yiz9lYP=NNl2K2h?mI$FKC_(lCuW2e22vD4Nw zj<K)AP9YzxT=<myk}aK4=SD1_<aG2d<4JH-WB;o)_F3%X2e0ZG-sTv4`J3(a|JIn8 zKYQ#tCmLc7*Ez;M4gLAfA5bp>4nCl<)1JnjYvOy)@SBdYzXk27{@&OvI%tpoi}{=x zhc33je?=ojA6uSluYa2Fsbs$Fu@<tY9b(S5I>wrSo_wChdX##_A7b0kSgEJ6PNA(8 zw6)za)@$%9yNNn3Y<8yqcf75ir*y#j6~@|F-(ElLDD|GFUd0=hUzy!r_j#+MPATi< zBHH?)dZ~2LibP{|Dr40CtmM8LY*KTGD<l215qT$wuHj8Fh9^=dh)uX0n{Xv(h#uqI zP!X{Q?}Y9?($`njgpbHPWa*ntyJu3XOE}ZVXUWspCyeCGDuaK?joGfi&SYfj7S2x% zgl7kP1qxcw|AOeE(_?ni7SFsTchL5QwEb4B?bnExvKe{s8roe6pH5COR@cyOV_#!+ zGVMM~n=6sim(b=lj|8HsKLs1+HSqYM@c1A+USsOTyL#_h`1_k~auiE$`L?~j{E}<h zuI)vBMo+STgA3bZFKj+NjFIrZZzOx)g6#c4346#cWZ(A-8*JIT27lcS+50PS4B2}a zvUhj;^iW{pl)KkZKANs<_KiI4HDzgU`51Kmj`1vz&2&W7#n?wRrlOnA>KK#3cWX@3 z<l`^789VAn(3(ygX{xyw+WLEJJ87Nzdj<Y6=MhU{?U;B|PPX=dQX@I5EIC=Rn+)J# z>$(4U&9X^0!W*R5mAo3$>xLiM{^>^l6Ryer*-q@~#$m>AXS?6gu5hntd;73l)|JLl z{70*=D;R@tLi6h)){c#5c>^pwYtFUsO!6gi2DZEGh~&N&;>-lWi#BX`d$7GL{?06N zT?WCA7QeB28nWYfa6`Dd6}e|SwBwp*`_#P$jue3-q8(ZXS_2jEfGL+;+g1*~5V*|5 zl@|2H*U;bgczXE*;6$1BQqbSFvVNF@Z}P#RwBD(v{^n=CeCTh*x8MBIXvMljR>a2L zJAt`cPn%mk`Tj{C=C}ELfd=lI`<V$oqao=l;<6;$`<dQ|k9!aDZQIEepf$LMZ`+S- zqi>tS9)7S-s<{Te8SG^;ek0jeRfFAI>oAxu{Y|#plt@#0S3C%?-+MRhwy}5U#6kH} zZIRECZ1{Yed<SfNd=Z;&!ucbrE<t}Y@ey4vpYO{?KWzri!p8*8QRF-HIIR;ezDYbY zeGh#2L~SR}q8Fc^&CG$u=jGmoj3tO1*yc&(`+Vr%vH3&y4#s5NX~??ckqKL&2l=!& z9r`gJS+{N$v;mp0lzEknLUQgn<ik9!8_CrW>Yc@WXEERXA~nc{|6slokqssL%1*f- znkv2V$EHppczI3x>K^_tW&QZjE7;SCEMSb91MTYw{(|9j@J%sz*W?<Lrh{jkSL~oe zK5(sqF_l4wHqd91=n(S9Waw8J>lS~&zPap$2nJg`9{(pEFS<->WJPQ(dg<?4+9!T9 zah;PQ|EAA1=!oj)P3(ZzK<~7Na!stCYv^Yk{YalN@c;fP{g`-(-YUFKi0vEagPY~( zt$X>tWhW*q%|Wk`KCJUtnghvVQ?w7?4elG@7ylXDGdk9p;I4HhxI5Qb;GOpR<LP%A z-?<eYI$3%fx^fxcD4kR^eHu7k#5V@{z831BXPRw{<2$9V*C6|8Ui3b=X!2`&U4-y; zve!i^GKi()^^H6+v+H_j+|)klW$++~Jf2M~%w??C-~dauxACqVnjkz9-UyebQ{Or6 z+}OCc9%%nT<8DUIP~1!(Yvxnh?9^+852^lq@IX4PpE}c-mukLk8T@fN^D-2gd0olS zs;^iN*K+1&CF|kWtcPUoJ;!>eVLg1!8b|~`NJ<h-BIj8nxVSyGCNv+ym1~)g8g$l| z>BC&-roNkE+BT%`%6GtmNown6zOOq>s;~zLw=}1sn>Jm24%n12X3dv$R>8)`r*d0o zWpC2Pr|vqdQ-9PNRy$SD)*Qxd&%boYfaV_^p+e())|xwVs7(q^wR9VM?y8u3og*#f zyuEDxbD94?zk+YK%Gmtxqr8T)bIhALchr5%=t%!7V>Z^e*Ne|t>kaxLnj6!7uZ`)x zD>)M+-S;}?_4~SSX4SFGWnNF#Lu^i4i1}Rsd_-%KflV_q<yrJG9h?^Ktbu1u2d5`E zaAzWX{l7Y3$2oE8bcbCza{2w#DWe^GK9{LpY<<}C8QH<v1GV*{H4kQ6%Pl?4o(Jhf zvaKAgt!<po(YZ+r_kd5VzjGP84WFUd`ID$Koq6y0KIZ)z=l4Mei5C?cQ|$Y?jj5b< zDB4#>J6*nyw#sO$<NKi9pNmGk>iE7bmM+)veH*NEMMrCE8|O^_rM66*jeQ^eO?Q4D zdiLL_GaYz#d>><<@BBXIW<$rAV&B(oOy$5_d&*_B)8+eUtJ3*>*f^e5TW>hNFQ)Hx zd>`jVUZc*@+S-O5`n1|IaUu47^f%A>efVfSPMzt%v*Y_1`%35cF*iE*VvQ;Geci@X z4$P&imeEd^?~B2qWB$-pZ&6!sJH9WbBX)e>AFXpm&bD%~O9i{NwGIC$ol&xIA+fbE zR_7V#_o3JSf;!WIr@l{hrZe`poZrXXT-GtBRI8nCV=4#czoSkW?bzS9S^dS}(D8lH z@t;tys}3n%(jR%o+~@PB&ove~d053lXg@inooyGIYt7R0?Y5^uV^7gIP23>oK&;N2 zj_+NGER{!{>A+Xtt2)#9mM<OO`!;kh`;cSI>KNl!mGEZxiOoxFnY{Ty>`v(UWwdX@ z!L(h(;MFl!&Qu-v0rl1+d#Ij;4~ebIq|?&YcE>j*bAEdlb%3dP&fDIDcfxcn<E>bV zY~ZNp+^@G`{}S~APqo*-ca(b1QqOp-YrW2XJK^*j>NPHA{nOqL@!j6#Azu~y6!N2x z-<Zko@!?j!yM5RL@`yW}$#)j8mz{&$aXWc)Dzrz&zFQG~VO9(h)At~@Z`?QX=pgh) zhhN}k&f^@(S2{l){qaEjh*J3}INmEIX3&Za#Hp6QK+MN>)GPR2kTXMiXB#@t3;5=m 
zKDLZWdx`R`_2OHreOCF`QU`s)$~iC<T~gm59V5!`O1{Owp5bi&3fgb;Xdf@r+*|ZU zt-Zxs{4)Oxy+k(HC|P0VUB;r9Up8e0{0IEu8I)h7EFZmn{qU9P7T2jWhq4=Zclzns z@J4<ap3Zmf!zR;=j-1Wda$<09!4KZbsbblY*5S+Fl*U>BR@vg=CVV^n+xGRt4?X7J zwy7uhb<l#X^PLJ{JE<FNKl=kX8-p#r5z)(!0$ce{*sx6mwi>tMM(yJle+bz21h#uk z*ruDX4aQ)59<Xh4z?N7~i>`9O_Q#Yb3$`BjJBQ0(a0zkJ@_@%KPaN>=0X}=W;j@+3 zPxw#-PG<pY`DbX~{ZGUJs)@lmXySAR^wEkhCf(~Z=;Y_<WA_7h^<l&PP<X9;0`?gI z<>+aQ;kENU;q{*NUGe%pU~j}=9|iUYfyt5ZnmQe@e+IbfJCuiRKfhYOr#oK%j`pJ_ z?6a&r^EL~wcjEK*CkO1;-Dxc9;eh?0DI1h+JS`%YqWmUDk^kvP^I^xMz|Oc)bj3ak zQpVV1XRJxG?2N?ul|T0a`S7H2PSKk-yQGvHgRe7Q>A(wsx&EJGoKs}K#!pD)HTdJ` ze{zWPOx(YXb%`&NU;a@UGmZ<_Gfr~u^#1n%<N6x-Vtsr2uqbo6pLs3RSuf76Sw6yj zBfslsEGmd6e%U}Hthv$iS;Q|x|Mj!?acn|)@br9NLRF3{`+fC!v!nhp>VFRWbicxJ zU$L#5WB2cJ+|T8{Y;vj}cHEzWU(4TO_sbpkmtmWG`%vxg$JaA<|4v8!T<&YnTkU58 zn~;M}$iCj44m<JHNe8#E_BBt<J1ts!NemxFlSF6r>`=ZeK5)^t@%3b`bGp&jxIb7n zzTh{VK91)xw`xzer;}J0HjO<V+?&$SNn;04zJjuHXYIYngTGM^{H}<dH`djW!?QcS zy+tlQwOxuGTDHYn&L-J7HdteE;NBk2wBADbeQ;<EK6dz!*4Otmb`b0<Y(QgmPk15N zz68weXN8<8)xB5o({|$M&=`(>i$9>AH^Ud5^Rd~q1$Uk=v~yND=b<7t4>`od6VKZR ztV9=%HV<0w_B{Ls7<8Y9r)bxN$G*;a_?mGFRxdFRZ>{T`hd<3Af6Pmr^Uz57Tgv~* zJd__{9@c9t|HFCc+yDFXa7DLyc<UYu{xz|AnDsxHhvT}<!^1yt9)9YWhcSnkhr&b5 z!+gg)lso2ODsX?oF%MT#eoXTKUp!K7AH@)D;asmb6&d*$Bl@ZGJ_2v!3gU-BpX=A2 z9laI$sd$$AmK+zpgJ(DK`v4lM^O_m)#Pduvt~Y|jgyn4IAF!(}y_)sp2^2I@=M;~T zG6S2*jMvTx>t5|IjbVGSh0RDb!it;e`;pOS^j6pPp<l3%@uR>(e3y^ic1Q3wo2E8R zyYX~*Wq3wBapYok*Fje^X!k>R->MeRh{z}p=kRt-KArxCWj{yEJoevIU-$4$7|!)O zH+wQSR1Y?G)bOl2-Pn=K`hu3MTFw}ZqNK27lX~W(6#oBOnz17%i!-i$j2+AP7tNWi zdZj<6US?zk^{`(L&*NShu*h?{3yR%38_M@3$gj5`ax-I6f6JiVy#LJ@<X7Vtqs@Mn z@_r+JjTQ1Iy+>`PnL5|Q+-IL)Sam;R$7aqu>%Fz=x5j#JzWU`ojmK!~eD8kUk2i*e z`f^t06k|sV=kdy^Q-1vb^mXIdX}5R6Q+=A>@{-%^_cpyd+!~8{|DV?V8@OM?nLxp! zae@Vl!inF9MUZ+G6CALZ;x32>7Jb2;8{gpU8}QP3p;GYT3blz}cWF<><FxKy%)Ojm z+~ZuKeeZnk1@evIMSOcXIA(rt(Fx(YJJ9nSuwBFXggWSv+1Gbn`x>J*;4i5+>z%%l z(mZ2?+L<EQZ6}{b<B!AWWZ2N<)0*o4jPd-9^FtkT`n0>iZr3^9A+dII7~6L@F^@k! 
zdb_V4u3g7Br_pXrt}#47yDhX^G2rO!)^=-G=QuQ90eHD^cMbpgW+(0*fTkF6)_fK9 zbr(Et?T7gl``Ub$Gc9_z@RaYr`z7yIoNVr0ZOs2EG5i>7p35kxq77ZMcd#*k-PxRr zH5%^Yp88(SedB`x=DPV8?>F{0hUwmohj{)T&vSULwNlRW3UGC**3ISdCa%7vu_stK zSuxBSr)&84$N4(-0_-1}<9Ubi#Kt)#*5($C>44SdOVlkzt}p)Y_QeM*xus$ASOYmy z_?hh=dk1_O`MK!?a!X@l@VYWLXuqTI1Y=U;8RU}1mTf#?G!$Fk^UYd~w=KgQ?=rQ0 zoVi}D`@haK?=R&(G-~+J!}NIx{;TRUkG(W!pUc?8&2M>llW+H{JH0)O@Vbn(JEh~* z{>a<|UgceOwwV|CgxFZz#9ucax4!dk`fLWaqAhDUv(MaEwB>S-Ii_1QR^T8UsDXY6 z2Xfh?qTVVe{!CZB922&j&FzFOXE0K#M;bdSpsUqa89Q7S?tSAX-mr0&MN_We-2m}J zzJBr0ux`0M_T2-zeK+IC?%~?;)XByqwRgMqeHQJG;nvx-UDMCpdtBuOH}P99_{^dm z-z?XhgskrzrFy_Y-*5C+4q%J+j^KU;F%GJaF?NuDWK#8I#ts7;d-XZMf&Bs1A0T-C z#(Kw3ee#Z&@G_3K;5Cc??(oW?UgMw`ygut(Gp{l>=*y}b-#Dj(?ukaGP(O#W-K7p1 zc?|bBgWQEiCaB$h=69@urmZq*+D#TstD$YL_G1s?CqRBp#eV207s5yN%!Qxv4s)$W zm-1YA%vs!mN4cI1U3i3leal*X^PPiCIDLtnFT9<3nEAUcHh&@J!HKu7e#Bt;F?J#K z`<Cgu5GAfjYb|?|_B~|isLlur&QrBMH(K+xkv?-Qp4a)E9}F^S_tn%Fz7p5gqFL-I z-^iG6ociOia8`1)c*<+E0lyyBF*iYdPiCZn`!(st@G{<E92@h?28DfPaq{>6COg+% zAi6P*>%3Fk1ts)Vc$n|L248-y*IMFFJHHz}@{sFwD`);hC;md4`ahZf3pn#(03YMP zInlq@Te&9wiR=>^>^d*If%C<*B_HPf*c(jU0-30WHRh~Sgszd#nDcU*mhivHx9T@L zD_u>Z5!<A@?q)9%+s3L(+l6!e{mqomqOVeXW-l)<I2}DVJemJ5;HmgX_|Ks~(Mm7> zg57eiZCXAJ*j2#G*K$wKoH*#@sqLKa>3}0V6`68X5IIS5<s@KR`}_9cwY0yDaU6>N zro~{})D5<Uz(X`JJI=M^D)yeW#&4t@^_9uL=!A4>-B&I}bFHoIy4DJW7syS{wKj}< ztm`go?IiBy@U2s6TXw!8{<Vhkr?8&kbsM<X9X<niCqUa%>0dgNiI*P@2n&WbUS?7U zUm@Y;HzsTs<e7Ncm;24gF5u-iCSHc~OuX!XBe-eeD0r!JNm`@T{8N{6lxLf?PjOSa zkLe(KNh9aZW;2F|c&<4fa)`OzMLo%(qIdGQ)foD+E;{<BUvphBj(K*!@LADZ<n507 z_8857>Y^W8WBlbgCZD>Cvf4~!3}&4v?t<I6w}pEedk0Q=2ZhVI-vRdqOU9574`W;9 zeavdF%r5`Y;)B1m%g|koZ3<;`-k?G1m${2x&b^uF@y(XJO-%jc!?OLTf8&+o!%4Rp zi;DV=DQJ4MeOTzXV+s`KYRaRgOux#wI4^t-_&pljHOKKD=c8%Ycd*)PmVt?v9JfJi z2ZmuRFuoZSUUWQXoD_pb?SDEjy!%$#XB~K5_|Cw8e}=zzh07SJGa0k8!8@+snFc<P zb9zlK_`?5%X_R?>13v5qF-E0FX@5gH9J!hJR$r^^2jH3iP&OTBPSXH=184TM3&OcC z^$ky@EyXHW%Xg_=?875N)X{wRZRpi7dP;oM4a}Xfs_)s3O%Wdk>pO@IM{FA9sMS08 zFKrx8J9$$$YxS>$hWCN@AOGCGc;{-?`*kmK7R<$&FgItzy4q~e?^b10bLLU=3%$r+ zcTThgIOgE@Hklj{eaX{@jcG><GF!9fA>w+QK1>B(U}%m5n_(t4L(Vb}KZE&HJTm9~ z2Jrhb#<!pQGl+B0|CTu=>DSIF2@+pPF@?xe8?ynXMz)b#^it7APiAW~aJB2)guUls zcmFD#E0{(8-BRYYf?VHGd;>!7Pl#4-nqbDs8cmLa(EG_z=5A8x?FrF`HYJ<)LvJTX zA9;R4bT6>8Xo6&sl*oT^2Bpbg7_H=)=5ntmv?Pt31^)teI?FVVdEdo%cln+u=bC_1 z2pD+<GvH+as}x}6>jJCFB|&^#@b^;P;pSYza}E{DN;oF}mjl<mQ9EOfk)pn@QQwTC z-6K1j3p<-)*m=kkOiX$9E@pX+dT&x=fySq`vK|@grjIy7grAdWzv80RY7c{0X9Kal zRuZS|5$0k2s}mdYHMhJo9-mF4@~k_8am9&V*W4`S%vuYt3n3FJUqmU_w{xD$Zl@8r zHt~M<cxj<n3^dk$c@28M#^JnYa8Ed@_)31_8D7A=ZzEpR9?l~wHd20af*EIN829z< zz05dRR=kWJTnF(_HDj$Y?^Ezo)f`IKZLU=eHuAFc#HWJ!bmo)+ZeiKTFQDIgY>BI} z8}9y&*wB=9?)7VMt%CNfazXfNaLJnI&Uo{afq@Ty4Sl!ytlPO)8!rwQbKdL|`(@`k zmYhW1$G)-nB(bp!VJsV&YYR8gg;?kMX88rz?r0wt955!T?`{E~vWahKppRQHNQqPf zgL`7{3a2ljET0?R_1oV{tfsmbx_z(zfAGC_j4qvLPK)sp``qv0VswdK+H<eDW`Ch+ z&-Y-G;;#lKd-z^^%wMpcS_TK|?00W5*TUiVo-V+?R*Rk8e4l&?+V8l<`OS@<@RxTR zi!KJ{8FPAseZ<?li0ge*dxX8j*n^%$vEi(4K{rsj<!+-c4VwDy$HfgdB422(Q?3X^ z_hLVUk6k|noFG<KYXy7hlQ{RTYw0oPQ%~|%lzF~!ZIAGxwH^~+U*LO2lOv6GCMjNA z6YbU_BWNC<i9HLVFA^WH0hl#-2Yr}T1l?%I2NPMBU+g}<oGp$zUiMUwCwr@%Gx%O` zb-3C;9h;@@4d-4dc&_!alJD1d+uuv9`Yzx54`6J=?|#MvUGfX2_Fmf*eB2Jj=j^?< znZ!G61}=Zx&t4v}<m|R?qODsQ6XlHtbMtG;zl+h<9C+3)=yK!72@UMkY~0Lm3%@1Q zE#jF`ni|dxWQWI;4hk2&)~n&^AGa^QglF_4-VU!H5I%_QDbIuKfIK<k1^8X2DO--T z``b-_71mm(e^0pF0SEC6%{|X2b<92S7UdgX<_;79tKP&|DJTQ3g6F>Hj72HfA{Rkl z!JmyG(OBX#HZpf7zJ@$Uezn5aCN_*dF#7tZ(7Vhr)(j4()2GH5j5CJ)9XPi<8EDY6 zar(wP{btM(S88=uC9y7&Vs&fbHL6z&uUSlA*#}2o-|}*=hNqCp^=?!9tP*3?iQzT; z>;8A_(YCxiu|cs7@@Th`cUqn_8uE}QSVtT4;nn(vyo1%DHNXg6aHPiXOUAa$H+Wr@ 
zSt0uG18vAb-m&4=1NaFZz(P4cZV)_#|7z<6;GsEm;tA)+COt{q18{w#`c>RB`C}Jz zJ)U3jlraSp_%AOUQ*f@H$NBtq&y6XVcixx+uP?#xO-}TCT|V;PCHd7~?Q`ye7V>KQ z;A{T|Px7V=E;!tH9%MZBxZiIkP7-)#&KG<76@06o+(hD;ImB<&-b;|!JYMG0i|p!^ z4B2As#gqdd^S%9No99U7t9=DFdwsKL_*Fco9e;(}6@G<&UidtI#H-A!;F(XJN^D>w z#FwR;nmBOVknkhSk8q%6Tl;X~h+S9pG|xHS!aMTg3KR2Z&B{U^DIY^#4&|-f${af9 ze*pKT4;YNw_R08rY(14mtv#xV$Tr%esyNDC;diUy7mQiw0lxj%JP$eC+3tSY6`Wf3 zcJ5J~4U7fDE^yo1J{<lkUC4~xkinWf|9NPS%J^oaQ2rU^JR`8sxGngSc(Zrr9CThY z#-bU|*!a`MX8tu})aB-X`&jL%c#vItAiKsPyY@s5iboD2)`Q0#HuqH6r<XjQKwEwz zq3V^d3+qJt>8D{FznAb8XuzJR@e)TcvB)sUHD)X*LT1g!Zj<of9P)}LAX}zK@-7Ls z?eWB$z9B(_HW*Jx|BR<r|Hj1VIwN4lBW$8P{4!HEiu%g$DS3oASzebxJ86+x{5((O zSsr`+Tjm(yPp&e;pN@sc@H~(4&O%<YW89s_-kEF}mDs4~Qa|yWdrKa6Csrxuq2hQ* z<_+;q7VU+kdz@lag`iiVKD@(w=D4XF;*5p%H7n6e^lpefrcf5;zOgd65IT-B@GzeP zqtF1#_^Fs>{AEJNQzo8-S>}vV^r^zS%7L-+K%Sq-^Alp_6AaVWtMa5+`6MH{#62<k z&85boEF;T=!<Sso8ytwT=Bu!WEVy*vInjBp%&MwW605wxqjGs-xENksH5i}&W9BRz z!r7l>eh(C!8~r`M@>#}nU%Pr${k|Y6`rybwRM!uVG@=ild2VzSzhm9URINHQDZ1dl z0_OFC|1zTU3VFtF9(9j(CsnN~<oW18^ljHM8<Hr`8*N0N`Kk5%nV%*_&%HO$Hvcr_ z-5k$*^M?0YI`1?ix`zFJA9~88np1QYa>+%i#~RVc@QIkhS}!&EX=da@_QHRt{t<d{ zLNr920pVUg=ZbnlqoFs^x!l)z{}yx)=6UrtY(-Vj(<C3~y2@6(G=cFwFcu$f?1?jb z`*u82khF9jV{u`F@=K0?*tq_6{28i>DoW;&%P*N&RugATZ^J)1Tur+xY2(4z`){3@ zymUVAeu9k(KAWBsA6Plyn8KBzfeGQtPZGj&pGpW<U6lZj$cPMdRh&77-xPiZzwALq z!)$1$Z*B}Ovn{v?CjA{SStOVoKQUT4V4^8UnL63=y)2#VxQWq+2J|xThmPwN&Gi(n z9GDnbss1YFRE?T7XYQ!kb1Fy8nG+gS{d=R~YW27Gerq1Sy5F1!^=CNxyUm<~$>@NS zJNtV-JEj9pMhDEc`g=ROTYnGI-vjhFpZ?~Js{We21^V+Y*7teA-;BuTl&!f=k6fvK zF6Fx}bbQyPvF|G3yDp4<S4ppEbI-uaExiQS9>BE+a0SP&Qe1%V!?pS(SHlqXd*eMN zi_L!1BTXIsrbPnkcl~+M$~VsI{J!<Qt?yfVUi6_idRyPOws*9Ve%G4)#?dcljX7^z zbyXbwrhjiftFLo4^isb+yBFG}?@fzDI{HnG$j>8m#s$&J5f^m!dq%3&@2M9=9~zNr z^?NG!=y$o<Z%_K|Nxwbmw<rCkeXn2P(KqlJ-{AY8bF8m{_c5=j5shJPNB?ONxB8D< z9IgEA#hv{}GOYfWUmShtw;5Ld%QGC`9Z&!9^dC?ER&1R^;qgZG@4MgX-zB`V`k&_L zf6EN|A8>iJ^0>=8`ybHX>Obr9=tIZ#xBAcO@95u4|6cm{(!ZDQPd()KucH5l;Q=*c zT;U0^K5IDV^k@348*o-f9PrxXExEK7x{d4=u6^56C%w9Ipf7CPTe#9aKd>_Ni7)J8 zeTJUmp8l`$g+2Ta;5QmrtG-d_3O_`j-m*C*zM6R@53^@3ex>ngU*<(%?S(FR>+pNz zp6HU!nAp$%sOR6_U$XbA*$WpjPwCgp26is<`+mguoAHf29eP&LvvB3?UV)WCk8+%) zSLMXHRt`uuR%Xxd+fX?M9vvPRO?-Hq*~d7SugVwpMJvzd{$t}fQ$+b;%Ej=&C&oq1 z=Z{gI$n#~f=TA^B;rY|C=gTNx$n(0`^QS3a%JY@6=XI2?<oR>4=PM~s4MZ0KYqS05 ze9;NO<^2Kf=Qz9^Hjv@%@A`uK*4j^rOlDpxJMs%)V>*trIc>x|f!}IgLT|h4)<~v@ zf19>Jc&zM6lKl?OC_-jA2ifIpWSE~K%bZ0_Cj1kLQzaSbZ;kDX_Z`!}t-FoKi(E1s z+2~VzD2XYiyaC81M&$a-k;mS+SaF_Ln{#%rCAWH!*Tq|c%y<4<=R^}UpYVr#e#KGF znfcab6*I5FWxjsCKI}E**Dduv=aLQ4TNG!EblOh##h=xtv1yIo(y>O<BDXU4%R1na z9?1kQLDpzl#~LN~LTrr&S)(__)@YD5DqPxnv9aiLzQOXDw&K@VKA!UL%#M74Ts<*W zHdn=b@627G@qF?xY|zk(sV1$m-gVZ&mKssLyVCH994kA)p=CxpPUqg3f2x&h-tx0n ztVx~CZkH^?xhRuY-`*L!>kMLd`Ir~!R#yR+=nKS5dff>3G-g9X(pgJ=@o`Z8=oG$% z^Y1Sh1^dz4_hNf(_ZS5$`2Gsn7^w4D&p<dwG61sUReaNt+R^)JM{O*j4ZRC)n(ZiO zEd7|{h;xiX@KF4f&l!i{y7wugZokK9IQa_sX=IxEPj#Qb4xbS!shnL&O!%)haUQm{ zmY5=bV$Bs3!>^b))5XM@mOL_nIMb7f51T}+*mB}bb3W{y3B;M6Oq}Vt#N=f?#o|nF z=!i3&?@CC_50g6{T~WC$lry3%%~zE_CDF=9$l9@Uej!h<E@NGG&r|5D0N(BAzFP*o zyFWAi)R9-U)}~-DKGa&X*O?t>tc5k+J<eFA<gIS`EUSJ%KFcifS>DOIugs_@S;v1C zITE$6^6%HsebE!PV((X;Gw&};-c=q(!8*HlV25{B<r2mE)VNz1x361lzqyQONSC<Y z^Rl2b?+kIo^+raI<=0%_kzW&?Inf(~xAvS`IJcUYf*vs|F-$(pCgzO)gr*3-T18&Y zK?B=-10H<v+;?NJ?a0BV^&jH9zUF(J-!k)9#?SX8@Gbd#Q?O#Li93>Ei@=?PAAmdh z7ANi$@hyr`?j5wj{1(M1NB2ogh<(d8zGa&y!S7E=s0tds4aI!Vc)o|clKdw%J;V2m z2Cn9qW?ADAF4NEdtGJw>a%fx@Tm@6X_E2!03tZof;mue7AA@h6%k%$7_yT7uuc`31 zJAB1AM7KZXe9ZuO>Okhj1>HPc?)5f!!!G!)c*7p}Zgmgixqa|0>}8X@_yu|K3u=;2 z5IK&q3r>>#YZ&c4V#-8mkq^k9Ys(HlzuhT2q?@t>JfKr{_}E>Sjo<sDXRvRl_KQL# 
zk8N><TU<3axMq6Ef1DX9zJpldncC0Jj9kU_qi)&pGGvE}$Zrao_KhXGWjJKFl*pp9 zjp*rp;W<;?VfQ*KSMqGmE?c}xF}kd?EQ%pNzK_LseT-dp-|l$^?cC{J<Yn1AoUnWr z9PXZr%7%F!@YXuG0Grq?{I-~9_<isu`2wc-ZMmj5`#d$+jO`pycK#>vM<0EPM@}_m zR-5;aL9QySv)fU9BSm?jj~pAci_HjKb~v^l$yGj6pS9$y5sX)Iz||e|m=^h%{plcj zZCQt2i>wpVYlG;uH^ubYAbRcXJtsyRh=mYZ&OU<dE}S2hO-eDT<og?%8W(<ad0cog zXBU@nR&Xh2+pzC*oWw}cyB`wgZxps3$xKiGgS_+iU-|C*J_(%JPmT<kKc=nf{;3N? zx1Rg1;v3|$pHP>HT?YQCGxq`hv*A5fe89}ey6ZHrsgcth-?HK`-@=)$N!`Ci=iui* zqL?3vt+T+-`AdlDLOj}eod2K8m=xzz@xJB}17NVrh)&?_zs}I;+{Jd};CaZL8s}#C z?w6E@uvQ-A9JS8Je?_iHougg=oRYW~o)w5z#LmYj(Drk*mu#Mo_f~z$bq+BvD+(*l zR6i5r*q0*?m*S#q$L7`14{Pg7%I7-!VLY#>9`>>>^(aqw)O(g#G|B;-<cW=$w&wBt zOV*k_<_9<fP^Y%sopqiu>!f$o;oQHO1NdA=J8znGvO4NaWbBJ+XS$=#cC${ujyflD zj`{)W06Qy|5w^TJf4ym;Ek7%lZ6z|eV&qwSfT@v8@Wl)NNr=(BL}d6oSvx^uEi2A& zzubzFn^{M~#~^k_;ivpU_JVJECc4rzvES0{f}Cjfgq&y`?Qw=D{O^Uvj?zKw|MJX- zE|@NT%@Zog;9u`7rJhCe(j$GrFSDMnl$<D~(L{HsgxpY8E^=QE@nAzGnb`RzvzMuM zL|1oXGtI^ZXTIkuwd%SUL+oBwZfSIh>}TFk3386vC;QFOe|?0k#XO4NR?bAmad|gX z&a`A4$<)?5P)yydh~)aG4~V{HM6R>P3$A>DZ-{KQ$O~cgD)PxnrevS>2aL<-3zZ-T zGM=o+UhJ}`lrSHht7-wBKH&K+w8cBYw=}JHAgp~E)yt!f?gxkgrLjE2Hw1ssHz+?v zKl3}jMh}o4Sc@KL(*KwqSliPWAstY%=Qi;L>44~iKSU2~>S2u7!W!*_S#~5J{-H4r z1YSA#ln&50fQQ*O{HV|7-hZSA{^|l^h4Ef@T~K<itp~o@n||r%m%v8-d+h$0uNjA# zube&j-hoF=%%A46iR<66e(br>JY?mUMqh4eU!9N;D(UA+sB%8@QPw<djnU#ez==!0 zAZADmo+d1ZTCkL_L=*T?#CR0PWHPcq9{AV{%_#a^VI8q!qx$#tHlmldy6QyR*~^KH zRvo_Qq#um8nsW<xZ}`77=g*k3Kt|-ZjL|vg%PE^{oVm;T59fT_q@&OI&th}l@}V{7 z_u2gc<L_=c^cv-C-1p{uA#+}>b*wd=%KXH1M9Us?<QUn%enn?_zVCC>d3IQSP_o6i zBJ9IQFZnsPF>}vGHdG@@e0XS%_6Q|gY3{Nk70g>Bep8>Y=AF;~jxO#j+x2gvzUtQC zuOj=M<UE6Q{4IO=+B;JFYmt|lus>~u&Pb131Kv69A-4a^DD!)cd^X7ool1a~hkDd5 zO=At1@(?s$c9?yh9)6wUjUo$6R@+ZmG(X#(8*q6sbE7tje&${3i_v`PnxgMM=zV~7 zBATB-9ntO-)_~}G(a#c=`W&<#I<`^q-ts+JRr#kB7qH%<-rMhZDJg$l)(|h}ncyL< z)KQEi^(S7S*hzamNq+T>&qV|~iP`pLCnYRB;7Qvc+Pt0oc}2nn=D<grqX%8Rvj%u2 z<Ps;Cd#${8tVx?~n^XpKm+pVaF!z$p{EF1UhZ8tW7*bWtd&C~=jqK8UF1~54#Eni$ z9$z)zY!90Wc9WB+`!%_AzH}w8zm~q|=Mw8Dju^L#uYNw?m0VTqN{lp~qVLDX%NT0$ zx$-9WuQF~gXp?{NXir+}6wZ-GiO01U{MQ*U`IPwhSNTKsp!CjwnRmTwxZE3DynES7 zW6^%`5qmC%!hZ|kzh{<4w|g@E<N~ALq$=snnp@w{9G>U!oF8irU%iIijHL`53w<1j z=7MuUVh30D3^a^(xypUWa7MDLsu3B}%igL_{s6?SXG{$%nd3wGhB$p7#aHUV;T-UK z4{Ov*UR2o_C69P%Lvm^*<7hnCK71N>#d6sdV{(k_ezLh0G4`3@)#2@o)*MQwNh8;Y z+WgxG$VAxtZ>9~Ee@!1X`{@Jwe!1-X-E@qOK4SL%Aol*7V)p(Z_I{f`s*i)@Xi%F+ z`u5cy$dAv%UUpCHzwy|Cz1V|&=r0NEWhdg(=k7Q=djR|<W;J>du^>(|qIq}T5Pke4 z<?(W{2F=*k4VC!7@28G<Pz!Jmjvx<#=ItlQ9Q^0^<{!Q@jWT(iqT)q*md8DBGO;s9 zAp8Bqh<;07Gk$kY^tQFK<!40P(6S2VR=RF*i#yD9%B`kOn;w}X7_!!i&`E>k!M66n zMszni(mv$K?6|;=+JV^kXivEgyx4cV<O)21zN&l&EhmtpO7C9hUAi9`EQ@s@`*s0q z<SFLlTYU9)_jQH0aE3VEv|;v#ep<BiGk7045F+h;T?J8(t6(p7(B0R$>VQSTMdurf zG}b^IzL3nJ+B#AjdQb1`-J;mLr5>X`k8!jNbQSOmZ#|74w$hdp0u5?Qw%rqW-b}0( zjmfgBW|?wI`)Ry`O*P81k1n#t(wt?i)_2!p%QS6nuB<BC<|cS=MK;;bp1a^KnB>2A z`NC#kEZ8LU4m7x?V9N!s^2-t;z%yD2oo~T6z=uq)TwD%YU5CwMH~dXwXofF0Fi*lE z;Y_KyPu(|i622Fm`H?ffpC9>icipY%t3mcon7eTBQt1XBGnQaJWzncIa7^QEy0&)d zIAl)to}-gvbaXIuZ!og?cIN2>a3%u%r~*gXZ)lk5N+_=dN4LSV#?L>eEfM@I2R{dc zi|5h?cFt%Szwum`aW5bI97lh`(Z7R>xAMPr0DO+Pde{LY*b!2s$7t_m>q!BV_my&P z=r-=tzkeumHF(0%s&d9O4;oGUwcb_G=<V=`p~>e~{hs^fuEvi}q+U66`fY62_y(*O zuXz?4Jrvyh3L0JRN{STYLy$H+ajBp2{2d=ZAK!242Jo(N*ZG#R&KkV)FMSvOTkawz z*=^=p$%wr4cYGHQJ!c*ERpFNGITlW(N3@>qj#_e05p!tIUE@thpS$2xbKc$wPQ|w1 zfJ+Uwfm4Bt?5BhI-mMb{SLO5l#lQs~(7O`2$gbMjtGMbK?&kv+@i^t(o0q$8$vk+M z;4&S!Y(FtjXYsCtNZL@}(hxihe%+-%>G;;(&3I^AF*I%*w5|x6cMf}AXR`<5G55Sg zYrdFht(V4sw%3=z3q(V$I5vGFe}!&4zk_o#lM-HTU##`B|NOCaT01}G9of*e)+GPb z()a(S9sON=nY`HZ-{GJAixILb4ln}IW#qPdZiVuA1)_zC#6#!$NI9z6XOjHn-xrgg 
zBwN*h*ESD%2N^@YN#_$MbU)uG9~tE=BTk0DoxCVboM93_mwc9ojFXOhCjDDBJjr2y z1lKHF$%qUGS5#N^4kd?4pOqZu<2^5W&M@#=@{qm8?RN!dZ*HkYqgby_nJE!G8IO;- z*5a?hE&DsRFsI0`{<FWca_HQwTn+YEkVE|7RbjGsqQ-6ZhwR}?4wV>XTSr|y<MuY= zp3M`Dvbs@Q-)~?1BJj5GK{@9#B9eW;wT<3ufQ^gu?#cMxvljE<MN{ELniJI@zXHE% zbZV#k;$^J1{89&<Fy$Bg&b!MmLs-AIJYw#7n(|0m<d?vg93THH^2p94$qH$aReYo5 zky}i8BrS3-ec+Gyzao!(80({dY~5=;CnB?o9%|i7W)(fux=%_jHf7fC^zbd#eJ|j9 z;>=!4uPwi}t-I_x&6N;&FDAS0F>@lMM`rL%mK@X>OQUV_7}mZcSGVTyuGk#zY#*lB zg~Fec@#m=^KL0nA<y)>jkul)J>u#fujbDd4H=!KhkN*;WaM2&3H!fr;&PdF$&PS|? z?|MFB8)qPNR)A!_tM_0RsZAJfo{gA;|Mp(SrSlQ*zisLE>)nSxAK`YKkEmhq?v&N| z0LI#C!5_2{y<#2j=PRc?d|~TpmF#8t#;g5&_@QLBMrc(TW63A)^f2y`|EOHP2d4bA z`Rr)^guJTZ_{rnQ4V^G`N!EeazX@N9Am;*V;p@(G5Klhd{Tzg48@Fxf=GviMowrHh ze9IQT*UrOb+uP;Ss`};s?mUN(%e<&R(X1f;pY}O~AT&Ytu}17I8lUzN?qUp1xK8Dn z&e7P<PwW1Ccl)IGc<vkv@qM9{UVky)sa%aZ570;*#q)|{r#w{NO*`h4ay0Mp^f2kH z;--nFDrd53Z)c8o6O(Et`}=d4pIP8S%ZY)7Mc_y~dAhvpTiUW~ODs><>By~rgXhmB zKh8LEMYeb{{1=fgveIV^t0ITwbmWx5=s)wwmHP-ZbUf!1w=wVRJC@If&Y117_d9?+ z()uK0q*;c3fdd7VNt|DGtc6<6H;20A3mVFp%y?DDSTfEs+7#{3xr>>GYX@^M=_zx4 z2tRb5DLwMxFR+my1KMZPN~v=Z^?%c?-llH#*3eh?+?>(Ao$<>=vmZ@VOg6*62%B{G z_C5hNqoMoGzANC_@(GfS#A{Trj;(R>-3{R2;tFB`ZDx)>JKwUGSbI^*-Iw9NZz(eG z`S@ZhcOUkRz0?DKtF)JH!gEjj2<hvgj{L$Y5#<|t<k7C*U$M7+Qabgp88mcxe^)&G zB@MV``_Vg7B-eTG@+~#%U|X>GNm^t;S+MQJI{YobLCKuX@2erN*JuCTzF4++?IkJ> z&*9G4**+&~A0EF-b`Zg(2pJ-O_&L!e`1w9;ESc=94+cLjMZS@J#2=rq)b>v?VSuc2 z0s8xR)_;#QW5i(2nUpWX&ks95QbkQkna@|1sBfXp;PS+!Ly%9AwZY4T4L;=5;=7WT zj&>!Mk3V?TyXC*UYhe}k|LO1r$;`#%l#O7oz&@~k8uDJqji0i76Buj$&wNY$$OE$H zG_rQI|8K&q%(oOdqCx)aT2uNy)Bgw?DSW!YrguTTW6J{AUuAohZ}Vu{(>a35Ug$T- zA}Sl;dFZ#6{Z#+bV;605)%k((X!=o(9lI?rV-g+0j(*s_Tfl9*zV9e~mvVmRP<{V| zzP;F*<QJ_y2c56!XcPOtfjlL9-#rr?@)z=&_BOP3R{xE22r+#?ek=|hu2UZ{?S{fh z?Ny{i@b!t_u~$4bJ#yB2wv3k=dF6UXA6;y1X@|76r7>RNQeHP(TgUh(luG`4$H!ie z)`Gt8ui(4>W4s62!r4sZijH?qvFu8`7nYxp?-a|9<Q46S*&}n|XNTh1c6=>cej9`Q zCOKN)`47&nh0u?E;MX|Dkj(mOpA-MifnQ~!M`WQ-^n=g$htL0rcw_NldtJ#ED|%l$ zr(YX<boI^bC(b2TMlE}YiO}Xs@?*?IM_2|u^rGM8p}$s=M?*3B;l+{#qiN(UNa6dN z2O8l${MwPF<@@r?Z0+-L&X==K&)gTwSv6ZZ4il=z)7E_2sYp()T5+FbX>{5+?0KB? z7rkiWTTGe)@AVE1EVR$R>Au#L)_dMXzNOknvvFk|xPlBZ%!Ktt$nnqy;nz>>dDp!z z^UfJXY^>d7`_mYU<aW)G#%0Z=nU~ANFWIipaZLP5qR(CM^g3jeV9f5<1YF8ucE2*_ z@^E&)WS(i>>*ie6COwREsm%Ko%+uw}*JaGxrF92>bqP5r<72vC-^l8JnKr)qoxs<Y zxmS1C0HNiK2c2(t1RLNAhYfH!^72z1Ho(5EwJ~4AT6_&(i1`}Un!bkOHKzT@pPC?< zJ0<cic~&KJuQp}wl*o1T5x@o*!3Ma(VFQ#6X}PnHm<_NN8{i8u8(^(z1MKYMfY0jV zW%c2JMI-qwFMqLp@rt<tGhdDL;}v5Zd9>~gMBgHAx%R-HVgLKVS%K&R_P^iG?GYA! zUB|p7Lfbd+A7Jmh3S6s6PBdeQYfdMCYdOTe*7^Gt3uApiPgl(D(V4G`K9Uu8LUoon z>Tou3I&|O6=S7__+50|Ub(TBoxE=YIG*<S$i&ZD$s58cq=ZHEh+50Y3o#!2OX2*5r z6Qa(1_P$S7otGVT*2Z<_@uAK&$QdV52bitu%$H=H`?cftuD_A9XT)+IM7{Rcp%WX7 z=>A`2fph)9z5d`JbYjMF=mhcMOF17}#N5bcFcZ13c1~7X3-htp6Nk)b#VQrA#l~pT z648a*pd*9vz3}x-tQx{v^5b`rXZl@`w~}}B<G_1piTp0GBbsz113IF(81e~He|2*% zHP=NBJnE?!eQATfe8JeaGVbI#@~uc{oki@H<om6BIZ1Pvclw-8`|_<wy5ES(26YYd z-Hb1Ta*0NP3*%o^FautTT=W`pY%RR54*4}7+iQM#+3D`v%i6ZOQ>#W}pKU_Fg%)%? 
zOXr#F49dM)1kbheq~nKTUd#4S3%!(l+=MTzEf<d<4rg`jUMMCj=Wy@eZlhef&ui|K zR((4(c2YanpNKwqzE20Gi=2<HF1^U6Iikxp%~{8LqB+fn+G~Ku$VbTu-&v*%dnI#z z1#^EnIB*%dC$wh+zU=rNfgc_8XOVniSQFAW|C4;x#jez<G5ERKFs($#kzXk~U26+4 z%)|H5E892rGcSI#3HZt;CKD?Tzu6+}jN4ol3yV$(E!pPDi{KkuH4?u^dk%c)L)G}> z3U*`3r`gW6hyOoy$nNHzROCljaZcTVSEcx&Cxcfu9W~EXn6``5$O!0Bj$3DcyTYmw zxp)MzG{ke4d(9loe$8p6d9FMq@&xxZpUWsKH^W@$aFv4&&xS_ManQ)nVa{dP@`HHI z3$!Ww!1d(96&-%qq{FF^WX9NnUZF9DcC-(l3q7uK(BnDOo9*oDFlR8FeLWiM>q7dH zU;aMUiw$SxQdi8Dq$=_Cgw{3SS<+Z5Z^tp`leYuf)9W)UZ$~oaRnX+;<dXzV)|y<v zIvfH`Ze(3<=eiWST!T-N)~9lIG!uV5L+w9gwZEA)ku;}=ncKl`|3q-_7}Z(osPlRc zGe3e|rx)wIr|LZAsN=D64A^z>pEC2|C9QDO8EeJqx9hwOJ=VF*q!%1@=ERw??(I6y zLXTTj=MRoLug973;q5vPK##Xj=lk?nc$umF5dYOTFvovyum6C0M~XSp{(8@MOh3G1 z&<)w2-vu4J@eXp7@L%i6YLlL)bC_+Op8oyl?a`Poru2HnE~-Y4_d=6GJ*<7E;I!cB z-{M;_5?wnTd!Oc7^s9*T*Kr=%khFJ`f*hO4SfyiTM{Yv)F+9eI1MC+cAn!DK+3Lyd zqjxk(SBbqZy`{ySRi!meZ2k?{apr2TD@g8UbDcwH)%R5Lc^_ckbuzxuuM<1Yi*NMT ztjWFf`D0?JiG~KrTVvDEJ9vj+DXYKYd%xuOHg<uPvWvy~ZQ<J@C1;p+hXbB0Kly*( zsU$Yniaz}hX?I94?MeydLy6Bdb;g}-7pEE{o6sYJsmAbH{xvq`{;hS`80yfM-|Ayb zdOypk|5o$^Iwl_$ujn?kCj{Q=J$=h7vF9dFAh%VBIG;9+^Fia-V>D@89{S@}Xj~pN zZYwlyE9+K%>S`z3mEJmr@kuWJcdWf?Vv`ZaXi})HE8Wyw9;w}YR}YhR{vF%ncGk$~ zzQ7$?rE$;U|DU<W>c^h4bb!3RU3Gvdxtcp-uny?d0V=qb4&YW^T;@x#(6=@X>H4je z(B|&v1X_RKIRThWJ+W?N$6nxl&eV@hn0%6NOd9`?abAQR;N$9NHx!XWx6-e<CJ!Dt zm(L}qmmm4C96Qr3$RKmEGojO9XG$y|I+Xdo?2fjJhsNf6s5Re=`+IYJ&0uRjFHAJ* zX9E9b<^&k`k?p7@Ub=$O@a%rp1bJO$m-%E)pzgCd+KV*(kNmldvB}IK1^{+jaCQN> zyFfTA+?8!Ycz4_O_!06Yti>)<3x3&qrYA2l7CnNk4P8Gak$zM+qqVCIhPA&SUw$3f zFGAS$WrLCZMRuACo;DT<hknjn+jg6ubMIWrzHVnH#B&FOkE7wa<6=B_+$ojt-28}a z4sO=4+R|EGOaAQy#-#H&HjaA89X76gbU6R5n-+>sW6x{dGAFd8$fPxQzMGv%ETpaN zlX`T}73E#fesF5!ec&fp>}4;s-JRLG<<!TPSmRBJ?EVe5sc&b#J32nFqaNE=6E?{_ zts`j2t<Zz^2Z+BwPBjxhun9#y>3%P3H$<##^Iq~f(HF>*+=hKXdYzSn{Pve*W3E6R z@?qEad5nfKY$<jg#O~{T0`*$3CyM6Qc`Uiq#d^>fp~>Mx>FbI~q`fcZvtzuXdD-)c zS%A!A)7CWnDkaZKz8>xIwT2{9I%J_kt)0Pqe-dX}fGP6PI$}#D#?G{G9cT2Z<Qxck zaiU-|4qfW!%#{;QwErX9frTe2kqy9@81w1MGd`95cN_oY%k%a_*e^VRh8KA*{M0ky zsPx1T&tB$!Gx#3mN0jA7JK$kM=7=Bq%v^^<kj>9UM;?r9?uVv0b^pQ0=hDZd`#1gz zJ_$Z`@*Uxac#ixSQX_XwHRY|ylMYzbN=E))fED~HQS(xUZSryWU<-Jjy37b458Tq( z%QW#QmeUgYR=yTmZ{u5=jc>g=@og7*1tTS=ah4PQQ9p*9g15bNG4x0{M=W6z=OTUj zT6r#G`QepUushyO-`+mSggN7Ritl~}yZXa1`rO?{QwWZVJ~y#XW7}v-x%NVzl~-~c z-=cF?!^xqgHLG=K+rKrZtVRFhZLxJo-lVfMuhg??o#@<6?6Ws+Xs6AuHi_1W)_cL% z(0{<!2Ef<Uu801=z0#V~QgYJd(VxCmV^cn+1kpQabjG`#^iIB@wvG8XaQ0#93HSE! 
zoAPT57j3(f?5aQ1?$k`~YWWIkTpQ36;uwoLzXj(+r_8twTH2XE&fI&`_*pBp%%Kau z!ybZPxi+qcUTdwuo70C7vtw|TrKc<Z&|qX`?J1skK6$|JMc>DlR`!b_l%4hqt&tyM zzX*^+X&C)z&2Ww@-L{puuswITl?)}H-v1L@Ni(^<UZRh|$mTP(u32}3k<HI_eh>2U zu^LCro}xP4?J2UANQW4V{H}J`Yn(I~`CaxD8!ob6e8;>V$>t(k$s+Z21e=R&CG6K8 z(dMGQW~<H-Y%UYPjoVe{2sW3Yd)n)NO&w(X1+uvakF>ub8Njx=T*G;i8rId-)EkU$ zqVJcEGZ@`up7Z<3Klm!&KX`?s4!X%Rjye;_aaXT8=q4TQpqsqus56u~dq#E8O*-nJ zn{0Q~Nhbf}Q`E7?ZJm$kACvd3{92=R4rWz*{bSS{Z4jr5ctYZ9dBh(w>e1)8Zh;nL zbJokWrN-hv+p(X^_(tjGswZ7FpZD_Wq4#zh4gbS78mu<jCclE+qI;Y_D<_ovZY5XD z!9EhgcC53y@cXc2lOk|0zncs+Sm#6wjO)uc8|Te+Rg|=zR+TmSOyl}|&XghxWl2UV zhTrPkrFYP}C5dsyX}7V4v-^3juZ(?XHZ)4P2)r%#m+0KLYy|_zPh26}bo2eh4fR$% zR*v8Qlar%mOYGPiX|2I0t=JpEM<z#aSz^WB2tJY>eGfa1cz5u-yBqSKCN3ztTCLlN zPU8M;-~9xBoF19>wrmnQd)9gOyKVBHO!?yj&P-}QIgj{3UauFQy2&BzS-tVC3q?5p zY0g8}XXiPedGF>-reY0oZq!FSTiYj3?{)Vdz}FK0OCtha4~yB&{rkxyq&eXG>vsa1 zU9qxz*}M{uSNj*%{^qUNf*bq8uZSB{$oFdR99TAhSNcVbPmSvg#*usHN#WVdjbeaS ze01NLjlk0AXN+95H#u6pKRG%Y-*7Ky($q%9{(cP=z&rOhCxxdHD^~ARyL&~ed-jSx z&wGJPa%JOBIg2_2z|n#HG@j3jh=Z2FGkhcUY)tIF-v5|q73n;~cU{jEk5upI+246q zlV*(6+HkQp<b$jJXU{gm|HNOZ5&hD92F~j|p!sZz5oV4THK!OOix{)9`KJekC+7Hh zH}`KRg_qmkdw2H1dl+{`U;6*jdVf(N-%E^_j`}@L4sVIozn!|orXHyn6zXdLHvO93 zl>3Lw@^O@NR6f%xgW!v5j-`C1RZi1$m1kPzK6-u(<r=HpOV3qaW0e#1ycgvyR=KC1 ztL&*X`*Z7gGUXpz<?opD9F>1+mG@JwVb9-twN>7$=PKWCm3QiSBIR1E{FR=oyv{0b z)AIz%+pO~EdaiQfT(kczdhVk<#43NR=PFOI${*>um-4k%`F%ZC`9Z6^Nzdacud>Q- z>$%GBSmpJ4-jni9tNezZtK7HB?0>DE6Eng)%qqXC=PF-dm0#9#<!`^mD!-`bDlfLm z&+EB|@=I3vSv^<zL#vGPzo^DddB0Wut)8pgZ=Tuzay=(@op+Q~ep1g>zQQUm(R1SW zdhfQ%k5Dcp9^1nm>p3lQ12)3ll9iB&Y#$igUj{p+U-qI$?mjj8q<poAnPSJtk$iPE z_}HYgRc5@DH1tYr%*0EnKZuXaVd4gEeIoWwgQfcyLHG2I>LdGi#MPTc>?y6oe^LKK z=-nY`QzVzZ<sY7V`pMy`u{9z(^ZRM6O?>CXr@DXBYs6lzUhXP**m2JIkuBCa<7Id2 zoN-#@WWLKfXPg!p$MwVJIb-={at^G^H#=j*Sm%t7>%n?<oHGtClf5_F|IiH{V%~Q3 zN7{AFacu`@;IB2-(0RL6zLo6BI?h$I&;ML0d`Ncnsr>t?GY(!0&m|WE=Qp}NH`{=Z zp2qVVx5OzPj!DZ-4KVM0jIaxud6KRNa7~P?A-X=5>z?>F=o($o1r0x5*CkvhKtpvs zmFuL6k{n&%z;&;Rk{{`MHrK~ilw|38A=lU{(sjL@Yxq=(uAk?+e?>`eU9aUjr=p~n zt~YU=hfkiaw{U$j{u;X8$@S?KC2_jOkJuHcC~@n$57$4hC}|(f^#HC*D@qRN`c$re zT~QL%^%$-zI0vKa60U2Asj2I!TsKyfe530dxNhbwtFC8rZOkg!rt5`V2WFLgq3h*b zSIjE;RM*dQ-8idcv#!^w{_K*Eb-hV-W|#a!*IRUdPRR$l-l_X@O5W2oc;hneDcPj! 
zKDxfQ<gdCOpzHfe-sJk=jKaFoQ&qmdWLNx{w!e%vQg;9BRrc#Fyb#Zk+>;Bh6mB06 zZtETKn<?-Bt=ru{TU(nm4SOBDR`q_QYj}>Xv$*ck9{kbSUh8=H4SdmV>kG<e`|v~i zp4#6$o;Kl2YBP>Db2*Q3otJy?X5BNo_XCfTKZ2e~hIj-zBR(Y_t989lW%!g`en4gT zlwD>+Xp#7oUA|9c_>^6qtulPdF5j&(e9A80p)!2RF5jXue9A71?uk#?<!e=jPub;Z zD#NGj^3^KCr|j}nmElu%`AU`HQ+D}MmElu%`9hW9Q+634Y?1hsU7n~ie9A5ts|=sA z%V(<$pR&tiREAI4<)5ewpR&tms0^R7%Og~VPub;DRfbR5<r7tgPuXR^%J3<>e7wr= zDZ4yCW%!g`K2Bx$lwHnJ89rr~(^Q5}+2uYe!>8<WFUls~!K>_h!aFZG)&+mN;gw76 zIPmHq`CFZMg_BYjyxOC(1Fv?d?7*upRd(RjR+Sxi^_j{JyxOd?1F!z2vIDO^RM~-7 z@2Tv-t9Mj(;MH3yJMe0q$_~8xv&s&<Lg(&+SASI5fmaPGJMgMrWd~k8r?LaDR;uj4 zt2&h(c=fc(4!l~XvIDQ4P}zZ3kE!gytA|x~;8j><2VTur*@0J;Dm(D%UX>kqHA`g& zUfrd#1FvpZ*@06xs|?MuaqNqrvB*A4dZwO1<Lqb6JhSx)(X3^~lw&%C%1@{q(;HNN zOl9b<{r*P15B;{wZ>kL4w#!YFO`L~r+xLX?YlB^J9zDHbG<uWw1LE34Q$<g+U9Q$; z$ONIgCx?)mdWY`zh3e4%g70gODW!Fdp7oJ_*Hu51-X%S&41eYzvD8O%Zc#Be(IJQV z3Jat1JzOJy$LZ*a`L5_(_O84`UFNwY-!*eeDk+c0{#!n@Fq+7Hon1<zUXb5-u7li5 z%rC?(1AkLuoKQEKFv}NCByMLCwzY&)T+xK#)G2U9lg@MzD+B*wwTG=Uk^Ay5Nutg) z-qW*b*pd@YDKz~)r1#ix>Y=%Etb0Rc2R}K0Uh3p0Z7Ms~{ce>V>;4;+9qWF(%8qr< zo@JMH|EbE3bx-WSF6;iEDm&Kw2P!+({ohn}tow~BJJ$W1Dm&Ia`y^e~{hw5J@RLTB z9qaxTl^yH;C6yiPeznSub-zkw$GU$;WyiX&RoSubf1|Qv-J>UUS@(~t>{$1Ys_a<z zi&S>3`vodH*8Mz{9qT@%vSZ!fqq1Y&SE%e*_jju7SogQ7>{$1$zv^7~*mJwA`%ii1 zT=%~jN7=FNYgBfu`^Qywtoy&|eaE`rsIp_-ze)K}>;8>jAx~k`Qm(2N)?jvyu_I4= zBHD9{D~fo1{Ce}t<yXjW7C-D?TjP<P@wJ)DKXE_CW6v5tntT_{r-e-0p10e+@}c&i z66xb;dr<gaCdy7^u<xM%qviUva}f|n3OkY&M@r`qicZ%#P0N;tf9;O9<i|8ndCsyU z?*N+)osxUDPHEoz7yhHr1T!WldhKH5>&?`W&1^mIVOP37fpcONW32msj@{e$yXq`# zPuIg+C*78!GgW;fp|gxd`RI?@^At_YpK@~8#s6I7)4}*RkLmr>l~<((R(fZV_r;aD z<6PtFC$Hi6Aiq`oibrKkEa&$OzU1@yf0o}dcOSEK+Fc1tXS;s&<c7PFmwv+km>*?K zyoz7op5}>DINRmoJU|ZD#!EX88Lxga?%>7m_B7&J>+sPy!H~Wt-{;K8&Ct&fIbk%` zD;TR{gv6VBt=4mA%qqnSS!?JVVJuHU?5souz82rDkS(#V&P+$g(+>ZUX7o+L?GE6x z6d0OiPrt~$lpjZzvC91;4^SpnqE(+grE4i$&$1#jx!z1W&AfMm?xTlHW(*ax-Qmdt zJv**~|G58xp8((CC1zJYSJnnsQdZT}U3a~EBRc)$L}SM%;!(Z&RgL4GG0PnukZZgH z4)~YyeC|Yib~zhj&yU}e(5gAwcG}z}<~&{Nfj9l%n=9~dqmTLW5?^Kzb2i$;oSppG zlFEsOaI#>tM|{_~p(VXK>v*2!%g5T4;cvkXVb7a$&MpL2wK}63o2xa%?*Nan`JRLC z-r3l|e~RzkS@`aa<;>_9&Wu`dQf+)sjKLf}yGVO`M~c<aH{d2$juCMgR^Ee_c+R|E zja@@?4Q^=f*F}4wGx&x$W5)_;k9^d=q0W(L5BBos$~i0R!)Bl}#D4yH|GB}g#0{ad z*<1hY!A-@vRZCj)7i_9qShurv`>Q+a?)`Y@hU@;iGe4nq=ZcCgJH4Etbq!j-1R1Aw zxtY7#2#1K*_E-95PtqLUM_e}vR`G)I;bVM^1=o)j_iP;guX*hIE3R2lO<@84iNgh( zJl0;oaD}6up2YTui+t<p;ctUx$-bnR17D;QJEO)`(EK}B-Rvf<snke+^!H}Y@n;gJ zO#VFbuTyLz@5P06`%W#YlJCP>@L<d9`VPfo(Ed?|xd(pcIlPPQ>&5TdS2y*+@4m)W zXC$)+(#MF(AM@5;#KQQ=<<Y&I(G-mo|M9_7YQgz!;F#vH7FkSvDyNiUezdU0i(LKu z#7k)KGLEkO_KRer+b<!e$cyYde+!*?A0LSzds{P!vC*HnQH*C&urFg_9Kl%3o*7@e z>LknQ`*dD;A2F~V^JG?y7>nQTNt}7{B0n-7{G0rt1Mwr(p0nRL{6{{4M#$H!{#<My z-^P#pgn4w<Q7oss&Nk`(My)Tcr<VQkBU;k&pV=QjvKHUib*#VjIxD90G;dkw6OR_p zx2Y|D#1__NC+xE$|IK<C!Cs<rRGM*87?)reykTq<S-DGWP~G$9nld!;w9-4`7nt{v zXU+P=@wfL-ivD`jyLDqaVz+jjX=!GSKMc-b%NwTkmi$ZSS$1N05J$N}um{J*&x4!X zbz7N-nb;|XtN%&dp4ky{IUdumt!*+f9xif2OURWqgWU8USNMGJTX7;Reh>$K_werq z$M=Ba@+q9o7&Gw&w8jap{=y&*_t;n*?)28$*f>SEG|pOLHVU@&V><bhwdbf9z&eZ9 zv0oa=;;dUAV6L`hm(}&r+U{ZUyRL10a?s*u%l@MO!^dZI+6<&K6%JGG3wOlD-eDd- z6qVXPJot-uCx^$iZe3iRwe5uJEPRp6wdaXl_4*C`XAio1C-(M9vioiSiLoe`G3OJ9 zZ!UhX+H-WgH(u||C7*!gI@OomT)N!HKQU5-yI$;N`9n?I?l=!tOCQP=?IRup{upMw zlKf-QcaU-XNk%l6bts>c+}_xHlknX@CkkE}h~7jj2oq1PG_WZbGRJGpy}^SsUSpn3 zTk-;A!~%HFFvkClQR)j9Fqf7+e2rPxf?emexo=3UY{OS*?&aznU;7sZJm~wm^7gSW z`00&C__Kk;eEzPYBp$o6WH<R2wE<(v8130c!vxlvfe)DU)nKo}wyo_?Z1UYyvr~Ip z)1D2~<^3}7LXf>r?I#u;G{PyL-Lddtcfy8{J11P`O5G6bNz4{k&N~(EJD<;8G%!3S zw&xd2Frte&%ZN@9U9In8PvcL_rxiyc1H4Q3D<|k#E^BXNAoJFe9cV}=-goG3Gk;XH 
zdW)+dbg8#a`SX5%CbH82&N_|B-Z2XvlqZ<g5o2-htTUTvH?-5Evr-GZ$z#kMN1Iuz zk_Wt}Sn~;eHff;fq~w(z*pw}r(|JxKwAT~;Gvm?y?*3&_Xr_H0OfkEUl<TEs6m;jy zI~Nka=K1x+=k$Jg=cdcxY58Zm%`<4ii(>6(<KI_?f4qGCv`@YZp0=Cw9q=^2VpwUt zixy-HkJI5B<gz`i4<UQzrVnBJtc6Y=LiNEqY7kv6W-ps_z`bYw$a8)EpvtUqmz1?7 z9C)pd`uEnAFO;9ZS#JWgo3i-0VsWGr19~*FU29*Xpw(j(hz>4hE$m087mfI&FF9kt zX?KqrV5<Cpsgc)+o5va4$Zb80`fI84Px^m+19^?`)!F^0_QjG}ONrxB^kCrhTKs*B zjtP)I)ZAa{jW2XTnVG{XD{?lzoO$qvPwC?``tX>2C>BY!NxMGxWca0P=zaAEY|Z^C zwK0WpDu&^3+IR`SEE_+XIIFAmo)2#1!>h&-D^NM5TRPmTZQlB<}L*YLNh!}m<z zkOo~scDjBI^yb4clFKq9UTB=?Wg|SL0zam89XS!wTbquzct+E47A<c2*i+ZErLdrd zJd<`mBj`u%T=)iicd`ChJCkbh^X6Mj-{nNkX9V$+n2Dc+7e9$g_KaH48)q`-<@iY? z;3JWLN>!HpBv#<#OT4bR<M5Tp!&f528cdyO)CE~1KH~VV7~_5?0sNcBnoa<xeZP0r zjT?0JLf<u)ztGbDM2YvNnj4T4B3^vWgW%>?e!_!X#-uSHDsH(mN5J2aDOryV(qFfM zAH~>=uv@*ep0oVuyE`NojYd}5%D>iR9=RL^)6MuyHnH9>+Sk513!L0+(u(xRSMcE$ zuD2q8t=|i;clq#b&#EfAps1>e^|}u_|KG&u5?q6O+J~=1F7q0*Hv!9?W4LZ%kLxx5 z8<}rA|J2CGES)I_UmPQIcFD8gM>A*k+1FiN%eaM`=%NMQ9HYSaF@DAAvqd}oS1v(! z0)IDyQ<s6m(Cy*Qb}yw}6Q8##|5TQ6)c@N@tiFmE!#%a;I~$GL^20MOH5R?NI94X- z)UA}Cr!0LvioDu3xd*aA9DJiEvH^LdUU!*iNKHP)oaD1ccR?#88?-<hMsrTA2E0U0 z@Y{3)np^)N-(A4E%VFGUjM<DqrI?@O$qF5V|0H|5TVuK7@a5;)7}f+#Su6FxAjPJU z|C|Jj;-!~cG0O^>mr_UhPRiKVI-cJ`*<Nqhh0J`bvH_F;54G=weptRA(v@}ILHP($ z&9&T!d?R@~k2<zbvYzAbK^=TWV)ZJ>Bhi#~^m+k!k8~6Dd8GCP3!8oj78d<Tj|^hW zUe2$Jx6sBg_{u7quY^oGWx-@T-|Tg+8RBnr*A=7v;3F|T5!_j2<K#l(0hn_`Y!Tt4 zuNzL9eBcl`*#ORIZWWK{Me^9?SUB0yZawX4uA9CQPD)P8_Ph8VwFe(f|M&0AcUB1} z-@n9I)Z4<zRmy|5iE=XK?)&<AUfH;<bBKH4Q<5D*z}5#{G0*Su-QejeGrk%=<BF>$ zxCSTYRT+iu@a$2>qG{X4q8p`D)w$C)lz%lgnsjh<cqqSl=prlm5zlEuu`8vjirBl; zwt1rEUwM=#<eOsVpcow8K>Ov;?{erja^UdE&~NDQq-oHBV&+JEt=yy+302~2CVzvc ziGBp39YJVE_q~2QuLLyndwyzjp$kL0<vI8vIS$qWW5s4_<o|ncBnPj~77jd1a<2;+ z--!I;;y~S0=%;j7t&0Pk{gIq>A@sBRc%e^WgZCvD+Sjgw)n-|`oQw6Vcq9jiCAa@q zZsbG{G^GbLB@UVbU76zw2a#3MW3tKtosmRVsr0yYjwn1V;0lYUj*$N_WBC+)R(f_p zzzBbYOg4_3&EkFHUu&iqiz3ILRaZ2YbDO+7*_F{+7vq0*@IT$F+v2IynWPqMUN(Ld zokO279LavdbA1s!xDGt|FYu)AGS7LkUB!bhjhxB2v8Qp$jDbe@D&)UfWWg-HUE{hR zx~4M?PxUBz={M}{+q`v)>BpW%PLcG;L+B)?os9bZ9OHh7ad+l1%4q#w9wWy5viJgg zEgxE%gUne39G2}}bLPuuqu=iw8_mI1Sc4pj{38BTRf{ZY(+k#xWQ|p}tbu+oT>MV7 zr50R;ehjM>o&uj*aPrczMp$~U^xZ7}HRhrd0(Hgk>Q-d)L}c?IGI^^feZvq}|EgW+ z>5Jj_#jdmsTim`4Tit2I&2(?D@Fz?1J~R^_nH5G|5w?e9bUb2(t}fD8(XE2W0fONO zU?}>~J+{jJUyZ@GRagfenP*b%Tp(V~fO@-~Aw87MdOY)cG@CUrOVr$tBUj{H)=B&8 z#yh)^KkOJu=Oa5y?>1#jd|BkfDBB8g7Z%s>EiHVD51Ql~8hF9yMQ)+))|ee?8F1FR z*(Le?b;JCI<-a|={yiL=mu@8Z+48vJok(XAU!4b>Y<aH-xYH@`Wqx1Y`y6=>I;Xb4 zhiDUYt_65EfiuvUXcO};_#Y~N!KT;S({ov8;Ae{df9v?Sb;z~gm0}}_rr*Vx7WnlJ zA2d?7o*bWxe!bW#E&b8y$CL{m=T_u~-JGeC52fh$c)kT##{97g>*VJmx*|PU@eO}= zhUC<2zwGd(vmYE)I{N{B^ZCu=SH*8Gze;`~e)o@(&TF?Zm9gK#?^b?JonLx-3-XHk zAF$)Dcbn*6?^*WFY`=77*Q2}x+&5=ieo_bFiy`PE`C`U<Qv*92ncqs}iLzegFPSyD zt!PlwlDt{j_-u9R-0fpM#JllA|9sHD1n3`eOlKsahkL?yOeTA-_b}Ir&0((JDMbb1 zZ*BB1ep!Uyp4JGlf@BvQQHmU%2fw%DNf_`A#eQjlS8VaP!k<8^gtHdk%`k0$`?=r3 z`xVdy=ex7eF%Hf+m$8j!jKz#~95lO#+*?*0LfOayv|Drxw3+YB!zPK{p<pY1e8jae zW2)MAXYmW2jZki<&ykm;A36JKBzMB*r->;+d(Qg?_xH2@4wd6eb7X*9Mx!sh0Xd@r zx@DEU;W4E#`G`H_gh7Eii~e`!Be!5l1~u#?dghy8)TPo^?bM=zAaL9TUM&G`=vUt~ z_9-NHf>Hm*x9y9uEl1ov7L<6fDF`RV!wVbxfY-+wt5c{ON-I>()71ucmjLpr_Ay?v z-x~m(h`o0v?^UF^qWI0OR{N^oNFOWhx?hGm>Yhs7W}V9|b%lNSk$LkB&f^#jO?nSo zPaZJy9%~e+oC4g5L0rG|5bfm94t}#{JNez(kuBT6UR0e$yR2a^xm_##&@c2j_cZqO zlLO|OuDsh*SABPny3q0Ny3xXW)xi(J{R>MU$*xO|>R$HB*#C2}|9b0Ru~<&A-`jL= ziTl_&rXHjFgSnsnX8U6EyL?6id$YsjV>*(tR=d;7XTgVi1JB9?{MIMrlLyNPGcQq} zFVMigaJbs-Yu&;ggzB;uCRO(gnE8W!)XQg{*P)Z?yrOTGRUSuKc|@J<sa_@L%f}xA zejDM}HOQ2j2hBlBY!1|4joqJepVHq);H>)jfdA8&AH_TpzM#`bQ(cN#neErS)lT&m 
z*f@O0TGK`eeeCq#w{yuIZO>HuqME}sod1;UBRx!X<6CIO%@0{RRx^77``KFv5Thc9 z{#pbsyok-}05E=ksyqBG=O=c-!#=>ie=+tquWTRKC3dl9|CT`BKgIr|uJ$SbTv)Lx zGM+NRYuShG2XB*Hw28j5jjUFexhEr=NI&$btVoEtE&Z;2__y%9%UB<`%^20zf_-~8 zFp<3QZ+Av(>g}V*AMAyvWysz->16sDWBQ_6wv~j7%)Q#@0?6`Bj1ifX{b2FM6rJfT zu=YtZl_TJKudA9fkk+1WBeIfgh3fYf`fa#lRNFdqxfXQHaoB>u_hBTI4coBGkHHIi z!aizd<O#|n$-yg_Jx*+K@f_f|p_=pFPZ0}(waDCNMjndQ6O3KNwh%3oZO^iq9(jMR z7TAPhZHxcz25-8{<~IGe_rxXl>bvqx9*`En=CHWY<X4uzuy}#(ldbbd(o=1D-O@P~ z&nP>hIzjq7QXJOKzI%daao|=D`u4zsEMBB}Xku?&aNs<i<Pnn>jpX}Bk}JFhJkx%Y zQR)u&14q54T^jsy8}-il^31q{b9S0<(;jQ`i5hV6Qee~W&T3`-O|suDg5TtS*FG#k zau)AR#wIilyvfP!x74n`4BI+=uZ}-95d9VZ&bGVrr0(`rJN9O?k^R2Lud%D0FQ5;a z+r!nDjN+7Sw7w<Z7>(X=?t<L#0N$01Bf8LMGJF@fi5>{X*;Bj)F?qvp%POpeZ*qBe z_+%r6>y5fT$8mi&*M~a))Q;X`=Up)AJ9M;mPXD(1;PvUWvj={$6<I)hRA)}T$UELM z@twHq&V{GDyc@E4mYrepg9YLTM&9@NK^gpDCj1}>KlsDLoM$=&KhQn#1M?q#VE)4o z^e?)<(tfWSKlmN@HLfDdHo72x^V!H|*^vasIT-!999?TW-yl6DvE1J_7d{}nv<KP~ zVNHuxiN3c`NA$|ebJ<SUnl{zc$XUR_HRI+s_K<h9aGp&(6np_kEZdLb@hbMp1JqM2 zGmRG>zj4uQujzB*tD8VR@j$e~v>jnnH<0sXSAFZIccGi+_xW-u53+PHd{WJO#4?L| z*{4x^-h6)S-I&+#9&bwQ8rqdTuSZ*UMRuF_z)$Kj!IKdENY*uGc~;{0u(IMl^2I(7 zSZUlFM=skQE3@m#R|}1T7IwrflZ~lv&gE^gmu`kfy@np+<S+L5u-Q*(4_tXWJji?O z`^`WfAL|O+zBuo*_aZ!zUGM$%=a}|X$@0g)*1lM{RRN6+$)<x{4LsiH?Ohm^j4>0Q z9QZ%%y?K08)%o~;XC{F=SwIY7*AS?h3?N0;sF(=|CV^mu_|du)(AEYbP}~(UK|}*V zWGF2yv<0+FW&lM=#JCYeTNXi(YHRyx4Ir9DK#(Pa;C$cDoqLm;>m<Nr^7-`p{r zubI1@^E}Ua_Vb*3pmzb^@IW`^imjfC{g!>Ls~roLt9(OF#a@b@l$sd@*h?f<(sR60 zQ)remW4@z8)<{*2_V4S>9$~CM*Z7-nzDS;G;u3sea;M0hmOGVup>n6~;jaq(cFAeP zfrk>zQ`NT{%Q{}csmL#RD#@oMxo+{0D})z@K(C$x139XGH^cW!^mB4lsht0q^K;b{ zR6Zl~pbi)71<GYEbrrEAwn&pT$#En-XM0Wbs`y^whe_Nm`yR<RTtx57c%-zq*JV9t zuba}|QMd9rN8P9GqUs73Mb&BaKwian6S)5>xWI20>Z|=(f3I^R+3Z|gp<l0jW`tMT zl<!Q-cd=ub@0Rb`$a?*Qj_&ySJ;-%*@>Tc2x9F{S|M_WP8M%kW@{O7*_CsC(Ok=2< z%D?#*e_!@$&q1b=_}*eM-;E!GFKOH#yDrC{pLL60a&KeMCH2=|=6a5P9o<rYy^QNO z-NHP){blVP^v!BwiK=wgWn-H*((kW0<XdA!-Q49~4(1qQOYnb-kYTTHq?hwznY%9P z#reO<BguI$&M%kzF8|3TNgGunbHqY&tqR*+g`7I^X|(e;oX2pE-&5I(^%}kH9_@Mz z*YRPT_(0luJI?V*oXp>A=hSmlAqP(6Ks)cmIeBF#xmxYK8|T33WbXp)95^p!9sP+B zc2B2~Qm+%-oBCp|6)JV)z=P+poU1n?U%o62zp8PHU!5T5=vje(Jm+iN<?@~75y)$m zk9p4**=OV>@_)S?!0!<FW2;`IR^)cxyUp#dxl4!PHr~A(zf#({i*L!@DeyRe$59W@ zVXm3s`GRwGhV-8kgl9T!+|PN94$pm@*P7s|0iMIab41{Y0-mTKJcog27VkX<JWuNI zJjT0ob$FgcZ)xyE0gvn@AEd{23p`r{p63Lfn}YBxqK%((?#c+#X9?$hG8*XPvXxg| znNjZSp8-6qo$4JOgy%|Ixw?Y)mIKf8Iy}pH_XQoE=N11e8a&a!BYVsT;rXKlo|OX6 zO9Ic$L3m!IjSZYn)!})C^L!ILQ-NnX@XQc++5k_RAUxB7=MCO_9eCc+;dz~Rx9IS^ zqxctT@U#IQ*?T?+&yyB-W(hnW2s}Rw!n2Du_R6^q&xf3^F~Q>lo)>}VC4r|c@U+$W z2YCL;d;b8Q{W?7V;N1f{Jp0*yMT4g;@C5gwm)ejYS>U-v;5j1w-x-AGFl~Izd5sRw zG0tmE@YDd$Vc<C;@TkC}>ih#d-|*gP;IU@}W$HBVIx>R%Y`6JGYw)PR6Wp6#;JMlY z&p?5vJ$_ZY41W!NhbEitY<@L?YpxMN9wJ|$_8DQ~Av-)Y0v;LxOzcO`{`8uRU1={@ zyYpT*+UljZ)s1(1>oE1Q`I9x6oWSI)AG1UYOzi}w{<L!??Odg|b0zHz<l0mnrmHy5 zH^DR&n5F~M41p;Im|}E30;X$t?`ql_p|^E4?~c@A8e#LFxkKbW2AE>%Vfu<|X1TBC zoNw^>tH;sKc)gu*v@?NgJ{_j<oUbv#<O8M`f$60Y<v|?;EVuLgZ5jR}=yi?OxAD&1 zT)Rte^DdkJCDy)H^+H$n*7>X#x)UAlyh1-mk5{=k@1vi)IKPtf{`&cq@WGlK;K~V( zr!U^GzQp;9IsTeg^lLA2Z3EX{%?a|$E1YlC@4XuDe+eJI>MGhCsJD3)eGK7zsD4g9 zv+8QjuhGx1=KMO&)AjS~I3L0JNd0_-d}EsZrI_ne|CHme`Xl{4Ja@eFk-6;WCEvGX z4WEO(fvENC;cyoJoOPR$JBj(%?d5$qn726kG4_bP2x4RY)xF$xG{+yy5$Dg*Xvy{B zn4|IjRL<vP*PZ@&e+6;A)6oeV*QwZ5&hs2`Z{@J&f@jO){Vx8;ZqDHwTb#eeIsdhL zn}hhf-s3)pcI`dxZ{wUpJKq-XkKLK$cX4RvJ2~IOIfr(>2UtGJ@yBqmS9-DYqj>)Z z&{W__{e<fr+VxN3nOE*qoWXG$JoW&;zu*Y^Z;8Hf^?VqttGt2pdYldd4@X?J-seGJ z<bUiZx0f#voT@qJe*yJg?-1X|eA2yKzPX~}8#zCkH(rI_^3ArI_H&mwpmp{f<z?c= zT!~fUIHnCA>tB>H*3bSB{EzcbOXGZ$exAnpaL%*z^Wky+_TRQ0x-e&~e;V(swZ#?R 
zn-Rx+OQ7@FS<Uak7nmzPesCRVPh!kJkPm#CBZa)4<RaAQULKjNI6yw+U5@#R+n=7S z9Nmn)m;3M(`G4kD<_6~`RPqSD_$HnB+W4!!4*1s<<T1_9gv!s#+#x<Vaj(;JlKR+F zdM%OUFDAZSpRd*8;*Y5{(Bg(h`P#%<XEe2~z@^=rY|7W(tavv9`=`B>qpP8@*sFBr z8;JQ_6NvK%aUdW5B=C~B$B0SfzDkUdXGf+PBPZ}B6RSAE(av|yBuDoNKK@7GXTZMn zEy>FzhRn-J9zf!K<M&*1n`&G|8g)mD|H1K3j^EsUga5Vzqy4i!xWRu48u2|{Ew>^y z6yo2@yj(Wlih2Y)c`Lc%`OK@!8Vv{E5>gB9^H1d*e99;CT+jK}KbB=vE>4Zkj!l&s z7QQppi(Hxbq7rXa5KG<6-T+0slSCU;j^KV9Q#iNox8bDiRN56kun_th`)v&48vAzV zR}j->-I!9GMhspKT*3K~E-u+;qmH_vyzlI@(bf0vTxF@?kpS=2QSTw}3+#2gQ^g*G zzhpcE7b{04&--_D+g~{^<M1+$=^d1#k`pYVc1ZePt@A4N@EwI0bNMZzUvd`t&jYW4 z<6MDVXa;>;&_;nqnbBFXs%?HZY3E7$5*Z;E(6NYLjn4-L>#il%leULTAMnRD{AS)b ztXSIzgq#*@u}b%`zo6hw{Ns9E$2;?zz@UcxMLPPP25;G`<JQ2ZTW_U?82G+2_79QR z>_@yO@!qfa4pB0F1ZkP%>OdWr&{6JXQY&HXACgH9I*GaLRY#2dL%R5;(TBksBe-5d ze>z;g@0sr1bvTf_ntP35=;9g<3=%Ug<(o>9H$2yzYvcD-Y6DeQU)rc*pG1jSAEytq z9)j0=Lg#ttp?f&U#gH#fa<xPc9a;QcJ=9#En(oUWU(f>G_u&QhDY1J?@hrQ*J~cea zsk8*oUxny1l#W-UWPh6|bd`geP-W&x@@OXqDruLYe?N^v_ad_!6?dB0f)ad{Wm;`U zqR$0S$~*$+gRwV*M1LQDM&@%9eH+^-X@?dD=XMilhxx3aE@zDixzgON<mI(*1ZuL9 z<a)@iq)DDn@~ZnZ*-h}})x-bYxfElr+6nAl<~$AFjD{~ir)}ZW7x;aOgB+LdA5RBi zG3KpB7a0HDJljbBTWl|MC~LzZW0JzNC0mk)%>|zy3r^Twff*ftRB~jQI$o<2ls#f+ zht}iJV2h=lyQsM`+g>xz3+(TK#~(T5*-i8}lD_zkuGXLQB{WpIFEk9!OC`AqENu#0 z1{{eY;IQ`D5<Jg7_gy*u-@x;qA>gp$Y4W8O3ykVpSGP#c-ad~$BR!t0{}=jv>VKin zpPombE#cc6L$v?D!MFWFz+vUvNb}G|=h3G@+Y<a8bvWNc6nSBuR&=1DleE}F*GW!` zPQorU(sO2=^wpy8>I^H+jr#v2vi<MA(EhiL_Isg&PJ{B)_k!Q>Yl)41=|W)`6<HcQ zcOLy4e5dii(a{DE_*;Va%nR-J*?PRo;l0-y#l6H8rntA<Y*&qVz_5c8&ZFN(JU96U z%b4GweYdeCxL<XlFdVGMJtA^<@p<&$Xv}v;X3UR2dv1Ae)bCH~@oo{XeSD$)u8s`8 zuboG~CSKdE?y2uLBIEq4^XUH%=xfO<A=|svvq5^dgtup%N1vuK-mPXuW{mGVkA54` zn(y{U(v}Sm(f|3z6&medj_lZX4FOLxwsp5!uGzNT^|ozu%k^9x@t-xvXoaB>FZ{ZG zOj;!G|G3b8->Rp5i~4;tM88%ZXz96p>)`}7?8k3ppWj)m@7|(l^&MX3nT;5@n%u}J zYT_>E*vI-tiGPjwzCz-2=FTPl)oL`zD@t97n;8F6Jr=*8cs<w1y<%IFiEFFcN{)=Y zGPyhRdxhM)i1+3bYfA1l*k6M0QJog%XT?Svbt@-gWxZ0c_W!TT1Ivtf-oW8*bP{WL z%YH)4`{_l&eBx?_9AK0(Kyt}~(=jdAAnU+`wdm%UKFD{$>*2H=Q`B_6z}&XE?p&+a zkag7L$|aYw?6vy*IM-B9J8DW)Sxej~Ki<)tA78AR^W!(MzVdXz5?R}#Kxge;^4wAb zl|(L>96~NNXFi#y*ucEN{ZCSRqxb*!jY894&AYKiJk)nWB&Q?!hIwts>2VC9c7E%( zllJ1bmkK?vq-~>3gNG(t@cMghD7+fw{Tt&&-oN4r_mYp;+cWVZx75yRG&@1g$p_rR zXXleMdN4%J=%2-g&S}UQ8GL5tgM;wbpE(47@;PPXbIOvzKlRL{z4LYa4c-!dBiG^@ zZEwsMMw^lsx{UYRaLE5H<bi(AA?>BmUJ31Og`Tui&#w{-T}6A;!@11Y&(&rMZ93Vj z)*GTt;*;G{W0%7I**5a}HsnXHrG)qf0q^HpVVe0*$^SNnCsfW*<~4+F?@=HBM-KUa zH^0|&3<AC)cxWp*GtIUEkD4>6I1zXTqgMnT`L2icn+W8@k!z`0NOEoDo5RTY;AGmB zYeV^cg=^@pK|wih7+<5lI+$}$<eSCrCF$T{d;?r+ie>D&aIGixY(wF@{lMRuTEXBq zD}vv_p}yAj6!(mF(*Gs@JNf!>Es1B9LdDBm)8bTQQ1q0G{5SIvrP=((IQeEBzkC<R zpUENjGP$Nk2Xn_M{NF42^pUnb6uGh%qw)7SWPZSq17i-!kT<Cj4xrQLA{&Ll^%0sr zIXxki+;wSMpG;P~ufj6{=G!t@cl|MQGAC)bj9RA>UFJW6N5>-b8yTZh-;VYVVZ17N zPU<8sD$?rX237LBoW~jGF~M_{^TM{u;0(rU6LtG;=51u{#AN+k?1=2sC*v#kW&X#> zxs0)#Gu{>*mzbYH8|iV%Oz?&`Y_2y)A%A=q<~4q+Zkt4{d2s##ek}bf$9$P*V!lx3 z6XhQE%HM@P8u)Oq#)lu(1xg#*Wl!NRz^$(BP~o-i@R_;o3DgQ}`yE4@hB?#(U&Vs@ zIYv!?&3aY%Mdlc<r~YyzV?2WOHspP&!Ib=RQ)6$)^L%8-EWe$Rms8OHICF}!j!4!5 zNX?++UHE>BFN$AT1N1d}BeRB@I+x&F>11$c&f5Pq_g6C3b2z^Xx&-URS&R1xb&{-I z4$4tzSVaf>;a6i$b)PnGoambi&SL*$-`du~XY&Q()8ilE<8$MA;G@rhYh!GcO<BYB zTgKfimmNdOW%Ks13x1dyz)nOfWj9dAnxu5e&tl%Ufp3Rt`g|W_dXl}fM27kPY3@7P zxF-4@>x*2#0~@%<q)c$G1v!{Z8<XILD-0RXzeB^^V}2HVp|#JNe~J0_q8my%3>lJn zACaF<$d&YGWA54LkJ^4$GH~`p$L~ch4Bhp*wx?!7lQMt|`*L(xC(zel*}qZh=H<Ir z2Cs=fqgrI`;CyB2Uij{F;kzxiJ$`C~L#;bD*4;4|%{p*mTJmKo`N|Yk86>h}z$xQ+ z!c@y3<JiYCj?a$|rC-@&9Q|IetIYn|4;63yI6{|?sE^Rb(V(+t2ZQ6dnsF@Cb@4uJ zoblfogH3ko*pW3yhn36sj}+g%lkfG<k#D|9Ai9~~uf)Hc1f6A#UHv!Yg0>U7k4E>5 
zLib#c?iuOzC&>ELp#HfTJ!^iakaZO&Sck9GTbvD)4&o?+pQPSGbaNFxx}ih&o5m;j z?Y{(HA!8yn7XRelxz;X-zhJ=59>XqdnoHI(>;eywo!~vyGYXnlNv^q(kKC~4PrGNP zIcxs3YuIG5A)WD!S3&Q9Ju!b$knZbM=x+3_<^=WsMf7_DIia@TV#D5xkI)|7xRqGS zsJC}BHvQqp>SMF_rn0UAyYES5Z3Q~P(}gwvvc7_KMT6iqt)FW2aSe6UvFPqpa^h;T z;u$kfsgrLD8893z4J_M;Z_|!;`KH>@F1;1cB*xE%{^q%Qm;|2Mzq9TO`Wo-<=UsTj zV}@}bW7X7}fZ2?1DF5x0X*^EoH7ka{F~u?_l{bYNlg{;W@SQQaQ}K$w_A0*cFE})w zlG+b&Wc|78Yxv$6Gl^yLh$-#R;&1ShfoBPL8nNQZTO^L@>@xhkP;rc5BgHnJ)NP~K z%WVgPwsEV@*K>ej6*?_Jw~fdl`qAxwhvz2gak2O$V*kYE8#>9be_MzPgYjR3|7pY* z7Xnx5dBU~sJmLCnb8uPav;V-DNX}1iRH3=(9S8LIjBzPag7PmsW!%qG8rPCFrisJ% zM19{4bKKTEPoqBZ!JF-q`{(L&@_F?69ytDyqeNtr{YEu>s2h{M8_ge}?>6iBTKjEm zhaA3<=h3ej*CJ{*Wxk>jeUSx&zH*;F8_ii5xfEm0;_~z8vk|WBZ>ZziqPY_FJo;^f zE8j$F7FX_{3)fn5Izjm|@lq2pllWzZzOedb!_XgQyYkWxLg}yupG?z7X1hX;;vwDd znap=ZBvz^uyF?9m46$7{_p<mU9xRnLixt%J$*+P>=33|W%D2nqcMNe~>L1XD)X2$N zs({^@Z^JIhIw|}Te-SyC1oRARc-MbC?uJHv#1MBlu^;Al-Pr5^b`D*YTg@6Ql{M6| z_Db4HA>Y^TOMQKN{o3<H-_H-@r%~r-ta<JXzX=`&JUzu;hG<h_H~G$%#I}N4279r~ z+GjI7t63|(6Z;fg_ocvh62Et*hOm?G*KeXziC?5ISu-ZMPZqe_u=bm^;Q_w8a*CQk z$suY!2+!8neCqZ>u0M=@hy@l^DZE%}3?IY~65VXbiBT_f2fQQxh`|$`H=H9Lo7;BJ znH}OscEexlj^EUSoa!a4nQN<k6HCRvGTTY1e>MAz@;!&&(TADO5<~J?^S1IA=-p58 zuQTv9%HXrkZLwj`D<0p|Kia$X;^5xR-ly+cQotCjmYPw<1V5zI`_S0E3Ty8Q`LCk4 z)$)R4J@MIv7l<R*OCE@Iic3=1r(MM#$&|5}eV^vj@l1JDtWv%gy<f_q`TWp)4&$Ty zd~6?Ks;9(95kJXcq4C@7jVW|8zEvUc1!Lelq30dY^>+Nw+q`FX;D^p_tHr^Uv~`l> zr^HErfp^}2GPuXA*y8t}W)A@T&>c@I{_WsC23*&HXJLGN@s6hz?K_yV$7~@rnE&S8 z9SeecB1&E9juF9qId_c@?#sFJba1cC_c?!Vj@!SOJ?wX}hrP6G-IsF>eXNR)EiPmq zdukZ`IrK4!y&&JCkG{VMEZec9VeiEqPX_nPH2OKjn#DH+mgS~r-U;r7X*{!&{W4#X zXP!4bV-M~ZX*{!zScrWbI`8nAo*5C`FVcACS!z31$uqB*o_R93U!?KOG-$I-o_XE$ z%savTB8_Kqs8e0UGvrCu%RZOJw_8PSSu@MJza-ZNd~r8>Pd>$S*wDr5zP6r%#mdo2 z_JY>(85Y07m|v6ccsaFi7~o&bcCOHP@ej-=v6ppAZ10b$;rotlXaWY-4zCwDvgx-T zmQ?2ozG>1Tn>aEOFo^uN4c`ap6#@ord?v?u3NCUqIX=<Eq@UvtrCh{XNzo6i-_-mB zkJ2IKhb<eMv-7??Hf~}7b6w^6o`;H8%(mXT4q-St$gG1)$6dIs9}KMIJj;H`Q6+Xh zF~iW4!Q6;hPvS3{^rT<(WMe-u!B-AF%(m!(mknFg5H}d{Of&fI4z=#W)_^bfRnH?q ze0MjFnH%{wyBp0b;oDg7-Cd8b&YO+w!g=63y8&Mf=3x=U_dVLu$5+Gm+DPJy4=1vC zq4;K4@IA9Wa^pMSg0B)ue5qv*2j6H5z7-L~H^+kSyE&TtMuwmHmQpzQZhNX;el>o6 zDuVcSu;5!1NqqTcd^q?%Yr!`&g7|(!TW0wk8%cb-hKKJ>7JTC(i0`j0_+AuAd@l(P z-$V<(yI+c&zPi(b?_bA8P=1rc!}k+euN901HThk7f%wW?0db`EuH;9}>mHLW`2H#+ zz76vP_!EuoFY^RL@HhU3uTd8CH#GaJB-=bo;1$;Xx@0cLut`P4gp!jNpF{2uQ)@m) z<2+n~?>FH5u?go}G@LszC-7aL<IIfL`3|6s*zCLC_}6pw8t$LdhFNy+wBULi+?Kwh zddPM2ZPrG4`o=hurw``Y*)xJR3hg#ef7)J1Es49QGUzqpi!Q98ly%R<+^+6?kEF*# zbH{ge6pdHgcPf@|vYw`%;$yx6^r^jb?W#+ObM@<wku#V5y>4dhuDW3E_*{Nvt?)+L zpCoe6WrGxLgZ{;b~Pv~LZ`TFT5T+WA*O)6ArC{t3O?Y1cE8+Tjxi>j8>+N7{*| z9oZXDYyadk_3f`TwclCV|Jy`=)?Q;Dc-sG4a3A>04<`C2><#V%pZP&jef#(8?H_uP z_Su_NYkye-{Lh%$53qkr=Iy|LiwXYQb@(R$|1CQF6B^*ZU2p#*X`el0wD#|RsviCs zruJp;*UTL^`Lq6VQv-c=bO_RC%T4|Xf9VjU&z27L@Q>8ne@ohDPc*Il8|T%xFZ^fF z{{w0Nnw$MuLvL<q|C+>L`vY(GPZ*jQY=2;4efxd%_WvO5v;U3OexIl7+fO&Ozfsy> zbE`k=Pq#L-zotvD{TFWaPxw=pVEZq0sc%0)Z~s|opFP^N_MHvmdxfd}mC}CiJN#Km zcQmx$yGOA7?sxbnB=rck-@QkD``_@6@Xv#^pX=zM@#M{p?!_HU?ar5W|Hiuay*Ul- z{w+S(?gu&W4c`PnwlY76uW$FD-tI(cm-V-W>=|%||3d4+5sK#l?$58^FCd||=<?ux z0rE{8X{%_L!<)%_!soUO#j{6X;9SYDd9pQFyB04eFD<bD!fxAhkZTouU${MSjv*WG z>M&f#J-*X+bSd9)>3KlbvIgJzyAF@Y-VZc*3iQ2Xfal4ix(fC}D|+%rbwPNp^VX1K zFn=SeZsG{^lg#@hxz5b^bBV-}7aAvR1ct)k+#<S6^;`)Ynl5fMw-{!eG%7s1H1NsB zcIoiC$l2jp7JQGfCuzj+<r`_?_?4&TG{*Op2;!S=!S~fj;yXAzeBZF(`%rlJt`8OW zI~wrizUp}_i0}Gl;{NsLi2K*q$Nj^@!?&OTUk&D~FA!hd&(hwt`dNRWEnR*!e6I_M zZzDgeF}~mRvow4~&kaKtgzIOG0B7CL(%?S3R%AG|pA|}HHHY&83(mGk;XK*q`G&T_ z#W~u7bHxSXtmp4EdRg;#IToDXWlvS~R%G~kCi5K385zFxIp;a_wddOk>-Clf^HUnW 
zp}!vzYK)u1w}S=WqDbQVlknvDSqr|I5ybZ++A_=W*hu1gQ+W8^WWhHsg82U0g6~C< z#P{a#@J+PfyIb2EHZuHtrv=}?W=2qc?+g#$PYN2#@6rpzw>cf1Y{B<eA@L2zF4;Xx z;FWN7^l!k~Y?p4)aE^>!`i!>BGJJcaaGq@U+}r}3Ct7gsc7ZrIr>Fm9!THjVI5&zP z8v7sLoo8wAw_Xm<F7>wX^}pHkGdSP=-5k$_+NFF8zWZ;8pd8N&PmaHMqOlx5A3=Qk zTk!p3B=MaY9=>ZV_}&vie7~kGvkXs-B)$)ZhwlsvzOD%3yU~L0m661Ec6j&>wBUQ# zdtvh1oL!n_!S~qcQ1Tm&U2=H7gm&TDrIWK8+ohK^oFikGuCw4=7Ac%3J3KG70OxWG z&JSE5&bnP%94ZgMYA{2NxzGH65a-3s<N+3+BM-2+J`Zp-Ji9a-eD(Pj4d!9Z@U?Z# zEb}bdJF;eWJMZZ_O2aK9ByJ6BW{DFU<HnlV0=`o(*WJ{U8Q-s;OrAfLIT!iHn3J^v zsnjFN96&O)kJaQD<y-e!EhY2g^QAUs@+BT>h0^4EXkR<{rlby|9rYUk7T6!{*0 zv@&2bc}-c9D|>%5wLVI(Db#S5oJXB$9g6G)Hs3-6B_s_R)f6_SfnI;loJ!d*I4W45 zcbfHWY8Q98)ZaH156itgYAqYp{ps;=G~=t+=WB3Ru;-}M<X`Aow{zj1ZNYu>C{2f1 zc>CPAcVR8v2_5%01osqb(x%KEFMCy#Lr=lAsW~>e*VG)_md6{@;fYqFgKD8eL6~$f z^T4A*hl`}9HMnN7QBFjkU(|UZWKGjk7TnWY3HNQZYt{ko2;hFT;NCHO+(%h(S6T`8 z1s2?G5y1T$){i0kVUK$o3+|i$)H<?1#)A7h*M}$jW}bgjaPJX5?thupSl*v(CEVjI zxX+CM?z08=p5fzOWWjx8E8)JAcFppBeFSj7PH_KT__$|TaF1#w+!tAJj}8;}zhmp9 zmPBlQfHh-c??ji&T7ahJ?V8i|P7D6;{;_prew+pW_eO>%^Je@vnemSf5C3hCHJ1CQ zS_%Jl7X0T$0RI9r{>{|4HaGtBE%;yGO8D=hU9;?uiU9uUX8fC}cWn;;EDQcot%Uy~ z3;xj&z(4SrN&cIue{Bwbrv?9a|IkYEZ^8e)5fPAoGycug!#0Qiwnr`U-%9wm)A6s+ z*7d8NdEmeGJL}&Y;vMatd7Yvj>zVamff@g1>SLS3f4+`?5H6?Z`WE1Cv=J)S-9@`1 z{|5e}B7lFo8UJSLWt+o4%YuJYE8)M$f`4=b@DCg?$$vBTv(4e}wBY~l@57h>=Ei@V z1^@RlTF(EQ%=ljxru}~;DE|iRPqh;M?JW4uiva!wX8ikwhyQ#F{@1q>{<~<mk^Dyh z|8z6{8R6ldWx+qHmGEC=!9O|z_y_iz<Uch${GAs3-+iH#<lln-d+rFxzZrio`)ajq zsHNBHf6gP<we4Yx{I?SR?JW4uiva!wX8hT6GaUTqTkyZWmGIw1yN%>O0{Ew!@qaly z{Ie|hN3|0Ei!As@M*#o8K9l^jhiW+E@3i3m?(?lA{}%k;OOJs3oAGa^Zn-)CXWK&- z`EMos+gb3R7XkbW%=kA`$J`wL^DX#a-%9xJqTNRF9|8Q+&G@H<XaBM+_(!!8{);U5 zM@InvK&46k!yJF71^;(fH!uH1+B|Gp<L^T>@!cM&VKDq^`EHMS{!_knnq{F$#dV={ zLPH(HcVc3_?t2(>fSoy3krRDgr-_&SnPWN>%Kn<Jl;oHdD*InHr61*)JbkU5b$$Ql z2ZJ>Y1|E-V^pTo|(BIQ+YA#r(#crAB4@HYM4Yc4H)iXOti`|Xq`Ww+=ccZ!f5VUx> zDO#}iXE?NYqJb6~9%-#ai!HRP>k5q)S4V&rgPNiR`<{kFi)$>jIJrvXCh~kyXraZa z;hK!Jh%b(^ue;0}d`Cvu?=>7+)ICryBN|_9XeC+<x6q<A0<?IsDO#}CZ8)?jv(Vya ztwf8{v}=};nGv8xep9qypWkq3ai4`2{aT3@8!fcBG6J+nX^Ix?aU2dU`desGv$A#c z#RC>v)Lz?ieR1&LP3Q~ucMgXZU*6wXU;L?+XfeP-i;@V?;`yd%!JgLP(86n>#gAKw z7KdrqtS^2N0a{FKiWdIxXmO8)7Rjwdi!~Nn^ojs2dNf4~_9zdBFM3;O@$cWYj=spV z(Bji;TCOiXVLyeC{ssH9heM0X`x@(u6|F>z%Ph3`O$2E1%cf{?G(1}T)<TP0!=#1O zXA;wB{+g`Qw8m!pY1gbTei)J#jpi#G$0PcD<s-yp|E0%f6D+ilxXk%*uvT*%GAMb9 zaB4Mgg(gz7YT%Qg(ImW@RilkiH2JNCCLJR{lXjtK@<@;-&KA%l!9tT=zimFQ4>xWV z{xkE*2UoXTe{B2jCiKTAw>P=QSC8RC)->;)*;s!RYqV%OR?~7EtD_o!W3l-hhbBj% z;`)mk=(Aqp`e%alS>H@tzdlr4FLlnRsQZ!}#~c%V1aCFbS*&K=5|(bA-#{DSWB&gf zv^fW#Xl;a|gYciuCmJ0_MSu?Jq3Dnk9vyD5(81nHbXaJigChcTILm%VhP|!lhnvEq zL$rks6~Aeb-_ayLWLxO4IZe|WEs~2jLeb&o@aVARXZ1Qm!{LcA>7d(Lt&LE1F{*_Q z1tICs$j&y_4<YJ;9@Xg}_Eeo5US51I`0F-RgZ(-U|FCUpllX6?U9(<Dj{yE@X8d!) 
z!+)d&|8G}>XFtz1{zVr21H(ee!G(>#pM8?*>w=s4ZbEbOLJkZ5Z?_WunHKyvMF9VD zGyXpeZ~V8+Xw3glv=aWR1^<FD@vp--_(-n{e^kd`Nw#|)L7u|(tDm&cV0bIh;4idm z)&bW=fCj@18f4f#Lt8+D3=0j;EN>k>@U(>nXNR_251e9;qk28yZF;?@t_MQ;&Nd7F zZ?+Qtqb>Nq6#@J=n(?0#o*vjdy|Es6yp`~8W5IuR1n_^@jQ_Oo@SkJB|Jqi<|If5* z)&avKfd5c4{>^-+p*cI4VZr~*uUkp}E%=`u5&`*VPqBLWe>A-Dw^{Iivz724ZNdMo zF!49r!HqipO0vWAD*P0#9jutvSO?5%84VUooE<6_h;E<(�cqgEUy&I9Jff7hK#Z zSD?oNW*R(X&>+L%d9VdEC}^O82LIKqM1yx}SJwj?4Xz232Ih4VgUxbK9iAObx8Q&J zS1r;5P4d7~7W}^%tm%OZ<^ePQC;n#A1FOT+1A%+%Wk7@d^;W|FdJFz<L;(LXGyczp zhyNxE{*Sd1{!teEXGH-22hI3<!o$D7g8$X6g#Wv=+erQ+fd61K{=W_n|8xufr<b*o z{9Ew<W>9$YZ?*v^*ymdO0EIoVjziCI?clfhjdehIE74%2g$A!jfCi-o4KkuU8(Khv zw=6Vxq?Kr3x6t6xFlk`61NZARknvYjGn$-B(f#3&@!)I={=-@c|95EDtOwE}fd4=< z{+q+o1H&!&*Zs0}^uUuA{QU#N(*x%5m;K}G<AHVI;eU2&V;%5XE8#!Ff`54g@ZVs@ z|K;%Tf6IdZBdvtL-Gcw45y1a`Gydh_;Xm7g|FBlV{~g+GB>!RJZ?=O2b^Mj&XwOyX zlW^_ewH6wj`bF#LfH@W#)Lj*x4lvW;*k5&DAS2py6q<%hgKwrZ)&sA$5)IrI8f*-c z24+3@vQ7id4o(Tr4!&W*|DhJ(ze@WiK<J!+qT?TYM)f=l{w2+PH$c00j_(G{G~>T7 zJUuW=$KQZ`NGsuALA#;{4E%>i0RI7I{C9?j|1}o;Pc98#2N*OAl@lto;D2gB%g6ud zUXvfNCp`SW$qUNA0sE`1gumN@|HcU5|FRkXPr}3h4GaDcwG#e{1^<V`#NTWKX6pDW z$!$D8L!QF50gqW|Fu0Xyu!(lfI$%fyXz+c51{rNU{aZkTGz$%m|Gahdz!Mf4ocMmr z^}vx2O?to^o*nf6w6Pv2Z6*A#v*2GA0sLPw<8RAovVKnFqp9_Q<re%OXeIo=rCqZO zJQxA|XPEIn6rLV@)Pnz@R>J>n3;u&6fPa58{zt>ZKh1*w@g=P!{}%jD^pAl2?=i{0 zKRo>X_gLh=mGHmLf`3_<_?zwEOFI5aa$C=f@Kd;U@HGn!?r$X;oTXi}4tO8}G?;GC zAfv5kS_^3Ku!RO!wGs{9w9sH+m^3iQgZ<2MP#vBf9BRS;n5T90z~dJDzrM2Ndf?D* zlO9M8&kmmYNn<_mN-N=itp)!L5x{?)8UOh3@ZV^`|Gt*tzq_d#fitwL>j0inJ@*Ik z-`!M=0QrEB>vGKeKh2Ck-^XuijaGAh(8CS*Yp`F{O8CEN!GB-`@b72FziW8-54GTb zY;lX^zsd1`+=Bnt{WST%;PE%x!9yPe<zGowJzql4aP8p9AJ^*u4gQx~i3ZnLXz)q| zXt36xL5AvC(*hclS!nRHR-(aa+BNHenPJkvtOxRS8p!yosUyOgAAHb)|My!7|JN<} z4~PK%ea!f0gr^4vTk!wtxz^DGk6G|P+BZBsU>^V4T_!)EUwHVRxVy0qSl>$cUv0tv z<p|)v#*F`f@bE9Q;QzB$!v8ewHj@7c;Gb{CKP^1`AGF~A{Z_*NbqoFj!o=Tf2m9#w zE6Gle3w;u<9UNq#!I4F+qXQnb(BP{+;pqS~4Qh7kzCec4Qw>eSrNP&CHP!<!wh|47 zS!nQ5m^3i!L9b2&%??fp&kk;|;6J^U@UNp?vmTfc0sN<!@t+)?9=P9v|COzT|7#Zf z`$qu(R5SiL;o(2fg8$)XTSo^xV!{82D?A-w=Kt#VO@6>l;o<-FWQ+W_68^(1_`ehZ z{Jm!Ue;6MA8yfImFR}demf^3p5o&(8j&^nV=NZ*ABZ&X{X7T|c*AJU*z!V*SB{{~E zhdhOA1MaiXpkFJ|V55ZwS4MyaDFzKPVmw#0fCd9BH2891i}XN~eDIKk28U8LJ#fK% zVAca)>@ev8Z+Ld_=%ji*puxWO9Qdaw6(2gCo0q$0+g41pskJH5^Sh~vQeNSh_;KCD zH@zosv3ZZ*X!o9-<?x=}qIfc;7Us7%um@5b*Kzh&JmHA)u@_R>N%olh+F|FrHa1_8 zO&O#;x9tYMVsn>&wji*~*;DZ(Daz3*zJpWAzK?l|QmQKMv?{wYSbKKgXn&=ni*KJJ z!MESh$*1NFEmqHdy7hn~(f667qc6#Sd-*TBD@%{_KJr&u$u)WQE_^2We|A>GKFxI# z-z^dN_YqfLbtn6tuxDf?aP0#=+URKf|6L!YV^76Vybm1SH2ptm`hQhlc|OS{?HRw| z;g@r#!Z#gbl+u*0Zhr~C>4}P8uGKnJUwT)?U+}EGZYujEZSSCzX7NlGaLY3VvlZ{e zvvzN+)9oM2k>Ag~WFbd^Ev~k9*~)HTUa_)Uk}ak--xfFKOU@5*zR(swrhq*&i*3c- zirdx=jiH?}?7<OBKVxV&kMqn+6n~W?##clC+uACn0*8x!r`nXVVeHSD#r<mSzSB2_ z_i7zZU$rC7cW)b|RGzDeR!Yb6JC^oV(q1|+3Y`6baU91wVBE#A$=0s+5O5s^u5{pB zXKOd+Fy}`&-(_n*W)pDkv_0GHkWx3)MLRjb*^mC>&jd=nod28g$l|vj*K@d@Lw`Qb zUBLH=HdaZ#AibU1ZzlI*d3KDB>r~ETcvkSNt_zeoz)kR}0UxK(8GQQio5VF4hg64B z8VCG>htOH@SZRx|HE5e;bJhxd4V(m@mEf?}wy@h;TisA6{q?2)INB206mk9~eWd`y z8fcqH|6c;j82(?({}k@W@qAzX`T3kXc~*Z08Y})Oz*psnWvt?SW>~kK4wM~&231@a zUJ}?{z%9Iz4cx*j6K%1zHPE|OqrY04ZHpaa@Qv`yL|~t4ThPr1>;j7uSYl`^OZo+V z`7h%l?F(GUJF=&0v=&^oetE~q^TM-Q8$2ucBD3|+%Ks#rt#(z6lIvm*$e2W>G?Vw_ zT9$3)@)Y(5+}cqojRn^}mnx-G`OT)iiS&^KUlnl3xJ+wuTw<FXmxT#+Lt|;@UiveS zOCB%^U4{33^sUjHe)Acdm5j|)?$>DdV|)w1uf`GY`;zmV1f_Hg&+WKaDV@fz@Z=`i z7ycJGh5u)82>&19_|(?E*5G*=58?YGjLBDw$r0N{V?JdJ4%-%WJB$pBp`Gc#Igb9? 
zAp_ev7a0&f9>?|RTpve&J2)Q$d?EuW-y7+zj)m8f(HR|sV`9}S$UsE(N-Q*G44c&} zS<uO>SC&H4ru52}=sivU@cW;jSJ;o(Cp=kH8z}SI;cNEp6rRq6_QLmap2s&GZU!fj zi9B?A64yl+<?$;reJ^7rI-r>ISl$&{)O;BzixIi!{Y>u5Z;`5$rXmy4uDmb2R*UY9 zfw#8ndRFw)M2?s!r8FMiYD&k7Eoe%|_Cv;H%tXh=)0XgU3Fn!>Cbqx{T$!B5bKk6M zMStF_C}pwSA8W&Y@a_U&6MRJX$aop>xxNXMoq{Ll>vT%pr+8$)X9GX^6}^(WS@ERU z(6`_(x<h^q{UJ0EIGWPqQx1Jsk2|e;9J+<8$H$&SkBjb3kTC?trgS$tq*>ja3Y|pX z)P5T%i-i7O5PSamTXgQey@6a!=c2=e&ld1&_B%x9iarb5?@-UJAGX2wX8quXrcLRG zd+||3Kh!YJ{|Wlxd~Bc4dG*#n?jMnd=J>h#)c<?@EILu-_*DBIb=J5*J^9S|fr?rz z{&^*Sx$HO455HexW%ckg<5$_$;@@9RjP&tDzxTt5el=S0q}w{ymVBJ#U&TJ}CA%j2 zOFvA~u9xiM_s}!rH}Vd9q5CdB8(6jq**mGV-5Ec=lW!l_q@M@o%WtPf{{N}A*VhT0 za{pfLuYTe_e3r&Bf;=<+y+CK>@%#MC-wkx`$*;r*^55*=J9YnFZN|TMS>&wVzqjE_ zbL5}>@!kT@*m~a{A1%M4t&&^(KP^wdGLB;%@NDvotM}otE%}N~$=!*LbfNzYe|@L! zuYU%w_alxO$6@yU#U?%bZD2?i?}YN@|IT~FOpSc`SlSrNVUBggXWelsFyunzX^O=@ zOu?qdfnPt4h2Xf-lT+{G<KO33fa5}Y^ZtA>_P+-@OZ0dj{&(en9PkSM=9od^h4;?} zhBRj{uqn;j3opL@=g`Qo6#~Bt*kd`efq$ZBOue6vkFUYM0QjZP3-Re)z%Fz*#|;uQ zYzO}H(RoH}P=!n<5z{7}RlE`-OaZ@C^ml5Ppnod5d@6oGhQP;tC%%Bh2nv3I%@i}A zk4$AZ<=aA=revz}A|-cChjZJgYKss1>t<}!LS$R;is3h;jRKzkEt#s4co)AeD=8T7 z#_~HASdADfU5{P*0^>A}P%`0mD!Fknbwm3wW)e%8W7aA7h}BxG<n&GLB(ap!Coz)5 zQtO~uQ?XP$aiBa`O#BrxmI9U;9J}E0Pdzu*$5F&knjYGP9x}&FyY!ez^pNPF864*L zOJc7@=%J?KDbYhs>76x)o7X$r4hJ;9urGa1<FM+0u_psVra-&C(9Rqi^wDAi=yozd ziAv-A#Q)dyRwa7ZcL91!czS+ypzM0+)|Bq@ou}@Swo<<clntOQBL)+B_USR0<cA~% zbD?|1ZZsv!lk7@vKSyKxD6y88u{ZqNIC3?Thgv}UMs7`XhS5jN*<c*ilr8NCPEFa; zvAjzx_nq8}qUT-~AZym#OQxQCS<_VR1$>**3zZ2<?%Iy$&arGq*3CJVl4kV6G~f}v zP>cTkPtgmKXK}_UrCWP7ooC60e&#speEhq8__R&woIcQ1V)o|pw^N{%Iga`*bT#-) z@|?nF29L!-PvL2a_x@9Cl#y#u$&JiME<)v6GWo{#o#3VATF7-7xfb!aLh0JIwSggd z(5<OF%y#lR);vr}GkF*z-(8>QkUUH?I>3z{i${;i*fo`Jxd?pXfj^Y5oP&)LAGamB zn3(^8Tuc^YX3eRmT72y+$;Ch`(V3zAib>F_?>T&J@vpl<ucrL#zR;^V|N30{7-v)Y z7+`M7#}>J5(&k)XJ|;uY$Ee6>4Dz|%5+~N@V}PeApSC$YvgSPE$7)MHCZicWvQW>* z#PIu{qDRgrKGAYte+lIN0Y6XVtEn90wh%d){}=e$$jh8)e@Wf(8*R+hGG9Aa@qA6Z zw%VqAb;99buGa2Kem~yxj;(OK<V6lJr}=s>Wod<?4C$#9jz2qV1oONZ-V<#{YPpk1 z&)Yoqt%c)d4z<j#TwMG^Whrg#p7eY-Pjb%`PcPnibhG4Ml3WG%IQ#`u9sURY(Wd;t z`?{xD+fx2Mx?J)xO5R8<e-v=o_89ZYB_rKw-tO*lU;h!_z+5FwwJ8@f?@?aU-{xhm zI4w$1mde~N?|7t*Gc#WIp8l!LTQ||p+_VFjqrf-Xdv>JF6By~-leznvkE)(gy=tP{ zU*~8`9;%J+H1pKQnd3dh9JQYu&)4LUN*vwldfug!KJ>ieskGZXl_M4ZoOyv|lgL*| z&dHywEUoS}(i_ZECDc}3s`$xe<yLiaYdNZ_U3PDZgLz+a=v8BgacAG>S1SU`dO8&U z$y?fZ%k15X>wenSd%C~k&+Q`f)d^bOYCn0Xf5j_HCvi<;<1D+fK9#v|l{ux9cvYM8 zuBJVilbnnWRaN@i`M{DSTVm}2`d6dek5>O<bh+f&fbC+Q%Xf4AIC++m#}<5;nezNG zr?UP-A7hnr-v`IChQD{r{pdTxYi2C@Ft6^bW2qgF9`jZG{+Rl>y2AOmbH&t>kB<3% zSAEQRcJhY2Cx1P@WcBO!rOu97kz!M~+v3%6zF&Jj?D<sXIG^vWW45S?GipwMbgXBO zp3_}50s6V;cj~oW#>w+4M^D>Ne>Zi(Z$DHn=`zmugBP^tx2OM2dw%<`>q0&+&q}+} zrnDt((s%4O>I`Dy3&*RMxR<0R5BI2-#jH?lHqRqZ1(q#sr<9h#H?MASr@apE$aw98 zuD^FEON;Dozkeh&J28DprK78_$gcSFd1os;T=Vs4Z`Bqht?I}qZ(X|EzYjWl<@)_L zZ_V9y&u0hho`YNLp3h&fd%l=!_sE#-rzYWmqr30;M8$u)k5XO@kJj{7yh$CDrKcw- z<*WKCgZAC%_D?1jkmpCWqefwZ+y4N$k>Hqj^triqfH9N)-lIRGU8!p*YC}7W-EHXU z^3Kd3!BeRYcUtN&@}|S`hE@S*ZY=zTjX+lGd2QOJz%sd~kYl+O9_rG&KlK$|Y70jw zxzhfh+fw(yeQGoO&Uo4!p_JCp|CkMK?^C-TfHtZx8=jpEZ#~I@KJbmAR->ma{bP9# z{&LB?NwzMv$b{DBH?%46&9^J#U4t`S**5o*Oxyh%@Z<bbu2O~ye*1aX!0`a%@|mNX z?=HKtl-Q+X_dvR5#s?29IcZBQ{-lk2Pr4(qb_8)%CF5mhjD!wVHvluT>vh~#r^!H; z^xe&eY<tem8il-HkKB(${zsq(GQ4MI{8_8@xD6WI_NUSQt%12q_K-JLmBSyV+tu3d zpDx&2H4?dLqwG;TRQy9tRK`0WymUfRnlh7lteX~~532{x@0K)NnW@GrH^nN2OEL!( zb(@;Jb@`4M_a5?O3o<X;y4)G946K5mwmFnRDbdPJnKz~sJE2cXwA+(p>so7fD7jqs z#L%zI*HtkNs=r__GOG0(OTT&a%YG9xRlVP;fkoX4=r^B!vj-G+lfLtlw=bV!b1q1| zZ2NNB8<;sOuTG^8r`E2sHuE|q*Gapnw3`Y(skG~&UG{O9nN7RJ;8QbjLAP|;O{Lu_ 
z1Lo`PZd)#}<k{2(>Sf!O$Iy1x+0lEe+Bu73ZyrgWu5d{VeWx6JZ0{Iwu6d`MC%=bM zHs;YOb@b&)@1Pu2k-xhG0ZrFbT?21KyN`Mg1ag-i2$Yq4o4&_@F)yuo+~)(M8TT0a zP6hw$<Za8>z!yUEEMOG;YpDO(3eACCqq*>84D^_YJ`|cWE*j0H4y9<QlACIx4LPmx zQ*@e}$2t_vgLK)hjdSL#DRt6^;4=|DTryC}%`}Z=9PJj-?n-?uh310KN@%_in&%H# z*ewp67AEglo(;_x*kTrBU$$d;CitY=owY>+fE#|4@w*Th--jQ`l`ohIZ)DL{HFRNY zZpykbwKx^J;I~4TuC;#VeT6nIXcMd95L;VyrIMQs4gya${f-5P1v(C9z6piHe2pJ> zET_GJS+l0rrRlUucQ|XQSIkZQrZGSE(fKi7=kt>Ti|hF@3x1UGma*>B7FhJL9zxqr z_^}3l91A}NWnt@b;X~ocVxH^KwQxMSgOEJ=sl7-3wtoe3e}kSme!JV>k(g8T|H8Qq zHa)86xox`G53#R?O%XeyW;iuFGCx*o&AR&j_GDn$@mm`DxZl)AZBonn_?h0v+w^gI zqTB!22Z6FTz72GqVnfy$w_!Hbmz3m~S#ojE=AVH+Uu}x=9_`J1|F~hr%4(ZuydN7$ zK1=+I`QW3HXHD{C)mBlTQcZn|iqEhLot{UYRoa?^pRpCX;am9Uw+Z?lrf0`g5r>Jt zA<tHHMz#ywOOQv8F<#k>*H@da2L|FQ#x@V#nT?*2ajU^*EkK^&(?&Yv9*INDIQ4eS zbes<4Vrx38wwbSpO({(fdzd%0#PP_`s-e^pT|`}4ApK|&dH$k+;!zJiw!{fds@mcw z*ql3lP7Yxz@UJ=e+=pMLF8*-M67R7uzkK`HntA^|_Wq0yj`fMDJ+`)R|FLQ93dd&; zh+W~eImb+GgMUDuDfqJ~_?u$iO3oAvk#l^9H0d|*OrY$iJY$T(wTyxATh{hKZeD-) zEBI$0`jM+|7>h38DYX#>ZpO3K%o$gWyH*>YnmXz{fA98+U*~eD#i{&e+qx`BwW-Bo zJEWE?h2QFDqz*8#Hq{pE${kp^Je``N+Y**8pQ0#3<@!4t1Gz;Z#=<%VKH}i1A;v)D zN$|-0CXky1kBJXy==Hok%yFNUxv4Hb&3|}1P&Npfh(EA;Qy}+S?qx%(G1%RF{MWeI zbH_`4+9BvZH9n>|o7&AWZ3-_2_Toc~!I%>{#JF+AKE}q!xcC^8os5Z#F<Hl$OdTAv z;@Q~~w{D`AY7V@)V0Qf0X9Iyjc4#><-SJWCZ1<9ChjWM6h3vN2U3e@LdS$j%{DW=E z5E-`v6T}x3Kf60MsSmueAHPBPC=ve)xlT)YD=;XtEj6carVq{hliS-7x)0<qYDqWZ z^DA$*9TGfGa_y(Boc~gm{u5jc{;sDF<M|`5S2`S?lN<|(rHnXY2C`fE+rZ$j&Jd4o z>Ebc_2?yQ|<i<nmnl6Rod-5yyRdlyu)8=jq<le>gD(;J|*tRQ>o5eFlTsz6NjrzUQ z^j*#Q&74bH%lW<a$AM+@d%C>`iOGbwilV4R^8ai}5qV=9bZj<un%id2?Zi%b_@Jj8 z(Zq{U#7Pd{Nxi1mug6T2kg=;@P&^Mx%!F+o1g^;JAffx(OO>Tl>SLy^wRw8Xl&8l` zd7s<8ThT|rQ>GN$=YR9fz_JSbbH;}G%)$?)p3`;7^bHHRrrIVk4qX|`ZusCGeTl?O z@|^0we+hEw&%5vTG!^^z8}{d0nczNp0DJcZzV+t=iRZNUx@dL82YCN8U^_?~HYYyu zKzEwxvOK#osEBvxC%8Q#e=!_l>(2ngJ7)vAQWqudHPzPV$jTu#SdT1lP0?(I@Lors z*=C%fPoY!xoWL@nt>M=u<M#&UwrSR<B_{Qp=J*=EH0b?z+F5lxFj(qO{`>C0vhy9& zlZ+>M0Ak=^jkeH0V&Dp=Qu=FR;0@Y%Cip67KRBL=zF~}~w|+dS)zHVYL?6$R5AEL9 z?-}X;2f4tKsUwLMI@NBA5`R5W8#gzxdkN!&3=R_c433S|ZFkXVw4eCk0AuwT&mUx* zzJPB<UY9{v72D;7rUorl1^dR>Rl#fffL;3d1GJU?uSP$|GhQV;pA@gm+^5AW(oZ+v zz8Gbx=wpd3$R%_XIHXPKM|k$L!p7s0A@K<BozK|#8JmTSjX}Ga(9V4_Fn9xF^B`jr zs!nDw@x1tz$H~oTc?e>$n6v47g!jk=EKdEyXnz%RI%Z!j2Vc#HPqoS7<8Q*>T!+6Y zzGnV_XS<2NN$z<MwFwJ~@i(X9Q~975e&<aoIVr^|dd3_}R8fOsjU~?4cf<bgLH}y8 zL?`-rxLv2>N^(ZR|3Ae4s2YNt69eZlK1tO1@ZG{(`K{!)=#LrRlz8NxUuOr!zw|?! zSL&v>emion`jgp9@+5v+-A&V*E^JPA^2f_lZ86)Wp1^2tH8NV%4%<xbN!p#vx#Xep z;6ru5%{%hQN14|I7&SpDo4a`4W_(1K?&bH2OO=k-a9qpLtSmX&@A+b=;{Pvn<W~4! 
z_T2o2yiy%<C32Jrzn($9PH6ch8~G;1SA?FMMD5X&Zy>YitP=d0s&sza^LveUUS2g2 zU03zY_(!!`r>?$A__2^NQ!AetKZ$F%T%s(^``twUl;sUIQ+dA&)=cI7a-#o%<-wY% zyk92ytBJ|f8|ICFjQ7NUA&=&-CLTBJ+kRxGx;@thVk<Sh*aNxi;*)$^HGa<L*v&7H z$trRjsgj4oANt-m<g^&aZ#0?f>gxi0g1@21+{EYqWmh^@@$NU|x-#Kmb<_?1JDA&n z-!vUv)gOBGQK$#$Qe5?Ix<_nN3bq?w+>->~&&8+A=DjJWpD53U|Azv5A6w_xeE9bE zALiE;IMmt^_^7AaC>`W_L9%0}jX3m0ct-T|*9C!P*LG6MUf0fp>q2fhi4C5Bj9)?C zj=8kJ276lI1$$Zvx>}R7Wb!5S-BXkIj=smp9j)SC9^>`d2J%4UfSinJ8*(BJ<w?nd zVo$pJBtKHwTXaW9-$vePN1p|!Z!bT<JD>56ooA)yNqmqR{Ek)Rh2+1yvys?9{F#9E zeuD45mjlZV553--PoGsAMx@oeFe1%M9dJH<t`%9`Jaca<K2@p>+ES;k-h0P}>3>qZ z>fjjPl=tpg60a1EUwGL)OYnVt@ei!JPqoG2_r&}19EyL|5u10BmV**{boV`r{j7{u zhTMJe%<{{iubC%4<Nj^HB{h7)JEH%tGx(6&J>f%&qL$`SyEnyDyO*ce?&VRt_kdoz zmq+bhHSMKSCzMW|(9~DqTWa}4rdfwln$0}O)b7+ggP&GQ0)1vB2G3o{x2B`07ZMn{ zbN)DEI|$hF(A%i;@zstL7qNa#We+7+_&D#9yrF-kR%it_tOh=-mK+_tEc{=$M)B74 zA@>C@e?~s<;N5ml4g6o@Ajdb79A9tA@df!sWJB_Nk{{G^e9-XnIAv*JcV*CLBi;Vb zfYVAhk)buzA)Y2@^;kWvyZNriE`_FZH{U{AFoqlrUZ;TV8+hf7E0waxdEby7ksGmd za?dPlmHm|5SvKMMAk4oz5m+X2Ne;5q%GV-uU68j)I)A@I4$k23eU2W!Gx;}YHC)2q z!5pXL>O>!O(|BC`J?ZCL-ur}lpLCtiEA@B9h6n9;4=u;nA6~Cuj!>@4yJzqn#J-lV z4=g)0aI|+SJYGazGoNu>NZxSa&Ik6c9W{N!v{>Yk>*Ny4<AlHA=QZ$r(Qwt5Z^KU* z?DS>9>yN&qcw^!7t+rTr{JvL&zlF~QPZggu@NN3ht=LU3@kI?WNY%HB$4i`1L+l}O zqL+AMJMl)G9xtX6cld}qkeMCF@n4FG8znAWYl|B*CDplNK5=2{W$!N^uFJ)Pnp`CM z9%0-x{jAHy5!R&r&&oxjCKs<h5m@#J_A3Xyxo+QRZ4KF)_}Jp<dfY|6q*!9Ioy1-5 z6L)#RX#w%oL|eQTU(KH#vvq+fzM4vWRpW@+A$iBCruZt4_$tV6-F=TE5ALo(9b)jH z$c^X^i8(&P-<tqTVuwWjQfx|^$gRj})&3bnYOqCeJ(KH8_Xo-<x2T?PIIfYL3C~=_ zyPw_Iw5;tu6Ik{cIp(HhaVz)roOjyRw#d%}^aJr;UfYzR8t=nvRbmfxyZoE-pk3DF zPV9lA)^ZR3ucPlIaw;;V#lGZHgn#qNolD-_$d%VzLEax*Ci(O0Im9+t<xqT4sY#GI zHnZJXCH;(1%B8Q%kg>`!?sB0OdGbcNb>VUJ*U(V9N9bM*&R-JyNzO!`&BKOH`SuCg z?TVaqj>X31r_3qpmUsHP@>F={RPRyVBk0FW=6FSBRD8fH=v0loq#`FS<fEE=3jWrP z<LD%ji?zs<#Pw^DCyC|P>alxYJ$9dt+~g0ATe001Go~21xfFb5etj&wxe(rrhYw}l z+F{x#hDY<^(Xr$Pvf<0aw0AE&I?WbeEO{Hr*+{--m!7YA5g9E`i(T;|y!$EZBPBPm zW_G)+i%h(mt@EzrMWyB@i`*hP^kQ<u)EO#6a*=O|arT4f0r)J)yIp<avq_#Hlh`5- z<07%Wrh8exDl~CMyVK^|h7EPDc6+a8e7kapUD^k2*W#BkhQz3HF1doPz7gY<rNVP> z><N^b_4a0bmrBMe;Ar1`ytl>f@pC+k?)(T@`0=E`vQTSqzsBd3_@N+5nRx<TI|sez z2A7A4XC+==#ki@g$zfh-vCIpp@UP4bog&s?eiB#`eb>R?74Ww_C+imr;qSk{7L3Qu z?`E==C69JHgL?%zRh2b8@Ql`;5*=)-kI@$QzDHZKj(0xoO?}br6<PTVpYk&Fdj@UX z#i1Z4Tj`s<nq87ls?g|@=)0SChDIqv1TLe`LSj3C?PUWt`bgngXZrb@T^S<J;lq8U z^z82u7zMr*@?GsO0FH(^u0$Vx=2G#4eyQpEPQEj<8}xl4*Y58|9s)mTe!U-*SX-p~ zK}GmMztH`lBFzs<sP}^w=zdU<*$?X0*blltgddb(6aCwGeS@rT9x^-7ITUXQU+g0e zF>*J;A3d43%R`RE@6JO$g}#;W#4zY)%p1COzIY8CuJJ__@f&lJ#yH-~JKr;n;oQMF zdu1GD427SL^WNW(wKScVM(N{a?!!immiI@uR{@S>j)B;dRnf{&Grd<?=-v50gx>#n zTylMhK4Ofe@+)i4UqfdIADj6nkNeO%O;uAgJ{FmrB<l!~zY643-kr!Z@}9E;`6uoz z5Zm&K+xwY5FDvt~hFuZ)5xZiRmr0t9O7tz0zN8(-qT23^NntKFk2uqiAt$mh1zk{N zlErA+78w*-{0v<oZLbnJq8&B*XG@aixQ`Yhj~Wl48<;n(=K+C5<k<XvkOx@fA#?!8 z&A`?c9_Xwr9&cPT;3dzl!CU?9jeBE=Q|H=aic@SeHJF1m?WpDKBwuHa-Nm;Mp5J{s z&{=fBiyMP_O8T2~g1kEVMrgV1M4(J!M;Q;>sX%TyI+Sl|_~bd;nLzG7&I5C8d(1T> z6C~$JJX<pN(mDk{mxS|IOSj+PFSNVMUwsVOmwA+h?qSeja0>TS>{$}_OO11<5g(?J z2kD^3DQQ`XGPrFH_(^O$_ea{?^=5Km#+)_%YV|9cuBsaC-*>m-nRBDEv?q1FO4ldH ze~q72$NQDUMwP_v3cB`d{FM^$+22`<KV6e9S3CvGN#ylX(*E=VViRB#KKwm!Dtt%5 z-1ZB!{q<bt7=S(Vbby>$lIzG=W$6ddVAqepi@G4?kyOun7m@QGr=Ke>&t}fg<OG&o z2%YYwfBM$wR1&QW7TQ3Uo02Y3?i1Q%z!MepeIc~D8u$xMw0ZdgXfw$~n>$0$<_;5W zN_5(k0N;nu<{1-ho`4qR&_bgXw0Vs4GR{xl@)mUn4RuzbbncdD@U`gN3TUM9knU@W zEtdKo{I*iz{~rc~uhF~k_!O7hn~&~@<9?J$@AgL43z1oQei_fl>H4<%)u2q6+fTOW z+x_TLv#uTWIJil!iC9K#+nZhf=WUx&D<(GX<SkoD3TFj6``dS~tD9(}&chC!`R>Rp zYP_ib%EZSxLC#Hb*;30zO`G<;5y_(#q34{`jX0rk8giN@^)SqZS7^RPg3k`EkQcub 
zohJUc%s>1FnpN>`Av#mu&BE7_IWDQ;)%<6ksnL9xPWUj1KGB_`H%0%6-YlX$@_hI( zU27+4Iyu32mDZ1P@!46|1N%sD7(uN{hS%SARGrkX1-2;Ez6I;^ym!0PN)p`Ve^!)> z10(gETW=)|n99jjQ};$q+!p2Bb#HT>d+tDw6?LZeiJG@>9Bo|Gyt$0L<SFRI92K;( zYks5n1V!XCsI9`^RmwV(QxLs+t4<Gek6-Sk^a}E&<aFmA4J;EnuS!-*w_fh{t^@Y9 zz^=`mOuJ*rmA20K+do@UO1s6hB{{5o;&#b*)duE_Uq{S81A0xz|CkP6f8~gC9lGD{ zT}K`ue)RMWK3j}0&Ng#NmgKT*_mlIIT-JU3#{0+>`i_tnkZ&)j#Q)oE65F)*eMK&3 z>NxTMjNkniEB>3|fj9B<xAk&+`v6m4;91Hwv9FR}$OazbtB#c;9iB5Bza@4zKl@k4 zzXn)mINVFVaddM{-#C5m;Z2Hn*Qn`B4iPsTwmq<62mN0OoYZ0X`T}RZt(`9wIOUrn za&4Whvya%rSG6(Pdx)Au;wayKeh>Dxc|M1SzXyK>ZCwwEBVFXH{D(HS@us0K4jxcE zpEKU(XJigio*TgZGsK&3G6#OTH~A)htCAQa`Rz_z+uj(fPT0@{ctG)YCJ&_&Q(l8S z%+lncd+jRScew6bYzFsad?YpzowJ>Bjph3Yy&Rp3B`=f=pN~S{1naoE`X<5i(q5g$ z^WAlxPxL)VTWhF2tft2B2=`;CKO93HqL(_uZ0ZmDP=`2$Izy>1bWwkpPaUF{nnS5I z{QE`9koTX6U9t0tm=!PD$YsnM>&xeSD(4l{EoRRf<6AS^xgv{Ws!iPvFKo}CF7YMm z67BpZQ=9k_wTW(idr?Ptg!;sc9?8@DQ=e$(Je7JvH?@T+TAgD16*2tAO5Nh@_FBE- zy?VXkTIv<^xhC(tPt9T$|0h$=DDS>c?P4-@ic*^>eFSSB<5oyN>g+hJoj%-?T1FS` z=W|Wk$>P^&L*6&)8>J2Kh$Vhm-;NqbX{QTyj;DGJ_sILwmO8QBiYd03SgCm&!o91p zV@Zxq+W4uwv-L-+Ru4Id>t>z4n{%^HzoB!3PCtWwz%H}yx^-g~(d81i!~@IM_+O%z zjQN7i?ZgL8@CD{JU(a`#HVzN}!G<FvGrZJ2`FEl_Sm)<ELT*a?hR?t5S+WpWTgSEc zV-)Ty{zZY6TgP!6NpSnWyk?|#+S~3W?XUa6hC}F(oo82WmG5>u%kNJ3YZv_W68yGn zUQRu~RqFh<BLu&#HSyb86Tf|_^IPAL{1#{8w;-QP)A?<k#&1DB*{Shc1F!51$#3yG zzr}~(w|Jf3;+y5SkKniVI=^*-r^-zHw)MZ-*Yn#T?)~mj=9U?^PwgFRm1BWi;kly! z3i8}C?wQB5@Ms`6?T)}Q@zZVSlQT`&@4%Kedv3)}skAFO3}ZgJWNvqD{_G^NtIV58 zZpxXAKhJL}x+_(mtHDmgI~{%PW$i!wuoInCg3j{69}+*c>*@CYQP*1|&|P<PJjD@> z4y!<S@%;c|)sDU_-ai?o4DHCg!7s5F2j`-9?^Zm$HQg+^b<v@c*GzKlWsc|INbdKx z-e>NZ`#ZQF%qJykdiQmyyKRq82d_CVsr=9hzg5ArRrn=U`18g!$+=Vj|E$uW4^k;U z2sqE@dsdPw6dHNw-mb+FtC=5zcI1v4`xIw%pF-NLB7P`=4pTUM&?c68ia2l=`;09u z{*s!&d#SB(_H%DoOJ97qjBnlG^H3Wm^<n8YzWp|!X!-nS-AhtDs;`7+rYesNm33U& zS|FZH#qPx@i<eA2zKOhf%mT5?@>~t_CO8Ui-nZQwvcNf0@aNZDlWx?bTWisor}|3j z&_&<B_=LLRr6-&#q$WKdd+~wPrE_fx*VbS!q)xqxT63-D+!ni?`Xa4PUF5TuZV&!% zmCujK*BSPpGcx$GNj|G?Yp@4{x%cWL#Ax=!dV5fETTnLFb6v(2x!G-ASI|jfR`OaO zX?d+g-)LmmuoD$rdn=Kg1bMA(^?9w1wOjSP)>iUbf7SC^TP=C5?Rs8o>jmbu#0L@^ z@f7gN8WJ<Vuc`{<UUoazUr;<JIR<uBmUbpKJ3DI=ae%DXkbK`Io@C_YpTsQSp|4@t ztkr^7Ku2mCd1kmLx#9|s-7f1}HkUluiMdyYeb1R0c525R_~lXf=h67-ZHO`2@_j>_ z=OK8`_^x4ay@-wVBZ}`;cn^c?`65Xv|GZf7N==E>whRM?<IIV%K4kDCmnuVw$ZrL6 z!kvARZxi|J#a#C!@`{!5_}ogv*qotV6~EC~_aSkF)M6IlLrLvR%&3bdOeBB551);? 
z%rq4}mTv2QUv=k|-4bk0rp%R&*jHC588g;;6cwl|<`+0uq|D3oiGL<OnfPMje{GNY z`G>0bRU^xg=T6&QO2sj!ty3|wT`cQ9WX*@H_eh<WwOyT;xn0(L7;8OborkRPko6sM zU)FcXGsb<_*~xqH<+Ee>&9tf6;_qx{Tx30m6I+rkYdV;d692{l4<tlO4VmQCdibt7 zg?)hr;+IOTP7%KB>4(viNz7HE>x>+HH1oO#(C0F5c{e^N@d^69yYD&Ln?zhI?X`uk z8)}%keYb|G#_k=64{FfdfWfFclD><H#p<AQ5%GNe*iyS-XKZV@=l&}2|Bq{kaebCC zyZ?(o{kXnKO@?(`pK5YkWnQ_X?+~`y7}u_*arJaO_qZk;Xxr?#VuP3dYJc0I(I#20 z`YJGp*jtnBB6vu~U2Izy<}Ag|pPh9B`eHPCgEd4GuSbuJWDN>&y2Gx;=_%xEWDSTD z-snSJR{?drE_AWfwJ=Yen@<fsbuD}HnP;0$UDpokx;~|@OKRm8vW}pZTKP|@>)J(K zSG=qt7_g|@y5#qlUkShQy`2SXnUgN2-f9i7O_AFFoM7#LDmD4iudIVmRcimkto5f( zd(V8EtUn3X`#a-eiu*h}x^4`4f-z@D?#-t@D^_6+kNT{8ZLI%A|M^%q!WxI|)OS!@ zVe?I-_G2QoAAPtlYjpFe`RKzpV&wc@{`1|r(m9F2bK1qPKqpf3k<5SRk4MUPV@gxW z=cZo7{0Qf+IK`hFAH0{`f#>7!HGb^yJ6TVdG9a^9&2wn$3bUx~dJ8)t^{@uq#mogs zUN5l4hTgX^UUtUK!T3cnj;tG;h3=ng^JG8uqrE8?#T3T^OV(3&u^uY8POyL)p?!>- z%DiF``g_U)WA}E9cNP!ge-U;zvAp0|WgI$N><#|Q8|cl)(T#7QJKsb<K88K{1|9m} z=*hpM|6a$AT!S9{Z}eqn^kA3X!>K_j9NwG`FTwxu;{RyA4?4UA9iE2{FF}Xzb5smZ znXRr+ZQZsj_&>^1`|8)zHLTC;&icIWTHIh>Z`W|Y2lsn?=e`lIitg@;e5c&qc||Am zwZwNrkXzBQJ-PN`g0fWZIpaslx*31EEul7>I5Q@ixh8CvGg;Xqwmt>gO8l9^f1#aP zUa({nYqYTExzgsGhtLl?P3Mrm?|>ZkEo4m@JhhT>*vxZdpOUrGd`F^C@n<t$W9V-h zW3n_pzIe=2@Ca>>rQbnc2g*+1yNW(PJ8Lv!J&G~Ep0OVZ4~#%&Gsvw4bMj+~<a<<% zGwqLYM|t|f)8l^7w|M6~w;%mex)3=V%lUJ2-2TP3D9^6sD3A0#gT7+12|M_8+bV|V z14k+I+l3j0!v&5U;5g3rW8_<(V;1XuK#wtnQJ%4;KH|x9%Y5&cUzwh%i1OTPdgc&( z{JuQ1%Jhs9?OAAgW+&g1+9J=Cn4WP*d)AtsS?3Rwy)MtJH$77r?Rnqy%(KKGFUvEp znVzYL_Izo2W*Td$OLzvFeU*$arq$A-D<uY;)hSpP-I4FEtmK)%mbQD&{Inai@6H(X zU@R^{?k+{{+Insw9;{-{MAj;6HOa(+Jz3xR3yI4{TH~%@j3sf`G<3IFW)>iSMttE$ z4y`eU#1}>!A+g1O+IrVd&<(QQRq{Y-=mhnrG4--`J92@KJ4j-1<)`Y34=*OK$A6Xo z4Kmm9J2KZ19yH5b)f%@~Vl;Dnrp;B_dg^N)g7m!_Uo}tUlUn;c;yaPe<qzV2X>(h_ zm{k1Kv$KB0Sl-E)-oe=3?mhF<+mNZYnjNgXTk)LWxE@}hR-^NPTLa6Q%Ijw_*IdOK z^}h3dwAXb}TyYM1qR+g$_|7zILFgwN8Bcw1?A|Q&F5fmjTJ4y>9DOq0t6&SLGfi*= z{r^7bi!0F=pYq;j>RPTuSCCUI?T4=DOS{kN?QEl+e&`CZJ&TZ0qpfYoy7&ST^BUJu zxt7gZ9bniazI-;vNNP*+u@|Wq#TNGk)~VQwmDt^6^hXMPh>dX4M-s5gx6YH~yXW}q zsx2V~eF!~Omo=6*<_G!#|0denOl?I9@Mi*lU*PXUU-#0s=sE>m*BAInw)pdaKlpv~ zB%gds?n-po*V=d4?Y=|wQ{{+qoupP-=BR`&8Eur@MI)j-{gGwQ`2NM;f7<O|&m8X& zblh{C??=ZSK*tT}73EoYaa{2`^f8_B5&FwKfRpukSHcs8=)2G1i%N8z&~Y(vTn_E} z(f@J!6P+!1B!fq$MW;SXJx(Z{ItCou)0T=(-OBGI`p5&{Rji8_ohtZFqn}IYXVH@2 zIL~1YZPAlao*T{hpyOl=1wMiKGmQ@MF3IT}B==~HZPwvH>3nFGgKidBGZ?>2i*6R3 zD)qdd(~j`*qUFJUZ9Kc^ohZ)?Q@>v^|GZz?Lyt>)@=T^hkLRFUzm#^KH??Dr_B>~5 zXFaxSx3sg$)Q&~h$MapWe@Ht%Q#&J~JujKs`PtDxnbf-HU=QlyvDky1^tDymdBxPu zlhK|JOznLCNT6)3w1ds4Z^vpg=<8K!=XFy%??ii!nA-V@@5(HfcCag2JB(eX#jfPg z*Y9~|A3k$$WbOCFcoOG{jgfUgb&hzKpYf#b#J7(afVqFyXO6Be8E5L%vG<*|ai2#U ze3we&e`)I}Q#*TkZz1>3<r8MA=)kjMwKd#HPUYy`?UmdwX{#Und@X!(HDh}X<1!4I zra_OP;4%b!6Q}`ihn`)Se2FI$*^xZJ-Q-6MozY*{8N%BEk#%$&{<~lD0IcylTIDEO z-cE98^#5@i#dCifx5t3vQ~ag7nb*A)8TdZBBFPTl@_v6^N65GFzK`DMuj_}OAunn> zwODxW1U2N$!}$+uw$`r6C$z~GAO46Id$;rLLiX3A3wGc1{o)RGa;!J?D}IqSMgJT{ z{@ROd0_RJ#EApw>gK{~Ya~YpQv?I6+|32#oj`JLRo*b=@9=<QQo@2T`O}kDF9oMIs zu0N|?@9z62*W*ps*J;<g_{3l8XS%*qyWZ6&^$mxBTlh=*J)~Xl?Ayk5nOEEAh(kBE z)9O_ZI~4B^G#-MN9B!|St&G_&o;{b(c&<FscX+1*y#89jlDunwu;DZMalu3V;Gt`E z9=clRp<(cl)Ub4dhnR1E<tg}HWvrz=)<1YYHt|9LUU;7Q6obdMz+?Anb}6p-a>h9y zyCgjJsS;Z&IV9mRv2ns@MPlpVu^Mo_0zL}xjM&=V@XiZ-Z}D>YNo?lRyptEF7GDlO z^|2}bxs0>a1pYVgogk;oyej%Wo^hA;Om^QyY|KICWhW!sl288}et7$)-o<};#_c~y zUVI<V%KW6P>puj)+~5zC-W{df)cXg$i|+@H2Ne8o`mLhRy^7+08h+c`JIZs9J!o?# zxeJF29M1rUif$HPb^+rp{mOWc)cWk`J0REL599jX+Vvj3e{p@P>H1vldUxO7xSnmg z{(^SBn{N--U8d_>wCi1cJGdTWx_&^r-o^JG*QHih_7vF<uHvg6nyYwUB}S?eokdNK z&_Q@W#@0-WB6RW7ynn77Ltw7tFr$cLqVc=h;D5En4^#1}oqPw!?kV|GhS$gZL9ycY 
zv;Md^^K80jikG=v^bq+*ZN0MiGWj+qYj6~g<RkKI>H>0#J2JUG#ohzo#aVk7^pv@X z`{6&IBO~p#Yux4KTliiNIk?l*8L&<izaomg78GBiEn&xjKiRyS7_(0~0vzZve*)iB zo6Gp%$6SoBc5(D2h2sw}mX&;0^mF2Hsr6zlrRxj$p^6%aYU&@DUoMxqAX%$;x-+>y zbbKApe*?c9cf`2fIu%$ZYY#prmaC=qtB$$))8Jd<=;%7`=#DIR@=bD3L&f~jtM(o{ zSf5l61M5u3Qy<~G99RT~;F^an^xw(Hx|}o(2Kqn2y%*@G7PzD>H$KZ&dx9?|N-2{V zwFa1yUU1BG!vCqvMW$x)t$pzA3E#{8uNmX->3!IdA*s_kz&q0J6~rzq$Q>fjIa%)` zW0B6cG_sIC$umgI8q`$@S`L}`yO=ii$opM*SMueu?x>3Q-HcBr@CBf$)HX<))y&5j z*X0?Bx8?pyj<d6};FU~x<_36YG(0qlSonIry%fw}1?Eb93bjqdUSmu?ZZ^84it$*9 zUQNbkh%Qm_2@RW7iOo_KawLj-gL-D=R{8HFzMq1=gGL*iTsu39n0s!po<;mz!$%bR zmxJEP!PgyIh+nSzy3)S*eq(i8CcdB85Anwi@_Qgb@s2H$Z?a-TfoZJhROoTz5Be1! zp6B*oNnO=CY*`NH>*u)rFJs$|5W7qJi=jzhbc^&+K_6doF7|Ad?q`ZUlRBZX_=XzJ zz&I9tonvmFxLsg5kl^-i2A;9_ee$g6+Og>MX{Kj)5x<|1XI7b>QKCK1nx1(PeI|B# zY>DX^?7}+JGmG$TMYoN`7pw29Fxs=z^vra8Oz{oJ;%C)AgIzgfdS)E)yXZFYjl{PS zemlV028rK`i7!RJH5a!x%;Pr1?G<|5Zi&lx|KA*!my_R+xLoGrBrdPIry(wX@V<t) zyz(A(#h0vslDNEz{}&RM8+ndk-H*&a2jg(5`w?H@D%N;RvMWRLM7CdYd#6&@;P}bK z6Ii>jBzr)nZ)%SpY?z8I&myntk~vp?&2!Qp$$UBb%{!CzAGB$nYg-Lp<1-VFo;#Mm z3BHhc>AKv|>k^D{m+_WwkjWgBe1A;7IhM`$#!@)$B_BG5?~bK%to=XSy?K0;)z!fN z%uGTi2|HQXGzn43!lJUPG#g3)m5Q;heF>m7Auh$Wh?Zbz4F)w2QmJ4IioRw>saT;p z)xLss35^TXy4#n8V4X=oK(-8!&hLAkW%A^i8PK<WexF}|e>|T|?sJ}V&%O7YbI(2Z z+;j1TI*75U#&5Y7zveK07ZE4(BL2-r_NM0YJBwJETf}$69#z)RXD)vc-%SqtR1za2 z@jnvRBk?v8ha<5z4*7<d9S?rkOk#pO_+hh_?!H{&aRT^ZC7vf2|Et8{NF0#F-c+#% zCce&cz8AbBG@O*l-Wa~zW&9P~W?nOi<q>?c_a*oQm+XHDt}dTy(wY(A$_LkSOMbLU zelIa_nRrYb^8K(q%I)IA{T*_t3>|j4@RRHV4+Kv-_}B~a9%ipM<Q&+ZdOdz$-oM41 z<2UwycQD4{mzD7^HTjWuUHo`@->l|a0=MkVy%EMot_K63n|E@?XB|3Mehb`hEpSXu zx_^py-?m<}9ls9aOYx@tAKPJ#Avf=YletskF!9MOXiiu=SbiU#`9IiB`kYsKvVgpa zF0btEwPM?deRF9a{Ty#CIYcVx+Z<%Ff;eoM&koOgOWSik)aT3&6UP|hY$_hSbrNzo zR_rH%JCsn|A$2*3A(nUehP3*v?`|Xx)a9<8<)Cj*w>CBP?<D+&iONH>jzF`-fJ{0z zRXYPD{vP3#*$O$s7^gSotQUV9ovC8;?F_h5n>aW8u;lP^4`DtBPwv90;-BB?W}hdg zvSMNmxyLg46&~Epd?s<ml2c9m^`*q4?CM{5urY=4=M1ukGu>jtw7GJ;1<Zf`8f*a# z&;9R{TlZjND!wf6yB`3*qfl$RpS~aLh?9H|{rv-!NyNWzI-H<vuIRmG(ga|S?IzAX zURm()K&9-iPYZ)92PvC~W2|2^Zz}Q9g<7765`0+~Q6^_po_cSp>?8Re=G|<*!za~W zSvKXqiumctZtg-OzNKn6@%XjO`5Ej_RP+1V%NYv;m*1u2$zH`W{?C2DaV7R|mE?t^ zk7fvd!7Vtd>BCCs$t8E*SjyHq-P*kjfy;C1CJ<wh9?tccg1^a~cak`Z3ij@W7v?w< zG(Wl6u8rfBmC~-qdnm!blYe^80OA_35yg)o_aDl=hc~Stu3qvN(?*HU%pEzE_-A4U zsMn;Yly<lut_h!4+VNv!PI|CLi|L-qx$oL@E)OmncDc6qB6~Bn=k(U@T&X?t^5Emc zdTZsiXZ9xd6EI6^$a~}_x1GKRM`8x;thTo|d3IzE4tsYTdvG3dFUXuCHtzuH)W-K# z%lP8&ET#Poy_E5Xz$JZH@xO(^ENFM*Z>;Pg{pqOk;2%_aIqK+p*+chnX737-4QyH8 zWxQJ_HX62lAVJ10vE8AasW|n1j_|4YNg6nB(@6e5jThN(b+g}^$ewCaFJ&iZ6BgvK zZy9h@uaLMW&Xp6#o5pz*_rsFMWNskDyqMv&)?LUMlEt%RE*;=qE%7thYjU@MJ5J7F z$hc+X$(-$mc7gfZ<DpgJ2Ql;FvF47*w;{RE@}Z{ydZv-TqYZk<k+!<cmCHCM5sQJ0 zB7<-336<r8>+rvJHLhe&X&5|FeCbriKzJr2F9mvJ|K#u7qb2*DX8B(X{*!m^e}=|Z z3@Z)_AC|HvlDrw^Ur0M8&t-}?8=f4_w++rh&(kUFFWfZi{bA^Uo>P>S1HKMz{uS^1 z^oiC#&7rc_jC$nTzl~E?4&?vtbcvtNowtvDd%r7P^Vv-TM(qBg<my>ROZ0f0Sg|-( z9Vhxt&Nlj459YA85Itw=IHB_b=$!3xY562;(4%_$WlU08N5<t<%esj**DhkFhGFx} zCYDz88f#GtY?}8T;MS2}OWxPOYqQBwDKb`z?WO6+7WVOtn4J{l-Ok&koaZumJHXu< z!rRSJc>59F$y}uI7yK=}UBte9w6(t(TOhP>E?uL=)R97qAOD`jcddp-k;$en;L!m} z&`m5iyuYcIn80VnZ-kCcaKtr{*J#t5Jqiz2fXmeT4U!)dy?-`(-_06%H|sRL4@DQH zXlrzBee6<v?;Rq~4LHcT<h8<mQ6E}Wdh@LJvl$EXTj7la^qBOYNvrhRo%pY}zzZ@z zTKhohAn(`}x{PgwZ^);!`BmUz$ygBFzu~>CnZLyEB=^zYL`>$F;$xxg3-tQiygy}b zmy(2V?knjJ(XqsSY3C<|*CdWk&l~Ir$7@L&#`8|>tlOo(GwAQk!l1;1`H9IFJj8v} z=N_3FoX))0_@i0xcWhH8jaXRGfzNDG@?$lee@m{TjGT?i0+|mvubt-~uK3){UH<nu z=S3{p1Xs_dlkY2P8e?L{lbwY>V0WvcCSWV*@k%MQQ~cf;-o41j$$6ZeA*V~orA&G* zppDgL!If0CR6BDg-&%1+!(+v<5*(aK+az&t5)UKaoAtJ$J0!=egIpBV$XEAwgS}TD 
z46Twrses4L81QoVd?aIh1f0KTpSK{P@L&!x6ecg`B<Q@D2`^UkF7%L3BF~MFWj_80 z_q0KhyDL~aK#$d^#4oWJ*^x8Q;ybbLy}219cX06vt4|`gpvx!mG4pb5v7#n&zC2gX zu#gwQ8w!o{4=?nU(jMmS;8AE79ji!eT%0aPPKVO;=VCp#R1V|pX6_R>v2U=$#+m)2 zpWjIH{#s?t1P*IIv=HOKS?Rn`Nx~`5g==FcGD*MpV9d?2%Z3lgSE2^s53@cwYt0&7 z%cT`qPyQ3+^{@LRx31!qamYlE@6zPOp&sWv%IDbD(*L=Kg_c3yC&)#xp7)p5i0>d# z^YvW%5b^Zrkr_3_T+r`k%tf`tTs$N;fgW=q`PZetWbP0fNBT=-cSfBaM<70$86`%1 z#g%$|#YlWK+Fl_4cZ;u}o@ntE>um8AwSxa~9lry;KBLBntuXN$KAN8j+!{+f!!{$n z!hn<bilYLv-U8z?;wuao*$b4rd1t&}fhjTKD-0NkuaJ9+$g`}+lrYE4s4?Oz3>b;8 zkbR9AuUlZY8SxbcjKo*SJdUreKI_L*gSu}(`~rhzeDj7eZqs<L`Dkt*gdZ<@e8qLd zSCqn+KITNpRVDeU<XqVG$c4mQ$enk)nOkKKP4<cPb9IV$F!7XyZe@Ii%W=>R&$+Pa zb)SOR3X&^kP~J!QC1j0}iS5og@A`qv$rb4Tr{Mw7|AUwZ3S8;!yB~aVbyJbDQsjFx z?_`~kL(W*?yQA2Gwdey=<|iQYp+}Vk73BET<Z9eha^$4viCi(a${E~xcdx0zkNXd) z3(C1)?9rT=MX%*9T=W`qXjL_O02{JjE_|QJ9N;Hk(6~=>IvSI@Y|HIQif=i*(%4(_ zMe6bXjg<ZBT=H96^c3@UncM~U3N%~G@tJ8hlig=>ocM>6w_h8MfuK#{cFB4w;}K;+ zBYIA+Gu7MtOXf>UedXAPU(V#Nbcy35w{8yoVzphDA-_F|3l!T`?8aR34CkXOdlEy) zI%`2`7CD~Dle_$f$`z91nQ!|qJA3WmbVp*c^yxCrN{^Iv96l}gKc)tg_?sB7&mr`0 zaQbsogBh8w59Z`cTyDXKBF}LTI0iUNM8D|$#96Ss74%aM>m<oRXwvG&miR7c#okSp zehM57l?{Q`beZ>r-Y>AbSUa%iRrL)0ua&_YL6bMoxmp>#(Xm3g;;*wNvW8*YHW&MH z7cPA5+MP+92TiP62c5znf9*f8PIzS64~v5L(vB4D8958${rP{r-`HQDTRcD4=N9pi z$=s5IuJYU?@fV48TYz^XLk@i0DbU&MB37M;7{QhM!T0uqy8bM}c6|HiddynWg9>+m z==yU5ctn3T{k%(mR->z!XV}9|@qUdRYUPdUy`i#&;B_uq*deyF==*b!DT()TF-DEF z$FVg@8z<y5Zx43h6X)sU^haQt9yW9heb+?%#|U`o-C4R0zLotx(ZO%?zf}hdt#%#! z0(9GT@D@W?H4&2`bFuipn~2G{)KZ7UxXB)D(`pM0@e{XLU_=LtP1y8|1%}v&<rWyx z!7`^b5%&<Ti@1juEij^k#U^YbrXdVNOvBq27}3GP-%Y=_z!0yn*8(Ft_$go<@L!h> z?pmLV9^OI@i!H<~tN<<ten<pX>_u7QnCk=wwxX2haL1^u;f0TLdBmTP!+7MxU5@?l zY|n(x<S%Ch%1WQrb0o@N$&pxG>+m=qBVR1BuQ_qOHTaf)g~|%}eezNA8QRXITkDg3 zSy?NY^@@Efb+5<1@ymLGb-(yTn&DOXo5{Mn!pPAmf2ICA-*tFKz~>dbv-%r0-O}CP zkk6UZ?*0bO8e04fzp(fl!u4wHre69YTD{_Xs=(h++e6wtpgoIx#cTQPwwKc%QZ8$t zuc|_;^fkHUk|@ScSH!uDRPBuUM*2(6VILyTf_WC(%mX3))=FRPMwYSTw6&)79F2@6 zc6HGE2!0*n;R1$V;%RtZ>XDoc=9%rg$g2=?Id<Cb$~ALHN?meKuB=h5b%_r_Tdy1I z%mLIX`-cy4wp{4ab+POxrjk>Ly>MueTuqhuSZWO)OD+0onc-u}rC($DSl*%U8?EgC z*6KHfem$NKo%BOu02TKB55w#FdQ5BqtIs3F`-{i){E%k9tYc0)pMDUZjO5qy3y)<g z2Zit5aoG9srCxU;Pd}b4o+6$lJnTycB(FDqn}@Z0RPGt24)=@#^1C*ZYhQe+?vC!h zooMr8GIgA8epHxq1fRlOBY&;FTsLFn{^SyDCB^GxK6#6^y!dRzHzZ>se%(SPgM17F zyq{haS|#sfU94a;6`@PS{uh{pBF^eqe8kO+yVzgvoZ_3~g-S}~oj$rO`=)Rnn4|S# zqqI<;@PyQLw=<>gl}kgb-uyaaVi(_M<eB&S*!6_aFuPt^ApBO!*vuxUl)~R%5NqQ3 zdiKPev#~c)l?AlBY_qMbh8&y)OOyqL@^=G&*@r0DtSs2dUytP5ru|3Z(SohYf;s4! 
z?PnHF%-gD5F*{3HFc+BF_}~*c^DxZu;NaPr%7PD_zgV4%On*$DG;(GO`NnLmUU8JR z9pSk%N!ffA?*%u0DC|`ocOg6`wt|#F<4&`j+)-tm@kl9{<5*A*{RJ(K1zGrA@O^vp zMix$-F0_2Y+8aA7A6n<*4{T5G^V{~t)n5}g(uVI<qcI6uiiO5PWx;sIFIEfP)1bQz z9>}G<33_e;*YY!z{%g<14nwA@wkn+cNNgwXV%dz5iE9?)7vDykC7!E|7=S~_&@J%} z{hOPV1r5+w^$-1<8uXaxwp)JI^}bf#|Ine~tf9T)BQ1=7_|rF?X<FP)T|}RydJmE3 z4nJzjQr?xub@k`t@UZy7YMoE4u5gMD)b*p}-w^&6-X0JNZ9Wc-(wAaCgto;IbJB}- zMsMN<`Vc?R7oFOVeUtvgamT3@<%&9uwMJ`poTnla-6{U3IR4^~a1TV5vmG9{Ti2<z zoV(6V<?fnC-kNOs@QzYvc2D=^Cl5K)LD1UVr+Tu7b;|Awm0)Xn_2g1=?PUBySs-&` z#V%#6oJ%gIPb%h*pIA&i(ysz*!pYo7{69WN;%|6l?R_0^0oL2c1ungCVuvd+FPCu_ zd$SxKPF$i~QNURExu1jeLH%+^uKKW|^sQT1`IG61$#sQ}j1P;kx5x2~tOMn@?9WOJ zmHZybZ;4OXH&5|h&+l{SclMcl&E%D9;jBS1Jd=qIKhFQH>02hF%gIkqyL{xVW?r3^ z0DVFmISZ47H*=A@Jf-Y7^Y?%#xO!>x!C<f85}bl7(c`$Hpa{7_hW|ib5C3q^FhGyn zQSEC~I>t-R!o)14Ujh6x&7t@<{7ETm%PtHGy`|_k-l>#pHj4C<@R{?f+UxxD3Vng+ zwYwwo{NOX`F6Yh$WVFAvZgLuzN#5rCdO7E}eQ>_A?hfQ>zT}I$jy4M3Y0xTtocjyp zW@oPIwe<7W-tNn#uib-`{%+cnL3?T$lRDt57K~7r0Xu(T#ZPh*lj<^olQw>pQ2jS~ zKZ?C!X=~NB=c>`#JMu?*du?qE{*1mVNK_U$<en60Y!ez8hk}<K3yRK!?qr?r9C95> z+=SHS?o_U*N*ooHo|yeTCwlz{zI6VUOS0=XKKbO==e3PiPtf0QI+Z)Ttk3cWJUp0j zAnrXQFZrt4PiMkMp+n9|w>+?8&2oIUk8J3(X5{=^Cywe=`Yk?NIhczr-%gz4BQr)# z^dkrP4oANbxZw90mEg<mr{fdcOI*oy(_UUSxI*c8RdjbXGL^-gIgmccRk#o1{=&!I z<kLDbrgk-R!+Fhv*h_RO2an;e{D3{V3YRjgmYkq(_E!!%FJd1WITqO*rV<|=$6C`e zhK<)5V;GD+hWqFP&VtupO8cb$HqaOE21Z96$453s_M@!#-Xj;a)IXPYG@wUBKZtHP z0WHEOvBqWO6Os51G6vxENgv3(=$1XrC2RG5fKOyh+USRw@OnK@<0$Uo=5DOIQTR=` zFX^qj!D;lz+!xVR@u#NYkrk0NV6z9?@nhV=`4;S<zb%KabK&u8s84v{1anDpufi2| zl~oh-9w0wX=al!gv4hsSs=SF+j3Yd-*&N3uuB18{OLvdQX32Qw?9KQ|Gdd<brg`Kk zpxthVGOHrb7}xu|#`PI5vOqhm<68Pd<|gQ?S~4=KEbN2s+ju|Hkj;006<s!OeI|PQ z*`qn-*j=_P@;i|q2Yr{htYgAJ#_#)#e=V^R$B9|pO}^9&53)||?5TKSgC`QN0za6# zz^e0>eH<#A!(1zR6dkwIiB1%~E4saqyHw=f4w;MOOq%fLZuqm-nbwp+Zu4?yTAg#r zFIPJ!b59()yuy{9lo78i*zO!yw}iG`3eHD(#w}yt204>&Mb3_L4{(Fi-K6n5_SFRH zZ#eqX6`91v{FQIV;~#G6vt@DsACttCkc+5J?%0sC>rVqS9Uj%n`$<fmQZ{F7^KHX& zp6<PbcQcf8&U}IO;iQ8Z6K7o1v$uoq(RmZ*jBU9s3z&TKeY`R$pZ9g03FDK&Yr;1D zD`EU3hcc-f{J#<=2yFgI-X$uN#1Ad;1l12Bt42Gv(Kh7lpvdf&CBzxfR}V5bFGP;b z@;UsUz<A2MbDTaf=e)|d$py$-p_0Bn)wtfeCURVnGoQ>M!pAa)$ow&#IZEVidFr{? 
z4H@3qa~uom9CfQj?s_6~w^rUVafG9HvdA1dcjqHjBPNz&H!X4XteZ&R{e=5!<QtK- z#2$6CvfRqf97pZo97S2^q_5D&O~R+9Y??ZjeUz{~-p#sBbZidzWbBKHy)bp`Gq)*a z<gHIR9besYQ6yh&y{C(>?!M>$pM16D_UL>?gjGzwT0noIvpF}C;WhQT@L(}~SqTs3 zI+KzLL^k2itikZ=VD{0Gqi48}OLRSZ_Du!ss}Cb?<xh<9>Bc7B7#lx)ATrAypeFlE zGW0VOkBZ!9=;QOCG4=s;n8=IV13sI&oM&(@g}R!N^)_T(bij6EI3)gUI(sm3kN9Qa z5nLksTaoW7L-x@D-DF?%!llUei}2gE$o{2<>|?8iWq*YE9@!tk`{jo0o3L*q``21z zUxP>X1$G3of2~FKm&_EsCF{gQO)t*-C{(ru`Tb99%hoHpw`Ci5BW9!HPNj<;xi^|F zs`lN_{Z690+Y`tqa75SXqQ6C7S23oN&qv16fvvpU7)!>q+gP?RmX|SB8yL%ljOAs< zSUzGI%aP`L#&RU@A2G(#gnfsxTxc0f4W6+S*pZCoLSrm5Sl_NgZ<+jW>T&d<Dn1I4 z30;rpYI<DCMUS&?($;M<{^r_^n|#Yu?#`0-3ojSa4&i5g4jWiE-=*{Px8miUr{QJ# zCnhhqWK1;Yu?SwKZ_Ig2<Z%mqV3kd?ALKl|^n=K&-Vb?NKS+7^en<-U!`9kR*-ZK& zFn~QXY#BKRaK!fiPX0d?fAzX4lE1{J6<b?uGO@KyI~$#sCv#ig1MjSv&bng8b3NBI zVOMq#!^2vqUjb{2x*0hW-3iJSqJtV)TbyvYXH3Js;*Jv52+K6vSopxSCx5~G6+pM; zVo&mDbJ(zK@xd5&&{ay=ChR+_o|O2UW!OgCH_AGdJcHOrOSmU**m-MK`!3MuGSQdY zokPO<vZ%0YK08tndp>&xJ4td>i=Gtz62FgWGl{O`&WRafHwmvz;|_)YFWO5A_p3&; zmkKAwp4VPCd;#tlet^r$BKbsYBCG8#cDsz5@JLc(;fl8_=TFRc^h#zw@}R^HA=5jT zSLIL4L9S(8c>?>UgWODi#or{pKv`D~;f#^HNA)ahAY*MQe50=|?R+C+Eb9={_L-Y$ zv3*LlJA=CMjcHTMeL=cSy=L{u82qx8^_yu|-_5%1o7>ex{$uRwEyJVpi9&fSyZSu% zMCRVT$cM~3P2}hnJ7PJqvClH+Y%t~==AMW-=W^zh-yrKxqEjz7<{Z}bVf$f}`JOpv z6z?B3<{T6DUFMu8Epv_r&zvK$qnLA^w9Gk0_=UvRbd+_lw5j#Cp|VwSFQd$zw86vt z@mFV})&_mvO!Qt3{9`;af5;peYyK2F>Hn?y^GzqQHr(%{+d@ftTd;-x{2yuyIdaXm z=yqL6L#S*j<6_!*t;~h2=Tuq$$y!fA=gaub?ON-F-xX0N{UYz@{XA4QkvZ=_(LQ{7 zSoc0z|7ob~Q^xn-?US1NP}wq@U2w&T?smg%w*SNak$Xer{vZx$NcCii4LZi}objPm ztq&D%Xd%u=_9i)x>6JBVs$-z$uV^@5_kmS2X5s@oh7T-<_^?XWsR8cqkoC}m#JQMr z{DZ7d4kByTIo{OOR{zIa6C>BDOQ^%t)7<l_>FHwjRP6qbw}`c2jg}&PY1)*gJ-Ot2 zp|Z}PoL826dsEs=*cTE%y*cmyC;3V;gQ2K%e>rip(za;6k~e6psSgv;>Hi7(FoV6b zIpp24>cg6^^cWbihsDok+Qc6)wpJVYr`!`^{eM6Ihvx_Mj@aC?UWul6tnWnc$lN0R znq3_#;|>Y%7`hoevW5}89Sx85omGF8ctT}Yd<S}P+XJDpL&)^Mtvi0lJsBE3-P=Wc zh}JIRT)XLIX_xt~#G;qiw}i_2!)MlhX9Ka@|HExM=n|XG`qcJ+Ids*spDX>}cz*5b z#q)C}`iZY_vv+955*I<A#Cvoun#F$P73@oXpZ&?1>{DJ&EZ$|r;>BT~%>J<7C32?) zdzdr(vS-P@Wl5ac`ncQk#hOIVmscyn<^*NK?mmtUd(xd7{+Z$0An_oT#6`4aJJr2$ z)hk{oVLzNZLIT8J^gOWZ>*-IkKjPr5eXFwbg%>XJcwU+8!QZ@LeT`%1)2_)IHg)Ln zA?95mdVI(go|7}b&lzZ)ro9)FW7!tB6H35aOl-<k)Zak;U3sz2=*T1opPRE~5|0oF zg?8jVJ&dzz;&YPo1Iod%j@s38l)??o>lKw41h$jp-VkMj3N0;^NsL0ZlRL`5>#6x+ zhsc=3YEh>uF)QXi@JVAI_>V$gpEEkFcU@@omBj6AsdIZ;A5WzJl009|j^2M&)YBV! 
z-xYd$RC7K|QTNiHiS*xw5=DKX<_u!to$B%y_S=T{@@%Ia`^NP2w74$W;AhThr;Sy# zv4FNUXER<kF0D<RtMcCEShl*wm9XJDX&dik&sy3hF*y-!W30n%ODO3Oet3ko$r-z7 zxd6NRUeV5*Xnp<;nJPG6AZwxw{D0-}-Cf+7d`|}ZFvRTphwHJV62E^tcmHnT{yFr* zf-Gc2``*J@oq39yP8*w{vzhj{@c&oD`Tb-C@%W2p*K&_;zqG~f+6Lm!1-Ha>+yJiT zIQ=~5-E)Ye0S>$A3isl~S`To7N5hv5K7M~H=Un9XJaVV;yMnVHE&MNIFZNbNk`nxk z^1Frsqm`$4ckq9qt7AgNutFc_rm7m)3lJWWeFNc<BIa9rJ`^)|o}C_v)6(kUyqFb- zg0J9Q`rNE@qtIs5=V3m+XPDx%!b|Li#2Hy*W)#lGiaca0jt!ZLvLTZ=1BsV>5?s<g z`TYdH7Y~-0hORT0B2QKxE<?6zX`hZya=&ozJ+WR2copy};9Y9LxkUTT-8<m@j62yF z`>|t~7X(KD|D5#E68Z=lI7h9=QkXGd%`T0`yuZtLj4AS{{)6Az`BQ!$!gkuu|5jXE zi0}A<_#W%IREf`_-N?Jc`_WwUjPC&NOkx1K?p#laymOs9x}`77bE<-4_*^~yEq0&K z#}dOqpO}3ueN<rSlZz~U@`T<eoO#gO`lO|;&+wbrsXTf27{AHAHJ1L{p`3f38X(48 zla0yxxxrG0GS1o$Potac{kfKY`TH1T0$-KLakM+OiFYJ^&OywO;!V%2o+UY%vU$ui z62wX6Eu!Dnan6gJItMp%Z?ywCB?bjL(-^}N`m30^q{w658|_fYWl4PsF`n`_hre#4 z%+2py9^nK3s<ndy!`BWbrfh}@m&iNu-I?(6tx?uwmI)5`1CBM_{!iRWP7S?YvkoP3 z`)a|PNqgjPsKl}J2(dQSdvZ+vmY6k>H|_@Ejz8$;o43yktqQE?EKuQ-v!>tq{2e)2 zNo!;cR^cR<oHmz|=UDifJ!g%tDKm4`9l`FRK5agg`tRl&E9@3zd;;SW+9vsF;+cbc zFc&A#k8b*t+%$`lh!t|G*CT%w=$0FZtvk%w1IhD(-VE0MW%?xHZyC1+{6^w4n#)tm zS!>D5S4*4~^W1_8%67YI2VWzxdE2H>ien#8AKL+(O(gz`eScTKwS(mxOs}cPTAcE7 zb3cXgRlFY%7b&q%)tbCYez*bNrRXJK&g1T@vPtk3W9*ZDa}vXsNzP*Ub)(`Qqv^|w zbe^qu8$6rSM;UvstUJhkaU*b<4a;UN{rG%!%Y5%0$V{GomY4fj=ohJXOE1<g*fgA> zY-5fKHnRsU_KVOl*QA3!Pq)$WFV0t4>5%goZaE8K(82kIai!!8a1+lSa1Ib375l;i z+Os(iam<z8Bz;StPH;mjL1CMX|4-Y;UHvQZj%gA7yBWJm;=Ck3+BSTva?VKlK>DJq z4~S9XTj>{xTlBl&b;sbkR7d*5KIh;jHbXa=IlZ$ieemJ=-N)h&Mjy!7$as80UJGj< zG;jx6qjOM`gIq){9idI@=!aegzB#Oq-m>BI*zo<yhOhBxXsnNKWY1psH9-7`$V7`K zCz4loI($71{w{^jr^4@-U~^uK&8g?oIYOJQ-<2aDx0mR4$06?hF>?=_vBz=;T?_rA z@gj9RVWal}8xK5bqgV821+v@S=DfsnC|_cdb-A5M{Y$|s`OA(#gEjZPY4<c>mubB8 zr|FYQxhF&Wy><GeMB*St9!1_XIm3_Z>bFdjGt*8z4}K7N6Zw+()M{)A;q4bJ?JBpm zYpShX<hScKt~qkI9rZS7ZPV@6IhtP5b8#P$_PO*vmwCYo*G!-Hr_EFUjW$brCC)ik zn-A+e7SZP0EN#xVwK>Vw=5xN|Hh)kYT9rhd<~(8Eo$w(2SRr%~!`h7PA@?H4JYv3o zi~XSTQ@r2J`)i`S*LY+@vCq%^zP-P)G1oWMhpbegv&zI?mbqNw>&p2nbxfT?3=aR# z;eWwd!vC@6Gm|fy(d#moy*66sv0A=!JE~VtL+?o*i<}=ig4?N2)05|(uReOXJ0F!M zcJYxpzbVA{K1^S^MVIcoBq+G@GOK6h&52)#txr6uvPsTDX*gFh2fzoHPtav_9lVWQ zhK|R+Cho-LlDuyzdCNHqFLQ4uW%7S5|HH2vv!;y24rjj;c}S+L+(}LdDSx)BJf3pJ zERP!(h7XP9TsnQ*NZ(d6hSQ-l(U;(xw!!I>JXKPj!T)miru>$;R+sU68oveBlnt}( zMd-w6bNl_+2r?G-N2B$|aq2MifSggtV|_06M@N*q`YPTapE^1GB$tZZ(>L&Ghx#*o zgG<pTsi9qsN64c#4qH^_WSP6g@0cZaBj<4?hv$<g(Yx56fng;cSx*#@lYu)n<nHb= zKYBpQ{GU2Ksmvu;u`WJ`vvk<jUT568S&~CV=F}HfGk=hOPV5OKw6?JsTfJg-Xcd0M z&64B)S!@jXelu~>^o>{U3>MlpheDgGIU}{i8K*jl_k3y$YZcCg)%Is!kQ`z2IRjP6 z*|2rJlj^FA$@P7ef^F0NZXVN4k~(Klr{F)vdPZy-_x*~;eLwNg_d9u9JaIhnJUw_4 zc-%aRJV`vsJUw}O@$}~D!_$|iA5VXt0X!)@sXS>s=^jmvw#8$E^<z%z&)hVCIVy#@ zDiu1@u)*TPdADU<A-pZVU(w;hH%Z8qvLy?9#{sW8)K>n+Q6}d`+lI$^cHgbpB4aZ~ zu)cLTba{Jrg8EzX0?62yzL~;fO4%z#p;eqE*fgIyWPcf-aGvbBPJdvWb_PvirbqFN z<~f(=Jf3yaD<?W~h+}?D&Ic%EgBX{Ce24F;pY>e@a|~w>`W9EtnW(s(bqeP&q<`fc z?|$HA{kWIEU-S1X{vP13#Dagt-*)~=uC)XFmH6$<PR@LDR`hPp-v5%v$MY%A37(<k zh#1e~=652rNF5RbF6XHy@m#=jA<soTvexitE55_;IX$66%B0YR+Ijx54%R$#Mb<Bx zs20C*f!bW7sQ=AbLBXfVRfAskG-cdF?vrof@qT{3*WTYSN&6<-&HHTbibqba;>=@q z0<n>Y3MW;9JNM-aRH;X^k@?^KhYM6Eev+&okJr9EhVDG(8k%>~H6%~&H$2W6Ue3@G z8=sa}BlUteK;81apSfXtRwFqHlzPGKo-|I)^<AKj!N;@<zeYaeLVocbE5A2U!)>0m zS^ysLd(2@yC3I_TuD?Jn_UrUl8tpr()hqQ29YT-LMUD{G9cH_w5AOblwqD#QeZGuG z<IQzh>eDsOozKj3k`qzx7tinp*1J7zFC=&*ZrfXv)-eel-Lh917p8%4ihq2eM(eYD zEBQKQeW2%_%<$e~w7(YJEB!BXRvm3*y}E;T)~7DaxbjZgy7B^aGGiw;jpVi<C(cUF zoM`QpzSr7Ie;3zZNKU<PJ~r7#{nq{;7o-2*JVpQav(OW6yY&CZSsLAmQV;DDnHO1C zPF|==e?R?r_x_Z=7vA&Gm;byAJEGOnmjf?Si}_Ca;aR?uxxm_&gQ!FB%a|$GT%_7@ 
zH-K{|I15`WIPU@{u(5E)S#d6pj`Iz0mhZCQYyc;)v2gC^>}HtGH=^Tw9-P(xu;Ba{ zoWRDy`6)QfeviYZ$Qn9XEmkJ0OV^#NePidlcdayBIyq`z{apJ0Qwt3h-~=`n4L&Q* z`=jGrApQTb1?Mlp32ZE!&suRdM#p)B^#6wzoO{3tY%H9QS#f55FKXMTO8@`eg7c#9 zsl~v?!g-&LvtHx9vgkO^2j{xKX*lbVSMn=>6WCZdZ!&O-&rew%iSrfs`3!Jw*<r!i z08U_I;ru>0MGgb#+(Wvqze40~8G2oG&wS4B$XR`KN`2P-i5i~4@{LVLi;nOs$^mqA zy;VnBbi_uxj&_5`)LFu-S+Dih@H-9swZD(hfg2A~M=Tw9IJpZS^oq`>Ptk#mdi(eC z-q1Pq%7_AOoJHsCE^)F?q3dsHyXc&zJGDGA<BD`$TAx+gQ!VD3SpE1fa3jO@tS&UX zR-CUHxM-t3&Nmh?&arW|o4D#DaXn|pwGLds#iHwD6W1HyV!qkU`rB;limo;u(E6(5 zEOd&2E6UvTCllA_;CcmI*<Jm%xQi~m-%lo}#lUHF0b635=YFf>+NktYBIvrMJFbmc zLlw0cI1N`6y4ISwz8{I}T05?#v=O*ixR#r^R)UK;#X63c2rhg8qN|l>dB48<`|sK7 z0;U)|=GZ(Bygo-cwE2F4S=V0X@%2u1cWc+!<b!LSS=U7T^)=n!XUlv1EFw2D*WbjG z)iF>l2G?`IOPzbb`6b_NqwoLBbCp?NJ@{<(^|99Xd1QSF(dzs0Ab8KAJ_mfX`X-t6 zjde!w%b`1T`7-Ixn&MQ8fz$db3cs9V;%WdFJYePHW;?Fezy+L!3t0PHH^9X8Hn?nK z{Ei*h=imZP!v(C}#!yUL2O@F(z6+Pg{YFi{0~ZU|z9iiaP=>l9=z89cOY}N$v2cBA z;<_{v*UBzj8t-J?;9?$+jq5KauAjJ6JKrp^<EjN0a6*^xpYWd5Cfj7<+5%m+alFlr zYX`W1)7lti9G^CEHAlAb`Yv30-;1pRT&(_j)Wj7R7eQBP7cRZOQsUHN;9}9W$iy`T zTsHn12d=POFXjFE?mA?oytm7B8TEndIp9tDO<f}EhS_F)n<DEQWUcS4_=x)YMXT?R z)CaETfR9#RiCN#$c-77ioeM1O+rfMEk5#W7k@xoYt&CTT!Sx*QQlDLK4L9rS*(0Jq zTfkRs!Iw3(2XjS_Fx_@}O)+tm_K2YUT|2I8zy+L1lbx3xCa$L<>3ZFc>qT$@XZEL^ zuCK^fQjU&LK90oIV8`VL7jUs~eP-fHNr>pLU)XV-#XJpMEL@Eyu1h0vJ!r>u1Gs>T zg^T?jy^YJkWt08e?YN!-7jPP`D6;<yxJ<iMVx!tDwre8(RfpJqhP_HM?C->0y#YMY z?A0F__)FuJafT11-td8JG<+cSb|1*J?}(-e-D%QwovgE88=&>wXsHML-s;B@KC7kA zlxD_(xz_5xkTr2n#-jZ8F54nYY`LLseJp@4vFUXD>Dsuu#a}133VPp;^O)oVzLowF zpOGQ2Vz0>fn)3vE8(Pjl(-)F!#Va;R6ud2hx5|bWyW~`O2Uzh=bQ7ZzNpF+jt+ez( znOiNsOYeg{z^}LU!N2f_Nxy1+&=4Kx@4&h4W(&?gg7f3pI0FVwMM>0X-0kYJx7Q1O z-Sl1}x=LT4MWgRS1D7`LZ-eg@N5mLC3eNI*7W(|)RLJEeV+6c?+_zeB-jc-lMB-d1 zbk4EhtW8pjpEYm-Z^!u}IE6?2ncrNN?lHcx`>ek0dh|x{Smj$=6P5K*i>q~?)zLeZ z+!Ixf+~7QCuJ|yok$R1_bKC7*GFBjdOV*Kc*IrwyGIlV!=zgBNc<$kmSh?#8CaOo^ zp>$;Csj128ll)%k?4^Eqc`vo1hcdpG?vJgPzL$0JWV4Obb0=}?nr{^Ri;4Sui!+~% z>}iPZnZ{WvL-*7ROez1Hx<|vsdYHt}5&G~#ef_>ezBkwJ6Y;SwPEZ!Kv)3Wtwvh92 zH@OZ6^Zg>q#kX1nyoMLQaf;C&R$cS-JnWkq)(5Vk-eVudduG88TMCtNEn^CU>*O7> z)H=_hitkkH#8+z4JJtb{S<p?UwSF-9I%}_+{ebSe;!Nu9sw>4Cq^=2q=X;jAwnf(U zGok0gD0Kllk1@#_+bg0jSGT&-v~~>zPc?W>)vo!#luy*_YCIT{wOjXgRY%rE?2|TD zr3cs-fY(~^e`x*9+=lN$^VNut$<O?^$$+)W!KubIc^|n}E#vwu-<#vg*rtmQJUFiy zo-fKhdS_v7De-?M4dSzv`Bi8T9K!DbXm~sd4Tf*18*hpK-r^goxBG^cfX5<>JF>Ru za|QH?o~_sPSVJ$h<;qB%bHnZES;k%ZSCipC_F`S!OV?w-+jY)k2F;~s>N@9CGGy*y zT>Blhb87c?))ymBS)z0Pn61%k*EttTy{68&I)={at&C;ApkDS1t^%LvoO^jrATxK~ zF<zCv-t4_VJ;@jjME`s^x3_xiE4P{xMek&o?W8XHzDw_r7xWq8;%jYsM|i=mcVv&E zmx%}3HQmL&&ZH<h=K}bmpqtK-d}4#WojsJ5lMJ1s^DO5<D3iX3u5Z%39f#sQ<Kczs zjM#GdI{Id*@XHWGE<1>2r46R8(a)ZA*EKGqEfvR3FO%;HjW%EDsoI?{`HwZ3)Z2}W zgxf85R@!B<!F_s}{7n=Z{x3556nLV^<P!#d^g~!j#nKOT-w`bfyVFu{`i_|!SL*#Y zka&ned9Cd0?QsoZUub9^_ps(=a;LD2$2Xz1{5zuc@))$1QZDmnR!@D;^<83FHQKQ2 zgs(+^Y5iC}Dnidr651?#+>S(vq4)uN>sPVIUdCSgmF&6CA`a{d;=tmySjp4b2c~Wt z_8qmmuch76PEEJT+T?JSI+QaRqQ{l;p6Z|5L#rIQ%#HjnZB~{it65W$RoCIrsuMZ3 za;2Brd5D+{d-<ik)MJ$Ix0jdqRu57hpu7+olxHWZ1oW@^!uVh4Qnp;6X0b2(C2+g! 
z<?JuEQ2vR%oIS$ND1YBx&OYA<l)qyyXRl7~EBK4Ooc*)yly9|{vj?_~^4IO<>|_0b z^3C>g_M%>;{3Uxi`$;cT?y;A%C-eg4>y2`)-=!Z|6Fx`zuk7Wl>()}f%3jX8>`BU1 zdpYZ>$0@I~m$No{gz}%+%UP2=MEPQSIqQlglrOZGGxsl|{7%a4=V^BLaeMZpCwl&w zk>rVXo~8o1kr+oe@wKwYCFcs-SzCEaxPx*lx$okuS9o8#NOIO|XGG*|h@7wSK0nzb z`E-@|!VT`Fg&sL$vo2xs2JX@Jo(nC4w|mTE_j5M=CvlGD_TM9y4&>8`oVt+LIAUSq zIR`|xiISw9hp6*MqZHqbX|CKkX^PLxn{LF%Fo(N{PjKcX%3AxbKI)vgE_E(3Lim%k z_ydWJseD)o4!ITEcdfQpQLn8@HvwDR9kv3ve!#8!wFS-(+^|Tv1i@#8JFBlc#~BIN zNgm#Hzp~)_F>t$`-Ds404*{2Hg?kaWw<6&Jg6}B{zGmQ_iG<rF_<&pduO6Po=M%&2 zEc7+@A&&nJV@*U1iUZvewoQmp8HTN)={({mzKBy+$~VgfciATsqSz<n_$Fk;J*0u} zLy0@#dx=SwGpg@{hgkmb+9<5^*e@2iCaH%ymQmN*JM`F^+QEvilDKJ+!L0gB^+v{Y zJ+_&iOSQZ0vB#j(?>69n8K>UJxW<D2*nqDfMn}W1^i^+Ud}G1CZQ{q5rNOgbb|d2) z3x11<pSfRy|NGhMjf{6J_)R8$_O>+mmBZ8<8TVN54JLlw*HNF<Gh4lp@s9=n6z~;4 zC=AX^y3BJ^|4ThL51!%C>KWRP^9LE+2gBX1#H@#qbs6uR-rt-@ZZK&1Hav2a^_rbW zrh~`ABRj%8!hWI9S1k2J;gRi(c@g{3SqfwFmLp~#TyM}>%9v~L=K{~!lA0JiHPe7^ zWXv`AGT<MK4PRp7XUv68)}%YS*?Y7>czPN9-N%59(q|i|(H7I!V3n(Aeb&icI+i{= zpxJxW!~N?kq4_{oLw~LB#4lj#!uuHKicy8W`?%Z8f4(wa>dV}INvHh9FS26>-<5w) z>CAlRl1}lFR9%~@^6r4dc2!)$Ih5Uwm+wZm=Kd^Ity;l-u)M3<kgCpqQ1Q(l?C@1S zlj3=iT>bNl9lrVV9X@ZdL-p)-sJA?m=&8y}_uQSA=(**Y{+^28rhCdCRyxant#r=+ zL8_->7I(b<R_UxfXRv1rzLlhY#DnGy_Dt)g1QYuy!OFkMxrFLj_fYqA>YV;bs#>wa z;cI-*saDFje+JLH(D%$x&vxR1^Z35fJJeIk_c?rD@lVe6@cq5!_o-=W<?kK70CKO@ z{VCt?rk-;$JdJ#>*Pqc<|8ZhPjee-QEKRL=#--Q$m3-&$RUM^{VSPOTv+lgU;kxg8 zurTPvcXc1%NFOUZwf;UJ?a6&0O}!8LE525I<@eD3pXz%0C(yYNc$tf{n2Yb@e0@=J zVQxusp>GOuDl#z;J}Si@Q8`OL`*56levgw+Qg~1E^8@oQV%J5cJ;jPz*<T5IcJp@> zf8piIJSDh&82xt+vApP;%3<{NJlaJpZ!UG*f18q9$+@~xWWbm+cI3hn;%_nLj2&*< zoN*1jXP+}9R?)905xl+2m@~*jb6_C8KWS47zBHLLE&-1jt0?jp)(2D0(cr!(^&n@~ z`C?&zE#COR^^DU=a2|*6pFqAhd(+fKQ_taiCw@ihNn|WUKBPX8nS5}`JIQ}LyN_~k z9eGs!)alL|r4C$pU&qD#URUGV>F6tGn^KXbA=niIz0cL2rTzq&xCg%9aez4)n7I$K zAIx1nro7#I2WLs<V`pEdjEjk<;VktY<mF9p%o&9b(7<EGu@@Yf*PH^!+u%Unej_;Y zqT~1s9Lf()fkQc4y$6~4mEbrhI*z}Bqxs5H;Ftmq<Z8L#7#1Ce4;)*rI0cTS;6SDp z363+P<7fcKx|uqT3dT6<_`GqpDsojRcm_w}xrecN0zBoH!QaS8t-<F3#ygj>pUBvo zd@lM?c-`cGv6D3Z!FRlddztJ!Z`euQcz%*$CuzQ8jn{|5>w~;?d~ea?J2bqk3B|9x zRB*%R!rQ{5R(_v@?h`wA9(qE)>1)neflzo3(r_6*Ntu6g=&M@hpxeG7&EoGC+pg}P zatD>jr_emm`(GBCN8*E#_TR@{Jon#LnCr(UD*wCLb3EN#c<DE!<8*W31r~a=xliKe zpm!|gtU-Lx#CbZ-DV}C-&iRh$NsB>GUju*XG46YWt_sfm-N5tN1ZBMR*|*WPo%n6! 
zn}64iXa|p#|H8U<9s8y9VXM?*)3xF^sD+ngzjO{d&$`}{wak`E^wlKJyzMUZRRUM} zJ?0qJaqh`H3+`XRxes*`=O7FCtN9@QK7h4YoH`d<p*J$AuqPnjlK(5%NZdF1X~?OY zktcEebH2wtr>^w6a)b9pC!2dE_dcowe{zo!ECWa3XdQ=#v#GRC4M{#DXJWF8b7wLi z3qHY>LoS92z7yFHoO6*Yktq{r1^5cVw`GKOhR^3u=B`R)q7*x2O|mjU<F~U?w7wd~ z*_dxTj&<MBIFbu8`Z(UN)3zYWI94#Ol8Zvd*~uA-iZcs+F8Pa%7{}joV?Hna26pYW z-%&fJc5g@6uBD&3pSgHaL$WgVDaJtS_h0I5`{Rjk-|r*8BYKB)r?<;iIjPgezC(ok zNWW*DqTik0qTf&8BmFM)0Jf`@f5QDvKCqP{i~e59*w45#Sk@w9vt5S{xPeE`;C}ei zk*e5m*9}iqpL)Hwnp<`E#7fquayHzYH+C9$@RzG^7U#6jWv@iwmW4uNW&U2m*)`*S z<-Gg9(#I3MeOlX7@QcsF7o{JJy}oYq@C4?x@E$JT>NaP(*TeVbULU-*;~8kua5EP_ zBe-ok(0ng@z-PgKGNyW4Q}d$9(j$iM_p?VLxWrD^@JE@SfBtRTqTLbM)s}lKZCPiT zpTCJ5-yEgxX!gwM=49@JvGUUnbA39Ka`a0O+e+oGkMNl^_BYGk<ji8{qKokXPQe#g zf={p*-(V3w`#)=EVVNs7if{U9#-<P+nV7Bluy=^hJorUu)mZ)ijWT|k4;z2rc;H6a z%kll4OL>mH9A6)Y&{hqzm*d+zi}IoNa(sDbQl4fn$9Ff7^1k+Re0BXPPon(&F&gb% zv@(|_^UkGzvqNix`2Cq1<0<bP_U-+;x8i?o!*|=xJi8ZvZW8%@ep7-^S91H{*Y)7T z-GJZsg%>7!e)Gyj9zVX^OnkYChA&t0`RM-KMDH8>PVdkCHElYTKUe(Ahqb#dU3vI* zy~K5l6}dU;O2BW?gY$0jUb$1nJmW>;mmTQ0`k|chnuLB?bfKKPm3%UTutSG<$)C3B zsjj$-9a($C?vwLhgT0&;T_yKCIXNRPW!%ZdzE_HOI{Y_~yav+0S)ZHm1HI(zvBB$i z^?eU`U=L&|=V`dt0*CBw)YsSpyleP>1bVJQ_p5HiKd~_==QUX`OlQ3;YY36`V)MKo zv^>f;71(Z-w7re?;akw+;^b`Kbk6ftah`7?>*jkL53gpwSv%8r3>?Qa->S4<^8clI z?*|WeN|?S?Vgk(e4D#O1yJ+p_O!7*xbt+f~oJ!V0HJnqO-5)s|fIOz)ljn?TN!R&Q zX=ec6`}8Ff)P|a&9d3uxcSdF9L^t>G=ws~U9w>)4&Wa=B!yhDZH&BwYfEeX6Y|I_Z zD?5tg`l$1&M{?d&NqNF}-(r7XI8r6QO-hZkFgO=~aIqi1))+-~WLD4mdM~nPe3wOD zLj!NUhWD(=>be1rokgsh{J_ltM_Qsz!>M5ZVZySHEc9-*<E`()`!`^JBzVawC9q4# zAtE$n!!H^Q$~kIZ-v16hm-ku5`x0QZ_Z_`ew|vjsYJE@NM6}^m>Z$}*@b8hn8`wu( zO5YV*%O>h&ldWX}M<4#cS|)ImU15|7t^t<ca%Y#+Bjq{9@3W*$f{Qkl7-dron5DD> zInloXH}usnwe-Kp-b2v;G~=<Gv72-n*!zLSrjgjKdibn9cg|Vr^iPIqvLte3(o}5L zWf>Ex>lVI$5c!h&MD~A+-_x({Mcts~R^zOH*`4QyR!JMRc7aFQB{61FrnO7TjPWu1 z!gU(DFW2i0n)R-<)jNmWS)GfnLY~Tyt1CSxXU`(%oW2)rzfVqdrRc$}`2WT!l5bAY z?uxt{U*OY8g}LjwH}_fmdeXjBWW9m>UdxaX$%AqPU*HjZf%4AG(`4!_xo@tcBb08I zoA=Mz>zDjL`q^{Ei|mjGW`Or2LvQ7<wnry!l=`#qgXw;B?ask&@v0AqznO7p(EaWk zpT+-}g}*Bn`~m}>xEmcme#$I-Te0A8GVx<4YV_dG%)*Zq3;tRYzsrEf@0o?qDi-`@ zCVp&L4gXx1nuUKV7JQKj&sjlx|1LGg$}AUrS@@{tp^NRYdom`Y$Uobj|JtqG&vxs{ z+nv}!F6^N=_$D6y>482@@N{mIJ<`!T_e@ZHrQ~j6ZK~y96kfqr+97+R<ZzOlTl)T} z#Nlr1sqWr3+_FD9up@9HF8KaU*dymU$H{koXj;aj>`4QDxVn2@w(7q<E*N+$K6uWL zSPPGKj%&GnN|3w%@|xxSmA%x~c_YGW<|D57;GMt~W;mtaWKFaqD>I!l8|msEU`xO) z{A4``^EJL|vu<rKw>CYZ?gVhI8==>&)w@11qTZ{3sUBjfx0!nXNWH+C<7Tb*@Ai7# z10(7U&^POHE%jzcws(T`OS)EXz1ChiJ6JYQ-6OD3+IyI?%|_j_7ofb+RqtW?+D~6M zNxkP->UBldJBWIfl&*2p`flu?h&H?iF6C?suGXFreb+<!x1SByi;=jV16O&r1=spW zT!(-u>|?{_9vnf}3gEV6S#Z@v;%Ww_xu*?Rc_gmIz-4AyaLtRv^$sxANj6-sMdG?e z;Lfz*s!5LMuis03ZX2$gArWo77P#i27F_cpaXk;rmUtVkb&<F(0j?1K7Wva;pd=F4 zN?^)eHe7v&M$k0|xOIapxUwU0EdfSx*l<-x;>r}bffih@NL;r`egATVb&STV9g(<t z0au=8!PS}+(f8K_Q+U!bPUaWxq}TL}oS)c(jBGRH&8}BJ!9OSPe)hOUFJ*BqVh?f< z3;rDgzM_`_?*|@UUmpYiRug}+0Y7E1x(9xa1;5$EPppGR&r`s|v$5dG$EnkA?t32= z{K;XxW^eDWO#J4)_l-lq9~+*#U3C2BzW3+Am&AtunTcPYe|KbEIuv~u8-AgQpI8Td z{sZ0>8=m{mHF_$H`8R9q8H`s_%=ThmtzwV0UisjX&K%}fcYLCH8UOp!9YM-BdOx|O zQ~r0WXQ;w^vW98+$0ePtd$sq|E6-5dUBqy2I71E0aFPRn=LGx2@AJIi@L`W`Y|D29 z+qNaB`?3>M|4V0h{53t)-0f$md*`{;ePbL!fBu;szx?IigTQC_y<6ke=4?mj-qDU= zdj{VQaRm1!JA(TX`2W=*p0+-YPX7s|)Bh#U`$}hE_#n@b(d^F~`_TUULE(Mq!%g~L z@?mfaEi!N99vPzg-{jjGX#5L%$?&(>jSlQa&R39QDaC8I7s*Ml*^3$6c_X$W_M_|@ z>*vFmmw!xdMgRJupww}d*x&okAU--r4bb)m?xWle|4ZLUzs6w`3mty`Kf?bFyn9&Q zKVB4Uzq2Ulr~mx77X{ns!#4V`4LaMZ$c0Fsx9_K&^if+By5(-+t=PHSuyg-t*iP3& zcO!PL(0vrT(`<AX3*BpVx^olH{C4y{2E8{M^v<)<J3BhPM@B=7u}2@PuScDxuT!B} z@Ce^t%9Gr3N#`<R1ZML4`}F@6Jos5R;&0t3Ji8yCLL2Mw_VsaU-xZnaK3AN2UjH2R 
zEB5Z6s>@OR+nnlki*nWW*V8=v@>4u*6Z&|*da0kM?e(6X!1WH_{#W41F%I8>wfs$X z_zu<yk1D>C>wA0JUx5Es@zn6#4o|(#yX*P=y5c)NPv@<pHx~wvkw?jF_dW2~VSEuw z7^@n_%CvJ2h)-S{tF-pRj8%$ltUe;oo~FCeHHPm*J0mFT6l34D`x!y_vQYf2@VEF) z1V;QN;+t&59}TaD^K3}IZ8IkA2>3RK{U)jSO;Wt0z+>6-5_^p}GvaU$oUMvodXC}G zm2p{Y#H3ZQe=0gee30(5;VpPv%Xw6IX4kwbeT(hZhV3T#p_LX#*F3!~at`wXllf<3 z4l{Ke@osy7jWq}4KwIGYqM*<rvew3v3|}a3Sm<DU+r(aHjN74Q67`h-!$QmYNLn5O zX3JI^Eq!yC?_%rQc%kKLot9cFE$3!*^$9$t(E^_dEooBECl*?2B5Ao%=>3C@mRe{5 zwuX9SjO=sW?&HvMWfWSTwa@~eYqZ>}({hM>&6yusXqgvDOA)jvKAo0YbBunH%RCyJ zmN%hA_%txHC@8v6WTg!m3Lrxg&l5{#vgyxL$&BQt6uf@c-{LnFot+E*@VO)UMfkK5 zp1s?k;op^&dEl|g%0}IfKfwD#1HZ^f#kY{*ncopzle^PZKMZ-$WcbC=nhcMVdg!B= zGMts!CBqkGcJZr4hO6L@=092Zqa>0)e84CTHeMAO1~!%qPk^?-j3_evnuQLV43|<* z;ae73vLk7E0+@B{ZM29C0~<?jQiPVNQRHSsR#%_cWcX~U=YK4;xFTt}Q|Mi5qeWyG z*jTh23_?pu6k0fo5cWgZWY|eP%Ig+dS|fb|-v_37brcx}HWn@KLW?HD1>a7F&!<06 zCBuU_!y$Mpu$fOM!%Gbs{#_Zq13b}W_!a}d*m=2U3{%^fSK8srz3B9P@WfZ>)BTLc z0rXfv<buAK??|e9!O^pBucLQeuA^_ArrTtmLBH=subxiFwNdA(`e&z{8=!y0?<@N! zV)L|fw@cehC3qy;sbW8Bagps)u}jAAJg)eTd_;c1{3K7HO7XRmXRrwWuJ|U0k@wZb z_cC8de%L1JK73_iaINGSCx@VBvw_oYyKD{9m>Sh~X)tJPTSb4ZDGGYf!Pssamroj| z3JsMHB(0Ht*$d49=n-4|F!c!ic3b?vsjI=TX`|WVayCTfdOvk&>*Z^Pb;*L^lPlu> ziNomOYGl95FVNlhaTuT6qrk0u&?X1Oo9`)!z2@u*eVQE96$NFzH2r1#@G_qnZIk&d zR@-hrP1}Az+l20J?b{LAzC`FQ|COa}*^zC#8kjAMY;7Ypeh;v*^zaAVQKGf&A){^Q zWq;$gO*~E8GNQHZn(T<N`y07+3s+g%=89}vCT(lJ%htA)v<=u;ZTq#^wi=^to^ReZ z$7$N;|2oRJZHa8#8rqhrTH4my-6teG830DPty|mbvrf`BU}LrIX0vUJwYJqy&pPWH zkK14Q)-G>bSwEQjxi5FMtv)PsrDsKq+l@kZouzH-Bir^{Y5&c(wsC%B53sS?cCOjB zdyTgFzIodoKTX^2i`KRSk!>3Z-OWoaZQIs;PL*+61WaMMt!*RDX6=#~vu(j+hP+*9 zv~AAW-+0_^I8ED1qP6XpXGe_N(b?#UpIX}%Ic`&ETluxNwsDSU&$ig(_LkYUMMm3# z-P?AmIpFNmw5=y?^G_%g|5N?dJmM!1KbH7ruHhl}u6`8zv$>qtIQH{Ybvb7;HgHB_ zb6T1@aAul1X>po*q7U)Ela=vXINOm2tgIQ2vUk&bBCfN!Pkd){HqQi}89cM&I}14% zB5*PX`)l%F4lw3>^W%g23gUyF_0FB`?Mc42Me)J*6N$dRKBK5yXyJRi*rB%1Nb<cy z9B2ETNxpZ9rEFi5<ZIuS<ZBw{Qjev_2Rm+#4<20{AM|Ii$B`T#Y)jz(3~&$O_ZaYx z=J(Y2;8!L5zCJ#9<Vt?u4t!O7u=&pT&i1RRvj!T5Pw+GwXGI$FCxm@u4X&#@B)+o& z8l?{LO?hfkPd2#HUh|R5NA&%xyffD<xzcaMYRFnjd<r@IZ{`Ql&%zDx4l~BqYKKXD zMkD+1^1XRxNBj<Eyas-Udb{7DFL=y&jV!(k`yI4%4cr68d#OWc6u(0*xi~q4;gfS3 zfqc$uuwVHZcFpIz74`4zjRv@zWjlM4G9UXX_dn11$#m==nUgc71jW9w+cpnVhuB{M z-pgLG_+~i&wb`lze|!-APHfhv&`7*reUKQx>uN@+a&BuCc1Q=$Rf`sCeYuDDTRBT7 zd*}Fc>&4cRHR1>OwFH(N1!}=?<zQj9QfB?8(O5H{s{b`y#P%H~?ws7%-YwvizOjyj z#Q53Ap?i#<*nV;rG2Oe7@68xL>3<Es7HgmC^$2eIUizJ!*VLCH_j2`<_4E|lD*Ykt zkuf8FqyB99o_$d{7gF4irq0VeN0l=nN05nQ$i$32++85HConDSjqc7Krglt)=f}ha zKlm^7>v(j~RQ6D@f0}vUT9c(pOp_URd}LL8a2UEObDU$G)PKr)*;7A;{Ea$Aov%@6 z;TXNnuKI>{tFH(3tvl}<)W@FuG331eDe8NQ`nHU=*O$_*zBXVoM}33(*wa6TJ~`SN zyHBg7eIxYxc60BD*+y%Bz6;E{yl+q+F#*TWTc4hyz8j@|IYxclKNGFK-%9(2e}npn zDL96X{GU_QS1Rp0+o&%)dVNnw`?4eJtAKAKc$L@#(TPt}r=3TS!J{LjeOX3*8PV&z zPuiCWOlzhpF$L?<t0Ggk8u~J)Ii#&|MYm|OX2iOROyxk6#Go}9J}s+E8$R7`y48#~ zZU0=(qL?zB>V2K>&9!YaF%zO6WKP6qUOx~&xcJ0NjkUJW5zAjDcMjQgi=17v>lV@R z=GnDqI(`*+EN9p1O@En6`Ekz9$XqS-$=oe-xy;QnN6TC-bGM&d9MdQt&;F#$zq$L| z>V^FM@CQz{;(cX2YbE{sy2Ns157qkJJS*lWj$_Ubu}g<(d#v><>0?E5n+@m=cNn;< zfh(Ng7$@_c^2H^cd*%&M_o9PZYuJ;!I*xN_sVAGK#&wprU()$vp9!A5L*noYOp!I9 z>`T;(uAdHVS5SxSLuBD+l6A1?ytbKfL9f^d=)C>##=eqF^{e$6>Vc3e$h{6~^E`aj zAGv}p+s;5Qo}qrRZLoT<%I!OtJy<<DT<n6l&cj38KL0USr@z(JdAKRT=YNmiuk(8m zzn^z?o_xs_^gqu3`TT#otMmB1uHgPF<AVDW;)2aCo>yJLJ+HWkFHU>S|2Vix+`hJw zIP90WVEgE};8*$7b11=gAf3PK6MXv)LC=0y@T*;};O@`BePx2LWxXrdwicS3T*21A zP`=m|Y+uB?)*imrD&F1f3O0|ye#(gJ`~tY=$erWA-o<`N58vLafxXfdB&aMHxUwWD z`#Jj^xvKy19zK7~AoU=9?4RoD+&>e1B|HVLU=s87jDE!I^1Gcf_ML;JoL}`9lxSy2 z<vZzn_Jp+l-$xv5-%nH3^L~?x&okYdbxo@J0%OqDI#BhGadoy=_3*WghR%E!=U<gg 
z`QQ%lyPdN6;LZbgAYaEF;2UlKiSb*0Q$Bl4;K%?+32hk;jv+khF6=2~Q!;q`{BEaA z@FWS1=?0B_CwyD9?~=|LLd*JewS7nrUrVy9vn>G}E(<OEZl^3vi_4&eZ^E=p>ZP{1 zhy^<4>})?2@7o)42HOtt?00t2!tZv<!nEu!*87X^wEmcvrZ%_oeN()zZ5Q8u=IrVZ zez#K=rr|UF+W_B$X}IYewQU=H{2n;gL&HblXyW;cg$91NQx>M-F9r>Kml>z@5qrBW z8yv5LV-a+`%J;8W=-_udWnnsAG3ekMp~J+n{BCHe;cUtC;JFf7UINd0p0yTQ_}xxf zn3lB$EqtfZ(K<kFdz|m{q2n>Wt+UX<?{><<bkrGi@J+ZsUWSGea4ZIg3mP5*$0DA4 zEi~}Eow6_u_Zl?tU7f}k{Z)Sz-+vb8YrC0m=UHgrcROWa8s-@^tnaUmfVZR#%Y(V_ z1vst-$18EZy|ck_CC^L?4g7AWEKI{pg9g6SXqZP1gc825gN6dWz0g7fzuPGb({Q0d z1K)&cxHS*H=&SligJT{vOaMnd&u|M3{BEZ#Ov7-42ENm1$maVYe19P{r1R|n3l02k zrz}jv0D}g;3DYocIQ-CuJ3+xQ02=y$!^IPFbjb(5$>$QL;aI3hlaFJ3C;L<W*50cB zkfXD$-R1M|Cw8ITp~;7wqYv=Ajj~eYT+Y%TZr8s(%r~Oz{oDA4wW-+ge`T*VH;|`F z47-09_+D}b17Gs}HfJzrh%(-fjUa12KWn~p^hkhnt;hHspezUce$J=|5-!$g3joX9 zv_p|x1R^`~Ep~P3(~8={T2$7pe%6hC){UH9($4$`_}xaCNvnUz#oD=l`8I%kJgtbl zi?Y^b9U5S*8DOpHH{&e$E$dG+=Rjce#oF3Ys~20hw1sb3BOlD?8`h_O)~Dfm`Q1iY zxZV;=z1XeOmXPC|we^7lzPX-<HEg(Eez#E;uJ>w7z1X0oS)A`>O)l$Tzp)Mu*IUK6 zl!fc9x|lgcVl!CRvxb+ob{lqO;23+shh#0R=Qi;3yNxol&SQtB=={KU;$!x+W(j;5 z*ST*$-@Tvc^DkzuS`-&d;rnHb*<pSMD3dX(l`&hSe;?pGZLIj~Z*^;9)qHzgXUn~O z|2WTMd}odozuPDa)Be~LeXOMJ<V1a}YM5K=_~v<@^?YxR6~EgkGwBbkpQ4YI)H?<q zk+E78*LiR)-@MNAs-<3jw^0_Z_f<>1*Hf>I)eCW*2VUWu_jsBt_42!ovT(gkmU?Tb zSH|kixXvSg;hR>TU6y+J-9}ls-d$7lvD!9FZGJzlv*R<~zmVt)RPoKtma*b@8)asl zhi}&V`Y_)~Uk9#74qkWr_RWh6%9_?sS=JvVPr?AN|1r1k0XesWO)NeF<zA)Cy^#EP z2~&MJKjklSkyOMT-}n}iTI8;8{w{H-ZQ1<I!sh@#Oay0J2LEq$s_r>1_3zlwoSh{$ zJR?u?B@Uv#)%+d!LYnH&cY1QbC;1fRK0wZzq+oM%&JCXcw*Pg|B6VXQt4CZz^0?Ew z-Fcr<mPsAj8o4H4RX#sQJ$Yw*F!-1&n0!5Jlm8)C;hh%;{iF3Y$nI^u)z*0uH>T$S zI=U9QV6A*Luv=ayPuyok_%)P+C&&dW{L<i5W;JleI=tThK5c@IZgrAd&8+jQ>#(2S z(Cd^n-R||a`l#z7>XLPP;cMJmXr-N8Y=Y-Fw0{ns>fc-NjETgP2cFE|*>I4z&7`FX z9GN}~j+#gugTTQ}h~4RUZFm^R@4>O<RSOQ64F~-vzJ>(pzn5)z&dmwqX#h_lw8~g$ z{n9G2$lb?5@EnnLzhuMH(1qs-p$B@yc;-c?r&a3PV8fG{8>VNeynn@lXIpf7b^@oo zV8gSn3(p-w5A6=qvpzbWHwCW2h9@O2Oi#Jcv(bX5COV!Cz*YaZ4NrX+p39}4-&pX> zi;icF)c33nPtW`?Jr~J)`Y+tBlIZ=s3^?T(<c2xCbdoY&&Mt8#W+VQs9Wu@iE$_)d z@4R*S>e9~#YCf*x%)^iG62HL+wFQ~o4Ii9@=Yw^w;F;iCw~l%rD+y|T7~Z#*Soov{ z9$6G0tP=XjpLW{%@NFGu?!GrheNOO5-ZZT~@x#QZZzgbCR)2%~@NFGuzP~<3egC|c z_C0CTCw`b1^^KACt@;M_;oCZnjZhGyzQ0mm<`YJJ;)jV*U%IsK@o!KczOCcf71=TB z`;D}3g;5_q&sg=H0Isn98`Oty>o_(@LX7%WNc$c$>KhQdzFoj=S>C-q-LDmeNAYd> zv0K{yH<Cw>!=rbDtGdppvrp_g-xAuE5vRfYS&_zH&Uj+y?vVWg@g*7i1v||B0&Skd zU!8{kT)wgTw2b|Njq<&@Um!j$*)Py@!iY~xa@?kRd+@zwpFp2K$tSJ|UVPM*;Fft= z=3KYoqZYsQw0_DsndkPP<7=3cHG1&N*kk3=4f-S>pa$RCs|%hO9Xouo0bfeoyatcX zHuo>Hl-vmT5IJ|G{f#{h_-z*a#Ln6AA2Q%eiJRB(ueZQ=M8bb*;wKJHgRil`-;x)B z|1%Rmaq~L;7I=Ro{Cg&T&Y@}W=+1C^Z$#f2wugBy!!{Ft*Pg~{dE^n}oBu11Jizx> z9+CJrZM-Fp6+L~o;D=XCKC#BJYUjUXjiC8aGrSJ=7iGK}2J10NR+|m}y29$Oo&9gp zJ<~?FMl<_G(<q159m3N>OR2~k^h1l@t|-3ai(=DKkDP5hot%w`LCZN7TKtcd_ySEu z!JG%&YjXN4n}3Z>tI>z78Tw$Op$~Q#`e377AN2n>>2!9dQ|}M-!O8vI^nuoQR(+7G z%ioUE>4O0VeGQyLf!}xRIgLI@FyQ4Js0M!;eQ-kP6#inLP=h~>KG<)-%Q;XD{xtfa z)x?jVUxPo5KKRJQ&pA+?{!{CNCKEsLzZ(2$^ue13yt^$l*2+JA@p-&g6fAJK+iQtG zOL{<Z04A-;PHbGg_$*~X17*muW_N8xe-}6BtCRjg-U)cFk(>qH-qq%-sgy5X?pWYw zAH3VUllf{s-}RRFPxXm>pE5$t=6w&|zs3Hp*l}8YaRb#eC~w=wIf##l`GeN>H;D&) zop?ZG0RIrN?IrBz<F|dE@-?pD-iJ!a9h}x7@)4Nl3I<*U_Ci;1-z&gp_wco`9`g@i zE&NhZuqyHG)qy+l2~pp^8ThEOox!7bx_yB)__^Aha#kdKer4;Up|S~&hB&{H9&vu< z6Z%(=;YkmlU-{9=P<pI081IDPyUx3aUNK{SHU)J4EPaUmDt(TRDbr?5z!}CETlKfG zU(`+aSoe!QU@dHq2}lNyxnCr_CF7{wcTmrrFz9EO)MM;Ro3gE~-yaN(<Nks?kt@-g z7x7HMPi*da9b><!@q5Zx*>ifG^T>ibi?y%d{0Z%q{iSi}$Pc;afc+(MIiz{pTtmDk zus@E{=l(wA#2c@SXMMY)7GBi$k?^Nm&ki3WF7t>h(JT8-+kSw3cB8f)*`e(_$z2rq z(5>*Y@8kon9JuCxvggG9Pa8g)J*+{+Pt^Qaytd!e#u}mRGkA^tn>O-=jD`lG<NMID 
zU(Hk%Y?^cUUwl;!l<mbPZilBfu;=uZ>^aS3ABerIgX}#u4`(lk{ihc8pP08<v!1Db z!JgBf&mE<1WN+v|%~16S{;8v^N&M`0u|JPrk;k$JbuuJ-K3)4z$3NnKV=pRzpUa<} z>}$mzws#S}oAxB%zBR<GEXL2rzSB#iCwTnqZSB1}E_m=t-m?$2uLPP3*n=7!7i`I= z>`;Pl|B$%g?sWd&<q955hzqv5C|{r8+uek${e^ud_NVsjXCLZSeiH{H`*UB=M_*8{ z?1u%eOz{2lC0Fq9D<#3U^?b9|75qN_xsz*4#4ohNpP%4s6yH!LeF6_05WflgXZ~Go z-(kPb|KcMmKT|2I$iz3a=3-y%*~(aWW5=iPZ3i|FaUgkeZaEb^i}FXPa%Yd>;|b5v zE#Q)Gu}#z<_X}jW-0gFT{m|mK+2?D9#|~fb3Lcs23O)`mG?VMC<@yrczk@$&H~WyS z>;szqoeq4%eph^Om+%)k*n|%>T)-$BN2rIvb7YJwSk3#w3E-M)!9{(o?3vkdy)8VL zXTb%GGI6AO7(7QZT)|s;ziu43##nGsUn_fLc3i^GN{%w_81;sn%E6=foygU)DaRG| zJMGYH9As}beG{f1Tnf4P4^!WfWX1|STh6D>j1v7^FEq8Xmlm#%KKQlJb(V#0-YbpJ z&G$!+IfDhfFFy|)$rie)r<MJ*Fdm~n>wqmBh7E)M@H6k9PVZ@LU~RL@@F!aJo)Lr7 zP47v*%dR|IvbK@Ck_LI-<9kaCPS}>&E_jW#jqsfCm#Oc>cHNF`t<Al(Cv1x&!**M| z*`Uv|=O5-x@O86gU;H=Q@>KV>=;Mqn`vm1?8~nGI;PXc&g)e0u+;*`qz&sj&x7wIT z+Zb=RV_-6K=xYJ|XQdA$uJK<O7i?SO3V!rOXp@YO$mN_bLS>)t32iz?Y*Gtn?OHi& zx2IUqWI2`mP<}0jd}#YI;<?f}Yd3_vU$N%HMBw%LGx~gZ;7eT}_;-Et`S98(effWx z52u1B>U`M9eMRW40;$J7AEIv>KMjqOoQ~38_<YCy&Nk=0PMiG6<dII5oMYE$d0-sp zDrMx)P&*e*C8x$E<kq;D92--}wNXOOv0_gkyXe;T{Ki|hxMqK7u0>+Sj|_^^ej|>h zn_O7WCPuRd62N1|vlKC(Yxg|SZU^O01ViKAV_Z*1%Te;sNIzI*^#3vzd%$Ckg_V~7 zrCjWaMSlp5`>HdPE;>zeu>JCH<TdM~>o~cyq=mWt!%su2-tAU4LfuK(cB4#WTD#u_ zzkd_u+wA4|=Kn<bAMEA$+TW!7ReL%9b06g|+spBZze4#7_VROc)D4tBXD`QR{XFGs z?dABOpQijtdpSPkHIzSYFULpx3(6m{m*ew&jPi%<<@j)yQNF}po-;zNp?s0OoH_6T z%I~(9GoRf{`R(>{=9~qTSK7;&N2(~Fr<ZTk#zpQwA<pO~%718wCpXM&%CEMUlegts z%CG3cvr!vg@~T`(`3!qGc~35*{9=1KIZUQfUSKaLA4w7AlkDZ>7P*k}G4^uugp8wn zq`jP+AEPNBZZ9W)M=s@A_HuG@WK%xGUQS+(Gbm5Bmy;tSo$@~Ra`IjDqdd`G&N<>_ z%3bzy&h^Gq-q~A|D;XEPKRII?`UG8JFXy~#kn)4}a?Us&qP*Q+&Y8n~lz(9_=ltK7 zl<%^auj4Ep<)7HgIhXVq<?q|eInVO}<?q<bIScb1<$tl4Z^>4-Q@+(+&KZntl)r8- z=lsGSDBo-^C-(nU%3spU>$QFpekLC7Wy(F2-;ts9Y1cc}EMDF{!#jIEtZ}u)zC+~o z^Q<67yl-k<?)P)+-0XGcZcS2C&Re#+)|jVXAohA$k3!#@k17jt9qt)tC6*>va2`$W ze3P^1lAkPx9DMAX*C&2a!g@EgF6q4Klkykd{n|8qx$|?k51!3@KLb4O^O7|CT5vVu z8^Av>R^StX=Z@J0?44*Zx!5&{4rP+$TNNKNYX@>24)ESDxo4qe8Fke<6<@`}#lGCF zQ+-L)Gux3^<xi*`T=B5eXXd0bbAg>IhhxZ<pxwzM`KBtIdcG;i?JT*%j*@pu?%OGi zEA&k&EDGM3r1(-CaWmF@{lNPb1>}By;Qkd8$@`oE{H9^b*oD27;M4f4vwl1^`0g`` z+`qTt81qU4xx|C6-rm8L_ji2gP&P|Gx_RUzzKz%eKkJaEvXz~cuAy}WS<1`)pDP`i z{pcIZH%IZa%y{U(-p?J7TsOi|7@Ypl+!dwwUi$u8M^d}Q#^=Tt2Df*HHf8p5*H!b^ zO@1}EZ}EyY=GOAJ6m=obJnkI2jo60cpOd%Fpi$}<Is(9rtSJn>3BDza!N`XSgO{BM zZ5mOd1oNr$7VxhF_lSpxv2!TFTjP~r^U2WWoqRW<t}rNg1;5bafHy{z#Hk}$SB<`M zWZhRkD-8aoBeZD)V>*)mH!dj*zU+uo_h-eag1a7kBWUXu@bBe!xns-Zbw$;arJmcU z$Ly!_U}*DZsqb;lJ5o;zKB5sdaq4B3dcJ}tufVLaz-)_CZ?(X@dm^;?X@Oa9fpNvF zk62)Sb3C;9mjd&G1%@0I8!Rx7LeFCY^NIzgCSHBV0&`nOX!Fkn=5-6qws>`)1?KW& zq0J8fa~5UIT3o-}w=h_8iRG18#>(u6%!8rLcLKi<`#7bKGInuq#rJ#0^Sahcf|4I? zXclYPB>33H9U-n-;#c8?k%cQt`?%}UJ9jnS#W-bTx<06wJ#>xYNUSSo?0$F=>oDv~ zDN8)@&@3r`|6^<}`9HBdm=s?gL?+eQj<oi~#LOV}QCXrxa3|KKF-|7FM28NS=*SEf zxYFCvQDqvAzB+6ov<Mz#Tgy*g0qv5TTh3z~roAVSm5WMpn%u~T^o8UFy_OtaoVnP! 
z)-|YoQMRIPf#2uSHj$5M{||R>10Gd%E&T5@laQHA5=aOE5+DhnCKJ?xfe?je5~u_s zT4JnUZ4;o?2@(4SwG|PW5N#8Jy$+%%woTA_n+bADEm6|;UIVl?7_~<H=;t*7+vWqU z$X5n|`Ty3LImuxN!S>$wdEV!r=gD*C?6ddUYp=cbT5GSp_TIwhvE%EkRi*tIp*GrR zW2!c`PjN1~<mur|x6C7J4{RQx;=#AwT9w*wsmSa%!>l?JC@Etsthokz<c@rw(2z6l zclA89%HQ_iF8mFw{V)diBYW~Rt)iTEsaUO!{W`nK!JqNQeNXlx;Aeq*3+F|aBpwUs zdq|`mnO|Zq%$}6b=r<d(By{JblQ?It>Q$VxoYb_mG_d8~PXjaWot&RpqtvX+puxS& ziKI_$;N!m2_KfuBSZFS>v7dlzD}6TbE!1z{q~8Y8hUhz2sPCeIc~1Y`8U2j8{{~>> zml1zFwkcEkEkDgagL;Loz7d|5UT9kYZ8=Y?U5mB1-!)QgZbWbF8Uc@Ki4}{X!!D82 zoZ}drr`)E`Q-m)rb~f@W6Nf*0H?VKSpxhn-zcUXC%56=T*%{DzIrpng(5yvUj;=k> z{l%cXz8ZO5ioAXhpO7I3Mb5`HC@;RCO`kFLq<#I!>+$9H_P>O@PNpyJX1s~4-aJ`# z^jH#gS?&Iw>i3Ka%4#FO$m%R)wLK)Ooyh8pkgS$HbHmE&^~!rc|97KD9{ZC?PE&Uu zyc1hJD5GPM(T^gdW027flZ=ir$>_8*f#w60ixC+OOkHk*Lrh3Mj||D@EBrzE92+K| z1@6V<v(TjzS=b`=m}GNINH!aF#DU9CC67rq$Ao0Fk>_J%^S?`;=LY3vOh`5xdEP`e z|4{OnWOGbNHXC{V1AI119+Pa23CU(7&-andb&|&<n`1(<*~oJ@vUxpu24(X&Wb=4k zHvbtL*5DtZK`V0ke)9f`vu}4Hk2fE@)?bL6@>?f34H;Y$7nH%a4b{c{78~PuT+`XB zS-Y2UyuP})leGKZXI_8l_5Pm8*ZcABc=M3C-IvDsk<TUFldkulm>j34cTbA*uV&wg zpF2=L1E<sQt(;wjFX{;Pgtv?_Rz>|8A|sKFQnw)+yO50{b3T59eR1F{c+3G0@rj;B zCIPcu#z9m_4&U&mzBb#DXB{^ngO>tZ#!03wgU8j`kinVMQyXWm%R~m}A%oMX$48!C zXs`%5!P(aFnObyv1~NFm!r6~cbpWQw;Lj~{@t5c_*LtqZ{eem5zCQ2bGB<+#KBq<I za(BsvWLi$#1!NlS`e$X@h6&L7hX3NMQ;oBykvE(VE@yWh<!stQ_UO9NFWuP08Q20f z7M07pf5oM)%*?f}$8E@0h5pRIj>^-bE8NJ~mGtqMHTn2@_Uh-!+w`-+Q=8n#+<MBJ zze$?;{lv}xNjdOXWAUA-MmB!Cy7)$As}or$ekrkkIcK$LvP<*dWbo+S)x{0)=<DyH z1KwNg_r1M%fJYnOi`99w@y*5lm)?%m(;MH6^>21+{>@i#j@pZh<xH?$w#)PVHN5+H zujN}}X7F3$<(j|78H~m5#^!H)G1gzR-tMg}P~HuDqrEjV_&!VX*IuppH+)g^H@&pj zZ(mtm?6cVGuoZVL!baV|_bg~sM_%f}R^=QrY{;4!tatM+?K0*DCvM)?xGpy^Hhf-m zG}P|n&;i}NIT*t-Sgw=xB$-$2WZZ<)a29g}p`(<Qxz#)R7*A}9G|{h>v<IPI8}!>h zK)*44Z6W%#LBDrH^lO8DZO|!I8{Ox^u9l~VryDwzUqPFoottMl?-|hV9_V=wwA~MF z+|W+eWgC$9^6cbU4jop^($ACgm-Ae~`$FjBhF)%HH<Ix#u-l-$)W!LaB_HtH3~w9b zG!we+e<@a9_g%{S2mF@1AF^_}-?c#Vn`tb*Ml;VRT9~`dD9V*HeQgmc_Z>TPI^NyL zK<OXme4B+&JCQ>MUk^Kfs~*vDvz!&odDwPf@5cWouw~ws*-_9k?&Jc02C^qlo1o7* zZD-*1`ySYo8tFRHHd&SU$e+1-f$(dG?aaIb8RhQTiEG^b^$D&c>yf!4Q}ZIzDi1>U zL{(FaJ@D3h$o4&FKWN)CLhZ8JJjFr%5S$Ooc%I4ljabfHe3G0cr<!gg-+j+2?+?EF z^#hFw6Y9hce&)N&Hf{a(b>NlKw<nT$FZ^2=MdWL2a$P6+k#G7OZYK3-e)Ql0?5~c@ z_|`vWj#0(Y*eO}D$j*tXa$$P=)?UrCX(4m=UTbDkH0RkY)a)Ci&kuq>c6E~(=PL9k ze4S`h>o-ZBW7V}=&-sq-e3mvAT(^KeO>G)i!2JNU;Z@NS$gJc1m*Z1-gZHNOE`O=u zpQ=ilzUz{?;V$2L<@eFXEZXCuE!)4#U9~b6;WL@Ti47(C@HqNM^imsoX*~MkC}qD5 zjoL!=Sz(Ft_C_gqEJdG_3Vph<$%Q`fbdWx0{?7q9=UwnR!T&t)`UJdQ3*n`)_V-!{ zul?Y)--K7Y39klh6Wc`WVfsCDx#o4QEp1z*+3K8Y9c|kKftl(<^{uLisF7s_QC>@- zr&!M3S$9zR*DZ=@SvT3@FB?%ccj@2MnLo#<&EHN?pS0~=&{DQH-YfZRUbVzR|Gmq% zUfM40cU<Eso(7L_hv6sv$di?p=(-oC$LC&-|76GNrrnEVu0?d5EjG3;R!gi~xgx8- z0^iIY^yzEBE`35(RwMgY9E|tYMF5L(8&_}NT|qfpY)oAYWv+|V`d=sSIxVKp8VHmK zo}F`9pR}o2?zq-mS%Z0vy8!O}Bl{m>)h?X}mM!3ZD%NLWRpp@(s)Wmw-l}B|l>55{ ze#U%@)9Px`eL38vq-y6s;Er76uxT5T;iHOW@Nf<KO5}#(_k<+1Szw`0oC1%zN=XLg zZELj~iIcDBzN#kqJ&UcV2Kev;^M#~}G?8uKV_Un*uaTCgk}ArOTaDMxs!No)t0Q=B zJ9kAmTk~~&ncEX3b*jn}*4nK$bf?G=*&qM(S=t)<Eq<!`yh~djXRhF$Tp;?Oxr?%r zN50EAw*)?DGty?0U-ks(=j>a9^OmYOh21p98Q_f6;qIES``3N$@oHndCNf?{-dO$; zDCuU;Zz6acW1J3-Lupg0b0%#Td7|Twt^AgZNxhA!&cBD~A>SL1a333OEM!c{*`oLi zN=}5@Dzqs!(MIk7Fxo71ICUnlMf?%wy5+oe$~nhS2l|ln*HzPx$m84K(#K~P{q5tf zF(*9zLb0>1?h%*2y)%GLR!f?AUtHz#UysjcBR-!p#_#3GgZ+%>9gN3XemCIrapCh3 zIk6o%k@ovQ^9ks%KLlTkP$m0A@LvzXuR9oo|JpF{-?$L`{|+=qfd8e*_)Rwj+aPl@ z-+Jz2HMPM(AG{-YnDXoo<uUN+qaTit$G6o4XK#deg$YhO^N76y$CPJ(D31YWJF@Wu zf%B{hjwMpx&t}lH?s%Yir@%4g*&oVdz^S0FHv~?T2~I(zx5<Q0(a}Kjj|7e>&;C#z z15Oro{Zin(Xo9mo()*eT4so>28wHLj&;C#z15V$kfo7k;`GpD2-bin+2~PXRfoAUV 
z9lJl2C!N07AIf9E+5SnOc?)@%b8*h~0{@`Sl{-wi-<$FA9p+B1R|Cy=4w)}$Am2*z zsW$`7)#MB8i|RQ2t?{gpCbL$W!kXy>?Ba>o#gnj$qrA6ZTiA#j7u(`x>W)_5D;C=# z1KZ-{i3R#S7Iy55j6K(IpGS0G8*(}0Kj#eC819Kf+Ze|lIM>EVjmI7t!p7Kk0UP5m z^931)t}$wtVP8C>+ZQRieX$k$;%VK!NDi|v#-C?j{4?_EV)n(hi`W<2!t4v>3fdRH z)n%FZ-IaF<{WEA^B=_Zy3AZm&LiWXY-M&a3!oDzUiwWqBpCbpw{%TAJ(k4$!?(3qT zbeqd6HpTcl#Z$Mruql$nrT`DY<qB{ymn%aD2koBZzN*om4=-29zUZdUYlpEfJj`_r z`{Iec*La!3`RiHFsK=)EJn5NRHA0=)7p*pb2fLvg-R@ytWVSJW#W%yos9UDI8)ro8 zet5An9xZpyz5+Yp&2#LFxVkuPOx@ks3K}*-ugSL9a4`A;w#69Twm59EEev~N1LO9- z_gwz_pk+`m1>^s}c^aQ2W5^fMqjCq4%oonA`8s3mYmB+CGWKp~4BkeZ!mU2Wq*vw* z?_y)W3+>FhNZNZ(p6F)P^mlkB9XTR(T?QX9U(S{EjMG8=_ib#*Hhi?H8s{T^Hh9j) zCh!U3C+FylX`0LXJHYy9#(`OHFFP108BO`1K7_Z1(1%5R7t)90P5Ln99DOK!E&8zG zV4!)C3BKsVaDD6e=wFBL>JBvD0{ms;41Ji~cOiY4A$aKeaOgau55FXNbX^z9GlV|m ztn*>>h(5eZ^60uQ1jll|J`_DZTprPf>5@m+bs;zf=j+2y(FMch5q&sP;OM%}X#4s4 zuv6d+lSlO78OfvTI)gsv>%%uqc|;%j!gQwS!#?t$4|iTjAO8GZd|+<{_4si4eB=|G z>wVy9D$1)N|4xpa(0}4)jq!<pXpNP-c{qR6rM$9cLF^s(plSGrRFjXh-j=Ob-YNPx zlsg+o%b7^d=OleEbF#4XA4<AK)9(oi-Y*%<zeUmuG;eiSey<K+^9Ez&2I1G~@U_rg z7lY}K=<qGxLt${%>F_Py;Q6V8`B&@kE#4`b$dSSHdvy2~@4T?|l{$Q@cX?R4oZ+~v z!0N3IOJAzPw|buoOTS%*Z}sjAOTS5nAK^U|mVSc{Kf-I}UQsii3w8Jr-YH?}^K|$T z-g#l^b9MNU-sNHGIXe7EZ*^GubRB-A_qnk2EFFHNcVAe#oc*`VGQxW(EIn0+Kf-I} z3|lk)<8}BWyi>x`N9*uMc;|(s$LjE-yvxJVqjdOD-s-S)O@|-leJ(8hvy<StH_E#& zEd98oTWs+8xp@9V(hF=}EAwqL{ymbu-sYVWmVQWwZ}ZLzOaF@w-|k%=mi|8J_zvC@ z-$5$#`{%Gv?nA$C8OSGgu8h6WO;`KzACix_I&U*JgWLsu2)pFWnt6=Z0`zIV?_|{c ze$k<<H|EyhTa$4;nK2<}2}=5~KDBu<9bdujZNsOAADH=`hCegtQ#&;<cT8+*Jw^Nq z@&~(k=Ni_Z#U6gwf`5T_*swKfSr@Tso|f!bTl?sUpbfTFw^5UI8}(^y)TeYCb$nkH zwy)S@_~Ed}#?>{D-|&G=0q1){KC}y!7o98R-MSBLT%F=+l%J*Bb0e_lMtFbm4lzK; z@t_SizHjN2a2qZ;WLu8YZOic?+p+<@X85kewsf<eo`P*TI)qnaLa<GF+W5YFd?$iS zV+fZr0~c&dxht>}JOmevjxDd7aM31zK3rTTKbjerpAUgcR|uCvTad1br(x&H9ft-k z_kxSL4eFF(=hB|E&xcnhYii<06Fawj(9Uf){AiCuhoBukNUJ^ElPk1h@0H%~_@{LI zG9p#Zd=q}xh2e)UF=U5>AAY`b{f|TUq3dG!4MD#>A^K&6=+_zQOZs+}VdL6ZyO*(I z*tf0Nw<D9p#thCOCWhhe=^w;>Y0Brry$n0%=ZuYT`f-lZ039^$8WlS9j!@0Pz75uY z=D@jq>oL)8Y4Yc+AKLvKziW8?UDzhle}#-!8Mi7lZZ+bMCYk#0&tW*oJnC(D-{8M@ z!oRbwV2sgxWW(p`U*dn5AL#2>bAEenpHv+U5E~sctlutZzmM<RlX$*;T|`{f(EhA( zCi^}49Q$2lhuH5P`tFZnJL>+d@qJwh=fh`z+rJFI^}RszAH-e^!Pkf%3!fj<{rCR> z{=0$Z4^8mXN1hMA=z{*fi_<6V{XnyBe~0kT=U&-x{EI^VzVrQw7l-e`wzZh(zm&V$ z!{KMKH{oCI-#-SL$D80+*@uAtxBmhBKZoWp`n+p=-!^1sxUVv+FVLJXI1H1AIb!n- z(vQRB>BARSevv%w%ssw)kv!X(=RALrJaxwc&FvS-Q*k2Dd{FY}^T|-#!{(DkheG{3 zOdk5TdDKPVIQ|}Jc3vb;A9Lje7s=D!8)&}cB6+qS4*E`qYgZk8@tnZX=fI)1hs}X2 z{uF55XUdZ<yd9RO=r6=-vai8_GgO|e513D0B#)yD{eF?U`tVg;c9A^o9|oEiUnI}= zzXh7VA$jz9J2I2?BVX9Oz3xDu`C-YU&$mN))}K2kuXsDqyhifq^X*Wcz30xii#h_$ ztH?9l9J#Xt-}3%Iv)hCd{<!mS>I2rdg^nS-hsx6q-j7`*&vs<)j*H}}LvFr)kvtWD zK(<~aPZ2z1H_=hYIgE~3?SW?em;<yPDv#rhK=W-E$<v45{W}-Q(~i&L$&2LKPQU)@ zB6;fGA^uJBg!*2`Ijrw1ejR9zHs#qSeHxahh&6?&raXIw$dlEE{59q28X}M5wLo*3 zDUT*{Jglxh==@z%p7bH|v_t0~k_Uc$>_WbuZ1`0@N&DGHK3qO0`C7?$_#*kz$k+Ka z<NqS{CP}?N3^ZSQk$f>yZ$qFNyJ}ecEadb2IM7U-$yk}U`bVbp_m^tSaV^Ys@k_0V z@SU-0#8MHr#QKc##CXLwl`%aSR~Jlkc<YIE>*xD8Y(V=Im+vIMPfm`JJ(<MGB=*^& zu+xw+-4=Zg>g_I8KIY>)+j{5eYsDGm!F3AD*?%jRm=Ey*?5Lm(y;i>qySy8Cwj~bl z_<UFY7}l=Ubj^F`7Xn*WE7knUiM+~gY*;Dt4d(71{Nv)MHt;<|KI+akaJ?5?1;^oV z>!H4kmBbrilSdOz;asWt$6#;n2VaSEmRJ@yF<%nXlg7M1UTks72z~OYPtsyY6FAaF z=eq?hV^%x+3uVs+<ucy2v>5qkl3(I+&a9aa&F4Y;0{r#)K7Z7gbRWa{F;s(XleOdD zgtc!D?bGR%ALEU_#N{Klh1l}ZeRgc+R_OzwWjg&tyX2dFvGU<l*lFvT=O1(Hz5UIs z=Ll{R+b!)$U7gt<qvJRjr`3gDpS`qBiPieeXM^{{8*P&Gz?Ffq=GY|haTwnP?=4|? 
z$NdlR*5Q^pymkeDEzV}mpEX7L%l2?Y|6i_TEsu4IVBC{&Zc;G*=}8^;!5F`AS~uc* zu^s=CKb!s({Oznyrsk?68z#4HwPm_aNxd5Js2M-ou=PG@s5}mDy1Lw3Kz{KjHGXXM z7t=20wK<I+&+|)M6mwmPi+U_PE((5(zZV!ad!t&u;pW~2d{7b_6dVtun=Hp4I?%m> zbr`AVuk7n*F1Q0YJDy+Q?_*xpLw&vAa~*Z>AfDy>_y|ioEG<8Mq+s_yR;S(=xJgz1 zZu-XLk?ySB3jfhacg60Cc;#J}Z#j~wjdV!-OC5DCq|7>eWK)=X>hUkYTS?t9?#uOZ z-zQH6d=u+lxqF=!*O#fqIwS^0_83T6iS-bAi7!mj7?<#N0lb~>GxiMG)yHqkeG-k- z5xy?8fwI$LiNhrR%0VpFB5;N_9n<ccbYtFlRau_?+SUxswqfJf)DqhzuWjX9LhU8> z$&5>f#6XLWWCu^3&!-pZ{<$vbl1|@Dd?P65hWeH@%(qKJ-|+3|u|4pG?5}#vb_ua< znoEzb%K&dVBSn0Ije$V6ErvKCxp(w^a8oK*<ZGZ>>o{>U_io&PTwN1MoT}!%3146% z{te-;ZtmmI>tMfD8omc3wkLHAcl6?a=!#IY3PZH5;fV~&6*<~^a6!w1mj~CZRkYgW znXJ62ig-rGyJg&ZVyZ?rebuTq&!SA(v&#D<<$m`&)(z8uk)ZDXcAUx;9C}zslzvG= zFTNBX*s^ql%5lfJa@UbxcoteOVP8pZChaC>DR*Hgo>t_b<T2Bv#`LY6wL{xPH98M^ z^l;|E09`aYd$3h*BIVuWsX5EqqQp;y=w@!;{EM`2D(#a#r~#g|aSQDe9}>Pz|I3`G zZ*E^YJ{tLEjx(Zf^mS2PmZWhW$rR;{wy4db`#h{qskHUQcVz=}G_kI6oE1u4@-3CV zwWX<CHC1^Jjul#`I;Vq+^ohi`UIE^n+zGD3b;WsaMIH_4BUh~Vt@e<-*5U@_wa9AD zM-J+}uGm3Yy;k|U;br^ta$b(;*66;a^l?uV^6q(!c%kS%e2^tQ&$)c^U1XsA&H!HT zcICSoI-A?~Hhcce?aMaWhaYaZ_Hh94K>JQiqkWvcWVA0mtbI@YZ?_MbaiR9f`68*% z!NBF~S3v8~SlILTK#2>vVE+CU^Y@PeCI5%-3O-kyJNL=nz?Lk9y(RC;={?N*WxU%? zE3f+N1DkrTBEDsq@jNh=8M|(LE}8I426sgCAeR|qD;ls_WXx|N{$F@;S#I#TD)d|* zdj1`o#+J0cxQy85ztLZA;F!n7L(pIP(1opHwADQBzfNBn<H9@!_P{3><Nrk={*V|8 z;r|z|0!R2?U4%a>P5f~;_$d>AJS{Xg>@oO%)c-brAe%45AEJ}Y{r?_g)1X<^i9pFq z^#A>%LVXSoe2YGB3Y;C_fePOLop<T?<0+hPZj$3A$nhFvD0}{Pvc|i!hjFSrsn`RY z8;@Mi+hc7xmPLKY#iQtJ$!o}4S)Vb>qvI*8LWJaz*cA=P66P5@(_63fCPU9M&R~%= zkp;q6_O7_x)4&#fa$&!@zwese#`%TP@9t=oYp$!6xbr;P*eUph<lI}th3jS6M<@L} zBh=3gp?;QcL&-Vp{LO-g=oe{Q1Lv;jaFKJVVg3B%YyY&I+c~V9`+WVpQ-RA@>1Uxy z&B;Kv&{<^kMAq7N)8Cz(%_-yi*q*?aE9qPPUB*A}Sr_SRp_TOQ-)2Bhbb`>SLF5Jf zl=`rm-A^4NN6Ri9)D5C1-IH})xtYFw%%I<op`QyqA@8-JzLdF);BDiqOSfgv_CU9O zGL-G%Mu&>+5w1hmh4H{re5l6QH}e{Ox6|N>q7Y9g{QlC;9nwa|{>6A=m5C?5B|H%v z`+7fYmGKrF`@F|`<AXf0(>Ap3+i5#r_x-Q&#QTgRp>5&uL7sSu_*{d|J;wqiFTx-9 zj-=hfBk)88Jn`>mLp%b{+{yd1ybl%6VZ?BJ$~@<Z$(lE~_h(Ggt<azh8i?HK*dN$Z z`|Lb@E?P5L`D@YnHOav_a?L|lUk&r+494?GtF5UgT4Fqt`dUT*kuLiSP7;@WGw1i* zb3OBE_9YwnWzC2^O#WK(l@sSS8aqI~jm8eRho|hbd7Shop@Hl}5Sv}nHpx5d%0sPn zhtG{hr)d(yr>_Ue8upY>yBcWMGqlSWYFA^ZU5%l3HPWs|aL{9?8N>1{<5_blc8cK1 zQ|zD3_z1+F7Q7q5Rr1N+x>xuel$C>Xif>E{j6FJXQophH%cflV9vI5$<6Xw$7{;QT zF=QhRoA)u*6_i`AI(2!Ut@kl~`e`e3FBxBRgSNema~x&vcr>a&pN|{!NO`Zu2AE9$ zi*0ZI5A0JN=KWuWZV>s<$-1RR{7E-Hkx#J`yRG!Mf88czf?t=@*m7cnZ^N!L=1suU zZPSjpz?SfRQKI|XKRdf+v}crl=Fwkh&u@9RO)c;f^V%$ak>}5x-7@AI%DW6%Bry{a z%99}dF7tTVQ`3yCpBkZlh7D07d>VmnNNcPue%N!lw{=C+Dv_7$3m`^fOw%u9Jn$R; z#^!h9wEkB5TkPD!y!)s(X)1dmblW=EKe87L-82?DzNOyXJ69>sT%px^)`Ktcc#0z} zjjPgPwCk<iYc@R}6LGzLl<U()qg<O5xGZh>;JT?*s-r+fY@f0*?e6*fFQ0J9^$+uZ z$FHVd|LU(Uz5dlXw(F;|C*akozux+4+OM}}QtnlYYL>DE9(BYh=YyViO!zTn{-&21 z5eV!W3RXAy=4<x4hr!`tkLvg^DxzKB>hM+TAD5H%fl^0a9o72BvWRQhXX@E1?Gbv) zQ-%H_mDXPB(T+UpDLD1)l8BPuFkg>QmbxkYKN92GbXWE|-*Oe%Ucmny6V~}QX6O0b zQy=oJn6}n;SN3V&!}>SNky;reoGI|#33)!rS2OjCdcF<Q=IVJjX8%ghU!b%j>~AQU zoH)-Xc`uz%py$n)n(vdm&S@^{nB%h~T74rXSbVmrnlE~q@)<anDr;Z4&U3H$7DwU3 zqE7^0X@kU0m%7vu3$)ds_p@r;ENmQql_ymn6N2A*HA>Q2s#;Tb>-~!?<2|=P*=ALB zrrsZ{$1``Y`a+IJ)z4X`GG?g=&s>lC(H#97zm?K<cvRjFc^8=J6I6or@PnI#XG&GX zk+clg10LwHURm3h6E9cb(mKl2`WMP5Bkf1`m3Y(_&w=q*b<MkvoV_gP`52Y($|zM? 
zMH|YftBksqfB3+rGG*COrmQ;zCVbYhTv@zQ_mq1b?=DvwzMLT3GNs5T_?-*?l|)sU zZ-QySTi&^PlNn|Zo&|W85T0{@HTN8N-(`IMHtjdbf2H1171_6sScbnvsE%){s5)uy zv-HvP>e3Aj^iz1d53%2Mz0NaHeb4bdSYK3Kfr{#T7P|1Pds*f;p})R;>P7b5HC-0t z`gAGrJyS;QX}nW8+TFibFaA|!Z1}5E&9%I5;Qdm~x#3#I)VxtujVFNdd`#;fUx5F= zm#(~bsxQo%2QNsU-ASL$o3C~+KV5d<PL(!m-c)t}^3z{FumFA`kM0AR_ps`JmGZBf z$`gw+P<}q;9}bnDPx*&K<;kO$-^Te3QxaXX=OwDyz?>=dYMN~zts31rmGATUP8t1s z7rZ%TzH9e4m80l+H6{A_R&7-mJ^;Zrzdv=>l!smYrndKTr*(<o`AZYM47y$DDLgaW z|1$Q0*_8I0&^^`e`ZUhw+Jt|lBrZZVFQ$)o;8V7O$5j3~&*g|4+LuH7J5<DLzx#6i zA0xHQ-cCC{N5=T&TK`K0HrJ<}^mD-{YY)WXCwnt}@7Cq&iVY9*|GbK8`mKs=`a5)9 z&i`R({jmH_dwuJ}{4YO!|ADg3`RzhG^~u^z>HJIGy(8n=rG7i`1gA{=YfX07Cc&*h z#nmm+;_8+wd(*@G?}%Mg_a5{y+AQ*6GGo&U?S=jY5$Z@;P5uLAY4aZ_ga67@gkIm` z)AJ5|ADiZ5Yh3%Azy@x+q#5PbMR2YtWvXcBo6w<<vT>T{ZK26N;Af_--ISTW_U+oU zfn5f@%(N0Zwc1>#h)pb!GLHZUT5Vc?GO$afsg891t?p(0Q~4jNTt4$a<jvbUZA_ir zo$M*rlzK<LH9CyngBiMY-L9ciE%?T*=*kFUb!89wh$bnQ8sYkx+U)sbW3RPDU<a($ zj+670|HRF9{DIN<10BQxTd)&@vA?#{XSdW$&d(jITrFZZpqH>2u@w~m<6VBG<-M1N zyqKojZb?lqpnFqKk33QiPsn=0rKCS`DRvt?fzvD~uP)cd4W*Y(rOkXhZM*)tfM zn85y+N_?axN_6z@%e_^=9fuvy{3E+YY@y5re%YsK$FFRQcVSnup0nOF$Fsg_j-0oo z=Xv*9ztqJU9)277Y93O)MV9~rJG2a4Ah>!aYq{TH{o~A<BHFx&wT^|plk=GCTCKW$ zqr>rr)dd{SdbPt7=b0<EOwbn5<2CiVgbvW&DQ)gP9hK7!Po7!x3hP$jnwFni$b4I+ zh3E$ElD`3bY@_hwocW;U%$hy={(ky3!T5cSEh#Xt83l&e^g8`x2jPf4DDz*j2UlLo zem~~^7U-CMI0gN5g}0S<AL4ue7|vx2?!DIY3J-+aax+hVP}59|r_@tJe~NAQB>OyF zF|sdK_EM%g<xDW+d{((rU>W_2?Ka&*zu$NEgPJGKM&*b-kB_%S-pBB6*pc*)Q|vLR zd-xbt)p0xi3^)E@;m4UC4VzW=ep{N-GqQC%Qu_KBeBM}{+`k{&x@?5Xem_BZe@~rH z(S}>V-w*y*voAG>zw8GQ{KuVx{|poUdf5>ES6fw!ypQ9(n=;8~ZaGjmk8_6NAKcVK z-=5|_SF^R}@$BS`j^MdJ$xY8Phs&$DdiE>1)_2>W9cKzx(BCEX<P&(ed=`~+7#<1o zlB{FNnNZ`LWtP16Bu^T&d!Kw;Xv1Z+p`SL4Kd%jA&uN2ZYJ*-j)Q0i2LEgvmF1War zCuc7-vQkIe7hNrTLuVL>9e&6<R>whT?yLU%XZgJDV4aly{CdK<bD-c{!_rhfRc)qk znA7F^@ZlWkj;c@YwA!4;d`3lZz6#~d&);1VjAJ&B1A(XKkvS^7VB2x6>?8JH8q2u^ zA@~MN9UkLJo{jiJtlzvA8IT(MeP8gm$jh=APr^ag0=t;s%6i%KNR?=e-9x$@m2cE5 zeu>%H-qVbsS5KWi@4P(=Z8#2}X4v*_wLM~ari~cQVEaY(r8;-LdGDs{;ES3BIa9?` z+<2+-)#87{FYbL}vc<Q8xwY&s+=iW7&iq%_ML9F?NIB>F93oBf%6xkpYce%_cSD0( z%GmA9*?8*jq}w?|+f5xZS6vFv>EAd5OjGOiHSIseFb5?c>nI(pqae#!BgW2>GBpor ztiuxP3G77RuogIQ)`PKbrqkgY+&>S$v_Ok$Xd!8g&7D@E(HC+#gEPmwe9qP0y$PP; zo#c6dJO)jX@w+lzszYd>_Qt)N?C6#azS?4EM#C!B4l2^|3EMf_DephE%01w(L06H9 zJ7Pqp2IVC8z%D6eZLWYdB3VQBMXF}j8Fr@Kv~RB5QQ1RZW<9EQI|QFd?Ce|j&D~DA zW4db5)2gcHlznaA+;&O3HIz1<d`tJu?US@BBVWy&vTyF2>tL?z@P*PYCtvx#xml9d zVdQ&sPT9(RbBjnzU2E~Bo{8|Ku8rbp=NZE@&gbx}mejTJKE*z0SrcK6&hLoy6uU0j z{!FT}>1QBF`&Nj(CwOPi@h*jSSt0tW-_M2ScIQ_~yVOYQp5wiR_sP76%kge_-c~iz zYd`aW&F}+BTBfFE*_PgrTZmo_%AdhCb&<<8QOG6nA9_aOTd_6WG*SEmW17lX7t`^Y z@I_rWPZxQkRHfwYW<5>vH!d5QTOG<Tbr4H3^ElrdPvo-}KDsXNOX|p8bbK0nFZP~1 zyX7U)528~vmBbu2v8jZ8dTo}`b&V&^KV!tsnuXM#j=WW0{vzjSk8T=I+YkBih40fk z{P+d?ORebh2=qGZnrlX|zgHXB-<zk@s?2y#v6DVJhFydnaf<KgEAh3-a`zwphFyoS z>pQ0eeF+b-e$b5{<QVT6*8Z1&Lp|f+)svJFT`~gM5{yAeLZ6IA7VCSIo9Y?kq6=GT zr|gOB;l3vErR?GT%$jS#?MvV|pZJP-KL2R!j(x#3zlK;(f}GXVjf`e(s|ER3BxhaR z7ah=j{r#mD%ALDzYn+QBZQIYiXRpD#a@LUO?3+UEVodJhoRCnvMmJ$c=<Pa6yKJ=U z-)Pr3+9f`hppMk{bke5aUKH`oh>kB0wX1=4bj!Lys9oFic72I<%?H1Ew5z~ptQ$z1 zj?;!V+VuNSn~w1AqD`NTOzuB*r$+ws+ZC9n2F`pr2LFg0-Mkn6LeFQ=#@e%`HMPvE zY96xJ_&C2q^kr$D_1(hIx0;8dYhELLAv}IF?UR0<MZeFc|8FG^GC21a;3Vyp{cEz; zYq36=9PHD?Cd0OwX7r`VQR;k}`t}2t7|EHUKYu}+PSGCGFShh-Z%Qn2LCB+K-Z`h{ zv1UuQm$MrmYvCV$XztYlTM9=nZehGvQ~@*Xd)HzMs^(F^g8t41i_8BZc^c4VFD0lJ z7kzh_ciY%tnO4d)a0Z9Q9VqD@83SXQ{$hOBQYu=}4FXf@`rYTKYkyc>9R@7$8>+6o zeAnCBai1R3AivdriY`^n4f4AzTlOpKHrDhJ@R_8Y(fq_^J!U&_LvHOur8V_ugMB0C z!HUm9XrBS^CPmgKAMOv1F?7UZ(r4+dBfZi`Q|Kc(6U68j;m6+~D`cE0V%4P{Th9%- 
z8L?{CKg%D#qfUcAa^a8LLj3W|F#ec41b_TEgoDsZ_+x7rfBZ7UADq4NSlfE~Fj7tL zA}@WuGo5<Wuk)~J8&*9c{@eAjEf=HF7-;ky_{w+{8p$|6mqzE><qwANOJk1z4)xcM zfL`#3_(t=AWAmu^=()-(w(O$H2RC77dS`Ne8~$x?5@(V<$NZ0Z#7^uFXTei>1M@i9 zqjna5?dS(xk6W3iBfGJY#Eu=^^aqRHFF)HxJMWh|z%@0V^I@0+rvhJOhhbZa?6}Z5 zk)K6f+b_0T3pA`@jA75W{Ep{zdtl3jVgxy3=&^f{6T$=KAsOKwLPp3pw@F4^51vBH znaGG)@W9QK#}3MUDvSsATAm2W2z^hh?zf&|;sG~szZ1p-g11>lj1S3(UEnHwu^Aa5 zvVuK!x{T<O^?j3!==|=WjDSy_9%RIy$m2mqJQ0!+hj?#G8k7;9b7Vwf(>um@ZM-fc z1g6M{S3gHxtzmVw8L&b!BD}5^zJFdBf$cjWBTmr%wzqG{Eq|!AX59s3gz*0%>;~~i zd>;P))n`E&(Mim_AtTzNgEHcu;g46T)8LOB_~Vuke>@$=9|wovk7q(Sh>Q^axIc_P zo(}Pc=#0i;WW<B4pEMvNE=Hq7XtWi4lR`8S-FdEzNIXZ5-V?%4F~|5h^)ru}jw~-p zWZZB5MIOFliG>}__)cuv$k;>X>SMc#I*hU1$=JS7>?m{CmTipNZ}Swt2sUGGC-Wpl zJi2*q2dvyibh_Yc!?u($>}H)>=8VRi&h}P*uK2dBiJUo$FG`ye^x3L1<z0_IteXG3 zH13=DMQw3m%(JU99?W>gac12C+nGBaw{v${bfKJ6>~QoVa~*Q#;v@DH+uCUUU5`7` zg6j~&#Z?Gy$Mc>pzUiKytdceNG}St{YdJm~)&|nR_j||BZt0pLzJOHc!~E`cu=dGV z?8VL_Ua-B8Gsont3RxG8g~sKK<=(ufH#KeC{i&QcA?Y$7v||srRsYLwzRMY%lCP0@ zI&(H}C{G8p;qH0LV~0sRu)vlyef=&JL$?Q>8*+`EwUzL-)Hd<4X`}G@t0QBy^;_YA zT@oX!^AG+j@wNP(SOgcp>(6TXS$p@=Kho~W;3zQ#=#3V6mwkc$6Zj6JIde^^7D<Oj zo5qn|Z%M0M4_>x$%zel!@ly2#u3*{(;uJ`ebK&bPg_W|_W1ED}jx;;I;ArfG4N2;% z6)Q4lGe17E`JoXvF2y%nW3lNme1?z7n5+KYghMC1H`IK8zNQkxX(4CDk751Vm^U3~ zKHQjq9z%aufx9coQ~W%8qZ^RFqDwvaZN7#NB4CXmwlWr5IF9+pNPMYQ?~k|S`u@HC zYF}rR+Hr8Cw&TOGmK}dfweB$1V|%gJ`{1j_>WTfG)I*;&H-UE>^BtkxXyzGTg&y5j z&!)rZ&mGTc9dVa66x%-e;8Wi`&VbP@-W^rMSF5-czL&4@z3|)|p9kJ*wEl2+qw1e! zC-^518_Q>%xcgD!Hyf*O?<dXct4`^+Vyj9#61IMm?UIJ#Qe|6lp|Y{WYnaPMgvyFd zW}7;E*+-eDo6FilWo5thLTEh`TF0c-6lb!|R;prFyo5jF70z~Ur5z4p$~u{=G55v3 zFnroJbgzSXNp#acQuMeXCuaa<zeN0j#KFfvvlr-_B4UX=DdM-ZI1f_(ugqt2=r`f% zpJT&^_vsg;Pc4>A8AHD6ul38kO@jwT{+(HK1O0kEbDYIK`dRlC`>bE>XI$6)2fStc z{)OqcoT(VeSPN7~beyghUrIdjwh`X$5gy%t$bEud>BsH#V<!E$sEdA?@<reFthqYB zY^#1~M^~NAhwk`9LmsQQF7u`%>z<vbbsT!!?)&RA(Z0WzD!-YpwpGvF@hf7hCc;DJ z*msSyEydRC6(7C$YH1fb&8dj%dnhwV6V|(T$(R&BFJrsqV_?YoT+qHtZ7RV|DqAM= z<J6{gUm;@Ptn&S8asUyp5+$B2=vyWZD|B9M__&gN6r;gkbg7J+1C*^nKDv=LPtfPS z$*$FM-fJ!WD8EIn)gTia$x}mpqp9Z!&KAK2YO%yhoa>mTkwKj_U1Z&#k-0V%v^Pa3 zN$dt~t*jGygRNTgp7P0=LY(`j*MD+~_``!S0wUiGxgoSp->JNt&_iY#c7mtyPU6G) zyZ<(oSg8=Lehi(&M<{gqsn8v|O8e5GtE34Y&7u#;vtRre@FzZ?7NM6xE3+=74ri*; zD$?P9nI{PUea#m1-Amt<CDh+$=)@4s;cw1t!T-O;Hn{dsnx-n3)AmA}N+?8M;@`-A z96Su!B6t`+HF>^H*>FG7y8FkjN&}Ba&MP+<TOoG3_*3>NR}OQ4fmnO-C%pnpj#ewS zL^iDYc8K@T{c^tu=T0c+JHVCpuH-2;i_FDzeuLLuf(NDTf2Gen$VS^V);t&^Uz!|@ zp*N>JJSCWR>ds%{Qy6Ohkj9zEoYhJ_^&=vVNIzCXn?~r1-fX#=K3WZ5-Ahb}oRj^M zWnA4B%eeN*I{kxp6+DdZ)Ubct<du1iLjQ=q8mGq9rCJmF=FzsvypP46J2~l#oVD|q zKJMg<ed(hPe09=?+o;=sA-tMQ8Sw#}qVAV1$#qXzlG}rGt0ZKflpWWUg8mJ!U&@NT zkgSsHN~wPe@5IJ9z0gnWkkzzJ)?y@WBWb}tKX;$csXKoYT7Oz!toNlUj!AUkXwikx zpF0#<1ouvGH``^R54$*rMeaY4-~Y|;GUV?`-UHRD!-$E}ZH%R(yqr^R+mF4`hF>b( zUf@qB#!%pqm$i@7_l$fE`%-t~2W!e6$2<YrH0&$gE#=sw8T3V#X<mFVn<c-pTVzi2 zi7FfAT}FN*uMVHsVSSvWdWxkEY2$f#Z95mQ$?*mL^(MSr243Vd@al4?mRRcQa&WG< z3BL>jzm1xAlYw7+2)~9=-m63SNjYhc)U}rw$KY5_<^0hL;MjF8jv3<%xU+zB8CXwL z6V&L5^s`Po@|kf&hWihVRhthI=M|1?_n8naKV(0ff$R7XuAQU2Bf-@uXV6sYb%U=< z<-I33W7Ai5vZgbDH~!eu-_khm&B9nb-zTf%sKtBNVQY@os<+Di4$`vkIyNYquqSn! 
zG2CZ*>dperkb_r3e$#I5`pg54++lrlV*S=JTGiHL#6z7#R*MYIV&0^H-Dsf>H9Loe z^<6c<7Qd6=EOQcfv_AL9b~Dz+9%bA#9h7upS>4FYLeganxq;z!1kZ2p0d^+)PmU2o z=L4>O*XT%3@zvxVLtdvnC81QaR?0XZkKGDRx^F2?zQs;RD6I;8Q&q*s@H;#}IkELK z<0mE%r(G%MrsS%I;$x(5<^L9|+(R_BFXgH&Ti2kcPlDrwwPgopsn)lpoSYR@8z_4` zU`grwfi<bljg4G@+!h-2;>T%R@txVoikbGv<b*uUQt3fXTq5Ow6R?cyTLzwK*yul& zcGJ#MhdNScu|+Qj#zM~VUjh8WF)rQic7HSZsy^25s*ruD_!OR{Yyz;ltq!NWAEJzX z?Wh~`@>OMx{9ZC9AzD3CEbBY;g}zs0A^nhn4%GX>`qWnG??<S+?`Uw|u#VqiANHV^ z<<1D=4?B)J#D)r<XMdxt4P&Pf4{Z1xn<Sp`DE0t;`<8OP^-3Hg@=C?m-zNJ~q%TC@ zA7x$!JY5d_Fd}HbXy8BGdbG?1N*_~|V!JfrLo1aS5m}3aE*8ql+{}KYbhh&n)~48R zQ4MYKs#K**o;%PJ<jreVmDmX-cIYT)h?m}<vT6_Sd3%TnlXUngQ%jkV*QHg8?zcy} za-DmCmx&(7hv8hEoUkZT)8Y4E%a=0ldRcoBoA*ZOiOuGJ2KpIxhVZf28DHYN)G6m3 zOPwC#?~Ss0-OxqWxa=c>d*);vtCaCk5x+k9+sut))F*FmjeU>v$7L^FYSUQO4sRib z)6e|w_H~Yw)07h(Xn!#2nY29_?M~X(Gs!hO1Kd-=eF|;K0CyX>r+$;YDA=AE^1B@V zxa#R-Z0I66ADV@rl0|0&IjJhSPi(d-&WK6Huf3S(H)$91#}<K$IsA4swu_A2$7#3V zT4UL=6}bMJh?TYRUDnk00OMxL6@sgcvXny{%lKC9#=O{Avp98UK62$%T@Tvq%Im;R z+XMXnmwDnjc)-jPnHS)Rq#<|$nfWjA#9i>j7I-2SS@8?|`&*OXg~D|)DRM3;dqABA zUwmfGa2kBEaP8P<oO?3rvfzvT|1@8W)%k+CTuv$Hy4Hm8#Rs1T`9k12;0gF*lkmkl zc;d&exgKyuZuq0{h`iTWp455dVtpS&-^+Qm|F6r0&)fe`LZfh*@c)<oFa5vS|L{P- zs&yP?ZP_>@)6hGtiFjq**fyW@qqQ%`TfUZb<D(I3bEma+uIQzK^-=g$_A(rmwO(s* ztydBMM$Ac5x@SZ2?umo(ChM6j6iis)@4{9<_F=25f%%!p-JsnX(eaOKY%4#y%FbQC z(JOnVI98tiRt&aQEVfo0{)v&yDM#Ud3i=vuMNekn4;1|+^Plc(ls~X8!iPTc+VH7$ zXK}yOC#AE?(OY@wt$FCHbv{-30(h^h3a;C1pQy60Kwhjve>F%t`YfM#UD;bL@>KR( zcM2SQxJ47Vvu=;7+=ITeu@BjXK1)TPEk!m}Nle<PS>Dchn)l#68fP$S-1D92d}^}V zY>QE=)D^4;vcEcwGs{wm@91=@l7mwu{!2LzGKR4w-qIdf+|v1+=53e;t@$=91N*ep z)zUdd^LCMbh<VPR+0$?!OEsex5`+7{ZB2rcoDqyo;Vefl<z1pGZRlK)Te5Fm*1NA0 zy_e4ZVDwPd<uNH0qI>h%?>)UX*`cEAR-)rSqMa4!WzLfFuH^eG=(ow}GbzWsq@-e6 zOv-Z7Wi4OyrN9;VA4B_fEwl~T|KR;F{~z&x&>HO&+QtE2zNbU~w}AZ#^|VvZYuK*l zyiTj^?}qn;-sp5`R|93TrV<~BEY6@W+C=`54!ujLqlwGg>h40P9Twc}u3TbLz4Bdb zlh^t781K@Lg3n#QQA_Hfjm%Y_rLPZLW5Lsc-XGNzN8MKf|6_h1Chh-7-#&@0f~~q; zvpFj?i?b+3Wp@DYNC^K=fFu0ET)jm2M{KY!0HXtZ^gQ6RRQh8o@);ka;MGMQ){ph? zSxJL!B&}m9_~qf(ki1jL^C9&}`7CVpZ%Tgv$4$GZM&l=-KjnM+w8)e?;(^m!)sj2N z^Fc<C&mV^Wy(+%_!+o{Id6U(Vw72Fz;BBo+PPM4YFO2ZscAPun*0Uzzgf`L+OQgy! zQ_ClHUZ&2}F;>Oys7Pmz4*DWrZAd2mxN_0Xnq;xp`e=ju4QHKJb^Gbb+VV+hwz5eo zW0_vIbdIV_w!{=oqYZ}qupvL>&W<YLpZ+*TWtT>&%I&P%mfo%^?}ENd|M0+RfgyDt zO;pW|A)G#@eS&AfXZSPegYMC4=9Tb_@cnVVJw94B|1gwS@>x!C?>PM7z6AU03GQhX z{{d~u)U4hWX9J_MxF2Vc-BIV#Qs1K=9!swwes`Yc{dpEKe9RTQvxq++fBHAZ>*?6N zx(wOQclvmyd>30-_|rx{b*t-);C$mq)w~8gXwUQt`p5mo)$N+*I-|xd)!TGqoNB&@ z?~P)sX_lf=VkRHG8hcPh7unSvKf2Bu?Z`M&cAy>_O#m*=k&@b2wW<|5FTJ%~r;Uw# zHu6Y)wJRsAYGqvJy&F+Uz-&paw6SV|zrG5anK7{(+SaX&xUpTSoORq^Qo+3?GNy?o zE2#@a%}QUr;yjo_x7Qv0Ie|RYBKH#noNBYk)K=!;4cz7UmrK=Vi4Q8n&)HZ24cMPs zSL8Y)bPyUx^M5*z7?GP?XD(mtI&=LJ*O^i)cjvDAMKpK8H;LcLHmYorebil(PW@!- zZ8cVhBYM=`la5`3A1q3p>6;Rfd7O2pQ>m)uDQG2gw^2MDPrd@p9FFcASbw>7#p$lg zTxX&;|Fl@ymQFHgV$Y0W-WOdVv{?w>Nm=oON}W=-xv%b%xihl$1KJ~gD`}V9A@?W! 
zIi~3k`ZKBNeV*$+8MLqIzg^P*VuyEvgXo=}Yh3<%#*^k+I!VPWouvEJ^F${uK~H@d zeRTtR>w5IpV(jnhxWm2(y|xJbws7S?t|?rJ?_8g!^;%<!77z6I%G1I(!WY65(x#%d z5u>^HBD?-f)by;`s$_Ax{l@xyHT}9+)&${|IB5UoHmi3#{Vjd{CTk$uiK*UBU+;kr zWo((@iSJ70G0~fTs>dZh2cLQ9OX+*5&%<|i&_j2-L(+uyYe=hG`)G6@?@d>^W<{?~ zys^}$RTf^wIKG_cl|28TDg#C2pU&97yP<d!I!Yf;hsWyU=@0N!+QHyBvoRk`-G)p+ z4<gIoN-qoP&T3+ziEB?~-_N#8=F?NI%d}<2%;=e7&m7H}d)vUHD~hp|wsg`rvhV@W zmv`xXHnwSn{v6lj4)x(3JcW0!;CU)F7~dxMS~VtlTDsT+-U%PuID5qcz3%!He>VLp z`nL=Hy*5*HF#B;Q1Y_&1m#NJXtDMGrcLnzjqnD%CHby(N3C!<Q<?ULsV+HhTXTI3| zlbYK~{UfKBYxbh+kOy_^JP8$AbWv&LsOg!SqsT#;(0@^Sdvb5U!qf)83Geb-{2g`G zo`e<TmwT3EUa=wlwdB6-mdxJETrD%T6bI{}jtSKDie*AY<BLDwypNQMd-oxmk*n>% z7JlxsCOdBo;oO>}c6C7~Nk0uOT)dy6Eke7ALL1t-^`TwS+bqdNdCdLcBb~0VQeJ4# zny8m=IS$Vcd}r)j!*`Jhp3!Pol{Hvabc2kmm-#00K$jV;`!P>Rl=r3Q<QvztgM7!X zakU6-%g8r|{*`yZO?<=GVFOA0UT0Te*B|lG%HGH>+VKHC*em$viSN(pOq4a;VERaS zOr^OeX}0o7K4@VX_24AYL&~;nlE@>mXJxF4PO@Z_GiFugbund=uAH##dhvtZFtO?S z>-bNOR@vP*xLWL|e_EV^d|79;S8S7YrOSvz)@732A$FOJ4d(WnMJ{>lWs}k}%5=G8 zXY4vxkGZk*iIJ5url!al&z4U%)pOpp!d~nnZ3%6iLmZ#jHMVyeR;@!XJWCzj+G-uI zo;z2(Xd6?X+;dI$r<&+l?am)b8zv)f3TQ)Tdf6n;qss>B!@p7#{a!;c_f$GE)|UPJ zACE4b6hj%olQA|Vj)L2t@dI7~F42s6DceJvzWUd|OsC%N=!&inxnD!Of09c>$LE$! z+KMjBT&xoCZ8#eb;bTo(IVplzHI;Vvq(=`0O74S>-9`A*(Ps_xfzUK&)I*bMjt6E6 zO^x@|#cEX-`@mJ&ib+>4UOMS;{?bX$frmLy_tC&i;fKIw_&+!|?-^n%<h?HR6rQZ% zcQ^KsZO`QPg~XOFUF|--><>Y`Dg7>Oyo$1$Bp-Mc;@>MAi9QDx9S82llDKs{eSoY< z$kgV?Bg-qMklx09oy^N;wsFs9s-5*V&Ke1>TO~D}W$XxVsy70=4F0QypHuN&y&!gt z&_Q?zS~1?TqtCvg>!~heb`N+&GpC<}J>#IxyO8A@mlPE(N`F0B#(j+NgBDY<Xu9s- ztB`oYXv@xoR4uv!xFvb4*K?+0iQsDk-vIJad|}<Pm;P%(yX0}s?7N9J_fi-A>a>GH zPYV7+v1Lf(d<=Y)_7$Dj3;G<xI>EV)ea6De><`IS8A1EyO5h63FX!n7PvSX>1ouMD zTY(3nyOH_$uk^MWbQBs2ZbHXpw3qXdvhC;ud1rr<z9y75o3WA>wEOmuCjR4cyGM9X zhrcj=e-J+WD6(1drvEZ<o_-hJxNlFOIgz$PizRk&vn^J-&Ja2BA$v6RH7fl%uBlUh zj%oUD{h8GCXPy)Ga87~L;r#pC8e_(=FH$qF`&9TxYYmi~<{ZC5)~W6mJNpu1JE0r% zsLKD~Y%%<9C8gNsuWK=muAkH=zXI<gP~s~{zd^-1j#%TIjKQ2M>~_bSoEMgNt*YGr zv)bgmOPLdppLt8=72NYJHlO6(b2e($-8QRZA9+PzOTLe($0_3pUYGgI@@XSeM0b2d zepx4zHPOqN_sI9G%SWbs@Fi6#`O2y1AbH}LQ;1FNU_S8)_VSy+HRe3O<_tyidaw~6 zgWa>a1RiyBzpMS7#$p?KKXZw#D06MufpqAkVTYw+2dIrpC&_w=vF0m2?V<MQ;|JJT zuyYo3S>`wRx5cmhU+7xq$O-mHwM5oS<s34Z%htdXz3}s*{0MKIQlqXbFP~jukEtue ze(#y$%FIZ&Uu7@z>2`{=zZ>7U=+F3%2kkc7CqFHg`Jb)-r^T|*MEvIBFHeV#<&*6< z8f)a4n#MS?JC%0XB*TCH2C=JVnz+G{7<8x1-^EvsU0Kotewkm3;I1=u&in$q(Eqq{ z&8mTJHcgv>Fa4Y~@eBn$?`SN}Tw-z1&Xx@Jh1ga*bQ~n_jb|6+$UIx}8Q&%S@iPl@ zJ{B1n8t241oH=@WK~4{82R<5XxBi{^iT?ew1vy=Ow^4`vDeP&+r5*brPjnw`u)7<I z#r}wPHLS|i#9tj<k-0W;^gejerZp7XFJT<g--`YgKg|kk9I;Edt5a;0x62u)saj#B zhws7@YR=M0x<7}tvg6nf<~<8?9-EJT=w^;B>kPsV64zg#a){fR*f0F=BKB<IC|AqD zYU0ftp1IvWiOf8775p+!d9Mobimb0oKNRAN(fdG?`*qIkgLUj}yw~c=k+s8a)<y7J zwe-d|%9^;p8#%Zz-|pc2o|bNGL)+?+I_=pv)bftz%8|K8XH@H4;j6XbbY0v!*D5fj z4;Hr0Ewgy$Zl<qU>(l#MU^LQ3BhE(V{Ic$EtoI#9CK+pd+3;AZO;s+_^Q1b3Zf2gt zw^?Mjw<gQD$lj(>ej#h@DzRc8<6Op@&2}JJRgF$D#@%o}{lLBn=DRVtFMIHAjQO<D zeTB;}@u#1`+ivDN_UWwQVjB=IOuNR-lD%8|iLKSwLy)m{_8iH#!a!<{ti??mg}qJw z4CK!kzPs6<chru~AkRYle(+p$9{vLjxyc$<%l^pzoQ4su9lh3ZUJLtQo2tk33m@86 zPf9pgT{ZV$9sl3s|8f5RjsIu(-%>R<aE;a9TRlFv@v_O8olzrpxH<czGwNF4*t~X+ zhdb^@_q&}ge*-aV2dz=8p-pINvX1I+`pI;kZ{MihuT&@OZnTc+?~IDuv5Pnu=DE%W z1uo=&QDSb;-_4f#+ZwG)X1ixk=S~o5fAS2@HEDT;_V|bs>UlrX=bNm&C$m(`i9nPu zumIoVjqnvX^x!)^oI?!bb5UMtUtr2upDkZ`dk;l<`)Dir(w{L#dH)UGzfN2CBKJSP zDD!FeqncONTI2A!{KK2$O;S4(dhyS$@M34CQU=-teoH;oTFYT{UN8PmX+!tT%75A+ zb*DDfR=8(NpXK5I@W}XO-Y;pjvbTe^)eTk~`^+cw&7Y=~v|Gj(mF~&vJDjDJ^cT7O zGNyC!Mf65#et5&*OI>}`<-=ZZ({AxC^wU1yG%dRZILxh`NB9pM9g!P&KPq<ub&aLp zw~lqqK4u-u9;6A}87p^gCOJ>D*4a%wS7EA3uvM{kR<3dHX`-`n5w<Yns3JYmTg&)e 
zvD$rqraihY4H@6f9<th}pJsiTyJ*$URkcrVTUBqdSKM`}R<a>F(Xj~K=+KgjN;OBt z>z8OHi@;s#E<ryR?NdwE!E4p{fJz_7r^~wF<0U(=YJ5m2PK^6JKm0`BKs4;xGLO zJWu2yC#ZKrs9rnuZqVzEu6TX2R#Hd371TS8dc{7QMw-C8mpPdZi#ng9&J*|+rC!-5 zFY^j%gS=Or#83N6m;Yob_0WEyOAr0$=B)V6sOxuhBBvVR!!Ba!4(^Ne9(vB|?fxbG z{5{)_Q`UIrRopo(d~<~Po$yI2Yrw)QLT8=Ez|yd(`)w+5x+Q7GD(bGWx^nH%ME470 zm-f=WI@%{|rqQ&oj`kgcF0WV|6&q8uk{6=KI5yHo=9xwHnyun+vQ|>V8q^c$3>{9W zjhBfGJbG=ew8i}i{Dpt38(t9q!E}q7aie?(uB^{SgR6t@c|X%CV@aQG89$>)e@>jS zkf$4*r0likGMrWHAci1sn}%QP2V33T*Bh_JJLl@<5@%eaKgZ6<)t@Oda(GIcq%A$r zIMs`7g$+^8*&654HgbTrC!lGNzR^4WY{Pbko@ryW5-BfqHpb!+c!SCLu8fYEbw9w4 zN~A9Mbywfb#6Ndr)rsxeeWlAU?>6oYY9l_imv?)5Fh+GDYt?Vyo9exaJ*b+V?j;R> zyNd6l&yqNIS?D<HZ-H6xda>LyoHkN}kCKX}TPDqrZ%)zsCf*UgY2-ZmbK17gI?%RY zyP|d+r;X3kwjke(Igf8dek|o4zzTRq+HfKKWAQZ#{?dMP8#=(Nk+_Nh9%C#SJSKc^ z@L0X@mrX0#$QU|@zoOtThbwn8{Plw<Vhw~>Lt|Is3(?zh9;(Ed*CDUR3d~{qci-UH zk+uivVBhf#qkl&X)xTq$wVWZ8(UD#EghP2JP;Vx_!d|_9opnheJf!UgE{{up3_~x@ z_&Be>1)iC&@XN;cLVp|mFXLDI5o5R2-}Y0Ps}2MEBkZ<tUNXZy!dV2;j$Uvy)2}n^ z{iwXdTiwY1W7hG=>}03dh4$v!<V@Q9559HYDBn4=ntxlZ!<+dfhqv^5T4gkQ5*)+@ z#SkkI8z*ZgS2|C;tXftuPMfuj%-w3Xo>+UmL;U5v=m+%Cu4DAaUgEWmAg`nkyLI|z z)Ge7bE+;czVt?WpzvG<=7W$pK<@Zev_yL?U@r8&iHPU6Q3vTkQ4gAM5&yhL=hEK0! z>^XIK)wrDfq3<Jsv!DK!ywmxu({iGxMUN}b4~^N0s^z0*bOgL7`54Rkm{s^xUzwy< z6;h`fQ<Hq~9`1}gGy?l{1h&R_XXgQR=5N%W_9?!?`uvuju7`{1o5w^>%luc`j9!lK zoD%Jmxjf@Np}Ssd5m^&SyDx-G?2CiA*x1`<`{=<<cI*m~BZ_;d4V*M^>Z}GQ@cIzE z{t8~4!{F?!@XYn3vDWbscm?_JF+;u?`1}>0?ML+U4*FSqb~%;_GmepWY;;<V$eu#{ zpEkw_=aC`9C(P)(#^pcE_mA+6e?otGwX&Q_EhA@%mX<SJE6j=4zMQjU{}=SK@o(Ic zlkq-#``14BxUIZ=_Xi`8H^8*vKMbYyvo80!;U4)-5bpH0TXJgN8i28zF{;CbX9wVJ zgx-ag{+Go+@desA+}K+Dlk>+G<I^uNY-a;6IUg98AnRz@dhI^+*3H|z$-?(N=&$L( z7Mclv>AVbWZa#6@ENN%pB=!%!RG}Yh&^nH7_@jk>wvK6ain2zT<IqxIo>XZ$GEUk! z_r%t9wYP^mlQSt#z)xc5JoXYyBqmH~I4%hHHPW|!pt7Z`%%M+UZ=SM_%azzYfi1Qg z`_Vo(jpKh6q_OvvTXNW)ZqRc#d#QB%Gth}f-_4`${g(`m;auv`c?o%8@X1j8;cW@l z(ebNWa@=nWz}sDI@CR|rMjaB%+?mB5Ioh?J{j<_ektzRmgmWU1DZ;Bd?U=(nk<dTu zUh)su*DIg8a9<0r{P-f}W48=lUe?8hUp8N){EjDvE-z&lQooltVmEv&zfU1UWsKpY z8@^4xAz=~+j*SH`jJ-(tkA~oX4gJLWf%hPDnx~0N5qTzS2-r(MGh|3vyDHg?56jGZ z$IxxUbGO0^J?OJcY@_|yMt%HtVjGnryH|*9^ke1$*cRoCGt#}J={jHFA>WTAX|-F& zMD))RKkLGtvs<KY+v(B+-K<YuDSR@Fu3Y)-`Sd`B`vrC<v}Lb_b1$}mzRoa@H6PX? zI0PZ_J;ogS_5JT1yxM!y=|Il5hg^?K{eOsK&4l`e2GTwmXVON44wGoB;G>W03)q5- ze{?=hGDf6-Mb_BSt1@<k4|?eL9gHF20jbB3KZe|Si}_h2I7|G-VemP@{vpw6nGwtb z@hf_;74qzg`Ie(ezNh~hpHHl7lMjEI?acfG`_HD%uHl?hDL;jBshR4C<Tc<`i=XT7 zmKS|bKmDnF?fz#LF~85`%rfV;=#)i~6K147svUU^7~SkcYGdwW+oR&!co!c^Ppaq! z<&VZ^6m3^m$=x2#<H7o7F<%sX-iOCa_k`xF<U0YJ3{B-epd}TRZqX{cp-I+w&hzH{ znhWX1#hV9pqu2~)U5&i(pR1#jbRC^A0Uga5o<A7|?&X`#g*%kq9TGme_D{f9FQRK7 z&u{qvebIw0fGv!lNfnL%*^|klgJs>?(7pNC0y2+2@v^H$c+23Y?qBA&e8PMhIgs5L znkNY?u^VKpadu9HhRxlBErh@HE#}j2p`YKvPR$h_TJ#r(x8e<KwrZ_%;~9r{$43tD zimDA;w^wc4nwP6p_CT-W3xfJ$EV}J0THV$M;I$s+>waYF3i@%8HD!jhqx6SbWiR!N zwTzn~>j2&C6Tq)sf<9c8tUaBa7ptac3BR#k1|B7aR(nyjMI|J&Rw}S&0_zm@iLD~< z3SUqLbvfZv{I5k7wC@G}Z9m(PT>G=SWO%Xi_@SuWp1()s3XWOi|0qS~&Z$kkuWu}V z?e!N{MXz-}a~*qO7oA!5coDq02p>E=Sy{Nvks`d<Sg!A>?C7>eJMV%&Weu;8bjHRJ z`Nr9ya#owH>l1G>%9+UCEWydb6F(s_A@;ftdjq=6`vh4bcAWH$ta(V=WQ^f^aGroJ zS-+`I7Ma!?wKW-^kgkJILC4!PhXcP*i^yK7-)BjgkvWmFPiU1@(CHZUON??)UsSH( zT$7|`_uwy*HQhRky{LzKE~GB01HY>?ll5kiI|92eNBOT7_(!kJ?R|ei?iek(!nS8? 
zve?D8NY;gU+U+XduGJ>jNW8wqwxXwAwKTvpGJm(lvDW!pXj+}$(tC7-H`=QFx<5Ed z>G3Y~O?)1{)^7Ykz4(JF@F8$!_O7l&k?aR%KOM5u@JqG+K+P216+h7c|AJEnXKa6g zws*tZC$Ge=v8FVA1|N*4&+}*_fiNXCl*?NH&P#H<Ho*Ztx|A~G@cZV@Q%m&k^9H|v zB>0E$juTw5g~M^pqQ5COfa^jtuA9%t^>56F1b6YV9pbmt-7WZY_Ika=QdtI`k4QU0 z^*YzN9*>r~+2b5ucOAY5zct0_&Kj&g(^P-i<H7nrW1ZVfFFWuJ9Hj0-;=aXyFSrOi z$$#=nSBvyr;is&r3Vxq)Ckp)2eIP2=z^(T{l-`$%;AI_0;?lx#?1RRI_yvu=)XeyO z|9tug4P2C&AL<u*Z-swdCY&X$3LcTZ?x}aRFrJF)M-Vs0*sW*XYb1S(oGO_Vg0Z<k zEvYXEreFR!`m`dfPv40$@n6mPI7`g5K~LFlXV7N@`uswD=NJQzPaLGnFHLncJaT>= zuh72s(VY1)MEja)-z)!v_Tdv59A8!E*L|V(H4W3gcKWb8ez1MjraBrPQZv8B-Yqjv zj*WHAk~I(E!IxuPv$_}PK7r;kc<yk5+AR5v@!q)rn`nWn<rI3GH5xruP2|`{`u{uJ zd1KIDWK2?)($8lP=gW51*-ybc;sei+_?3{XxfMK*=eYcbz(wZPsmQii@``TiVSUGj z%^><P`)4)D!gEEb=tahAFS4x$dL&IzoP%zwTZl|64au|~#`<>F`WsFCjZ7M#dD^$n zr^koXPfWmU1Geth$F|O3j2W<dq0u6AumgTCqOA_(gf2s)#^p+#70|<NlB1KU^O%Eu zqO{MDr><4Ox@BF#pf8(j^?neZ+i*Bg^72vE|3YmnL>^tJjnd}^P2J2R4Vp%n+S+=z znrZa=&2vZPOvN7|Wu)JwFJ7KADhFLZAfIkNy<pZ_#*~fkPVf@kCBM8&{&Qp{G#Gh- z{_aCoHjW>}>4Om_`mZ>j{=cR!V@wMj77?TJU#4;XGX68c`GF9vcJmY(id;8n+zZY6 z;EUQ3YDp&hK9Vj`K6Go{3V353ywUU9puQ8jNn0{aa`I&}e&0O5k8IQ@`bf&jdYRDW zizXa6$ag>{3cYl>$ehQJi&Az#E|M;C(dgs2NajX-FADLn;3?}gA{T2-y6*cX997x* zIEvgbXd-gafM3TraY!ASqb|G-2mNHO<33XzonIxcB*dq(4lnJJwi|Wm>+s-h(A`Hp zhp};DE<|^9L=X4T8vXo!#;6?TGKu4W@g07z<+t%o#(oVkY}vAA0YCJyr|kcRuE{mE zx9q0#`SL=tcsO0N9sbI|$6<`eG*ca};`8gsfTqIRVjCIl@5VM{jndg|Q`sVS3>kYd zTdt3~ZTN7Ex>=hYkb7m<pI^7g-0o5Rvm_=+aFBO5?^z~Xq%RD7-rJj5mqF~$KaG=V zto@m@lni;;y6}9QzDeEeEq5*t;e=0hfG%G*{T5tpj{aH4h!IQulA11d2=k-=H9O=* z6P}*==i~V=^9C{3M*lo*s-rCb{5sM?G!WZrp*`rg7JYZ)slZtC`@Ou=PXp^JZDH@? zgUI9lMsprXTQo%4O{5L*Av8{wxe5AMbg{&`m;5}ai_wDvGHf_qEHXyqW`>CeZa1~N zA@}@ti@cRGvR-89DzR4#yhJvX(C@lzAQp2#=E4Jp%$0HjGMDs!#TMm^-2q-L%RV2^ ze}xCy;nBvuf#&~0rwQI$)TkWMC4%pks8O@T&Q4@KTH1z<QR2oH6WM2;M+rX|?bE+w zj~d@Ug5HL_7;YZLJY;|mPeu&3Gwt$0o8ez+Uxmm&6YNf4i>+gf`7-<-Uq;r8?v}Rh zCvG8?Ic_0*CF?1NFA4J1cx}AHZi4e>7#!mGbh}pI$Q*vx$#ePp<*+fZkM~~22sWI| zx$FKM_Wd7W@1qz8#uyVHl#IKwA<{UPOkc-sgTKY^Z_`>=e~bRn{rHUaV47?7mQb4L z1|w}-D6J5gZKTOsKq2>1%DaA^(lEZz*rfCQj#B>(z-W^@6o7HRz#AF-{V>045`w>f z#P9B5V86lqNB7S`FME1$T?(18TIBO^<#r?tUG9#HlzTLO=yH1e@i2I<95rOQ7Gn+H zb^)L5<-|Cj-@evdIb%)yv>BHNURv!Re%<y_<|^mIaJ9&K^Kdj;95Zw|J+^I_w#H%y z4N+e%HktFc7r}MM5b+B1r(U-0B4rncVg(A%<%NsVI2QSJA$)&Ho#*PK*445f<lHpZ zYR<7eFYUk(X?<a7axQ}z7BN%jygwQC-VpZQ6!zX6_TC!y{u=KRZ&YK7m($P6Ax-=| z5@TX~7hkEwm$`|l6F!yn@x)p=_$~G{I&45TKgJ$WLw4eqnJMy4^1BPzbE74%nrW@S zP1X)~Cb8!Qe<FEy{beLE(ySwP@>}AM<ct9idZ+6<F29_Q6d(qP`L5q*4W6Cfh0K=w z4u~oA4!7rA>XkJV!L^h2F5(=PlkC}_b_keO3umcV*dJwOpHu|<r6P$R=4|UVQJgQQ zd5trm{p{6~7|~;#k0|HR%Q_8vyPQ2!us>y=GW3(Rt=!L|yn&x^hijB-d5<`awoglE zx6M@9vKO$5c*)Nwv!6M827Xr~2CZ$fDp?U=Z{QmD{l?zFZv4*@|Md}f7x?jS%HDs8 zKa)7Hb@=OFV2@XY-J!>U#o*hLIItK=BYs8T%ih{){MUzKSn`NNI;z8pZAvE&txU!` z>qrt8mFHt0GGo+9{!-%5PH-lWjlH}&o}`KYn>`UF!oNnDXj5F2%mLO|$IcL5T4S9! zqfLJ%&XDuj*5I@MHP1F;bEF-1;7JTvH+EPrZIl==&Ndr}&p8cF>xj{Lfjwq-MUOrw zMkj`}npD^9tMIMsa0D;ro4U*lwZ$CcRZa}ZHCl=@Nqcgu?CDrzO`0KNRqB&^dZO5S z0A04hN9%~`tAp1%v1hu73D1T{Bwlx&Wn4uq{d)i2r;?wPxS;)4&5otLrK~NO^J6FK z`Ae<iDi%hm_zje~W#3cD(fhBSeTsACb=t#2!XF-RIK^2m5An1k%M7~Pph+Jz*$7QO z4)yzKaCOk{8zuhF8dGtYJ#LRWCOCEw8&^)hzk=;?9clYHW9O?{tV4KChZpMiV4t%u z6`TcDFKrfHkT%y_qt9srXZq-EsHY9m=BL2lzJJc__t@)uBe<55-yvfzM57b*=TUg2 zN6P8Hp(!*I8Z3eabs-vL=`>(%g1q^_=>`X}$*<AI<(O&sK5(S0(5;d2^e1pOfB&87 zdmG;m=zKR}MoMg1&NXp`Ik%7ea?UT~GjdWA({fTAUzlaoV}|ixCKwVsvo{1|4KX7r zF@-s6;=Y{or%@R>*Njffxjm7X00Y;gTXK*aB_a=vG&9^r6WliJMm@HIvN56jw-bvZ zZ8n#A##E*TT<Xaio`?O512l0Hzx?C{>aQ{7vr+#>^4b3!nE7u!Q+OW==QW`+HNX{` zud_~A)dO9LnM!_;a%-#;W?W+_%embOT_T{%2<T#mF2{2Q=&~l}mUH^>4ig@Yv}wf! 
z@cpVOUpM&a@+d@WGmVN(`7+4&l?%WtH05)X@45@*`=Tjd4fzT#kT2Vm&kapxUm%~; zl&_F{S6m=pnkk=+d{ZxwFUgdz8~TYZ9;)wRO!+d%C+pm1x^dsa0N)hSzcM#9e?QHh z8e^<w@Lk5OjM<FPSY5(AFLeq&EXKQybd7kaWz;i|_*vs_Nx8dVDf<|b(5n;C&k~2h zUJt*lB`%}BRQ&CI*l+kZ9&03C-AAlCYaQ_t<1J^v?C6O0iY)z)NuHLwqxy6HgL8QQ zKla`|JgWNI|KEEi$ev7qgj@-jNkr|;0QHE0Bo)g9yo3a+m{_gWlK|FE63_;<l`1BH zZ3BarAeNT4hoHS=2Jv!)n)G-MiMFRe6h*47wx<Mdoe;cG1QQM9_j>Qiz=+uM`##_2 z`Th0F^W>SCy+7CWS!;dPXMNV?MP&BJX=4vE+YkBuUhU-mdhY14ukKre&aDR7>^;W1 zr?$R$e*^wc>`P<ttAC>Q*5dbFsi7Whwnv*y9giI!noe73L+IwmVB0~ifvO8=^F4EP zNPa4I8Eilj%Zy^Kr;VXwcnt0PsS~fSc;g!LW2#>o>*Mh}qhkvELG|}w7q<tyINsTF z>BKJX0iJi(K04zC>L%7%gl+cY#OBnv@TbU*f0s?9Y};jv4eo3o4V(shh}{M52;SoB ztZ9edOJ0g1hs5Qj`SewSd^6-m*N?m;d6F|~-+b|IY>1EFeFZYk<;XghA@f{{>@yqR zaPmqx9kwqxd&T<4v1R(djjP(qz5fB$-j1u9lJdXCRlSAym49H4$clSEO2>xew0s<2 z#cxOQpJGc&=IX*<0$I#IbI@#fl(}s~mn(Zwd>dQd1Ri#rIdJqyqY)l^FcxE6IBIg& zegWn1rD%!r4tRC+RFc1$IxDg3v55cSAvX{>TqQsLG|FP{8`)0#*^tqYBeO3`e^ICG zQe*X8X?C1{<xA>V5^saMeS8oiL1Yy5tMB>XNE3VXK)UVo5$Tk!;N*A}<3Gb5?>n@i zI`GA+9d-G|NsK`;RNu1ec_M;Cd|gd@T{DRfdn!-JN9oACD`W9+X2>pW%!K}9*O{z4 z@!jrrBHNO81wWfC<5cXL3}lMy`i3@aav1gS?W!ui%g5<Ez-KvgKNJ0Xh$%R9j?o|= z>jK6xbe8et)6lPhEVIDTguPZazS3Ed?QaZia5D$J7YsIoqw)8T@vfEM^C=&~u1odX z_+86y>8zFC$w2op3tNbNlqC)uer&Uu%PerXH2l%XOZ#nKFYxAcoRP0r8?gE|_xi6s zb_KRw#HqOHiUGwJM<%u7_M8J>=mJ-nYoy?<(cq00`GNfIpet}=YbWL2V2u=`oweHf zU%cz$SpZrbi7%$w@6C?U813K_b!r{rYw#E1IDB5LBi8vs;qwr8a{*vTAJ`Pf@M?e5 z#)HH2X_s?S3ABj_f?rd@`fs-T{B#VY9rPE^31tyC$n!n3p@(vwU8Wr)$MZjCgXq9j z7K=&eYHqUq@3c2bf2r6yvL9@QSF2B-IclnV0<mIR%!Wg>(~Uo-;?B&6|LShvf}i)p zvl_7_X$9XK4`5Tm=Ol2d-W)d;S=Fz6JMsaBM(sP9o%>I0P`qB*kYH~Bu2}G<SPrUx zmcPbpVT(E*eGvY6%1bSN5Hw=GVRPV>vIjxnUEhh$?s59?GAHp$!EG;dkuTLM)^GZ? zfp7^t+M8k(?Hp|s<p!Fsc?esqY|7N|e#f&#eeF{><Ysf9DI9(STy}zg^0BMsJ^pAF zJ~KP?5ob{O+u`G3=L+iuXI~}G@^*)-8+)R@X5uXq@1SZs_Holj!kcFsEw7N9c)GK? 
z|2q$t`l_AYy7|&eQ*VdE)#u`SkoIfvb*num(2;E#-BCwoNO#r#rJjqfN7v&qbNYI* z_Xz_3*!~=N(114(7j~QK)X}cn$PU%KVTN7UKNXqD?(Z<hddBD-xT;9=V;%$AGj@S< z{dV6ueb5K^r@7`tb{??vX%7gV%+-#6?<Uq|PybNiQed=zTxL6-J*V#+>O5`JX$(fc zRlSoi%B*;rbr0eTSA&n7+EzSO_=O+5Z&*z(PU+aPo0_gU6MW+Jb~sWGKg?Rh>#MGt zGix(@r;FIrr^9<Q`~9`FtMj(*Wr$n2UHnJo&$vFwDd_CBeS)2f?h8*}r&4f8c9+Tt zRL=Y?Bda2e?IwPDhrfh<)5WZfU@HH_1;Af-;5&%3@buK?jUL0**9*@ROma+zn>eHO zb^K1mtHqy!b+!%bC)f`^a^~QVX-vC;It3pq_At23b8H5uX>QL0+aZUee)sSi#@Fs^ zpIU=#@Rao%#!<D^g%8-f|7=~*5r|GiGjYE|@d5T!X!I0+Q$8xic8uqf`DebLLE=kG zhjJ9ZTjiXzmY0@@=SE#dVIjKXEdHB;VHtFSt>S=uLEBgp`GD4<Lvk?|+uxITD%zLF z6K_(s7mor<$-hn1ozK4k-Ye&0j$lAp(dtL|V_D|73SU=AD2TtUX!Xx7W4m2nycM|4 zL}%4$;b%x4t)&AxlU?qfo}mqj4bsV2f;_jdxgUp2y20rGjdUMNlbbp=B=tY-ML$sM z%<h(~g$z-(i?TDE*`a2h>zNCkipO*uc47aPyL{EgGUC2+-aBZpcGifph~?rxjsF7v zOZi`37RjX_&5Kwn5o;{_B=Uk}?AQ7KJ?p1&r}YkPD8zT~j=#m?`|8fAf$u@$7+CN< ztz#{)jIW~?xCeo!>W-xD1Y$a#X-#{8Sv+Q^^mc>rzqaE=MOqiFUA&EB(V-2*C9v0S zC!e0$8O6%KIrN-k?@{z_A714`<{<y=O5*Vc0wuQmW?Drx_}>N`{Y8?0rm^q$nEge) z>D=2A=fOwL+L^9E!%^a|a<(hXU@d~H4eU+f-+wb(e9&7^IPxnqJdS7b!^wb$;Nu*5 znA}N%Q_ag}I0IPMP!8TwsQ&IJUzvPs*PpdMH|A5D;YSXPwJSK$w)m{@!!kX#zin;A zWc=mG8*{=((OEJtTTV`lDX-Y+npfKy@KtbbV?U~43^qM!4#Rk95pMvyqZZ*LJONoh zP$Hk|c>MX#%f%#_p^3WyFa1hJQ~W^}YqE-%OHuB0L^EMW0?jbDpd+=YY*hYq`B7_3 zKgN%{GX;F-U85fuES)g6@nO?vnR)iSWj~>EOIcs7d5}2YDjPkXJ^efUekc!HP;m1{ z*uMv%fdTRe%ye3P&FI~OFPkm+5B4GJRu#buX8}|3PsP*u4(nsMjBV1P=0K~lSV6J9 zV-mEczU~4RzPmk@woMGSCwk_>9%Kq*svQmf5${&(#(r<pq)lJYk#bY|k5?C0n}zku zp=+(<d4f0NtOe%5>9obz4l=LWC5CJS?6zwepW3flJhE7Q3BR-+wXBP9y)4O?CR<yh z2|GrOpE>so*Ok9_@`vPtr2bCyaM1nIGM7=p*p{jd_07Az8cW7uPqAR9^=IwVwC>Rr zb`E2k9w!28!ChlWq_MMkE`N6EyFV`%(9m-u_6}Z`0iI@puUXLBh?cngtK81Y?JIdn zxlp{!ua#UDZO#eBlJSUJ+0nMrvA5NE@zz%74E{IH*x@|8zXv%v3Jv#6GzL=s=fnmV z=Q-ti@GZ}8>A8rpe*w9bJAq#8f8o7bdmce<g`VTGawoE~<l}f8f-ctN_}yP+jMs(- zud5=?_z0Vp(utR$z2)@S1Hy0Nws8DRw1s@oQ44Rf_qJgf&^Igt`Z&k7LUUha+;JHY z+PCpXYf^{&;^ItE-R0}sIlKq#gFkdaH+nDo?mG5t?2uX{H<d%Hb?l)T_ys@**e7?s zJZHHpv>rO}EML_}Usap@vG|Ij6UJ-P*?-YyfN_94#2YOQD-J_ER>G%v2=c7WLmW9p zJDvH{y^hrB;@j{9<uTkQyp_BLAE{bE4ideaDcYbf#yPMmNqe;`S#;4j%ug)y>=+N@ zeHh+U<}ix56D|rOqgcSgh0Lou9@DbpLW)00*C3f(>mj%%^ILpi4LP9ImqlC{)z|w* z-gA#v5rlsv@~zWzlW1P|{Po0!49?c#(It#uvHgJAHl1r@xvw)KllGrEXGkKPXv^P0 zTihW;($VdxU&g&I&NI(+*0spHtubWgqdy5WUDM3|m&Lr4*RmO&=`oWUpyw(NcJu-E z?#;BH&G}gJiE<>KLtZi`^<p_JPvxh(8-68ToG<x~Jkaca)0BrydzPIuli!1qvv{UH zmLpevn;d1egG1AlE9FkUyZA2IbOYyFwb4eb$@lHOU9#3^&$;=~t8`Fjg0bjYIo^Wt z9B-^^7kqIs^T|w{5Bs7MTNS}QG4JCQ!}Ad?$K%q(^R_(1_>_Z!xFP>E|A1gN({S{m zwAjnuKCplo2i?S1K<<m^4npxIB)irxO=<ZE+8)9uLGoKad&Ax62fxM~j^TUI9OIGm zv?iRPI`9oA7Zv+2`j<gxx*z`8!rBes%WFSBLhPt4_RrU!<&Fv6Wf}fDQEVVqDW(H) zH{&rK_CIDcl+ed!)=akcJMn|Kh~J8%+rei6=hn51XXp8`7@m5jyP4;i!<wxrExzMJ zsjE+GZye`?8szi+zahpfYy1-JMzKL_0}rHg3j%+&W270=1dHlP#KdI&Zvf{e`r7|2 zXKCtxkMDZ-Cbq!)e`_?nMxD(AoTnW|LymL9M*Z%fZP5_EvFm$?A*S}?Fj1S2AlvJF z);Qj1I7B<UcpqdB_T4nD#mBh!bJx{Lzk2SwY*dTaen!qz>;hD$^9cQ+yKUn0XY{4< zJj~s{=BPFL3D1J8lZUvTUTDRJ%g<Bn*cpj%5nQEr=pxpf)_bDnxz22$eAWIevq5t2 z{#j;24WHu6`=1?tM$B8ChXuc#@J*E|;J4&jJuAb$@FM;PfW@~N%l=<+x5|6YV78sB z-ERl_4Rpmimubw>pY4Ab7$c9|$TQ*DQa*2?e!L&8(}};F*dTZcjy3QI&HY*CDEtuY z)z;Ujqq1)&l-++X^We-$1j}jC*^3T4_!my1+v%7~ov-n`oKLl-cWUc$e(O`uX7gQr z$A4=KU*_3QhRa^echSp5(3NyFM~K&}dDR@NF4lf~r}F7K)B9GU6OeAw<1vck@qP@> zEb+X^r|0;nV;+h8)f(Q1hzZm|&bp$gBfsb-%Eb3jdk-KMO(}g#PBFAUArIBs`;)5= zxK;gjbWD!pd^HeUg4`uJ2U-YDM`sEy1fkoDs2`U_kCN}O6aN;qeI{8n$o>`NT&R8M zAUQ!*4!*FY6WlQ*lbC5!jaFxi_P!s`mfrio*W{(C#Jls>)$x2j<5K<Cc|Tw6ofFeZ z=)A6b4Cy6wHmG5&i|w+ym*!jv4qDuIYfse~*y0YPmiF04?X!dS%*4G<@KfLF7Z_Dt zGH?QXiODvM_2eIXeBfNe5gNgNb-+06f*(~knTN~3(K)Q~`4a-++N6IeT2t5<4jbO- 
z-g8UCt0&(Zw$3%fk;z{UpEbb<KUz35ESbrZ?|h{?-#Gjx{he<F27b^~-4Bio{JP20 zAAkR>kvg4s9Y5%-Zs%RcuRA^Mdgnm4m@`<qk9Q?MAXd3ISX<2d5-aE{{`Ci*;&^@a z@1Hy?Jh^aoc)4|N_`Z}zdkh*+@?=LirO+APY<)TWi<H=yTq%KdPIAsoE)0YV3|IFo z!xcJzLMi3&e>is*ZOx{w--Txd9EYWMx@CeH*8AYO4*PxGWT#zbQQ>{Da)x<03U6L+ z-4j;X?@w4}mu)`x-muE9p4=E-RG7pVW`ut=;gWFMxnBlOv9So~g3;&wD`#eWp7Ap1 zHn$ym&iHo+bFfkZ{}505BI~jTf7RKkU2Q=lrMvde#=o!TGsNdvrf>BX=A_ko(EIN+ zMn;Iu{BbAa)pv_?alkOTZ$aKM@{PTJcb*loKT|qie>b)BbtC1<5IM>EjJ3-*Y8>i^ z95CIU0^{&*{AAUq3mHfK)*oKBuN=ER-+A2|_oS7F78<U|_xNA<(x%t%d+B={Ux!a< zzUSra2se*!df7CLKep1ZCn@kG(Mn%$+dfpKKF%g?S|0yBBfHu*yve>kTedJx^f^h~ z3ynaxeim{%b})Ba;p5y5Db~FYJ$DpXZN@KI`+UL999v)B%Q<0+Xc&JU$yq%sIRh-z zSs*<!#9hwNkP-d_Iaof9$V-R&IJ<7-K5!KDq;uh8<aa!VZdBj5@IA5qyYYJH+NQ9d zy)ip}zeC)4_CV<ZiN#rUjJq&{&n|TNE8v-GtDtyv%f|nqy}_ZvhtU0fL?4;pyW*TQ z@m=T9jjx?hzTd*1F}Hj0LH;qay@5~BE!>;vjH)|~x$xFb^6~9oV3eSn>LB)O$)CUK z2>%G(?nk<Vqs;GUrwqRP3A&m#oxw-Mz9XaeO@y|w^|0UhkWplF(*(}zylRc&tjCy= zDPK+5N1)+k%3||bQcbx7%<1C!MvDR8DP>G|(ued>>hpZY1@E={{{{UUe=)+kTh`oL z|34Vd*`FFuR>W~~JXw(@jpv9F&g5?6E{zd5eIMM^{mN9WF}N`uoRK{11@`ef6x|~o zzq<_Fy8;}%99+B%oV=7f%GqIX)IKl%fcv~5C+FSUiP!zjF_cNAY#MdOg0Jb^ZxMgz zhgSHl4X*Gzo0311AM|=~*`iP3s`B1n%UGk0HUDlcH$ucXf3UVLSgf_5i5<;+)}yh` zTP)r$`x^XB_C8H4D&X($qh2d<%@hylX=IY22iQN5QxDNrbVSp-gHEG`#cx@&JS(I! z@w}qt<fE+bGFpB@U*X4#!;wcy!fnot?OsR!^b~vjbSBD*+=2gg0NAx+le^uK6;ix_ zP02<BXVEH^|8x%0-1bq(Ds3yTAI?FV(;s%EcjE`$*9q^+1*eGJJMbH9vA_@iGmQNh zds`>}zp>|&<$s!IU*$J8O6Vi9{7>=wN6=0UXB5RTZ(|JE<W11rALX6kfz5JZU}&gN z_F-Y?*ZO61to&X&?+V88eB`C9tIod1@2+H>u3^3Av2IthwpWD*Z%3|o#&iLxtZkw^ zB6=>EJGw<{^(W4tO(TpV#{81;<Iye4!JS+F7CVFW(7s)k=fbV$DGO~(x658S#x8r~ zaM>p4(JssLEZ}W_x9|SaBiOhr4{xmA+E~3ShwJ6X`{21{uakXu6|3JrrbRMptld<5 z3|nYR_?;*p3|2qG+&_I6J2X4>WL(4#Z5??mH*nj^3H^>#W8fKL1mcr3{mvv~I(Y&I z3QC%;xe7afgr9BP*OuG~Z+i$Hf?nKTVDrA%U2^`}#`^8(Uyy5-e3Z2*cN+X{2*3AI z@L?hCJPRCW-8`zLXQI(!HgSeQ59={f?VKC-JlbRLbT9n}Y)WcID<`zEU2@Tx>R&xv zU;GZ;fUWz|T3BbpQ<1ME2ko@=L0|8e&TcdCWbW8+o;v3aR{ze<6QwatmEKf+vVRJW zhrz36?#2`2Q)VO)M}xUbPt?Tu&_a&!z&Ev4E6_>mj63eH=!C)R^~il6lDC#|L|n|_ zLEu)yKR9B`mbz;uH%H`U=uXeIZgE}B>F4bX?jiqZ_Zzbb)!MITZiPJ4`RuE}zKQtv zO|+w2s@e-1xMNT(-P6z3iTIvQyHDAAk@U#zv~fFaKE=8v;?U{y7j0Cr=QK;MC8w+4 z@S0mZHY4)V`zJQs0UQcxLvwDXJ>>C$CZ4@CT;@LdQQ6<CEaMf7bpHrkwv`bZ-`aY) zY#Rn1fi|_SiEwFu|3tQtLRn;r)AO5J%td2cz}n4b|03o`N22{u7BQH1CTE2*k!b`Y z_o0~GLo0kt^2P7K&-}Jk*WC9+YD;HbWJzp&G9qtMR(#@)8yw*~bFmdchqL?Gy~JX% z^UXxc(~P}~9}9#ZAg^~dc(#as*Q6O~i_DbJ8v1-8%^27}pS>LYQ9aMPkF&R@871II z)g91<4|@)4v(X|wjPOppU$`4zH|1W{d<Vyj@9#fDKGZ~d*8Jl%-9}z$M-pd^06A#L zt(-qHrX&1!=sN&?XO4irSr^gwb@-0N>09^xUe=d;4m+oW=$tdofM{K`X_%Rz^T1h^ zV~wxL;m+wJhs*ClAHV%!qeXVI*Ex-%g|vM-%`apR|2&%CjeJFB737Oynm<GR%HjI6 zMe~#mf_rDC`5SGTmp&>+^PkR>IymyOe!*094-MJ<whq(1YzJ0j3n3edqu4bi+OfoS z+`^YPu-#R@?I^YshK+k>q&*k=RBQ`5FJ5Rc##O)w|Lndn<5&d@ld-u?KQH;GHKR-W zdB0u14SvUY!HcX@4RaFhZ04*f_!S%)-!jXa*3t?5--NCQfPr{Q8*TR@SBkg6Kapc% zeC;T*Pdq=>^~~dpI?4=WYt}fXr#FV}%smm^b#flF>GmV`+gDl7A?{XIF#cx7Z<tx3 zixuYuUZdY}UW4yj$iV*&zOY_=Gd`9?o0-#jn#CBJZJs8ajpZ;Ao*8CF=;~qo+Rhl( zGX~vnPej)cr-`_p@bor?|HAhM!I8FFnWOO3;`5f_Hf3+Mkogn>&rIMS>oYU-ufy+= zds>9Yn}+*TJM*z8Xk;&F9BT~J<elVIPsDlEU3v#T>i_gH!qc9xTF5Pg{fCug_Lq-G zpOToDclf==9xqeM_{H~x+tM|E_dWRDgfsI!-EWuXtR<eIBVv*le8ss<#5y%1Vf288 z)n7!r{#Tj5<n-UXiainMDJQz|0%NS?2KJyV>1F-ozFOae-efs?A@J@A8}BY1JB)WK zlNq@I+`9ulEuB*{vOuZP%GoE~-wJ=sPwFqyT+1~dczAi*6$5j4-}i-a=x<uD5nrCj zzi{_0>d2<kh0dme|2FD#wt7-Dvy;Achh#JvTa6}@f5%p1LenM2C9{qPGdEtd^Zp&g zZv|E<2C>4SpMWuld}QlOghv|BrLUjZAUxj9d@b}4o@wMDgGOTSGa~Udiu2arQEq6Z zgBb3dKkw!a8QK4U(~unK4>Pt-=B60h&D_mJJ?I7XIXh4?hyJ0zx-Mj#1H{hp8jiz? 
zfnNm-=J3D%vCHA_R}7qUhPf|C59DR;wZn671t;0VI$Y(eujzF~me4L@JI_+McVgbV zn75ua@oYIf;xEv)V0rxRImiuV<Xn!OR}%MYOwjC{)z#pF?hXw4(44r>m%W`0s{v$d z?Z=MTzC(Mb<dDzH!L4=91+Erz>&3>xN6hBwOYdE$v*a#vUMuE&E#uT4qPV%q<g`BG z908p>{L(AVab~$UeamdPma_@Gu45uFTTT7|H}%U#K_|3z5MPEAGdZ*gdtB~1s*=g8 zhP>~-o;m~0Bz#n4$EDmaPUM|&k*EJG8AN9p<=D<3Ut5;nQ(sbi75rUxHi_>p;MsRM zTkF~BcbxGY+MjnHF4=E0{)ohQd!i)$3eLw#*qAtpCF+2G+WW6xa-jFp(st!S4Fd<= z!3L2Vg6CkD%vhe|nflg#djD(4+vFqbAunc-JHR&l$AeRW!SVggKF;6TKcLsDDDp)* zbC*r=k2!1fJJ9{{zIj0mUdh<qRI?YfK|=%NJjZs#9c7Nw$5`#tvGEiA$M|(dWD&5^ zSoYTy7Rwhehcn3Mwdb>O#-^*!z-nN4AN}9@yPE#gm*$mod=vL$<07`*#_WHt2ii=$ zkT=n3zr{ZMw#VrBDlmU_tYCf?cRR5>h#v!Y?tzs*c%Tb@-h)mg>6H^36xSV^+IEjK zxBDLKe!SRw?1aC0XUAZ>3mOt^S8|4K1GX8!Hs8jR+{hy84^sag@+{Bb*<Hv^PXq4< z8w2Z9FL)B>**46PgR8bVvO}zq8~AtpY_!#W6nm<EVE&_P;;_$UoavEKHY_gU%%6jN zmFYi<o=RiN1};fXV{8mhGbo#j>^Z^zuh41}I`09-^Ic@EPx^<pdyz5V9n9&}Tq4Xt zXMN47=c3gc$1v`8*7X25@&<PqZz31G1@C(MDUByH;-cM7czN^!@Q8N2^nZ#6{Bxbi znPiIjjPW|w#|sbph&77)uxWqNe$>o(cY<@m>t0|O$N$7}A7I>)aiWZEC%Gi-I@nIf z>u7EZY})bowN_d;gE@=dnjAA<-HEQ;t~2XYoqyI3IYBn;gy!rz!!-B5jjfA1lGFd+ z$T;G$nT7*+%XZX^y!yuz8^{F$-_`o2Lr=5d-KAmC<?!98{~h*X$u}j?xkcSKZTvQ& zT`Tf1&&~w<tR|EDO$R(kZO80ejyRM2Itw)X30lvyD)-kF6}Me(v>#!AJcVP}rtp48 z-9_BroH`fB?VL|PV`b9MG5U$^9pYbE%**M&o;JLJUtNO?|HO{E>BZ&8$LhYcBQPSc zb(X91lJMj;*Q`ouU9xJbv%P6n(~RY_md{u*YsHKQzpOj;7!2ZjvHT=I&qwl94%FD( z#B+sTvhkFi^&a4>eQ8){2_KHxQN9VhC3C#`cjOfKtK~!{z&Ef9UOC^ENwWRl;hpx# zPWWA`*@is@dJ6c-Th45ME`F#ey4UO3>22<1AJ*NNeP=~E16@?2j?tvOGx<TpeIRao z8`)!a(%!+V#vpr;@70;q@>aGLe#hhbj4n|*Yip5b4>}z!hn!98w!k;SEA#q$8CS{& z`~OkCT=BXcEp2Zad$(+v5e`3fN!T<4Tkk`rBS&lGzE)SuFR@+Myp$Jov-6Uz|0(-D z+6=G!*M8)SzRl3r@w=}M4_<W@I?yCr2YR{2h@b2cbfCxYW}MGCTOR&lN%$8Vz7+n| zri;%wPPN%Pte4#Kh`HCr_#S1vZHza{^SE9z^6N{&k3K#l{AvASUi%Bct!?GkhxLo- zrjQMez4Pg@#$bC@_=v63i;Xqxyvf!JDxXXa<F=BFd99qIJg<<u{h2Sd1o7M71)lnL zrGF-e3jFX1BPrXSbKIu=`^ZPlz)y9o@cAvi%^5A=$>DtBeaCIc?RxDQc0K$1UlYDZ z6TW|#@crD(Q|139;ron)@7E`Mza!!M`h@S#Bz*5l_&%8MJwGd<uY~V`gzr};e6LCP z{;h=XO$p!ECVYP);rkyGz8^~Xo)Lqq4Igj9_ZbP_uTS{il<>VZ;rs6rzDE<j_a}U} za!$=RC*iv{;d^ny_p*fV*C%`rCVamu;d^uJdqDW|d;*+*oKWV8*!L(rxhAD)EP0~g zM^<Fi-%n&Gp8q`Gisi$J`6hEVgf3<GwhF%F!>7TIJ!<)U`K4KXx1*D<<s7Ut&Ngym z(EqmdOO5tKecflz#Ac+LKE$(SyE+lND*K%FId2cQpXvV@?KdGmWg=%cqsO=AVkg5M zzKZj)=HyqNi?fYle6gohGL9hUnK&<xpU+hef8On~A#y!m-R@;h7WtE0`KIr;<XGuH zgMQkZ!5N5u_WJrdc#-Zh%HXf)tL7Eez(4N2_rWYa``P<<^6!F|bMCP1*?ibJD#mP; zYgaCEG<RN}2syzb2RRS)u@@ZTZ1BKkf%e-d@8OPi&$Q&9zCiB6_e#Imy~lKQZ^GAM z*YcWuw_awn??R8oJz7UCcKQ}NK4kg6UdE$mVeB>H&w$xH`9|ECrSn93<aci1=H$F+ z_bVM?R;2IpK>K9sRhaHj9sPw$Cw14GR`-x2r!Po<ljyIP{(`%wg<SWSw(IWY|Eyo# zb)4G2`-k<~L$jTGqZb*+2X7n6-G{{;h>QDxWMmFEdrb;^jkD!O?J3-ySQA>mVwt!6 z8*+orY3!*c=NiL5f${cUU<~{WA2{L*=^nI0XKNp8nKKYrSIck9`OSZuFi?X|bcWyK zOIcD1j;*@N7#k%Q0R8~VZH6C=egCu8xMy68bOExZw#exiU=LPa;scdti*a__S8hlF ze&{Ruz#Z2ockJ7x{rbe;Uw%(?@nYbq{NrQhO`$G)WUX=ixE2TY4;qhfs|h}>zKmyb zTXg1@TsQ-HEvP#`;)ZB{>sqY+Z7wjV02bxIWDc+?<IeAjaP*?UabRZij^2soKAkr` zm!)3lW<I`qo5m*3NljDEuvHUx<tBe&cdmSlwD<c~G>z4B7tiy}g?X-Lhh8?oS374f z{$yWdEXpUglQEmAjd|=PYmt@KI?kSbTzLme&zpUGVgcXD-{>$F<du?V&hU~0)6TDX zSDr;KK$Cowz1MBZHOK|a*>ml|k(+W$CSE~ItK{Td{PUGxx(^%KC^(w`m+uZ5!1gk} z-!Rgcr@XehGZ2iTd?yxjN3S#Ak3FUvyP1Ym;uR~8>+7s>^a7(}Xd-rUj$HS<<i`+f zvFEQ)Zauw=lG8NGdvXdlL|6VOS9#uDX~w)`<n{Ap8V!@WnsXcZR9?qEbffPQpW?lA zqvHe4Z}1Q4ch*{^<7^wQWPEGmwsIPqbj{J($c^Y#PL7w~7BZD=lB49GdXjstPIRXs zc#86=<LB3K^uIUd9-<s_t#W1KvytilHRa%6+m{cQo8!!Ob3fjoesH`=GsyX&I=z(t z8D)Fk8``$xOU!A*rrcc)L$<1ZW9`j(J<3&;VHDv*(J+beSBx>n-awwU^2VgfIkaVh zv*+AcKN!JJM!JqK<&k$182LvTV+(Gq8}u(rO1I0o=I137qa1zL+WO^J3`EFZT)xaq zuP-oK%E9pnI@JZ_+7J&b01q%^$etq~LduqVk}3<*0>^9B-?;gC>Tfl?_)KkqS9V(q 
zhufNEEwtMz$XSqgaAu&xK^|(=-2vRFUzMM~Aa4Yp2gx%~js2rrF?;x4Kptg{MeA@7 zc(U&79Sic3)-KGONSh12H|M#2JoIusxtsC13!Mvomoq=#i~i7g%c}?bW^=AI?#gqL z1A7b47v_8;?>y=+EWSCfp!gekKH@&>-a9{kVcsrmFq1fki>F*#JU_3AXGz4l5-(rP zbB$#ZZQZQ;d`B^sRzN#$?1ogIakg9r|IEVf=ZlQjv35ybfp<Y(e*VpQ<eIGN|AJ!7 zcsN6OA|K=XH<L0CTQ}#OOM8=OtCw+T?rL*Vrco@O<b~!8K8c4CQU6ou{}+_sI^6dn zVoXn|om4EJag{AkTa&v8cx__L@<&uW$xB7UjN$n_?5K-19$-zIiUq&xjp}0VwD->J zB5y4FhsLO}onUM$$a$->zl44h$Bt~C5v^pME+)P#?F8{@DS*~@V6!<F9Z?ZE1LEbk zkL@oq*3u_q7cU@JOT<U)DKGXWg1dBcJApg=A?*R$?Pa}OtWz(%VK-&07gE!7&Yc5q zIf(A08rqqzcn!o%?m-_@^UBaRaz1qopi}5}B)ea8jB{tujsyHG16B@TrLmkNo>sCT z&o#W>Wxz`7-IQZgaoNzY9Ggw;k@Kk=Z%_W@efS)z&jS+!9Z}+e<1^KuxZu_1xcc%Z zf1+4ub!BOra*vH%eZx%ba>yCi!Jg6KX5U9Q+fl}TBRh~67_aft(6$4N<=8}c1Z{tt zXEiSl*?u94gN`G_`eX3UWAtS)4x3(~Ywj_#_5K;nnfIOKz7<R^R(-y!J#2|O_A`$V zYsJ~n|0M64o*&vKc=nCto(1^KWFFcN#j_h-<o01t?1NwGc`bX$!R$@Bo4(*)0<4o& z#*3YMAI~P62K#4n@=`O!uN>IN&=Xo2O&wZ4%R={&3H%v*GkFuSCrR^Z9f9R6)_gX3 z)q|ox)<N_-t9W6an>7&ph2u5Y*<#y43?5fR_}YX1IsnaRe_4Z%Vi2DKeb48+%3M#| z!Xe?!$Ic?j2Iv~h2z+sU3I253kpXu&#`V<z2k94P@w|uf0Y2wYSLK_*wLT|!mS&7i z=8jgl<w-M^^g786%bwwN8rLwNj)k;Q!k(hN)!%?jmu55!I9KQPvOYofWbIof@$a<m zed>=jdKI{9?pi0tu-!>q3H>f`PO!^uvB%|#Ja3mnCf_l%Eeb#EMQ>OJTnB060P~bD zS|ze?esX^iwp)Jg^9Fj+(czEPu?ZYp1D(CkxLoj>IL+EQ9PkD5ybTY5m&y-Q{>m0~ z<l|p7240o8__y~{_{1(~--~~TbZ)Y3kuLiv@koJZ+J5d<B*#du{x)*8*5*a>@y5p! zAInzk&R>C-ONVz5JX4N>0`$IuO9-8$@PDo5%lMBmKj{#SvyAP}(3Zw7JDUT%Q{QJ} zPes1ufy_=LZCdU0VoUcOl5?|>mBHD1`r64jJ?4b^WX{l~$X%)@JwhgH9MidYBAUxx z_-{QzAF(nM>@q3B2iB(bsiAEezvPn4^can*oZ4|xCJygG_8`$}C;HB{v^Bt7GzZ>y z90jku_?P4pKY9r;GoBpU=FM(m-;dq@6`*H`=emFL<)IBRJC*DRaoTKHNKaPtcs!4_ z3t2Y{J(4T(SL}UzkdI{VCR`t=P71$7ogU<#xc>Z2em89ecYu}p(;aaZ{G8l>e&NtK z<UZMDYd$YBHo;f@3HF^^)aL}r){(B0eNCXR%*fG+_zs@zYq)>f{sYejfQ9<feUIQ< z0_?sMW(?>%2Z1THS~vr~WZ!cz2Hk-?Og(hie&M+0lL_tL#CY0~I|O6ys2c*%#EcYp zzI4Fu{-V?0ts*b{Cgh%Ec*QIK@DzWIvZA&4dU46NKL3|P*f=sa)+ahK_KxJ59Uklp zd+-mC4W3{@JoNRQkFfT`W#esppXUb81t-C%9iHpuK3OnD4z=wLglB&PH-rPNn`32# zZ?(+PgB|rE=CK_eebY!|+CkPuZGDwBNW>-4?xXME7Ym;ekCIP80lzl`w;tABV_e8s zTK|pqY5N|0L`5Tt!KVJdGTfJFQRC2>J^K5hs%PLcDi?p=&TsoYW6(UrFEy9#!{tO* z7V@6pTE||6jGnRMotO=g&5u|M>-Rdx60@}Hq1<W1?+c)3^)sH|`ur_$5-#i<mLoh} zF*#x~@6~o6a*s{Btn>YRkI_I*WDRg^+HeYnISDYl7Mrhp?lv`^&5TX3JUN$<P1o!M z2adAFip{1NTP}FsOnBY_;3@rtc%FBd-<84J?qbefc%ACq$#>09bJK6lA>OvyTs-_f zllSrG!a2RWg?HyEUdwa;+#l$A<mH?rSHRbj(1knEO*<mia{+w6Z5?r>41QM1JvZ{w zKreR?I-~oPCj!3|i#4k5G7d`?OR29bcH>7`QaUM=f*gTQ3Fml^{}4Q3HF^&>@4L}^ zgeY?(_A1pK<UR#nb!P{PNAZr>JJYcL>CR*=^iFza@_$6?h$)9wqnLfJkiOW{#z(q< zoj$$1V+{{?YTqD-E%ejSiwvOi${Vcpjp$+)nd7@ZcBfQ)LcSt5ZTp>p62%;l+^qIR z|N5=?81d&(;=h%f<EK`4nGN&Vo9agfN)&TLF*Ta#yK#GL&+^koJ$WhT@~J&aXDgNK zbB>Rk!*8AEMpx7qcf;Qr=OfQhS7r2G{I1@a5?aLkUnn)^jj3N$e28+JD5o-Q^m{ct zzJd3TElVB0$GpJ3b=mmJG4&0_JDBI15u0+~rTmU>7!9ib9P-E*&Va-!BHyjiu+@AZ z_qW7Zo`N5{;L=9Uo+xWr%igEhh|nkckIOmJU&fjKQqJ_VIn&SLOkaxMXDl`ivQ%L+ zcRl#Y7K-)*$er5Py2cw-zZq{#yNWqPoh6)!$GfqGZ<xfI!EgPZOQ6GLW~IUXbQ^2) zDz;LsoGVj^(JLO>N_<tdXAmPo?H!=qSCfrt=t(kEe>dgx^JOn-L<YGx6dviha>X_} z$MyN>r)~&8V(_Gi_Gd1K_JFN{PN<Icjh9cZs4Fh#Oj(ZpEMOXbA2w#H?*&$dX|VTC z=td{i;yONSUoX0-oQAV2mteQ~6YeeHHx)U^&G#d_#P+rv|NYYMaW87Jmw4|PksjUc z4O!y?9r9zUo!Di|*R7nrBumJTse<pSi~MTy+`C8qG*`K4Url3AU_NV^XAgP#;2&dR z`>;8v_&Uwp;{}mrzPaaAzV_?!b9@jSm2Gh`vTl4#-_RaG{tMNYAJ+}eoQetP`gE@T zHa<I=i-8`XxN$`K%ye=vjr?(LQ961i&ZewUy4GkXw6|DmMBg>!AC%lVbEVOcn_+ZF zCi1`=tv_ta?PM(+NAKNtz=ItSK5Kt^iF>fKjKkso-n4NhHbN_yulNHzBST{mP3SW| z)?aZSl<UlOH*>dNO}xEP;JDu1N8ghZ=6Y8KG4GhG;E+Qc=TWS;m)L{y)mjYC$OZnQ z-xB^gS4^vBeBJny#romQ4ZQW9z0qI77>fD+3FWTgyT<n?_WoXHc7^2Woyp|-Ab-7U z9J&_zD~5&y`(pJE?%Q}zpTg5teS*Ks`JBmkO7-bR&SQK9j7j|`(RUB}&vg3o9&N-X 
z!-|vxn*+jA_`Udt_@>9q?1LXxS<3?Jp2nw?_t~#9F7d7?xY*3OOnEbiDGRP-M=l0N zZ@zE}A9Bd4I)?Z1*DPagoBtAQ_Z7UJ8-qcPe>~@+a^Ss@IbBbiKc(%9ot~)!V`Ss0 zbs6W^_x~8Cam|x)+w-i^C-W3d{nW6gH#?yRVs%=kQItd6LE)%uLEhJ1McY}#asMjg zJVp!$_5U~a$9(jW!buPCF@aCBXbOHclXWkJro4<fPCr^JU%_iO{difE9P*RO|F)m! zuWFB@UD<^!VqC@GuyhQ_)$>%}fd0M*Y%J>GuU6qPtomfi#pPGd59{Ojh5?0iduhd# zeUbLB=TkA?MsYs0pcy|jjbBfs+8kA%3~zF|Mup~bmoO>SSdzTUAA&xru0$7JjZQRj zUU#m8d5ofsHPFOX(_Jz8TSm)^wTs%{FL)vMAZOIwU7q&c{N4kPdC^P>4N<S0dT+vi zcA+0nV;xl2ZH)?zGEzd1HyACqa&IwYj_&JQ{=mk1b5wWzH;ldY@TbM#%#O2-!(Qg5 zF-m5Ue@Y*6SRH&%Jbw~A-p})pX@tH-TOP;gzR@cl*to}ZckiK{JxA|d_Y&hsbr_*B z%r|w3%8apf^n0kYhdTB2->Cj+dm;0E?A>MSUep+kF(JJxH^+qDcaDyHz}lucQbPGF zx@Hutcy>nG3hZ(5hcZ?ypJA?8G2;VgTEqmuchFWVcr<qyZi^V}BRsD)t!^Lla=~Nf zfSa-{WuNNM=g@Noc4dm|K|HjTMoSg?_Ga!zqa%$0o#lm_%n{o!3mYkP+>Fbm^)q#^ zVN8S1+cq7yuQ1XU(wAa3sGKe9W4CdT^YZtw^-ed^-D<aY<SCm_-4n&{WEh97*WO2r zf;RNzKH}#44qq`~Oq+eYV&Tebk|(abCiKW7OP+so<u(2WW9g){v)U)mJL|4VX-Vz* z<Ba2z=Ox{x`9#NzAM8Ja9kO)Yx;xOFyo>yF7J7zS?gln9H~7R$qOG`Iz72YlZvJ%J ztLVtf2CpXWwLx9tdsewly#KP<S(A4Tx<%>3B)<Uv50ncquKQPPp_9*s?-sBh$#yvo zy`1dmd0BNG=%8lXZ7j?yXPmt|@E4i4dSj%K&#y1Yn==pn1Luh1j7_<_kyAYA*Q|j% z_GLe}I#>5*(l@{h{2F)K0RCe<Gsxo`<(Uf^aAt{d*v;5Fzr>zWB42cmfA)*mmQ3e< zp7z9}W{$&Gk62yOPbHhT{96TkIdw|P&c1Y?k8{FY{O-KO>?pf3wX))pluB<Gc_VMS zXWf;{YSvBK>nX0dDWkG{aZaUmc2cE(v!{6V;_LUdF(>Z<PqDYa*gNS-PqDwngWQ<a zr}iaRRNP;)PkmJU{N{agkKeq{P5Tx9xoDr8_CizcS-0krx%=vAPclXd{kdtYe9GK) zlJBLLn8V!<crw*R8-FGSWgBzZg})NKZ|^R6WDYt|PZ6+U|D24TXczZ!lkg?$#y`n@ z>BX<qn^&jR-;`C^2tO_dCz==&@<Ri56jhDdd)Vi|^O9=htyRbZlQ>T_B5y5+7YVLB z+qN5+RUED^_VF9OP@?w%=s+^(YV<boItOT5b$YQw`n)>w0X&GFK3=C(I7Xd0@b}NF zvy1*E2iJ3snMD7qp-IK95MC#<ZsqtWm47_42;Q8bc%#xeNtU+D!VALe6_VA$;EwJq zrPE<g9`KMCRBQDd{KN$eR@K!OyM~{wV1Cj!oQXbcp0d)px4$|WS=Wus%RS!Rqp(?Z zwzN8bHsfh*CgOC6ZHzy9qH$jkIuy;DKkvH4w%r-objeVrEyqs#eA>K{w&&7E1@d$` z_NC62+J5w*BRelKxho_{jrPT9`rYdM>5R4`Hx^stjInJ;#uuxvR_9-4m{)dQ(&KEp z<SqJQAD(uSPKWR9l!xi$vtiop;h8<w&0W;(yo5w2w!9evPQ53-wXbdH2V(_myUZ^s zL;p8WriVExmQ47~8}5qEY#!??F~*`tYtS9upYcZ$?Rvh$*?}0e+0Qjgw(#fm$7ne} zk(Tc?4zqSuo!^F*?~Kv%)qBtj(a%oym{rubc`v_bUVDnBd&W0ia`1x7!!|8L+oD^} z2qo`cFd|$AjrC-AUJ|G8xl^o@G+wss>-#1{cXOd{o6ezeFLW%Lp8TYzc+%z=p7b)N zo-y?C1$>#s6X;{|vP++GgEI#&a)f)_otFro<hSgH)_l;e=s31ds2+JecqTebr0Hhn z^BJ1HJx0?h2{gUs3^e`!)ARo+J%5&_M9-(&*-gg}>eYY9hBD^wC7Rv&Y@nnJ{Y4sg zl@WMK*|HJoL1ac3@<BB^bYEKYM&%^-yx?hHgKnb&9s2|Dw%x8k$sXjbxjK(?wp<Bs zk?x@Yd^(8iGj~d$cn$i2O~fQw4vjrODo~<wUqNQmo$m+yALc(9pYSorhlh}*3S7yd zXVIIy=}hwrzmOeUW*Uf_><?b}_&c=wd(QejjDNu$d6nP5Uix=+-za_yezBv;Xb9@; z?LbokpPy-@^f|!U#tBBl^VoS$Gy)xOnU0Eg(Am7r|F7o;+K=Wxo4eiN^zWeTo9F&6 z_Z9frJJ^#y!lzTb5}vUY{@j>vG&FDqtk+qF@$?esq#PLNd^&Tfzx_h^vUD<fra4V! 
z9v?VI`8Oe}jm`)Z@Bg0B@Eq;!2R@@MV{aERlTCd53XJhnXZDy4rTU$OPc?HG-Soi5 z|DxY~#+$Tg!bRuOzI1Z;vnOdz_*2+(Is}}jP-Zl++rzv*U{3GzfB4Ekd;U4kvW<=M z51A=_Q_kO$YrvyEaE^(*%Xl;|&0!96sbKx;k8fGM$V~3ugk1V8&-##`E@CcMa^~6b z3(h6<!(CU){p>C3e<HDA<jXXIdA-d()HcBw>-i;nEA2wJ15Lo9kox+Z%4Z*Uf9yjY z2c7N;H)rg+^KAQuWcL0J_TFjdQf89N9cnbw`z`=(yTFzDWSs-teX_~bc*aoX{=>`G z-HQEfxj8CSpFAq`ekuNE&QXz`?=@~sJ;ylw{&M8d@6Fu0htK(pWs^C|mZ>K%t!rmp zo|H`e3!XRf*~l3aIYawn2jiHgyJ*)7)$L*Epb`I^&AgLb$$r|=%4b~GJ!wn&oNP(Y zV*eV+K4!6>or(`Azijr9mt5F}81rTyj~@O3J~SgDVf^N}^FOXQWfn3Fu_ZV!UY%wk z$H=DHx6D(y3cS;EWL@sTOuwIJ<s-_H%S@|0IyBN}8N^<_#2EXfrKy#QTV`Ps9sOpq zZ|A7yvDa}wqkav8eU$xJ`{RTR_DAM4#65%VP_^%AkNrCPS|{_?Z|!f<M(nWIW222m ziQ3Sf6yIaHd|ls(Y!d_ylHHp5l)Xj|a{n6>&6YQ@`{rK2{}y+YZ*#Bp4(s)I>|)Ew zg%X6HcnSg+&SxDZTMM4(j<E@i`O!(Y8IMbN?7A}Gc?J5l%h9dHd`>hLb9le@G7jnF zyJ<^n<8!7?)i`>uGdlhUd@SQx7&z$JbxxQ2&pdk!f1S_Ttr`A&_@EKJV&FfH;a_z= zHYMPAGdf;tep*Y$Yq$4*h)#4BysnOzu&$;4e)%$}&&+~-xow=Ot!4K<X#L!1F_tY` zS9ClUmpg#%NY-$<Ikcs+g~u-+P<dp#wW?FTd2Ea7g&fJ<$*X4Xs~8muH7-v&EI*Xy z(fFYpKWpH@ubRI2`2Iv}0es~LWY-<X!MF`vqCXDhw+R>U!HFo>iJSSv{k-J+JM#Dm zeCJO2-xM<c^#`x8+x;#u2qrgmtYVBl=C8gid|A<5_oLGsxTx0KAFun=M<=iyXH8w` z7w99HWE3?gSw+?|N2LYsY7G<vS$hxq%_Xj7sg=p_ZrwR6CqwPx36*u`gp1tEtV-py zokK3|AkY28YjiJ5szkrCq<r{U1+gn#w97emiH|Zta-l#UMLu+<jJ1%nLYj*{<Nb*j zt6%9<H5RQ`{GA1ljDJ6)ojd1c!A^Rw>#$+;@n46|s{*|V{h)`+s;|Gv_0#Vl=Tx8r zn*@LO4rd7Y)$Dh~?FrK(1+0G%y`90>7GNLN$+`ryn(P>CMgYD9|BBxqZtli@jIvrU z;k0c0em;0&Ln>`>4v$DTHjQ=KZm$!*!k<~E%+IV-1^9x_Cv6q}$VXVmBI)af?FSR> z_~Lf>u6t4|tr@A6Gfm{O)aG2RmF@;omgQ7VVy*nFmFuRQO7^h^D{U3eCsfv^t<H7x zPS3P%R$3_6$Fr1Wd6f4|b>EpcTt1YW&vWdri-N`nMFwl2^^q=BpNaS@-?2E(ze*kk z(PGAhvj*04rV$;wE_4itf0gMuG-cx|w6x0MLO(M5L0=|v0(8_299KOoUG-T5k{=B0 z(XRrgl5=WUi#GAcYiA#CI?Fg7@9zX~6^-3NU&5K@Jq}-tb}}MyzxmJeSF<uWgYl2e zNPW%|%QqH1(X{bn)_WXx#=Ym1`lg}B#mBTh|CLp_e?z_!jv2@-cPv0pg>A=g;4dN3 z5$k24pCO(nxaR7!h^=dx>D|_~#$I&E>qL_}`(Fyq%tVhHf^YUF=Z2Eq<3iR#Z1bsG z&VL{6rl3Q59-i;PS8n}Sqeb$9o~_|rEt^&B9V0)iTipJw+QsdA&{K{6Qp!&ssGHKh z6?yMA_EzcN_JHTozv=mV@KKF*Qhc2AfG_)Im_B5q%AFUuVeDN5O~h?gPd{J&Z@zEb zqxqZ93ak@+SmQ$4e#uubzQyaZLS|QWyJ$oAsA<rY%Jifd+kSknBfXt_;|<`1_}S4{ zFCP$3du4>N{YU4-e4}QAOL~4Z$=DuW?=KJIS0?!N5NjSj4ZkuI@GE=<{0g6jUr&ER zAB-uVc(8H&YE8heTfna!;MX*3`2E*rA8(yy4jz5=tO4B@>O6iEWq!guH13jxY53e^ zMso0@cY(utpQd>*&S~fqnkX}m@+!k!wH=>R_wv^P8)(bE2Tw;=5$7$!FTwBojB~8r zo@@ZJBNdeIq5m4%)cDz32YOj+;o4DhB4|&D?*+d4V1KRlf_iWNRQf)m@gcBruV1Kf zZU1Cw`>WIy-%?+vuYcU1Pw$7Ax8g^9o*$$Oda~&xztmTZuDQs}aZ45&f{wOGj=*+Z z-^E8F;G6|riN0G$8{77;Fd80OV6?cQ58cO%<38q`kM7+U^Govh@7Rq$5qA*iW?FO? 
z1HZTJ*1gLb*R6)%UkeSLeiwlrL3a^p9y6zYJM^@LJ{Ljbb@UU3*LII(A9JDO6`jH3 zbhcSzx+}f{jqEAt&V9ig<=={bR9q*%^}OeEC(=f5vXQn!e3kufD{|2Vtc~Q}MT}uF zd_eMRX!-r<x6@DRx6u`~kW>4xi}Q`%ui^a~-jffpWh;DQv13$d2X`he)_l!3jJ@d1 z*R3%#LyH+Ju_Ib`a2}Y*I2WO>UX8x`R!4GZ>nv;t_}opuw?2%1+L;pRM5dOWTC#L2 zGWAwIB~z25&6cSvhV|52nAam(1I~cjXC4HXPU7A0o)#rW^JwOlxR>2Dj9;t3`IyY{ z*}d$?;Jonf^KdEI=DkhW4HMrK{u`h_d;e!1e&*&flkr<LL*?+RNyzqdSr6ry>!O_Q zm=s^5jIp1T8#tR7)0%Hk8>gR}V|>1;Ba?n4vs%)l|C2tX!%OS~-t&ayP30O1q90G6 zGHc)n{Ge96XNqIsI^bi`e<uBlpBA!@G$R{iv8PCGxe6O9(FA;DTid;!O2r6#9vp1u zznis1&x$XCC$b1U)LK7)Zt*r^gzQ0nfya06=6n~a^|edqe=9b%`&q|w`~!3!z6QJx zQGN|@Tg<<3WC1v$eL`)mb&TdN#1%@-4Rkzdjt;$Ow=vqj2T?ntk!5$&))(u1?eB5M zTuGa?l=qoqLOL(l-)UDgME`sJzpri=zTn?c7zAfx{NmHTdS1Z@K3uE21onB_i0$+A z9pC3`S0wE7(A~A*nPAjJybbv96UI%mkJrTbunqep@Tn0#{NUj5`}w?w59^Gt`x4!a zq#*xHV$b2M8Yu&3MkBwDx+Jx7KXf`O?f%^Nkd2-|HhK~H=XqqK$B>OYtk>4G2Y5cA z@({MVkMmB?9zZtQn)buoHTJu_$_J2*9!EBcm5)7t0omw&WFzqn?g{Lilxu*K?#s5C zDHWTL4e?3nK;Kujn6c?uHT$#cyccp;froX=tJ`I_qi4HR=Kkt-jXgAldsu#thW`YS zfh_7hdfv|5Sh*Cxi+APhtGe?Mzf$?ltQ&V_<m!_y+vDFt-7@NoGTrtawVs8p#omzT z<@A3gu_L|brA+Nb&*55D^He@}2BMwln6n3uatD=oH)ZdS!!#g&RZC}3@;iwZ&Lrn) z{Uztc*MC)f{r`!a_sT#_&TB&FYOnt(InQ4I`=C9^dB{ZDbdNFT1!Vc-ckG*Nrc7=0 z8B5Bzmsy33n~eNX&G|;~JIMKFw`E~Z@2IRmHY^7=>>Ep(;J>>@8bxz>=i^;D@4ljS z;LKA2&Ipg*1y|}hvpfbqJPNKX=F9`{Ua|(8lPzW__kQr>R?a-!VJz8duF37@*<;|y zBHH5)gK{CB-JiRLGtU~%H0aq1byslEnc{b&H&H&#)wxE8Ut^+QKmEGt_YnQ+40S*K zhUj-L{Z`X&J#DRKJd5b}O8Tv)U-c2;+5PleLBG}Xt1(9yPl$f&>9-HN7W60Bvb+7# zaS0D}Ho?cBQuh*)vvnr(^Ii8KZgBSyx}M1e&(mMBJx1a@=B^^Y*i7iizQ-Q1inI{K zZ=h@i_Qk81ho0A=Q(py7*oj<G_Co5sFg9Op?2TUb)AJ}_wkvhs>fOGOo|R*x?5!PN z>0-`vnR^9vsxwJB=MkMZuY^t)0fSm#PywB4uhqLNp;N8%T;A!~B;X+%d!PL-uW}Nw zm<yf8%Ez8Zpwl|&)Wvwk)6iR`ad%e{WE`<QD#pK#az`2+IeSoaaHex+S-z3>p)<+< z5x&jnV6i1L{hm3pm9}j#{gTyue`vX;cEYchSj&7rweLHR&wo(&gf`reeLU<zl6@X! z@7nbVG90p=&MiBL+iv~U;oCwRubxj#yYr2CJISLedq_u@F>e-jp-t@1#NTNsrS1+# zdVM#0p>%8O&&M{Ga(Y%r{Xafi7`ok&S<#GtUcCNP)kj{&SCHHj#Cf5OGo2|F-ze%f zjvpmv;+bL`iT)38zbu(ech&Hn4F{Z(*NL$PU-&t{dnX2btFdRO=U;H1!}?p9_!M!s zXH7||>|@+6XxPs=1>0n|_>UL<BU>65_po}7&a5KNm)a8Uucgm4#?|D-$9|ODa^y)p z9*=EQ@Twi#Wq>`Z#fOd-9#oi&ZC(Mo=!y~Myt&|OW3AD!`oh)Xr}aJX%~LW2`7c-t zt$*Cl_bcozK756J=+#|2eIdy!`<aKIy=)6Q?FwtOZ8LBcb`g!{n7&2Je~3)y=(goI z?*7sH+BOYy*`Ivu%U_}V9`@7yl-)yFbids%BLBUQtap_$hVz@%SIge82l-F740}u~ zB-@Hj=IFkMsgFDnX-0li{1XcwPswi{Be{<lx_ol>Y|`1&=(f(b?U>f--1)0ND5iZZ zua)Yn{3Fa;vD9Vz@I11RVWx!UVxMtUE3&5c`%>v=N89?@_m^WwfsWQdmbCaTSyDC+ zUF4HFKpT?zWh*A!ew1+t2YuL+AU9Pd@AQVskz=GguEj>J+Vs_FpQ;fKu-^JExLSe_ z{Yvf%^Ulxy&H11(z8>*4O4K{JSR2)o+~9J#LdnR8@W2Z<>->P8nDO*2WUN8(uvT_X zjL!`WCF4rR!#%(TLuW1IFyZs*Ivy`@6JBRRr_Hn>oeXD>G~KU=U)I9wlF`vf|2&81 z?90S0h{b1$>mGDw5}eP(Po(qn-2?4{TbhH;(b!DYA7uXO_anw9-?bfl+OiQktLA%0 zq;KNPl10v^hsJW32|Y_6@omOZ0Y2_TClN#@lrF*oo`OMKACV1C#dQ+0ZxiqHFvsW# zY*bF3sWKw+1@>dt6--XH^&0rxMCwo4cO~0rh*i6%hh$f%_MZdB+u`{xWD<PBDtz1# zxUdU~$%d!=Pvd&4DE(shJWckf${F%ry>a~Ai$*W*n>1!|@5I!_gSVxjM;?nlIUW7{ zr}K%#<E9?PrsQq(?!>o<v^lf-cg)UgA8K@k%Zyk~Hrbx%Li5od#$rHW6V+0NooKX@ z*iuKTi)+urP79mw*^Iq0j14$2Z(L67dFY~oJC93QQ`x}k{3x4P6T3gf!_xk7E^Gg> z+HnR-w7)iX#cUSJu%X3<Y^vh;m*OwivywO+vlWL#v4=)rcbw#Z<wIhHa;MeGcx!~G zIYxtGQ0U%G?GMuKhxn;j^rPQ9>8p*hdM+IoeqIC3oaOJ+-OcQ4+p!0ms`qbH*m3o? 
z9r$5@JuNfx5`4IaXSaYmttuzIz4NR6TCXnb_TxEFj*$bUzq%55UxQujym0Ttt67Ju z7QcJZR~Ell4__V5kD{^QhZfOX@x9=V(Bpq|EH<3%O^TU;Eh+LRaqoC8d&s%$M>7ks zRc2o9DJkPQxXqhc${pW@&AAm$S7<T#_AYa($4-L%xMeZ#cOWlI-=%s%)}j7_l<{}- zTrnGDi=uYb{t;r}G-4Mk{=zv2`)iM1a_|7Lq5cK9RnorRYmd8)z3zSWa~64V*b5TN z21X#0fEU6EH+zKU)=Hi34~Gg7aQ2GE|3KgN`qG#5Z}ptf7jegVNNsY5x|Ol(eSmt4 zc=tno3r94bMZY)ZZ2~s3UwjxmI)toEK6T(YCUP!)%m4QX-?1OJ@oLE*Pi(ji{23yL zA@;XbTadrVUFh$7f2i<V%;SUMdAv`1iFVqXY5M}&`WAVz?K0#5Y7o57$F2d}R*mlf zWm1RRa3fd5=gPCa4UBW<IYz@y`~VHg^w5v$yMft!wR0}(!Tb%{mpq~O7k&|gXzma% zB-h0P%2az}w1jN4H`Pe(?s;z2H6Hei{FejYP3>&S9Wu#-d)C?;Fx0i}Ml}a=6GX;A zKZ0`v96kt-9)vd_uP#B>sY1qWus>bU7r!gbqzcKdc3sBsHF!uHFe>}|(6o=i`CG>l zONh2a4@Y=^6g*k~&d_#Z&e}N-WN-B#vX|BezU-gLm};E(iQ;G0dpok=RmSm2=rrmJ zt{6zhMt0_G&hk&K&OKyumP0<1+*12OVW?pFje`yoA5QsD;v1)WQRwc?^f5lg6L}s! zJaeY(*Nv)2k^46xFUU@M)7S9F#CC%7&01@3ev9?mqV}GJj+Ue3E;<T-h<+2lLCy!8 zvClrXQgSop7|VgH0>^uc0t3H#%CXmY?c0OVZw4rr4*W7B20r1Ifj|0d@DY3m)n1I> zq!TB~1&_=^PQ`zXG5!`C(!iJ)-*~H0d~!m>b2jqi{|r^7C&_L&#eWyD!9#RF@lE9~ zq4w0TcwU^3^)uGD>HiR8{3UJHxMOYhGe)(kyF|6S{B>kZXKcL&J{X$z5bFR8E>u6= z*q#PonA~mRj~wqq?Hl+f>JFt1SzY<^Hknqv%Fo3%APVkTT_xSCkPYWA4Gu1Nt#&Y8 z*Io<k`Ql^L_{Fo{o=uD}#;W-r<JnQoQDY|Fy7*@t4oA_`YRm`zNbVHI96T?Ud!_l^ zp(^B&wZ6@s7XQ0Lsgsrl?<(iMJ79WVUC#4@X0t_nUa{vs0WO1_4QkN$yai3S_6-$^ zUqhd33vM1~mqRYK_n_-2gFHS>?|EkPpdc{Txls74`oiB{=Xi8O_>l1&yM~On@i`Kl z53S_xm}kj)Hrw`Dfe+&6&OCHBi@#^?>^oWhI9~gl85J(hBQwFtN$>_Q=SZ!Y_PLs` z1qX#|!YRQ^{2Je<s)L)5?>KYLG?N-CmSS%>&m4OX@fOO_9~|6dbgYCnebNQulb(-1 zy4pZ~oW~s^`{w*S->^*Y(;1z+UDcmK{W;V>KtH{-u?T+e(U_nUwber#Y7biBe$uS; z6&Nk(ehb_1hc5tUwBCvzqOs30`=?{$QjyGDV)Ll8=aHXNW6#6GJQgrN!O#b6G#?B9 zd**ZDndUP$KA*4s-_K_YeDmadXfJU-eE-aR1b5B7=PBs;IR|mIVmK>2{)x_u6~vYO zhOw>pk!yVi;W<8Yf_s9QT(Jp#dGF)+In&<(@Z~<_jOgRoZBWKVnW!__-@7H#*T?UJ z$iI^~lPxkWH@r4YzUeP8XTeAK6Gd*Q#XkX`xQ;Nmc%ti+P5#03t9-q}H`)Mi)8J7Z zU;mGx!qsK?n^Mn(&3?RIg#-H$Z1WFNp7Tn5AN-9w>8cs@t?_nWf{a8RjrYin2J&@^ z{~&g>nq$vR$U0BO>h{6!4vF90sPiOqc?v%k+SYgv+#cu<Z&G|e;yn`IalHS~6~w9F zyxVzWEIuA_cst(vi1^0**8JP<-JaY_d(k1cFUTCCMc5(GKXVx?TAy+lyN_(6Be)!y z1YMxDY(ypYp^=Mbqq8JVLUQUodG6GjycA*>^!@sw8w3Zzt?weEBP!h)wjD?QZieN1 z8Yq>0MLNFG6a4YJ;{oL9wBNBl%-wJWI{v`eYQD;R`K|bbMu2`Yn>tMDdO4dp!2J=F zKeF#ypUy*@u}|!&qWnVql$p<g3oiE|k9PdXVWHDN2jhSih~42c)7_P(={^YjJnVJR zN1D-TWmVuiUVpF}+<9_SZmsEZ-{Ww(b>7b4`zB{bee+Z6b3KHh`0lgKxwg+RIAyXf zfqBXmK0eZOu`#fPF?3$d82GI*>s;B*JhZp!eJ^ACgni`3#@{A4JI7a;=mAdpWy8O| zMSFvl$f}g-LEgPx>&frs*c12g{(#f0*mnfo0^?M@Ezqz0#`n|jfcl$jY+LRaf2vL1 z-AJ1s%72jeHMIG!w5i|o6U$v37??-7Pv}GAR5{`kXV|})i|ya*F$UhJOy?ZzGC13+ zJ@KYRjKMoT(9t~GC_F$rUrLv(JuZUJQOEO?kAk0N@*~5}B?UZ>;v?M$?jNDuIZmrw zdLHri+Nts#%)&;<<==>W;I(ybF~2&!zezB>ki4Yy(OM`!s&SEx(7ushkE}ww->m!h z=*7{0jJ6i#kx!vPF=Q;{6_?sUcP4v_PGUQ-H&<y)yUn}u%FG3MdrE?N9%QNfe9j%r zr-{!Xdr)V=qPz&-C11~?@A#NwWAyl6Kg*aVI7;6v+2ha^_`I_(d+Af-Jpg{_JRn~{ z7tj96oEo9;FP<I#ikE-2BU$Ks_{OOZmEC#vV_%W~mgFYSa%3yo5dQa~gZq$Y)_h{3 za-QFfZQ%M9=wHZbqw}V84^hgX3vSrWdfZQ&DzCjz&n8{SIfQn+bMTv?jZ48v%|~Ny z=DyqZo%t7XzYGi&YEC*2Xgq>l;&_j}c&$%k`xu^t-}1o0g+|9XXo9!^9rRgskL<7L z2Ym7Wr|1!$6)1)t9v6=qR}qC5tG;+u>w}KHn;$`TBX)#WI)@@-+vex4Z5Li0KsU+R z&t7BEl%BDNZGW73i3YUxY0$xu3&bDC`#+)2*EydCvzt1C?pUmcdzs_#SR$q1q0K)p zGPeB;f5RX+DE$n2mHKYR4X>J)A^Rs>XyjLP8+C)u4BN*~@c0;84&7PR@*Lc%$2Y#J zmo<+9Kj}{nu#V!(@+<F!&#FDmQ9fur&KUiS!tX?5=)53CKg5x;>pp*m{;&F+{^tz$ zUk46P06ywdyiUAU<4~W2&Y$J#{zP+^FVr9Y5!>^ISSyvY*MM>ByY4XK<>n2qSEoO= zUfM@=9vH4SFQMMND037~QyHzrx?{kXd9LO2!F2Ta;Ffp?zW4*;$DGXzqrYZvz5tm? 
z`>UDdt_Jp9%)QH3muIY6oR^Oui{V<F_dW1ebowrz+{I_;`Tcy)FR97X^P9=peh)H& z#-+21#{UoI*u(E5=sh)#TQwd}U|^9x;TLuG_{K$od=B7io=+@<3;3@6s|Og>b(6>8 z6LJpHzG(4<=up+`@PJE^rS$uT=+HF18^v?U1{OMeJx6A&5D)nh<z|fR$6t;!hxQcK z;RSFahJO<(;`{i`j5EGx_kll>0VXjATNXZ)ZOg*wa5%G%VE=IXAA~N5CGI!Y4HY&s zPVp7V7`?z;>*sYsPm-1KiSTUZz6*Ft?vBa(xyZP=k!{F42cZ$wQCpIqwQkb2{E=7@ zdZ*ud{u008g%`d+JK{Ui&*$T#+RJ>5QgbkRcMQk-pkoVqYi0}{WEk#7{B_U3gQz1} z$38b4clauhgCrYCUfTWi&@{adbG|_L+~Hk3VQe+9I`}8$lJ;CQbHs!tnqN}`ckkq} z(|75HezD^OFi7{8>FmS3;kIuLZT~)Nr9S_g@h0NhJz2)Ijlfg<U;Bp-zUxhE&b=`H zjNd+<%WhBfdk{KMKh@Aq6S^ts4d?P%Cb?vkF)ijxh#xz2XK=@+y+^z#3tG{B_Aci& z@%wz*--w_3J=pzgjt8Mj7kst?9p<E6lZf$+-7jZ`$<k4yi*15m7gI-PtzPy^?gQ-e z%{{5cv^t&(Z?va)GFV^18kl~9yri*yo!pZ1DWkD&)>(x$I(;siwa3wh<|^2`fcH%H zHsx8A-?H{Ot)-Q@DK~nz(J^JX&jauh)^uL<2bsQ~fO~?i_@DX~?=rB7(Vbv9HuT)v zBg4mZ*mVW5_-^!ZVGnpOnI*mt4#KD8@2EXM?Q3q9X;!F@rr|!+c0RF7iYTXb(>#Bs zb0Fnqix^@)s@Dtby~O-$=KQ0v4YCL4QRns1*pbtg&0nz>(%pA2YY>n7qjOL?HvR)g zwQlM4P4E-(PSKEPRejz~JCX?{3k&Ck`@;D;Gub_f_ho|1uQPpH7@PWz^Ipa0E`gT? zM0<>JCVTuOzH1L06yITPJ?vfLGjDyYxHR}J(iiV5Y7WPEvpp9*AKoJefVbc!UKWIg zzQnl2&x-X44o=r6e5_EP^goqPmG6Z&XdlpC97S)`+i1^mtk&Du$XOEIk-<C3I|qQl zE?~TS5FP-HKC^LX+E&K5>&N7nVjgGHrq*zX@oPP7zQepZ4|GHw;P*0YAHZ+5{S^0? zA=+)jmZ;HT^#xZnZ|rqWlP{pH50|}POt+pHDeGda)K{O#E=xLpgl)yM)$Phx?B#Ag z_^hY>>pvQrwwkdB$GaG(aL|Skcy)w*r38N4jI3eXIB>`0<{m|Boea$p2d!@sdiH0j z=Y`&yxi68825UV}W7nJ#+tWT}<J*VCsG!|j(cA4m5B>pu*LN8WkAWlHM^?@J0y-=+ z#odEGX$N!uBL2kko%k2$DEDFKXnzwv(xY9Ldk^=SGcQMP_$23q^YP!KJo?avFJf2K z$(U5OlU&WWy2rc!LRs03soZGZ`+=ciKS)n{_XVlrrN?-e@-9A=J9i3vL+{;FQpfMV z4E{s8ApHzb<~{T!@1fu8Lw77cUE+0T$91Vs087P+9=y$gPTPrYJBd4l5!m>jg+Cm* z1d03bb@|$6i@$M~S*g3svm*H24;TjLZq9fgJ4X<sUHTj+Ixt7%Kj4W0Vg?jB!Ly08 ze7cJ<&Fl&<XDjSJ8loc|zIU0=(5vV>fYV*XFY=B7Z}1cF;UnO~M?ko`8U5-3WFh-Z z1$;i?tSI@VpI8Z9oO@#OUV6j_Ep?{G@))X&;<GAlhvF(!Lvwo8N`KY(o!4;A@Nzz? z;kS!(YHhhM<b&@`g9h8q*-0!htFDxHg0G&59`AyO1Q!HKnhmQ+&&2yB<ES6q*`y#t zYMhb<#ryT_laZTpZ~I_q!=1$cy7RUPl{ly%@1;j3P!|24&4&b+Uhbqa;URH;)W%tK zi2JHfoXxp^)ZK?MzFuSfF7z10CMlOQcWm!Tk6cIl(eyxvU0?hv-5?GZ{B>lY17Cle z)*GGK^?Gif#dsHGFX|r&pSAa5^y1uuA%oerI9_BQy_4*FC4GtqO0S{%t0=ScKV|z# zYypjz{Z@6=zvB8JSNIi+eJACXu?IFo$CjCC$1GKjn2F4PDYl3oGml2rVGHXic!(E1 zLwUij%$ei27@y(*=+034k!=S+9DrEa@%|iQ7>r}yWwfbr|CsW6_usUkvBST&39gyM z#&``pj}Dj97$g%a4z9`{;*21i?nN#H*ZK}HU+(w(XNoaCc-yRSJkGfKkPIRCZFg{P zq>rnKQ!(`1r~T6vW1M@`n18wf+()7xK{nlnd?<aRl@#dM2EV~pX{z4!q#G@7GbYv1 znNDL8e?y;N5%-0l58L;=kp52h^N88(#dv(&o-T9Pp6+g)hk%KJ&93YW;`dH#xCgCc z4I|v=Zq;}0FDu}atFfhfjQ`tY*TxyrD)75g3%=;yiQS1+TJBDs#V31T9q-F2UyTeC zon|bxN>eL8xX@VYL^rvHezx+z#vD~qziTr29o&5n17mb8As=-l?^p$s-J@u;%rus^ zmCkjuk1j=iu-mhE{wD1NS>MF_XcPJkvX|*T`ZDGfkPYVN+(+NHLwg`O+_ct=<@4^N zMa#N(R@peL<9;Wn!&z{=<&BsPuEBlj?H|PKq2b@#Vs^K(-Oh-7gT8l=t0Uf?-o@Me z-}1I<Eu|w;O#U~<7~v>*Qwwd1E?0mP$O*{ro`~+zZ+BXuLFf2Lw(KeS|HK{7-;ou` zcm|s@=n&lE?2{GY3=(n`l=>u#edJh`%l>?!aCimf?m#9ul5GqW3f|mPDlVAr;Wkpo zqMsT*6|?`MP8;Th#7;}w<QP{kzFPK|rR@*$egV46Y9puK%U+~@xsyCOZU<|LO+rNZ z**+R?bVxT6JipYZF?_@r>@o3tJL7_<3>+m6Wf1#MbV38x2>2fR<Xgy(>?dK-u!SD5 z7~R}Y>DSh?*t!a%;fAN0bJzcBLvA&Cxq4*7Yn>VRByg|ytA}#4iER`7%XjRtYJAQ0 z!S<cNcGWO!7XVwuHx)kogmxuA#qtyoQ&6}>PEBw9UGmE={FwgKM>4)l+JoChbKXIA zdX6?U{*SJk?fb+Xuw&Z&j5+r+kB|AC!9KB#Gp}pBc(KR7fxFk<*Mp476{=-S$X$iy zjHep@t?>wE8c%@t8cQazM_&IAa#Eo)YC|sYn1qb7VVf06WgRovN2^%FC_LFpi^aQB zY$vUcWw#Ss$1di>eQqIn=UB%admV$X-_@>ppTqu9ZDiZ)=w%PrI?fq}QQ|uGP7H+m zCe8NE0r!z>b?(E~(CCod-RmTW5zj@d#6Pohxs`FY^73B%?Vu;1J!PB4JK?3Z^v-ts zb5yKs6L_VvZrbvGq12~-geAzYEopZcFOIJEr@;EQ~OEy+L6x3U~Ri-PtkEhcT^ zbGP?&Uq<Cg<|+mMMae6zOQfTH16}?BXDo+R|BK|kAf8=RYo{{^a{w1UmBT9E-bd2? 
zcXBqB4NjuJU);~{=y>q^#L&}<tsT$h70X=`%jfkQ`tgv*s}tPPJ|TNK=H382s&)d; z9mvtz8{k*|8|f#XcK#ps-aMYF@B1G=*F2O|RHR%<h7c;GWGYcfMf2q1;&Qp-N}5O- zHEGmHb5xorrFov`Nh1wXn$y6m@_nvz?iF5neLnB+@9}$l|9HPnx#yfctiATyYp=cb zTKnw1Y%r5W`v6csCs-yJBOc?u<{tU@<`%Y9CR>1Z3BX!x1Efiu!a&b+%cAqqXdTx- zosT#KV_GT3C)(GX*76Z7Iv>#t<%@oenKheqlqu@7vaoklBhYw^i{fn>rztLMipDJ3 z<K7%|zQG6+G@&zTF?YmXJYk3*hkW=(wKLYJcLvW?K<?*44s{Vt88a*K7nj!s{fqqk zWJfX6#vVi1?`#A5LJN$qwC2_oV?NQ7{0BHw1>a~I!tZLBO=rQ-_&*6e54zf}gHJgh zx&wvv2A)AUn=TmVBqn3O1$WmrX+_4|*{bTd(GECUbpv$NKjE?t;c{GbfJ+i^Y4%54 zbZX%;T!sh9F*nFGy6*!%WZxy6Xziyi&Kwc|C&Gos6aVK`%Smsbw5@Qb@Vf{jyaKS! zw=SKGFiy&G(*<tBWVjJ;*QS$hE!;ZFaAUY}7J3Ntu^hTAwY&T~RsqZ7Iu`$QCgd{I zhrXLuzl1ZO8nyafbA2?OwJG1fhvT7E-?6r<q7>hhlAgU4HtZU0a;;{YY{Z%mY7=+D z0W_EjdJV*wgz*>O_mV!zod?^m3VJqd0mPeOflL?TDLO|od)g?h>0v$=p-wa(q<+NJ zgIm-317`+NKX`EQpYaB-TqE2Gx2@A!eOJ_Wtx!V$BAn=4-U%{1*Hb^0;b{guG5_ck z09=4?t{T4MlAnFp1^al>dVM**&{sfj!d1PDhr0d=M;brAfX2IkYt4R6c)^GFw=(#$ zGHUksy5D|aeimca7`8CXC5cx$!$x-DAjj7Pmw3kmG*yAFQ4GF(3cI1apKUzC^{-6S zS+4iUzabw7exP-IRVQ(lf(M<YppN;b2IiZZm~S?~d=q=GOw_{OPKs%A-FfRYz3)== zC+xYlX!bb$CfMV6tcZbL02@1G%{B9NZPKzCM!P-souoAem5QqPHpcMIXZXmPA|Gs} zSbNOvVfh_0>jz{O=~am{XpUxRQG)XrE&^9In$NInjhnJ-jp>`G9LzD2;J*`LoF`ib z$zaesTjQd6syfzCRt&+|1pT4C$ShwCI;$qeute~a2zhCL8rU~3szc8;o9@1t_Oz;K zuHNT7))`vB-i86aGv-#;yG+B{NIKsc_UR|k!*$WGORE+%NkZMup`P+H9-<IVVH79d zuZiNrNDcs}Ao%0XAwM6md%<p`#^}L+t=6X$aA`cKgn!lMEv;+Q%Rh#_3Sp<j9EipV zJ&QAMFoz6sJ>Z1>omm?b@|=UNTnc;Ty1l&Xluio$nbOc&V-1`kkqTU}PXC(4W!Js1 zzh4vJ{DowkyU-Wwka5n+UN6|XX?+!gGaBY2egn>B=mDQw8BeVb^p>$nW&y2F`Skc1 zzB@^u)=u!4z}CHQYY#BO{@Na}nbR7fAnYs8N4hbP^#a^e2dHWCF9oy?=k*w%iT$|M zVEY#!tvA*}Q@!2{;m&lRb<yhRS8vh3&9N2;^A8O%tw+LIBGAmAUnu@T>!BEaSZmrh z5Pr4+bk2Ya%CVT7rSWRRN_|Vk@Al@%Q>KW&DrNKLpfmZSi7sK^*mKsiM}j}d-c)Jo zT2X>CF{s}L;5!_)-Kx*c(9e!4f$tQ)sk&uU?Nv6%_X3(#u<v}Ewp_nTAK$qGKhn9s zHR1N`R{PSZv0<^{*b37O{i<g4ZA{X@_ghrV$zT84nANw~en9(Yndi0n+c`|LKzn9D zkDmgCz}AExomruVeoJdX&<F86zBSX3X<P+fCK}Q@BC;FNxfY~vFsC#0Dg5m;Rr4a~ z<3&hkjQSKoPM=1dYQp5;B{iY_<Mol|820u&gfwz^Sc9Z#hB@2Y4XWluYsyP!N4!FM zDx)FtV5julFzBz|hzrNxyI!tjhaGDUI~H`*nBMTInB#j5$Zwp(Gh4q1a~*5=>^^w7 z9)P0Q$q?sGgu(9q9{A(HozA5Wm?z?#g`<EETR|rt)*>)Qg4QN9HmYzwRMJDopbX7M z@Y&;>3Po)}YXzJ!;RaqL{!_!*48-^FO{MaPw?IEl(1~Xr_Q!Y|tdpX(mkRJ*zE;$R z-4-$)eAt(0jr9*|mn~8iajpaH;fwiAC#FSE>uKO)9?olU#oqF>u)j3<5eR46H{Pr{ z<Cp?`>AVR=;Mc=30OvXgT5m)>w|WetZ_6+T(1_TI^snKcV1PHZs}f*Re`0VJgBoDU zVJTy-Kz#?c0Dm3WA9|rKL@%PDQ-kmJ33z94?gL|vG$_Xh`U&wl?K4bkM``YfF*Z9? 
zmT#rTcYD~D*gd=Ep&ZzkvbRxtVsHFyz%K=O^Z*VSXfyW)!L1=9BLuWw3S%18htAcA zKplualrI%9H2)zl?Xho+c5%gd6>kCK9o7WUeicTpIO_%YzNmq(9M@}zXL;2Dys7-% z-WBf*(HnFSVvb04^TP8nJB}tx!MCJm$?Hvi+bhUJWtC!1C=ai|{6M~UGuaH`D`)p^ zzJmIw!Orsz`P3Muiy5BDKSA~<>I3FjS50xy=ey`&?AecSYJ=f8@90Svtc8GFlJgCf zMc>ddg;75)LHFGWUn1d4b+c*u-GPU*DM*HrJj1%6>}=WD6vRV>2Yiv)Y+2wVDr*tS zqVr37V{I78^m0|^fC%egIw5T?=pTl;pErCWF0eIdpgbzSN((ZZbP&iOSKJ}r`{*Ej z71j?>9$FX2_9xKZ5Iz~K;q-sg0eu%^r}~tI8mR;e@>Fg2Tz&8deEj%ksUvW}dK#)* z`QC;4IMYe<byu>Xs#(ywAc8v#{hf4O9=>0pGf<4cOH_AiOIjyKJkRTbJ|gQgW@tm| zFUNs5wIAj=I+ehq0&rg$yB?^59k>j8+`=zoL9&jm8|sja{f*@5V23<F^Qk58A?J`j zsWEI2?R%M1{>9u{A<Z&BS92Fd^$4@Q*zXYb<ub@^%t=|BCY@i@1@wspO$on5gk#OS z{{{F&$rfEq<zsDvCi-8IDts3xuM&KP?}RMMsmzA7W(emmpzO7H55T+JeslC4_F02% zp-ho%c={IPC#1X;T^a1>%IIXHzm4H(N6>zN#HYFH@Kc)687Kj*x4*7p`@6aU_9n+W zAMcNlhtmTzVK3+4+(enA>m;c$d3_O2JXNaL0eTbGs)J`qW)N@DnH8&mj{)|XD1a`T zsAy1z@egy-mf1@YPW$sOX7A0i({QKt5U(&!x+pSTNM?ce3FllrMfi|l_b7sGM2)Az z?hV-i<q|yzCjr)wQ2ofyLbz@NexK2%gd@HO$RoS50^|tQm(Kg~25e)jW6T2WMc^4a zYex|>#t5?20(?R|qCw|&V83ai7xiJHnGxhl?$@eL85kE_i#b|R+%=j*qdek?M$|sQ zxfr}lJ_)SX^Ph<@Y75Ml@`#_pSiOB1tG5?-Cq1|ybXy<jxP77P_JPjZ8{Z9hXO(); zw*s;=vXEci7m06a>?M2jYm`$3eCQh?>X$?}Iya(x3v`=Jv-M*)FV_D79(s+r5BL^s z0{;)_LHQ^=9R4je_&X_%*80nR3`fu&@cF~nsNP2n=Yr_L|7oPyls)g_0O-~S<q@s_ zLY^B$Z`2D?AeY7)qC3{uT9ECL!a;WntW{(4f)3f#7w8;-Q^;58L43-sEu%fX=$*#o zt?c?8(m&O)&QCqt8@iM$oej>W*GHHdd)@-c9q9M|?kJ1KOL{K@Z<GCfJMQbyCh{`} zJW(fpId|s3edX^Cgg@<9ZwJ`qlObJ#c#PyM$q<^mT)I=`?+v_&=CEg`4Z<_KULSdB z{|#HfO!`=rOT0_<2L0LfErg31Y;0Uw?p%&`#N!Aj8dMt6x_tDZ%Bs9cGWbL*8b9>N z7e})H*%(KZdEM|h_YH+T($&Pz81HgeRPS(nLo$oj^+1-%&nDRhxl8>FZJQm&@}VBu z40Au6O)?t!NltS8TOI2I^zvVIgm3H_)_i~tO>sWSH${!?&p1Pf+SL$yYQM&LB<0|< znl$);d4uaKuI<716*4+BCU{Q)i{R9R(H{MX;FG){I^Sw6Un|ip+XXllm7tAea4zDV z+U7FOFe3V#Lp%S))~6+C$Q-~r!%hLPJC>t<y@3b+MOAKCbB>=Gtoampz&9aV?>YN` z{CpDqY!kpG8TATnob(K5n$)2K!P^Vm$>+q;r)Bn5!VUT@edC3BQFb|KRRI}%0O0|} zRk?JQ0<|f-ehN4ee`EotAE*n_oXX=DadhHf8nEkdh)(Hs%9>YQ7M)pw^HESv2EqtF z&F3nB!^1~axwD$m`0RSX?<CW>68d@=Y{O2d3)z9>WlTUmt{w*24#39?bAhdp2WpCt z7w{F5OjUz_mE-~G^<HY<9n^TuEdYzsbUBCfK4={v$!;2-dP9dS2TqG+eDMmr3>!oQ z#v$2RHC*2aM_Ebts&c8_h$l$?6W{RJvr1aB^bt^+yH&YFpPGGT1j3Kq!Fea39nozs zc$N5t>PkGb<Q?QNa3UGV@&)Ey#4{up$&Yyseo!yWW7)9?-`~{jOZd*EInC85Z_PZ| zcdXMZ^koU?w*>kRrKuTT3Vpm3I2gjt$Y6XZLYc*&n-ORXyW!Z2_@;u!>~e%teTWA- zLf;^}NITSt&c`CZ30GH+PJD#B;*6|B#inH>bBI<g6<4inD(gEOzrj{dWw5^E7TK1t z`*s74pK*@JF`Oep^&q+s?$oznqtQt$!FOfo|0FX>uTwjXu?@KRBaYHIK%U{7X;!xF z$DP_1x`w}ztS^it*>>S~Uo6Etm5+J7Kf&9pz}P3jPC?})A)maQrYJ{?aFgM*0<b9! z>{|Zpyb*f-Jm`Nl(_14w$yhnPEfG&`Sc<xl&69kp1j7yGd`Dj}!x<@57kL>}?;qP- zv%Y~Zi4Q6in`f8)8dH5v)EHiKR_`{)_{i$okgr}c-TDsJfzh~W_Sulte{bNO&VQvk zQdz1fUyhfYw@T5^h_|S1i+w$_i1yTfP9iVWgTAA<dFB_s5k8uw$mFb5!2A&3?WDEC zoxZU&MOZQHP0z4Sq6juA1*~h>@;&;d$|2XAYPc)?NW588wcy5jyjL+y*(SV0SM~R1 zc-dLdBQU=$qxWNsVm2RpIaOg^F%^Ab45NZR23?a;WNm^+8KnpY?Fl`|e<s!uxgoz1 z=G|nkAov7(9pF$n;N`kA0~WP`zD99W1`n`KQW=2Zf$%ekS3!Pycf$EDbf-BBm46&^ zo%Y6(e-}-4XHZ`r>PmHg2EHbpur0z&5XM8DRb_SN$?8k^Y(<^35WXH^Q)P9e@v1Y* zago)V;VESYU|%a`luz{{Iv6sBWiPN+TLWRR{pC>_3ZpaM#_*KGi4HvQmAe7sHwNL# z*u#;J=Q&slWXLG6bgRPm-VIP^P4May)CqDD>q|29i_zYRj1v1DHNm8MGfc*C9pGfh zQz@H;bOMaM8K67-7ntW6Le0VcIZ7G&nn*+G-|_IR81BSRh+}263s1F->L9@U;$jWj zRnu+a;Vh!tbI^@1;~^u^?z0~+>jlDb)^RrNojDKmnhCn4fMzA&uPkTAuNe1*sNW7e zr{L~QI#tzzfuLaoA9LE5O#eMQ8oFM8S$=Y{;KSsbJAS%c*8^@vpd|x8T`2hBnf9^! 
zq{ju}9>nm{1l_O)`v>NuCF+=oK4{*6!SXy_pO^geE0483w(>acs*EZ-k2No62Jo1q z8)qr*=gdWljN-)|JjGNkgpu!u%E4SQx9ErDW)#90#ua}IyXUkj-YGBEAl)GTlE3$3 z(_w!L)YlVreXppDKBSyI2X(wobp#FO;`t1od!xQjQQt8<1+v*<y<z`7b5UROFQ*r8 zKz-+;zAXT|4Nn<#WOCayK6W3r<GgIt)t$#n>xsG^mDP0v>Y9zZ1~JNg%#ALrd}eZT zrHE0_euihlZyV~W2;5#7U0it@bzRS3zXF~@ssh5Ou2cs65xFM30gD0=c9i%7d*G`f z4)&rOlo!4}wyyGbcf4aSglroH?cNcv1#jmyNKM54_sXrov(OP|p$+gp7IMrVGI`hB z>uu6fheeEsjM7fcYM&YaJNGNd7CMg}_0nnc1LrMIa$RJEzhFF{oUE|w=49Tgg$;PC zW=v98rJd}TYN()C76@K8W{%xB!ziY9_=$b#an?xe6xT&MzXEO=Babcel;QrgA#YV= zvUaL_(ty-4;CsqvgnSX$W9kFozeG8<$QLoyJ@w{f)m2v}Yp*g+R!B{DR9z*-T6wo5 zzf>1St1NAj_A1KT26=526jIwDtvS-ZG*Mke<=Q5@rM^sZPYq7yrBeU=fH-+s+6uhX z`3fqj?kMXCPqEBBNj3GAX~s%-Ml<y|Pbt+GX9v2V-?md|l=>XuRq&OZWRxh4{uoBH zY)5Bab{U?jZ$HKT364#~cU%@otMASzSsdjVmPNE;{7O_aR)$XPl}fVXC8Lmf=8{|X zE0Edz^2x;~J1J(LSIpcO%(yO^rsR<w^g}({{HNPZTF=$1+4y~JcmqDY)SkVPWR8*c zH1?jNJ!9oI#LL^~g`yI8P>c2BzrdI<Mh)jvfhJ=b;0~R8j1u!PK!@=sy^eT_`o<bO zWAB^n9t~a7QhvCkWdQzLjQ0XH*F{s|H;2eu#4rYSE=>QAeDH`16TsfrL4FGwxGr+h zW<HXwMxc#vp#bXzcwYhcP3ZRmKGs5_O^J8RO=g4FGxwQS^jXxSv1{51j13u(m!t<6 zH9Wbv%}<Y;iO8ph@(g+XKNca}kQeYV0I&@~zhcFV`*f#%LVQo}bbk&wDNOQ8+M`7N zUfh(6OVLhVnT+w3VH7M##z7Y8tgo86$eq!+IOZDfBhBr`{9r!LMHxzJke_W&tW04P z`m`}Qv9g`fsg=R}Q!9xc$1$!wN81@OO4+27jA3{mG1d(*GC7WZn{*=yxGKR<OnKWP zUkYRr(U<a4zNyG>40_^w%v?noJria0Oq9`c1L$cCdM3)~Y5WIz8iSsR3=efz%yw^^ zyz&H-zK_~Q-j?{@j_vzy4UXe;>ZBX)KRj-_GJFR!<)QWr)5^{ixSR77v#H&^X-$L- z7os=u3DKL8(R&@zkjy5U8u5~D7%642GOGGr2lN1Zf=lI-e8RaoivW8OmCeU^L~ZpF zb0EU^Ipph8X22feQED@aljF^$4_&K@F{#%f*S#|x`|sVtxa}=scCH0&J7sDnIorU0 zpNT#5Kl{1Ce|K^#`A%tkBmIDMgXb8><YODPHH~jH<}n!8UZOlt$aHtedlTHpFsfxn zc!sXxe++GkI_J3{i~-KzdH*eFzh0mZ+L_f|P9aS&_?h&Ub~3%j1v&=lH5-tY>Ut9K zn-D)8{4c9JtJlc&7}NvnNKISwTNt%A0#2DY+vq9YdD_+pYa<KOwtvcF4A8d>_-0&K z64SySI)MRSO`eZ$uYf$zX$<&|ZCdjIi)c|*m6(`wdW{ryVE)r9HZ1M!q$*?Al`C*n z40ZjH_3?&J;0Kq>m*_Hf?TG91%#*GJ7k!((z#B{z3Z)l%hCaex@V!F)vgQ>ql}swr zk0;?Z<IUmiXIAqzFq@bVUT5Y3qtAHq)+@B-9pp9P?NrGB|M{N`v0Askms!fAum<=$ zT#b*J?5P-W9tu_XJz5=C7s3~&H}5o02Wbk}G@F<(79yoP!ai+a@)g$LkN&;jF>&hi zTQHicri$84o2_6{7A{#nbIv?Pb1(m4eflv2YGDhKvRBPOqB&jE9`xsmKPZH?t_HF< zo;=iEc!~h=V3ek-u`HsIR>KCG8tQ7QD#}WV3J6qEQi2UnTU(pY=exSPHfCIEUhX}; z7{?BFmR-B!bjIdo#qVB}G9MM1Pb^NsgO;8FwwGX<;2aR8R;^lTwbH8ZQef0H8fa-Z zQT_9!!?a|qdNKw&Iy#IBn-Widx|*OSdQ7Oo9)<r+T0@t9GH|JXzM3$a+PeCNZ8iRU znKOorIpfLF&x;wtv;w(2n76#%jIZKAUK_@ejp@w{WgaLnU3>NRXWwm@UQ7rxo!Q8o zWS%nk^h7OMZK>KpwI^yy>gMYG)Z^6K^ZN0Gyd+*ZFHLc);z`AZO4k%$D|S$NsPs#T zubid)R@q3!QDuV43YC17yDDm`)~bC~LsaLe=BT=Br138Bw(%GRH-!X+G=&@mUBv>0 zTM8c)#wi&qx+qRkOjkOnBvhWMd{MbjRbBCg!f}N)U>Uwmm#*LeJq!CDIblpAmd1{M zzRYFOtNwhMJuWSK_p$sNTh+h+RASu%)80EziNR)2zb~!k{P7cl1X3n0YWxI7EPE7B zh!ikGhmYye)6;EGUw=P(_U-S_L<%FtqDYa{Do7F;9V3j5HTiqc?~q{OACmnJtWNkF z2xAiK#2*wD8y6idiIEC}`JyPPFf>LaP2dNG34<c|(u8Os6DXDhMOej(CJ5Q|-cjQO zVo@+(5EB{~DU6cxr4qiC6_-dXij}g7{3Q~8q#!DxI#ul7lG*Ts1W|lRlsJJODCEb* zqIdyPg$em$L9CQ76bo7CqNv)?eJO(^O2`ipiG?f(<c^IN28lvM01+>ehVcWetSC0t zlrM>?ez7sNW?}+mRgQ>~#7A);LL@Pf0@Q-48W$xeAtM!{DiK0qG=l^|LPjdFij@XS z;-pNhC@NGe#9NF|5XnR#JX#PFD`eOrZ5UB#lq5!I#S)NW1;OEQ03{rc^pT(@!KDVb zWq<9M@s9RPv?N{_6CK2e1ooX6K};n5NyFl>XDY!CVuFQYsel<9ClX2tWV|3Iiiwc~ zN~DY^Dnw!hK9R&EFk(qms8yIy91V|#kS~lHCyJ3ou|ymvh!F__K`)b7evl+eDiB3c zgVb#!#z!z-6d4!Ek3!MH7``NgNF)`;#PUV4{18c83^Ryo&lm8C9feXb3_nH?&ySP@ z3n@vkC^iVG1fjxUYYvI=f*>hh5-k-;P$J+_`*L|>!5|5IU`FjPjTZ`|__2}@De5NV zk3*4^g9Bns<U;XmJIR<8z=#nDT;Jh7ZniQ+8SVv~6(x~Y^A*b!l+=3s1Q{F_JFarz zb<l&*7qQJRkcx;@Y$h<0I4(BKN-ByJQrr6>G$@Qn0IruYf;0?_5Dea9sSq3|<XF9S zMviWA(G+JA2fQf{>Q}S()j|iHD|?qwkLgj}zd;(HCl>RggeVqm>LYR|;&Oao&A72F zhbXG^@3xc`$`6T=L{ct7h5aR33C9N!Fv`J!L6<~3@>u~yz`0_6!zN0b1fj`;sBx(o 
zNkmiwCzQAXWKaJ|AwDca*TMkJLkugH#8V5hc!r9`foWt^0yoA*TQfalVh|ey>Plsd zDvB050eM+Ws8GsAP(6_oqC+Z>#(}xTf>8VtWe^e-EQld#7#$~N^OBtOjs@GtCPW5G z#G)XOB`U&_9~&Wxjz&35b%J0~j4()w-dHEHA4n|V#|T5?!~(Wug>@2m5;9?sfK(BP zSJVPep?C@uN$IiP^iok!1p3e)Q+ZPrxQY^NhzW3K7%O+FG*t2r6^WLJ#b)OG5CKpM zX6tXpi9YlH3nHS|)TE*Z*E1msCF8huA=3E?g%Q+TT#@|vFtn>QCPCKE*wz#h^;tp7 zR;RjIWB@@RmM7RYg=`nb{9}sH7)f08Kc<Kl#zcx@St-P(;}{E44~@&>1qSjR<JP}_ z&wf2v?jdfkW}UjV7E4&US<Q7dX!|F?wV1Ld+}bS3@=UBy7|HhHFu^z>L>@>-v<<f8 zQ;%Z_E$7f77}_x~P#3AL`#{K{Izb#pfXlXt9OvpBgQA4vG2W8CAdCW|vV#P4196<X z>`w#z0bLFr9f?#R<_jVvanKWxFgh$DRuqIFnTRnP=;mYY#AkYlVrh5^=JK)P%nTsh z%{7=mhyv>W@^KoX#|a8MzrJ+8^&c)J;~`U}NQR=H^J^uL3w?HMh3*3B2^#*^j}nAZ zYo?loLAYRn6eH8$r3S~sB;#4OMn{SgL<;{XU|889XM#HE|AFQ#C)8663$kAv1_%so zF&Kd{mSglrZP}jnH?+FaPz*IHK@x|S5pX&kG_o4H6NF)`lvo9=Sgl^U$Z3IIf%X%{ zhG7f{jY9iE!~}~mh%+YEwir507+V_~z6oPxWrbf8YYQ4q7?U_B6B|43NB+Vz<eON4 zdGN>Qo@{N{N8E_5EcuKjmzN)jK^sFU!QqF|69VWNBO(IWCuHDvpyd4Sd=u-AAq?M{ zyCdApBo6GwJ#=87|DouHj5k!{7^)iEefVa9aiQiIz3X-bwppr~3|)lf5DZ>Hl9-sd zXlbnhUp}(4{aB{oaGf?t5*mfkH26<!jpl=L8C(kzX|L2YbrkrnEdN5A5{0nBl$DOv zX_$VJ|1blnRZG<%r71=Ty)*_2AKxD$kyRJKZis*~$aZRLBC<RE{BQD7P7^3b(E6D{ z{rdFt9nud?iJbWDgJ04GO@f?B?QllJT8e|5P2w!Y(exV~NWY|pA<8mJ6ofn8L+CA7 z7%dIME8`4ajX~ZxPOAo_SoV#^Av6T_6;>gnz6S{gy(R#tTd|6@DXV+1qbMofq>w_6 zSkruMaL<8$-oE_+SEH}DJ1IvsgBU2DW<!Vb2T4WZSaW`7Ydh=Cj2L<*X|`0(AX2ud zNd5@S`~YZ_wY4>Wgr|4kp7f+47h9wk{1#b!iYL959s7Q3x7HdEoTSB&CQ^G2Mw5hU zg`6ZDrfITtBv)g@FcYkfhsK39z<gf}hHOn)71s$Ne5(nTpov@xlLY1yZGtc*`z>HK zkPhRJl~oOltJD1s0|_Z5G#o1{ns%|k*dO*y4iKHGHvdv4F=e7@=kHAHTaD)73j#45 zbGknSaV(=zyFxT^jEUuH1)a{yie`Pk!Le^<!ICIbDJU-%N`>YixLmD@B_u*97gu|E zKJE?Ua7;Wq=%R0OIssP)8hOluh34|Qv1%9#D~Mom>XsZ92cf1KAOl5EY9dw>FNVtv zSQvq-r>ix2r+NYvB8mb1xcRIlJ1EA|fF%_tpmKk@xG63_S_1uurb<vUFoU9zjw1}o zJci3?WmP>vW&?sHtVoinbTt&ZSZ-{p!4*-VL|Z?W9&sTdqVdkweuF##g+?aQN@Cei zegFkysvlL&dNNKSdeSAc28?iKdlMh%VjLq@;>KhUHabQU1oH|vl2fX#d{GG7xgd_} zA%X;k8R$TkH6T%U;((Jzz@k`dF446fP``1BgsFh0t&l<(%><-lRZm`{gz-?;(ZS?n zdhP1j$m=H-MF{0}<_CiF<@AcGtr3v5MOKU?2A#rP0NspZPjD~UM2NrS97_6<Y`o&S zil>a}k<i)G$Datzc28MeP*sTOH0%O0i4`pD`kP~5LW&b&LKP#dha>@lIH|;x`bdZ% zPAoOI<OhtOV2WRon79*a0Wdw7BEOXfp~4zmoISjHdh`J*)q@<!CAVZb@dKh@IRaFo z2us%>6oZ<XTh@n0P)txqJ1Q3s_MK`Wgn9+Uh6(IC+nX{}dt$Vj#vi_{Z*s#g%MNw1 zV%4!)EYARn4WDGcRTmZ}_fxYKIP*zYbY+$_G+^3y<x2%<b$K$NW&sLW)Kjb9B_d(1 z***}I@C89I;y?)u6rwH3G)Fz#!K#Z9A%}UZm6gnxB|{K1HJbd0V55Y<t44x2h8;He zfeA7o?8xcSe-NMSDfRV+U<o*b)F44f2&Ne|MAmO?Wl9Gbx1ikdt^5fqnSH~#-yk1g zMr{lpj+CHBQHU%Sl!EA}K|9;7G`pq>*wjNVM6tlgVg-|?+>F9mD?uRY_A&_q458>@ zjNwQlw~LZx#M#e#c+YxDU{w-KaM8utaf-F3qW+|e2*E!Ie98<uVnF27Jd8i}(U0ry z;0oX^pB8ffIRg!F<f;JSB4-%1RUMRR6lGEN`@g`MNX%JhP(Y->DprV@J4v&8U{Sw_ zflOd4_uB{y1Zxc?wM=Tx{@y-4eFyp11Cs4p)zym%wIqrD+x{WTj5&iCHHBp@Dma$R zFZDo+7Q~W)8a)9T4%H5waGXfO8jfHeWW|wuS|w8t(L(hAi<iWSVb&qDIs_SJY0;QR zQy-(sQg>$SiQ+KKSGNkfX>DJCb6;;CZ+Y#l$Og>C|L)w?DXe^`d&pB%2Xj1H`<ED| zmPFv}e-H#UdBw6urv&TNCq0n~HtMjhd=^!z88lUvi7|&GKF0LllEw!cR<D+I%HN%* z+Ir67i1w>)Ply&^N|wCXf3{rKhZ=DwT1_^at1-Fd%=Oe7?D#D&Bp5a&n5KhCd-T8{ zjqx3$9raJndR&J<d@}(>Vg#phD3X(+GSj!Y6TqNJXfU%K;S7!6Zxq)Koc$W<>A|im zpQ4Cx0;+q>o&=2%f(<mBAdHcKhSl)wYQbZf5viy$A+YO7;;CwKgS;8%%Hc-i>Ji1< zsUF7V%S`vq{Rj4hqC>nQhXh$u?G!MxvFb8HILf?UDD32olhSp~$7Y={$>Uq`Z98|g z$E2?--x0s{L&EHjgTt?3=N7}t6@!^Nc{(ssk!cjueH*JzUHKjV0aWxqfP$(kB1NGl zESvgaVIul>wEhIcu6`KoG}gzrM^An&wr8CTW+8Bqk%13tnwTc}bgv&2I}_#*qDMY7 z-%z;8z!m}wX>e%p&Frk~96EO8+jpuT3g*%M+<brzUzRCE6cvmPU=|kC(UKn))X9<` zD7B*>yaq}^ZT!>^j+>CPSVhX*PO(BbyTTw_Fjeo$w{a#Nkfzu{zx6<_-@s>9i_7_Z zgz()xddoYdSw}l7vWSzKXy1t*2@xwRnWL>n=Kkb*>+ex3bD$`cOq(K^f0D~ezPTFt zKm~6%EBj7-GjHhlqPR%&8X(xR;KQq(p6@o$r$!>a85mWH>C@kel6(JtXGcyee{u-^ 
zuky1_!{3>Ks$YM%fi>$`PuCpGdUjcvLo$oTrr$)x6q(xoPZwuWVcU{ZWh5aXBpo=Z z7zGW6HKbQJqqB`U*O07Zvlga3$ki!p#;$M=lB=0T=s%WKtYL@adU_Pl&@J~ga+*}2 zkc44uT%eRSlGdl4)T%LS>?I2vA0r!azpWK!0A2aER3l$DsMa)TEDgP^=e8OQ*^?nX zG2?|FmxeHCpsWKMx@A1|Pg30EX;^b3>-!?68b{DT&OiyYTkgVaQ5H}a3{fB2Bl!cM zp~@05fHHuf$o$AMD3LUMkmFNR`=)_IIG&tKtfLIs>9VroOyt}fr))=pD43sGk&}jX zHpAdeRxQY0sH7orVwN3ZgJML{(pYnb%Ze$}Zx1$t&{EN2_=TxhX&vtYv<0?9Q(RD7 zOblzHz-)!a*m_v=@2U(e$Rre~N<hIXj7UK~a}FK){fz>~UCtiHCXi>NcJQKX)&6!i z@9%67uw-OoGog>iV)TJ(D`D#-2p&g_2;GkJi4z_E841`qSesl;aMh-pKz7b9hxI>V zh6?>gO<o_uV6cp@sGG92sY!-vf4qE*t<I@2gqB-GNmx?}tI4ryH(5|fX|b~8%puhI zetT45Q5-0w1t&00Kz#$VcQc9Y&NqW@*b}BI@*;q7Jw;Jbcr-_9tOtn|S_eUU5k<L5 z<01r*x7Lyv^1AZFB#}_YS=%W=(SRwIGqN&aQfYLoQ-=;UGISU}p#w5G!Tv{sIp%XR zb4vhOf}B}3$Q00E4}*Fso3h9Z1*Cn!)j*D5*#a1j>U_%jr*IY;Lz0JKWg@%cAQp=& zFzkwhSSAG5u^7-~SLT23#3h5Bk#(?fqqA(Fl}&oVO;QZuv`9u~;;!WiU^U1NtokTZ z*H|wSr+EUOAS^!#Vxj{X%FINDFzj&4U=YV&Ck8_}{yO5X6GK`C!zmYR4LxBq11r-+ zCxP`<2bf@u4QGa{<DO<2gEfkheF%mcVlXZxXT}42$UO)AxG0!qAr4_FWfgC7q_RtG zV9~BFnCQ$FicvW(id(?*x1z13$ic1`VXUQMJi^vrt8wI}HW}7z97a7X+5keV(E-0v zR#C!GPIbp@N(9}CvnL5_!-|AqN)#=I^;ixKkT6_vSV8WQVITndEX+oBqit=-hy`ty zeY2xC>PG>+WFZ}G%&i#?6`8&+V=$J;a!!?*`eOu9Ser2d{#MMS#Brk-6Vo{SiRlL( zH`eXOz{Q0Scv0zzd*~S%jm2GZBGhOz51G|oHsz*;RYVO8F24<$0=d0V{5J>J@8r~o zQgYwZgaCRtnK8v+P_Dt)(TpWJ^(1n945``ZGBF{yl(P#E7&r|Xb|{FI`<H*;5mX(@ zBnV>}E(&3wL}z%C9N;`Ov9<mEi$K1K6@q~UO9<8nNE9J&4~;mnPLPQGYOa710#{sw zE}!qitxch&H?%r}y#NXgs{w}^GYm*T--5x1o$ZF9uaI{E$tXMY4*??(M~4aE&a!6z zn^1Zmn9ph9m0WtsS3q$z0z7uH8HWa2q6{9N?@nff+7U8DYcweteri-<Q1vo5)~3P6 zag`fLZXQ{sYM0|dX4F5%aJWHi29ptkOG400Vo^1jb0%IA6Cqo&g#m_jR1wAbX4N7B z4l+pn>Q_?z&E>hPErK#nJj;_LDgNMjN3pCQkL}PxVs|!#79_@^Ba4YK!PNqpYAHvB zO$2;N%7CXd7~+WEd?sJpAm=v?$aHj!$iJufdrH>OQ-4CrKw!bT#J22jH4p{^MkLl& z{bp+o2K2^)4_U*bk!rMat%l{Zjfr^H0L~}pChPHFsvYdCm%YZ16?xuSCYSRYt5(Sx z7)5c+#xVvJ>&vP62;fTo{Z45p1$6{2J(q%euY(y9^yj#Tp|o=R<oCJ=vuIkdc*?lH z4w@XE1d2oAH(Gx{lkM~WGnxzwjUy(cG>fDc+qpqmcJUaOpjNFRD_F-aOMD1JC=l|+ z4IJ4~6~=_>!@`4LBAF#i0*OLGgE6sj9FHGcdokVY#xp@4PB4{uI)!<OBYd334v6t{ zlKMN18$7|r)~;ix&h`$DH~_^}+UxJDk`aQvuGuTJo_Kuo!Cp(_{+wSPpIJ{lKE`0L zj`hUj8$0&eR8M?_EIzxQ_z+oq>lJ^-FGv=@tDgMw_=EMtt25lkB<Jdh*O0|0$;y{~ zP9Q6lVf<uIhgy2EPxPHa%}Z03|Aq{H-FSSzkVNTmN)nr`E_`kkKEo8r^4E>`U~@4{ zVLkZk%HU_q+P`l8hO&68df?+*up}zNp`QHsCW?(8T~9nd!(ihl)f4YSz;V6S|8xE2 z@g?=t56kY8DF6LIf6mW+o4_zz<2nARTYr2{!sh=_4}2j3$F(v2&-{mPw%F@?J^9f! 
z+3Tk)zHa-g%F0(9$HA`~uO^FEuO}YBlc<1(^~9sTY`ktg@hFdt*RLmDQ5N62o_Hl$ zylFl0%CdN?dgAd-BU^q)Sv;yz>l!P28@uGs^dBdS?@~{G`Bz46^~5)omEWtLczk)2 zL`4j(Cq7J8zF$4@99J?-n3w!XmO*wKT>G`&H}Yi5BUvXkUWk`jKQ-=X2Zr-bGYn)F zLkbmc?1ecJdr^9FLC_2FnE=KC0!q)Avf-Ee&W)D;ilWS!pAHj-4ref-=@5$;j8E(x z6BzbRA2P6){A&j8a|Q~cPa4=uZrPN5*MP8>%WJnI;ly6m5AuLN-`~{9FHc3^e952x z=^kCDA5%WM<ay%j#AELg`4vD9lj_FHVu}8A4X=j}<nf9BL;m-A9G{%qTANNBbcRW; zlV2_`<lni?l*NCqC;yx}`Q>nH(m)>er@LGZ%9|@o22YMh-8>6r@sK0zwM6zVr$@T% zoxX6RYqjhh{gS;hW$%#Ff4od&<MQBo^n($ya6vuse*{`1P!4}*KdxQm<%G!K$>Gc6 z<>koZ-2sc%F{1Y{=sAo({`%GE%N_$gI@-y~lg*l~=w1HY5zk12?}WHZ9*#Xes?$(E zl!rUicy_GuOyiKBmm3+c@$@^rt!<5G6fB2hi-N14QF!$;75onu@kR27N`=r8_3-IA zp#?FYqq;eL@!ga-P;0>4Fy&R-3a1(BT`n3yASf;I4gC>+)coUQ50?f}t_F#2xZz`F z*`0l<@q=-Oy~IgeIwRS;V*&TR@Hls$bb`CfqFEg1IbRkp=gV%lYp0W^Z*i-3xR>l* zUVd0%?fCyGAI3lSD!Tul<x`dAyY=Kh%cmyG$1ncR@<C5#uiU5qSw4Bc|8SnmcS?4b zfvdjpWKX$Qf5WG+nmj>CWa(UI<#Xwd9pLWT2f4eE49*f+J9j(Bh4+=+<?Y*`PCEtE zX~$D_+Cv^MZ?F71beG4=+eN7kedY0T`lr{SqdZ<t=kd=uyk*s4<neNPK9|u+PS2E^ zT>A75bKKK<IE=me*Ri}!5At4(I5D`($HpIrZA^AqbbGYHw)AY(d%O|TnjD#K+xFz- z27{!dj>P=h{Wxih%EMLCkE@O!7p;0eqt}u5XVx5VbZK3Kwi3%%R@o`i-Vd{Ojl4gr zV%)$1-P0q_*ls;%s<`WH;;^HiHH*&lO51XKK+px1P;+yg=^Fb@9gSLA|A@5oY1+`y z&p%c}SfCL;&ied`snacE7Og8iy1q|{{?_Q!J|{K|5TDg6U39&z%8EPFJ8P9kXNGGx zJHAm*P3+LRZ@iaj-l1Kc_BJr+(mVTJKi><hM|_r8hrLT)Ae97d8m}|{T#A~)*}0Wf zM;4Ze`)~ZVq2kD=@ng?4(a~+zta<YmdiqLgEo>N`f})a=va$*;`UD$ae6yGQ3*EZk zKWo;x8vGAe!~fx`@4N8-ZFr;qj~6~~|9^Tl{(pQm`7ghk{+C~W+DqZzu>T$k=>B{C zS5u(bzt_JM_?H6zwG?Rn@AWSQ{-wZwEd^Tqd;Lp+e<|=U1^%VLzZCeF0{?Xs(EIoL zmjeG%;9m;-OM!nW@Gk}a>nI>!J@Na_!=rq?5v>KGbvv}CL;jo;I>Nu7cTaa0XD3Jd zP91G+tgS5DTbP@fwlisK+@_5Y-_Wpit5yaEE%o*F^jfrN-W;}7U0od=I9i)Dfwvp( zcKGb!-^a=TEFPfs1hmM2797yZ16qVYixHIR!WQ&OigYQk7mvMw3oh4a?v4d7gfYVy z$=>UR%h$8W?_6rGUX-P7xP1Le-SFylE3$IraOCS%>ZX^kQ>hy+U!P)D2M)W)vc@_~ zSu}fFW}_N!DbRX%aDe&>y;G5@3(CWzJ8uelwP9)STDw^fv^M5HOlbY;cG0ZD8_V1G z-KTzd((Wm$R{1Z}S{+>&ti8Y0C;qN*udxR{KXyBkGWphh{j%qeA_wH%DnC2@=Cr}- z;-<&8Mg&i&2>kixyl{^1NS;oiSS2XPxl!ephHCT9@uRwq))w`B?iO%6W^CyC>(3Rt z^}40h=jZf>HzM*hHdSuT>nu*+<9_FS!2{QdL))K;@7cwTeA4r3!#fX$IbYwl-KMae z6Z3dL(tx|?nhCBQ%`49}GCs9;@QS4euQu9rIDc)vf|=SZ<uQ8-8=PtTN^|$jNs<nY z`-TnO)=F^P*(zk;Lbr$GRK`Bw9pc|ex6r=*e)ja;V-556scd?FG^69K{L=O3RmXR# zU>eQes=lm6x>j*P!|+5a=Sa2X;@~vi$e@z)0TB^_0<pn88{zbROyH-tOI5@ZH}P5? zFIP(*a;njnGljc^{a@{G@;d)W>ZDl*emrUQDA3#LzV7EqH|Glb-ui2(V2{t00eMbR z=Fo<xHU+obHa+Qmcj-On$Wzxh{#AbGX80?S$B)9OPM)(ucU;R4xF2h!)Z;}f#g1e8 zYGj|E)bLTavC93k-4sl<wKb2-;5RsLk{32)?{o=Y{Z`0<jOT(YS{08+r=Kr0Z<Bs) zSMJukXPZ0k9X7RLu65UuhxhLgA3VmNmj8C&-lHoUJl&_b^2TmymtMDD?~K2(Mz`Gq zrD@F`#x~ImdX~~PcuDv0$c9^ohfmPFs`Y$LmHJ}S97bdJTGh!t7B~8EV}shvuur_! 
zzfP!#Mhyx4b}>|#G}d0M_fjPyYFx`(zlz%5oIQNPeeFxVAB9Su9H^*xe`LPLk^RkY z&)*eLy5Y{9DT}YKJ9Xlo<G@c(d_RU3Tn`;`X!9eLJeNN9d%W(qZ+Ne7OO2ghdn?)p zOi*$=`99#upeLbQ-_MV-O*kU*oV!<WQGZ&<q0Ac+6Z@xO1DD1(xTM%iv*188g*F}9 zDGxf*_26kU&BJ>Z4bN?_6ux)lma4laY_DF+OI=%N*(&Gph+QXwSGD^T^k(&j@Hmad zkqTKV>Z@JswaTW4s>U=O!YC*AKDtzALjKd7_PZxKw%pfX)%+Wa8Xmd*V*mSxlZ>7` z(7biy<_!0zw?2N~`zS1G+I_>z&5leR(eA+4;`m*X_+I;4lnhs!6&>EF>OxnQkdd0a z##L*D3q(1A-)~k$2zp)>Hyi&{BlF#jhIjp^DfK(CSFwwrUFeo?%>u6Z^b&bLh>z;t zJN$ai!{K)dLo}bbe&~A7!7t}f*6Fncj}x!%>Gi5A&&J@>;lp!I9K2w&c(32)4Y_TV z?XMkNtaA6V`H;u{M?(vZttW)+&F?KZrO+~L)ROiR3x^|`x$EXPIN9=v^6+`@6|6`A zxY@SUOAfTRJLzZDUg@+rxMz9%<QZFr4?erh%~x$s;W_Vu6W8aIzOTGkSpKYmZ{BvF zDLHxbt){KLYLGs2K$8=_wT%n?Zbp}nU*z^)Fz9ZMwN6Q%!`-}egJmhxl!v9aOm30I zta?yswQJ=?`>au;;!6y~;;JVuZnxHI4Zpm;ZOYPTx*2o%o_n?k;<JuyE-wA}^4jOp zw$mP;6z(~FKRajCsIm;t9%h#!B%+_u<8mTwySyK6!M9Xb)r@pBX!6<K^T{67QD?^b zN58cZMwEa1Y*Dc9gzfscG=tflGF2D;btt3x!FefK36HZ(9N+KhR+aEM_J~($_~e$S z-MU#k9;)n{@S(?)z*jS^d{3M)@PELYqr1elKx^K#Qsd1h3LTF&Je}#-x4gh;!Iotj zSC-9ds?J;*>1BJjXKG+&)CIrdc>hra2X)1n8}7Kwo3&hPSz7<}yN?@ZT{?TCviQy7 zi<MtS-VKatRgw_#IM3fTBgJ>v&?Z{V^o(^I-H&!`yUNYjc}v7kVaf24NhazShXgxb z>$f2%jrTNTP79+;TRH~(JY=!6xb2R2*IaU~rZveB?@^zzt*zJj*Sdb!%sr?46&5di z`Zd9&!+x)}_R^N>tvXvYQTlYqug~6jUIU{a3lnVLPYL+-x#Z>16F+}VO1pO3IrHMB zO8Ys}3RKfpj`u&byNhtk);{Uex@u&lw>e*#r#|aqj<)Zp_hYog<(`*a3M1!gosfDo z8U9(p*zNW4Xz}qRxA=R#?%FR`DY2S=I*-|ulG5^!{v5ZlBMOFxrI*Hg6c&nyxAwKt z9W%wg!D_3PrjHDm?oF8V1AeyCG8YEsq+a*STh#1y`J*A_?@uk+a^n5%Wrbf~9a7EN zI?rH##ADml7Vj;lzfFj^u-z;ArljSl@75Nc#l`lILo!uQCkFYK`nM5&_IdGnkHW?i zSuI1;Qf%5~W|-&aoPJ)B@%X3HrO$UJ{w%$EC?YHA=kPr%x~pgGNpwuvwyUjy<wsrB z&UT&_tz+YDRdb7@y}n$F7+^omb3*){Q9=FgR$gpWQhei5-ld9pDR+x{H(9n*(|F## zbJ3Y=X1f)nc#bkwR24hwpK{T)ovx*2=AP~ws+i?J<yd9Hps5!F`x>5hoI1MPIDO3) zt-TMI>EcXX{};n;eSfS747_{KFX2ja-(v5<Q!4u}wYod;mcgaKrgP?54J=sJDXp}? 
z@LFM}%A3zqM(;Qw>=l*fCGL>v7guWE#3@Tv-8kI8ty+7bOXKGWTb6C|I<z39W!gql zi#dmj4qg1TZr-)Bz{fw28NV+nJY0N8=f|}z4PB<qX*Xq0n#+N<*P_4cUKDlpEODO@ z|8qoMgz#bc@F}Mq)cxL#bM*SQJ4ZeLLq?Nz9WS}eiTT-fG4+3k@DseKTPcdej(93O z+^VixcH*MSo=NkRrZ@9d&L7ZJYeCoRjqXfe-mv)npa#Je`s#j*@2P26t<`YuHBvKr zLpy_B=byJ~9lzetycN!7*X+=8mEf)Zi6`4ye905(o%__eNmcr{rkmUCZk*OPR$FIy zH(g_!Upf=!=Qs0tKB0M$r;p=Z%LQHLFFN1#z;{ijZtr@yXr|2QKG5k{H(`GT=b_C< z+e;>|cCbHrzcVkZMW>XbLu~inUTX6?;-=jrUgM4nUqrOsvLMUkm+gDg^CPU<_4Nrf zo@kocrp<~HzV7F?MrOI=EeszYZ0{NH!+fkpce8SXu~sL??X_Ha?u+$~^$s15&087$ z?d8z1Im24VuITtM#=vB8e0$#k<6=HGm-bwBJ8t~L*;9P?^+{}|)^M^>;MGYRrAY~k zTf2|Hkf}7`=Y=y9cUd12&+IWJ@^Pi3<kiB;DDjNEh!Jn%!xaYFhIMxQEK*C{8R()L z85FXuy<oucR{^)~ZWcZ}92|U5u}#Q4QBf%W;$5%i>vX;Q8T$1}h)e7JBj~a3MRkLI zi}nxiyXjH7Px{JJo;yFN^!nP*wde7+$vt}5U3QBd+Q`+S#M`~q+*FVLVITcN6*>;; zekg8;`knoQ=N|j+SCH*A@TtzE{<o(b8Bp5g>F_6h{1G$Wh78+1bK}rXX=S4n|1uvr zyr*PzWQQGNl3I2y-aqzR(Yvd=pB8S4eKvA$w~~m$UoY)K^Is@8obcSScf0qE_|M-B zUc3H{pgiF1^QRq3Z_axAYL5N3vcn@quRr8B{dxAr^&e}+%YS95463r!)~{?i;oe{F zr`A@)<&6C9^@{nNIPJuzcFrju8V~XOxWrApd_((-Un-Z*`*yj?_iMnY@r6Si4&Kw6 z{o{U@XWbth`ZD%$YR29tH;leKdeO(>;k^bCci)F(-Pw2V{jI5ctZpl947}FyO6K+O z=#rbG^xNLh{x$lv>5|o_rdZrR<L%nw?4`j&PW;tv>B;rUH;*qZZG3F&PM?bzrx&25 z&Y%0(So6G8wZ|oo$QhUQZyvj9bwJ^Y>$06`@joILTD!Mjq-XYO@#^lImu>S8UjFHI zo293dik5sodnn`Tjwx%GHFaFQE}?QwTS;DeQ|0&-y$fttO}_qlrA_DA^Hj(5nLE0; z;e1i*)dg?XB+Ys7%{?{QTWNNl@tIk(f>%yEsx>sZWLN8?>-Qf{^}n<@C1~@2=}s-0 z&ukcTdq(E9r&-Gn^0!`53dydB+PH1f__CZnP0V*T*)G{(a%RW&7E?cN=}^{j^SD8A z8{J&?Z#rfEJ@d1N)4J@RlQyhbeq{Z%*i(5g^;B|?Y;oN;`*QOB1`jUp>AJho-oQrQ zy9WtVcZIdNTQJ5;_n^vGzkJ)(X$NxWJU%q7$l&n((Zi3v?Ua6m`oA#rP=4w;_3U>$ z^O_7<9=O2n!()esV<JorINn|J-Yoa@9ACA`!9|B9yqUba`A6^cesnMVQ^)uxMT512 zhGq=W>tAWJsZ=LJyK?OJ?e6D}O?<Lr%h^y5wV1eek}H>^6L&1tZPD23ul*Zc)?RBd z)3o%ezWajM_1pPpcAf7vL9P2vyUY>i=C{ZT?W%E&nYl;g*uO)AF>jVX{V;6$%;hhf zug{zz%q?<yG~9Q=RTs04@iVlazkU`!bYR1KZ(YNV4*0(Ad;1TkGM0N;*t8k$J7n$C zuM+RX#cE&9PTt~ryT!2|k^Qb^bZ)=jc;4w2%Q7+(Ell-#{X95{@20S5LAmy~Qm?Jf zLv>aSb=hjXZ(%Fjo8Q*W4!=51r_t66+ly5bkE8^gRJjqIIeLF{gOXwSS>@fH==Pod z^2GI`q$wh;4i_gzc*UPw->Bimvtheq&Ce<c`ex=Aeo2?~SXg!Q$CUmd?^-(CAM9g# zK+DWkdEc4$yEbl^)xbc1*Mq5vcSF0Rjla>p+%Rgf;<t8PLl&-_Hp`_$+S{Rv&HV#< z<l3r!Z}4QRPwRtMKW={WWYMqL`PQDH%~$y=M4P@T@cJ<JafiO=ZIUV;40_o!XmzGb zM8(-@H=BpqACGA?C*h*QsOLHTi$}Uv`S&wPFZ~)jD>bLtdxvJ<U6Ym8n07zZ*6@{) z+xS5%cHS*Yy|8O!$FBQ^y&Zl1#h_hN`uDo6_ohm0)HP7ubLRb*uk6kx&z~QCYwcUJ z-4z2Cjv4$s*~xaO(=_!3TS}rj8mb+i(J*qeQ{Kj6*TctW{7~*-?%lk1zb`vKk85MM ztnu18<BzmYZ&kRQ-{*Y!c<UUmW5;+uZ)^&23pW_O>{~(?3*Y6-GDdy+X#7H1FTy8a z%H1#XuLr&?+xK|P!1?MY9A4#3DD-5)4jCP~_S*KztObLEYy=f|+9<6JUwL@*Vby!b zc}MM|a(DQttLd)n7t^ZB{K}@rGbZkr7N2c&xlnu2)+5_3JR9pd8jstQ6c=*lo;Ghn zz^|OEF{{qUUTE`i<B(p<TD;Hdvg7Ba(T#hmyz?0sH2dR~dF^^UEOq-HJRqS%MW@z_ z-R|#h=XdnZ%Cc8yD)!D@*Hbf4>w})EsPDQX16Fl=U~ihzdBvh-{eM|ouQ=e}?#JWO zwN1v3nj7D&xYHGD`>bu=a~k(7aZ%(?omMa|t>{_PuOmnN9QMjPz5Ae#Mk>kT9)j+- zvah6`xtirL#nCPC&Zu@LqxQ}oH|fs13#TM^S`E|Ky{`CqYn{(i6SnrbE4tWpk5>G) zk?T$z=yGNXFX>m->K7|?+iKcp+D8pAn7^f2$cz(X?{~L+EV1-HaC6CrsY>cgOY(P5 z^lrGJwe{|nUfpkaSPhH%>*Uu5uQsQJ4lvBxb!ozt2GS*h10f$(lpj<7-(?=)9N>3s z!nFyRy^d{8`VzgM=v?L(*9F=S`FoD{iMAOVsNHZ~i(S1N+g^+3z5Q@{_o;>71xIX- zUSHlnIeB?efZdU#QBP0t^#{InHFdWQSo*02v$aiN=KDo`j@|HhxUm1{FCQX}7xcGS zyxFGtv<cdqI{HPYKQq7f#E;qaRH?)9<GCv9egseA72Fymm~m>hTPJCzk^08_Q~T+B zE{;7Ny!&IE&-F{29LA>Vwr;#n`Gl=fUfF;y!nBW5Zs(_Y%*Z!dXP@S_x}>c5!Qc~9 zOFUbz^Z)8_T-44-V7)Au*R<E?0cqdv+io|_Oe<SH`{SLNgRTcVOxiu{rAp)0LpqE# z_8$6qXvNsYLUYZIZC14LGo1Y^c1M@HO&7k}vG?w<l$)<-HJujb==5w*sjEkmp(5iB zUU`aZzON3-G%GpzGX3g?`xmb^b(~ldtGsRXP4||0dlNN9i!T?re*O^Q<hTDp(~fgA zQi9YD?ojL;es@F6{a+`2FSL2~bAslyW7%VkXX+32IOr2^x8mpMfg7#betNd1;iAX$ 
zuI~{$f4?!{ZE{SrQQMldpWDqntDA0{C2u6jH%EFteb;y8iIG3{t`}IIS7^7oD6r+# zq8k;7=l7i2xwpwQ!<8{d8hH+Ea8zPvE74sv?n$>#eYCc%KYt|oM0wQrz`c*JE6l&r zuuZoqZQXe}qn~=Vj!&MrXjjSZHY;Y#{q(HeMfVGyUk5e|>G9M>CBd%e=I+VjGm3tH zEt~r6#@9m~w+{Ym+En{9i$^t+8cWA{f7ZIA<zc$7qqeEa^-QghH__5F+m4OeyzJW4 z8Epf$mgw!e;W?W5t6k7_@wj3^&t?yIyX;%VYj$UI;|pJP);(KhK6ZtD)bU+|{g!2z zMz`D7%jfH*ml~cgW;zV+vSoUS_St;<@X)Fm9;zpX+#2Hg@~6XOhndTrX6AfTT^@5c z&veB8S>KGBU+tw9|4B#ftoBBuEmqPIs^O<%LWkVS>GdLbrs=)hpEEO*ZoOQ8+hadp z7=E`^o_73j<u1)bbzeB#-gIe5Y23bVJ5OgUU-Y0<_f^F4xLXAUw-;+J%FAilZ>hny z&@V<4TMY`i*s1df{SCsx>^aKMCw$nn;X>i*wfB`@ScY3pZ+vjD$zYAdo)N<mzs!C$ zcxi4I%g&!?D-XJ|qA+ff`3GIMiaB{I-WzUrP46`QfLW^-Dc?hTp7iQx@@wcK<-Vl_ z!m*CWPutHb-K6>C>@73NyIkF)OLPV{IxLkK3j81K^y*ZUXt!j`)t2QeThHCtB4^Vn zlPOPv1Fw8l8uQqq?b|5D9@mZ}JYTNWKP7uq|4v?S6a3?x70d1BSuAO^=4<X@t0%XT zer}SU^_i<vxw>VK@s@UNd;aiBQRpjp?lSW5sd;afY~0oDozl0tXM-<L|KD_T<VDSO z=X`Qvmdt48p0;Xl+U5SOo(;Q`Z=>8QGTHCVxdDw#LyT9gJNe;k_{3u;;vY9%dnj6< z;dFb?;E$^m?kIM@KIh!R4Xyiij&`3u&$U;&+2WRdGm017HnzLj`fJkk_kMnbZGRg1 ztRMQRv8d>Tj<jcEucKu_54-f+mGsG8dAsW3J0C7NZw&689`87n-^+RR^d*8J?MuE~ zpWiu4f5gnHvS0oi`2E{CZ2s~kBGTt{RCH-Gy8_SP&CbvESAN~0IzdV8R=ca4H}COQ zjC+?{(6cO~^|Jl`yPg~0&~-@EJ2u_CR5&=!_+s1QO;O(tN8LG_9~-OlJpWNhbocRj zmS4mgLmQnhR5}&9ZK3;#@t0;#@H)Tb$*QGe#=hbe-f|k%gf~m`*~Sq=%rwkT=IpG@ z5H`QCa{L~>GkXmp26jG>ul4Jj+WZCsAKW<mBg@lm*aZDK#k~!e_gLXE@^#tW`@W8; zjaSZjROB*jYWuYIx;r|3_YRqRuJ1(imfE{!ic;*`9K82)c%yQ)s)t)o4#?@&vwU*E z{JuWI#jn<O8QSa3j>k`3KI%Hm+wy$T=)XF2vy^15sOlJKbk#mF#UMP_BI=jjg2U@8 zjyG(h8T@$4u?yQ@drA`b&AaQncVW!x_ah#@oA@Zf*|gdA1!G^TkF>JfVtpW6;pYp% z!OKOH3|k-Czgui(t8-LZc~56o>&a)|+`gGN-fvt;+w?1gjiyITpWN^Cw^!!^+U6<d zEqnG!yXVm^M;zK8ZTUQ2y5UxgNJZ6p{nogHZ5wVbdGFCIq*cjjr#oKzbNasx{<Lb4 z|F;<*W^VHt{l(=Xx<6yRr1QtrULq%x&OZ+&m8^?*yt6TUjq>+(9plBO%Qo7)dDqxL zug?kB8!vX<@^=lImGRwgUh=4<>)xL~^}IURNKa|nV&5<AT$^^EWj8b5s`x?d((8|- zY!s3=t&MJG+;v0m(zvD<^sgOTc74hog&_|FqgMwF7%}xsvTE<J4~s8dHomIbB5lRj z!|tb@o7xO0*!QeqwAM|v$Zd@OX62Y;{LFQ$%W_+nHg0@5_x{nN{E_<NNz2!dOqip( zzyt?C?>^hW`fY>4)dfE%`M$Kj;T*dw{#}QOS07u1gf|?w<5c3)_JZw4iaHo{Z!+g< zU}4+CU&l<1%3jiC^v>C<=Y4j!s|r1}+_GyU_qCC~Ui#Q}_IBB{_syG%@%|5g2tPK; z%o#JelUZd>*t6u8J(`)GYw>9I!MP(WMb{QC8FqV~?VQ=38Q)tSb87FV`F=(7%HF;E z*gkiBvfr(2+N6pHGXx{m?q`3R(9d#6R+E={dW&=qUAXeNN&fJm`y*5qHyWWhKYP}g zo=TM$Pp&gxt|nb|yyCjo8oNX%_1ycf1|<d-_76O((I87SKE1=GZKs}7|IgKo-)!DE zWBD5WQro6&eap9Wa2)8=`C8EY?pscW_Iuy)X3zAc{l-LZtn^n^kcMZTJtnv-7&XzQ z|Ep-PuUA$-4xO+(>-fugZXf(+*bjb`pSD=Ex1@7<mX?z1`h@jaH*Eb|6?%S3I+=5_ z)44+7!LL&?wSKCrEPh$+VESfR+UhQcubSCi5apS*8Ok#r^lL$@PhGn<o4#gqgUIbo zRP{CfiXZsHcaq8Q8zGk?LLv`dF7<1rHgafq&T;qTj5zNvT?TFn+FFvMqct)2hs)KY zsVm&3T8{iOXGCS#!o3$Ct~wRjxoqyLsYQP|8&o!Ju=QhbcDt)5C5ukwo^SMc<oANx z?MElXY4*50Wzy6B!AovFmpqFLAJAjh<KZ2fdAL3>{nlsu)Ye^}$7kCtzV2W#v1po6 z1J8zr+om1Rd1%sBtCP+`Wsg1kU)?rVe7b)6+V8z{Ej~Lp*?4*7UF|b($Ig4G?AqmS zOv$w^PTIlSefCxC+ceYX@C~Dr)8d}T&r)!Iz36r9=oM>jzv#O!bwID<R+mPZ8;l=3 z`ry~FibVII`GrpsLP{fI8uv?97?y9ZZryrY(=#0mTkM{l)~;D!6Dz^%e23W&8g%<L zx&5`UHyIBMdn+9OHgVsDq(_eBrY+`H^*_@1MgD?K@2;BnT|4v1_oW{agbP}DT~Cp` z8*}hn`?B3f=Z)Jo<j{#szW%4z&1QKP3#=dfHQP|~;n#uUE5jxYI&FRb+><k9lWy(y zS-P>Jz-Z_ER)_LJEY*%Qo--q9pRZl&@mD+3%<r{aIeJRBap&I8ntaq-H+0DObz=VG z4g+VlTs7>&VHJ-SbN6^jEJiq*K78M?|AZLx{61&eTz%_h5N7tHx#4_4!*54#t0d_~ z-Q3pe{=J9Ay-swx_W8WWxXIt1?>+xxv95o~#I{9m8zif*=^;J5tm6&-+y~E=%-r5n z|A1yv&Ynr_I&NLkeCCE{yEA>`_&PllX3QS_dcX6aHI4U1INeeW@QGPDVUvgT*c&g- z@>VvD)p(otPFp!;c*~eJw@hxo46t7B_Ony(Xm1ziJE<A1F1aiT4_`k&c|^16Uw)<+ z40N>IWwLg6_T`<*>kg{Fx&EnY=>6%#P9KX58+%dzO4XOcntgs<*m381o7I8u2RZdr zJh|<oT5dyelabx->9t=u*mUlgr<Q+JB-%GUchNaGs*R6j`ZCWE-8Dz<ZFF?VsVrMj zka&8a&V?4^fBdyQHg#0<&6$cDcHG_6eZR|+IfuOLl#<_n+ci6VfYsvYfEDSgXQE4e 
zZB9LNTsExY+LpJUw(x>3F{kdHjSuYoD8uj9ozgFbPE&#-ZPibFXkYxf2Y=b13$1Mz zPSN$Wf7U2Rb%;{ox+BuGAsUG#AD0WYju}TKgzV`5c!S=EEaen8w@x;_?H5u1U&~}) znyhN#{!Dej{7S9fDZyGrUNhAtXY|xhYvi?jakG6(U&C_-$I=EHgpcyo-8tmCuJS6o zCUbPRH{lnyw=I~IXWMw!VEeV*&)IkSblv6ScVCwgowhsPI%wxOW!zee(K{K7@}Or{ z6W>m@x^5KQR;PVs+aoXajBQ0TjaO|N95}AxT;Rjb?S*~?dBTeL?c#2e?ZnG=Uysmp z@r~H~`B}nZi^&OHO4d%<7sgC6*r*p@m@_jzHZVB)$D7LN!Tia7m#07T3-xCFOV6(L zch{UbY9>!_l-1?R;p;tuhpR0(=Vdl{uvhZRJl}>o?R~f0v-1qRvEB1rYhSls3$MGq z9r^ae8uQo_4lfp-&X3SK-DFGiOLuq7zBE}7e(}@$@{2=_hZc^nJX3hX-sJJ9{DQ~d zr0eh6P3d+web19?&AYi>JN)6y=XQgJexAFcpj@%JNqN?TZa>A>*Z(}z%B|9S@srAD zV`58B&UjlY>aF#@<kZ6Vebr`{%vNY#Vtl2%c)M$OvC7<n%(81HnQjJWwp>{>bW8AP zx14qUPjWQVyJe*`Tc2fpU#mb%abdy6tFedLyS+U$bzXU1kD1|l@A}N%BRJiB&qeix zY5lirrBw#JT^9E)cG;6Q;Tig7<r%x4H&1s7pPjyRv&o#i#DY17dxy?j=zM0L)5j+% zRe!mq402dM?f(B^?@hpJ`r1C=U5ZpHGDL%eMvaIDp_yn>Q7EU=Ij8Y-&gl#dgiyxJ z8OuBrLdcXTb0K5Okc1>d6fz|G?zQ&bmGC_O?|a_&`mXD}o*n16*IM@)_geS5*S)4w zcJqbOhbbd{ol{nA&77yx-DY0yW5&{$^0P}HX$GbGt-X-?bBbU~Wm3ZyiF=QY-_NIR zoT##PWtyDQ%F#u&>kqreuGd`Bkeg&K$Q}GV_2};C9!FKTE9G6?zcw#8EcVR%SG8wc z^qdcIn;#y!X<?IPe>OAgwdm}2^LWPgl-vt@6m5g{Z2O%0(qmtbmoGybs={jpRp)hM zUv)97eRbfO(z}7YweJ>fvuVys$ZVE9=KOux$cNtzzFhe5>_^ataYN2FUMXNSMvF7c zK9|{)1@?P*`)-=^?Sz1!`}sZ>?)Topc(PIL?2};?JxbK&Q%g=23$BhB+i*2wY3+q6 zb7L=j^jLc_@q*IDGF7XwduHq%t9*RC{e0_Ed!tV!Zf}43y15R`@hr|a^JEL>1v>53 z3Vg+FnHKtnHSM}VyiZrt8lRm{<tLd4(kCt0sTHf;d0y;^tE?Dn=a!h&i)!Ni=f%hW z@Jyd0DwLmdUwLoz*LzmcK7C6gAFm%D88^+BePm(@TVt~sXT5q3XVepx-mUp9dNaJV z1~#6XH;~ya-FQ<Md1H%fH6{fv@g}_%myRCpJ%04ojJ-B$J*;f<D$OkBl;&96)9|&L zx~{~ke(Lz1Q7xrC&keIu_dK&#y*ehR-+-BB{g$36(U!CE)jrtRqPpCKrK(#sPhme_ zOF?Fbyv)U{bQwM;zVp-9HJvBuqxO#;0W-7HS$DIe)3(`X`Hx8cWZCj^|En4M#txgl zq9yslt(k!`hRwDseinuQyy<_p(QieU?csjWr)JoN=IgX{UAX;A*XO62Ph4B!JW6Z2 z|MbIpoqU|W`BnW;%9-?R6|u}YCe`-Lh)%;4m}68NZ)ZIeHO8~FWA}4LKf2}?I&7l# z_Vii$HfcT!>W<y$aasILu)hD|SsP26Za*4gJHtBvV1CGi&b`7k56GHvj}6$+-1&{) z%@3YerpZszc<NyO(DdLCecL7WDN7Eli0o-2nGxMNRPF3+O}(q~_XAg!XeIBMc<jaN z2Js_#AEmt)9mncE`}XWEU*2%0OH<%h`{S#xhwczMT7Cbq=G^uhDt*Tl^Yj%BBu7>E z44d0^qxI(e?W<n}eVY3+=|+j?^zkLd!<vTfZQOZ+Em<3_@>%=4$CXdQ_@IPK>y*>Y zpG?29_+-JMTh@(cAA5W)T(R}@$^yFySIoZb_|e%lE-S`+yVLmaiy6yLDDJa3KmX%x zwXQuA-&#D{H|gWJ=h+K#8$Z;4J~VH5l47rE{qGry-cLFbuzh3abG4ryDqm+L{#dww zma2&7<9XC?)2{U1U%OT)jmp%UT9$XF_XK4JPPdplZf{jv3<sUBIj*#;x6k5~#jmn6 zvve{?EFX9A<E6~UickBn>u0|Cc<Nql^0d{fru@u1F(|;~ah%8Yg%{pjH9pR{ai3Ew z3ENQKWH8@pv-Z%HYvXn82Tjx(wEBU2PtV%W{y!GH=~KUJTy}ASa(>^+XK(iC?=Q$) zeLdjH$j3b|WcOK<#hKA(R+SASBYksz+NL#OA1~S#3@$rU`iblFW~|^!ZMtn^?YBX% zrX9M}!%1z5lUCuTWA+DoMIWktTA%k}iD}=3XZ=sPU+%Q;s6$SV-T8a#Ds=jE*Q{SS z{ZYXjmsqzSA-PreU5n)J-cIU2rf$HTgmv%jH@w?A%wKPL&^wp!-BRB@m34D<ao7=b z@5jNq?2!un@^*B6*CS4U@0m`lyg4@l*WL-deCpghk$%n20X+=A-e)atUeJAA?&mwh zChT_0i<rALu&3;JIlV(B;cs3%tz*hZb;)&n=BsXepXfFC_~?BTs|Dki9ZRa2<3H_K zq)tS}JvGbMCEDe;zVDp$<@E8R?-=2>Sz5=s?NE87;{RQz`u?sAmO`NR)U+k$p?fXe ztW4|GyMA6Xele?Z+o-ORr@@+%mp+s4KCASf>Nmr5oyNSPe$RS|hG^O)TTUFXtSsl= z&n>+@@5CI9I+C;7O$^H@l&)^nH#u8<a%}bB9(SjBO&sMJJuCI6`}>&SO089kUM}@I z-AjDKyn1-2wR^@6De|gF81vMSFK`bT-rdI>hCr&RQGENA+cXasx;&N$#^wa%z2Bs1 zQFXND^Eo3&)rIA2{dvWv-i%UXO~)C14_%pao>e<4wmvv9(e(22n28F8-P7`q=%^er zK6HPz%;1J`Nv=DrU0n{opa1n;??&CD8x1pVm5iS7cC_)pjOWYds6R-$R^xV6&3*QK z>AtXpK6SmzHPp3T;*T6I-NPLA+1+FE9520%hX?g&NNk8WA*&RTyWqg*FS(o%>M81Z z54w04e==(-6x)wk=WFVm8uZcHYGi`ZaD$uv;Tbd5Cv;6@ub+8)cBh>qm5VCw6nZUN z6n$@B!mgOfPO?k7&FD8OzJ7%C_3e*i@0^%2*yE0eQnTFjrFsh(M;2HQDV>+H<!9DX zd1J0wT(a!-NXdEA;v$ndw=O)DzUVb=XhDW+?6fJ;xl7sGA0HX?<Z`uL=SpQkgY@}k z{t{N%Qn%Fe#fzV8+TC<=l-a-$RjOyoLlzHMls!P@_?(s{Z&x>6dcX)+>{D49Tlb`3 z;OSknhs;aT7$9HkJuhW|P#QjQ@Y&b1%VW#5<+@eQwRk)x?BMvrmFp|#8Yg|*xciRo 
z<J>~8&rMPT_SbjK=jB(YE}7?i`oPrTD=cqBD(w3n^k_<)n&qWkP4{1p9KEn%nzmTQ zA+2zWT%IteTb{V3@AXHe!&oO*x<=iPoa3dm>r|?EzWfXKYc<BgQEt~-u8(!n^{zjB zVW&p5!Mpse!u2mZrFOYI-0JJeT`Nv4(M*26$k~pW`0zk#`SUSPEk5a-G?YtI7(abV zdECzR$2$vO-zvLy;ZcENS;1mnA@`#$cWLa>sMN*F2UZ)Igb%&6ZB>fp)C-fp&z$-? z>PKPiiAk?2)A~ipl(ZZ?eC<ql!Q9*0$0P6A-Lc+3;K6Xw7@j0$b=}pqX(=k^_vWn~ zx_+diOz6IManWIq9d2DZe2f2Jm4MygX?4ls%_ZF}-B(^-p800|#+<LKpX;pJ|M@+0 zRgh1C#(A}qwz-DAm<PQ2+bD8IzP;`fv6?@%%3x==8>?>hzLB>6gORNbk8}6yf#o&} z8RLUnlxw12Z?p(p>FdDvyQlMhSN5JuXAb3NUHd3=xzqDanla7;x0sKvo2;XmvU!#0 z!i&MxAtT}yB6hf(zBK%@^Vsf={SL79{}^Rz>Y^CZ>#+KkzIk=U&DTynh<!2ss^h}- zk$t|}vGh;5Z;#JR582Qd(!)I<u{dT$(WF6L9$me8p{7Q6&(SSAW@J2^@U7=&tr15e z=5+ozqS0N$Ol^wa@Cn3-&DI0E-|VwB*4Tg6@#}l<25ex@n;2%l_rnwA$DcVrUsUjv z61P~a3Ebn&8DuleRBh37?~2L|7b6Gm8Z$p3eTKerpXpmvEp#->D~6oQ8n(^vn?QcF z*9JE;U&BaNZTgpxS?SS*NjEn=>XIqD)!@4BdZV5rf(ye+>gM}S85714Z$D6lJAe4; z)^ICub;2vZe$Td_(*1a6Zy)uT7sr>3{Bii9M@5L+taJIso0IA-($?;`e#>>uI~2aB z*v_W(f~(!j{cHQY8#Pev^;DM$iO$!Yf4Es5yn3VlRQ<{74*@6o)=iGNe=SwQyge;R zVZBbb<0<ZI5=2=Gm&H4T>@~Cf$uYU%Z){#M)0uH$&v&t>>;XyXu_f!iE?T$vf%DX> zkqvVm&si4w@$68Wr=rB;hfF5iI>r@T8Ncb=n{nY=`t(*fDXV4FeAarXYp8{8H$G?E zm=&C1I{oMNK9;+2^!uBSldlJrY|#H$?X9ZWu(o{A$@8%vcF()$6C`Oa4p=dKo=i;& zLpkHn<mo5BEV5rUeSz%Y<*c3yJifflkem8yX}-Z}m03s6m`XM0Jv`eumyvv}a@-ZI zdsgf7M*j3LFtiPg4C7heNSrBm!EN%@ecrK+>b))=b&_M%gw%^{;=l3w9LcV-Uv=PW z<?^+b^UGIVF`r(%r)l)1TXyCqYC?JI$0aSod0pS~e7??#;iO&<W_CSb`ngfha$LWP zBKtWy#V&7@)9!TsvibQyivyi69p3)#ZdF5Cen8{5!c{#7j$kW%8sac6$7@O8G2hzI zC5PV$Pwud`)hOzsd+4y+lY@?S4|dpZoNqVs-ZHtxyYp(lt~uN_URpC_?u-{YQF5KG zD>-PWZz^R@7iGCyGY{ko-*=xo{NSvrXY0P+8+b-p-cRd-p}Bj-l*`<~iHlRJf_v=Y z*(XaZ?2?|O^js6>@L<HGv#++STa~iQ#Q)5bPuBal+!?m&j(qJG<^2yYUFyBDN8W&^ z)<rMfJPiXBcziQyTywgoH~YzMWB#*DImz|X8~4LAz9o#3TRpzJ^uT9@1#7a?CT;)z zZSu|0b@`2NUmFZw6t+;|spgm1emeZXv3IU{-FR#mJbsX-df={emO&PgpI@6b6?>n{ z+ZXSzzPbBO^<>Arafh`)>c4es%L^rkBcnp6?$+p>6T<p5$lZNg!t?+}m7?&#qUCZ& zHa^$QOK*89r+)vziAeJ^>mr9N+g1PlS&_!Zb9-fs&rW3t*PiV$b=uoE6T*)^U(#jv z>K%4e_fKd%3JtL~?B=Ih)Mw%H9V7P2+FW_0_2|hDr)m5~eZAASJ@=j6n;YnpGUe1w zciFOtc)xM|)m9!#GWjrfkdZU*vRaGqqqMA-xpOUh-pb&uJx{$@?Q{J@&5yds%v&Wr z*^i%wO(s??=x2FM=n^BbSgAU0R_=Dg9j|J%9?ja^J$CRmMp>$dN42k;<lI)fET0FW z#n<_7KNW6Ef4ofD<zdtGmWHGsYZrtY^xnPxT{XM%fN8*{{+46hOD`(hl__-I&}W{- z5QF<u75PydeapQC`PJSVjV~oUH`{!wq;Z>H+0~fuE5A-mUJ{_t<KlbAM!~yzMmEom z>J3Z$UKs5>qpxC?^@53RON%GlItb*aZ#a20^_t&T--z({I!DfA_fR@_PS^cGQvasz zn~x8-C{=hB98VM|8Odwp=&ru}Y}U@(soJjcyBrQO0v$>dJ$juQX1Xinb8Kzy`5#~F zS8UB6`DM<x-iie|BRU`2oh+VQJ)l>m{m^gGeNC!Ww6-NBdn_>>swZ(eAG5IHSwfAi zvEIs?YcjlZ)b4K{?NoTa_*-g4;Olo^tu}AFeZcleq^IQ5<+Bs~D=vC<z2=p!<auUK zQ1pV#p_^t`v4Z2`_zGRR4v*t>UV3)f(4rY@%gRkYJ<5nsm0cuSAil-<d8f3hWmB@a zZs(B6((MVewZzdEmB*Rhzvtzrc2lMMJ>9Je^NzoCIdAo@&eE>p<ju>Y5ASgq{&xAU z^;<JePb{22Y3v1+^C#2NTu11i95+8=<B6IXgNH<p8Z-N$)6WkkGDBA=_UA3VG)XUG zX46BNpIL^{`}d5C@6((aV3c|AhgJQ)z2CmuC0Fh`leb7`iOP#9ITHdLHR6=J^`6I> zc|CXh5KVEe!Ss@yT;<?P=WEAq*dVj2*OnQhW3JnO%+Ike>{aC<%X@q0RCDgs?%G{B z!>+jbSCvfqJl;&Z!Ki^ZaB=v6=KBLIa_Z+w-(2&)*Lg<Xq8i(z#c7><D@t0vMdodJ z-Mvu7XoTU*PA=uz0e81VTc3RRcF_vup10p7>g684U%viUOeyE}_=B^PL$CBwZPs;3 zh#GOpmmidEI@pgl%Wr3qjKS{w0hb>Rm}GIh<f72KU)HFx+8dr3eQZu&4)e3Gj}@+) z)9_>0n!vQVyd;eteZwMBH+wrL1uq}fDeI0)=!=6xRHn%G{gg45+jqRU*|B%xhMuQ) zZo4H|d;N%L>qfckyY&V0y)Lc0XS*wF<!A<jDKlj^>s{gz{n~EToyPhsx}Q4W)ra*e z{kC(ZKcCjT$u^$8=%d<*6Qg}jR7`rHxcv31hN=L=hj-#@HFCX1?zg%TGRV#G>ddG> z&8ylIy$@N+MubcZ+&(9KrP@;Y2}e|x>2J#U>grT^JNNmN?5ZWtYJ(a5SJurOn)_kO z=Zj6@P7|H)me_pRxTag#KHcc!-)#rI^|N<4t@wJ*Y}38HviqiQD#~14yS;Y8<*Ai3 z^Sca)Ygw*6DoOEH=1wh6X{q*wGS_|Y-W#b6?cC3Ledk$QyCiE?_Rx=6{n>A<d(=(6 zloxtCnx1~Tzo=*9&+rY}sb-IhujEggmNUqtZ@SsQBh{8;!e?*DTHM#>#>s(-w}U<6 
[base85-encoded GIT binary patch payload omitted]
literal 0
HcmV?d00001

diff --git a/cruelbuild b/cruelbuild
new file mode 100755
index 000000000000..9f14f439e766
--- /dev/null
+++ b/cruelbuild
@@ -0,0 +1,1329 @@
+#!/usr/bin/env python3
+
+import os, errno
+from sys import argv, stdout, stderr
+import re
+import json
+import
atexit +from copy import deepcopy +from datetime import datetime, timedelta +from subprocess import CalledProcessError, Popen, run, DEVNULL, PIPE +from shutil import which +from enum import Enum, IntEnum +from collections import namedtuple +from struct import unpack_from, calcsize +from select import poll +from time import sleep +from timeit import default_timer as timer +from ctypes import CDLL, get_errno, c_int +from ctypes.util import find_library +from errno import EINTR +from termios import FIONREAD +from fcntl import ioctl +from io import FileIO +from os import fsencode, fsdecode + +CK_DIR = os.path.dirname(os.path.realpath(__file__)) + +toolchain = { + 'default': { + 'CROSS_COMPILE': 'toolchain/gcc-cfp/gcc-cfp-jopp-only/aarch64-linux-android-4.9/bin/aarch64-linux-android-', + 'CLANG_TRIPLE': 'toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/aarch64-linux-gnu-', + 'CC': 'toolchain/clang/host/linux-x86/clang-4639204-cfp-jopp/bin/clang' + }, + 'cruel': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-cruel-elf-' + }, + 'samsung': { + 'CROSS_COMPILE': 'toolchain/gcc-cfp/gcc-cfp-jopp-only/aarch64-linux-android-4.9/bin/aarch64-linux-android-', + 'CLANG_TRIPLE': 'toolchain/clang/host/linux-x86/clang-r349610-jopp/bin/aarch64-linux-gnu-', + 'CC': 'toolchain/clang/host/linux-x86/clang-r349610-jopp/bin/clang' + }, + 'google': { + 'CROSS_COMPILE': 'toolchain/aarch64-linux-android-4.9/bin/aarch64-linux-android-', + 'CLANG_TRIPLE': 'toolchain/llvm/bin/aarch64-linux-android-', + 'CC': 'toolchain/llvm/bin/clang', + 'LD': 'toolchain/llvm/bin/ld.lld', + 'AR': 'toolchain/llvm/bin/llvm-ar', + 'NM': 'toolchain/llvm/bin/llvm-nm', + 'OBJCOPY': 'toolchain/llvm/bin/llvm-objcopy', + 'OBJDUMP': 'toolchain/llvm/bin/llvm-objdump', + 'READELF': 'toolchain/llvm/bin/llvm-readelf', + 'OBJSIZE': 'toolchain/llvm/bin/llvm-size', + 'STRIP': 'toolchain/llvm/bin/llvm-strip', + 'LDGOLD': 'toolchain/aarch64-linux-android-4.9/bin/aarch64-linux-android-ld.gold', + 'LLVM_AR': 'toolchain/llvm/bin/llvm-ar', + 'LLVM_DIS': 'toolchain/llvm/bin/llvm-dis' + }, + 'proton': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-linux-gnu-', + 'CROSS_COMPILE_ARM32': 'toolchain/bin/arm-linux-gnueabi-', + 'CC': 'toolchain/bin/clang', + 'LD': 'toolchain/bin/ld.lld', + 'AR': 'toolchain/bin/llvm-ar', + 'NM': 'toolchain/bin/llvm-nm', + 'OBJCOPY': 'toolchain/bin/llvm-objcopy', + 'OBJDUMP': 'toolchain/bin/llvm-objdump', + 'READELF': 'toolchain/bin/llvm-readelf', + 'OBJSIZE': 'toolchain/bin/llvm-size', + 'STRIP': 'toolchain/bin/llvm-strip', + 'LDGOLD': 'toolchain/bin/aarch64-linux-gnu-ld.gold', + 'LLVM_AR': 'toolchain/bin/llvm-ar', + 'LLVM_DIS': 'toolchain/bin/llvm-dis' + }, + 'arter97': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-elf-' + }, + 'arm': { + 'CROSS_COMPILE': 'toolchain/bin/aarch64-none-elf-' + }, + 'system-gcc': { + 'CROSS_COMPILE': 'aarch64-linux-gnu-' + }, + 'system-clang': { + 'CC': 'clang', + 'CROSS_COMPILE': 'aarch64-linux-gnu-', + 'CROSS_COMPILE_ARM32': 'arm-linux-gnu-' + } +} + +models = { + 'G970F': { + 'config': 'exynos9820-beyond0lte_defconfig' + }, + 'G970N': { + 'config': 'exynos9820-beyond0lteks_defconfig' + }, + 'G973F': { + 'config': 'exynos9820-beyond1lte_defconfig' + }, + 'G973N': { + 'config': 'exynos9820-beyond1lteks_defconfig' + }, + 'G975F': { + 'config': 'exynos9820-beyond2lte_defconfig' + }, + 'G975N': { + 'config': 'exynos9820-beyond2lteks_defconfig' + }, + 'G977B': { + 'config': 'exynos9820-beyondx_defconfig' + }, + 'G977N': { + 'config': 'exynos9820-beyondxks_defconfig' + }, + 'N970F': { + 'config': 
'exynos9820-d1_defconfig' + }, + 'N971N': { + 'config': 'exynos9820-d1xks_defconfig' + }, + 'N975F': { + 'config': 'exynos9820-d2s_defconfig' + }, + 'N976B': { + 'config': 'exynos9820-d2x_defconfig' + }, + 'N976N': { + 'config': 'exynos9820-d2xks_defconfig' + } +} + +OBJTREE_SIZE_GB = 3 + + +_libc = None +def _libc_call(function, *args): + """Wrapper which raises errors and retries on EINTR.""" + while True: + rc = function(*args) + if rc != -1: + return rc + errno = get_errno() + if errno != EINTR: + raise OSError(errno, os.strerror(errno)) + +Event = namedtuple('Event', ['wd', 'mask', 'cookie', 'name']) + +_EVENT_FMT = 'iIII' +_EVENT_SIZE = calcsize(_EVENT_FMT) + +class INotify(FileIO): + fd = property(FileIO.fileno) + inotify_raw_events = [] + topdir = 1 + paths = {} + event_files = set() + + def __init__(self, inheritable=False, nonblocking=False): + try: + libc_so = find_library('c') + except RuntimeError: + libc_so = None + global _libc; _libc = _libc or CDLL(libc_so or 'libc.so.6', use_errno=True) + O_CLOEXEC = getattr(os, 'O_CLOEXEC', 0) # Only defined in Python 3.3+ + flags = (not inheritable) * O_CLOEXEC | bool(nonblocking) * os.O_NONBLOCK + FileIO.__init__(self, _libc_call(_libc.inotify_init1, flags), mode='rb') + self._poller = poll() + self._poller.register(self.fileno()) + + def add_watch(self, path, mask): + path = str(path) if hasattr(path, 'parts') else path + wd = _libc_call(_libc.inotify_add_watch, self.fileno(), fsencode(path), mask) + self.paths[wd] = path + if path == '.': + self.topdir = wd + return wd + + def readraw(self, timeout=None, read_delay=None): + data = self._readall() + if not data and timeout != 0 and self._poller.poll(timeout): + if read_delay is not None: + sleep(read_delay / 1000.0) + data = self._readall() + return data + + def _readall(self): + bytes_avail = c_int() + ioctl(self, FIONREAD, bytes_avail) + if not bytes_avail.value: + return b'' + return os.read(self.fileno(), bytes_avail.value) + + def collect_events(self, timeout=1, read_delay=None): + self.inotify_raw_events.append(self.readraw(timeout=timeout, read_delay=read_delay)) + + @staticmethod + def parse_events(data): + pos = 0 + events = [] + while pos < len(data): + wd, mask, cookie, namesize = unpack_from(_EVENT_FMT, data, pos) + pos += _EVENT_SIZE + namesize + name = data[pos - namesize : pos].split(b'\x00', 1)[0] + events.append(Event(wd, mask, cookie, fsdecode(name))) + return events + + def _gather_event_files(self): + event_files = set() + for data in self.inotify_raw_events: + for event in self.parse_events(data): + if event.wd != -1: + if event.wd == self.topdir: + event_files.add(event.name) + else: + event_files.add(os.path.join(self.paths[event.wd], event.name)) + else: + fatal("Missing events with SRC_REDUCE=y, try to use j=1") + inotify_raw_events = [] + self.event_files.update(event_files) + + def get_event_files(self): + self.collect_events() + self._gather_event_files() + return self.event_files + + def run(self, args): + with Popen(args, stdout=stdout, stderr=stderr) as proc: + while proc.poll() is None: + self.collect_events() + if proc.returncode: + exit(proc.returncode) + self._gather_event_files() + +class flags(IntEnum): + OPEN = 0x00000020 #: File was opened + Q_OVERFLOW = 0x00004000 #: Event queue overflowed + ONLYDIR = 0x01000000 #: only watch the path if it is a directory + EXCL_UNLINK = 0x04000000 #: exclude events on unlinked objects + +inotify = INotify() +watch_flags = flags.OPEN | flags.EXCL_UNLINK | flags.ONLYDIR +unused_files = set() + + +def 
get_toolchain_cc(compiler): + cc = '' + if 'CC' in toolchain[compiler]: + cc = toolchain[compiler]['CC'] + else: + cc = toolchain[compiler]['CROSS_COMPILE'] + 'gcc' + return cc + +def mount_tmpfs(target, req_mem_gb): + if not os.path.ismount(target): + meminfo = dict((i.split()[0].rstrip(':'),int(i.split()[1])) for i in open('/proc/meminfo').readlines()) + av_mem_gb = int(meminfo['MemAvailable'] / 1024 ** 2) + if av_mem_gb >= req_mem_gb + 2: + ret = run(['sudo', '--non-interactive', + 'mount', '-t', 'tmpfs', '-o', 'rw,noatime,size=' + str(req_mem_gb) + 'G', 'tmpfs', target]) + if ret.returncode != 0: + print('BUILD: error mounting tmpfs on ' + target, file=stderr) + else: + print('BUILD: tmpfs is mounted on ' + target) + else: + print('BUILD: will not mount tmpfs on ' + target + ' size ' + str(av_mem_gb) + 'G < ' + str(req_mem_gb + 2) + 'G') + else: + print(target + ' is already used as mountpoint', file=stderr) + +def umount_tmpfs(target): + if os.path.ismount(target): + ret = run(['sudo', '--non-interactive', 'umount', target]) + if ret.returncode != 0: + print("BUILD: error unmounting " + target, file=stderr) + else: + print("BUILD: " + target + " unmounted") + +def inotify_install_watchers(inotify, dirname, watch_flags, exclude_dirs, exclude_files): + inotify.add_watch(dirname, watch_flags) + + topdirs, unused_files = scandir(dirname) + for d in exclude_dirs: + topdirs.remove(d) + for f in exclude_files: + unused_files.remove(f) + + for dir in topdirs: + for root, dirs, files in os.walk(dir, topdown=False): + unused_files.update({ os.path.join(root, f) for f in files }) + for d in dirs: + inotify.add_watch(os.path.join(root, d), watch_flags) + + return unused_files + +def remove_files(*files): + for f in files: + try: + os.remove(f) + except FileNotFoundError: + pass + +def del_dirs(src_dir): + for dirpath, _, _ in os.walk(src_dir, topdown=False): + try: + os.rmdir(dirpath) + except OSError: + pass + +def mkdir(dirname): + try: + os.mkdir(dirname) + except FileExistsError: + pass + +def scandir(dirname): + topdirs = set() + topfiles = set() + with os.scandir(dirname) as it: + for entry in it: + if entry.is_dir(): + topdirs.add(entry.name) + else: + topfiles.add(entry.name) + return topdirs, topfiles + +def tool_exists(name): + return which(name) is not None + +def get_cores_num(): + return len(os.sched_getaffinity(0)) + +def check_env(var): + isset = False + v = os.environ.get(var, 'n') + if v == 'y' or v == 'Y' or v == 'yes' or v == '1': + isset = True + return isset + +def set_env(force=False, **env): + for key, value in env.items(): + if force or key not in os.environ: + os.environ[key] = value + value = os.environ[key] + print(key + '="' + value + '"') + +def fatal(*args, **kwargs): + print(*args, file=stderr, **kwargs) + exit(1) + +def print_usage(): + msg = f""" +Usage: {argv[0]} <stage> model=<model> name=<name> [+-]<conf1> [+-]<conf2> ... + +<stage>: build stage. Required argument. mkimg by default. +Where <stage> can be one of: config, build, mkimg, pack +(:build, :mkimg, :pack). Each next stage will run all +previous stages first. Prefix ':' means skip all previous +stages. + +model=<model> phone model name. Required argument. +The script will try to autodetect connected phone if +model is not specified. Supported models: +{list(models.keys())} +Use model=all to build all available kernels. + +name=<name>: optional custom kernel name +Use this switch if you want to change the name in +your kernel. 
+ +toolchain=<compiler>: optional toolchain switch +Supported compilers: {list(toolchain.keys())} + +os_patch_level=<date>: use patch date (YYYY-MM) +instead of default one from build.mkbootimg.<model> +file. For example: os_patch_level="2020-02" + +O=dir will perform out of tree kernel build in dir. +The script will try to mount tmpfs in dir if there +is enough available memory. + +[+-]<conf>: optional list of configuration switches. +Use prefix '+' to enable the configuration. +Use prefix '-' to disable the configuration. +You can check full list of switches and default ones in +kernel/configs/cruel*.conf directory. +One can use NODEFAULTS=y {argv[0]} +samsung ... to disable +all enabled by default configs. + +If you want to flash the kernel, use: FLASH=y {argv[0]} +""" + print(msg) + +def parse_stage(): + stages = [] + modes = ['config', 'build', 'mkimg', 'pack'] + omodes = [':config', ':build', ':mkimg', ':pack'] + all_modes = modes + omodes + + if len(argv) > 1: + mode = argv[1] + if mode not in all_modes: + if mode[0] == '+' or mode[0] == '-' or '=' in mode: + mode = 'mkimg' + else: + print_usage() + fatal('Please, specify the mode from {}.'.format(all_modes)) + else: + argv.pop(1) + else: + mode = 'mkimg' + + if mode in omodes: + if mode == ':config': + stages = [] # special model for :config + # don't run make defconfig + # just generate config.json file + else: + stages = [mode[1:]] + else: + stages = modes[0:modes.index(mode)+1] + + return stages + +def find_configs(): + configs = { 'kernel': {}, 'order': [] } + prefix_len = len('cruel') + suffix_len = len('.conf') + nodefaults = check_env('NODEFAULTS') + files = [f for f in os.listdir('kernel/configs/') if re.match('^cruel[+-]?.*\.conf$', f)] + for f in files: + if f == 'cruel.conf': + continue + name = f[prefix_len+1:] + name = name[:-suffix_len] + enabled = True if f[prefix_len:prefix_len+1] == '+' else False + + configs['kernel'][name] = { + 'path': os.path.join('kernel/configs', f), + 'enabled': enabled if not nodefaults else False, + 'default': enabled + } + if enabled and not nodefaults: + configs['order'].append(name) + configs['order'] = sorted(configs['order']) + return configs + +def save_config(file, configs): + conf = deepcopy(configs) + with open(file, 'w') as fh: + json.dump(conf, fh, sort_keys=True, indent=4) + +def load_config(file): + with open(file, 'r') as fh: + return json.load(fh) + +def switch_config(opt, enable, configs): + if opt in configs['kernel']: + configs['kernel'][opt]['enabled'] = enable + else: + fatal("Unknown config '{}'.".format(opt)) + + if enable: + if opt in configs['order']: + configs['order'].remove(opt) + configs['order'].append(opt) + else: + if opt in configs['order']: + configs['order'].remove(opt) + +def parse_args(): + configs = find_configs() + + for arg in argv[1:]: + if arg.find('=') != -1: + (key, value) = arg.split('=', 1) + + enable = None + if key[0] == '-' or key[0] == '+': + enable = True if key[0] == '+' else False + key = key[1:] + + if key not in [ 'name', + 'model', + 'os_patch_level', + 'toolchain', + 'magisk', + 'O' ]: + fatal('Unknown config {}.'.format(key)) + + if enable == None: + if key == 'model': + if value == 'all': + value = list(models.keys()) + else: + value = value.split(',') + configs[key] = value + else: + switch_config(key, enable, configs) + + if not value: + fatal('Please, use {}="<name>".'.format(key)) + elif key == 'model': + for m in value: + if m not in models: + fatal('Unknown device model: ' + m) + elif key == 'os_patch_level': + try: + 
datetime.strptime(value, '%Y-%m') + except Exception: + fatal('Please, use os_patch_level="YYYY-MM". For example: os_patch_level="2020-02"') + elif key == 'toolchain': + if value not in toolchain: + fatal('Unknown toolchain: ' + value) + elif key == 'magisk': + if value != 'canary' and value != 'alpha' and not re.match('^v\d+\.\d+', value): + fatal('Unknown magisk version: ' + value + ' (example: canary, alpha, v20.4, v19.4, ...)') + configs['kernel']['magisk']['version'] = value + else: + switch = arg[0:1] + enable = True if switch == '+' else False + opt = arg[1:] + if switch not in ['+', '-']: + fatal("Unknown switch '{0}'. Please, use '+{0}'/'-{0}' to enable/disable option.".format(arg)) + switch_config(opt, enable, configs) + + if 'model' not in configs: + first_model = list(models.keys())[0] + if len(models) == 1: + configs['model'] = [ first_model ] + else: + try: + configs['model'] = [ adb_get_device_model() ] + except CalledProcessError: + print_usage() + fatal('Please, use model="<model>". For example: model="{}"'.format(first_model)) + + return configs + +def setup_env(features, configs, model): + set_env(ARCH='arm64', PLATFORM_VERSION='11', ANDROID_MAJOR_VERSION='r') + set_env(KBUILD_BUILD_TIMESTAMP='') + if features['fake_config']: + defconfig = os.path.join('arch/arm64/configs', models[model]['config']) + set_env(KCONFIG_BUILTINCONFIG=defconfig) + +def config_info(configs, model): + name = configs.get('name', 'Cruel') + name = name.replace('#MODEL#', model) + print('Name: ' + name) + print('Model: ' + model) + + conf_msg = [] + kernel_configs = configs['kernel'] + for key in configs['order']: + if kernel_configs[key]['enabled']: + conf_msg.append(key + ' (default: ' + ('On' if kernel_configs[key]['default'] else 'Off') + ')') + if conf_msg: + print('Configuration:') + for i in conf_msg: + print("\t" + i) + else: + print('Configuration: basic') + + if 'os_patch_level' in configs: + print('OS Patch Level: ' + configs['os_patch_level']) + else: + with open('cruel/build.mkbootimg.' + model, 'r') as fh: + for line in fh: + (arg, val) = line.split('=', 1) + val = val.rstrip() + if arg == 'os_patch_level': + print('OS Patch Level: ' + val) + break + +def config_name(name, config='.config'): + run(['scripts/config', + '--file', config, + '--set-str', 'LOCALVERSION', '-' + name], check=True) + +def config_model(model, config='.config'): + run(['scripts/config', + '--file', config, + '--disable', 'CONFIG_MODEL_NONE', + '--enable', 'CONFIG_MODEL_' + model], check=True) + +def make_config(features, configs, model): + objtree = configs.get('O', '.') + config = os.path.join(os.path.join(CK_DIR, objtree), + 'config.' 
+ model) + set_env(KCONFIG_CONFIG=config) + args = ['scripts/kconfig/merge_config.sh', '-O', objtree, + os.path.join('arch/arm64/configs', models[model]['config']), + 'kernel/configs/cruel.conf'] + + kernel_configs = configs['kernel'] + for key in configs['order']: + if kernel_configs[key]['enabled']: + args.append(kernel_configs[key]['path']) + + inotify.run(args) + + if 'name' in configs: + name = configs['name'].replace('#MODEL#', model) + config_name(name, config) + + if features['dtb']: + config_model(model, config) + + del os.environ['KCONFIG_CONFIG'] + +def update_magisk(version): + cmd = ['usr/magisk/update_magisk.sh'] + if version: + cmd.append(version) + run(cmd, check=True) + with open('usr/magisk/magisk_version', 'r') as fh: + print('Magisk Version: ' + fh.readline()) + +def switch_toolchain(compiler): + cc = os.path.abspath(get_toolchain_cc(compiler)) + if cc.startswith(os.path.realpath('toolchain')): + branch = run(['git', 'submodule', 'foreach', 'git', 'rev-parse', '--abbrev-ref', 'HEAD'], + check=True, stdout=PIPE).stdout.decode('utf-8').splitlines()[1] + if not (tool_exists(cc) and compiler == branch): + ret = run(['git', 'submodule', 'foreach', 'git', 'rev-parse', '--verify', '--quiet', compiler], + stdout=DEVNULL, stderr=DEVNULL) + if ret.returncode != 0: + try: + run(['git', 'submodule', 'foreach', 'git', 'branch', compiler, 'origin/' + compiler], + check=True, stdout=DEVNULL, stderr=DEVNULL) + except CalledProcessError: + fatal("Can't checkout to toolchain: " + compiler) + run(['git', 'submodule', 'foreach', 'git', 'checkout', compiler], check=True) + +def build(compiler, objtree='.'): + env = {} + + toolchain[compiler]['CC'] = get_toolchain_cc(compiler) + if compiler in ['system-gcc', 'system-clang']: + env = toolchain[compiler] + else: + env = { k: os.path.abspath(v) for k, v in toolchain[compiler].items() } + if tool_exists('ccache'): + env['CC'] = 'ccache ' + env['CC'] + + if objtree != '.': + env['O'] = objtree + + if tool_exists('pigz'): + env['KGZIP']='pigz' + if tool_exists('pbzip2'): + env['KBZIP2']='pbzip2' + + arg_threads = [] + if check_env('DEBUG'): + arg_threads = ['-j', '1', 'V=1'] + else: + arg_threads = ['-j', str(get_cores_num())] + + inotify.run(['make', + *arg_threads, + *{ k + '=' + v for k, v in env.items() }]) + +def mkbootimg(os_patch_level, seadroid, config, output, **files): + if not tool_exists('mkbootimg'): + fatal("Please, install 'mkbootimg'.") + + print("Preparing {}...".format(output)) + for f in files.values(): + if not os.path.isfile(f): + fatal("Can't find file '{}'.".format(f)) + args = ['mkbootimg'] + with open(config) as fh: + for line in fh: + (arg, val) = line.split('=', 1) + if arg == 'os_patch_level' and os_patch_level: + val = os_patch_level + else: + val = val.rstrip() + args.extend(['--' + arg, val]) + for k, v in files.items(): + args.extend(['--' + k, v]) + args.extend(['--output', output]) + + run(args, check=True) + + if seadroid: + with open(output, 'ab') as img: + img.write('SEANDROIDENFORCE'.encode('ascii')) + +def get_dtb_configs(models): + dtb_model = {} + model_dtb = {} + for model in models: + with open(os.path.join('cruel', 'dtb.' 
+ model), 'r') as fh: + l = '' + while not l: + l = fh.readline() + dtb = l.split('.')[0] + if dtb not in dtb_model: + dtb_model[dtb] = [model] + else: + dtb_model[dtb].append(model) + model_dtb[model] = dtb + return {'dtb': dtb_model, 'model': model_dtb} + +def mkdtboimg(dtbdir, config, output): + if not tool_exists('mkdtboimg'): + fatal("Please, install 'mkdtboimg'.") + + print("Preparing {}...".format(output)) + inotify.run(['mkdtboimg', 'cfg_create', '--dtb-dir=' + dtbdir, output, config]) + +def mkvbmeta(output): + if not tool_exists('avbtool'): + fatal("Please, install 'avbtool'.") + + print('Preparing vbmeta...') + run(['avbtool', 'make_vbmeta_image', '--out', output], check=True) + +def mkaptar(boot, vbmeta): + if not (tool_exists('tar') and tool_exists('md5sum') and tool_exists('lz4')): + fatal("Please, install 'tar', 'lz4' and 'md5sum'.") + + print('Preparing AP.tar.md5...') + run(['lz4', '-m', '-f', '-B6', '--content-size', boot, vbmeta], check=True) + run(['tar', '-H', 'ustar', '-c', '-f', 'AP.tar', boot + '.lz4', vbmeta + '.lz4'], check=True) + run(['md5sum AP.tar >> AP.tar && mv AP.tar AP.tar.md5'], check=True, shell=True) + +def adb_get_state(): + return run(['adb', 'get-state'], stdout=PIPE, stderr=DEVNULL, check=False).stdout.decode('utf-8').strip() + +def adb_wait_for_device(): + state = adb_get_state() + if not state: + print('Waiting for the device...') + run(['adb', 'wait-for-device']) + +def heimdall_wait_for_device(): + print('Waiting for download mode...') + run('until heimdall detect > /dev/null 2>&1; do sleep 1; done', shell=True) + +def heimdall_in_download_mode(): + return run(['heimdall', 'detect'], stdout=DEVNULL, stderr=DEVNULL).returncode == 0 + +def heimdall_flash_images(imgs): + args = ['heimdall', 'flash'] + for partition, image in imgs.items(): + args.extend(['--' + partition.upper(), image]) + run(args, check=True) + +def adb_reboot_download(): + run(['adb', 'reboot', 'download']) + +def adb_reboot(): + run(['adb', 'reboot']) + +def adb_get_kernel_version(): + run(['adb', 'shell', 'cat', '/proc/version']) + +def adb_uid(): + return int(run(['adb', 'shell', 'id', '-u'], stdout=PIPE, check=True).stdout.decode('utf-8')) + +def adb_check_su(): + try: + run(['adb', 'shell', 'command', '-v', 'su'], check=True) + return True + except CalledProcessError: + return False + +def adb_get_device_model(): + return (run(['adb', 'shell', 'getprop', 'ro.boot.em.model'], stdout=PIPE, check=True) + .stdout.decode('utf-8') + .strip()[3:]) + +def adb_get_partitions(cmd_adb): + raw_partitions = run(['adb', 'shell', *cmd_adb('cat /proc/partitions')], + stdout=PIPE, check=True).stdout.decode('utf-8').splitlines()[1:] + aliases = run(['adb', 'shell', 'ls', '-1', + '/dev/block/by-name/*'], + stdout=PIPE, check=True).stdout.decode('utf-8').splitlines() + names = run(['adb', 'shell', 'realpath', + '/dev/block/by-name/*'], + stdout=PIPE, check=True).stdout.decode('utf-8').splitlines() + partitions = {} + map_block = {} + block_prefix_len = len('/dev/block/') + alias_prefix_len = len('/dev/block/by-name/') + for (alias, name) in zip(aliases, names): + if alias and name: + alias = alias[alias_prefix_len:] + name = name[block_prefix_len:] + partitions[alias] = { 'block': name } + map_block[name] = partitions[alias] + for part in raw_partitions: + if part: + major, minor, blocks, name = part.split() + if name in map_block: + map_block[name]['size'] = int(blocks) * 1024 + return partitions + +def flash(samsung=False, **imgs): + if not tool_exists('adb'): + fatal("Please, install 'adb'") 
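+    # Overview of the flow implemented below: first try to detect root access over
+    # adb (either adbd already runs as root or 'su' is available); with root, the
+    # images are pushed to a temp dir and written with dd to the matching by-name
+    # block partitions. Without adb root, fall back to heimdall in Download Mode
+    # for Samsung devices, otherwise abort with a hint.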
+ + is_root = False + use_su = False + try: + if not heimdall_in_download_mode(): + adb_wait_for_device() + is_root = (adb_uid() == 0) + if not is_root and adb_check_su(): + use_su = True + is_root = True + except (FileNotFoundError, CalledProcessError): + pass + + if is_root: + #cmd_adb = lambda cmd: ['sh', '-x', '-c', '"' + cmd + '"'] + cmd_adb = lambda cmd: [cmd.replace('\\','')] + if use_su: + cmd_adb = lambda cmd: ['su', '-c', '"' + cmd + '"'] + + state = adb_get_state() + tmpdir = '/data/local/tmp' + if state == 'recovery': + tmpdir = '/tmp' + + partitions = adb_get_partitions(cmd_adb) + for part, img in imgs.items(): + if part not in partitions: + fatal("Unknown partition " + part + " for " + img) + img_size = os.path.getsize(img) + part_size = partitions[part]['size'] + if img_size > part_size: + img_size_mb = img_size / 1024 ** 2 + part_size_mb = part_size / 1024 ** 2 + fatal("{} is bigger than {} partition ({:0.2f} > {:0.2f} MiB)" + .format(img, part, img_size_mb, part_size_mb)) + for part, img in imgs.items(): + cleanup = lambda: run(['adb', 'shell', + 'rm', '-f', os.path.join(tmpdir, img)]) + atexit.register(cleanup) + run(['adb', 'push', + img, tmpdir], + check=True) + run(['adb', 'shell', *cmd_adb( + 'dd if=' + os.path.join(tmpdir, img) + + ' of=/dev/block/by-name/' + part)], + check=True) + cleanup() + atexit.unregister(cleanup) + adb_reboot() + adb_wait_for_device() + adb_get_kernel_version() + elif samsung and tool_exists('heimdall'): + if not heimdall_in_download_mode(): + adb_wait_for_device() + adb_reboot_download() + heimdall_wait_for_device() + heimdall_flash_images(imgs) + adb_wait_for_device() + adb_get_kernel_version() + else: + fatal("Please, use 'adb root' or install 'heimdall'") + +def flash_zip(zipfile): + if not tool_exists('adb'): + fatal("Please, install 'adb'") + + if heimdall_in_download_mode(): + fatal("Can't flash zip file while phone is in DOWNLOAD mode. 
Please, reboot") + + is_root = False + use_su = False + try: + adb_wait_for_device() + is_root = (adb_uid() == 0) + if not is_root and adb_check_su(): + use_su = True + is_root = True + except (FileNotFoundError, CalledProcessError): + pass + + if not is_root: + fatal("Can't flash zip file if root is not available") + + state = adb_get_state() + tmpdir = '/data/local/tmp' + execdir = '/data/adb' + if state == 'recovery': + tmpdir = '/tmp' + execdir = '/tmp' + + update_binary = os.path.join(execdir, 'update-binary') + zippath = os.path.join(tmpdir, os.path.basename(zipfile)) + + #cmd_adb = lambda cmd: ['sh', '-x', '-c', '"' + cmd + '"'] + cmd_adb = lambda cmd: [cmd.replace('\\','')] + if use_su: + cmd_adb = lambda cmd: ['su', '-c', '"' + cmd + '"'] + + cleanup = lambda: run(['adb', 'shell', + *cmd_adb('rm -f /tmp/update-binary ' + os.path.join(tmpdir, zipfile))]) + atexit.register(cleanup) + run(['adb', 'push', zipfile, tmpdir], + check=True) + run(['adb', 'shell', *cmd_adb(( + 'unzip -p {zip}' + + ' META-INF/com/google/android/update-binary ' + + '> {update}').format(zip=zippath, update=update_binary))], + check=True) + run(['adb', 'shell', *cmd_adb(( + 'fgrep -qI \\"\\" {update} && ' + # text file + '[ \\"\$(head -n 1 {update})\\" = \\"#!/sbin/sh\\" ] && ' + + 'sed -i \\"1c\#!\$(which sh)\\" {update}').format(update=update_binary))], + check=True) + run(['adb', 'shell', *cmd_adb('chmod +x ' + update_binary)], + check=True) + run(['adb', 'shell', *cmd_adb(( + 'set -o posix; FIFO=\$(mktemp -p {tmp} -u); mkfifo \$FIFO; exec 3<>\$FIFO; rm -f \$FIFO; ' + + 'cd {tmp}; {update} 3 3 {zip}').format(tmp=tmpdir, update=update_binary, zip=zippath))], + check=True) + cleanup() + atexit.unregister(cleanup) + + adb_reboot() + adb_wait_for_device() + adb_get_kernel_version() + +def archive_xz(name, images): + if not tool_exists('xz'): + fatal("Please, install 'xz'.") + +# if len(images) == 1: +# print('Preparing {} ...'.format(images[0] + '.xz')) +# run(['xz', '-9', '--force', images[0]], check=True) +# elif tool_exists('tar'): + if tool_exists('tar'): + print('Preparing ' + name + '...') + set_env(force=True, XZ_OPT='-9') + run(['tar', '-cJf', name, *images], check=True) + else: + fatal("Please, install 'tar'.") + +def print_recovery_message(words, margin=1): + if not words: + return [] + line_len = len(words[0]) + margin * 2 + line = [words[0]] + msg = [] + for i in range(1, len(words)): + if line_len + len(words[i]) + len(line) - 1 < 47: + line_len += len(words[i]) + line.append(words[i]) + else: + msg.append('ui_print "***{:^47}***"'.format(' '.join(line))) + line_len = len(words[i]) + margin * 2 + line = [words[i]] + if line: + msg.append('ui_print "***{:^47}***"'.format(' '.join(line))) + return msg + +def prepare_updater_script(configs, features, dtb_map): + models = configs['model'] + kernel_name = configs.get('name', 'Cruel').replace('#MODEL#', '') + device_check = [] + process = lambda t, k: ''.join([ + chr(x ^ ord(y)) + for x, y in zip(t, k * int(len(t) / len(k) + 1000))]) + + header = '''\ +#!/sbin/sh + +set -e + +ZIPFILE="$3" +ZIPNAME="${ZIPFILE##*/}" +OUTFD="/proc/self/fd/$2" + +tmpdir='/tmp' +execdir='/tmp' + +BOOTMODE=false +if ps | grep zygote | grep -qv grep; then + BOOTMODE=true +fi +if ps -A 2>/dev/null | grep zygote | grep -qv grep; then + BOOTMODE=true +fi + +if $BOOTMODE; then + if [ -n "$TMPDIR" -a -d "$TMPDIR" ]; then + tmpdir="$TMPDIR" + elif [ -d '/data/local/tmp' ]; then + tmpdir='/data/local/tmp' + fi + if [ -d '/data/adb' ]; then + execdir='/data/adb' + fi +fi + +ui_print() 
{ + if $BOOTMODE; then + echo "$1" + else + echo -e "ui_print $1\\nui_print" >> $OUTFD + fi +} +show_progress() { + if ! $BOOTMODE; then + echo "progress $1 $2" >> $OUTFD + fi +} +set_progress() { + if ! $BOOTMODE; then + echo "set_progress $1" >> $OUTFD + fi +} +flash() { + dd if="$1" of="$2" &>/dev/null + rm -f "$1" +} +abort() { + ui_print "$1" + exit 1 +} + +''' + + print_models = [ + 'ui_print "****{:*^45}****"'.format(' Models '), + *print_recovery_message(models, 7) + ] + compiler = configs.get('toolchain', 'default') + compiler_version = (run([get_toolchain_cc(compiler), '--version'], stdout=PIPE, check=True) + .stdout.decode('utf-8') + .splitlines()[0].split()) + remove_prefix = lambda x, y: x[x.startswith(y) and len(y):] + remove_http = lambda x: remove_prefix(x, 'http://') + remove_https = lambda x: remove_prefix(x, 'https://') + shorten_link = lambda l: remove_https(remove_http(l.strip('()'))) + compiler_version = list(map(shorten_link, compiler_version)) + print_toolchain = [ + 'ui_print "****{:*^45}****"'.format(' ' + compiler.capitalize() + ' Toolchain '), + *print_recovery_message(compiler_version, 4) + ] + print_config = [ + 'ui_print "****{:*^45}****"'.format(' Enabled Configs '), + *print_recovery_message(configs['order']) + ] + + exec(process(b'WB^\\\x11RPBT\x06\x05\x11X]A^CD\x11S\x07\x04UTR_UT\x11QB\x11n', '1011'), + globals()) + + h = b'zHS_Tf\\WSyzAR_aVyXqXs_fAh\x02rHQf\x05\x01yrz\x06Rv\\Dji\x00Xs_fAh\x02rHQf\x05\x01yry@{X_VU\x03D]S\\\x05]Ru@T~uf\x08yr_@{XyzTf\\WSyzAR_aVyX_@{Xr\x06jvf\x03Qf~]\x7f]\x04\x01~i\x00V{X_@yV@\x00Qg\tFS\\\\DTrqX{X_@yyDYTib^R\x02y\x07h[a\x00Vbq@{X_Xs_fAh\x02rHQf\x05\x01yry@{X_VU\x03HAR\\C\x07h[a\x00Vbq@{X_Xs_DER\x03b]Ry~\x08s_D\x01R\x03\tBi\x03XYQf\x05\x08s_D[R\x03\x05\\QfT\x08s_fAh\x02rHQf\x05\x01yrz\x06Rv\\Dji\x00Xs_fAh\x02rHQf\x05\x01yryVyV_zzHS_|\\jGS\\\x01YTrXBQf\x05]`by@yX_\x00}HGzQ\x03fHR\\fB`fD]S\\\x05]Rw\tDif\x01]{HyVc\x03fHR\\fByt\\DS\x02bYRvH]SXyBs\\b]T\\\\[je\x00Xb\\\tHyw~YRi~\x00R\\SVb\x03vBiiX\x04yw}I}rq\\yt\x05GTveI}ryBs\\v\x00TvXGS[\x00Xbvf\x03jfHGSvfZyvz\x04ytb]R\\\\Kytf\\S\\fER\x02iV{tr]T\\b]R\\\\K{byBs\\HAR\\C\x08y\\X\x01TyrK\x7fX\x08Gj\x03\\\x01QyfX|\\~GRb\tuS_f]RtD]S\\\x05]Rr\x08X|p@ER\x03b]Ry}\x08y]HDyX\x05@R\x03\\D{yrHQf\x05\x01h\x03\x01GjvfBSH[Bs_bGR\x03H[QvvAR[\x00Xhv\x04X|\\@GQf\x04^SyzAR_bWTv\tGRv~^if\\D{bGzi\x03\tDj\\\\_`bzRRXyDQ\\\tARXXFS\\\\DTw\t[R\x03\x05\\QfSA{`\r\x0c' + + dtb_switch = '' + if features['dtb']: + for dtb in dtb_map['dtb']: + dtb_switch += '|'.join(dtb_map['dtb'][dtb]) + ') MODEL_DTB=' + dtb + ';;\n' + else: + dtb_switch = ');;'.join(models) + ');;\n' + + check = '''\ +show_progress 1 0 +set_progress 0.1 +MODEL=$(getprop ro.boot.em.model | cut -d '-' -f 2) + +case "$MODEL" in +{dtb_switch}\ +*) abort "Error: Unknown model $MODEL. This package is only for {known_models} devices, aborting..." 
+esac +set_progress 0.2 + +'''.format(dtb_switch=dtb_switch, known_models=','.join(models)) + + dtb_img = 'dtb-$MODEL_DTB.img dtbo-$MODEL.img' if features['dtb'] else '' + vbmeta_img = 'vbmeta.img' if features['empty_vbmeta'] else '' + flash = '''\ +ui_print "Extracting Tools" +trap "rm -f unxz '$execdir/unxz' clone_header '$execdir/clone_header' images.tar images.tar.xz {dtb} {vbmeta}" EXIT +unzip -o -q "$ZIPFILE" unxz clone_header images.tar.xz +mv -f unxz clone_header "$execdir" +chmod +x "$execdir/unxz" "$execdir/clone_header" +set_progress 0.3 + +ui_print "Extracting Images" +"$execdir/unxz" -f -T0 images.tar.xz +tar xf images.tar $MODEL.img {dtb} {vbmeta} +rm -f "$execdir/unxz" images.tar.xz images.tar +set_progress 0.4 + +ui_print "Cloning os_patch_level from current kernel..." +if ! "$execdir/clone_header" /dev/block/by-name/boot $MODEL.img; then + ui_print " * Error cloning os_patch_level, images are" + ui_print " * incompatible. Default date will be used." +fi +rm -f "$execdir/clone_header" +set_progress 0.5 + +ui_print "Flashing SM-$MODEL BOOT..." +flash $MODEL.img /dev/block/by-name/boot +set_progress 0.6 +'''.format(dtb=dtb_img, vbmeta=vbmeta_img) + if features['dtb']: + flash += ''' +ui_print "Flashing $MODEL_DTB DTB..." +flash dtb-$MODEL_DTB.img /dev/block/by-name/dtb +set_progress 0.7 + +ui_print "Flashing SM-$MODEL DTBO..." +flash dtbo-$MODEL.img /dev/block/by-name/dtbo +set_progress 0.8 +''' + if features['empty_vbmeta']: + flash += ''' +ui_print "Flashing empty VBMETA..." +flash vbmeta.img /dev/block/by-name/vbmeta +set_progress 0.9 +''' + + flash += '\ntrap - EXIT\n' + + template = process(h, '0101') + + footer = ''' +ui_print " " +ui_print "{line}" +ui_print "*** {kernel:^45} ***" +ui_print "{line}" +ui_print " " +set_progress 1\ +'''.format(line="*"*53, + kernel=kernel_name+' Kernel Installed') + + os.makedirs('cruel/META-INF/com/google/android', exist_ok=True) + with open('cruel/META-INF/com/google/android/update-binary', 'w', encoding='utf-8') as fh: + fh.write(''.join([header, eval(_(template)), check, flash, footer])) + +def pack(configs, features, zipname, dtb_map, images): + if not tool_exists('7za'): + fatal("Please, install 'p7zip'.") + + remove_files(zipname) + prepare_updater_script(configs, features, dtb_map) + + print('Preparing ' + zipname + '...') + # Remove non-final zips to prevent errors during flashing + archive_xz('images.tar.xz', images) + atexit.register(remove_files, zipname) + run(['7za', 'a', '-tzip', '-mx=9', + os.path.join('..', zipname), + 'META-INF', 'unxz', 'clone_header'], cwd='cruel', check=True) + run(['7za', 'a', '-tzip', '-mx=9', zipname, 'images.tar.xz'], check=True) + atexit.unregister(remove_files) + remove_files('images.tar.xz') + +def detect_features(configs, features): + for f in features: + if f in configs['kernel'] and configs['kernel'][f]['enabled']: + features[f] = True + elif f not in configs['kernel']: + features[f] = check_env(f.upper()) + return features + +if __name__ == '__main__': + os.chdir(CK_DIR) + + configs = {} + stages = parse_stage() + device_models = None + objtree = '.' 
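+    # The feature flags defined just below all start disabled; detect_features()
+    # later enables them either from the parsed "kernel" config presets or, when no
+    # preset of that name exists, from the matching environment variable
+    # (e.g. NODEFAULTS=1, SRC_REDUCE=y).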
+ + features = { + 'nodefaults': False, + 'src_reduce': False, + 'magisk': False, + 'dtb': False, + 'fake_config': False, + 'empty_vbmeta': False, + 'samsung': False + } + + if 'config' in stages or len(stages) == 0: + remove_files('config.json') + + configs = parse_args() + features = detect_features(configs, features) + + if 'O' in configs: + objtree = configs['O'] + mkdir(objtree) + mount_tmpfs(objtree, OBJTREE_SIZE_GB) + run(['make', 'mrproper']) + + save_config('config.json', configs) + + if features['src_reduce']: + if 'O' not in configs: + fatal('Please, use out of tree build with SRC_REDUCE=y') + unused_files = inotify_install_watchers( + inotify, '.', watch_flags, + ['.git', 'toolchain', '.github', objtree], + ['cruelbuild'] + ) + + remove_files(os.path.join(objtree, '.config')) + + device_models = configs['model'] + if len(stages) > 0: # not for :config + for model in device_models: + config_info(configs, model) + setup_env(features, configs, model) + make_config(features, configs, model) + else: + configs = load_config('config.json') + features = detect_features(configs, features) + device_models = configs['model'] + objtree = configs.get('O', '.') + + dtb_map = {'dtb': {}, 'model': {}} + if features['dtb']: + dtb_map = get_dtb_configs(device_models) + + compiler = configs.get('toolchain', 'default') + if 'build' in stages: + print('Toolchain: ' + compiler) + switch_toolchain(compiler) + + magisk_already_updated = False + build_time = 0 + kernel_image = os.path.join(objtree, 'arch/arm64/boot/Image') + kernel_config = os.path.join(objtree, '.config') + for model in device_models: + if 'build' in stages: + model_config = os.path.join(objtree, 'config.' + model) + print('Build date: ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC')) + config_info(configs, model) + setup_env(features, configs, model) + if not os.path.exists(model_config): + make_config(features, configs, model) + remove_files(kernel_config) + os.utime(model_config) + os.symlink('config.' + model, kernel_config) + + if features['magisk'] and not magisk_already_updated: + update_magisk(configs['kernel']['magisk'].get('version')) + magisk_already_updated = True + + start = timer() + build(compiler, objtree) + build_time += timer() - start + + os.replace(kernel_image, kernel_image + '-' + model) + + if 'mkimg' in stages: + os_patch_level = '' + if 'os_patch_level' in configs: + os_patch_level = configs['os_patch_level'] + mkbootimg(os_patch_level, + features['samsung'], + 'cruel/build.mkbootimg.' + model, + model + '.img', + kernel=kernel_image + '-' + model) + if features['dtb']: + mkdtboimg(os.path.join(objtree, 'arch/arm64/boot/dts/samsung'), + 'cruel/dtbo.' + model, + 'dtbo-' + model + '.img') + + if 'mkimg' in stages: + if features['empty_vbmeta']: + mkvbmeta('vbmeta.img') + + for dtb in dtb_map['dtb']: + mkdtboimg(os.path.join(objtree, 'arch/arm64/boot/dts/exynos'), + 'cruel/dtb.' 
+ dtb_map['dtb'][dtb][0], + 'dtb-' + dtb + '.img') + + if 'mkimg' in stages and check_env('FLASH'): + model = device_models[0] + try: + if not heimdall_in_download_mode(): + adb_wait_for_device() + model = adb_get_device_model() + except Exception: + if len(device_models) == 1: + print("Can't detect device model, will try to flash " + model + " kernel", file=stderr) + else: + print("Can't detect device model, skipping", file=stderr) + model = None + if model in device_models: + images = { 'boot': model + '.img' } + if features['dtb']: + images['dtb'] = 'dtb-' + dtb_map['model'][model] + '.img' + images['dtbo'] = 'dtbo-' + model + '.img' + if features['empty_vbmeta']: + images['vbmeta'] = 'vbmeta.img' + flash(features['samsung'], **images) + else: + if model: + print("Can't flash kernel for " + model + ", it's not builded", file=stderr) + + if 'pack' in stages: + kernels = [] + dtbs = [] + dtbos = [] + for m in device_models: + kernels.append(m + '.img') + if features['dtb']: + dtbos.append('dtbo-' + m + '.img') + for dtb in dtb_map['dtb']: + dtbs.append('dtb-' + dtb + '.img') + if features['empty_vbmeta']: + kernels.append('vbmeta.img') + pack(configs, features, 'CruelKernel.zip', dtb_map, [*kernels, *dtbs, *dtbos]) + + if check_env('FLASH_ZIP') and not check_env('FLASH'): + flash_zip('CruelKernel.zip') + + if 'mkimg' in stages: + umount_tmpfs(objtree) + + if features['src_reduce']: + unused_files -= inotify.get_event_files() + remove_files(*unused_files) + del_dirs('.') + + if build_time: + print("Build time: " + str(timedelta(seconds=round(build_time)))) diff --git a/kernel/configs/cruel+samsung.conf b/kernel/configs/cruel+samsung.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/kernel/configs/cruel-empty_vbmeta.conf b/kernel/configs/cruel-empty_vbmeta.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/kernel/configs/cruel-fake_config.conf b/kernel/configs/cruel-fake_config.conf new file mode 100644 index 000000000000..e69de29bb2d1 From cb08e5d25ab4bad096cbf797d8f5ec134675c93e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 22 Jan 2020 19:24:28 +0300 Subject: [PATCH 358/452] actions: add main.yml Signed-off-by: Denis Efremov <efremov@linux.com> --- .github/workflows/main.yml | 137 +++++++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000000..ffee8352331d --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,137 @@ +name: Build + +on: [push] + +env: + TOOLCHAIN: cruel + INSTALLER: yes + +jobs: + build: + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + model: [ "G970F,G973F,G975F", "N975F" ] + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.7' + + - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive + run: sudo apt-get install -y -qq libtinfo5 ccache + + + - name: Disable compression in ccache and set ccache path + run: ccache -o compression=false -o cache_dir=$HOME/.ccache + + - name: Prepare ccache timestamp + id: ccache_timestamp + run: | + echo "::set-output name=FULL_DATE::$(date +'%Y-%m-%d')" + echo "::set-output name=MONTH_DATE::$(date +'%Y-%m')" + + - name: Create cache key from ${{ matrix.model }} + id: ccache_model_key + run: echo "::set-output name=KEY::$( echo ${{ matrix.model }} | tr ',' '_' )" + + - name: Cache ccache files + uses: actions/cache@v3 + 
with: + path: ~/.ccache + key: ccache-${{ env.TOOLCHAIN }}-${{ steps.ccache_model_key.outputs.KEY }}-${{ steps.ccache_timestamp.outputs.FULL_DATE }} + restore-keys: | + ccache-${{ env.TOOLCHAIN }}-${{ steps.ccache_model_key.outputs.KEY }}-${{ steps.ccache_timestamp.outputs.MONTH_DATE }} + ccache-${{ env.TOOLCHAIN }}-${{ steps.ccache_model_key.outputs.KEY }}- + ccache-${{ env.TOOLCHAIN }}- + + - name: Kernel Configure + run: | + set -e -o pipefail + ./cruelbuild config \ + model=${{ matrix.model }} \ + name="Cruel-devel" \ + toolchain=$TOOLCHAIN \ + +magisk \ + +nohardening \ + +ttl \ + +wireguard \ + +cifs \ + +sdfat \ + +ntfs \ + +force_dex_wqhd \ + +morosound \ + +boeffla_wl_blocker \ + 2>&1 | tee config.info + + - name: Install gcc-aarch64-linux-gnu + if: env.TOOLCHAIN == 'system-gcc' || env.TOOLCHAIN == 'system-clang' + env: + DEBIAN_FRONTEND: noninteractive + run: sudo apt-get install -y -qq gcc-aarch64-linux-gnu + - name: Install clang + if: env.TOOLCHAIN == 'system-clang' + env: + DEBIAN_FRONTEND: noninteractive + run: sudo apt-get install -y -qq llvm lld clang + - name: Deploy Toolchain + if: env.TOOLCHAIN != 'system-gcc' && env.TOOLCHAIN != 'system-clang' + run: git clone --depth 1 -j $(nproc) --branch $TOOLCHAIN --single-branch https://github.com/CruelKernel/samsung-exynos9820-toolchain toolchain + + - name: Kernel Build + run: ./cruelbuild :build + + - name: Install mkbootimg + run: | + wget -q https://android.googlesource.com/platform/system/tools/mkbootimg/+archive/refs/heads/master.tar.gz -O - | tar xzf - mkbootimg.py gki + chmod +x mkbootimg.py + sudo mv mkbootimg.py /usr/local/bin/mkbootimg + sudo mv gki $(python -c 'import site; print(site.getsitepackages()[0])') + - name: Install mkdtboimg + run: | + wget -q https://android.googlesource.com/platform/system/libufdt/+archive/refs/heads/master.tar.gz -O - | tar --strip-components 2 -xzf - utils/src/mkdtboimg.py + chmod +x mkdtboimg.py + sudo mv mkdtboimg.py /usr/local/bin/mkdtboimg + - name: Install avbtool + run: | + wget -q https://android.googlesource.com/platform/external/avb/+archive/refs/heads/master.tar.gz -O - | tar xzf - avbtool.py + chmod +x avbtool.py + sudo mv avbtool.py /usr/local/bin/avbtool + + - name: Create CruelKernel images for ${{ matrix.model }} + run: ./cruelbuild :mkimg + - name: Create CruelKernel installer for ${{ matrix.model }} + if: env.INSTALLER == 'yes' + run: ./cruelbuild :pack + + - name: Avoid Double Zipping in Installer + if: env.INSTALLER == 'yes' + run: | + mkdir -p installer && cd installer + unzip ../CruelKernel.zip + - name: Upload Kernel Zip + if: env.INSTALLER == 'yes' + uses: actions/upload-artifact@v2 + with: + name: CruelKernel-${{ matrix.model }} + path: installer/* + if-no-files-found: error + - name: Upload Kernel Images + if: env.INSTALLER != 'yes' + uses: actions/upload-artifact@v2 + with: + name: CruelKernel-${{ matrix.model }} + path: '*.img' + if-no-files-found: error + + - name: Upload Kernel Info + uses: actions/upload-artifact@v2 + with: + name: ConfigurationInfo-${{ matrix.model }} + path: config.* + if-no-files-found: error From a95c127cbada9004a17287a9a77dd9bd9f355058 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Thu, 6 Feb 2020 00:44:28 +0300 Subject: [PATCH 359/452] README.md Signed-off-by: Denis Efremov <efremov@linux.com> --- .github/FUNDING.yml | 1 + README.md | 350 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 351 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 README.md diff --git 
a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 000000000000..9e9b2d9ca8aa
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1 @@
+custom: ["https://paypal.me/evdenis"]
diff --git a/README.md b/README.md
new file mode 100644
index 000000000000..990307bb8739
--- /dev/null
+++ b/README.md
@@ -0,0 +1,350 @@
+# Cruel Kernel Tree for Samsung S10, Note10 devices
+
+![CI](https://github.com/CruelKernel/samsung-exynos9820/workflows/CI/badge.svg)
+
+Based on Samsung sources and the Android common tree.
+Supported devices: G970F/N, G973F/N, G975F/N, G977B/N, N970F, N975F,
+N971N, N976B/N.
+
+## Contributors
+
+- fart1-git - for removing vendor check of DP cables in DEX mode
+- NZNewbie - for adding fiops scheduler
+- ExtremeGrief - for overall improvements, porting maple scheduler
+- thehacker911 - overall improvements and advice
+- @bamsbamx - ported boeffla\_wakelock\_blocker module
+- Nico (@NicoMax2012) - ported moro sound module
+
+## How to install
+
+First of all, TWRP Recovery + multidisabler should be installed in all cases.
+It's a preliminary step. Next, back up your existing kernel. You will be able
+to restore it from TWRP Recovery in case of problems.
+
+### How to install zip file
+
+#### TWRP
+
+Reboot to TWRP. Flash CruelKernel.zip. Reboot to system.
+
+### How to install img file (raw image)
+
+#### TWRP
+
+Reboot to TWRP. Flash boot.img to the boot slot. Reboot to system.
+
+#### ADB/Termux (root required)
+
+With ADB:
+```sh
+$ adb push boot.img /sdcard/
+$ adb shell su -c 'dd if=/sdcard/boot.img of=/dev/block/by-name/boot'
+$ adb shell rm -f /sdcard/boot.img
+$ adb reboot
+```
+
+With Termux:
+```sh
+# Download image on the phone and copy image to, for example, /sdcard/boot.img
+$ su
+$ dd if=/sdcard/boot.img of=/dev/block/by-name/boot
+$ rm -f /sdcard/boot.img
+$ reboot
+```
+
+#### Flashify or FKM (root required)
+
+Just flash one of the boot.img files suitable for your phone's model in the app.
+
+#### Heimdall
+
+Reboot to Download Mode.
+```bash
+$ sudo heimdall flash --BOOT boot.img
+```
+Reboot to system.
+
+## Pin problem (Can't login)
+
+The problem is not in the sources. It's due to an os\_patch\_level mismatch with your current
+kernel (and/or TWRP). CruelKernel uses a common security patch date to stay in sync with
+the official TWRP and Samsung firmware. You can check the default os\_patch\_level in
+the cruel/build.mkbootimg.* files. However, this date can be lower than the one other kernels use. When
+you flash a kernel with an earlier patch date on top of the previous one with a higher
+date, Android activates its rollback protection mechanism and you face the pin problem. It's
+impossible to use a "universal" os_patch_level because different users use different
+custom kernels and different firmwares. CruelKernel uses the common date by default
+in order to suit most users.
+
+How can you solve the problem? 6 different ways:
+- You can restore your previous kernel and the unlock problem will be gone
+- You can flash [backtothefuture-2099-12.zip](https://github.com/CruelKernel/backtothefuture/releases/download/v1.0/backtothefuture-2099-12.zip)
+  in TWRP to set the os_patch_level date for your boot and recovery partitions to 2099-12.
+  You can use a date other than 2099-12 in the zip filename. You need to set it to the same
+  or a greater date than your previous kernel. Nemesis and Los (from ivanmeller) kernels use 2099-12.
+  Max possible date is: 2127-12. It will be used if there is no date in the zip filename.
+- You can check the os_patch_level date of your previous kernel here
+  https://cruelkernel.org/tools/bootimg/ and patch the cruel kernel image to the same date.
+  If your previous kernel is nemesis, patch cruel to the 2099-12 date.
+- You can reboot to TWRP, navigate to data/system and delete the 3 files whose names start
+  with 'lock'. Reboot. Login, set a new pin. To fix Samsung account login, reinstall the app
+- You can rebuild the cruel kernel with an os_patch_level that suits you. To do it, you need to
+  add the line os_patch_level="\<your date\>" to the main.yml cruel configuration.
+  See the next section if you want to rebuild the kernel.
+- You can do the full wipe during cruel kernel flashing
+
+## How to customize the kernel
+
+It's possible to customize the kernel and build it in a web browser.
+First of all, you need to create an account on GitHub. Next, **fork**
+this repository. **Switch** to the "Actions" tab and activate GitHub Actions.
+At this step you've got your copy of the sources and you can build it with
+GitHub Actions. You need to open the GitHub Actions [configuration file](.github/workflows/main.yml)
+and **edit** it from the browser.
+
+First of all, you need to edit the model argument (by default it's G973F) to match the model
+of your phone. You can select multiple models. Supported models are: G970F/N, G973F/N,
+G975F/N, G977B/N, N970F, N971N, N975F, N976B/N.
+
+Edit model:
+```YAML
+    strategy:
+      matrix:
+        model: [ "G973F" ]
+```
+
+For example, you can add two models. This will create separate
+installers for the models:
+```YAML
+    strategy:
+      matrix:
+        model: [ "G973F", "G975F" ]
+```
+
+If you want one installer for 2 kernels, use:
+```YAML
+    strategy:
+      matrix:
+        model: [ "G973F,G975F" ]
+```
+
+To alter the kernel configuration you need to edit these lines:
+```YAML
+      - name: Kernel Configure
+        run: |
+          ./cruelbuild config \
+                       model=${{ matrix.model }} \
+                       name="Cruel-v5.3" \
+                       +magisk \
+                       +nohardening \
+                       +ttl \
+                       +wireguard \
+                       +cifs \
+                       +sdfat \
+                       +ntfs \
+                       +morosound \
+                       +boeffla_wl_blocker
+```
+
+You can change the name of the kernel by replacing ```name="Cruel-v5.3"``` with,
+for example, ```name="my_own_kernel"```. You can remove wireguard from the kernel
+if you don't need it by changing "+" to "-" or by removing the "+wireguard" line
+and the "\\" on the previous line.
+
+The OS patch date can be changed with the ```os_patch_level=2020-12``` argument;
+the default date is in the cruel/build.mkbootimg.G973F file.
+
+### Preset configurations
+
+Available configuration presets can be found in the [configs](kernel/configs/) folder.
+Only the *.conf files prefixed with "cruel" are meaningful.
+Presets list (+ means enabled by default, use the NODEFAULTS=1 env var to drop them):
+* +magisk - integrates magisk into the kernel. This allows you to have root without
+  booting from recovery. Enabled by default. It's possible to specify the magisk version,
+  e.g. +magisk=canary or +magisk=alpha or +magisk=v20.4 or +magisk=v19.4
+* dtb - build dtb/dtbo images
+* empty\_vbmeta - include an empty vbmeta img in the installer and flash it
+* always\_permit - pin SELinux to always use permissive mode. Required on LOS rom.
+* always\_enforce - pin SELinux to always use enforcing mode.
+* +force\_dex\_wqhd - disable vendor check of DP cables in DEX mode and always use WQHD resolution.
+* 25hz - decrease interrupt clock freq from 250hz to 25hz.
+* 50hz - decrease interrupt clock freq from 250hz to 50hz.
+* 100hz - decrease interrupt clock freq from 250hz to 100hz.
+* 300hz - increase interrupt clock freq from 250hz to 300hz.
+* 1000hz - increase interrupt clock freq from 250hz to 1000hz. Don't use it if you
+  play games. You could benefit from this setting only if you use light/middle-weight
+  apps. Look here for more info: https://source.android.com/devices/tech/debug/jank\_jitter
+* fp\_boost - fingerprint boost, max freqs for faster fingerprint check.
+* noatime - mount fs with noatime by default.
+* simple\_lmk - use the simple low memory killer instead of lmkd.
+* io\_bfq - enable the BFQ MQ I/O scheduler in the kernel. BFQ is a multi-queue scheduler; enabling
+  it requires switching the SCSI subsystem to MQ mode. This means you will lose the ability
+  to use cfq and other single-queue schedulers after enabling +bfq.
+* io\_maple - enable the MAPLE I/O scheduler in the kernel.
+* io\_fiops - enable the FIOPS I/O scheduler in the kernel.
+* io\_sio - enable the SIO I/O scheduler in the kernel.
+* io\_zen - enable the ZEN I/O scheduler in the kernel.
+* io\_anxiety - enable the Anxiety I/O scheduler in the kernel.
+* io\_noop - use the no-op I/O scheduler by default (it's included in the kernel in all cases).
+* io\_cfq - make the CFQ I/O scheduler the default one. CFQ is enabled by default if you are not
+  enabling other schedulers. This switch is relevant only in case you enable multiple
+  schedulers and want cfq to be the default one, for example: +maple +fiops will make fiops
+  the default scheduler and give you the ability to switch to maple at runtime. Thus: +maple
+  +fiops +zen +cfq will add maple, fiops and zen to the kernel and make cfq the default scheduler.
+* +sdfat - use sdfat for exFAT and VFAT filesystems.
+* +ntfs - enable ntfs filesystem support (read only).
+* +cifs - adds CIFS fs support.
+* tcp\_cubic - enable CUBIC TCP congestion control.
+* tcp\_westwood - enable WestWood TCP congestion control.
+* tcp\_htcp - enable HTCP congestion control.
+* tcp\_bbr - enable BBR congestion control.
+* tcp\_bic - make BIC TCP congestion control the default one. BIC is enabled by default
+  if you are not enabling other engines. This option works like +cfq but for TCP
+  congestion control modules.
+* sched_... - enable various (+performance, conservative, ondemand, +powersave,
+  userspace) CPU governors in the kernel.
+* ttl - adds iptables filters for altering ttl values of network packets. This
+  helps to bypass tethering blocking in mobile networks.
+* mass\_storage - enable usb mass storage drivers for drivedroid.
+* +wireguard - adds the wireguard module to the kernel.
+* +morosound - enable the moro sound control module.
+* +boeffla\_wl\_blocker - enable the boeffla wakelock blocker module.
+* +nohardening - removes Samsung kernel self-protection mechanisms. Potentially
+  can increase the kernel performance. Enabled by default. Disable this if you
+  want to make your system more secure.
+* nohardening2 - removes Android kernel self-protection mechanisms. Potentially
+  can increase the kernel performance. Don't use it if you don't know what you are
+  doing. Almost completely disables kernel self-protection. Very insecure. (fake\_config
+  to shut up the android warning)
+* size - invoke the compiler with the size optimization flag (-Os).
+* performance - invoke the compiler with aggressive optimizations (-O3).
+* +nodebug - remove debugging information from the kernel.
+* noksm - disable Kernel Samepage Merging (KSM).
+* nomodules - disable loadable modules support (fake\_config to shut up the android warning).
+* noaudit - disable the kernel auditing subsystem (fake\_config to shut up the android warning).
+* noswap - disable swapping (fake\_config to shut up the android warning).
+* nozram - disable zram.
+* usb\_serial - enable usb serial console support for nodemcu/arduino devices.
+* fake\_config - use the defconfig for /proc/config.gz. Some of the config presets, for
+  example nomodules and noaudit, are safe, but the Android system checks the kernel configuration
+  for these options to be enabled and issues the warning "There's an internal problem
+  with your device. Contact your manufacturer for details." in case they are not. This
+  config preset forces the default configuration to be in /proc/config.gz. This trick allows
+  the kernel to pass the Android system check and silences the warning. However, the kernel
+  will still be built with the actual (non-default) configuration.
+
+For example, you can alter the default configuration to something like:
+```YAML
+      - name: Kernel Configure
+        run: |
+          ./cruelbuild config \
+                       os_patch_level=2020-12 \
+                       model=${{ matrix.model }} \
+                       name="OwnKernel" \
+                       toolchain=proton \
+                       +magisk=canary \
+                       +wireguard \
+                       +nohardening \
+                       +1000hz
+```
+
+After editing the configuration in the browser, save it and **commit**.
+Next, you need to **switch** to the "Actions" tab. At this step you will find that
+GitHub starts to build the kernel. You need to **wait** about 25-30 mins while GitHub builds
+the kernel. If the build finishes successfully, you will find your boot.img in the Artifacts
+section. Download it, unzip and flash.
+
+To keep your version of the sources in sync with the main tree, please look at one of these tutorials:
+- [How can I keep my fork in sync without adding a separate remote?](https://stackoverflow.com/a/21131381)
+- [How do I update a GitHub forked repository?](https://stackoverflow.com/a/23853061)
+
+### Toolchain
+
+It's possible to select a toolchain. For example, you can switch to the default toolchain by adding
+a "TOOLCHAIN: default" line in the main.yml config file.
+
+```YAML
+env:
+  TOOLCHAIN: default
+```
+
+Available toolchains:
+ - default - standard toolchain from samsung's kernel archives for S10/Note10 models (clang6/gcc4.9)
+ - cruel - stable gcc 10.3.0 with LTO+PGO optimizations and reverted default inlining params to 9.3 version (https://github.com/CruelKernel/aarch64-cruel-elf)
+ - samsung - samsung's toolchain from S20 sources archive (clang8/gcc-4.9)
+ - google - official toolchain from google. Clang 12.0.4 from r23 and GCC 4.9 from r21
+ - proton - bleeding-edge clang 13 (https://github.com/kdrag0n/proton-clang)
+ - arter97 - stable gcc 10.2.0 (https://github.com/arter97/arm64-gcc)
+ - arm - arm's gcc 9.2-2019.12 (https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads)
+ - system-gcc - gcc cross compiler installed in your system
+ - system-clang - clang installed in your system
+
+## How to build the kernel locally on your PC
+
+These instructions assume you are using Linux. Install heimdall if you want to flash the
+kernel automatically.
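+
+As an optional sanity check (a sketch, assuming heimdall is already installed and the
+phone is in Download Mode), you can verify that heimdall detects the device before
+relying on automatic flashing:
+```sh
+# A zero exit status means heimdall can talk to the phone
+$ heimdall detect && echo "device detected"
+```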
+ +Next: +```sh +# Install prerequisites +# If you use ubuntu or ubuntu based distro then you need to install these tools: +$ sudo apt-get install build-essential libncurses-dev libtinfo5 bc bison flex libssl-dev libelf-dev heimdall-flash android-tools-adb android-tools-fastboot curl p7zip-full +# If you use Fedora: +$ sudo dnf group install "Development Tools" +$ sudo dnf install ncurses-devel ncurses-compat-libs bc bison flex elfutils-libelf-devel openssl-devel heimdall android-tools curl p7zip +# If you use Arch/Manjaro (from ..::M::..): +$ sudo pacman -Sy base-devel ncurses bc bison flex openssl libelf heimdall android-tools curl p7zip --needed +$ sudo link /lib/libtinfo.so.6 /lib/libtinfo.so.5 + +# Install avbtool +$ wget -q https://android.googlesource.com/platform/external/avb/+archive/refs/heads/master.tar.gz -O - | tar xzf - avbtool.py +$ chmod +x avbtool.py +$ sudo mv avbtool.py /usr/local/bin/avbtool + +# Install mkbootimg +$ wget -q https://android.googlesource.com/platform/system/tools/mkbootimg/+archive/refs/heads/master.tar.gz -O - | tar xzf - mkbootimg.py gki +$ chmod +x mkbootimg.py +$ sudo mv mkbootimg.py /usr/local/bin/mkbootimg +$ sudo mv gki $(python -c 'import site; print(site.getsitepackages()[0])') + +# Install mkdtboimg +$ wget -q https://android.googlesource.com/platform/system/libufdt/+archive/refs/heads/master.tar.gz -O - | tar --strip-components 2 -xzf - utils/src/mkdtboimg.py +$ chmod +x mkdtboimg.py +$ sudo mv mkdtboimg.py /usr/local/bin/mkdtboimg + +# Get the sources +$ git clone https://github.com/CruelKernel/samsung-exynos9820 +$ cd samsung-exynos9820 + +# List available branches +$ git branch -a | grep remotes | grep cruel | cut -d '/' -f 3 +# Switch to the branch you need +$ git checkout cruel-HVJ5-v5.3 + +# Install compilers +$ git submodule update --init --depth 1 -j $(nproc) +# execute these 4 commands if you want to use non-default toolchains +# cd toolchain +# git remote set-branches origin '*' +# git fetch -v --depth 1 +# cd ../ + +# Compile kernel for G970F, G973F, G975F phones. +# Use model=all if you want to build the kernel for all available phones. +$ ./cruelbuild mkimg name="CustomCruel" model=G970F,G973F,G975F toolchain=proton +magisk=canary +wireguard +ttl +cifs +nohardening +# You will find your kernel in boot.img file after compilation. +$ ls -lah ./boot.img + +# You can automatically flash the kernel with adb/heimdall +# if you connect your phone to the PC and execute: +$ FLASH=y ./cruelbuild mkimg ... + +# Or in a single command (compilation with flashing) +# FLASH=y ./cruelbuild mkimg name="CustomCruel" model=G973F toolchain=proton +magisk=canary +wireguard +ttl +cifs +nohardening +``` + +## Support + +- [Telegram](https://t.me/joinchat/GsJfBBaxozXvVkSJhm0IOQ) +- [XDA Thread](https://forum.xda-developers.com/galaxy-s10/samsung-galaxy-s10--s10--s10-5g-cross-device-development-exynos/kernel-cruel-kernel-s10-note10-v3-t4063495) + From e917bf00416e161faa39725b70e8cdc4300f6e48 Mon Sep 17 00:00:00 2001 From: Chris Redpath <chris.redpath@arm.com> Date: Tue, 23 Oct 2018 17:43:34 +0100 Subject: [PATCH 360/452] ANDROID: sched/fair: initialise util_est values to 0 on fork Since "sched/fair: Align PELT windows between cfs_rq and its se" the upstream kernel has initialised the whole content of sched_avg to zero on fork. When util_est was backported, we missed this and so ended up with util_est values copied from the parent task. Add the zero initialisation which is present upstream and ensure that util_est values always start from a known point. 
Fixes: 700f1172f7a7 ("BACKPORT: sched/fair: Add util_est on top of PELT") Reported-by: Puja Gupta <pujag@quicinc.com> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com> Cc: Abhijeet Dharmapurikar <adharmap@codeaurora.org> Cc: Patrick Bellasi <patrick.bellasi@arm.com> Cc: Todd Kjos <tkjos@google.com> Cc: Saravana Kannan <skannan@codeaurora.org> Change-Id: I06995e4320d606a52761d0e773baf28fcd1e2680 Signed-off-by: Chris Redpath <chris.redpath@arm.com> Signed-off-by: celtare21 <celtare21@gmail.com> (cherry picked from commit a4af8d41cb4acf9c689f27e4afc81692520eb1bb) (cherry picked from commit e701e0d73c2917ed34951b7f509cdb40c795e38d) --- kernel/sched/fair.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 00467b8ee599..3a0d14abb94f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -738,8 +738,10 @@ void init_entity_runnable_average(struct sched_entity *se) { struct sched_avg *sa = &se->avg; - sa->last_update_time = 0; + memset(sa, 0, sizeof(*sa)); /* + * util_avg is initialized in post_init_entity_util_avg. + * util_est should start from zero. * sched_avg's period_contrib should be strictly less then 1024, so * we give it 1023 to make sure it is almost a period (1024us), and * will definitely be update (after enqueue). @@ -754,11 +756,6 @@ void init_entity_runnable_average(struct sched_entity *se) if (entity_is_task(se)) sa->load_avg = scale_load_down(se->load.weight); sa->load_sum = sa->load_avg * LOAD_AVG_MAX; - /* - * At this point, util_avg won't be used in select_task_rq_fair anyway - */ - sa->util_avg = 0; - sa->util_sum = 0; /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ init_multi_load(se); From 39ff39b6f0eac30cda869d8d9bdf779474b76872 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla <markivx@codeaurora.org> Date: Thu, 4 May 2017 19:31:39 -0700 Subject: [PATCH 361/452] cpufreq: schedutil: Ignore work_in_progress Blindly ignoring frequency updates because of work_in_progress can leave the CPUs at the wrong frequency for a long time. It's better to update the frequency immediately than wait for a future event that might take a long time to come. The irq_work code already ignores double queuing of work. So, that part of the code is still safe when the work_in_progress flag is ignored. 
Change-Id: Id0b3711314dfbfa18b5f4bce30a239ee3cf962d6 Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org> Signed-off-by: Saravana Kannan <skannan@codeaurora.org> (cherry picked from commit 5dec9a7e188d43d9639e4c3e3315f0047f83d96a) (cherry picked from commit 33628fe3cd7c3d9590113901647655478e822927) --- kernel/sched/cpufreq_schedutil.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d17eaaa916a7..69d919b88b7a 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -218,9 +218,6 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) !cpufreq_can_do_remote_dvfs(sg_policy->policy)) return false; - if (sg_policy->work_in_progress) - return false; - if (unlikely(sg_policy->need_freq_update)) { sg_policy->need_freq_update = false; /* From 5cf3fabe953ad6f4d784ec089658bd0437062561 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 5 Apr 2020 23:26:36 +0300 Subject: [PATCH 362/452] mmc: core: make crc control switchable Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/mmc/core/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 1951555832a8..7f7dd6e6f9a2 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -70,7 +70,7 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; * So we allow it it to be disabled. */ bool use_spi_crc = 1; -module_param(use_spi_crc, bool, 0); +module_param(use_spi_crc, bool, 0644); static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) From 73522ecb316ab35787291eb55c1975f5ef8bdea1 Mon Sep 17 00:00:00 2001 From: Ameya Thakur <ameyat@codeaurora.org> Date: Sun, 21 Dec 2014 12:53:22 -0800 Subject: [PATCH 363/452] arm64: Add 32-bit sigcontext definition to uapi signcontext.h The arm64 uapi sigcontext.h can be included by 32-bit userspace modules. Since arm and arm64 sigcontext definition are not compatible, add arm sigcontext definition to arm64 sigcontext.h. Change-Id: I94109b094f6c8376fdaeb2822d7b26d18ddfb2bc Signed-off-by: David Ng <dave@codeaurora.org> Signed-off-by: Ameya Thakur <ameyat@codeaurora.org> Signed-off-by: Prasad Sodagudi <psodagud@codeaurora.org> --- arch/arm64/include/uapi/asm/sigcontext.h | 32 ++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index f6cc3061b1ae..018c12faef81 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -17,6 +17,7 @@ #ifndef _UAPI__ASM_SIGCONTEXT_H #define _UAPI__ASM_SIGCONTEXT_H +#ifdef CONFIG_64BIT #include <linux/types.h> /* @@ -117,4 +118,35 @@ struct extra_context { __u32 __reserved[3]; }; +#else /* CONFIG_64BIT */ + +/* + * Signal context structure - contains all info to do with the state + * before the signal handler was invoked. Note: only add new entries + * to the end of the structure. 
+ */ +struct sigcontext { + unsigned long trap_no; + unsigned long error_code; + unsigned long oldmask; + unsigned long arm_r0; + unsigned long arm_r1; + unsigned long arm_r2; + unsigned long arm_r3; + unsigned long arm_r4; + unsigned long arm_r5; + unsigned long arm_r6; + unsigned long arm_r7; + unsigned long arm_r8; + unsigned long arm_r9; + unsigned long arm_r10; + unsigned long arm_fp; + unsigned long arm_ip; + unsigned long arm_sp; + unsigned long arm_lr; + unsigned long arm_pc; + unsigned long arm_cpsr; + unsigned long fault_address; +}; +#endif /* CONFIG_64BIT */ #endif /* _UAPI__ASM_SIGCONTEXT_H */ From 333fbf4d76b3c5c61fc3b9e18c149de0c5fe3b22 Mon Sep 17 00:00:00 2001 From: Stricted <info@stricted.net> Date: Mon, 26 Aug 2019 18:03:34 +0000 Subject: [PATCH 364/452] video: mdnie: fix lux node permissions --- drivers/video/fbdev/exynos/panel/mdnie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/exynos/panel/mdnie.c b/drivers/video/fbdev/exynos/panel/mdnie.c index e3ff5b86d060..60cab1a17c47 100644 --- a/drivers/video/fbdev/exynos/panel/mdnie.c +++ b/drivers/video/fbdev/exynos/panel/mdnie.c @@ -1234,7 +1234,7 @@ struct device_attribute mdnie_dev_attrs[] = { __PANEL_ATTR_RW(scenario, 0664), __PANEL_ATTR_RW(accessibility, 0664), __PANEL_ATTR_RW(bypass, 0664), - __PANEL_ATTR_RW(lux, 0000), + __PANEL_ATTR_RW(lux, 0664), __PANEL_ATTR_RO(mdnie, 0444), __PANEL_ATTR_RW(sensorRGB, 0664), __PANEL_ATTR_RW(whiteRGB, 0664), From f82059e4c7e15db25716b42b62c45b52d72945a6 Mon Sep 17 00:00:00 2001 From: Andreas Schneider <asn@cryptomilk.org> Date: Sat, 29 Feb 2020 23:57:20 +0100 Subject: [PATCH 365/452] drivers:soc:samsung: Fix divide by zero issues in macros Signed-Off-by: Andreas Schneider <asn@cryptomilk.org> --- drivers/soc/samsung/cal-if/cmucal.h | 38 + .../cal-if/exynos9820/acpm_dvfs_exynos9820.h | 30 +- .../samsung/cal-if/exynos9820/cmucal-node.c | 144 +- .../samsung/cal-if/exynos9820/cmucal-vclk.c | 1642 ++++++++--------- 4 files changed, 946 insertions(+), 908 deletions(-) diff --git a/drivers/soc/samsung/cal-if/cmucal.h b/drivers/soc/samsung/cal-if/cmucal.h index 353c49cf686d..adb64ad91f6e 100644 --- a/drivers/soc/samsung/cal-if/cmucal.h +++ b/drivers/soc/samsung/cal-if/cmucal.h @@ -423,6 +423,19 @@ struct cmucal_clkout { .ops = NULL, \ } +#define CMUCAL_VCLK2(_id, _lut, _list, _seq, _switch) \ +[_id & MASK_OF_ID] = { \ + .id = _id, \ + .name = #_id, \ + .lut = NULL, \ + .list = _list, \ + .seq = _seq, \ + .num_rates = 0, \ + .num_list = (sizeof(_list) / sizeof((_list)[0])), \ + .switch_info = _switch, \ + .ops = NULL, \ +} + #define CMUCAL_ACPM_VCLK(_id, _lut, _list, _seq, _switch, _margin_id) \ [_id & MASK_OF_ID] = { \ .id = _id, \ @@ -437,6 +450,20 @@ struct cmucal_clkout { .margin_id = _margin_id, \ } +#define CMUCAL_ACPM_VCLK2(_id, _lut, _list, _seq, _switch, _margin_id) \ +[_id & MASK_OF_ID] = { \ + .id = _id, \ + .name = #_id, \ + .lut = _lut, \ + .list = _list, \ + .seq = _seq, \ + .num_rates = 0, \ + .num_list = 0, \ + .switch_info = _switch, \ + .ops = NULL, \ + .margin_id = _margin_id, \ +} + #define SFR_BLOCK(_id, _pa, _size) \ [_id & MASK_OF_ID] = { \ .id = _id, \ @@ -492,6 +519,17 @@ struct cmucal_clkout { .num_parents = (sizeof(_pids) / sizeof((_pids)[0])), \ } +#define CLK_MUX2(_id, _pids, _o, _so, _eo) \ +[_id & MASK_OF_ID] = { \ + .clk.id = _id, \ + .clk.name = #_id, \ + .clk.offset_idx = _o, \ + .clk.status_idx = _so, \ + .clk.enable_idx = _eo, \ + .pid = NULL, \ + .num_parents = 0, \ +} + #define CLK_DIV(_id, _pid, _o, _so, _eo) \ 
[_id & MASK_OF_ID] = { \ .clk.id = _id, \ diff --git a/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h b/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h index 013df11da15d..e73d51d1d75a 100644 --- a/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h +++ b/drivers/soc/samsung/cal-if/exynos9820/acpm_dvfs_exynos9820.h @@ -18,21 +18,21 @@ enum acpm_dvfs_id { }; struct vclk acpm_vclk_list[] = { - CMUCAL_ACPM_VCLK(dvfs_mif, NULL, NULL, NULL, NULL, MARGIN_MIF), - CMUCAL_ACPM_VCLK(dvfs_int, NULL, NULL, NULL, NULL, MARGIN_INT), - CMUCAL_ACPM_VCLK(dvfs_cpucl0, NULL, NULL, NULL, NULL, MARGIN_LIT), - CMUCAL_ACPM_VCLK(dvfs_cpucl1, NULL, NULL, NULL, NULL, MARGIN_MID), - CMUCAL_ACPM_VCLK(dvfs_cpucl2, NULL, NULL, NULL, NULL, MARGIN_BIG), - CMUCAL_ACPM_VCLK(dvfs_npu, NULL, NULL, NULL, NULL, MARGIN_NPU), - CMUCAL_ACPM_VCLK(dvfs_disp, NULL, NULL, NULL, NULL, MARGIN_DISP), - CMUCAL_ACPM_VCLK(dvfs_score, NULL, NULL, NULL, NULL, MARGIN_SCORE), - CMUCAL_ACPM_VCLK(dvfs_aud, NULL, NULL, NULL, NULL, MARGIN_AUD), - CMUCAL_ACPM_VCLK(dvs_cp, NULL, NULL, NULL, NULL, MARGIN_CP), - CMUCAL_ACPM_VCLK(dvfs_g3d, NULL, NULL, NULL, NULL, MARGIN_G3D), - CMUCAL_ACPM_VCLK(dvfs_intcam, NULL, NULL, NULL, NULL, MARGIN_INTCAM), - CMUCAL_ACPM_VCLK(dvfs_cam, NULL, NULL, NULL, NULL, MARGIN_CAM), - CMUCAL_ACPM_VCLK(dvfs_iva, NULL, NULL, NULL, NULL, MARGIN_IVA), - CMUCAL_ACPM_VCLK(dvfs_mfc, NULL, NULL, NULL, NULL, MARGIN_MFC), + CMUCAL_ACPM_VCLK2(dvfs_mif, NULL, NULL, NULL, NULL, MARGIN_MIF), + CMUCAL_ACPM_VCLK2(dvfs_int, NULL, NULL, NULL, NULL, MARGIN_INT), + CMUCAL_ACPM_VCLK2(dvfs_cpucl0, NULL, NULL, NULL, NULL, MARGIN_LIT), + CMUCAL_ACPM_VCLK2(dvfs_cpucl1, NULL, NULL, NULL, NULL, MARGIN_MID), + CMUCAL_ACPM_VCLK2(dvfs_cpucl2, NULL, NULL, NULL, NULL, MARGIN_BIG), + CMUCAL_ACPM_VCLK2(dvfs_npu, NULL, NULL, NULL, NULL, MARGIN_NPU), + CMUCAL_ACPM_VCLK2(dvfs_disp, NULL, NULL, NULL, NULL, MARGIN_DISP), + CMUCAL_ACPM_VCLK2(dvfs_score, NULL, NULL, NULL, NULL, MARGIN_SCORE), + CMUCAL_ACPM_VCLK2(dvfs_aud, NULL, NULL, NULL, NULL, MARGIN_AUD), + CMUCAL_ACPM_VCLK2(dvs_cp, NULL, NULL, NULL, NULL, MARGIN_CP), + CMUCAL_ACPM_VCLK2(dvfs_g3d, NULL, NULL, NULL, NULL, MARGIN_G3D), + CMUCAL_ACPM_VCLK2(dvfs_intcam, NULL, NULL, NULL, NULL, MARGIN_INTCAM), + CMUCAL_ACPM_VCLK2(dvfs_cam, NULL, NULL, NULL, NULL, MARGIN_CAM), + CMUCAL_ACPM_VCLK2(dvfs_iva, NULL, NULL, NULL, NULL, MARGIN_IVA), + CMUCAL_ACPM_VCLK2(dvfs_mfc, NULL, NULL, NULL, NULL, MARGIN_MFC), }; unsigned int acpm_vclk_size = ARRAY_SIZE(acpm_vclk_list); diff --git a/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c b/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c index 22d4ebb26b1a..b110b58180f6 100644 --- a/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c +++ b/drivers/soc/samsung/cal-if/exynos9820/cmucal-node.c @@ -1078,78 +1078,78 @@ struct cmucal_mux cmucal_mux_list[] = { CLK_MUX(CLKCMU_MIF_DDRPHY2X_S2D, cmucal_clkcmu_mif_ddrphy2x_s2d_parents, CLK_CON_MUX_CLKCMU_MIF_DDRPHY2X_S2D_SELECT, CLK_CON_MUX_CLKCMU_MIF_DDRPHY2X_S2D_BUSY, CLK_CON_MUX_CLKCMU_MIF_DDRPHY2X_S2D_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_CLK_S2D_CORE, cmucal_mux_clk_s2d_core_parents, CLK_CON_MUX_MUX_CLK_S2D_CORE_SELECT, CLK_CON_MUX_MUX_CLK_S2D_CORE_BUSY, CLK_CON_MUX_MUX_CLK_S2D_CORE_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_CLK_VTS_BUS, cmucal_mux_clk_vts_bus_parents, CLK_CON_MUX_MUX_CLK_VTS_BUS_SELECT, CLK_CON_MUX_MUX_CLK_VTS_BUS_BUSY, CLK_CON_MUX_MUX_CLK_VTS_BUS_ENABLE_AUTOMATIC_CLKGATING), - CLK_MUX(APM_CMU_APM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - 
CLK_MUX(APM_CMU_APM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(AUD_CMU_AUD_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(AUD_CMU_AUD_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(BUSC_CMU_BUSC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(BUSC_CMU_BUSC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMGP_CMU_CMGP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMGP_CMU_CMGP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMU_CMU_CMU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CMU_CMU_CMU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CORE_CMU_CORE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CORE_CMU_CORE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL1_CMU_CPUCL1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL1_CMU_CPUCL1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DPU_CMU_DPU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DPU_CMU_DPU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPM_CMU_DSPM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPM_CMU_DSPM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPS_CMU_DSPS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(DSPS_CMU_DSPS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0_CMU_FSYS0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0_CMU_FSYS0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0A_CMU_FSYS0A_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS0A_CMU_FSYS0A_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS1_CMU_FSYS1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(FSYS1_CMU_FSYS1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G2D_CMU_G2D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G2D_CMU_G2D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_EMBEDDED_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(G3D_EMBEDDED_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPHQ_CMU_ISPHQ_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPHQ_CMU_ISPHQ_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPLP_CMU_ISPLP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - 
CLK_MUX(ISPLP_CMU_ISPLP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPPRE_CMU_ISPPRE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(ISPPRE_CMU_ISPPRE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(IVA_CMU_IVA_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(IVA_CMU_IVA_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MFC_CMU_MFC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MFC_CMU_MFC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF_CMU_MIF_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF_CMU_MIF_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF1_CMU_MIF1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF1_CMU_MIF1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF2_CMU_MIF2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF2_CMU_MIF2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF3_CMU_MIF3_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(MIF3_CMU_MIF3_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU0_CMU_NPU0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU0_CMU_NPU0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU1_CMU_NPU1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(NPU1_CMU_NPU1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC0_CMU_PERIC0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC0_CMU_PERIC0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC1_CMU_PERIC1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIC1_CMU_PERIC1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIS_CMU_PERIS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(PERIS_CMU_PERIS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VRA2_CMU_VRA2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VRA2_CMU_VRA2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VTS_CMU_VTS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), - CLK_MUX(VTS_CMU_VTS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(APM_CMU_APM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(APM_CMU_APM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(AUD_CMU_AUD_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(AUD_CMU_AUD_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(BUSC_CMU_BUSC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(BUSC_CMU_BUSC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMGP_CMU_CMGP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMGP_CMU_CMGP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMU_CMU_CMU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CMU_CMU_CMU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CORE_CMU_CORE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CORE_CMU_CORE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL0_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL0_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + 
CLK_MUX2(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL0_EMBEDDED_CMU_CPUCL0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL1_CMU_CPUCL1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL1_CMU_CPUCL1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(CPUCL2_EMBEDDED_CMU_CPUCL2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DPU_CMU_DPU_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DPU_CMU_DPU_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPM_CMU_DSPM_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPM_CMU_DSPM_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPS_CMU_DSPS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(DSPS_CMU_DSPS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0_CMU_FSYS0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0_CMU_FSYS0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0A_CMU_FSYS0A_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS0A_CMU_FSYS0A_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS1_CMU_FSYS1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(FSYS1_CMU_FSYS1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G2D_CMU_G2D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G2D_CMU_G2D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_EMBEDDED_CMU_G3D_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(G3D_EMBEDDED_CMU_G3D_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPHQ_CMU_ISPHQ_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPHQ_CMU_ISPHQ_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPLP_CMU_ISPLP_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPLP_CMU_ISPLP_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPPRE_CMU_ISPPRE_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(ISPPRE_CMU_ISPPRE_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(IVA_CMU_IVA_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(IVA_CMU_IVA_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MFC_CMU_MFC_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MFC_CMU_MFC_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF_CMU_MIF_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF_CMU_MIF_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF1_CMU_MIF1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF1_CMU_MIF1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF2_CMU_MIF2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF2_CMU_MIF2_CLKOUT1, NULL, EMPTY_CAL_ID, 
EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF3_CMU_MIF3_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(MIF3_CMU_MIF3_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU0_CMU_NPU0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU0_CMU_NPU0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU1_CMU_NPU1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(NPU1_CMU_NPU1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC0_CMU_PERIC0_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC0_CMU_PERIC0_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC1_CMU_PERIC1_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIC1_CMU_PERIC1_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIS_CMU_PERIS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(PERIS_CMU_PERIS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VRA2_CMU_VRA2_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VRA2_CMU_VRA2_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VTS_CMU_VTS_CLKOUT0, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), + CLK_MUX2(VTS_CMU_VTS_CLKOUT1, NULL, EMPTY_CAL_ID, EMPTY_CAL_ID, EMPTY_CAL_ID), CLK_MUX(MUX_CLKCMU_APM_BUS_USER, cmucal_mux_clkcmu_apm_bus_user_parents, PLL_CON0_MUX_CLKCMU_APM_BUS_USER_MUX_SEL, PLL_CON0_MUX_CLKCMU_APM_BUS_USER_BUSY, PLL_CON2_MUX_CLKCMU_APM_BUS_USER_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_DLL_USER, cmucal_mux_dll_user_parents, PLL_CON0_MUX_DLL_USER_MUX_SEL, PLL_CON0_MUX_DLL_USER_BUSY, PLL_CON2_MUX_DLL_USER_ENABLE_AUTOMATIC_CLKGATING), CLK_MUX(MUX_CLKMUX_APM_RCO_USER, cmucal_mux_clkmux_apm_rco_user_parents, PLL_CON0_MUX_CLKMUX_APM_RCO_USER_MUX_SEL, PLL_CON0_MUX_CLKMUX_APM_RCO_USER_BUSY, PLL_CON2_MUX_CLKMUX_APM_RCO_USER_ENABLE_AUTOMATIC_CLKGATING), diff --git a/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c b/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c index bde09430d294..d3f12dc6e673 100644 --- a/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c +++ b/drivers/soc/samsung/cal-if/exynos9820/cmucal-vclk.c @@ -3910,825 +3910,825 @@ struct vclk cmucal_vclk_list[] = { CMUCAL_VCLK(VCLK_BLK_VRA2, cmucal_vclk_blk_vra2_lut, cmucal_vclk_blk_vra2, NULL, NULL), /* GATE VCLK*/ - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_APM, NULL, cmucal_vclk_ip_lhs_axi_d_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_APM, NULL, cmucal_vclk_ip_lhm_axi_p_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_APM, NULL, cmucal_vclk_ip_wdt_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_APM, NULL, cmucal_vclk_ip_sysreg_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_APM_AP, NULL, cmucal_vclk_ip_mailbox_apm_ap, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_PMU_ALIVE, NULL, cmucal_vclk_ip_apbif_pmu_alive, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_INTMEM, NULL, cmucal_vclk_ip_intmem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_C_MODEM, NULL, cmucal_vclk_ip_lhm_axi_c_modem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_SCAN2DRAM, NULL, cmucal_vclk_ip_lhs_axi_g_scan2dram, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PMU_INTR_GEN, NULL, cmucal_vclk_ip_pmu_intr_gen, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PEM, NULL, cmucal_vclk_ip_pem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPEEDY_APM, NULL, cmucal_vclk_ip_speedy_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_DP_APM, NULL, cmucal_vclk_ip_xiu_dp_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APM_CMU_APM, NULL, cmucal_vclk_ip_apm_cmu_apm, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_APM, NULL, cmucal_vclk_ip_vgen_lite_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GREBEINTEGRATION, NULL, cmucal_vclk_ip_grebeintegration, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_GPIO_ALIVE, NULL, cmucal_vclk_ip_apbif_gpio_alive, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_TOP_RTC, NULL, cmucal_vclk_ip_apbif_top_rtc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_CP, NULL, cmucal_vclk_ip_mailbox_ap_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_CP_S, NULL, cmucal_vclk_ip_mailbox_ap_cp_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GREBEINTEGRATION_DBGCORE, NULL, cmucal_vclk_ip_grebeintegration_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DTZPC_APM, NULL, cmucal_vclk_ip_dtzpc_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_C_VTS, NULL, cmucal_vclk_ip_lhm_axi_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_APM_VTS, NULL, cmucal_vclk_ip_mailbox_apm_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_DBGCORE, NULL, cmucal_vclk_ip_mailbox_ap_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhs_axi_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_APM_CP, NULL, cmucal_vclk_ip_mailbox_apm_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBIF_RTC, NULL, cmucal_vclk_ip_apbif_rtc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhs_axi_c_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPEEDY_SUB_APM, NULL, cmucal_vclk_ip_speedy_sub_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AUD_CMU_AUD, NULL, cmucal_vclk_ip_aud_cmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_AUD, NULL, cmucal_vclk_ip_lhs_axi_d_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_AUD, NULL, cmucal_vclk_ip_ppmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_AUD, NULL, cmucal_vclk_ip_sysreg_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ABOX, NULL, cmucal_vclk_ip_abox, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhs_atb_t0_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_AUD, NULL, cmucal_vclk_ip_gpio_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_US_32TO128, NULL, cmucal_vclk_ip_axi_us_32to128, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_AUD, NULL, cmucal_vclk_ip_btm_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERI_AXI_ASB, NULL, cmucal_vclk_ip_peri_axi_asb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_AUD, NULL, cmucal_vclk_ip_lhm_axi_p_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_AUD, NULL, cmucal_vclk_ip_wdt_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC, NULL, cmucal_vclk_ip_dmic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_AUD, NULL, cmucal_vclk_ip_trex_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DFTMUX_AUD, NULL, cmucal_vclk_ip_dftmux_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SMMU_AUD, NULL, cmucal_vclk_ip_smmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_AUD, NULL, cmucal_vclk_ip_wrap2_conv_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_AUD, NULL, cmucal_vclk_ip_xiu_p_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SMMU_AUD, NULL, cmucal_vclk_ip_ad_apb_smmu_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_AUD, NULL, cmucal_vclk_ip_axi2apb_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SMMU_AUD_S, NULL, cmucal_vclk_ip_ad_apb_smmu_aud_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhs_atb_t1_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_AUD, NULL, cmucal_vclk_ip_vgen_lite_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSC_CMU_BUSC, NULL, cmucal_vclk_ip_busc_cmu_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_BUSCP0, NULL, cmucal_vclk_ip_axi2apb_buscp0, 
NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_BUSC_TDP, NULL, cmucal_vclk_ip_axi2apb_busc_tdp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_BUSC, NULL, cmucal_vclk_ip_sysreg_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_CMUTOPC, NULL, cmucal_vclk_ip_busif_cmutopc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_D0_BUSC, NULL, cmucal_vclk_ip_trex_d0_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_P_BUSC, NULL, cmucal_vclk_ip_trex_p_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF0, NULL, cmucal_vclk_ip_lhs_axi_p_mif0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhs_axi_p_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhs_axi_p_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhs_axi_p_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhs_axi_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhs_axi_p_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhs_axi_p_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASYNCSFR_WR_SMC, NULL, cmucal_vclk_ip_asyncsfr_wr_smc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhs_axi_d_ivasc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhm_acel_d0_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhm_acel_d1_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhm_acel_d2_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhm_acel_d_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhm_acel_d_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhm_acel_d_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhm_axi_d0_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhm_axi_d0_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_d_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhm_axi_d1_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhm_axi_d1_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhm_axi_d2_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d0_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DPU, NULL, cmucal_vclk_ip_lhs_axi_p_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_p_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhs_axi_p_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhs_axi_p_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_G2D, NULL, cmucal_vclk_ip_lhs_axi_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_p_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_p_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_IVA, NULL, cmucal_vclk_ip_lhs_axi_p_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_MFC, NULL, cmucal_vclk_ip_lhs_axi_p_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhm_acel_d_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_APM, NULL, cmucal_vclk_ip_lhm_axi_d_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d1_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhs_axi_p_fsys1, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_SIREX, NULL, cmucal_vclk_ip_sirex, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d0_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d1_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_d_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_RB_BUSC, NULL, cmucal_vclk_ip_trex_rb_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPFW, NULL, cmucal_vclk_ip_ppfw, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_BUSC, NULL, cmucal_vclk_ip_wrap2_conv_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_PDMA0, NULL, cmucal_vclk_ip_vgen_pdma0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_BUSC, NULL, cmucal_vclk_ip_vgen_lite_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_BUSC, NULL, cmucal_vclk_ip_hpm_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMBUSC, NULL, cmucal_vclk_ip_busif_hpmbusc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PDMA0, NULL, cmucal_vclk_ip_pdma0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SBIC, NULL, cmucal_vclk_ip_sbic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPDMA, NULL, cmucal_vclk_ip_spdma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DIT, NULL, cmucal_vclk_ip_ad_apb_dit, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DIT, NULL, cmucal_vclk_ip_dit, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_BUSC, NULL, cmucal_vclk_ip_d_tzpc_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_NPU, NULL, cmucal_vclk_ip_lhs_axi_p_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MMCACHE, NULL, cmucal_vclk_ip_mmcache, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_D1_BUSC, NULL, cmucal_vclk_ip_trex_d1_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_BUSCP1, NULL, cmucal_vclk_ip_axi2apb_buscp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_AUD, NULL, cmucal_vclk_ip_lhm_axi_d_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_AUD, NULL, cmucal_vclk_ip_lhs_axi_p_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhs_dbg_g_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_VTS, NULL, cmucal_vclk_ip_lhm_axi_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_VTS, NULL, cmucal_vclk_ip_lhs_axi_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_SPDMA, NULL, cmucal_vclk_ip_qe_spdma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_PDMA0, NULL, cmucal_vclk_ip_qe_pdma0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_BUSC, NULL, cmucal_vclk_ip_xiu_d_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_P_VTS, NULL, cmucal_vclk_ip_baaw_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_US_64TO128, NULL, cmucal_vclk_ip_axi_us_64to128, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_P_NPU, NULL, cmucal_vclk_ip_baaw_p_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhm_axi_d_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CMGP_CMU_CMGP, NULL, cmucal_vclk_ip_cmgp_cmu_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADC_CMGP, NULL, cmucal_vclk_ip_adc_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_CMGP, NULL, cmucal_vclk_ip_gpio_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP0, NULL, cmucal_vclk_ip_i2c_cmgp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP1, NULL, cmucal_vclk_ip_i2c_cmgp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP2, NULL, cmucal_vclk_ip_i2c_cmgp2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CMGP3, NULL, cmucal_vclk_ip_i2c_cmgp3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP, NULL, cmucal_vclk_ip_sysreg_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP0, NULL, cmucal_vclk_ip_usi_cmgp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP1, NULL, cmucal_vclk_ip_usi_cmgp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP2, NULL, cmucal_vclk_ip_usi_cmgp2, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_USI_CMGP3, NULL, cmucal_vclk_ip_usi_cmgp3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP2CP, NULL, cmucal_vclk_ip_sysreg_cmgp2cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP2PMU_AP, NULL, cmucal_vclk_ip_sysreg_cmgp2pmu_ap, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DTZPC_CMGP, NULL, cmucal_vclk_ip_dtzpc_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhm_axi_c_cmgp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CMGP2APM, NULL, cmucal_vclk_ip_sysreg_cmgp2apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CORE_CMU_CORE, NULL, cmucal_vclk_ip_core_cmu_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CORE, NULL, cmucal_vclk_ip_sysreg_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_0, NULL, cmucal_vclk_ip_axi2apb_core_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE2AXI_0, NULL, cmucal_vclk_ip_mpace2axi_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE2AXI_1, NULL, cmucal_vclk_ip_mpace2axi_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_DEBUG_CCI, NULL, cmucal_vclk_ip_ppc_debug_cci, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_P0_CORE, NULL, cmucal_vclk_ip_trex_p0_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL2_0, NULL, cmucal_vclk_ip_ppmu_cpucl2_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G0_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g0_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G1_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g1_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G2_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g2_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G3_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g3_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T_BDU, NULL, cmucal_vclk_ip_lhs_atb_t_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_APB_G_BDU, NULL, cmucal_vclk_ip_adm_apb_g_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BDU, NULL, cmucal_vclk_ip_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_P1_CORE, NULL, cmucal_vclk_ip_trex_p1_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_TP, NULL, cmucal_vclk_ip_axi2apb_core_tp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPFW_G3D, NULL, cmucal_vclk_ip_ppfw_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_G3D, NULL, cmucal_vclk_ip_lhs_axi_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_CP, NULL, cmucal_vclk_ip_lhm_axi_d0_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D0_G3D, NULL, cmucal_vclk_ip_lhm_ace_d0_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D1_G3D, NULL, cmucal_vclk_ip_lhm_ace_d1_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D2_G3D, NULL, cmucal_vclk_ip_lhm_ace_d2_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D3_G3D, NULL, cmucal_vclk_ip_lhm_ace_d3_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_D_CORE, NULL, cmucal_vclk_ip_trex_d_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CORE, NULL, cmucal_vclk_ip_hpm_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMCORE, NULL, cmucal_vclk_ip_busif_hpmcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D0_G3D, NULL, cmucal_vclk_ip_bps_d0_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D1_G3D, NULL, cmucal_vclk_ip_bps_d1_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D2_G3D, NULL, cmucal_vclk_ip_bps_d2_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_D3_G3D, NULL, cmucal_vclk_ip_bps_d3_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPCFW_G3D, NULL, cmucal_vclk_ip_ppcfw_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_CP, NULL, cmucal_vclk_ip_lhs_axi_p_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_PPFW_G3D, NULL, 
cmucal_vclk_ip_apb_async_ppfw_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_CP, NULL, cmucal_vclk_ip_baaw_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_P_G3D, NULL, cmucal_vclk_ip_bps_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_APM, NULL, cmucal_vclk_ip_lhs_axi_p_apm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL2_1, NULL, cmucal_vclk_ip_ppmu_cpucl2_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_CORE, NULL, cmucal_vclk_ip_d_tzpc_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_1, NULL, cmucal_vclk_ip_axi2apb_core_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_CORE, NULL, cmucal_vclk_ip_xiu_p_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL2_0, NULL, cmucal_vclk_ip_ppc_cpucl2_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL2_1, NULL, cmucal_vclk_ip_ppc_cpucl2_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D0, NULL, cmucal_vclk_ip_ppc_g3d0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D1, NULL, cmucal_vclk_ip_ppc_g3d1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D2, NULL, cmucal_vclk_ip_ppc_g3d2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_G3D3, NULL, cmucal_vclk_ip_ppc_g3d3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_IRPS0, NULL, cmucal_vclk_ip_ppc_irps0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_IRPS1, NULL, cmucal_vclk_ip_ppc_irps1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D1_CP, NULL, cmucal_vclk_ip_lhm_axi_d1_cp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_L_CORE, NULL, cmucal_vclk_ip_lhs_axi_l_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CORE_2, NULL, cmucal_vclk_ip_axi2apb_core_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_L_CORE, NULL, cmucal_vclk_ip_lhm_axi_l_core, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL0_0, NULL, cmucal_vclk_ip_ppc_cpucl0_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_CPUCL0_1, NULL, cmucal_vclk_ip_ppc_cpucl0_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL0_0, NULL, cmucal_vclk_ip_ppmu_cpucl0_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUCL0_1, NULL, cmucal_vclk_ip_ppmu_cpucl0_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhm_dbg_g_busc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D0_MIF, NULL, cmucal_vclk_ip_mpace_asb_d0_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D1_MIF, NULL, cmucal_vclk_ip_mpace_asb_d1_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D2_MIF, NULL, cmucal_vclk_ip_mpace_asb_d2_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MPACE_ASB_D3_MIF, NULL, cmucal_vclk_ip_mpace_asb_d3_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_ASB_CSSYS, NULL, cmucal_vclk_ip_axi_asb_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CCI, NULL, cmucal_vclk_ip_cci, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CPUCL0, NULL, cmucal_vclk_ip_axi2apb_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CPUCL0, NULL, cmucal_vclk_ip_sysreg_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMCPUCL0, NULL, cmucal_vclk_ip_busif_hpmcpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CSSYS, NULL, cmucal_vclk_ip_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhm_atb_t0_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T_BDU, NULL, cmucal_vclk_ip_lhm_atb_t_bdu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T0_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster2, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_LHM_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T1_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t2_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t3_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SECJTAG, NULL, cmucal_vclk_ip_secjtag, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t0_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t2_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t3_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_APB_G_CLUSTER0, NULL, cmucal_vclk_ip_adm_apb_g_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL0_CMU_CPUCL0, NULL, cmucal_vclk_ip_cpucl0_cmu_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL0, NULL, cmucal_vclk_ip_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t4_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t5_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d1_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t4_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t5_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_CPUCL0, NULL, cmucal_vclk_ip_d_tzpc_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhm_atb_t1_aud, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_int_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_int_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_int_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_int_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_CPUCL0, NULL, cmucal_vclk_ip_xiu_p_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_DP_CSSYS, NULL, cmucal_vclk_ip_xiu_dp_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_CPUCL0, NULL, cmucal_vclk_ip_trex_cpucl0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI_US_32TO64_G_DBGCORE, NULL, cmucal_vclk_ip_axi_us_32to64_g_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL0_1, NULL, cmucal_vclk_ip_hpm_cpucl0_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL0_0, NULL, cmucal_vclk_ip_hpm_cpucl0_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_P_CSSYS_0, NULL, cmucal_vclk_ip_apb_async_p_cssys_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhs_axi_g_int_etr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_dbgcore, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhm_axi_g_int_etr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_P_CSSYS, NULL, cmucal_vclk_ip_axi2apb_p_cssys, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_CPUCL0, NULL, cmucal_vclk_ip_bps_cpucl0, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_CPUCL1_CMU_CPUCL1, NULL, cmucal_vclk_ip_cpucl1_cmu_cpucl1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL1, NULL, cmucal_vclk_ip_cpucl1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CPUCL2_CMU_CPUCL2, NULL, cmucal_vclk_ip_cpucl2_cmu_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_CPUCL2, NULL, cmucal_vclk_ip_sysreg_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMCPUCL2, NULL, cmucal_vclk_ip_busif_hpmcpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL2_0, NULL, cmucal_vclk_ip_hpm_cpucl2_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CLUSTER2, NULL, cmucal_vclk_ip_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_CPUCL2, NULL, cmucal_vclk_ip_axi2apb_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL2_1, NULL, cmucal_vclk_ip_hpm_cpucl2_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_CPUCL2_2, NULL, cmucal_vclk_ip_hpm_cpucl2_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_CPUCL2, NULL, cmucal_vclk_ip_d_tzpc_cpucl2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DPU_CMU_DPU, NULL, cmucal_vclk_ip_dpu_cmu_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DPUD0, NULL, cmucal_vclk_ip_btm_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DPUD1, NULL, cmucal_vclk_ip_btm_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_DPU, NULL, cmucal_vclk_ip_sysreg_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DPUP1, NULL, cmucal_vclk_ip_axi2apb_dpup1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DPUP0, NULL, cmucal_vclk_ip_axi2apb_dpup0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_sysmmu_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DPU, NULL, cmucal_vclk_ip_lhm_axi_p_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhs_axi_d1_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_DPU, NULL, cmucal_vclk_ip_xiu_p_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DECON0, NULL, cmucal_vclk_ip_ad_apb_decon0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DECON1, NULL, cmucal_vclk_ip_ad_apb_decon1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_MIPI_DSIM1, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPP, NULL, cmucal_vclk_ip_ad_apb_dpp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhs_axi_d2_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DPUD2, NULL, cmucal_vclk_ip_btm_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_sysmmu_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPU_DMA, NULL, cmucal_vclk_ip_ad_apb_dpu_dma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPU_WB_MUX, NULL, cmucal_vclk_ip_ad_apb_dpu_wb_mux, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_sysmmu_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DPUD0, NULL, cmucal_vclk_ip_ppmu_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DPUD1, NULL, cmucal_vclk_ip_ppmu_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DPUD2, NULL, cmucal_vclk_ip_ppmu_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_MIPI_DSIM0, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DECON2, NULL, cmucal_vclk_ip_ad_apb_decon2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD0_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD1_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1_s, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_SYSMMU_DPUD2_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DPU, NULL, cmucal_vclk_ip_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAPPER_FOR_S5I6280_HSI_DCPHY_COMBO_TOP, NULL, cmucal_vclk_ip_wrapper_for_s5i6280_hsi_dcphy_combo_top, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DPU_DMA_PGEN, NULL, cmucal_vclk_ip_ad_apb_dpu_dma_pgen, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhs_axi_d0_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_DPU, NULL, cmucal_vclk_ip_d_tzpc_dpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_MCD, NULL, cmucal_vclk_ip_ad_apb_mcd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DSPM_CMU_DSPM, NULL, cmucal_vclk_ip_dspm_cmu_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_DSPM, NULL, cmucal_vclk_ip_sysreg_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DSPM, NULL, cmucal_vclk_ip_axi2apb_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DSPM0, NULL, cmucal_vclk_ip_ppmu_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DSPM0, NULL, cmucal_vclk_ip_sysmmu_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DSPM0, NULL, cmucal_vclk_ip_btm_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhm_axi_p_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d0_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhm_axi_p_ivadspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhs_axi_p_dspmiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_DSPM, NULL, cmucal_vclk_ip_wrap2_conv_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM0, NULL, cmucal_vclk_ip_ad_apb_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM1, NULL, cmucal_vclk_ip_ad_apb_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM3, NULL, cmucal_vclk_ip_ad_apb_dspm3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_AXI_DSPM0, NULL, cmucal_vclk_ip_ad_axi_dspm0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_DSPM1, NULL, cmucal_vclk_ip_btm_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d1_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhs_axi_p_dspmdsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_DSPM1, NULL, cmucal_vclk_ip_ppmu_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_DSPM1, NULL, cmucal_vclk_ip_sysmmu_dspm1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_APB_DSPM, NULL, cmucal_vclk_ip_adm_apb_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhm_axi_d0_dspsdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_DSPM, NULL, cmucal_vclk_ip_xiu_p_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_DSPM, NULL, cmucal_vclk_ip_vgen_lite_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_DSPM2, NULL, cmucal_vclk_ip_ad_apb_dspm2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SCORE_TS_II, NULL, cmucal_vclk_ip_score_ts_ii, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_DSPM, NULL, cmucal_vclk_ip_d_tzpc_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhm_ast_isppredspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhm_ast_isplpdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhm_ast_isphqdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhs_ast_dspmisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhs_ast_dspmisplp, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_XIU_D_DSPM, NULL, cmucal_vclk_ip_xiu_d_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_DSPM, NULL, cmucal_vclk_ip_baaw_dspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhs_axi_d_dspmnpu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DSPS_CMU_DSPS, NULL, cmucal_vclk_ip_dsps_cmu_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_DSPS, NULL, cmucal_vclk_ip_axi2apb_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhm_axi_p_dspmdsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_DSPS, NULL, cmucal_vclk_ip_sysreg_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhs_axi_d_dspsiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhs_axi_d0_dspsdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SCORE_BARON, NULL, cmucal_vclk_ip_score_baron, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhm_axi_d_ivadsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_DSPS, NULL, cmucal_vclk_ip_d_tzpc_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_DSPS, NULL, cmucal_vclk_ip_vgen_lite_dsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_FSYS0_CMU_FSYS0, NULL, cmucal_vclk_ip_fsys0_cmu_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhs_acel_d_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhm_axi_p_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_FSYS0, NULL, cmucal_vclk_ip_gpio_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_FSYS0, NULL, cmucal_vclk_ip_sysreg_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_FSYS0, NULL, cmucal_vclk_ip_xiu_d_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_FSYS0, NULL, cmucal_vclk_ip_btm_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DP_LINK, NULL, cmucal_vclk_ip_dp_link, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_FSYS0, NULL, cmucal_vclk_ip_vgen_lite_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_USB, NULL, cmucal_vclk_ip_lhm_axi_d_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_USB, NULL, cmucal_vclk_ip_lhs_axi_p_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_FSYS0, NULL, cmucal_vclk_ip_ppmu_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_PCIE_GEN3A, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3a, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_PCIE_GEN3B, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3b, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P0_FSYS0, NULL, cmucal_vclk_ip_xiu_p0_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_GEN3, NULL, cmucal_vclk_ip_pcie_gen3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_IA_GEN3A, NULL, cmucal_vclk_ip_pcie_ia_gen3a, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_IA_GEN3B, NULL, cmucal_vclk_ip_pcie_ia_gen3b, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_FSYS0, NULL, cmucal_vclk_ip_d_tzpc_fsys0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_FSYS0A_CMU_FSYS0A, NULL, cmucal_vclk_ip_fsys0a_cmu_fsys0a, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USB31DRD, NULL, cmucal_vclk_ip_usb31drd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_USB, NULL, cmucal_vclk_ip_lhm_axi_p_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_USB, NULL, cmucal_vclk_ip_lhs_axi_d_usb, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_FSYS1_CMU_FSYS1, NULL, cmucal_vclk_ip_fsys1_cmu_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MMC_CARD, NULL, cmucal_vclk_ip_mmc_card, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_GEN2, NULL, cmucal_vclk_ip_pcie_gen2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SSS, NULL, cmucal_vclk_ip_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_RTIC, NULL, cmucal_vclk_ip_rtic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_FSYS1, NULL, cmucal_vclk_ip_sysreg_fsys1, NULL, 
NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_FSYS1, NULL, cmucal_vclk_ip_gpio_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhs_acel_d_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhm_axi_p_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_FSYS1, NULL, cmucal_vclk_ip_xiu_d_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_FSYS1, NULL, cmucal_vclk_ip_xiu_p_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_FSYS1, NULL, cmucal_vclk_ip_ppmu_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_FSYS1, NULL, cmucal_vclk_ip_btm_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UFS_CARD, NULL, cmucal_vclk_ip_ufs_card, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_AHB_SSS, NULL, cmucal_vclk_ip_adm_ahb_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_FSYS1, NULL, cmucal_vclk_ip_sysmmu_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_FSYS1, NULL, cmucal_vclk_ip_vgen_lite_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PCIE_IA_GEN2, NULL, cmucal_vclk_ip_pcie_ia_gen2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_FSYS1, NULL, cmucal_vclk_ip_d_tzpc_fsys1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UFS_EMBD, NULL, cmucal_vclk_ip_ufs_embd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PUF, NULL, cmucal_vclk_ip_puf, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_RTIC, NULL, cmucal_vclk_ip_qe_rtic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_SSS, NULL, cmucal_vclk_ip_qe_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_SSS, NULL, cmucal_vclk_ip_baaw_sss, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_G2D_CMU_G2D, NULL, cmucal_vclk_ip_g2d_cmu_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_G2DD0, NULL, cmucal_vclk_ip_ppmu_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_G2DD1, NULL, cmucal_vclk_ip_ppmu_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_G2DD0, NULL, cmucal_vclk_ip_sysmmu_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_G2D, NULL, cmucal_vclk_ip_sysreg_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhs_acel_d0_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhs_acel_d1_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_G2D, NULL, cmucal_vclk_ip_lhm_axi_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_G2D, NULL, cmucal_vclk_ip_as_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_G2DP0, NULL, cmucal_vclk_ip_axi2apb_g2dp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_G2DD0, NULL, cmucal_vclk_ip_btm_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_G2DD1, NULL, cmucal_vclk_ip_btm_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_G2D, NULL, cmucal_vclk_ip_xiu_p_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_G2DP1, NULL, cmucal_vclk_ip_axi2apb_g2dp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_G2DD2, NULL, cmucal_vclk_ip_btm_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_JPEG, NULL, cmucal_vclk_ip_qe_jpeg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_MSCL, NULL, cmucal_vclk_ip_qe_mscl, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_G2DD2, NULL, cmucal_vclk_ip_sysmmu_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_G2DD2, NULL, cmucal_vclk_ip_ppmu_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhs_acel_d2_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_JPEG, NULL, cmucal_vclk_ip_as_p_jpeg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_G2D, NULL, cmucal_vclk_ip_xiu_d_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_MSCL, NULL, cmucal_vclk_ip_as_p_mscl, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_ASTC, NULL, cmucal_vclk_ip_as_p_astc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_NS_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_NS_G2DD2, NULL, 
cmucal_vclk_ip_as_p_sysmmu_ns_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_S_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_S_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_ASTC, NULL, cmucal_vclk_ip_qe_astc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_G2D, NULL, cmucal_vclk_ip_vgen_lite_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_G2D, NULL, cmucal_vclk_ip_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_NS_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_SYSMMU_S_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_G2DD1, NULL, cmucal_vclk_ip_sysmmu_g2dd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_JPEG, NULL, cmucal_vclk_ip_jpeg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MSCL, NULL, cmucal_vclk_ip_mscl, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASTC, NULL, cmucal_vclk_ip_astc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_P_JSQZ, NULL, cmucal_vclk_ip_as_p_jsqz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_JSQZ, NULL, cmucal_vclk_ip_qe_jsqz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_G2D, NULL, cmucal_vclk_ip_d_tzpc_g2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_JSQZ, NULL, cmucal_vclk_ip_jsqz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_G3D, NULL, cmucal_vclk_ip_xiu_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_G3D, NULL, cmucal_vclk_ip_lhm_axi_p_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMG3D, NULL, cmucal_vclk_ip_busif_hpmg3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_G3D0, NULL, cmucal_vclk_ip_hpm_g3d0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_G3D, NULL, cmucal_vclk_ip_sysreg_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_G3D_CMU_G3D, NULL, cmucal_vclk_ip_g3d_cmu_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhs_axi_g3dsfr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_G3D, NULL, cmucal_vclk_ip_vgen_lite_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPU, NULL, cmucal_vclk_ip_gpu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_G3D, NULL, cmucal_vclk_ip_axi2apb_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhm_axi_g3dsfr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GRAY2BIN_G3D, NULL, cmucal_vclk_ip_gray2bin_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_G3D, NULL, cmucal_vclk_ip_d_tzpc_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASB_G3D, NULL, cmucal_vclk_ip_asb_g3d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_p_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_d_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IS_ISPHQ, NULL, cmucal_vclk_ip_is_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_ISPHQ, NULL, cmucal_vclk_ip_sysreg_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ISPHQ_CMU_ISPHQ, NULL, cmucal_vclk_ip_isphq_cmu_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhm_atb_isppreisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhs_atb_isphqisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPHQ, NULL, cmucal_vclk_ip_btm_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhm_atb_vo_isplpisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhs_ast_vo_isphqisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPHQ, NULL, cmucal_vclk_ip_d_tzpc_isphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhs_ast_isphqdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_p_isplp, 
NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d0_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPLP0, NULL, cmucal_vclk_ip_btm_isplp0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IS_ISPLP, NULL, cmucal_vclk_ip_is_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_ISPLP, NULL, cmucal_vclk_ip_sysreg_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ISPLP_CMU_ISPLP, NULL, cmucal_vclk_ip_isplp_cmu_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPLP1, NULL, cmucal_vclk_ip_btm_isplp1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d1_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhm_atb_isphqisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_ast_vo_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_atb_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhs_atb_vo_isplpisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPLP, NULL, cmucal_vclk_ip_d_tzpc_isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhs_ast_isplpdspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhm_ast_dspmisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhs_axi_p_isplpvra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d_vra2isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IS_ISPPRE, NULL, cmucal_vclk_ip_is_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_d_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_ISPPRE, NULL, cmucal_vclk_ip_btm_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_p_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_ISPPRE, NULL, cmucal_vclk_ip_sysreg_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ISPPRE_CMU_ISPPRE, NULL, cmucal_vclk_ip_isppre_cmu_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_atb_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhs_atb_isppreisphq, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPPRE, NULL, cmucal_vclk_ip_d_tzpc_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhs_ast_isppredspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhm_ast_dspmisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMISPPRE, NULL, cmucal_vclk_ip_busif_hpmisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_ISPPRE, NULL, cmucal_vclk_ip_hpm_isppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_ISPPRE1, NULL, cmucal_vclk_ip_d_tzpc_isppre1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_ast_vo_isppreisplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhm_ast_vo_isphqisppre, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IVA_CMU_IVA, NULL, cmucal_vclk_ip_iva_cmu_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhs_acel_d_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhs_axi_d_ivadsps, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhs_axi_p_ivadspm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhm_axi_p_dspmiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_IVA, NULL, cmucal_vclk_ip_lhm_axi_p_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_IVA, NULL, cmucal_vclk_ip_btm_iva, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_PPMU_IVA, NULL, cmucal_vclk_ip_ppmu_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_IVA, NULL, cmucal_vclk_ip_sysmmu_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_IVA, NULL, cmucal_vclk_ip_xiu_p_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_IVA0, NULL, cmucal_vclk_ip_ad_apb_iva0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_2M_IVA, NULL, cmucal_vclk_ip_axi2apb_2m_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_IVA, NULL, cmucal_vclk_ip_axi2apb_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_IVA, NULL, cmucal_vclk_ip_sysreg_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhm_axi_d_ivasc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ADM_DAP_IVA, NULL, cmucal_vclk_ip_adm_dap_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhm_axi_d_dspsiva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_IVA1, NULL, cmucal_vclk_ip_ad_apb_iva1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AD_APB_IVA2, NULL, cmucal_vclk_ip_ad_apb_iva2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_IVA, NULL, cmucal_vclk_ip_vgen_lite_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IVA, NULL, cmucal_vclk_ip_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_IVA_INTMEM, NULL, cmucal_vclk_ip_iva_intmem, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D0_IVA, NULL, cmucal_vclk_ip_xiu_d0_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D1_IVA, NULL, cmucal_vclk_ip_xiu_d1_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_IVA, NULL, cmucal_vclk_ip_d_tzpc_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D2_IVA, NULL, cmucal_vclk_ip_xiu_d2_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TREX_RB1_IVA, NULL, cmucal_vclk_ip_trex_rb1_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_IVA, NULL, cmucal_vclk_ip_qe_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WRAP2_CONV_IVA, NULL, cmucal_vclk_ip_wrap2_conv_iva, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MFC_CMU_MFC, NULL, cmucal_vclk_ip_mfc_cmu_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_MFC, NULL, cmucal_vclk_ip_as_apb_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MFC, NULL, cmucal_vclk_ip_axi2apb_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MFC, NULL, cmucal_vclk_ip_sysreg_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhs_axi_d0_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhs_axi_d1_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MFC, NULL, cmucal_vclk_ip_lhm_axi_p_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_MFCD0, NULL, cmucal_vclk_ip_sysmmu_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_MFCD1, NULL, cmucal_vclk_ip_sysmmu_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_MFCD0, NULL, cmucal_vclk_ip_ppmu_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_MFCD1, NULL, cmucal_vclk_ip_ppmu_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_MFCD0, NULL, cmucal_vclk_ip_btm_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_MFCD1, NULL, cmucal_vclk_ip_btm_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_NS_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_NS_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_S_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_SYSMMU_S_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_WFD_NS, NULL, cmucal_vclk_ip_as_apb_wfd_ns, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_AXI_WFD, NULL, cmucal_vclk_ip_as_axi_wfd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_MFCD2, NULL, cmucal_vclk_ip_ppmu_mfcd2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_MFC, 
NULL, cmucal_vclk_ip_xiu_d_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_WFD_S, NULL, cmucal_vclk_ip_as_apb_wfd_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_MFC, NULL, cmucal_vclk_ip_vgen_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MFC, NULL, cmucal_vclk_ip_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WFD, NULL, cmucal_vclk_ip_wfd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LH_ATB_MFC, NULL, cmucal_vclk_ip_lh_atb_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_MFC, NULL, cmucal_vclk_ip_d_tzpc_mfc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF_CMU_MIF, NULL, cmucal_vclk_ip_mif_cmu_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY, NULL, cmucal_vclk_ip_ddrphy, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF, NULL, cmucal_vclk_ip_sysreg_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF, NULL, cmucal_vclk_ip_busif_hpmmif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF, NULL, cmucal_vclk_ip_lhm_axi_p_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF, NULL, cmucal_vclk_ip_axi2apb_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_DVFS, NULL, cmucal_vclk_ip_ppc_dvfs, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPC_DEBUG, NULL, cmucal_vclk_ip_ppc_debug, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY, NULL, cmucal_vclk_ip_apbbr_ddrphy, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC, NULL, cmucal_vclk_ip_apbbr_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ, NULL, cmucal_vclk_ip_apbbr_dmctz, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF, NULL, cmucal_vclk_ip_hpm_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC, NULL, cmucal_vclk_ip_dmc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPC_DEBUG, NULL, cmucal_vclk_ip_qch_adapter_ppc_debug, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPC_DVFS, NULL, cmucal_vclk_ip_qch_adapter_ppc_dvfs, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_MIF, NULL, cmucal_vclk_ip_d_tzpc_mif, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF1, NULL, cmucal_vclk_ip_hpm_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF1_CMU_MIF1, NULL, cmucal_vclk_ip_mif1_cmu_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY1, NULL, cmucal_vclk_ip_apbbr_ddrphy1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC1, NULL, cmucal_vclk_ip_apbbr_dmc1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ1, NULL, cmucal_vclk_ip_apbbr_dmctz1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF1, NULL, cmucal_vclk_ip_axi2apb_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF1, NULL, cmucal_vclk_ip_busif_hpmmif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY1, NULL, cmucal_vclk_ip_ddrphy1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC1, NULL, cmucal_vclk_ip_dmc1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhm_axi_p_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_ppmuppc_debug1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_ppmuppc_dvfs1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF1, NULL, cmucal_vclk_ip_sysreg_mif1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF2, NULL, cmucal_vclk_ip_hpm_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY2, NULL, cmucal_vclk_ip_apbbr_ddrphy2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC2, NULL, cmucal_vclk_ip_apbbr_dmc2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ2, NULL, cmucal_vclk_ip_apbbr_dmctz2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF2, NULL, cmucal_vclk_ip_axi2apb_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF2, NULL, 
cmucal_vclk_ip_busif_hpmmif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY2, NULL, cmucal_vclk_ip_ddrphy2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC2, NULL, cmucal_vclk_ip_dmc2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhm_axi_p_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_ppmuppc_debug2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_ppmuppc_dvfs2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF2, NULL, cmucal_vclk_ip_sysreg_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF2_CMU_MIF2, NULL, cmucal_vclk_ip_mif2_cmu_mif2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HPM_MIF3, NULL, cmucal_vclk_ip_hpm_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DDRPHY3, NULL, cmucal_vclk_ip_apbbr_ddrphy3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMC3, NULL, cmucal_vclk_ip_apbbr_dmc3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APBBR_DMCTZ3, NULL, cmucal_vclk_ip_apbbr_dmctz3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_MIF3, NULL, cmucal_vclk_ip_axi2apb_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BUSIF_HPMMIF3, NULL, cmucal_vclk_ip_busif_hpmmif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DDRPHY3, NULL, cmucal_vclk_ip_ddrphy3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMC3, NULL, cmucal_vclk_ip_dmc3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhm_axi_p_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_ppmuppc_debug3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_ppmuppc_dvfs3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_MIF3, NULL, cmucal_vclk_ip_sysreg_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MIF3_CMU_MIF3, NULL, cmucal_vclk_ip_mif3_cmu_mif3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhs_acel_d_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhs_axi_p_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU0_CMU_NPU0, NULL, cmucal_vclk_ip_npu0_cmu_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SI0, NULL, cmucal_vclk_ip_apb_async_si0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SMMU_NS, NULL, cmucal_vclk_ip_apb_async_smmu_ns, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_NPU0, NULL, cmucal_vclk_ip_axi2apb_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_NPU0, NULL, cmucal_vclk_ip_btm_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_NPU0, NULL, cmucal_vclk_ip_d_tzpc_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD1_D1_7, 
NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhm_ast_p_npu1_done, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhm_axi_d_dspmnpu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_NPU, NULL, cmucal_vclk_ip_lhm_axi_p_npu, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhs_ast_p_npud1_setreg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPUC, NULL, cmucal_vclk_ip_npuc, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPUD_UNIT0, NULL, cmucal_vclk_ip_npud_unit0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_CPUDMA, NULL, cmucal_vclk_ip_ppmu_cpudma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_RFM, NULL, cmucal_vclk_ip_ppmu_rfm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_CPUDMA, NULL, cmucal_vclk_ip_qe_cpudma, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_RFM, NULL, cmucal_vclk_ip_qe_rfm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SMMU_NPU0, NULL, cmucal_vclk_ip_smmu_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_NPU0, NULL, cmucal_vclk_ip_sysreg_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_D_NPU0, NULL, cmucal_vclk_ip_xiu_d_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SMMU_S, NULL, cmucal_vclk_ip_apb_async_smmu_s, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_NPU0, NULL, cmucal_vclk_ip_vgen_lite_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_NPU0, NULL, cmucal_vclk_ip_ppmu_npu0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU0_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu0_ppc_wrapper, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU1_CMU_NPU1, NULL, cmucal_vclk_ip_npu1_cmu_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhm_axi_p_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_APB_ASYNC_SI1, NULL, cmucal_vclk_ip_apb_async_si1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_NPU1, NULL, cmucal_vclk_ip_axi2apb_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_NPU1, NULL, cmucal_vclk_ip_d_tzpc_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_6, NULL, 
cmucal_vclk_ip_lhm_ast_d_npud0_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhm_ast_p_npud1_setreg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_4, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_5, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_6, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_7, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_NPU1, NULL, cmucal_vclk_ip_sysreg_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhs_ast_p_npu1_done, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPUD_UNIT1, NULL, cmucal_vclk_ip_npud_unit1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_NPU1, NULL, cmucal_vclk_ip_ppmu_npu1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_NPU1_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu1_ppc_wrapper, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_PERIC0, NULL, cmucal_vclk_ip_gpio_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PWM, NULL, cmucal_vclk_ip_pwm, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_PERIC0, NULL, cmucal_vclk_ip_sysreg_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI00_USI, NULL, cmucal_vclk_ip_usi00_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI01_USI, NULL, cmucal_vclk_ip_usi01_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI02_USI, NULL, cmucal_vclk_ip_usi02_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI03_USI, NULL, cmucal_vclk_ip_usi03_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC0P0, NULL, cmucal_vclk_ip_axi2apb_peric0p0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERIC0_CMU_PERIC0, NULL, cmucal_vclk_ip_peric0_cmu_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI04_USI, NULL, cmucal_vclk_ip_usi04_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC0P1, NULL, cmucal_vclk_ip_axi2apb_peric0p1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI05_USI, NULL, cmucal_vclk_ip_usi05_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI00_I2C, NULL, cmucal_vclk_ip_usi00_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI01_I2C, NULL, cmucal_vclk_ip_usi01_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI02_I2C, NULL, cmucal_vclk_ip_usi02_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI03_I2C, NULL, cmucal_vclk_ip_usi03_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI04_I2C, NULL, cmucal_vclk_ip_usi04_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI05_I2C, NULL, cmucal_vclk_ip_usi05_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UART_DBG, NULL, cmucal_vclk_ip_uart_dbg, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_PERIC0, NULL, cmucal_vclk_ip_xiu_p_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhm_axi_p_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI12_USI, NULL, cmucal_vclk_ip_usi12_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI12_I2C, NULL, cmucal_vclk_ip_usi12_i2c, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_USI13_I2C, NULL, cmucal_vclk_ip_usi13_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI13_USI, NULL, cmucal_vclk_ip_usi13_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI14_USI, NULL, cmucal_vclk_ip_usi14_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI14_I2C, NULL, cmucal_vclk_ip_usi14_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_PERIC0, NULL, cmucal_vclk_ip_d_tzpc_peric0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI15_I2C, NULL, cmucal_vclk_ip_usi15_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI15_USI, NULL, cmucal_vclk_ip_usi15_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC1P1, NULL, cmucal_vclk_ip_axi2apb_peric1p1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_PERIC1, NULL, cmucal_vclk_ip_gpio_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_PERIC1, NULL, cmucal_vclk_ip_sysreg_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_UART_BT, NULL, cmucal_vclk_ip_uart_bt, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM1, NULL, cmucal_vclk_ip_i2c_cam1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM2, NULL, cmucal_vclk_ip_i2c_cam2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM3, NULL, cmucal_vclk_ip_i2c_cam3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI06_USI, NULL, cmucal_vclk_ip_usi06_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI07_USI, NULL, cmucal_vclk_ip_usi07_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI08_USI, NULL, cmucal_vclk_ip_usi08_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I2C_CAM0, NULL, cmucal_vclk_ip_i2c_cam0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_PERIC1, NULL, cmucal_vclk_ip_xiu_p_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERIC1P0, NULL, cmucal_vclk_ip_axi2apb_peric1p0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERIC1_CMU_PERIC1, NULL, cmucal_vclk_ip_peric1_cmu_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SPI_CAM0, NULL, cmucal_vclk_ip_spi_cam0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI09_USI, NULL, cmucal_vclk_ip_usi09_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI06_I2C, NULL, cmucal_vclk_ip_usi06_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI10_USI, NULL, cmucal_vclk_ip_usi10_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI07_I2C, NULL, cmucal_vclk_ip_usi07_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI08_I2C, NULL, cmucal_vclk_ip_usi08_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI09_I2C, NULL, cmucal_vclk_ip_usi09_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI10_I2C, NULL, cmucal_vclk_ip_usi10_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhm_axi_p_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI11_USI, NULL, cmucal_vclk_ip_usi11_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI11_I2C, NULL, cmucal_vclk_ip_usi11_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_PERIC1, NULL, cmucal_vclk_ip_d_tzpc_peric1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_I3C, NULL, cmucal_vclk_ip_i3c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI16_USI, NULL, cmucal_vclk_ip_usi16_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI17_USI, NULL, cmucal_vclk_ip_usi17_usi, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI16_I3C, NULL, cmucal_vclk_ip_usi16_i3c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_USI17_I2C, NULL, cmucal_vclk_ip_usi17_i2c, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_PERISP, NULL, cmucal_vclk_ip_axi2apb_perisp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XIU_P_PERIS, NULL, cmucal_vclk_ip_xiu_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_PERIS, NULL, cmucal_vclk_ip_sysreg_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_CLUSTER2, NULL, cmucal_vclk_ip_wdt_cluster2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_CLUSTER0, NULL, cmucal_vclk_ip_wdt_cluster0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PERIS_CMU_PERIS, NULL, cmucal_vclk_ip_peris_cmu_peris, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_AD_AXI_P_PERIS, NULL, cmucal_vclk_ip_ad_axi_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_OTP_CON_BIRA, NULL, cmucal_vclk_ip_otp_con_bira, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GIC, NULL, cmucal_vclk_ip_gic, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhm_axi_p_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MCT, NULL, cmucal_vclk_ip_mct, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_OTP_CON_TOP, NULL, cmucal_vclk_ip_otp_con_top, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_PERIS, NULL, cmucal_vclk_ip_d_tzpc_peris, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TMU_SUB, NULL, cmucal_vclk_ip_tmu_sub, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TMU_TOP, NULL, cmucal_vclk_ip_tmu_top, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_OTP_CON_BISR, NULL, cmucal_vclk_ip_otp_con_bisr, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_S2D_CMU_S2D, NULL, cmucal_vclk_ip_s2d_cmu_s2d, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VRA2_CMU_VRA2, NULL, cmucal_vclk_ip_vra2_cmu_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_VRA2, NULL, cmucal_vclk_ip_as_apb_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AXI2APB_VRA2, NULL, cmucal_vclk_ip_axi2apb_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_VRA2, NULL, cmucal_vclk_ip_d_tzpc_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhm_axi_p_isplpvra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d_vra2isplp, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_QE_VRA2, NULL, cmucal_vclk_ip_qe_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_VRA2, NULL, cmucal_vclk_ip_sysreg_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE_VRA2, NULL, cmucal_vclk_ip_vgen_lite_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VRA2, NULL, cmucal_vclk_ip_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AS_APB_STR, NULL, cmucal_vclk_ip_as_apb_str, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BTM_VRA2, NULL, cmucal_vclk_ip_btm_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_PPMU_VRA2, NULL, cmucal_vclk_ip_ppmu_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSMMU_VRA2, NULL, cmucal_vclk_ip_sysmmu_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_STR, NULL, cmucal_vclk_ip_str, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhs_axi_d_vra2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_IF, NULL, cmucal_vclk_ip_dmic_if, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SYSREG_VTS, NULL, cmucal_vclk_ip_sysreg_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VTS_CMU_VTS, NULL, cmucal_vclk_ip_vts_cmu_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_AHB_BUSMATRIX, NULL, cmucal_vclk_ip_ahb_busmatrix, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHM_AXI_P_VTS, NULL, cmucal_vclk_ip_lhm_axi_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_GPIO_VTS, NULL, cmucal_vclk_ip_gpio_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_WDT_VTS, NULL, cmucal_vclk_ip_wdt_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB0, NULL, cmucal_vclk_ip_dmic_ahb0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB1, NULL, cmucal_vclk_ip_dmic_ahb1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_C_VTS, NULL, cmucal_vclk_ip_lhs_axi_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_ASYNCINTERRUPT, NULL, cmucal_vclk_ip_asyncinterrupt, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC0, NULL, cmucal_vclk_ip_hwacg_sys_dmic0, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC1, NULL, cmucal_vclk_ip_hwacg_sys_dmic1, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SS_VTS_GLUE, NULL, cmucal_vclk_ip_ss_vts_glue, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_CORTEXM4INTEGRATION, NULL, cmucal_vclk_ip_cortexm4integration, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_U_DMIC_CLK_MUX, NULL, cmucal_vclk_ip_u_dmic_clk_mux, NULL, NULL), - 
CMUCAL_VCLK(VCLK_IP_LHM_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhm_axi_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_LHS_AXI_D_VTS, NULL, cmucal_vclk_ip_lhs_axi_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_C_VTS, NULL, cmucal_vclk_ip_baaw_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_D_TZPC_VTS, NULL, cmucal_vclk_ip_d_tzpc_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_VGEN_LITE, NULL, cmucal_vclk_ip_vgen_lite, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_LP_VTS, NULL, cmucal_vclk_ip_bps_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BPS_P_VTS, NULL, cmucal_vclk_ip_bps_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XHB_LP_VTS, NULL, cmucal_vclk_ip_xhb_lp_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_XHB_P_VTS, NULL, cmucal_vclk_ip_xhb_p_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SWEEPER_C_VTS, NULL, cmucal_vclk_ip_sweeper_c_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_SWEEPER_D_VTS, NULL, cmucal_vclk_ip_sweeper_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_BAAW_D_VTS, NULL, cmucal_vclk_ip_baaw_d_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_ABOX_VTS, NULL, cmucal_vclk_ip_mailbox_abox_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB2, NULL, cmucal_vclk_ip_dmic_ahb2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_AHB3, NULL, cmucal_vclk_ip_dmic_ahb3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC2, NULL, cmucal_vclk_ip_hwacg_sys_dmic2, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_HWACG_SYS_DMIC3, NULL, cmucal_vclk_ip_hwacg_sys_dmic3, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_DMIC_IF_3RD, NULL, cmucal_vclk_ip_dmic_if_3rd, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_MAILBOX_AP_VTS, NULL, cmucal_vclk_ip_mailbox_ap_vts, NULL, NULL), - CMUCAL_VCLK(VCLK_IP_TIMER, NULL, cmucal_vclk_ip_timer, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_APM, NULL, cmucal_vclk_ip_lhs_axi_d_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_APM, NULL, cmucal_vclk_ip_lhm_axi_p_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_APM, NULL, cmucal_vclk_ip_wdt_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_APM, NULL, cmucal_vclk_ip_sysreg_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_APM_AP, NULL, cmucal_vclk_ip_mailbox_apm_ap, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_PMU_ALIVE, NULL, cmucal_vclk_ip_apbif_pmu_alive, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_INTMEM, NULL, cmucal_vclk_ip_intmem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_C_MODEM, NULL, cmucal_vclk_ip_lhm_axi_c_modem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_SCAN2DRAM, NULL, cmucal_vclk_ip_lhs_axi_g_scan2dram, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PMU_INTR_GEN, NULL, cmucal_vclk_ip_pmu_intr_gen, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PEM, NULL, cmucal_vclk_ip_pem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPEEDY_APM, NULL, cmucal_vclk_ip_speedy_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_DP_APM, NULL, cmucal_vclk_ip_xiu_dp_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APM_CMU_APM, NULL, cmucal_vclk_ip_apm_cmu_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_APM, NULL, cmucal_vclk_ip_vgen_lite_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GREBEINTEGRATION, NULL, cmucal_vclk_ip_grebeintegration, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_GPIO_ALIVE, NULL, cmucal_vclk_ip_apbif_gpio_alive, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_TOP_RTC, NULL, cmucal_vclk_ip_apbif_top_rtc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_CP, NULL, cmucal_vclk_ip_mailbox_ap_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_CP_S, NULL, cmucal_vclk_ip_mailbox_ap_cp_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GREBEINTEGRATION_DBGCORE, NULL, cmucal_vclk_ip_grebeintegration_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DTZPC_APM, NULL, cmucal_vclk_ip_dtzpc_apm, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHM_AXI_C_VTS, NULL, cmucal_vclk_ip_lhm_axi_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_APM_VTS, NULL, cmucal_vclk_ip_mailbox_apm_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_DBGCORE, NULL, cmucal_vclk_ip_mailbox_ap_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhs_axi_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_APM_CP, NULL, cmucal_vclk_ip_mailbox_apm_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBIF_RTC, NULL, cmucal_vclk_ip_apbif_rtc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhs_axi_c_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPEEDY_SUB_APM, NULL, cmucal_vclk_ip_speedy_sub_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AUD_CMU_AUD, NULL, cmucal_vclk_ip_aud_cmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_AUD, NULL, cmucal_vclk_ip_lhs_axi_d_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_AUD, NULL, cmucal_vclk_ip_ppmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_AUD, NULL, cmucal_vclk_ip_sysreg_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ABOX, NULL, cmucal_vclk_ip_abox, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhs_atb_t0_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_AUD, NULL, cmucal_vclk_ip_gpio_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_US_32TO128, NULL, cmucal_vclk_ip_axi_us_32to128, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_AUD, NULL, cmucal_vclk_ip_btm_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERI_AXI_ASB, NULL, cmucal_vclk_ip_peri_axi_asb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_AUD, NULL, cmucal_vclk_ip_lhm_axi_p_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_AUD, NULL, cmucal_vclk_ip_wdt_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC, NULL, cmucal_vclk_ip_dmic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_AUD, NULL, cmucal_vclk_ip_trex_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DFTMUX_AUD, NULL, cmucal_vclk_ip_dftmux_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SMMU_AUD, NULL, cmucal_vclk_ip_smmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_AUD, NULL, cmucal_vclk_ip_wrap2_conv_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_AUD, NULL, cmucal_vclk_ip_xiu_p_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SMMU_AUD, NULL, cmucal_vclk_ip_ad_apb_smmu_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_AUD, NULL, cmucal_vclk_ip_axi2apb_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SMMU_AUD_S, NULL, cmucal_vclk_ip_ad_apb_smmu_aud_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhs_atb_t1_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_AUD, NULL, cmucal_vclk_ip_vgen_lite_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSC_CMU_BUSC, NULL, cmucal_vclk_ip_busc_cmu_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_BUSCP0, NULL, cmucal_vclk_ip_axi2apb_buscp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_BUSC_TDP, NULL, cmucal_vclk_ip_axi2apb_busc_tdp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_BUSC, NULL, cmucal_vclk_ip_sysreg_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_CMUTOPC, NULL, cmucal_vclk_ip_busif_cmutopc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_D0_BUSC, NULL, cmucal_vclk_ip_trex_d0_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_P_BUSC, NULL, cmucal_vclk_ip_trex_p_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF0, NULL, cmucal_vclk_ip_lhs_axi_p_mif0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhs_axi_p_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhs_axi_p_mif2, 
NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhs_axi_p_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhs_axi_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhs_axi_p_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhs_axi_p_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASYNCSFR_WR_SMC, NULL, cmucal_vclk_ip_asyncsfr_wr_smc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhs_axi_d_ivasc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhm_acel_d0_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhm_acel_d1_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhm_acel_d2_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhm_acel_d_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhm_acel_d_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhm_acel_d_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhm_axi_d0_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhm_axi_d0_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_d_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhm_axi_d1_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhm_axi_d1_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhm_axi_d2_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d0_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DPU, NULL, cmucal_vclk_ip_lhs_axi_p_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_p_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhs_axi_p_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhs_axi_p_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_G2D, NULL, cmucal_vclk_ip_lhs_axi_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_p_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_p_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_IVA, NULL, cmucal_vclk_ip_lhs_axi_p_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_MFC, NULL, cmucal_vclk_ip_lhs_axi_p_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhm_acel_d_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_APM, NULL, cmucal_vclk_ip_lhm_axi_d_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d1_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhs_axi_p_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SIREX, NULL, cmucal_vclk_ip_sirex, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d0_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhm_acel_d1_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_d_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_RB_BUSC, NULL, cmucal_vclk_ip_trex_rb_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPFW, NULL, cmucal_vclk_ip_ppfw, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_BUSC, NULL, cmucal_vclk_ip_wrap2_conv_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_PDMA0, NULL, 
cmucal_vclk_ip_vgen_pdma0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_BUSC, NULL, cmucal_vclk_ip_vgen_lite_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_BUSC, NULL, cmucal_vclk_ip_hpm_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMBUSC, NULL, cmucal_vclk_ip_busif_hpmbusc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PDMA0, NULL, cmucal_vclk_ip_pdma0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SBIC, NULL, cmucal_vclk_ip_sbic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPDMA, NULL, cmucal_vclk_ip_spdma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DIT, NULL, cmucal_vclk_ip_ad_apb_dit, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DIT, NULL, cmucal_vclk_ip_dit, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_BUSC, NULL, cmucal_vclk_ip_d_tzpc_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_NPU, NULL, cmucal_vclk_ip_lhs_axi_p_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MMCACHE, NULL, cmucal_vclk_ip_mmcache, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_D1_BUSC, NULL, cmucal_vclk_ip_trex_d1_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_BUSCP1, NULL, cmucal_vclk_ip_axi2apb_buscp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_AUD, NULL, cmucal_vclk_ip_lhm_axi_d_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_AUD, NULL, cmucal_vclk_ip_lhs_axi_p_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhs_dbg_g_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_VTS, NULL, cmucal_vclk_ip_lhm_axi_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_VTS, NULL, cmucal_vclk_ip_lhs_axi_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_SPDMA, NULL, cmucal_vclk_ip_qe_spdma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_PDMA0, NULL, cmucal_vclk_ip_qe_pdma0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_BUSC, NULL, cmucal_vclk_ip_xiu_d_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_P_VTS, NULL, cmucal_vclk_ip_baaw_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_US_64TO128, NULL, cmucal_vclk_ip_axi_us_64to128, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_P_NPU, NULL, cmucal_vclk_ip_baaw_p_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhm_axi_d_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CMGP_CMU_CMGP, NULL, cmucal_vclk_ip_cmgp_cmu_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADC_CMGP, NULL, cmucal_vclk_ip_adc_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_CMGP, NULL, cmucal_vclk_ip_gpio_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP0, NULL, cmucal_vclk_ip_i2c_cmgp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP1, NULL, cmucal_vclk_ip_i2c_cmgp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP2, NULL, cmucal_vclk_ip_i2c_cmgp2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CMGP3, NULL, cmucal_vclk_ip_i2c_cmgp3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP, NULL, cmucal_vclk_ip_sysreg_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP0, NULL, cmucal_vclk_ip_usi_cmgp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP1, NULL, cmucal_vclk_ip_usi_cmgp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP2, NULL, cmucal_vclk_ip_usi_cmgp2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI_CMGP3, NULL, cmucal_vclk_ip_usi_cmgp3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP2CP, NULL, cmucal_vclk_ip_sysreg_cmgp2cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP2PMU_AP, NULL, cmucal_vclk_ip_sysreg_cmgp2pmu_ap, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DTZPC_CMGP, NULL, cmucal_vclk_ip_dtzpc_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_C_CMGP, NULL, cmucal_vclk_ip_lhm_axi_c_cmgp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CMGP2APM, NULL, cmucal_vclk_ip_sysreg_cmgp2apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CORE_CMU_CORE, NULL, 
cmucal_vclk_ip_core_cmu_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CORE, NULL, cmucal_vclk_ip_sysreg_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_0, NULL, cmucal_vclk_ip_axi2apb_core_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE2AXI_0, NULL, cmucal_vclk_ip_mpace2axi_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE2AXI_1, NULL, cmucal_vclk_ip_mpace2axi_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_DEBUG_CCI, NULL, cmucal_vclk_ip_ppc_debug_cci, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_P0_CORE, NULL, cmucal_vclk_ip_trex_p0_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL2_0, NULL, cmucal_vclk_ip_ppmu_cpucl2_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G0_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g0_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G1_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g1_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G2_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g2_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G3_DMC, NULL, cmucal_vclk_ip_lhm_dbg_g3_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T_BDU, NULL, cmucal_vclk_ip_lhs_atb_t_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_APB_G_BDU, NULL, cmucal_vclk_ip_adm_apb_g_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BDU, NULL, cmucal_vclk_ip_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_P1_CORE, NULL, cmucal_vclk_ip_trex_p1_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_TP, NULL, cmucal_vclk_ip_axi2apb_core_tp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPFW_G3D, NULL, cmucal_vclk_ip_ppfw_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_G3D, NULL, cmucal_vclk_ip_lhs_axi_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_CPUCL0, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhs_axi_p_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_CP, NULL, cmucal_vclk_ip_lhm_axi_d0_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D0_G3D, NULL, cmucal_vclk_ip_lhm_ace_d0_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D1_G3D, NULL, cmucal_vclk_ip_lhm_ace_d1_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D2_G3D, NULL, cmucal_vclk_ip_lhm_ace_d2_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D3_G3D, NULL, cmucal_vclk_ip_lhm_ace_d3_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_D_CORE, NULL, cmucal_vclk_ip_trex_d_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CORE, NULL, cmucal_vclk_ip_hpm_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMCORE, NULL, cmucal_vclk_ip_busif_hpmcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D0_G3D, NULL, cmucal_vclk_ip_bps_d0_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D1_G3D, NULL, cmucal_vclk_ip_bps_d1_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D2_G3D, NULL, cmucal_vclk_ip_bps_d2_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_D3_G3D, NULL, cmucal_vclk_ip_bps_d3_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPCFW_G3D, NULL, cmucal_vclk_ip_ppcfw_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_CP, NULL, cmucal_vclk_ip_lhs_axi_p_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_PPFW_G3D, NULL, cmucal_vclk_ip_apb_async_ppfw_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_CP, NULL, cmucal_vclk_ip_baaw_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_P_G3D, NULL, cmucal_vclk_ip_bps_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_APM, NULL, cmucal_vclk_ip_lhs_axi_p_apm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL2_1, NULL, cmucal_vclk_ip_ppmu_cpucl2_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_CORE, NULL, cmucal_vclk_ip_d_tzpc_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_1, NULL, cmucal_vclk_ip_axi2apb_core_1, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_XIU_P_CORE, NULL, cmucal_vclk_ip_xiu_p_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL2_0, NULL, cmucal_vclk_ip_ppc_cpucl2_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL2_1, NULL, cmucal_vclk_ip_ppc_cpucl2_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D0, NULL, cmucal_vclk_ip_ppc_g3d0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D1, NULL, cmucal_vclk_ip_ppc_g3d1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D2, NULL, cmucal_vclk_ip_ppc_g3d2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_G3D3, NULL, cmucal_vclk_ip_ppc_g3d3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_IRPS0, NULL, cmucal_vclk_ip_ppc_irps0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_IRPS1, NULL, cmucal_vclk_ip_ppc_irps1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D1_CP, NULL, cmucal_vclk_ip_lhm_axi_d1_cp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_L_CORE, NULL, cmucal_vclk_ip_lhs_axi_l_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CORE_2, NULL, cmucal_vclk_ip_axi2apb_core_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_L_CORE, NULL, cmucal_vclk_ip_lhm_axi_l_core, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_ace_d1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL0_0, NULL, cmucal_vclk_ip_ppc_cpucl0_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_CPUCL0_1, NULL, cmucal_vclk_ip_ppc_cpucl0_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL0_0, NULL, cmucal_vclk_ip_ppmu_cpucl0_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUCL0_1, NULL, cmucal_vclk_ip_ppmu_cpucl0_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_DBG_G_BUSC, NULL, cmucal_vclk_ip_lhm_dbg_g_busc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D0_MIF, NULL, cmucal_vclk_ip_mpace_asb_d0_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D1_MIF, NULL, cmucal_vclk_ip_mpace_asb_d1_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D2_MIF, NULL, cmucal_vclk_ip_mpace_asb_d2_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MPACE_ASB_D3_MIF, NULL, cmucal_vclk_ip_mpace_asb_d3_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_ASB_CSSYS, NULL, cmucal_vclk_ip_axi_asb_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CCI, NULL, cmucal_vclk_ip_cci, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CPUCL0, NULL, cmucal_vclk_ip_axi2apb_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CPUCL0, NULL, cmucal_vclk_ip_sysreg_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMCPUCL0, NULL, cmucal_vclk_ip_busif_hpmcpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CSSYS, NULL, cmucal_vclk_ip_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T0_AUD, NULL, cmucal_vclk_ip_lhm_atb_t0_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T_BDU, NULL, cmucal_vclk_ip_lhm_atb_t_bdu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T0_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t0_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T1_CLUSTER2, NULL, cmucal_vclk_ip_lhm_atb_t1_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t2_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t3_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SECJTAG, NULL, cmucal_vclk_ip_secjtag, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_CPUCL0, NULL, 
cmucal_vclk_ip_lhm_axi_p_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACE_D0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T0_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t0_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T2_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t2_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T3_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t3_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_APB_G_CLUSTER0, NULL, cmucal_vclk_ip_adm_apb_g_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL0_CMU_CPUCL0, NULL, cmucal_vclk_ip_cpucl0_cmu_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL0, NULL, cmucal_vclk_ip_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t4_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhm_atb_t5_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACE_D1_CLUSTER0, NULL, cmucal_vclk_ip_lhs_ace_d1_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T4_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t4_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_T5_CLUSTER0, NULL, cmucal_vclk_ip_lhs_atb_t5_cluster0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_CPUCL0, NULL, cmucal_vclk_ip_d_tzpc_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_T1_AUD, NULL, cmucal_vclk_ip_lhm_atb_t1_aud, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_int_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_INT_CSSYS, NULL, cmucal_vclk_ip_lhm_axi_g_int_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhs_axi_g_int_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_INT_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_int_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_CPUCL0, NULL, cmucal_vclk_ip_xiu_p_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_DP_CSSYS, NULL, cmucal_vclk_ip_xiu_dp_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_CPUCL0, NULL, cmucal_vclk_ip_trex_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI_US_32TO64_G_DBGCORE, NULL, cmucal_vclk_ip_axi_us_32to64_g_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_CSSYS, NULL, cmucal_vclk_ip_lhs_axi_g_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL0_1, NULL, cmucal_vclk_ip_hpm_cpucl0_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL0_0, NULL, cmucal_vclk_ip_hpm_cpucl0_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_P_CSSYS_0, NULL, cmucal_vclk_ip_apb_async_p_cssys_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhs_axi_g_int_etr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_DBGCORE, NULL, cmucal_vclk_ip_lhm_axi_g_dbgcore, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G_INT_ETR, NULL, cmucal_vclk_ip_lhm_axi_g_int_etr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_P_CSSYS, NULL, cmucal_vclk_ip_axi2apb_p_cssys, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_CPUCL0, NULL, cmucal_vclk_ip_bps_cpucl0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL1_CMU_CPUCL1, NULL, cmucal_vclk_ip_cpucl1_cmu_cpucl1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL1, NULL, cmucal_vclk_ip_cpucl1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CPUCL2_CMU_CPUCL2, NULL, cmucal_vclk_ip_cpucl2_cmu_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_CPUCL2, NULL, cmucal_vclk_ip_sysreg_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMCPUCL2, NULL, cmucal_vclk_ip_busif_hpmcpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL2_0, NULL, 
cmucal_vclk_ip_hpm_cpucl2_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CLUSTER2, NULL, cmucal_vclk_ip_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_CPUCL2, NULL, cmucal_vclk_ip_axi2apb_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_CPUCL2, NULL, cmucal_vclk_ip_lhm_axi_p_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL2_1, NULL, cmucal_vclk_ip_hpm_cpucl2_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_CPUCL2_2, NULL, cmucal_vclk_ip_hpm_cpucl2_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_CPUCL2, NULL, cmucal_vclk_ip_d_tzpc_cpucl2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DPU_CMU_DPU, NULL, cmucal_vclk_ip_dpu_cmu_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DPUD0, NULL, cmucal_vclk_ip_btm_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DPUD1, NULL, cmucal_vclk_ip_btm_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_DPU, NULL, cmucal_vclk_ip_sysreg_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DPUP1, NULL, cmucal_vclk_ip_axi2apb_dpup1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DPUP0, NULL, cmucal_vclk_ip_axi2apb_dpup0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_sysmmu_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DPU, NULL, cmucal_vclk_ip_lhm_axi_p_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D1_DPU, NULL, cmucal_vclk_ip_lhs_axi_d1_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_DPU, NULL, cmucal_vclk_ip_xiu_p_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DECON0, NULL, cmucal_vclk_ip_ad_apb_decon0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DECON1, NULL, cmucal_vclk_ip_ad_apb_decon1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_MIPI_DSIM1, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPP, NULL, cmucal_vclk_ip_ad_apb_dpp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D2_DPU, NULL, cmucal_vclk_ip_lhs_axi_d2_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DPUD2, NULL, cmucal_vclk_ip_btm_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_sysmmu_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPU_DMA, NULL, cmucal_vclk_ip_ad_apb_dpu_dma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPU_WB_MUX, NULL, cmucal_vclk_ip_ad_apb_dpu_wb_mux, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_sysmmu_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DPUD0, NULL, cmucal_vclk_ip_ppmu_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DPUD1, NULL, cmucal_vclk_ip_ppmu_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DPUD2, NULL, cmucal_vclk_ip_ppmu_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_MIPI_DSIM0, NULL, cmucal_vclk_ip_ad_apb_mipi_dsim0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DECON2, NULL, cmucal_vclk_ip_ad_apb_decon2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD0, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD0_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud0_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD1, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD1_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud1_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD2, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_SYSMMU_DPUD2_S, NULL, cmucal_vclk_ip_ad_apb_sysmmu_dpud2_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DPU, NULL, cmucal_vclk_ip_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAPPER_FOR_S5I6280_HSI_DCPHY_COMBO_TOP, NULL, cmucal_vclk_ip_wrapper_for_s5i6280_hsi_dcphy_combo_top, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DPU_DMA_PGEN, NULL, 
cmucal_vclk_ip_ad_apb_dpu_dma_pgen, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_DPU, NULL, cmucal_vclk_ip_lhs_axi_d0_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_DPU, NULL, cmucal_vclk_ip_d_tzpc_dpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_MCD, NULL, cmucal_vclk_ip_ad_apb_mcd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DSPM_CMU_DSPM, NULL, cmucal_vclk_ip_dspm_cmu_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_DSPM, NULL, cmucal_vclk_ip_sysreg_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DSPM, NULL, cmucal_vclk_ip_axi2apb_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DSPM0, NULL, cmucal_vclk_ip_ppmu_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DSPM0, NULL, cmucal_vclk_ip_sysmmu_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DSPM0, NULL, cmucal_vclk_ip_btm_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DSPM, NULL, cmucal_vclk_ip_lhm_axi_p_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D0_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d0_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhm_axi_p_ivadspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhs_axi_p_dspmiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_DSPM, NULL, cmucal_vclk_ip_wrap2_conv_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM0, NULL, cmucal_vclk_ip_ad_apb_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM1, NULL, cmucal_vclk_ip_ad_apb_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM3, NULL, cmucal_vclk_ip_ad_apb_dspm3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_AXI_DSPM0, NULL, cmucal_vclk_ip_ad_axi_dspm0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_DSPM1, NULL, cmucal_vclk_ip_btm_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D1_DSPM, NULL, cmucal_vclk_ip_lhs_acel_d1_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhs_axi_p_dspmdsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_DSPM1, NULL, cmucal_vclk_ip_ppmu_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_DSPM1, NULL, cmucal_vclk_ip_sysmmu_dspm1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_APB_DSPM, NULL, cmucal_vclk_ip_adm_apb_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhm_axi_d0_dspsdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_DSPM, NULL, cmucal_vclk_ip_xiu_p_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_DSPM, NULL, cmucal_vclk_ip_vgen_lite_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_DSPM2, NULL, cmucal_vclk_ip_ad_apb_dspm2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SCORE_TS_II, NULL, cmucal_vclk_ip_score_ts_ii, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_DSPM, NULL, cmucal_vclk_ip_d_tzpc_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhm_ast_isppredspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhm_ast_isplpdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhm_ast_isphqdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhs_ast_dspmisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhs_ast_dspmisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_DSPM, NULL, cmucal_vclk_ip_xiu_d_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_DSPM, NULL, cmucal_vclk_ip_baaw_dspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhs_axi_d_dspmnpu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DSPS_CMU_DSPS, NULL, cmucal_vclk_ip_dsps_cmu_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_DSPS, NULL, cmucal_vclk_ip_axi2apb_dsps, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DSPMDSPS, NULL, cmucal_vclk_ip_lhm_axi_p_dspmdsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_DSPS, NULL, cmucal_vclk_ip_sysreg_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhs_axi_d_dspsiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_DSPSDSPM, NULL, cmucal_vclk_ip_lhs_axi_d0_dspsdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SCORE_BARON, NULL, cmucal_vclk_ip_score_baron, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhm_axi_d_ivadsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_DSPS, NULL, cmucal_vclk_ip_d_tzpc_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_DSPS, NULL, cmucal_vclk_ip_vgen_lite_dsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_FSYS0_CMU_FSYS0, NULL, cmucal_vclk_ip_fsys0_cmu_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_FSYS0, NULL, cmucal_vclk_ip_lhs_acel_d_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_FSYS0, NULL, cmucal_vclk_ip_lhm_axi_p_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_FSYS0, NULL, cmucal_vclk_ip_gpio_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_FSYS0, NULL, cmucal_vclk_ip_sysreg_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_FSYS0, NULL, cmucal_vclk_ip_xiu_d_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_FSYS0, NULL, cmucal_vclk_ip_btm_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DP_LINK, NULL, cmucal_vclk_ip_dp_link, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_FSYS0, NULL, cmucal_vclk_ip_vgen_lite_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_USB, NULL, cmucal_vclk_ip_lhm_axi_d_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_USB, NULL, cmucal_vclk_ip_lhs_axi_p_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_FSYS0, NULL, cmucal_vclk_ip_ppmu_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_PCIE_GEN3A, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3a, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_PCIE_GEN3B, NULL, cmucal_vclk_ip_sysmmu_pcie_gen3b, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P0_FSYS0, NULL, cmucal_vclk_ip_xiu_p0_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_GEN3, NULL, cmucal_vclk_ip_pcie_gen3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_IA_GEN3A, NULL, cmucal_vclk_ip_pcie_ia_gen3a, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_IA_GEN3B, NULL, cmucal_vclk_ip_pcie_ia_gen3b, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_FSYS0, NULL, cmucal_vclk_ip_d_tzpc_fsys0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_FSYS0A_CMU_FSYS0A, NULL, cmucal_vclk_ip_fsys0a_cmu_fsys0a, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USB31DRD, NULL, cmucal_vclk_ip_usb31drd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_USB, NULL, cmucal_vclk_ip_lhm_axi_p_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_USB, NULL, cmucal_vclk_ip_lhs_axi_d_usb, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_FSYS1_CMU_FSYS1, NULL, cmucal_vclk_ip_fsys1_cmu_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MMC_CARD, NULL, cmucal_vclk_ip_mmc_card, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_GEN2, NULL, cmucal_vclk_ip_pcie_gen2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SSS, NULL, cmucal_vclk_ip_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_RTIC, NULL, cmucal_vclk_ip_rtic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_FSYS1, NULL, cmucal_vclk_ip_sysreg_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_FSYS1, NULL, cmucal_vclk_ip_gpio_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_FSYS1, NULL, cmucal_vclk_ip_lhs_acel_d_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_FSYS1, NULL, cmucal_vclk_ip_lhm_axi_p_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_FSYS1, NULL, cmucal_vclk_ip_xiu_d_fsys1, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_XIU_P_FSYS1, NULL, cmucal_vclk_ip_xiu_p_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_FSYS1, NULL, cmucal_vclk_ip_ppmu_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_FSYS1, NULL, cmucal_vclk_ip_btm_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UFS_CARD, NULL, cmucal_vclk_ip_ufs_card, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_AHB_SSS, NULL, cmucal_vclk_ip_adm_ahb_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_FSYS1, NULL, cmucal_vclk_ip_sysmmu_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_FSYS1, NULL, cmucal_vclk_ip_vgen_lite_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PCIE_IA_GEN2, NULL, cmucal_vclk_ip_pcie_ia_gen2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_FSYS1, NULL, cmucal_vclk_ip_d_tzpc_fsys1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UFS_EMBD, NULL, cmucal_vclk_ip_ufs_embd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PUF, NULL, cmucal_vclk_ip_puf, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_RTIC, NULL, cmucal_vclk_ip_qe_rtic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_SSS, NULL, cmucal_vclk_ip_qe_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_SSS, NULL, cmucal_vclk_ip_baaw_sss, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_G2D_CMU_G2D, NULL, cmucal_vclk_ip_g2d_cmu_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_G2DD0, NULL, cmucal_vclk_ip_ppmu_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_G2DD1, NULL, cmucal_vclk_ip_ppmu_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_G2DD0, NULL, cmucal_vclk_ip_sysmmu_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_G2D, NULL, cmucal_vclk_ip_sysreg_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D0_G2D, NULL, cmucal_vclk_ip_lhs_acel_d0_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D1_G2D, NULL, cmucal_vclk_ip_lhs_acel_d1_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_G2D, NULL, cmucal_vclk_ip_lhm_axi_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_G2D, NULL, cmucal_vclk_ip_as_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_G2DP0, NULL, cmucal_vclk_ip_axi2apb_g2dp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_G2DD0, NULL, cmucal_vclk_ip_btm_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_G2DD1, NULL, cmucal_vclk_ip_btm_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_G2D, NULL, cmucal_vclk_ip_xiu_p_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_G2DP1, NULL, cmucal_vclk_ip_axi2apb_g2dp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_G2DD2, NULL, cmucal_vclk_ip_btm_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_JPEG, NULL, cmucal_vclk_ip_qe_jpeg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_MSCL, NULL, cmucal_vclk_ip_qe_mscl, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_G2DD2, NULL, cmucal_vclk_ip_sysmmu_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_G2DD2, NULL, cmucal_vclk_ip_ppmu_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D2_G2D, NULL, cmucal_vclk_ip_lhs_acel_d2_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_JPEG, NULL, cmucal_vclk_ip_as_p_jpeg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_G2D, NULL, cmucal_vclk_ip_xiu_d_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_MSCL, NULL, cmucal_vclk_ip_as_p_mscl, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_ASTC, NULL, cmucal_vclk_ip_as_p_astc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_NS_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_NS_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_S_G2DD0, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_S_G2DD2, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_ASTC, NULL, cmucal_vclk_ip_qe_astc, NULL, 
NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_G2D, NULL, cmucal_vclk_ip_vgen_lite_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_G2D, NULL, cmucal_vclk_ip_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_NS_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_ns_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_SYSMMU_S_G2DD1, NULL, cmucal_vclk_ip_as_p_sysmmu_s_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_G2DD1, NULL, cmucal_vclk_ip_sysmmu_g2dd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_JPEG, NULL, cmucal_vclk_ip_jpeg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MSCL, NULL, cmucal_vclk_ip_mscl, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASTC, NULL, cmucal_vclk_ip_astc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_P_JSQZ, NULL, cmucal_vclk_ip_as_p_jsqz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_JSQZ, NULL, cmucal_vclk_ip_qe_jsqz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_G2D, NULL, cmucal_vclk_ip_d_tzpc_g2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_JSQZ, NULL, cmucal_vclk_ip_jsqz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_G3D, NULL, cmucal_vclk_ip_xiu_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_G3D, NULL, cmucal_vclk_ip_lhm_axi_p_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMG3D, NULL, cmucal_vclk_ip_busif_hpmg3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_G3D0, NULL, cmucal_vclk_ip_hpm_g3d0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_G3D, NULL, cmucal_vclk_ip_sysreg_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_G3D_CMU_G3D, NULL, cmucal_vclk_ip_g3d_cmu_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhs_axi_g3dsfr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_G3D, NULL, cmucal_vclk_ip_vgen_lite_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPU, NULL, cmucal_vclk_ip_gpu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_G3D, NULL, cmucal_vclk_ip_axi2apb_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_G3DSFR, NULL, cmucal_vclk_ip_lhm_axi_g3dsfr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GRAY2BIN_G3D, NULL, cmucal_vclk_ip_gray2bin_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_G3D, NULL, cmucal_vclk_ip_d_tzpc_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASB_G3D, NULL, cmucal_vclk_ip_asb_g3d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPHQ, NULL, cmucal_vclk_ip_lhm_axi_p_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_ISPHQ, NULL, cmucal_vclk_ip_lhs_axi_d_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IS_ISPHQ, NULL, cmucal_vclk_ip_is_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_ISPHQ, NULL, cmucal_vclk_ip_sysreg_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ISPHQ_CMU_ISPHQ, NULL, cmucal_vclk_ip_isphq_cmu_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhm_atb_isppreisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhs_atb_isphqisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPHQ, NULL, cmucal_vclk_ip_btm_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhm_atb_vo_isplpisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhs_ast_vo_isphqisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPHQ, NULL, cmucal_vclk_ip_d_tzpc_isphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_ISPHQDSPM, NULL, cmucal_vclk_ip_lhs_ast_isphqdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPLP, NULL, cmucal_vclk_ip_lhm_axi_p_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d0_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPLP0, NULL, cmucal_vclk_ip_btm_isplp0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IS_ISPLP, NULL, cmucal_vclk_ip_is_isplp, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_SYSREG_ISPLP, NULL, cmucal_vclk_ip_sysreg_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ISPLP_CMU_ISPLP, NULL, cmucal_vclk_ip_isplp_cmu_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPLP1, NULL, cmucal_vclk_ip_btm_isplp1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D1_ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d1_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_ISPHQISPLP, NULL, cmucal_vclk_ip_lhm_atb_isphqisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_ast_vo_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhm_atb_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_VO_ISPLPISPHQ, NULL, cmucal_vclk_ip_lhs_atb_vo_isplpisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPLP, NULL, cmucal_vclk_ip_d_tzpc_isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_ISPLPDSPM, NULL, cmucal_vclk_ip_lhs_ast_isplpdspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_DSPMISPLP, NULL, cmucal_vclk_ip_lhm_ast_dspmisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhs_axi_p_isplpvra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhm_axi_d_vra2isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IS_ISPPRE, NULL, cmucal_vclk_ip_is_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_ISPPRE, NULL, cmucal_vclk_ip_lhs_axi_d_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_ISPPRE, NULL, cmucal_vclk_ip_btm_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPPRE, NULL, cmucal_vclk_ip_lhm_axi_p_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_ISPPRE, NULL, cmucal_vclk_ip_sysreg_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ISPPRE_CMU_ISPPRE, NULL, cmucal_vclk_ip_isppre_cmu_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_atb_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ATB_ISPPREISPHQ, NULL, cmucal_vclk_ip_lhs_atb_isppreisphq, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPPRE, NULL, cmucal_vclk_ip_d_tzpc_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_ISPPREDSPM, NULL, cmucal_vclk_ip_lhs_ast_isppredspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_DSPMISPPRE, NULL, cmucal_vclk_ip_lhm_ast_dspmisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMISPPRE, NULL, cmucal_vclk_ip_busif_hpmisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_ISPPRE, NULL, cmucal_vclk_ip_hpm_isppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_ISPPRE1, NULL, cmucal_vclk_ip_d_tzpc_isppre1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_VO_ISPPREISPLP, NULL, cmucal_vclk_ip_lhs_ast_vo_isppreisplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_VO_ISPHQISPPRE, NULL, cmucal_vclk_ip_lhm_ast_vo_isphqisppre, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IVA_CMU_IVA, NULL, cmucal_vclk_ip_iva_cmu_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_IVA, NULL, cmucal_vclk_ip_lhs_acel_d_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IVADSPS, NULL, cmucal_vclk_ip_lhs_axi_d_ivadsps, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_IVADSPM, NULL, cmucal_vclk_ip_lhs_axi_p_ivadspm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_DSPMIVA, NULL, cmucal_vclk_ip_lhm_axi_p_dspmiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_IVA, NULL, cmucal_vclk_ip_lhm_axi_p_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_IVA, NULL, cmucal_vclk_ip_btm_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_IVA, NULL, cmucal_vclk_ip_ppmu_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_IVA, NULL, cmucal_vclk_ip_sysmmu_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_IVA, NULL, cmucal_vclk_ip_xiu_p_iva, NULL, 
NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_IVA0, NULL, cmucal_vclk_ip_ad_apb_iva0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_2M_IVA, NULL, cmucal_vclk_ip_axi2apb_2m_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_IVA, NULL, cmucal_vclk_ip_axi2apb_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_IVA, NULL, cmucal_vclk_ip_sysreg_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IVASC, NULL, cmucal_vclk_ip_lhm_axi_d_ivasc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ADM_DAP_IVA, NULL, cmucal_vclk_ip_adm_dap_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_DSPSIVA, NULL, cmucal_vclk_ip_lhm_axi_d_dspsiva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_IVA1, NULL, cmucal_vclk_ip_ad_apb_iva1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_APB_IVA2, NULL, cmucal_vclk_ip_ad_apb_iva2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_IVA, NULL, cmucal_vclk_ip_vgen_lite_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IVA, NULL, cmucal_vclk_ip_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_IVA_INTMEM, NULL, cmucal_vclk_ip_iva_intmem, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D0_IVA, NULL, cmucal_vclk_ip_xiu_d0_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D1_IVA, NULL, cmucal_vclk_ip_xiu_d1_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_IVA, NULL, cmucal_vclk_ip_d_tzpc_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D2_IVA, NULL, cmucal_vclk_ip_xiu_d2_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TREX_RB1_IVA, NULL, cmucal_vclk_ip_trex_rb1_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_IVA, NULL, cmucal_vclk_ip_qe_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WRAP2_CONV_IVA, NULL, cmucal_vclk_ip_wrap2_conv_iva, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MFC_CMU_MFC, NULL, cmucal_vclk_ip_mfc_cmu_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_MFC, NULL, cmucal_vclk_ip_as_apb_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MFC, NULL, cmucal_vclk_ip_axi2apb_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MFC, NULL, cmucal_vclk_ip_sysreg_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D0_MFC, NULL, cmucal_vclk_ip_lhs_axi_d0_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D1_MFC, NULL, cmucal_vclk_ip_lhs_axi_d1_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MFC, NULL, cmucal_vclk_ip_lhm_axi_p_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_MFCD0, NULL, cmucal_vclk_ip_sysmmu_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_MFCD1, NULL, cmucal_vclk_ip_sysmmu_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_MFCD0, NULL, cmucal_vclk_ip_ppmu_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_MFCD1, NULL, cmucal_vclk_ip_ppmu_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_MFCD0, NULL, cmucal_vclk_ip_btm_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_MFCD1, NULL, cmucal_vclk_ip_btm_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_NS_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_NS_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_ns_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_S_MFCD0, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_SYSMMU_S_MFCD1, NULL, cmucal_vclk_ip_as_apb_sysmmu_s_mfcd1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_WFD_NS, NULL, cmucal_vclk_ip_as_apb_wfd_ns, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_AXI_WFD, NULL, cmucal_vclk_ip_as_axi_wfd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_MFCD2, NULL, cmucal_vclk_ip_ppmu_mfcd2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_MFC, NULL, cmucal_vclk_ip_xiu_d_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_WFD_S, NULL, cmucal_vclk_ip_as_apb_wfd_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_MFC, NULL, 
cmucal_vclk_ip_vgen_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MFC, NULL, cmucal_vclk_ip_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WFD, NULL, cmucal_vclk_ip_wfd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LH_ATB_MFC, NULL, cmucal_vclk_ip_lh_atb_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_MFC, NULL, cmucal_vclk_ip_d_tzpc_mfc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF_CMU_MIF, NULL, cmucal_vclk_ip_mif_cmu_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY, NULL, cmucal_vclk_ip_ddrphy, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF, NULL, cmucal_vclk_ip_sysreg_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF, NULL, cmucal_vclk_ip_busif_hpmmif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF, NULL, cmucal_vclk_ip_lhm_axi_p_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF, NULL, cmucal_vclk_ip_axi2apb_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_DVFS, NULL, cmucal_vclk_ip_ppc_dvfs, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPC_DEBUG, NULL, cmucal_vclk_ip_ppc_debug, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY, NULL, cmucal_vclk_ip_apbbr_ddrphy, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC, NULL, cmucal_vclk_ip_apbbr_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ, NULL, cmucal_vclk_ip_apbbr_dmctz, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF, NULL, cmucal_vclk_ip_hpm_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC, NULL, cmucal_vclk_ip_dmc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPC_DEBUG, NULL, cmucal_vclk_ip_qch_adapter_ppc_debug, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPC_DVFS, NULL, cmucal_vclk_ip_qch_adapter_ppc_dvfs, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_MIF, NULL, cmucal_vclk_ip_d_tzpc_mif, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF1, NULL, cmucal_vclk_ip_hpm_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF1_CMU_MIF1, NULL, cmucal_vclk_ip_mif1_cmu_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY1, NULL, cmucal_vclk_ip_apbbr_ddrphy1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC1, NULL, cmucal_vclk_ip_apbbr_dmc1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ1, NULL, cmucal_vclk_ip_apbbr_dmctz1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF1, NULL, cmucal_vclk_ip_axi2apb_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF1, NULL, cmucal_vclk_ip_busif_hpmmif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY1, NULL, cmucal_vclk_ip_ddrphy1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC1, NULL, cmucal_vclk_ip_dmc1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF1, NULL, cmucal_vclk_ip_lhm_axi_p_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_ppmuppc_debug1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_ppmuppc_dvfs1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF1, NULL, cmucal_vclk_ip_sysreg_mif1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS1, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF2, NULL, cmucal_vclk_ip_hpm_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY2, NULL, cmucal_vclk_ip_apbbr_ddrphy2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC2, NULL, cmucal_vclk_ip_apbbr_dmc2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ2, NULL, cmucal_vclk_ip_apbbr_dmctz2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF2, NULL, cmucal_vclk_ip_axi2apb_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF2, NULL, cmucal_vclk_ip_busif_hpmmif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY2, NULL, cmucal_vclk_ip_ddrphy2, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_DMC2, NULL, cmucal_vclk_ip_dmc2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF2, NULL, cmucal_vclk_ip_lhm_axi_p_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_ppmuppc_debug2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_ppmuppc_dvfs2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF2, NULL, cmucal_vclk_ip_sysreg_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS2, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF2_CMU_MIF2, NULL, cmucal_vclk_ip_mif2_cmu_mif2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HPM_MIF3, NULL, cmucal_vclk_ip_hpm_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DDRPHY3, NULL, cmucal_vclk_ip_apbbr_ddrphy3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMC3, NULL, cmucal_vclk_ip_apbbr_dmc3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APBBR_DMCTZ3, NULL, cmucal_vclk_ip_apbbr_dmctz3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_MIF3, NULL, cmucal_vclk_ip_axi2apb_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BUSIF_HPMMIF3, NULL, cmucal_vclk_ip_busif_hpmmif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DDRPHY3, NULL, cmucal_vclk_ip_ddrphy3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMC3, NULL, cmucal_vclk_ip_dmc3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_MIF3, NULL, cmucal_vclk_ip_lhm_axi_p_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_ppmuppc_debug3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_ppmuppc_dvfs3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_MIF3, NULL, cmucal_vclk_ip_sysreg_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MIF3_CMU_MIF3, NULL, cmucal_vclk_ip_mif3_cmu_mif3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DEBUG3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_debug3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QCH_ADAPTER_PPMUPPC_DVFS3, NULL, cmucal_vclk_ip_qch_adapter_ppmuppc_dvfs3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_ACEL_D_NPU, NULL, cmucal_vclk_ip_lhs_acel_d_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhs_axi_p_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU0_CMU_NPU0, NULL, cmucal_vclk_ip_npu0_cmu_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SI0, NULL, cmucal_vclk_ip_apb_async_si0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SMMU_NS, NULL, cmucal_vclk_ip_apb_async_smmu_ns, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_NPU0, NULL, cmucal_vclk_ip_axi2apb_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_NPU0, NULL, cmucal_vclk_ip_btm_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_NPU0, NULL, cmucal_vclk_ip_d_tzpc_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud1_d1_7, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_LHM_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhm_ast_p_npu1_done, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_DSPMNPU0, NULL, cmucal_vclk_ip_lhm_axi_d_dspmnpu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_NPU, NULL, cmucal_vclk_ip_lhm_axi_p_npu, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud0_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhs_ast_p_npud1_setreg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhs_axi_d_idpsram3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPUC, NULL, cmucal_vclk_ip_npuc, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPUD_UNIT0, NULL, cmucal_vclk_ip_npud_unit0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_CPUDMA, NULL, cmucal_vclk_ip_ppmu_cpudma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_RFM, NULL, cmucal_vclk_ip_ppmu_rfm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_CPUDMA, NULL, cmucal_vclk_ip_qe_cpudma, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_RFM, NULL, cmucal_vclk_ip_qe_rfm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SMMU_NPU0, NULL, cmucal_vclk_ip_smmu_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_NPU0, NULL, cmucal_vclk_ip_sysreg_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_D_NPU0, NULL, cmucal_vclk_ip_xiu_d_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SMMU_S, NULL, cmucal_vclk_ip_apb_async_smmu_s, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_NPU0, NULL, cmucal_vclk_ip_vgen_lite_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_NPU0, NULL, cmucal_vclk_ip_ppmu_npu0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU0_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu0_ppc_wrapper, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU1_CMU_NPU1, NULL, cmucal_vclk_ip_npu1_cmu_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_0, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_NPU1, NULL, cmucal_vclk_ip_lhm_axi_p_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_APB_ASYNC_SI1, NULL, cmucal_vclk_ip_apb_async_si1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_NPU1, NULL, cmucal_vclk_ip_axi2apb_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_NPU1, NULL, cmucal_vclk_ip_d_tzpc_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_1, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_2, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_3, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_4, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_5, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_6, NULL, 
cmucal_vclk_ip_lhm_ast_d_npud0_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_D_NPUD0_D1_7, NULL, cmucal_vclk_ip_lhm_ast_d_npud0_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AST_P_NPUD1_SETREG, NULL, cmucal_vclk_ip_lhm_ast_p_npud1_setreg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IDPSRAM1, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_D_IDPSRAM3, NULL, cmucal_vclk_ip_lhm_axi_d_idpsram3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_0, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_1, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_2, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_3, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_4, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_4, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_5, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_5, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_6, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_6, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_D_NPUD1_D1_7, NULL, cmucal_vclk_ip_lhs_ast_d_npud1_d1_7, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_NPU1, NULL, cmucal_vclk_ip_sysreg_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AST_P_NPU1_DONE, NULL, cmucal_vclk_ip_lhs_ast_p_npu1_done, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPUD_UNIT1, NULL, cmucal_vclk_ip_npud_unit1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_NPU1, NULL, cmucal_vclk_ip_ppmu_npu1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_NPU1_PPC_WRAPPER, NULL, cmucal_vclk_ip_npu1_ppc_wrapper, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_PERIC0, NULL, cmucal_vclk_ip_gpio_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PWM, NULL, cmucal_vclk_ip_pwm, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_PERIC0, NULL, cmucal_vclk_ip_sysreg_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI00_USI, NULL, cmucal_vclk_ip_usi00_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI01_USI, NULL, cmucal_vclk_ip_usi01_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI02_USI, NULL, cmucal_vclk_ip_usi02_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI03_USI, NULL, cmucal_vclk_ip_usi03_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC0P0, NULL, cmucal_vclk_ip_axi2apb_peric0p0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERIC0_CMU_PERIC0, NULL, cmucal_vclk_ip_peric0_cmu_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI04_USI, NULL, cmucal_vclk_ip_usi04_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC0P1, NULL, cmucal_vclk_ip_axi2apb_peric0p1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI05_USI, NULL, cmucal_vclk_ip_usi05_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI00_I2C, NULL, cmucal_vclk_ip_usi00_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI01_I2C, NULL, cmucal_vclk_ip_usi01_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI02_I2C, NULL, cmucal_vclk_ip_usi02_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI03_I2C, NULL, cmucal_vclk_ip_usi03_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI04_I2C, NULL, cmucal_vclk_ip_usi04_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI05_I2C, NULL, cmucal_vclk_ip_usi05_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UART_DBG, NULL, cmucal_vclk_ip_uart_dbg, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_PERIC0, NULL, cmucal_vclk_ip_xiu_p_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_PERIC0, NULL, cmucal_vclk_ip_lhm_axi_p_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI12_USI, NULL, cmucal_vclk_ip_usi12_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI12_I2C, NULL, 
cmucal_vclk_ip_usi12_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI13_I2C, NULL, cmucal_vclk_ip_usi13_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI13_USI, NULL, cmucal_vclk_ip_usi13_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI14_USI, NULL, cmucal_vclk_ip_usi14_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI14_I2C, NULL, cmucal_vclk_ip_usi14_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_PERIC0, NULL, cmucal_vclk_ip_d_tzpc_peric0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI15_I2C, NULL, cmucal_vclk_ip_usi15_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI15_USI, NULL, cmucal_vclk_ip_usi15_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC1P1, NULL, cmucal_vclk_ip_axi2apb_peric1p1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_PERIC1, NULL, cmucal_vclk_ip_gpio_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_PERIC1, NULL, cmucal_vclk_ip_sysreg_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_UART_BT, NULL, cmucal_vclk_ip_uart_bt, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM1, NULL, cmucal_vclk_ip_i2c_cam1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM2, NULL, cmucal_vclk_ip_i2c_cam2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM3, NULL, cmucal_vclk_ip_i2c_cam3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI06_USI, NULL, cmucal_vclk_ip_usi06_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI07_USI, NULL, cmucal_vclk_ip_usi07_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI08_USI, NULL, cmucal_vclk_ip_usi08_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I2C_CAM0, NULL, cmucal_vclk_ip_i2c_cam0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_PERIC1, NULL, cmucal_vclk_ip_xiu_p_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERIC1P0, NULL, cmucal_vclk_ip_axi2apb_peric1p0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PERIC1_CMU_PERIC1, NULL, cmucal_vclk_ip_peric1_cmu_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SPI_CAM0, NULL, cmucal_vclk_ip_spi_cam0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI09_USI, NULL, cmucal_vclk_ip_usi09_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI06_I2C, NULL, cmucal_vclk_ip_usi06_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI10_USI, NULL, cmucal_vclk_ip_usi10_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI07_I2C, NULL, cmucal_vclk_ip_usi07_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI08_I2C, NULL, cmucal_vclk_ip_usi08_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI09_I2C, NULL, cmucal_vclk_ip_usi09_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI10_I2C, NULL, cmucal_vclk_ip_usi10_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_PERIC1, NULL, cmucal_vclk_ip_lhm_axi_p_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI11_USI, NULL, cmucal_vclk_ip_usi11_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI11_I2C, NULL, cmucal_vclk_ip_usi11_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_PERIC1, NULL, cmucal_vclk_ip_d_tzpc_peric1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_I3C, NULL, cmucal_vclk_ip_i3c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI16_USI, NULL, cmucal_vclk_ip_usi16_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI17_USI, NULL, cmucal_vclk_ip_usi17_usi, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI16_I3C, NULL, cmucal_vclk_ip_usi16_i3c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_USI17_I2C, NULL, cmucal_vclk_ip_usi17_i2c, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_PERISP, NULL, cmucal_vclk_ip_axi2apb_perisp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XIU_P_PERIS, NULL, cmucal_vclk_ip_xiu_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_PERIS, NULL, cmucal_vclk_ip_sysreg_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_CLUSTER2, NULL, cmucal_vclk_ip_wdt_cluster2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_CLUSTER0, NULL, cmucal_vclk_ip_wdt_cluster0, NULL, NULL), + 
CMUCAL_VCLK2(VCLK_IP_PERIS_CMU_PERIS, NULL, cmucal_vclk_ip_peris_cmu_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AD_AXI_P_PERIS, NULL, cmucal_vclk_ip_ad_axi_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_OTP_CON_BIRA, NULL, cmucal_vclk_ip_otp_con_bira, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GIC, NULL, cmucal_vclk_ip_gic, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_PERIS, NULL, cmucal_vclk_ip_lhm_axi_p_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MCT, NULL, cmucal_vclk_ip_mct, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_OTP_CON_TOP, NULL, cmucal_vclk_ip_otp_con_top, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_PERIS, NULL, cmucal_vclk_ip_d_tzpc_peris, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TMU_SUB, NULL, cmucal_vclk_ip_tmu_sub, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TMU_TOP, NULL, cmucal_vclk_ip_tmu_top, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_OTP_CON_BISR, NULL, cmucal_vclk_ip_otp_con_bisr, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_S2D_CMU_S2D, NULL, cmucal_vclk_ip_s2d_cmu_s2d, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VRA2_CMU_VRA2, NULL, cmucal_vclk_ip_vra2_cmu_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_VRA2, NULL, cmucal_vclk_ip_as_apb_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AXI2APB_VRA2, NULL, cmucal_vclk_ip_axi2apb_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_VRA2, NULL, cmucal_vclk_ip_d_tzpc_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_ISPLPVRA2, NULL, cmucal_vclk_ip_lhm_axi_p_isplpvra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_VRA2ISPLP, NULL, cmucal_vclk_ip_lhs_axi_d_vra2isplp, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_QE_VRA2, NULL, cmucal_vclk_ip_qe_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_VRA2, NULL, cmucal_vclk_ip_sysreg_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE_VRA2, NULL, cmucal_vclk_ip_vgen_lite_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VRA2, NULL, cmucal_vclk_ip_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AS_APB_STR, NULL, cmucal_vclk_ip_as_apb_str, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BTM_VRA2, NULL, cmucal_vclk_ip_btm_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_PPMU_VRA2, NULL, cmucal_vclk_ip_ppmu_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSMMU_VRA2, NULL, cmucal_vclk_ip_sysmmu_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_STR, NULL, cmucal_vclk_ip_str, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_VRA2, NULL, cmucal_vclk_ip_lhs_axi_d_vra2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_IF, NULL, cmucal_vclk_ip_dmic_if, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SYSREG_VTS, NULL, cmucal_vclk_ip_sysreg_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VTS_CMU_VTS, NULL, cmucal_vclk_ip_vts_cmu_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_AHB_BUSMATRIX, NULL, cmucal_vclk_ip_ahb_busmatrix, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_P_VTS, NULL, cmucal_vclk_ip_lhm_axi_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_GPIO_VTS, NULL, cmucal_vclk_ip_gpio_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_WDT_VTS, NULL, cmucal_vclk_ip_wdt_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB0, NULL, cmucal_vclk_ip_dmic_ahb0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB1, NULL, cmucal_vclk_ip_dmic_ahb1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_C_VTS, NULL, cmucal_vclk_ip_lhs_axi_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_ASYNCINTERRUPT, NULL, cmucal_vclk_ip_asyncinterrupt, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC0, NULL, cmucal_vclk_ip_hwacg_sys_dmic0, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC1, NULL, cmucal_vclk_ip_hwacg_sys_dmic1, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SS_VTS_GLUE, NULL, cmucal_vclk_ip_ss_vts_glue, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_CORTEXM4INTEGRATION, NULL, cmucal_vclk_ip_cortexm4integration, 
NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_U_DMIC_CLK_MUX, NULL, cmucal_vclk_ip_u_dmic_clk_mux, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHM_AXI_LP_VTS, NULL, cmucal_vclk_ip_lhm_axi_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_LHS_AXI_D_VTS, NULL, cmucal_vclk_ip_lhs_axi_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_C_VTS, NULL, cmucal_vclk_ip_baaw_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_D_TZPC_VTS, NULL, cmucal_vclk_ip_d_tzpc_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_VGEN_LITE, NULL, cmucal_vclk_ip_vgen_lite, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_LP_VTS, NULL, cmucal_vclk_ip_bps_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BPS_P_VTS, NULL, cmucal_vclk_ip_bps_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XHB_LP_VTS, NULL, cmucal_vclk_ip_xhb_lp_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_XHB_P_VTS, NULL, cmucal_vclk_ip_xhb_p_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SWEEPER_C_VTS, NULL, cmucal_vclk_ip_sweeper_c_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_SWEEPER_D_VTS, NULL, cmucal_vclk_ip_sweeper_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_BAAW_D_VTS, NULL, cmucal_vclk_ip_baaw_d_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_ABOX_VTS, NULL, cmucal_vclk_ip_mailbox_abox_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB2, NULL, cmucal_vclk_ip_dmic_ahb2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_AHB3, NULL, cmucal_vclk_ip_dmic_ahb3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC2, NULL, cmucal_vclk_ip_hwacg_sys_dmic2, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_HWACG_SYS_DMIC3, NULL, cmucal_vclk_ip_hwacg_sys_dmic3, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_DMIC_IF_3RD, NULL, cmucal_vclk_ip_dmic_if_3rd, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_MAILBOX_AP_VTS, NULL, cmucal_vclk_ip_mailbox_ap_vts, NULL, NULL), + CMUCAL_VCLK2(VCLK_IP_TIMER, NULL, cmucal_vclk_ip_timer, NULL, NULL), }; From 96650e65e1823cd382f8f5c9c4d86bceddfa7713 Mon Sep 17 00:00:00 2001 From: Andreas Schneider <asn@cryptomilk.org> Date: Sun, 1 Mar 2020 09:49:23 +0100 Subject: [PATCH 366/452] arch:arm64:boot:dts: Fix include path for autoconf.h Signed-off-by: Andreas Schneider <asn@cryptomilk.org> --- arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi | 2 +- arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi | 2 +- scripts/Makefile.lib | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi b/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi index cea9dd757368..d2bfa2f6687c 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos9820-rmem.dtsi @@ -9,7 +9,7 @@ * published by the Free Software Foundation. */ -#include "../../../../../include/generated/autoconf.h" +#include <generated/autoconf.h> #include <dt-bindings/soc/samsung/debug-snapshot-table.h> #include <dt-bindings/soc/samsung/exynos9820-seclog.h> diff --git a/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi b/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi index c063378c0d57..395520f0cbe4 100644 --- a/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi +++ b/arch/arm64/boot/dts/samsung/exynos9820-bootargs_ext.dtsi @@ -9,7 +9,7 @@ * published by the Free Software Foundation. 
*/ -#include "../../../../../include/generated/autoconf.h" +#include <generated/autoconf.h> / { fragment@model { diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index cff0f35e1c4f..2d4087a75c78 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -304,7 +304,7 @@ $(obj)/%.dtb.S: $(obj)/%.dtb quiet_cmd_dtc = DTC $@ cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \ - $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \ + $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp $(LINUXINCLUDE) -o $(dtc-tmp) $< ; \ $(DTC) -O dtb -o $@ -b 0 -a 4\ $(addprefix -i,$(dir $<) $(DTC_INCLUDE)) $(DTC_FLAGS) \ -d $(depfile).dtc.tmp $(dtc-tmp) ; \ From c7c71aa74e6662d86c862163b1377b1bc094d9ba Mon Sep 17 00:00:00 2001 From: Erfan Abdi <erfangplus@gmail.com> Date: Wed, 19 Feb 2020 00:08:46 +0800 Subject: [PATCH 367/452] scripts: FIPS: use readelf full path --- scripts/crypto/fips_crypto_integrity.py | 9 +++++---- scripts/fmp/fips_fmp_integrity.py | 9 +++++---- scripts/link-vmlinux.sh | 4 ++-- 3 files changed, 12 insertions(+), 10 deletions(-) mode change 100644 => 100755 scripts/link-vmlinux.sh diff --git a/scripts/crypto/fips_crypto_integrity.py b/scripts/crypto/fips_crypto_integrity.py index 479a7d289861..ce07280139dd 100755 --- a/scripts/crypto/fips_crypto_integrity.py +++ b/scripts/crypto/fips_crypto_integrity.py @@ -33,16 +33,17 @@ if __name__ == "__main__": #print("python version:\n{}\n".format(sys.version)) - if len(sys.argv) != 2: - print("Usage " + sys.argv[0] + " elf_file") + if len(sys.argv) != 3: + print("Usage " + sys.argv[0] + " elf_file readelf_path") sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - modules = sys.argv[2:] + readelf_path = os.path.abspath(sys.argv[2]) + modules = sys.argv[3:] utils = Utils() utils.paths_exists([elf_file]) - integrity = IntegrityRoutine(elf_file) + integrity = IntegrityRoutine(elf_file, readelf_path) integrity.make_integrity(sec_sym=sec_sym, module_name=module_name, debug=False, print_reloc_addrs=False, sort_by="address", reverse=False) diff --git a/scripts/fmp/fips_fmp_integrity.py b/scripts/fmp/fips_fmp_integrity.py index 29ab8a73e033..d2dd6e922ed4 100755 --- a/scripts/fmp/fips_fmp_integrity.py +++ b/scripts/fmp/fips_fmp_integrity.py @@ -29,16 +29,17 @@ module_name = "fmp" if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage " + sys.argv[0] + " elf_file") + if len(sys.argv) != 3: + print("Usage " + sys.argv[0] + " elf_file readelf_path") sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - modules = sys.argv[2:] + readelf_path = os.path.abspath(sys.argv[2]) + modules = sys.argv[3:] utils = Utils() utils.paths_exists([elf_file]) - integrity = IntegrityRoutine(elf_file) + integrity = IntegrityRoutine(elf_file, readelf_path) integrity.make_integrity(sec_sym=sec_sym, module_name=module_name, debug=False, print_reloc_addrs=False, sort_by="address", reverse=False) diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh old mode 100644 new mode 100755 index 15617ff0b21b..ba25de7025fc --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -412,12 +412,12 @@ fi if [ -n "${CONFIG_CRYPTO_FIPS}" ]; then echo ' FIPS : Generating hmac of crypto and updating vmlinux... ' - PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/crypto/fips_crypto_integrity.py" "${objtree}/vmlinux" + PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/crypto/fips_crypto_integrity.py" "${objtree}/vmlinux" "${READELF}" fi if [ -n "${CONFIG_EXYNOS_FMP_FIPS}" ]; then echo ' FIPS : Generating hmac of fmp and updating vmlinux... 
' - PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/fmp/fips_fmp_integrity.py" "${objtree}/vmlinux" + PYTHONDONTWRITEBYTECODE=0 "${srctree}/scripts/fmp/fips_fmp_integrity.py" "${objtree}/vmlinux" "${READELF}" fi # We made a new kernel - delete old version file From a9d82f247825dddadb0df82d29b40735fd53a9a7 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 8 Sep 2020 20:59:47 +0300 Subject: [PATCH 368/452] scripts: FIPS: check readelf binary existance Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/crypto/fips_crypto_integrity.py | 3 ++- scripts/fmp/fips_fmp_integrity.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/crypto/fips_crypto_integrity.py b/scripts/crypto/fips_crypto_integrity.py index ce07280139dd..8da25be8ce39 100755 --- a/scripts/crypto/fips_crypto_integrity.py +++ b/scripts/crypto/fips_crypto_integrity.py @@ -10,6 +10,7 @@ import sys from IntegrityRoutine import IntegrityRoutine from Utils import Utils +from shutil import which __author__ = "Vadym Stupakov" @@ -38,7 +39,7 @@ sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - readelf_path = os.path.abspath(sys.argv[2]) + readelf_path = which(sys.argv[2]) or os.path.abspath(sys.argv[2]) modules = sys.argv[3:] utils = Utils() diff --git a/scripts/fmp/fips_fmp_integrity.py b/scripts/fmp/fips_fmp_integrity.py index d2dd6e922ed4..66856c341310 100755 --- a/scripts/fmp/fips_fmp_integrity.py +++ b/scripts/fmp/fips_fmp_integrity.py @@ -10,6 +10,7 @@ import sys from IntegrityRoutine import IntegrityRoutine from Utils import Utils +from shutil import which __author__ = "Vadym Stupakov" @@ -34,7 +35,7 @@ sys.exit(-1) elf_file = os.path.abspath(sys.argv[1]) - readelf_path = os.path.abspath(sys.argv[2]) + readelf_path = which(sys.argv[2]) or os.path.abspath(sys.argv[2]) modules = sys.argv[3:] utils = Utils() From fd3610f0234f566305707ab160694053465b0712 Mon Sep 17 00:00:00 2001 From: jimzrt <james.tophoven@gmail.com> Date: Thu, 8 Feb 2018 12:55:48 +0100 Subject: [PATCH 369/452] usb: Modify mass_storage gadget to work with configfs Signed-off-by: djb77 <dwayne.bakewell@gmail.com> --- drivers/usb/gadget/function/f_mass_storage.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 25ba30329533..ee78a5840e62 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -3350,6 +3350,8 @@ static void fsg_free_inst(struct usb_function_instance *fi) kfree(opts); } +extern struct device *create_function_device(char *name); + static struct usb_function_instance *fsg_alloc_inst(void) { struct fsg_opts *opts; @@ -3389,6 +3391,9 @@ static struct usb_function_instance *fsg_alloc_inst(void) config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type); configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group); + //create dummy device + create_function_device("f_mass_storage"); + return &opts->func_inst; release_buffers: From dbe25a49a75ed4faa8e3d2582cd0d67e79fb3548 Mon Sep 17 00:00:00 2001 From: Noxxxious <f.catzgerald@gmail.com> Date: Mon, 2 Jul 2018 02:31:57 +0200 Subject: [PATCH 370/452] usb: correct function name Other drivers like the mtp driver use a proper 'function.name' to make the configfs work. So lets correct mass storages name which will allow drivedroid to work. 
---
 drivers/usb/gadget/function/f_mass_storage.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index ee78a5840e62..a1d81c390670 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -228,7 +228,7 @@

 /*------------------------------------------------------------------------*/

-#define FSG_DRIVER_DESC "Mass Storage Function"
+#define FSG_DRIVER_DESC "mass_storage"
 #define FSG_DRIVER_VERSION "2009/09/11"

 static const char fsg_string_interface[] = "Mass Storage";

From 18d166bb934b68096fe7f6f1887d49637fd59d82 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Tue, 6 Mar 2018 17:03:31 -0800
Subject: [PATCH 371/452] fs: don't clear I_DIRTY_TIME before calling
 mark_inode_dirty_sync

__mark_inode_dirty already takes care of that, and for the XFS lazytime
implementation we need to know that ->dirty_inode was called because
I_DIRTY_TIME was set.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
---
 fs/inode.c | 1 -
 fs/sync.c  | 6 +-----
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index d662e7a25ae1..c4a2046b5db6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1538,7 +1538,6 @@ void iput(struct inode *inode)
 	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
 		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
 			atomic_inc(&inode->i_count);
-			inode->i_state &= ~I_DIRTY_TIME;
 			spin_unlock(&inode->i_lock);
 			trace_writeback_lazytime_iput(inode);
 			mark_inode_dirty_sync(inode);
diff --git a/fs/sync.c b/fs/sync.c
index 2f6aca5f0cdd..df38797a2769 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -451,12 +451,8 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
 	if (!file->f_op->fsync)
 		return -EINVAL;

-	if (!datasync && (inode->i_state & I_DIRTY_TIME)) {
-		spin_lock(&inode->i_lock);
-		inode->i_state &= ~I_DIRTY_TIME;
-		spin_unlock(&inode->i_lock);
+	if (!datasync && (inode->i_state & I_DIRTY_TIME))
 		mark_inode_dirty_sync(inode);
-	}
 	return file->f_op->fsync(file, start, end, datasync);
 }
 EXPORT_SYMBOL(vfs_fsync_range);

From b9541a49baa2667587bd9b12ef380298232f2b29 Mon Sep 17 00:00:00 2001
From: Ilie Halip <ilie.halip@gmail.com>
Date: Tue, 26 Nov 2019 16:45:44 +0200
Subject: [PATCH 372/452] x86/boot: Discard .eh_frame sections

When using GCC as compiler and LLVM's lld as linker, linking setup.elf
fails:

  LD      arch/x86/boot/setup.elf
  ld.lld: error: init sections too big!

This happens because GCC generates .eh_frame sections for most of the
files in that directory, then ld.lld places the merged section before
__end_init, triggering an assert in the linker script.

Fix this by discarding the .eh_frame sections, as suggested by Boris.
The kernel proper linker script discards them too.

[ bp: Going back in history, 64-bit kernel proper has been discarding
  .eh_frame since 2002:

    commit acca80acefe20420e69561cf55be64f16c34ea97
    Author: Andi Kleen <ak@muc.de>
    Date:   Tue Oct 29 23:54:35 2002 -0800

      [PATCH] x86-64 updates for 2.5.44

      ...

      - Remove the .eh_frame on linking.
        This saves several hundred KB in the bzImage ]

Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Ilie Halip <ilie.halip@gmail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: clang-built-linux@googlegroups.com
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Link: https://lore.kernel.org/lkml/20191118175223.GM6363@zn.tnic/
Link: https://github.com/ClangBuiltLinux/linux/issues/760
Link: https://lkml.kernel.org/r/20191126144545.19354-1-ilie.halip@gmail.com
---
 arch/x86/boot/setup.ld | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 96a6c7563538..00fd2d6d91ee 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -51,7 +51,10 @@ SECTIONS
 	. = ALIGN(16);
 	_end = .;

-	/DISCARD/ : { *(.note*) }
+	/DISCARD/ : {
+		*(.eh_frame)
+		*(.note*)
+	}

 	/*
 	 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:

From ede19aaefe65f42372d6ab774ddf4a2b153dcfe2 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada <yamada.masahiro@socionext.com>
Date: Fri, 16 Mar 2018 16:37:09 +0900
Subject: [PATCH 373/452] kbuild: clear LDFLAGS in the top Makefile

Currently LDFLAGS is not cleared, so same flags are accumulated in
LDFLAGS when the top Makefile is recursively invoked.

I found unneeded rebuild for ARCH=arm64 when CONFIG_TRIM_UNUSED_KSYMS
is enabled.  If include/generated/autoksyms.h is updated, the top
Makefile is recursively invoked, then arch/arm64/Makefile adds one more
'-maarch64linux'.  Due to the command line change, modules are rebuilt
needlessly.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
---
 Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Makefile b/Makefile
index 692bafceb978..6b57cdc316b3 100644
--- a/Makefile
+++ b/Makefile
@@ -432,6 +432,7 @@ KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=

 export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC

From 6b8493a7d5fa846784cf41ff6aa721d8168dfc51 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada <yamada.masahiro@socionext.com>
Date: Thu, 21 Feb 2019 13:13:38 +0900
Subject: [PATCH 374/452] kbuild: compute false-positive -Wmaybe-uninitialized
 cases in Kconfig

Since -Wmaybe-uninitialized was introduced by GCC 4.7, we have patched
various false positives:

- commit e74fc973b6e5 ("Turn off -Wmaybe-uninitialized when building
  with -Os") turned off this option for -Os.

- commit 815eb71e7149 ("Kbuild: disable 'maybe-uninitialized' warning
  for CONFIG_PROFILE_ALL_BRANCHES") turned off this option for
  CONFIG_PROFILE_ALL_BRANCHES

- commit a76bcf557ef4 ("Kbuild: enable -Wmaybe-uninitialized warning
  for "make W=1"") turned off this option for GCC < 4.9

Arnd provided more explanation in https://lkml.org/lkml/2017/3/14/903

I think this looks better by shifting the logic from Makefile to
Kconfig.
Link: https://github.com/ClangBuiltLinux/linux/issues/350 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> Tested-by: Nick Desaulniers <ndesaulniers@google.com> --- Makefile | 10 +++------- init/Kconfig | 17 +++++++++++++++++ kernel/trace/Kconfig | 1 + 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 6b57cdc316b3..0b447f94a64f 100644 --- a/Makefile +++ b/Makefile @@ -704,17 +704,13 @@ KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) -KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) -else -ifdef CONFIG_PROFILE_ALL_BRANCHES -KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) else KBUILD_CFLAGS += -O2 endif -endif -KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ - $(call cc-disable-warning,maybe-uninitialized,)) +ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED +KBUILD_CFLAGS += -Wno-maybe-uninitialized +endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) diff --git a/init/Kconfig b/init/Kconfig index ce912e8924d5..0d2e3e6acc08 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -16,6 +16,22 @@ config DEFCONFIG_LIST default "$ARCH_DEFCONFIG" default "arch/$ARCH/defconfig" +config CC_HAS_WARN_MAYBE_UNINITIALIZED + def_bool $(cc-option,-Wmaybe-uninitialized) + help + GCC >= 4.7 supports this option. + +config CC_DISABLE_WARN_MAYBE_UNINITIALIZED + bool + depends on CC_HAS_WARN_MAYBE_UNINITIALIZED + default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9 + help + GCC's -Wmaybe-uninitialized is not reliable by definition. + Lots of false positive warnings are produced in some cases. + + If this option is enabled, -Wno-maybe-uninitialzed is passed + to the compiler to suppress maybe-uninitialized warnings. + config CONSTRUCTORS bool depends on !UML @@ -1162,6 +1178,7 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_SIZE bool "Optimize for size" + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Enabling this option will pass "-Os" instead of "-O2" to your compiler resulting in a smaller kernel. diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 3ec4922a2655..a2d799bd3ed7 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -359,6 +359,7 @@ config PROFILE_ANNOTATED_BRANCHES config PROFILE_ALL_BRANCHES bool "Profile all if conditionals" if !FORTIFY_SOURCE select TRACE_BRANCH_PROFILING + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help This tracer profiles all branch conditions. Every if () taken in the kernel is recorded whether it hit or miss. From 783ae628e565612b499ea000305ac95f73bc9e75 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke <mka@chromium.org> Date: Mon, 18 Mar 2019 17:10:05 -0400 Subject: [PATCH 375/452] Revert "kbuild: use -Oz instead of -Os when using clang" The clang option -Oz enables *aggressive* optimization for size, which doesn't necessarily result in smaller images, but can have negative impact on performance. Switch back to the less aggressive -Os. This reverts commit 6748cb3c299de1ffbe56733647b01dbcc398c419. 
Suggested-by: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Matthias Kaehlcke <mka@chromium.org> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0b447f94a64f..24d8126d3860 100644 --- a/Makefile +++ b/Makefile @@ -703,7 +703,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) +KBUILD_CFLAGS += -Os else KBUILD_CFLAGS += -O2 endif From ad1093d898690ba351d063dae216ee1d51a8dd68 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Wed, 21 Aug 2019 02:09:40 +0900 Subject: [PATCH 376/452] kbuild,arc: add CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 for ARC arch/arc/Makefile overrides -O2 with -O3. This is the only user of ARCH_CFLAGS. There is no user of ARCH_CPPFLAGS or ARCH_AFLAGS. My plan is to remove ARCH_{CPP,A,C}FLAGS after refactoring the ARC Makefile. Currently, ARC has no way to enable -Wmaybe-uninitialized because both -O3 and -Os disable it. Enabling it will be useful for compile-testing. This commit allows allmodconfig (, which defaults to -O2) to enable it. Add CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y to all the defconfig files in arch/arc/configs/ in order to keep the current config settings. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Acked-by: Vineet Gupta <vgupta@synopsys.com> --- Makefile | 10 ++++++---- arch/arc/Makefile | 8 -------- arch/arc/configs/axs101_defconfig | 1 + arch/arc/configs/axs103_defconfig | 1 + arch/arc/configs/axs103_smp_defconfig | 1 + arch/arc/configs/haps_hs_defconfig | 1 + arch/arc/configs/haps_hs_smp_defconfig | 1 + arch/arc/configs/hsdk_defconfig | 1 + arch/arc/configs/nps_defconfig | 1 + arch/arc/configs/nsim_700_defconfig | 1 + arch/arc/configs/nsim_hs_defconfig | 1 + arch/arc/configs/nsim_hs_smp_defconfig | 1 + arch/arc/configs/nsimosci_defconfig | 1 + arch/arc/configs/nsimosci_hs_defconfig | 1 + arch/arc/configs/nsimosci_hs_smp_defconfig | 1 + arch/arc/configs/tb10x_defconfig | 1 + arch/arc/configs/vdk_hs38_defconfig | 1 + arch/arc/configs/vdk_hs38_smp_defconfig | 1 + init/Kconfig | 12 ++++++++++-- 19 files changed, 32 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 24d8126d3860..702a3b3aeee0 100644 --- a/Makefile +++ b/Makefile @@ -702,10 +702,12 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) -ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += -Os -else -KBUILD_CFLAGS += -O2 +ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE +KBUILD_CFLAGS += -O2 +else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 +KBUILD_CFLAGS += -O3 +else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +KBUILD_CFLAGS += -Os endif ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 2917f56f0ea4..960635c20d5a 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -44,14 +44,6 @@ endif cfi := $(call as-instr,.cfi_startproc\n.cfi_endproc,-DARC_DW2_UNWIND_AS_CFI) cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi) -ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE -# Generic build system uses -O2, we want -O3 -# Note: No need to add to cflags-y as that happens anyways -# -# Disable the false maybe-uninitialized 
warings gcc spits out at -O3 -ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,) -endif - # small data is default for elf32 tool-chain. If not usable, disable it # This also allows repurposing GP as scratch reg to gcc reg allocator disable_small_data := y diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 5d5ba2104ba7..0ea55ce41966 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 0874db2d48a8..e4088688abb8 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index cf5df0e1cb08..01604b72aa42 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index aa8240a92b60..fb0487b0f63f 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index bc5a24ea6cf7..dcd1f70a3f70 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 4dac1169f528..274ad3e51ebf 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -10,6 +10,7 @@ CONFIG_NAMESPACES=y # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_RAM=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 9121c6ba15d0..8c014cb87146 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -6,6 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_SYSCTL_SYSCALL=y # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index cdb06417d3d9..5c67398ec494 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -11,6 +11,7 
@@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 217d7ea3c956..36af90920caf 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index e733e4f1a320..f3c8434a1f25 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index c4577bd9196c..13406a9dcaf9 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index b20692c82d3c..b9a1e9971ee5 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -11,6 +11,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 5ad4949af6d0..ba6cf24ea9eb 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -9,6 +9,7 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set CONFIG_KPROBES=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 0130e29eeca1..7433ba017f9b 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -14,6 +14,7 @@ CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio" CONFIG_INITRAMFS_ROOT_UID=2100 CONFIG_INITRAMFS_ROOT_GID=501 # CONFIG_RD_GZIP is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 4587c9af5afe..df4a690ad26b 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -5,6 +5,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 1855aa995bc9..0eca91d14fd6 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -5,6 +5,7 @@ 
CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/init/Kconfig b/init/Kconfig index 0d2e3e6acc08..13476e15a1d7 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1170,14 +1170,22 @@ choice default CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE - bool "Optimize for performance" + bool "Optimize for performance (-O2)" help This is the default optimization level for the kernel, building with the "-O2" compiler flag for best performance and most helpful compile-time warnings. +config CC_OPTIMIZE_FOR_PERFORMANCE_O3 + bool "Optimize more for performance (-O3)" + depends on ARC + imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives + help + Choosing this option will pass "-O3" to your compiler to optimize + the kernel yet more for performance. + config CC_OPTIMIZE_FOR_SIZE - bool "Optimize for size" + bool "Optimize for size (-Os)" imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Enabling this option will pass "-Os" instead of "-O2" to From aacfe661383897778887c6e297107eaa1256ad94 Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Sat, 9 May 2020 13:57:10 -0700 Subject: [PATCH 377/452] Stop the ad-hoc games with -Wno-maybe-initialized We have some rather random rules about when we accept the "maybe-initialized" warnings, and when we don't. For example, we consider it unreliable for gcc versions < 4.9, but also if -O3 is enabled, or if optimizing for size. And then various kernel config options disabled it, because they know that they trigger that warning by confusing gcc sufficiently (ie PROFILE_ALL_BRANCHES). And now gcc-10 seems to be introducing a lot of those warnings too, so it falls under the same heading as 4.9 did. At the same time, we have a very straightforward way to _enable_ that warning when wanted: use "W=2" to enable more warnings. So stop playing these ad-hoc games, and just disable that warning by default, with the known and straight-forward "if you want to work on the extra compiler warnings, use W=123". Would it be great to have code that is always so obvious that it never confuses the compiler whether a variable is used initialized or not? Yes, it would. In a perfect world, the compilers would be smarter, and our source code would be simpler. That's currently not the world we live in, though. 
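As a rough illustration (not code from this change; the helper names are made up), the false positives in question typically come from patterns where a variable is written and read under the same condition, which the compiler cannot always prove:

	static int compute(void);
	static int consume(int v);

	static int example(int flag)
	{
		int val;

		if (flag)
			val = compute();	/* only written when flag is set */

		/* ... unrelated work ... */

		if (flag)
			return consume(val);	/* only read when flag is set, yet gcc
						 * may still warn that 'val' may be
						 * used uninitialized */
		return 0;
	}
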
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- Makefile | 7 +++---- init/Kconfig | 18 ------------------ kernel/trace/Kconfig | 1 - 3 files changed, 3 insertions(+), 23 deletions(-) diff --git a/Makefile b/Makefile index 702a3b3aeee0..b45176bb77d9 100644 --- a/Makefile +++ b/Makefile @@ -710,10 +710,6 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os endif -ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED -KBUILD_CFLAGS += -Wno-maybe-uninitialized -endif - # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) @@ -908,6 +904,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable stringop warnings in gcc 8+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) +# Enabled with W=2, disabled by default as noisy +KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) + # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) diff --git a/init/Kconfig b/init/Kconfig index 13476e15a1d7..fa13dc7a973e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -16,22 +16,6 @@ config DEFCONFIG_LIST default "$ARCH_DEFCONFIG" default "arch/$ARCH/defconfig" -config CC_HAS_WARN_MAYBE_UNINITIALIZED - def_bool $(cc-option,-Wmaybe-uninitialized) - help - GCC >= 4.7 supports this option. - -config CC_DISABLE_WARN_MAYBE_UNINITIALIZED - bool - depends on CC_HAS_WARN_MAYBE_UNINITIALIZED - default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9 - help - GCC's -Wmaybe-uninitialized is not reliable by definition. - Lots of false positive warnings are produced in some cases. - - If this option is enabled, -Wno-maybe-uninitialzed is passed - to the compiler to suppress maybe-uninitialized warnings. - config CONSTRUCTORS bool depends on !UML @@ -1179,14 +1163,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE_O3 bool "Optimize more for performance (-O3)" depends on ARC - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Choosing this option will pass "-O3" to your compiler to optimize the kernel yet more for performance. config CC_OPTIMIZE_FOR_SIZE bool "Optimize for size (-Os)" - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help Enabling this option will pass "-Os" instead of "-O2" to your compiler resulting in a smaller kernel. diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a2d799bd3ed7..3ec4922a2655 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -359,7 +359,6 @@ config PROFILE_ANNOTATED_BRANCHES config PROFILE_ALL_BRANCHES bool "Profile all if conditionals" if !FORTIFY_SOURCE select TRACE_BRANCH_PROFILING - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help This tracer profiles all branch conditions. Every if () taken in the kernel is recorded whether it hit or miss. 
From 0d558724a67f18f029169047f97bcf108a04d9f6 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 17 Jun 2020 01:16:39 +0300 Subject: [PATCH 378/452] Kconfig: allow CC_OPTIMIZE_FOR_PERFORMANCE_O3 on all arches Signed-off-by: Denis Efremov <efremov@linux.com> --- init/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig index fa13dc7a973e..c3c5710a9a47 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1162,7 +1162,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE_O3 bool "Optimize more for performance (-O3)" - depends on ARC help Choosing this option will pass "-O3" to your compiler to optimize the kernel yet more for performance. From 923cde607df57f5af9e30fed3e83105ae8cef99a Mon Sep 17 00:00:00 2001 From: Dirk Mueller <dmueller@suse.com> Date: Tue, 14 Jan 2020 18:53:41 +0100 Subject: [PATCH 379/452] scripts/dtc: Remove redundant YYLOC global declaration gcc 10 will default to -fno-common, which causes this error at link time: (.text+0x0): multiple definition of `yylloc'; dtc-lexer.lex.o (symbol from plugin):(.text+0x0): first defined here This is because both dtc-lexer as well as dtc-parser define the same global symbol yyloc. Before with -fcommon those were merged into one defintion. The proper solution would be to to mark this as "extern", however that leads to: dtc-lexer.l:26:16: error: redundant redeclaration of 'yylloc' [-Werror=redundant-decls] 26 | extern YYLTYPE yylloc; | ^~~~~~ In file included from dtc-lexer.l:24: dtc-parser.tab.h:127:16: note: previous declaration of 'yylloc' was here 127 | extern YYLTYPE yylloc; | ^~~~~~ cc1: all warnings being treated as errors which means the declaration is completely redundant and can just be dropped. Signed-off-by: Dirk Mueller <dmueller@suse.com> Signed-off-by: David Gibson <david@gibson.dropbear.id.au> [robh: cherry-pick from upstream] Cc: stable@vger.kernel.org Signed-off-by: Rob Herring <robh@kernel.org> [evdenis: scripts/dtc/dtc-parser.tab.c_shipped fixed] Signed-off-by: Denis Efremov <efremov@linux.com> --- scripts/dtc/dtc-lexer.l | 1 - scripts/dtc/dtc-lexer.lex.c_shipped | 1 - 2 files changed, 2 deletions(-) diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l index c600603044f3..cf7707be43aa 100644 --- a/scripts/dtc/dtc-lexer.l +++ b/scripts/dtc/dtc-lexer.l @@ -38,7 +38,6 @@ LINECOMMENT "//".*\n #include "srcpos.h" #include "dtc-parser.tab.h" -YYLTYPE yylloc; extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped index e0835ad4a848..a2a93b35f961 100644 --- a/scripts/dtc/dtc-lexer.lex.c_shipped +++ b/scripts/dtc/dtc-lexer.lex.c_shipped @@ -646,7 +646,6 @@ char *yytext; #include "srcpos.h" #include "dtc-parser.tab.h" -YYLTYPE yylloc; extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ From fa6cb0a4d113f430feb79c71471d8f74f22c7756 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 11 Aug 2020 12:12:56 +0300 Subject: [PATCH 380/452] fs/proc: hide magisk mounts for IsolatedService The issue described here: https://darvincitech.wordpress.com/2019/11/04/detecting-magisk-hide/ Signed-off-by: Denis Efremov <efremov@linux.com> --- fs/proc/Kconfig | 4 ++++ fs/proc_namespace.c | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 5e14d454c865..17e26e132a36 100644 --- 
a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -99,6 +99,10 @@ config PROC_CHILDREN Say Y if you are running any user-space software which takes benefit from this interface. For example, rkt is such a piece of software. +config PROC_MAGISK_HIDE_MOUNT + bool "Hide magisk mounts for IsolatedService" + default n + config PROC_UID bool "Include /proc/uid/ files" default y diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index b859aaeecb27..6fec2a071ec3 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -94,6 +94,21 @@ static void show_type(struct seq_file *m, struct super_block *sb) } } +static inline int skip_magisk_entry(const char *devname) +{ +#ifdef CONFIG_PROC_MAGISK_HIDE_MOUNT + if (devname && strstr(devname, "magisk")) { + char name[TASK_COMM_LEN]; + get_task_comm(name, current); + if (strstr(name, "Binder") || + strstr(name, "JavaBridge")) { + return SEQ_SKIP; + } + } +#endif + return 0; +} + static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) { struct proc_mounts *p = m->private; @@ -107,6 +122,9 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) if (err) goto out; } else { + err = skip_magisk_entry(r->mnt_devname); + if (err) + goto out; mangle(m, r->mnt_devname ? r->mnt_devname : "none"); } seq_putc(m, ' '); @@ -179,6 +197,9 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) if (err) goto out; } else { + err = skip_magisk_entry(r->mnt_devname); + if (err) + goto out; mangle(m, r->mnt_devname ? r->mnt_devname : "none"); } seq_puts(m, sb_rdonly(sb) ? " ro" : " rw"); @@ -210,6 +231,10 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt) goto out; } else { if (r->mnt_devname) { + err = skip_magisk_entry(r->mnt_devname); + if (err) + goto out; + seq_puts(m, "device "); mangle(m, r->mnt_devname); } else From 61779c224cfe890da988c8a4cdf0e8b5eabb7e02 Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Wed, 1 May 2019 11:05:41 -0700 Subject: [PATCH 381/452] gcc-9: silence 'address-of-packed-member' warning commit 6f303d60534c46aa1a239f29c321f95c83dda748 upstream. We already did this for clang, but now gcc has that warning too. Yes, yes, the address may be unaligned. And that's kind of the point. 
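As a rough illustration (not code from this change; the struct and function are made up), the warning fires on patterns like the following, where the unaligned pointer is exactly what the code intends:

	struct wire_hdr {
		unsigned char	type;
		unsigned int	addr;		/* at offset 1, so unaligned */
	} __attribute__((packed));

	static unsigned int *addr_field(struct wire_hdr *h)
	{
		return &h->addr;		/* -Waddress-of-packed-member:
						 * taking the address of a packed
						 * member may yield an unaligned
						 * pointer -- intentionally so here */
	}
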
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b45176bb77d9..0a0c52b66ca0 100644 --- a/Makefile +++ b/Makefile @@ -700,6 +700,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE @@ -761,7 +762,6 @@ ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) From 67c14033359d92eb219dc4524e0eda9267c4c6de Mon Sep 17 00:00:00 2001 From: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> Date: Fri, 8 Feb 2019 23:51:05 +0100 Subject: [PATCH 382/452] Compiler Attributes: add support for __copy (gcc >= 9) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c0d9782f5b6d7157635ae2fd782a4b27d55a6013 upstream. From the GCC manual: copy copy(function) The copy attribute applies the set of attributes with which function has been declared to the declaration of the function to which the attribute is applied. The attribute is designed for libraries that define aliases or function resolvers that are expected to specify the same set of attributes as their targets. The copy attribute can be used with functions, variables, or types. However, the kind of symbol to which the attribute is applied (either function or variable) must match the kind of symbol to which the argument refers. The copy attribute copies only syntactic and semantic attributes but not attributes that affect a symbol’s linkage or visibility such as alias, visibility, or weak. The deprecated attribute is also not copied. https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html The upcoming GCC 9 release extends the -Wmissing-attributes warnings (enabled by -Wall) to C and aliases: it warns when particular function attributes are missing in the aliases but not in their target, e.g.: void __cold f(void) {} void __alias("f") g(void); diagnoses: warning: 'g' specifies less restrictive attribute than its target 'f': 'cold' [-Wmissing-attributes] Using __copy(f) we can copy the __cold attribute from f to g: void __cold f(void) {} void __copy(f) __alias("f") g(void); This attribute is most useful to deal with situations where an alias is declared but we don't know the exact attributes the target has. For instance, in the kernel, the widely used module_init/exit macros define the init/cleanup_module aliases, but those cannot be marked always as __init/__exit since some modules do not have their functions marked as such. 
Suggested-by: Martin Sebor <msebor@gcc.gnu.org> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> Signed-off-by: Stefan Agner <stefan@agner.ch> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- include/linux/compiler-gcc.h | 4 ++++ include/linux/compiler_types.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 4816355b9875..6d7ead22c1b4 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -343,6 +343,10 @@ #define __designated_init __attribute__((designated_init)) #endif +#if GCC_VERSION >= 90100 +#define __copy(symbol) __attribute__((__copy__(symbol))) +#endif + #endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e9ce90615869..a207f820d3b0 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -230,6 +230,10 @@ struct ftrace_likely_data { # define __latent_entropy #endif +#ifndef __copy +# define __copy(symbol) +#endif + #ifndef __randomize_layout # define __randomize_layout __designated_init #endif From 24dad93cad682ceab276c1bad9fa2a5343f20b1e Mon Sep 17 00:00:00 2001 From: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> Date: Sat, 19 Jan 2019 20:59:34 +0100 Subject: [PATCH 383/452] include/linux/module.h: copy __init/__exit attrs to init/cleanup_module commit a6e60d84989fa0e91db7f236eda40453b0e44afa upstream. The upcoming GCC 9 release extends the -Wmissing-attributes warnings (enabled by -Wall) to C and aliases: it warns when particular function attributes are missing in the aliases but not in their target. In particular, it triggers for all the init/cleanup_module aliases in the kernel (defined by the module_init/exit macros), ending up being very noisy. These aliases point to the __init/__exit functions of a module, which are defined as __cold (among other attributes). However, the aliases themselves do not have the __cold attribute. Since the compiler behaves differently when compiling a __cold function as well as when compiling paths leading to calls to __cold functions, the warning is trying to point out the possibly-forgotten attribute in the alias. In order to keep the warning enabled, we decided to silence this case. Ideally, we would mark the aliases directly as __init/__exit. However, there are currently around 132 modules in the kernel which are missing __init/__exit in their init/cleanup functions (either because they are missing, or for other reasons, e.g. the functions being called from somewhere else); and a section mismatch is a hard error. A conservative alternative was to mark the aliases as __cold only. However, since we would like to eventually enforce __init/__exit to be always marked, we chose to use the new __copy function attribute (introduced by GCC 9 as well to deal with this). With it, we copy the attributes used by the target functions into the aliases. This way, functions that were not marked as __init/__exit won't have their aliases marked either, and therefore there won't be a section mismatch. Note that the warning would go away marking either the extern declaration, the definition, or both. However, we only mark the definition of the alias, since we do not want callers (which only see the declaration) to be compiled as if the function was __cold (and therefore the paths leading to those calls would be assumed to be unlikely). 
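As a rough sketch of how this plays out for a typical module (the driver name below is made up, not taken from this change):

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init mydrv_init(void)
	{
		return 0;
	}
	module_init(mydrv_init);

	/*
	 * With the updated macro, module_init(mydrv_init) expands roughly to:
	 *
	 *   int init_module(void) __copy(mydrv_init)
	 *                         __attribute__((alias("mydrv_init")));
	 *
	 * so the init_module alias inherits mydrv_init's attributes (__init
	 * implies __cold, among others) and gcc 9's -Wmissing-attributes is
	 * satisfied, while modules whose init functions lack __init simply
	 * have nothing extra copied and therefore no section mismatch.
	 */
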
Link: https://lore.kernel.org/lkml/20190123173707.GA16603@gmail.com/ Link: https://lore.kernel.org/lkml/20190206175627.GA20399@gmail.com/ Suggested-by: Martin Sebor <msebor@gcc.gnu.org> Acked-by: Jessica Yu <jeyu@kernel.org> Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> Signed-off-by: Stefan Agner <stefan@agner.ch> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- include/linux/module.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/module.h b/include/linux/module.h index 2d01e23c34a9..96c51179d500 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -129,13 +129,13 @@ extern void cleanup_module(void); #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ - int init_module(void) __attribute__((alias(#initfn))); + int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); /* This is only required if you want to be unloadable. */ #define module_exit(exitfn) \ static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ - void cleanup_module(void) __attribute__((alias(#exitfn))); + void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); #endif From e443ff7188c4dbe91370f0048794530af2b73c3b Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <natechancellor@gmail.com> Date: Tue, 11 Jun 2019 11:43:31 -0700 Subject: [PATCH 384/452] kbuild: Add -Werror=unknown-warning-option to CLANG_FLAGS [ Upstream commit 589834b3a0097a4908f4112eac0ca2feb486fa32 ] In commit ebcc5928c5d9 ("arm64: Silence gcc warnings about arch ABI drift"), the arm64 Makefile added -Wno-psabi to KBUILD_CFLAGS, which is a GCC only option so clang rightfully complains: warning: unknown warning option '-Wno-psabi' [-Wunknown-warning-option] https://clang.llvm.org/docs/DiagnosticsReference.html#wunknown-warning-option However, by default, this is merely a warning so the build happily goes on with a slew of these warnings in the process. Commit c3f0d0bc5b01 ("kbuild, LLVMLinux: Add -Werror to cc-option to support clang") worked around this behavior in cc-option by adding -Werror so that unknown flags cause an error. However, this all happens silently and when an unknown flag is added to the build unconditionally like -Wno-psabi, cc-option will always fail because there is always an unknown flag in the list of flags. This manifested as link time failures in the arm64 libstub because -fno-stack-protector didn't get added to KBUILD_CFLAGS. To avoid these weird cryptic failures in the future, make clang behave like gcc and immediately error when it encounters an unknown flag by adding -Werror=unknown-warning-option to CLANG_FLAGS. This can be added unconditionally for clang because it is supported by at least 3.0.0, according to godbolt [1] and 4.0.0, according to its documentation [2], which is far earlier than we typically support. 
[1]: https://godbolt.org/z/7F7rm3 [2]: https://releases.llvm.org/4.0.0/tools/clang/docs/DiagnosticsReference.html#wunknown-warning-option Link: https://github.com/ClangBuiltLinux/linux/issues/511 Link: https://github.com/ClangBuiltLinux/linux/issues/517 Suggested-by: Peter Smith <peter.smith@linaro.org> Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Tested-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Sasha Levin <sashal@kernel.org> --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 0a0c52b66ca0..c581e9fdd4e8 100644 --- a/Makefile +++ b/Makefile @@ -519,6 +519,7 @@ ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif CLANG_FLAGS += -no-integrated-as +CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) export CLANG_FLAGS From 30e9d947bcb62d4ff9f25c4c1e2b9406c85437e7 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Mon, 29 Jul 2019 18:15:17 +0900 Subject: [PATCH 385/452] kbuild: initialize CLANG_FLAGS correctly in the top Makefile commit 5241ab4cf42d3a93b933b55d3d53f43049081fa1 upstream. CLANG_FLAGS is initialized by the following line: CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) ..., which is run only when CROSS_COMPILE is set. Some build targets (bindeb-pkg etc.) recurse to the top Makefile. When you build the kernel with Clang but without CROSS_COMPILE, the same compiler flags such as -no-integrated-as are accumulated into CLANG_FLAGS. If you run 'make CC=clang' and then 'make CC=clang bindeb-pkg', Kbuild will recompile everything needlessly due to the build command change. Fix this by correctly initializing CLANG_FLAGS. Fixes: 238bcbc4e07f ("kbuild: consolidate Clang compiler flags") Cc: <stable@vger.kernel.org> # v5.0+ Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> Acked-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c581e9fdd4e8..c51396defc63 100644 --- a/Makefile +++ b/Makefile @@ -434,6 +434,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds LDFLAGS := GCC_PLUGINS_CFLAGS := +CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES @@ -510,7 +511,7 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) CLANG_TRIPLE ?= $(CROSS_COMPILE) -CLANG_FLAGS := --target=$(notdir $(CLANG_TRIPLE:%-=%)) +CLANG_FLAGS += --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) From 7d6322aeb6e3202e6a4e3dd55be4c74058247537 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Fri, 30 Mar 2018 13:15:26 +0900 Subject: [PATCH 386/452] kbuild: use -fmacro-prefix-map to make __FILE__ a relative path [ Upstream commit a73619a845d5625079cc1b3b820f44c899618388 ] The __FILE__ macro is used everywhere in the kernel to locate the file printing the log message, such as WARN_ON(), etc. 
If the kernel is built out of tree, this can be a long absolute path, like this: WARNING: CPU: 1 PID: 1 at /path/to/build/directory/arch/arm64/kernel/foo.c:... This is because Kbuild runs in the objtree instead of the srctree, then __FILE__ is expanded to a file path prefixed with $(srctree)/. Commit 9da0763bdd82 ("kbuild: Use relative path when building in a subdir of the source tree") improved this to some extent; $(srctree) becomes ".." if the objtree is a child of the srctree. For other cases of out-of-tree build, __FILE__ is still the absolute path. It also means the kernel image depends on where it was built. A brand-new option from GCC, -fmacro-prefix-map, solves this problem. If your compiler supports it, __FILE__ is the relative path from the srctree regardless of O= option. This provides more readable log and more reproducible builds. Please note __FILE__ is always an absolute path for external modules. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Sasha Levin <sashal@kernel.org> --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index c51396defc63..eb66e9ee2ce2 100644 --- a/Makefile +++ b/Makefile @@ -942,6 +942,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) # Require designated initializers for all marked structures KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) +# change __FILE__ to the relative path from the srctree +KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) From 9575f6bda3255e1105ea529a800557f36a11806f Mon Sep 17 00:00:00 2001 From: Seth Forshee <seth.forshee@canonical.com> Date: Wed, 17 Jul 2019 11:06:26 -0500 Subject: [PATCH 387/452] kbuild: add -fcf-protection=none when using retpoline flags [ Upstream commit 29be86d7f9cb18df4123f309ac7857570513e8bc ] The gcc -fcf-protection=branch option is not compatible with -mindirect-branch=thunk-extern. The latter is used when CONFIG_RETPOLINE is selected, and this will fail to build with a gcc which has -fcf-protection=branch enabled by default. Adding -fcf-protection=none when building with retpoline enabled prevents such build failures. Signed-off-by: Seth Forshee <seth.forshee@canonical.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Sasha Levin <sashal@kernel.org> --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index eb66e9ee2ce2..94caddcfe41b 100644 --- a/Makefile +++ b/Makefile @@ -945,6 +945,12 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) # change __FILE__ to the relative path from the srctree KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) +# ensure -fcf-protection is disabled when using retpoline as it is +# incompatible with -mindirect-branch=thunk-extern +ifdef CONFIG_RETPOLINE +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +endif + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) From aa3021c177b1b200ea92d6c2fa31c09609d12804 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Thu, 22 Nov 2018 08:11:54 +0900 Subject: [PATCH 388/452] kbuild: fix single target build for external module [ Upstream commit e07db28eea38ed4e332b3a89f3995c86b713cb5b ] Building a single target in an external module fails due to missing .tmp_versions directory. 
For example, $ make -C /lib/modules/$(uname -r)/build M=$PWD foo.o will fail in the following way: CC [M] /home/masahiro/foo/foo.o /bin/sh: 1: cannot create /home/masahiro/foo/.tmp_versions/foo.mod: Directory nonexistent This is because $(cmd_crmodverdir) is executed only before building /, %/, %.ko single targets of external modules. Create .tmp_versions in the 'prepare' target. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Sasha Levin <sashal@kernel.org> --- Makefile | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 94caddcfe41b..dd4f8550f8fe 100644 --- a/Makefile +++ b/Makefile @@ -1668,9 +1668,6 @@ else # KBUILD_EXTMOD # We are always building modules KBUILD_MODULES := 1 -PHONY += crmodverdir -crmodverdir: - $(cmd_crmodverdir) PHONY += $(objtree)/Module.symvers $(objtree)/Module.symvers: @@ -1682,7 +1679,7 @@ $(objtree)/Module.symvers: module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD)) PHONY += $(module-dirs) modules -$(module-dirs): crmodverdir $(objtree)/Module.symvers +$(module-dirs): prepare $(objtree)/Module.symvers $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) modules: $(module-dirs) @@ -1723,7 +1720,8 @@ help: # Dummies... PHONY += prepare scripts -prepare: ; +prepare: + $(cmd_crmodverdir) scripts: ; endif # KBUILD_EXTMOD @@ -1849,17 +1847,14 @@ endif # Modules /: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) # Make sure the latest headers are built for Documentation Documentation/ samples/: headers_install %/: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) %.ko: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) $(@:.ko=.o) $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost From 56b1297285b0d8fe0a0aaa9671abd3b90b21cde7 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Tue, 15 Jan 2019 16:19:00 +0900 Subject: [PATCH 389/452] kbuild: mark prepare0 as PHONY to fix external module build [ Upstream commit e00d8880481497474792d28c14479a9fb6752046 ] Commit c3ff2a5193fa ("powerpc/32: add stack protector support") caused kernel panic on PowerPC when an external module is used with CONFIG_STACKPROTECTOR because the 'prepare' target was not executed for the external module build. Commit e07db28eea38 ("kbuild: fix single target build for external module") turned it into a build error because the 'prepare' target is now executed but the 'prepare0' target is missing for the external module build. External module on arm/arm64 with CONFIG_STACKPROTECTOR_PER_TASK is also broken in the same way. Move 'PHONY += prepare0' to the common place. GNU Make is fine with missing rule for phony targets. I also removed the comment which is wrong irrespective of this commit. I minimize the change so it can be easily backported to 4.20.x To fix v4.20, please backport e07db28eea38 ("kbuild: fix single target build for external module"), and then this commit. 
Link: https://bugzilla.kernel.org/show_bug.cgi?id=201891 Fixes: e07db28eea38 ("kbuild: fix single target build for external module") Fixes: c3ff2a5193fa ("powerpc/32: add stack protector support") Fixes: 189af4657186 ("ARM: smp: add support for per-task stack canaries") Fixes: 0a1213fa7432 ("arm64: enable per-task stack canaries") Cc: linux-stable <stable@vger.kernel.org> # v4.20 Reported-by: Samuel Holland <samuel@sholland.org> Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru> Signed-off-by: Sasha Levin <sashal@kernel.org> --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index dd4f8550f8fe..93adda2a63d5 100644 --- a/Makefile +++ b/Makefile @@ -1088,6 +1088,7 @@ ifdef CONFIG_STACK_VALIDATION endif endif +PHONY += prepare0 ifeq ($(KBUILD_EXTMOD),) core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ @@ -1182,8 +1183,7 @@ include/config/kernel.release: include/config/auto.conf FORCE # archprepare is used in arch Makefiles and when processed asm symlink, # version.h and scripts_basic is processed / created. -# Listed in dependency order -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 +PHONY += prepare archprepare prepare1 prepare2 prepare3 # prepare3 is used to check if we are building in a separate output directory, # and if so do: From 4308aae08ac5196a2d19e714272a55b946cf201b Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Sat, 9 May 2020 14:30:29 -0700 Subject: [PATCH 390/452] gcc-10: disable 'zero-length-bounds' warning for now commit 5c45de21a2223fe46cf9488c99a7fbcf01527670 upstream. This is a fine warning, but we still have a number of zero-length arrays in the kernel that come from the traditional gcc extension. Yes, they are getting converted to flexible arrays, but in the meantime the gcc-10 warning about zero-length bounds is very verbose, and is hiding other issues. I missed one actual build failure because it was hidden among hundreds of lines of warning. Thankfully I caught it on the second go before pushing things out, but it convinced me that I really need to disable the new warnings for now. We'll hopefully be all done with our conversion to flexible arrays in the not too distant future, and we can then re-enable this warning. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 93adda2a63d5..72148229fa17 100644 --- a/Makefile +++ b/Makefile @@ -906,6 +906,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable stringop warnings in gcc 8+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) +# We'll want to enable this eventually, but it's not going away for 5.7 at least +KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) + # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From 68341bc0d4dae965a734cd9437de36ded3359a3b Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Sat, 9 May 2020 14:52:44 -0700 Subject: [PATCH 391/452] gcc-10: disable 'array-bounds' warning for now commit 44720996e2d79e47d508b0abe99b931a726a3197 upstream. 
This is another fine warning, related to the 'zero-length-bounds' one, but hitting the same historical code in the kernel. Because C didn't historically support flexible array members, we have code that instead uses a one-sized array, the same way we have cases of zero-sized arrays. The one-sized arrays come from either not wanting to use the gcc zero-sized array extension, or from a slight convenience-feature, where particularly for strings, the size of the structure now includes the allocation for the final NUL character. So with a "char name[1];" at the end of a structure, you can do things like v = my_malloc(sizeof(struct vendor) + strlen(name)); and avoid the "+1" for the terminator. Yes, the modern way to do that is with a flexible array, and using 'offsetof()' instead of 'sizeof()', and adding the "+1" by hand. That also technically gets the size "more correct" in that it avoids any alignment (and thus padding) issues, but this is another long-term cleanup thing that will not happen for 5.7. So disable the warning for now, even though it's potentially quite useful. Having a slew of warnings that then hide more urgent new issues is not an improvement. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 72148229fa17..f18bbc947921 100644 --- a/Makefile +++ b/Makefile @@ -908,6 +908,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) # We'll want to enable this eventually, but it's not going away for 5.7 at least KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From 5f0736de233ec728e3b61a6b7d62a2342147427e Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Sat, 9 May 2020 15:40:52 -0700 Subject: [PATCH 392/452] gcc-10: disable 'stringop-overflow' warning for now commit 5a76021c2eff7fcf2f0918a08fd8a37ce7922921 upstream. This is the final array bounds warning removal for gcc-10 for now. Again, the warning is good, and we should re-enable all these warnings when we have converted all the legacy array declaration cases to flexible arrays. But in the meantime, it's just noise. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index f18bbc947921..51bb24ef3599 100644 --- a/Makefile +++ b/Makefile @@ -909,6 +909,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) # We'll want to enable this eventually, but it's not going away for 5.7 at least KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From c3bbf6be405b0c4ebf383bbccbeaeb900eac68e3 Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Sat, 9 May 2020 15:45:21 -0700 Subject: [PATCH 393/452] gcc-10: disable 'restrict' warning for now commit adc71920969870dfa54e8f40dac8616284832d02 upstream. gcc-10 now warns about passing aliasing pointers to functions that take restricted pointers. 
That's actually a great warning, and if we ever start using 'restrict' in the kernel, it might be quite useful. But right now we don't, and it turns out that the only thing this warns about is an idiom where we have declared a few functions to be "printf-like" (which seems to make gcc pick up the restricted pointer thing), and then we print to the same buffer that we also use as an input. And people do that as an odd concatenation pattern, with code like this: #define sysfs_show_gen_prop(buffer, fmt, ...) \ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__) where we have 'buffer' as both the destination of the final result, and as the initial argument. Yes, it's a bit questionable. And outside of the kernel, people do have standard declarations like int snprintf( char *restrict buffer, size_t bufsz, const char *restrict format, ... ); where that output buffer is marked as a restrict pointer that cannot alias with any other arguments. But in the context of the kernel, that 'use snprintf() to concatenate to the end result' does work, and the pattern shows up in multiple places. And we have not marked our own version of snprintf() as taking restrict pointers, so the warning is incorrect for now, and gcc picks it up on its own. If we do start using 'restrict' in the kernel (and it might be a good idea if people find places where it matters), we'll need to figure out how to avoid this issue for snprintf and friends. But in the meantime, this warning is not useful. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 51bb24ef3599..a817a4e2de53 100644 --- a/Makefile +++ b/Makefile @@ -911,6 +911,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) +# Another good warning that we'll want to enable eventually +KBUILD_CFLAGS += $(call cc-disable-warning, restrict) + # Enabled with W=2, disabled by default as noisy KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) From 3149fc2404e1b4b85401bd7b7fadcb33e4e67c08 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich <slyfox@gentoo.org> Date: Tue, 17 Mar 2020 00:07:18 +0000 Subject: [PATCH 394/452] Makefile: disallow data races on gcc-10 as well commit b1112139a103b4b1101d0d2d72931f2d33d8c978 upstream. gcc-10 will rename --param=allow-store-data-races=0 to -fno-allow-store-data-races. The flag change happened at https://gcc.gnu.org/PR92046. 
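As a rough illustration of the class of transformation these options forbid (not code from this change; 'cond' and 'shared' are made up):

	static void maybe_set(int cond, int *shared)
	{
		if (cond)
			*shared = 1;	/* source stores only when cond is true */
	}

	/*
	 * Without --param=allow-store-data-races=0 (or the gcc-10 spelling
	 * -fno-allow-store-data-races), the compiler could if-convert this
	 * into something like
	 *
	 *   tmp = *shared;
	 *   *shared = cond ? 1 : tmp;
	 *
	 * i.e. an unconditional store that writes to *shared even when cond
	 * is false, creating a data race with other CPUs that the original
	 * code never had.
	 */
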
Signed-off-by: Sergei Trofimovich <slyfox@gentoo.org> Acked-by: Jiri Kosina <jkosina@suse.cz> Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Cc: Thomas Backlund <tmb@mageia.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index a817a4e2de53..3ba2aaaa19e8 100644 --- a/Makefile +++ b/Makefile @@ -715,6 +715,7 @@ endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) # check for 'asm goto' ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) From a932a66889cbb08b9f8e290336f878850bb427e5 Mon Sep 17 00:00:00 2001 From: Vasily Averin <vvs@virtuozzo.com> Date: Fri, 10 Apr 2020 14:34:10 -0700 Subject: [PATCH 395/452] kernel/gcov/fs.c: gcov_seq_next() should increase position index [ Upstream commit f4d74ef6220c1eda0875da30457bef5c7111ab06 ] If seq_file .next function does not change position index, read after some lseek can generate unexpected output. https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin <vvs@virtuozzo.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Peter Oberparleiter <oberpar@linux.ibm.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Ingo Molnar <mingo@redhat.com> Cc: Manfred Spraul <manfred@colorfullife.com> Cc: NeilBrown <neilb@suse.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Waiman Long <longman@redhat.com> Link: http://lkml.kernel.org/r/f65c6ee7-bd00-f910-2f8a-37cc67e4ff88@virtuozzo.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Sasha Levin <sashal@kernel.org> --- kernel/gcov/fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index 6e40ff6be083..291e0797125b 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -109,9 +109,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; + (*pos)++; if (gcov_iter_next(iter)) return NULL; - (*pos)++; return iter; } From ef0be809e5cadf4565d134bf80ad0708caf66580 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky <leonro@nvidia.com> Date: Fri, 4 Sep 2020 18:58:08 +0300 Subject: [PATCH 396/452] gcov: Disable gcov build with GCC 10 [ Upstream commit cfc905f158eaa099d6258031614d11869e7ef71c ] GCOV built with GCC 10 doesn't initialize n_function variable. This produces different kernel panics as was seen by Colin in Ubuntu and me in FC 32. As a workaround, let's disable GCOV build for broken GCC 10 version. 
Link: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1891288 Link: https://lore.kernel.org/lkml/20200827133932.3338519-1-leon@kernel.org Link: https://lore.kernel.org/lkml/CAHk-=whbijeSdSvx-Xcr0DPMj0BiwhJ+uiNnDSVZcr_h_kg7UA@mail.gmail.com/ Cc: Colin Ian King <colin.king@canonical.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Sasha Levin <sashal@kernel.org> --- kernel/gcov/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 1276aabaab55..1d78ed19a351 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -3,6 +3,7 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS + depends on !CC_IS_GCC || GCC_VERSION < 100000 select CONSTRUCTORS if !UML default n ---help--- From 89cd0094d122c9bca95d6ff9231f07ebc909819f Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter <oberpar@linux.ibm.com> Date: Thu, 10 Sep 2020 14:52:01 +0200 Subject: [PATCH 397/452] gcov: add support for GCC 10.1 [ Upstream commit 40249c6962075c040fd071339acae524f18bfac9 ] Using gcov to collect coverage data for kernels compiled with GCC 10.1 causes random malfunctions and kernel crashes. This is the result of a changed GCOV_COUNTERS value in GCC 10.1 that causes a mismatch between the layout of the gcov_info structure created by GCC profiling code and the related structure used by the kernel. Fix this by updating the in-kernel GCOV_COUNTERS value. Also re-enable config GCOV_KERNEL for use with GCC 10. Reported-by: Colin Ian King <colin.king@canonical.com> Reported-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com> Tested-by: Leon Romanovsky <leonro@nvidia.com> Tested-and-Acked-by: Colin Ian King <colin.king@canonical.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Sasha Levin <sashal@kernel.org> --- kernel/gcov/Kconfig | 1 - kernel/gcov/gcc_4_7.c | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 1d78ed19a351..1276aabaab55 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -3,7 +3,6 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS - depends on !CC_IS_GCC || GCC_VERSION < 100000 select CONSTRUCTORS if !UML default n ---help--- diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ca5e5c0ef853..5b9e76117ded 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include <linux/vmalloc.h> #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 From 4ea8261675d5ca630bbeda529099977b4b67548b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <masahiroy@kernel.org> Date: Sun, 31 May 2020 17:47:06 +0900 Subject: [PATCH 398/452] kbuild: force to build vmlinux if CONFIG_MODVERSION=y commit 4b50c8c4eaf06a825d1c005c0b1b4a8307087b83 upstream. This code does not work as stated in the comment. $(CONFIG_MODVERSIONS) is always empty because it is expanded before include/config/auto.conf is included. Hence, 'make modules' with CONFIG_MODVERSION=y cannot record the version CRCs. This has been broken since 2003, commit ("kbuild: Enable modules to be build using the "make dir/" syntax"). 
[1] [1]: https://git.kernel.org/pub/scm/linux/kernel/git/history/history.git/commit/?id=15c6240cdc44bbeef3c4797ec860f9765ef4f1a7 Cc: linux-stable <stable@vger.kernel.org> # v2.5.71+ Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 3ba2aaaa19e8..10ce0697fb9c 100644 --- a/Makefile +++ b/Makefile @@ -573,12 +573,8 @@ KBUILD_MODULES := KBUILD_BUILTIN := 1 # If we have only "make modules", don't compile built-in objects. -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. - ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) + KBUILD_BUILTIN := endif # If we have "make <whatever> modules", compile modules @@ -1391,6 +1387,13 @@ ifdef CONFIG_MODULES all: modules +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. +ifdef CONFIG_MODVERSIONS + KBUILD_BUILTIN := 1 +endif + # Build modules # # A module can be listed more than once in obj-m resulting in From 373c33af23ab3c3214aac327cd0a330d6f1b363a Mon Sep 17 00:00:00 2001 From: Fangrui Song <maskray@google.com> Date: Tue, 21 Jul 2020 10:31:23 -0700 Subject: [PATCH 399/452] Makefile: Fix GCC_TOOLCHAIN_DIR prefix for Clang cross compilation commit ca9b31f6bb9c6aa9b4e5f0792f39a97bbffb8c51 upstream. When CROSS_COMPILE is set (e.g. aarch64-linux-gnu-), if $(CROSS_COMPILE)elfedit is found at /usr/bin/aarch64-linux-gnu-elfedit, GCC_TOOLCHAIN_DIR will be set to /usr/bin/. --prefix= will be set to /usr/bin/ and Clang as of 11 will search for both $(prefix)aarch64-linux-gnu-$needle and $(prefix)$needle. GCC searchs for $(prefix)aarch64-linux-gnu/$version/$needle, $(prefix)aarch64-linux-gnu/$needle and $(prefix)$needle. In practice, $(prefix)aarch64-linux-gnu/$needle rarely contains executables. To better model how GCC's -B/--prefix takes in effect in practice, newer Clang (since https://github.com/llvm/llvm-project/commit/3452a0d8c17f7166f479706b293caf6ac76ffd90) only searches for $(prefix)$needle. Currently it will find /usr/bin/as instead of /usr/bin/aarch64-linux-gnu-as. Set --prefix= to $(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) (/usr/bin/aarch64-linux-gnu-) so that newer Clang can find the appropriate cross compiling GNU as (when -no-integrated-as is in effect). 
Cc: stable@vger.kernel.org Reported-by: Nathan Chancellor <natechancellor@gmail.com> Signed-off-by: Fangrui Song <maskray@google.com> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> Tested-by: Nathan Chancellor <natechancellor@gmail.com> Tested-by: Nick Desaulniers <ndesaulniers@google.com> Link: https://github.com/ClangBuiltLinux/linux/issues/1099 Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 10ce0697fb9c..b19ec32d5933 100644 --- a/Makefile +++ b/Makefile @@ -513,7 +513,7 @@ ifneq ($(CROSS_COMPILE),) CLANG_TRIPLE ?= $(CROSS_COMPILE) CLANG_FLAGS += --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) From 89ee73db836c3af83152b9bdebccbfadc1f9ad8a Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <natechancellor@gmail.com> Date: Thu, 27 Jun 2019 12:14:48 -0700 Subject: [PATCH 400/452] kbuild: Add ability to test Clang's integrated assembler There are some people interested in experimenting with Clang's integrated assembler. To make it easy to do so without source modification, allow the user to specify 'AS=clang' as part of the make command to avoid adding '-no-integrated-as' to the {A,C}FLAGS. Link: https://github.com/ClangBuiltLinux/linux/issues/577 Suggested-by: Dmitry Golovin <dima@golovin.in> Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Tested-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index b19ec32d5933..1110b035853e 100644 --- a/Makefile +++ b/Makefile @@ -519,7 +519,9 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif +ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as +endif CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) From a63030f5ed2c295cfa1d3eada6e11fa8457bbb08 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <natechancellor@gmail.com> Date: Thu, 9 May 2019 04:48:25 -0700 Subject: [PATCH 401/452] kbuild: Don't try to add '-fcatch-undefined-behavior' flag This is no longer a valid option in clang, it was removed in 3.5, which we don't support. https://github.com/llvm/llvm-project/commit/cb3f812b6b9fab8f3b41414f24e90222170417b4 Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 1110b035853e..a07fbe5dc830 100644 --- a/Makefile +++ b/Makefile @@ -770,7 +770,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # source of a reference will be _MergedGlobals and not on of the whitelisted names. # See modpost pattern 2 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) -KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) else # These warnings generated too much noise in a regular build. 
From e5a3004375f64c662bc9283b48e6f99417d06c14 Mon Sep 17 00:00:00 2001 From: Laura Abbott <labbott@redhat.com> Date: Thu, 12 Apr 2018 14:21:54 -0500 Subject: [PATCH 402/452] objtool: Support HOSTCFLAGS and HOSTLDFLAGS It may be useful to compile host programs with different flags (e.g. hardening). Ensure that objtool picks up the appropriate flags. Signed-off-by: Laura Abbott <labbott@redhat.com> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Masahiro Yamada <yamada.masahiro@socionext.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/05a360681176f1423cb2fde8faae3a0a0261afc5.1523560825.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org> --- tools/objtool/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 8ae824dbfca3..f76d9914686a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ -I$(srctree)/tools/objtool/arch/$(ARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) -LDFLAGS += -lelf $(LIBSUBCMD) +CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) +LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) # Allow old libelf to be used: elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) From 1e7177c2d8cf902a893230a99d66f08d0005bdc4 Mon Sep 17 00:00:00 2001 From: Laura Abbott <labbott@redhat.com> Date: Mon, 9 Jul 2018 17:45:56 -0700 Subject: [PATCH 403/452] tools: build: Fixup host c flags Commit 0c3b7e42616f ("tools build: Add support for host programs format") introduced host_c_flags which referenced CHOSTFLAGS. The actual name of the variable is HOSTCFLAGS. Fix this up. Fixes: 0c3b7e42616f ("tools build: Add support for host programs format") Signed-off-by: Laura Abbott <labbott@redhat.com> Acked-by: Jiri Olsa <jolsa@kernel.org> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- tools/build/Build.include | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build/Build.include b/tools/build/Build.include index d9048f145f97..950c1504ca37 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX ### ## HOSTCC C flags -host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj)) +host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) From f4e05f12b4ff552d973fb7a3bea2dca3869d4ff0 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <masahiroy@kernel.org> Date: Thu, 26 Mar 2020 14:57:18 +0900 Subject: [PATCH 404/452] kbuild: remove AS variable As commit 5ef872636ca7 ("kbuild: get rid of misleading $(AS) from documents") noted, we rarely use $(AS) directly in the kernel build. Now that the only/last user of $(AS) in drivers/net/wan/Makefile was converted to $(CC), $(AS) is no longer used in the build process. You can still pass in AS=clang, which is just a switch to turn on the LLVM integrated assembler. 
Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Tested-by: Nick Desaulniers <ndesaulniers@google.com> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> --- Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index a07fbe5dc830..039812fbb763 100644 --- a/Makefile +++ b/Makefile @@ -372,7 +372,6 @@ HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) HOST_LOADLIBES := $(HOST_LFS_LIBS) # Make variables (CC, etc...) -AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld LDGOLD = $(CROSS_COMPILE)ld.gold CC = $(CROSS_COMPILE)gcc @@ -436,7 +435,7 @@ LDFLAGS := GCC_PLUGINS_CFLAGS := CLANG_FLAGS := -export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC +export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS @@ -519,7 +518,7 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),) +ifeq ($(if $(AS),$(shell $(AS) --version 2>&1 | head -n 1 | grep clang)),) CLANG_FLAGS += -no-integrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option From 9965631c3ce02f7f8fff69bb4716261895498c96 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <masahiroy@kernel.org> Date: Wed, 8 Apr 2020 10:36:22 +0900 Subject: [PATCH 405/452] kbuild: replace AS=clang with LLVM_IAS=1 The 'AS' variable is unused for building the kernel. Only the remaining usage is to turn on the integrated assembler. A boolean flag is a better fit for this purpose. AS=clang was added for experts. So, I replaced it with LLVM_IAS=1, breaking the backward compatibility. Suggested-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 039812fbb763..823f489131b8 100644 --- a/Makefile +++ b/Makefile @@ -518,7 +518,7 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -ifeq ($(if $(AS),$(shell $(AS) --version 2>&1 | head -n 1 | grep clang)),) +ifneq ($(LLVM_IAS),1) CLANG_FLAGS += -no-integrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option From dea50ddc011936f38e03dbafaebfcf8ec48f5c32 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <natechancellor@gmail.com> Date: Thu, 16 May 2019 12:49:42 -0500 Subject: [PATCH 406/452] objtool: Allow AR to be overridden with HOSTAR Currently, this Makefile hardcodes GNU ar, meaning that if it is not available, there is no way to supply a different one and the build will fail. $ make AR=llvm-ar CC=clang LD=ld.lld HOSTAR=llvm-ar HOSTCC=clang \ HOSTLD=ld.lld HOSTLDFLAGS=-fuse-ld=lld defconfig modules_prepare ... AR /out/tools/objtool/libsubcmd.a /bin/sh: 1: ar: not found ... Follow the logic of HOST{CC,LD} and allow the user to specify a different ar tool via HOSTAR (which is used elsewhere in other tools/ Makefiles). 
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Reviewed-by: Mukesh Ojha <mojha@codeaurora.org> Cc: <stable@vger.kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/80822a9353926c38fd7a152991c6292491a9d0e8.1558028966.git.jpoimboe@redhat.com Link: https://github.com/ClangBuiltLinux/linux/issues/481 Signed-off-by: Ingo Molnar <mingo@kernel.org> --- tools/objtool/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index f76d9914686a..29c8c358451a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,11 +7,12 @@ ARCH := x86 endif # always use the host compiler +HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) -AR = ar ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) From 913ebb708942d22ca421d459a44956dd16ad9a5b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <masahiroy@kernel.org> Date: Wed, 8 Apr 2020 10:36:23 +0900 Subject: [PATCH 407/452] kbuild: support LLVM=1 to switch the default tools to Clang/LLVM As Documentation/kbuild/llvm.rst implies, building the kernel with a full set of LLVM tools gets very verbose and unwieldy. Provide a single switch LLVM=1 to use Clang and LLVM tools instead of GCC and Binutils. You can pass it from the command line or as an environment variable. Please note LLVM=1 does not turn on the integrated assembler. You need to pass LLVM_IAS=1 to use it. When the upstream kernel is ready for the integrated assembler, I think we can make it default. We discussed what we need, and we agreed to go with a simple boolean flag that switches both target and host tools: https://lkml.org/lkml/2020/3/28/494 https://lkml.org/lkml/2020/4/3/43 Some items discussed, but not adopted: - LLVM_DIR When multiple versions of LLVM are installed, I just thought supporting LLVM_DIR=/path/to/my/llvm/bin/ might be useful. CC = $(LLVM_DIR)clang LD = $(LLVM_DIR)ld.lld ... However, we can handle this by modifying PATH. So, we decided to not do this. - LLVM_SUFFIX Some distributions (e.g. Debian) package specific versions of LLVM with naming conventions that use the version as a suffix. CC = clang$(LLVM_SUFFIX) LD = ld.lld(LLVM_SUFFIX) ... will allow a user to pass LLVM_SUFFIX=-11 to use clang-11 etc., but the suffixed versions in /usr/bin/ are symlinks to binaries in /usr/lib/llvm-#/bin/, so this can also be handled by PATH. 
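For illustration (commands assumed, not part of the patch itself), a build with the full LLVM toolchain then becomes:

  $ make LLVM=1 defconfig
  $ make LLVM=1 -j$(nproc)

and, if the integrated assembler is also wanted:

  $ make LLVM=1 LLVM_IAS=1 -j$(nproc)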
Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> Tested-by: Nathan Chancellor <natechancellor@gmail.com> # build Tested-by: Nick Desaulniers <ndesaulniers@google.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> --- Makefile | 29 ++++++++++++++++++++++++----- tools/objtool/Makefile | 6 ++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 823f489131b8..d43f80de1f34 100644 --- a/Makefile +++ b/Makefile @@ -363,8 +363,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) -HOSTCC = gcc -HOSTCXX = g++ +ifneq ($(LLVM),) +HOSTCC = clang +HOSTCXX = clang++ +else +HOSTCC = gcc +HOSTCXX = g++ +endif HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) @@ -372,16 +377,30 @@ HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) HOST_LOADLIBES := $(HOST_LFS_LIBS) # Make variables (CC, etc...) +CPP = $(CC) -E +ifneq ($(LLVM),) +CC = clang +LD = ld.lld +LDGOLD = ld.gold +AR = llvm-ar +NM = llvm-nm +OBJCOPY = llvm-objcopy +OBJDUMP = llvm-objdump +READELF = llvm-readelf +OBJSIZE = llvm-size +STRIP = llvm-strip +else +CC = $(CROSS_COMPILE)gcc LD = $(CROSS_COMPILE)ld LDGOLD = $(CROSS_COMPILE)ld.gold -CC = $(CROSS_COMPILE)gcc -CPP = $(CC) -E AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm -STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump READELF = $(CROSS_COMPILE)readelf +OBJSIZE = $(CROSS_COMPILE)size +STRIP = $(CROSS_COMPILE)strip +endif AWK = awk GENKSYMS = scripts/genksyms/genksyms INSTALLKERNEL := installkernel diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 29c8c358451a..0b9c598a8eb8 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,9 +7,15 @@ ARCH := x86 endif # always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +endif AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) From 6b4020632ab1032cff0243b6bac4ccfe24271c29 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers <ndesaulniers@google.com> Date: Tue, 25 Aug 2020 07:00:00 -0700 Subject: [PATCH 408/452] lib/string.c: implement stpcpy LLVM implemented a recent "libcall optimization" that lowers calls to `sprintf(dest, "%s", str)` where the return value is used to `stpcpy(dest, str) - dest`. This generally avoids the machinery involved in parsing format strings. `stpcpy` is just like `strcpy` except it returns the pointer to the new tail of `dest`. This optimization was introduced into clang-12. Implement this so that we don't observe linkage failures due to missing symbol definitions for `stpcpy`. Similar to last year's fire drill with: commit 5f074f3e192f ("lib/string.c: implement a basic bcmp") The kernel is somewhere between a "freestanding" environment (no full libc) and "hosted" environment (many symbols from libc exist with the same type, function signature, and semantics). As H. Peter Anvin notes, there's not really a great way to inform the compiler that you're targeting a freestanding environment but would like to opt-in to some libcall optimizations (see pr/47280 below), rather than opt-out. 
Arvind notes, -fno-builtin-* behaves slightly differently between GCC and Clang, and Clang is missing many __builtin_* definitions, which I consider a bug in Clang and am working on fixing. Masahiro summarizes the subtle distinction between compilers justly: To prevent transformation from foo() into bar(), there are two ways in Clang to do that; -fno-builtin-foo, and -fno-builtin-bar. There is only one in GCC; -fno-buitin-foo. (Any difference in that behavior in Clang is likely a bug from a missing __builtin_* definition.) Masahiro also notes: We want to disable optimization from foo() to bar(), but we may still benefit from the optimization from foo() into something else. If GCC implements the same transform, we would run into a problem because it is not -fno-builtin-bar, but -fno-builtin-foo that disables that optimization. In this regard, -fno-builtin-foo would be more future-proof than -fno-built-bar, but -fno-builtin-foo is still potentially overkill. We may want to prevent calls from foo() being optimized into calls to bar(), but we still may want other optimization on calls to foo(). It seems that compilers today don't quite provide the fine grain control over which libcall optimizations pseudo-freestanding environments would prefer. Finally, Kees notes that this interface is unsafe, so we should not encourage its use. As such, I've removed the declaration from any header, but it still needs to be exported to avoid linkage errors in modules. Reported-by: Sami Tolvanen <samitolvanen@google.com> Suggested-by: Andy Lavr <andy.lavr@gmail.com> Suggested-by: Arvind Sankar <nivedita@alum.mit.edu> Suggested-by: Joe Perches <joe@perches.com> Suggested-by: Masahiro Yamada <masahiroy@kernel.org> Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk> Signed-off-by: Nick Desaulniers <ndesaulniers@google.com> Acked-by: Kees Cook <keescook@chromium.org> Cc: stable@vger.kernel.org Link: https://bugs.llvm.org/show_bug.cgi?id=47162 Link: https://bugs.llvm.org/show_bug.cgi?id=47280 Link: https://github.com/ClangBuiltLinux/linux/issues/1126 Link: https://man7.org/linux/man-pages/man3/stpcpy.3.html Link: https://pubs.opengroup.org/onlinepubs/9699919799/functions/stpcpy.html Link: https://reviews.llvm.org/D85963 --- lib/string.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/string.c b/lib/string.c index 33befc6ba3fa..38450d042444 100644 --- a/lib/string.c +++ b/lib/string.c @@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count) EXPORT_SYMBOL(strscpy); #endif +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is the new + * %NUL-terminated character. (for strcpy, the return value is a pointer to + * src. This interface is considered unsafe as it doesn't perform bounds + * checking of the inputs. As such it's not recommended for usage. Instead, + * its definition is provided in case the compiler lowers other libcalls to + * stpcpy. 
+ */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another From 06f28921161f05d6d9e233ceebf7a384febe490c Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski <contact@paulk.fr> Date: Mon, 2 Jul 2018 11:16:59 +0200 Subject: [PATCH 409/452] arm64: Use aarch64elf and aarch64elfb emulation mode variants The aarch64linux and aarch64linuxb emulation modes are not supported by bare-metal toolchains and Linux using them forbids building the kernel with these toolchains. Since there is apparently no reason to target these emulation modes, the more generic elf modes are used instead, allowing to build on bare-metal toolchains as well as the already-supported ones. Fixes: 3d6a7b99e3fa ("arm64: ensure the kernel is compiled for LP64") Cc: stable@vger.kernel.org Acked-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Paul Kocialkowski <contact@paulk.fr> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> --- arch/arm64/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index a221b9f8f98c..615cc6620d50 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -88,7 +88,7 @@ LD += -EB ifeq ($(ld-name),gold) LDFLAGS += -maarch64_elf64_be_vec else -LDFLAGS += -maarch64linuxb +LDFLAGS += -maarch64elfb endif UTS_MACHINE := aarch64_be else @@ -99,7 +99,7 @@ LD += -EL ifeq ($(ld-name),gold) LDFLAGS += -maarch64_elf64_le_vec else -LDFLAGS += -maarch64linux +LDFLAGS += -maarch64elf endif UTS_MACHINE := aarch64 endif From d6c7a55376573e75b22680e41b3a71f728e0f10b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Tue, 3 Jul 2018 10:22:00 +0900 Subject: [PATCH 410/452] arm64: add endianness option to LDFLAGS instead of LD With the recent syntax extension, Kconfig is now able to evaluate the compiler / toolchain capability. However, accumulating flags to 'LD' is not compatible with the way it works; 'LD' must be passed to Kconfig to call $(ld-option,...) from Kconfig files. If you tweak 'LD' in arch Makefile depending on CONFIG_CPU_BIG_ENDIAN, this would end up with circular dependency between Makefile and Kconfig. 
Acked-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> --- arch/arm64/Makefile | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 615cc6620d50..0c630482f80f 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -84,22 +84,20 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB -LD += -EB ifeq ($(ld-name),gold) -LDFLAGS += -maarch64_elf64_be_vec +LDFLAGS += -EB -maarch64_elf64_be_vec else -LDFLAGS += -maarch64elfb +LDFLAGS += -EB -maarch64elfb endif UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL -LD += -EL ifeq ($(ld-name),gold) -LDFLAGS += -maarch64_elf64_le_vec +LDFLAGS += -EL -maarch64_elf64_le_vec else -LDFLAGS += -maarch64elf +LDFLAGS += -EL -maarch64elf endif UTS_MACHINE := aarch64 endif From 1b77ae03b8a8a65e3bcd76d8fe5b374c66cc1505 Mon Sep 17 00:00:00 2001 From: Laura Abbott <labbott@redhat.com> Date: Mon, 9 Jul 2018 13:09:56 -0700 Subject: [PATCH 411/452] Revert "arm64: Use aarch64elf and aarch64elfb emulation mode variants" This reverts commit 38fc4248677552ce35efc09902fdcb06b61d7ef9. Distributions such as Fedora and Debian do not package the ELF linker scripts with their toolchains, resulting in kernel build failures such as: | CHK include/generated/compile.h | LD [M] arch/arm64/crypto/sha512-ce.o | aarch64-linux-gnu-ld: cannot open linker script file ldscripts/aarch64elf.xr: No such file or directory | make[1]: *** [scripts/Makefile.build:530: arch/arm64/crypto/sha512-ce.o] Error 1 | make: *** [Makefile:1029: arch/arm64/crypto] Error 2 Revert back to the linux targets for now, adding a comment to the Makefile so we don't accidentally break this in the future. Cc: Paul Kocialkowski <contact@paulk.fr> Cc: <stable@vger.kernel.org> Fixes: 38fc42486775 ("arm64: Use aarch64elf and aarch64elfb emulation mode variants") Tested-by: Kevin Hilman <khilman@baylibre.com> Signed-off-by: Laura Abbott <labbott@redhat.com> Signed-off-by: Will Deacon <will.deacon@arm.com> --- arch/arm64/Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 0c630482f80f..f78f77ce12ed 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -87,7 +87,9 @@ AS += -EB ifeq ($(ld-name),gold) LDFLAGS += -EB -maarch64_elf64_be_vec else -LDFLAGS += -EB -maarch64elfb +# We must use the linux target here, since distributions don't tend to package +# the ELF linker scripts with binutils, and this results in a build failure. +LDFLAGS += -EB -maarch64linuxb endif UTS_MACHINE := aarch64_be else @@ -97,7 +99,7 @@ AS += -EL ifeq ($(ld-name),gold) LDFLAGS += -EL -maarch64_elf64_le_vec else -LDFLAGS += -EL -maarch64elf +LDFLAGS += -EL -maarch64linux # See comment above endif UTS_MACHINE := aarch64 endif From 236f6bbe7dfed0103ecd821f237e5148d1fc0c76 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Fri, 23 Feb 2018 13:56:53 +0900 Subject: [PATCH 412/452] kbuild: simplify ld-option implementation commit 0294e6f4a0006856e1f36b8cd8fa088d9e499e98 upstream. Currently, linker options are tested by the coordination of $(CC) and $(LD) because $(LD) needs some object to link. 
As commit 86a9df597cdd ("kbuild: fix linker feature test macros when cross compiling with Clang") addressed, we need to make sure $(CC) and $(LD) agree the underlying architecture of the passed object. This could be a bit complex when we combine tools from different groups. For example, we can use clang for $(CC), but we still need to rely on GCC toolchain for $(LD). So, I was searching for a way of standalone testing of linker options. A trick I found is to use '-v'; this not only prints the version string, but also tests if the given option is recognized. If a given option is supported, $ aarch64-linux-gnu-ld -v --fix-cortex-a53-843419 GNU ld (Linaro_Binutils-2017.11) 2.28.2.20170706 $ echo $? 0 If unsupported, $ aarch64-linux-gnu-ld -v --fix-cortex-a53-843419 GNU ld (crosstool-NG linaro-1.13.1-4.7-2013.04-20130415 - Linaro GCC 2013.04) 2.23.1 aarch64-linux-gnu-ld: unrecognized option '--fix-cortex-a53-843419' aarch64-linux-gnu-ld: use the --help option for usage information $ echo $? 1 Gold works likewise. $ aarch64-linux-gnu-ld.gold -v --fix-cortex-a53-843419 GNU gold (Linaro_Binutils-2017.11 2.28.2.20170706) 1.14 masahiro@pug:~/ref/linux$ echo $? 0 $ aarch64-linux-gnu-ld.gold -v --fix-cortex-a53-999999 GNU gold (Linaro_Binutils-2017.11 2.28.2.20170706) 1.14 aarch64-linux-gnu-ld.gold: --fix-cortex-a53-999999: unknown option aarch64-linux-gnu-ld.gold: use the --help option for usage information $ echo $? 1 LLD too. $ ld.lld -v --gc-sections LLD 7.0.0 (http://llvm.org/git/lld.git 4a0e4190e74cea19f8a8dc625ccaebdf8b5d1585) (compatible with GNU linkers) $ echo $? 0 $ ld.lld -v --fix-cortex-a53-843419 LLD 7.0.0 (http://llvm.org/git/lld.git 4a0e4190e74cea19f8a8dc625ccaebdf8b5d1585) (compatible with GNU linkers) $ echo $? 0 $ ld.lld -v --fix-cortex-a53-999999 ld.lld: error: unknown argument: --fix-cortex-a53-999999 LLD 7.0.0 (http://llvm.org/git/lld.git 4a0e4190e74cea19f8a8dc625ccaebdf8b5d1585) (compatible with GNU linkers) $ echo $? 1 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Tested-by: Nick Desaulniers <ndesaulniers@google.com> [nc: try-run-cached was added later, just use try-run, which is the current mainline state] Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- scripts/Kbuild.include | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 1f2f528b7f23..ae30702c1f15 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -196,9 +196,7 @@ cc-ldoption = $(call try-run,\ # ld-option # Usage: LDFLAGS += $(call ld-option, -X) -ld-option = $(call try-run,\ - $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \ - $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2)) +ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2)) # ar-option # Usage: KBUILD_ARFLAGS := $(call ar-option,D) From b095cc8b560b4490ca39e7cdb282ace368c623f4 Mon Sep 17 00:00:00 2001 From: Olof Johansson <olof@lixom.net> Date: Fri, 13 Jul 2018 08:30:33 -0700 Subject: [PATCH 413/452] arm64: build with baremetal linker target instead of Linux when available Not all toolchains have the baremetal elf targets, RedHat/Fedora ones in particular. So, probe for whether it's available and use the previous (linux) targets if it isn't. 
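For illustration (assuming the '-v' based ld-option probe introduced above), the check effectively boils down to:

  $ aarch64-linux-gnu-ld -v -maarch64elf
  $ echo $?

A zero exit status selects -maarch64elf; a non-zero one makes $(call ld-option, -maarch64elf, -maarch64linux) fall back to the linux target, so toolchains without the baremetal emulation keep working.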
Reported-by: Laura Abbott <labbott@redhat.com> Tested-by: Laura Abbott <labbott@redhat.com> Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com> Cc: Paul Kocialkowski <contact@paulk.fr> Signed-off-by: Olof Johansson <olof@lixom.net> Signed-off-by: Will Deacon <will.deacon@arm.com> --- arch/arm64/Makefile | 9 +++++---- scripts/Kbuild.include | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f78f77ce12ed..8ed3708a6838 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -87,9 +87,9 @@ AS += -EB ifeq ($(ld-name),gold) LDFLAGS += -EB -maarch64_elf64_be_vec else -# We must use the linux target here, since distributions don't tend to package -# the ELF linker scripts with binutils, and this results in a build failure. -LDFLAGS += -EB -maarch64linuxb +# Prefer the baremetal ELF build target, but not all toolchains include +# it so fall back to the standard linux version if needed. +LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb) endif UTS_MACHINE := aarch64_be else @@ -99,7 +99,8 @@ AS += -EL ifeq ($(ld-name),gold) LDFLAGS += -EL -maarch64_elf64_le_vec else -LDFLAGS += -EL -maarch64linux # See comment above +# Same as above, prefer ELF but fall back to linux target if needed. +LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux) endif UTS_MACHINE := aarch64 endif diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index ae30702c1f15..99cb231a93bf 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -195,8 +195,8 @@ cc-ldoption = $(call try-run,\ $(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) # ld-option -# Usage: LDFLAGS += $(call ld-option, -X) -ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2)) +# Usage: LDFLAGS += $(call ld-option, -X, -Y) +ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2),$(3)) # ar-option # Usage: KBUILD_ARFLAGS := $(call ar-option,D) From 5f5cfc95ad6bfa97f8a053053abdf98f520f96f8 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 5 Jun 2020 10:39:55 +0300 Subject: [PATCH 414/452] kbuild: add variables for compression tools commit 8dfb61dcbaceb19a5ded5e9c9dcf8d05acc32294 upstream. Allow user to use alternative implementations of compression tools, such as pigz, pbzip2, pxz. For example, multi-threaded tools to speed up the build: $ make KGZIP=pigz KBZIP2=pbzip2 Variables KGZIP, KBZIP2, KLZOP are used internally because original env vars are reserved by the tools. The use of GZIP in gzip tool is obsolete since 2015. However, alternative implementations (e.g., pigz) still rely on it. BZIP2, BZIP, LZOP vars are not obsolescent. The credit goes to @grsecurity. 
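Because the variables are exported, they also reach the packaging targets; for example (pigz assumed to be installed, command for illustration only):

  $ make KGZIP=pigz targz-pkg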
As a sidenote, for multi-threaded lzma, xz compression one can use: $ export XZ_OPT="--threads=0" Signed-off-by: Denis Efremov <efremov@linux.com> Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> Signed-off-by: Matthias Maennich <maennich@google.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- Makefile | 11 +++++++++-- arch/m68k/Makefile | 8 ++++---- arch/parisc/Makefile | 2 +- scripts/Makefile.lib | 12 ++++++------ scripts/package/Makefile | 8 ++++---- scripts/package/buildtar | 6 +++--- scripts/xz_wrap.sh | 2 +- 7 files changed, 28 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index d43f80de1f34..b95984938d47 100644 --- a/Makefile +++ b/Makefile @@ -408,6 +408,12 @@ DEPMOD = /sbin/depmod PERL = perl PYTHON = python CHECK = sparse +KGZIP = gzip +KBZIP2 = bzip2 +KLZOP = lzop +LZMA = lzma +LZ4 = lz4c +XZ = xz CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void $(CF) @@ -457,6 +463,7 @@ CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP READELF HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE +export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS @@ -1071,10 +1078,10 @@ export mod_strip_cmd mod_compress_cmd = true ifdef CONFIG_MODULE_COMPRESS ifdef CONFIG_MODULE_COMPRESS_GZIP - mod_compress_cmd = gzip -n -f + mod_compress_cmd = $(KGZIP) -n -f endif # CONFIG_MODULE_COMPRESS_GZIP ifdef CONFIG_MODULE_COMPRESS_XZ - mod_compress_cmd = xz -f + mod_compress_cmd = $(XZ) -f endif # CONFIG_MODULE_COMPRESS_XZ endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index a229d28e14cc..c44a53b743e4 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -135,10 +135,10 @@ vmlinux.gz: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - gzip -9c vmlinux.tmp >vmlinux.gz + $(KGZIP) -9c vmlinux.tmp >vmlinux.gz rm vmlinux.tmp else - gzip -9c vmlinux >vmlinux.gz + $(KGZIP) -9c vmlinux >vmlinux.gz endif bzImage: vmlinux.bz2 @@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - bzip2 -1c vmlinux.tmp >vmlinux.bz2 + $(KBZIP2) -1c vmlinux.tmp >vmlinux.bz2 rm vmlinux.tmp else - bzip2 -1c vmlinux >vmlinux.bz2 + $(KBZIP2) -1c vmlinux >vmlinux.bz2 endif archclean: diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01946ebaff72..efbc6e36df3e 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -134,7 +134,7 @@ vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ else vmlinuz: vmlinux - @gzip -cf -9 $< > $@ + @$(KGZIP) -cf -9 $< > $@ endif install: diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 2d4087a75c78..2976b68d01a8 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -269,7 +269,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ # --------------------------------------------------------------------------- quiet_cmd_gzip = GZIP $@ -cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \ +cmd_gzip = (cat $(filter-out FORCE,$^) | $(KGZIP) -n -f -9 > $@) || \ (rm -f $@ ; false) # DTC @@ -347,7 +347,7 @@ printf "%08x\n" $$dec_size | \ quiet_cmd_bzip2 = BZIP2 $@ cmd_bzip2 = (cat $(filter-out FORCE,$^) | \ - bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(KBZIP2) -9 && $(call 
size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) # Lzma @@ -355,17 +355,17 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^) | \ quiet_cmd_lzma = LZMA $@ cmd_lzma = (cat $(filter-out FORCE,$^) | \ - lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(LZMA) -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) quiet_cmd_lzo = LZO $@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ - lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(KLZOP) -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) quiet_cmd_lz4 = LZ4 $@ cmd_lz4 = (cat $(filter-out FORCE,$^) | \ - lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + $(LZ4) -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) # U-Boot mkimage @@ -417,7 +417,7 @@ cmd_xzkern = (cat $(filter-out FORCE,$^) | \ quiet_cmd_xzmisc = XZMISC $@ cmd_xzmisc = (cat $(filter-out FORCE,$^) | \ - xz --check=crc32 --lzma2=dict=1MiB) > $@ || \ + $(XZ) --check=crc32 --lzma2=dict=1MiB) > $@ || \ (rm -f $@ ; false) # ASM offsets diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 348af5b20618..3567749d6be4 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -39,7 +39,7 @@ if test "$(objtree)" != "$(srctree)"; then \ false; \ fi ; \ $(srctree)/scripts/setlocalversion --save-scmversion; \ -tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \ +tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \ rm -f $(objtree)/.scmversion @@ -121,9 +121,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \ tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \ rm -r $(perf-tar); \ $(if $(findstring tar-src,$@),, \ -$(if $(findstring bz2,$@),bzip2, \ -$(if $(findstring gz,$@),gzip, \ -$(if $(findstring xz,$@),xz, \ +$(if $(findstring bz2,$@),$(KBZIP2), \ +$(if $(findstring gz,$@),$(KGZIP), \ +$(if $(findstring xz,$@),$(XZ), \ $(error unknown target $@)))) \ -f -9 $(perf-tar).tar) diff --git a/scripts/package/buildtar b/scripts/package/buildtar index e8cc72a51b32..d6c0fc3ac004 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -28,15 +28,15 @@ case "${1}" in opts= ;; targz-pkg) - opts=--gzip + opts="-I ${KGZIP}" tarball=${tarball}.gz ;; tarbz2-pkg) - opts=--bzip2 + opts="-I ${KBZIP2}" tarball=${tarball}.bz2 ;; tarxz-pkg) - opts=--xz + opts="-I ${XZ}" tarball=${tarball}.xz ;; *) diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh index 7a2d372f4885..76e9cbcfbeab 100755 --- a/scripts/xz_wrap.sh +++ b/scripts/xz_wrap.sh @@ -20,4 +20,4 @@ case $SRCARCH in sparc) BCJ=--sparc ;; esac -exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB +exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB From 4554ae20a9fb2ad79d7f85ac2136e43793e86667 Mon Sep 17 00:00:00 2001 From: Dave Martin <Dave.Martin@arm.com> Date: Thu, 6 Jun 2019 11:33:43 +0100 Subject: [PATCH 415/452] arm64: Silence gcc warnings about arch ABI drift Since GCC 9, the compiler warns about evolution of the platform-specific ABI, in particular relating for the marshaling of certain structures involving bitfields. The kernel is a standalone binary, and of course nobody would be so stupid as to expose structs containing bitfields as function arguments in ABI. (Passing a pointer to such a struct, however inadvisable, should be unaffected by this change. perf and various drivers rely on that.) So these warnings do more harm than good: turn them off. 
We may miss warnings about future ABI drift, but that's too bad. Future ABI breaks of this class will have to be debugged and fixed the traditional way unless the compiler evolves finer-grained diagnostics. Signed-off-by: Dave Martin <Dave.Martin@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> --- arch/arm64/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 8ed3708a6838..f953a6c24fe7 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -75,6 +75,7 @@ KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_CFLAGS += -fno-pic +KBUILD_CFLAGS += -Wno-psabi KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) From b5846124247e01ab6273bca0f554c12a1dcbbd07 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <natechancellor@gmail.com> Date: Tue, 11 Jun 2019 10:19:32 -0700 Subject: [PATCH 416/452] arm64: Don't unconditionally add -Wno-psabi to KBUILD_CFLAGS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a GCC only option, which warns about ABI changes within GCC, so unconditionally adding it breaks Clang with tons of: warning: unknown warning option '-Wno-psabi' [-Wunknown-warning-option] and link time failures: ld.lld: error: undefined symbol: __efistub___stack_chk_guard >>> referenced by arm-stub.c:73 (/home/nathan/cbl/linux/drivers/firmware/efi/libstub/arm-stub.c:73) >>> arm-stub.stub.o:(__efistub_install_memreserve_table) in archive ./drivers/firmware/efi/libstub/lib.a These failures come from the lack of -fno-stack-protector, which is added via cc-option in drivers/firmware/efi/libstub/Makefile. When an unknown flag is added to KBUILD_CFLAGS, clang will noisily warn that it is ignoring the option like above, unlike gcc, who will just error. $ echo "int main() { return 0; }" > tmp.c $ clang -Wno-psabi tmp.c; echo $? warning: unknown warning option '-Wno-psabi' [-Wunknown-warning-option] 1 warning generated. 0 $ gcc -Wsometimes-uninitialized tmp.c; echo $? gcc: error: unrecognized command line option ‘-Wsometimes-uninitialized’; did you mean ‘-Wmaybe-uninitialized’? 1 For cc-option to work properly with clang and behave like gcc, -Werror is needed, which was done in commit c3f0d0bc5b01 ("kbuild, LLVMLinux: Add -Werror to cc-option to support clang"). $ clang -Werror -Wno-psabi tmp.c; echo $? error: unknown warning option '-Wno-psabi' [-Werror,-Wunknown-warning-option] 1 As a consequence of this, when an unknown flag is unconditionally added to KBUILD_CFLAGS, it will cause cc-option to always fail and those flags will never get added: $ clang -Werror -Wno-psabi -fno-stack-protector tmp.c; echo $? error: unknown warning option '-Wno-psabi' [-Werror,-Wunknown-warning-option] 1 This can be seen when compiling the whole kernel as some warnings that are normally disabled (see below) show up. The full list of flags missing from drivers/firmware/efi/libstub are the following (gathered from diffing .arm64-stub.o.cmd): -fno-delete-null-pointer-checks -Wno-address-of-packed-member -Wframe-larger-than=2048 -Wno-unused-const-variable -fno-strict-overflow -fno-merge-all-constants -fno-stack-check -Werror=date-time -Werror=incompatible-pointer-types -ffreestanding -fno-stack-protector Use cc-disable-warning so that it gets disabled for GCC and does nothing for Clang. 
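As a rough sketch of the mechanism (an approximation, not the literal Kbuild.include definition), cc-disable-warning probes the positive form of the option under -Werror and only emits the negative form on success:

  $ gcc -Werror -Wpsabi -c -x c /dev/null -o /dev/null; echo $?
  0
  $ clang -Werror -Wpsabi -c -x c /dev/null -o /dev/null; echo $?
  1

So GCC ends up with -Wno-psabi in KBUILD_CFLAGS, while a clang that does not know the option (as in the examples above) adds nothing and stays quiet.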
Fixes: ebcc5928c5d9 ("arm64: Silence gcc warnings about arch ABI drift") Link: https://github.com/ClangBuiltLinux/linux/issues/511 Reported-by: Qian Cai <cai@lca.pw> Acked-by: Dave Martin <Dave.Martin@arm.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Nathan Chancellor <natechancellor@gmail.com> Signed-off-by: Will Deacon <will.deacon@arm.com> --- arch/arm64/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f953a6c24fe7..75e36c1de54d 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -75,7 +75,7 @@ KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_CFLAGS += -fno-pic -KBUILD_CFLAGS += -Wno-psabi +KBUILD_CFLAGS += $(call cc-disable-warning, psabi) KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) From 6203e29905661026ab589376689983030f4683df Mon Sep 17 00:00:00 2001 From: Linus Torvalds <torvalds@linux-foundation.org> Date: Sat, 9 May 2020 15:58:04 -0700 Subject: [PATCH 417/452] gcc-10: avoid shadowing standard library 'free()' in crypto MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1a263ae60b04de959d9ce9caea4889385eefcc7b upstream. gcc-10 has started warning about conflicting types for a few new built-in functions, particularly 'free()'. This results in warnings like: crypto/xts.c:325:13: warning: conflicting types for built-in function ‘free’; expected ‘void(void *)’ [-Wbuiltin-declaration-mismatch] because the crypto layer had its local freeing functions called 'free()'. Gcc-10 is in the wrong here, since that function is marked 'static', and thus there is no chance of confusion with any standard library function namespace. But the simplest thing to do is to just use a different name here, and avoid this gcc mis-feature. [ Side note: gcc knowing about 'free()' is in itself not the mis-feature: the semantics of 'free()' are special enough that a compiler can validly do special things when seeing it. So the mis-feature here is that gcc thinks that 'free()' is some restricted name, and you can't shadow it as a local static function. 
Making the special 'free()' semantics be a function attribute rather than tied to the name would be the much better model ] Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> --- crypto/lrw.c | 4 ++-- crypto/xts.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index 886f91f2426c..1b73fec817cf 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -531,7 +531,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->child); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -642,7 +642,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) diff --git a/crypto/xts.c b/crypto/xts.c index e31828ed0046..f5fba941d6f6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -469,7 +469,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_cipher(ctx->tweak); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -580,7 +580,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) From 4afd09d32ceea728a3dcea73cada996048d9bb38 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Fri, 20 Nov 2020 17:53:09 +0300 Subject: [PATCH 418/452] dpu20: make vendor check of cables for dex mode optional Add CONFIG_DISPLAYPORT_DEX_FORCE_WQHD config. Thanks, @fart1-git. Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/video/fbdev/exynos/dpu20/Kconfig | 7 +++++++ drivers/video/fbdev/exynos/dpu20/displayport_drv.c | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/video/fbdev/exynos/dpu20/Kconfig b/drivers/video/fbdev/exynos/dpu20/Kconfig index 020b81473176..2ad5e91f1c1c 100644 --- a/drivers/video/fbdev/exynos/dpu20/Kconfig +++ b/drivers/video/fbdev/exynos/dpu20/Kconfig @@ -23,6 +23,13 @@ config EXYNOS_DISPLAYPORT help Enable DISPLAYPORT driver. +config DISPLAYPORT_DEX_FORCE_WQHD + bool "Force WQHD in DEX" + depends on EXYNOS_DISPLAYPORT + default n + help + Force WQHD mode in dex_adapter_type. 
+ config EXYNOS_WINDOW_UPDATE bool "Support Window Update Mode" depends on EXYNOS_DPU20 diff --git a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c index 111bbc9a4535..e6e8d8263e5c 100644 --- a/drivers/video/fbdev/exynos/dpu20/displayport_drv.c +++ b/drivers/video/fbdev/exynos/dpu20/displayport_drv.c @@ -2668,6 +2668,12 @@ static void displayport_aux_sel(struct displayport_device *displayport) } } +#ifdef CONFIG_DISPLAYPORT_DEX_FORCE_WQHD +static void displayport_check_adapter_type(struct displayport_device *displayport) +{ + displayport->dex_adapter_type = DEX_WQHD_SUPPORT; +} +#else static void displayport_check_adapter_type(struct displayport_device *displayport) { #ifdef FEATURE_DEX_ADAPTER_TWEAK @@ -2688,6 +2694,7 @@ static void displayport_check_adapter_type(struct displayport_device *displaypor break; }; } +#endif static int usb_typec_displayport_notification(struct notifier_block *nb, unsigned long action, void *data) From 73718796c8ddf5df1fb69ed199d9a22d2a53e76e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 22 Nov 2020 00:49:44 +0300 Subject: [PATCH 419/452] Makefile: don't use -fconserve-stack with CC_OPTIMIZE_FOR_PERFORMANCE* Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index b95984938d47..077587bbcf68 100644 --- a/Makefile +++ b/Makefile @@ -956,7 +956,9 @@ KBUILD_CFLAGS += $(call cc-option,-fmerge-constants) KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) # conserve stack if available +ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) +endif # disallow errors like 'EXPORT_GPL(foo);' with missing header KBUILD_CFLAGS += $(call cc-option,-Werror=implicit-int) From 350bcf72987df4fa2771c60fbd294e6ced0805bb Mon Sep 17 00:00:00 2001 From: Sami Tolvanen <samitolvanen@google.com> Date: Mon, 15 Apr 2019 09:49:56 -0700 Subject: [PATCH 420/452] x86/build/lto: Fix truncated .bss with -fdata-sections [ Upstream commit 6a03469a1edc94da52b65478f1e00837add869a3 ] With CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y, we compile the kernel with -fdata-sections, which also splits the .bss section. The new section, with a new .bss.* name, which pattern gets missed by the main x86 linker script which only expects the '.bss' name. This results in the discarding of the second part and a too small, truncated .bss section and an unhappy, non-working kernel. Use the common BSS_MAIN macro in the linker script to properly capture and merge all the generated BSS sections. Signed-off-by: Sami Tolvanen <samitolvanen@google.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Reviewed-by: Kees Cook <keescook@chromium.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Kees Cook <keescook@chromium.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nick Desaulniers <ndesaulniers@google.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20190415164956.124067-1-samitolvanen@google.com [ Extended the changelog. 
] Signed-off-by: Ingo Molnar <mingo@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org> --- arch/x86/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 2384a2ae5ec3..b2f6e4f3e927 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -352,7 +352,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) - *(.bss) + *(BSS_MAIN) . = ALIGN(PAGE_SIZE); __bss_stop = .; } From 43170d788cbae1bae6569a7e9131ab96289fc11a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin <npiggin@gmail.com> Date: Wed, 9 May 2018 22:59:59 +1000 Subject: [PATCH 421/452] kbuild: LD_DEAD_CODE_DATA_ELIMINATION no -ffunction-sections/-fdata-sections for module build Modules do not tend to cope with -ffunction-sections, even though they do not link with -gc-sections. It may be possible for unused symbols to be trimmed from modules, but in general that would take much more work in architecture module linker scripts. For now, enable these only for kernel build. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 077587bbcf68..c5465a239641 100644 --- a/Makefile +++ b/Makefile @@ -865,8 +865,8 @@ KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once) endif ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION -KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,) -KBUILD_CFLAGS += $(call cc-option,-fdata-sections,) +KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,) +KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,) endif ifdef CONFIG_LTO_CLANG From 836129e3719c2bff65c54796f9a61af256a9d9cd Mon Sep 17 00:00:00 2001 From: Nicholas Piggin <npiggin@gmail.com> Date: Wed, 9 May 2018 23:00:00 +1000 Subject: [PATCH 422/452] kbuild: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selectable if enabled Architectures that are capable can select HAVE_LD_DEAD_CODE_DATA_ELIMINATION to enable selection of that option (as an EXPERT kernel option). Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- arch/Kconfig | 15 --------------- init/Kconfig | 27 +++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index eb9975a67673..c6caaeffc124 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -596,21 +596,6 @@ config THIN_ARCHIVES Select this if the architecture wants to use thin archives instead of ld -r to create the built-in.o files. -config LD_DEAD_CODE_DATA_ELIMINATION - bool - help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections and linking with - --gc-sections. - - This requires that the arch annotates or otherwise protects - its external entry points from being discarded. Linker scripts - must also merge .text.*, .data.*, and .bss.* correctly into - output sections. Care must be taken not to pull in unrelated - sections (e.g., '.text.init'). Typically '.' in section names - is used to distinguish them from label names / C identifiers. 
- config LTO def_bool n diff --git a/init/Kconfig b/init/Kconfig index c3c5710a9a47..6258b47e72b8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1176,6 +1176,33 @@ config CC_OPTIMIZE_FOR_SIZE endchoice +config HAVE_LD_DEAD_CODE_DATA_ELIMINATION + bool + help + This requires that the arch annotates or otherwise protects + its external entry points from being discarded. Linker scripts + must also merge .text.*, .data.*, and .bss.* correctly into + output sections. Care must be taken not to pull in unrelated + sections (e.g., '.text.init'). Typically '.' in section names + is used to distinguish them from label names / C identifiers. + +config LD_DEAD_CODE_DATA_ELIMINATION + bool "Dead code and data elimination (EXPERIMENTAL)" + depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION + depends on EXPERT + help + Select this if the architecture wants to do dead code and + data elimination with the linker by compiling with + -ffunction-sections -fdata-sections, and linking with + --gc-sections. + + This can reduce on disk and in-memory size of the kernel + code and static data, particularly for small configs and + on small systems. This has the possibility of introducing + silently broken kernel if the required annotations are not + present. This option is not well tested yet, so use at your + own risk. + config SYSCTL bool From 219620f7db6ff00e6ffc315dcd18cff11af632b4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Sun, 24 Jun 2018 01:41:51 +0900 Subject: [PATCH 423/452] kbuild: reword help of LD_DEAD_CODE_DATA_ELIMINATION Since commit 5d20ee3192a5 ("kbuild: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selectable if enabled"), HAVE_LD_DEAD_CODE_DATA_ELIMINATION is supposed to be selected by architectures that are capable of this functionality. LD_DEAD_CODE_DATA_ELIMINATION is now users' selection. Update the help message. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- init/Kconfig | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 6258b47e72b8..e8ca1428f6b8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1191,10 +1191,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections, and linking with - --gc-sections. + Enable this if you want to do dead code and data elimination with + the linker by compiling with -ffunction-sections -fdata-sections, + and linking with --gc-sections. 
This can reduce on disk and in-memory size of the kernel code and static data, particularly for small configs and From 8ffba11a1cfad6c029def3e1a525492c8524152a Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sat, 21 Nov 2020 21:30:04 +0300 Subject: [PATCH 424/452] arm64: allow LD_DEAD_CODE_DATA_ELIMINATION Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 84166a9b7c05..94683e02421d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -98,6 +98,7 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP if NUMA select HAVE_NMI if ACPI_APEI_SEA From d10c724b3ed8aa5c561b91d95c33480993a87924 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin <npiggin@gmail.com> Date: Wed, 9 May 2018 22:59:58 +1000 Subject: [PATCH 425/452] kbuild: Fix asm-generic/vmlinux.lds.h for LD_DEAD_CODE_DATA_ELIMINATION KEEP more tables, and add the function/data section wildcard to more section selections. This is a little ad-hoc at the moment, but kernel code should be moved to consistently use .text..x (note: double dots) for explicit sections and all references to it in the linker script can be made with TEXT_MAIN, and similarly for other sections. For now, let's see if major architectures move to enabling this option then we can do some refactoring passes. Otherwise if it remains unused or superseded by LTO, this may not be required. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- include/asm-generic/vmlinux.lds.h | 39 +++++++++++++++++++------------ 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 1489c85241f6..12b7fcca75f9 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -64,17 +64,26 @@ * generates .data.identifier sections, which need to be pulled in with * .data. We don't want to pull in .data..other sections, which Linux * has defined. Same for text and bss. + * + * RODATA_MAIN is not used because existing code already defines .rodata.x + * sections to be brought in with rodata. 
*/ #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi #define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* +#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text #define TEXT_CFI_MAIN .text.cfi #define DATA_MAIN .data +#define SDATA_MAIN .sdata +#define RODATA_MAIN .rodata #define BSS_MAIN .bss +#define SBSS_MAIN .sbss #endif /* @@ -115,7 +124,7 @@ #ifdef CONFIG_TRACE_BRANCH_PROFILING #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \ - *(_ftrace_annotated_branch) \ + KEEP(*(_ftrace_annotated_branch)) \ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .; #else #define LIKELY_PROFILE() @@ -123,7 +132,7 @@ #ifdef CONFIG_PROFILE_ALL_BRANCHES #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \ - *(_ftrace_branch) \ + KEEP(*(_ftrace_branch)) \ VMLINUX_SYMBOL(__stop_branch_profile) = .; #else #define BRANCH_PROFILE() @@ -222,8 +231,8 @@ *(DATA_MAIN) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ - MEM_KEEP(init.data) \ - MEM_KEEP(exit.data) \ + MEM_KEEP(init.data*) \ + MEM_KEEP(exit.data*) \ *(.data.unlikely) \ STRUCT_ALIGN(); \ *(__tracepoints) \ @@ -267,7 +276,7 @@ #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ VMLINUX_SYMBOL(__start_init_task) = .; \ - *(.data..init_task) \ + KEEP(*(.data..init_task)) \ VMLINUX_SYMBOL(__end_init_task) = .; /* @@ -526,8 +535,8 @@ *(.text..ftrace) \ *(TEXT_CFI_MAIN) \ *(.ref.text) \ - MEM_KEEP(init.text) \ - MEM_KEEP(exit.text) \ + MEM_KEEP(init.text*) \ + MEM_KEEP(exit.text*) \ /* sched.text is aling to function alignment to secure we have same @@ -618,8 +627,8 @@ /* init and exit section handling */ #define INIT_DATA \ KEEP(*(SORT(___kentry+*))) \ - *(.init.data) \ - MEM_DISCARD(init.data) \ + *(.init.data init.data.*) \ + MEM_DISCARD(init.data*) \ KERNEL_CTORS() \ MCOUNT_REC() \ *(.init.rodata .init.rodata.*) \ @@ -643,14 +652,14 @@ #define INIT_TEXT \ *(.init.text .init.text.*) \ *(.text.startup) \ - MEM_DISCARD(init.text) + MEM_DISCARD(init.text*) #define EXIT_DATA \ - *(.exit.data) \ + *(.exit.data .exit.data.*) \ *(.fini_array) \ *(.dtors) \ - MEM_DISCARD(exit.data) \ - MEM_DISCARD(exit.rodata) + MEM_DISCARD(exit.data*) \ + MEM_DISCARD(exit.rodata*) #define EXIT_TEXT \ *(.exit.text) \ @@ -668,7 +677,7 @@ . = ALIGN(sbss_align); \ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ *(.dynsbss) \ - *(.sbss) \ + *(SBSS_MAIN) \ *(.scommon) \ } @@ -806,7 +815,7 @@ #define NOTES \ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_notes) = .; \ - *(.note.*) \ + KEEP(*(.note.*)) \ VMLINUX_SYMBOL(__stop_notes) = .; \ } From 8e780ce892e32f0e1eb896592fcb9387836198b5 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter <oberpar@linux.ibm.com> Date: Thu, 13 Sep 2018 13:00:00 +0200 Subject: [PATCH 426/452] vmlinux.lds.h: Fix linker warnings about orphan .LPBX sections Enabling both CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y and CONFIG_GCOV_PROFILE_ALL=y results in linker warnings: warning: orphan section `.data..LPBX1' being placed in section `.data..LPBX1'. LD_DEAD_CODE_DATA_ELIMINATION adds compiler flag -fdata-sections. This option causes GCC to create separate data sections for data objects, including those generated by GCC internally for gcov profiling. 
The names of these objects start with a dot (.LPBX0, .LPBX1), resulting in section names starting with 'data..'. As section names starting with 'data..' are used for specific purposes in the Linux kernel, the linker script does not automatically include them in the output data section, resulting in the "orphan section" linker warnings. Fix this by specifically including sections named "data..LPBX*" in the data section. Reported-by: Stephen Rothwell <sfr@canb.auug.org.au> Tested-by: Stephen Rothwell <sfr@canb.auug.org.au> Tested-by: Arnd Bergmann <arnd@arndb.de> Acked-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com> Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au> --- include/asm-generic/vmlinux.lds.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 12b7fcca75f9..803d16157b42 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -71,7 +71,7 @@ #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* From b01c9c1eeaef3b0814467d41ea7e0345ddf05f22 Mon Sep 17 00:00:00 2001 From: Danny Lin <danny@kdrag0n.dev> Date: Thu, 2 Jul 2020 16:27:13 -0700 Subject: [PATCH 427/452] vmlinux.lds.h: Coalesce transient LLVM dead code elimination sections A recent LLVM 11 commit [1] made LLD stop implicitly coalescing some temporary LLVM sections, namely .{data,bss}..compoundliteral.XXX: [30] .data..compoundli PROGBITS ffffffff9ac9a000 19e9a000 000000000000cea0 0000000000000000 WA 0 0 32 [31] .rela.data..compo RELA 0000000000000000 40965440 0000000000001d88 0000000000000018 I 2238 30 8 [32] .data..compoundli PROGBITS ffffffff9aca6ea0 19ea6ea0 00000000000033c0 0000000000000000 WA 0 0 32 [33] .rela.data..compo RELA 0000000000000000 409671c8 0000000000000948 0000000000000018 I 2238 32 8 [...] [2213] .bss..compoundlit NOBITS ffffffffa3000000 1d85c000 00000000000000a0 0000000000000000 WA 0 0 32 [2214] .bss..compoundlit NOBITS ffffffffa30000a0 1d85c000 0000000000000040 0000000000000000 WA 0 0 32 [...] .{data,bss}..L<symbol name> sections are also created in some cases. While there aren't any in this example, they should also be coalesced to be safe in case some config or future LLVM change makes it start creating more of those sections in the future. For example, enabling global merging causes ..L_MergedGlobals sections to be created, but it's likely that other changes will result in such sections as well. While these extra sections don't typically cause any breakage, they do inflate the vmlinux size due to the overhead of storing metadata for thousands of extra sections. It's also worth noting that for some reason, some downstream Android kernels can't boot at all if these sections aren't coalesced. This issue isn't limited to any specific architecture; it affects arm64 and x86 if CONFIG_LD_DEAD_CODE_DATA_ELIMINATION is forced on. 
Example on x86 allyesconfig: Before: 2241 sections, 1171169 KiB After: 56 sections, 1170972 KiB [1] https://github.com/llvm/llvm-project/commit/9e33c096476ab5e02ab1c8442cc3cb4e32e29f17 Suggested-by: Fangrui Song <maskray@google.com> Signed-off-by: Danny Lin <danny@kdrag0n.dev> Reviewed-by: Nathan Chancellor <natechancellor@gmail.com> Cc: stable@vger.kernel.org # v4.4+ Link: https://github.com/ClangBuiltLinux/linux/issues/958 --- include/asm-generic/vmlinux.lds.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 803d16157b42..ebb89377e870 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -72,9 +72,12 @@ #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* #define TEXT_CFI_MAIN .text.cfi .text.[0-9a-zA-Z_]*.cfi #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* \ + .data..compoundliteral* .data..L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* .bss..L* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text From 5853d7189beb90f0a06c0f8d5dadd7052d172c30 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen <samitolvanen@google.com> Date: Tue, 17 Dec 2019 11:53:52 -0800 Subject: [PATCH 428/452] ANDROID: kbuild: disable clang-specific configs with other compilers cuttlefish_defconfig explicitly enables options that fail to compile with compilers other than clang. This change detects when a different compiler is used and disables clang-specific features after printing a warning. Bug: 145297810 Change-Id: I3371576b45c9715a63c5668ab58e996cab612f53 Signed-off-by: Sami Tolvanen <samitolvanen@google.com> --- Makefile | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Makefile b/Makefile index c5465a239641..857dc4f00308 100644 --- a/Makefile +++ b/Makefile @@ -641,6 +641,8 @@ ifeq ($(dot-config),1) -include include/config/auto.conf ifeq ($(KBUILD_EXTMOD),) +include/config/auto.conf.cmd: check-clang-specific-options + # Read in dependencies to all Kconfig* files, make sure to run # oldconfig if changes are detected. -include include/config/auto.conf.cmd @@ -1268,6 +1270,22 @@ else endif endif +# Disable clang-specific config options when using a different compiler +clang-specific-configs := LTO_CLANG CFI_CLANG SHADOW_CALL_STACK + +PHONY += check-clang-specific-options +check-clang-specific-options: $(KCONFIG_CONFIG) FORCE +ifneq ($(cc-name),clang) +ifneq ($(findstring y,$(shell $(CONFIG_SHELL) \ + $(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(clang-specific-configs),-s $(c)))),) + @echo WARNING: Disabling clang-specific options with $(cc-name) >&2 + $(Q)$(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(clang-specific-configs),-d $(c)) && \ + $(MAKE) -f $(srctree)/Makefile olddefconfig +endif +endif + # Check for CONFIG flags that require compiler support. Abort the build # after .config has been processed, but before the kernel build starts. 
# From a1455851f5a6c7ea6e37399a6160fd4195eb48e3 Mon Sep 17 00:00:00 2001 From: Danny Lin <danny@kdrag0n.dev> Date: Wed, 31 Jul 2019 22:26:08 -0700 Subject: [PATCH 429/452] kbuild: Add support for LLVM's Polly optimizer This adds support for compiling the kernel with optimizations offered by LLVM's polyhedral loop optimizer known as Polly, which can improve performance by improving cache locality in loops. Note that LLVM is not compiled with Polly by default -- it must be enabled explicitly. Signed-off-by: Danny Lin <danny@kdrag0n.dev> --- Makefile | 13 +++++++++++++ arch/Kconfig | 8 ++++++++ 2 files changed, 21 insertions(+) diff --git a/Makefile b/Makefile index 857dc4f00308..b9c251489405 100644 --- a/Makefile +++ b/Makefile @@ -738,6 +738,19 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os endif +ifeq ($(cc-name),clang) +ifdef CONFIG_LLVM_POLLY +KBUILD_CFLAGS += -mllvm -polly \ + -mllvm -polly-run-dce \ + -mllvm -polly-run-inliner \ + -mllvm -polly-opt-fusion=max \ + -mllvm -polly-ast-use-context \ + -mllvm -polly-detect-keep-going \ + -mllvm -polly-vectorizer=stripmine \ + -mllvm -polly-invariant-load-hoisting +endif +endif + # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) diff --git a/arch/Kconfig b/arch/Kconfig index c6caaeffc124..3db1e7a13335 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -664,6 +664,14 @@ config CFI_CLANG_SHADOW If you select this option, the kernel builds a fast look-up table of CFI check functions in loaded modules to reduce overhead. +config LLVM_POLLY + bool "Enable LLVM's polyhedral loop optimizer (Polly)" + help + This option enables LLVM's polyhedral loop optimizer known as Polly. + Polly is able to optimize various loops throughout the kernel for + maximum cache locality. This requires an LLVM toolchain explicitly + compiled with Polly support. + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help From bf617325fc74766bdbf7285e2425e412bd17343c Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 25 Nov 2020 16:53:16 +0300 Subject: [PATCH 430/452] kbuild: Add support for GCC's graphite optimizer Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 4 ++++ arch/Kconfig | 3 +++ 2 files changed, 7 insertions(+) diff --git a/Makefile b/Makefile index b9c251489405..3c1f63666619 100644 --- a/Makefile +++ b/Makefile @@ -749,6 +749,10 @@ KBUILD_CFLAGS += -mllvm -polly \ -mllvm -polly-vectorizer=stripmine \ -mllvm -polly-invariant-load-hoisting endif +else ifeq ($(cc-name),gcc) +ifdef CONFIG_GCC_GRAPHITE +KBUILD_CFLAGS += -fgraphite-identity +endif endif # Tell gcc to never replace conditional load with a non-conditional one diff --git a/arch/Kconfig b/arch/Kconfig index 3db1e7a13335..94802f909d95 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -672,6 +672,9 @@ config LLVM_POLLY maximum cache locality. This requires an LLVM toolchain explicitly compiled with Polly support. +config GCC_GRAPHITE + bool "Enable GCC's optimizations using the polyhedral model (Graphite)" + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help From 2f14cd54303fa137a198fb43965abe633fef9909 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada <yamada.masahiro@socionext.com> Date: Fri, 29 May 2020 10:52:09 -0700 Subject: [PATCH 431/452] BACKPORT: arm64: vdso: use $(LD) instead of $(CC) to link VDSO We use $(LD) to link vmlinux, modules, decompressors, etc. 
VDSO is the only exceptional case where $(CC) is used as the linker driver, but I do not know why we need to do so. VDSO uses a special linker script, and does not link standard libraries at all. I changed the Makefile to use $(LD) rather than $(CC). I tested this, and VDSO worked for me. Users will be able to use their favorite linker (e.g. lld instead of of bfd) by passing LD= from the command line. My plan is to rewrite all VDSO Makefiles to use $(LD), then delete cc-ldoption. Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Change-Id: I8a14d6dd51d46b6942e68720e24217d1564b7869 [nd: conflicts due to ANDROID patches for LTO and SCS] (cherry picked from commit 691efbedc60d2a7364a90e38882fc762f06f52c4) Bug: 153418016 Bug: 157279372 Signed-off-by: Nick Desaulniers <ndesaulniers@google.com> (cherry picked from commit 64ea9b4b072b37bd624dd98b963161fd22c1be34) --- arch/arm64/kernel/vdso/Makefile | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index ef3f9d9d4062..83cf80d64add 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -12,18 +12,13 @@ obj-vdso := gettimeofday.o note.o sigreturn.o targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ccflags-y := -shared -fno-common -fno-builtin -ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \ + $(call ld-option, --hash-style=sysv) -n -T ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code GCOV_PROFILE := n -# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared -# down to collect2, resulting in silent corruption of the vDSO image. -ccflags-y += -Wl,-shared - obj-y += vdso.o extra-y += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) @@ -33,7 +28,7 @@ $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) - $(call if_changed,vdsold) + $(call if_changed,ld) # Strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -55,8 +50,6 @@ $(obj-vdso): %.o: %.S FORCE $(call if_changed_dep,vdsoas) # Actual build commands -quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< From 64e0f1faf93ef8e1a14e056be3ec8709699ec02e Mon Sep 17 00:00:00 2001 From: Laura Abbott <labbott@redhat.com> Date: Fri, 29 May 2020 10:54:25 -0700 Subject: [PATCH 432/452] BACKPORT: arm64: vdso: Explicitly add build-id option Commit 691efbedc60d ("arm64: vdso: use $(LD) instead of $(CC) to link VDSO") switched to using LD explicitly. The --build-id option needs to be passed explicitly, similar to x86. Add this option. 
Fixes: 691efbedc60d ("arm64: vdso: use $(LD) instead of $(CC) to link VDSO") Reviewed-by: Masahiro Yamada <yamada.masahiro@socionext.com> Signed-off-by: Laura Abbott <labbott@redhat.com> [will: drop redundant use of 'call ld-option' as requested by Masahiro] Signed-off-by: Will Deacon <will.deacon@arm.com> Change-Id: I4a0f5c1bb60bda682221a7ff96a783bf8731cc00 [nd: conflict due to ANDROID LTO and CFI] (cherry picked from commit 7a0a93c51799edc45ee57c6cc1679aa94f1e03d5) Bug: 153418016 Bug: 157279372 Signed-off-by: Nick Desaulniers <ndesaulniers@google.com> (cherry picked from commit a9ee8bba814d956404c12b1c2e2c24cf4b710f08) --- arch/arm64/kernel/vdso/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 83cf80d64add..01d6f496d1eb 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \ - $(call ld-option, --hash-style=sysv) -n -T +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ + --build-id -n -T ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code From 8fd6c411fefe6ad1b2cdafe6c486a79eb3f36d36 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers <ndesaulniers@google.com> Date: Thu, 4 Jun 2020 11:49:55 -0700 Subject: [PATCH 433/452] ANDROID: arm64: vdso: wrap -n in ld-option ld.lld distributed in clang-r353983c AOSP LLVM release (the final AOSP LLVM release for Android Q) did not support `-n` linker flag. It was eventually added to clang-r360593. Android OEM's may wish to still use ld.lld to link their kernels for Q. This flag was disabled for Pixel 4 in go/pag/1258086. This patch is equivalent, but rebased on upstream changes that removed cc-ldoption in favor of ld-option. For Android R, the final AOSP LLVM release, clang-r383902 has long supported `-n` for ld.lld. Change-Id: Iab41c9e1039e163113b428fc487a4a0708822faa Bug: 63740206 Bug: 157279372 Link: https://github.com/ClangBuiltLinux/linux/issues/340 Link: https://bugs.llvm.org/show_bug.cgi?id=40542 Signed-off-by: Nick Desaulniers <ndesaulniers@google.com> --- arch/arm64/kernel/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 01d6f496d1eb..b2c1695c4d4c 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -13,7 +13,7 @@ targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ - --build-id -n -T + --build-id $(call ld-option,-n) -T ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code From 240f52e60bd0316ff906b21fc670e7c4c8e9d11c Mon Sep 17 00:00:00 2001 From: Wei Wang <wvw@google.com> Date: Fri, 16 Mar 2018 11:25:08 -0700 Subject: [PATCH 434/452] mm: add config for readahead window Change VM_MAX_READAHEAD value from the default 128KB to a configurable value. This will allow the readahead window to grow to a maximum size bigger than 128KB during boot, which could benefit to sequential read throughput and thus boot performance. 
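For a sense of scale, a minimal sketch of how the configured value translates into pages, assuming a 4 KiB page size (illustrative helper only, not code from mm/readahead.c):

    /* VM_MAX_READAHEAD now comes from CONFIG_VM_MAX_READAHEAD_KB, in KiB. */
    static inline unsigned long max_readahead_pages(void)
    {
            return VM_MAX_READAHEAD / (PAGE_SIZE / 1024);
    }

With the default of 128 this stays at 32 pages; a device that sets the new Kconfig option to 512 gets a 128-page maximum window instead.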
Signed-off-by: Wei Wang <wvw@google.com> --- include/linux/mm.h | 2 +- mm/Kconfig | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 1f3df994812f..cf7774a0d808 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2247,7 +2247,7 @@ int __must_check write_one_page(struct page *page); void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ -#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MAX_READAHEAD CONFIG_VM_MAX_READAHEAD_KB #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ extern int mmap_readaround_limit; diff --git a/mm/Kconfig b/mm/Kconfig index c5dd9c30fd6f..29e60847251b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -764,6 +764,14 @@ config PERCPU_STATS information includes global and per chunk statistics, which can be used to help understand percpu memory usage. +config VM_MAX_READAHEAD_KB + int "Default max readahead window size in Kilobytes" + default 128 + help + This sets the VM_MAX_READAHEAD value to allow the readahead window + to grow to a maximum size of configured. Increasing this value will + benefit sequential read throughput. + config INCREASE_MAXIMUM_SWAPPINESS bool "Allow swappiness to be set up to 200" depends on SWAP From 1b5745cfcef21b6f421535bbcaf57aeadf11bc69 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen <samitolvanen@google.com> Date: Wed, 29 Jan 2020 16:04:42 -0800 Subject: [PATCH 435/452] ANDROID: kallsyms: strip hashes from function names with ThinLTO With CONFIG_THINLTO and CFI both enabled, LLVM appends a hash to the names of all static functions. This breaks userspace tools, so strip out the hash from output. Bug: 147422318 Change-Id: Ibea6be089d530e92dcd191481cb02549041203f6 Signed-off-by: Sami Tolvanen <samitolvanen@google.com> --- kernel/kallsyms.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index f8674e39e770..ea06361d6bfc 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -352,6 +352,12 @@ static inline void cleanup_symbol_name(char *s) { char *res; +#ifdef CONFIG_THINLTO + /* Filter out hashes from static functions */ + res = strrchr(s, '$'); + if (res) + *res = '\0'; +#endif res = strrchr(s, '.'); if (res && !strcmp(res, ".cfi")) *res = '\0'; From c544775fdbfbd0b5b31409ba45199225b637a731 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 7 Oct 2020 01:48:39 +0300 Subject: [PATCH 436/452] arm64/boot/dts: remove verify,avb fsmgr_flags Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi | 6 +++--- arch/arm64/boot/dts/exynos/exynos9820.dts | 6 +++--- arch/arm64/boot/dts/exynos/exynos9820_evt0.dts | 6 +++--- arch/arm64/boot/dts/exynos/exynos9825.dts | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi b/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi index d42ad40e1083..034e24a0da28 100644 --- a/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos9810-crownlte_common.dtsi @@ -66,21 +66,21 @@ dev = "/dev/block/platform/11120000.ufs/by-name/SYSTEM"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait"; }; vendor { compatible = "android,vendor"; dev = "/dev/block/platform/11120000.ufs/by-name/VENDOR"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait"; }; odm { compatible = "android,odm"; dev = 
"/dev/block/platform/11120000.ufs/by-name/ODM"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait"; }; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos9820.dts b/arch/arm64/boot/dts/exynos/exynos9820.dts index ea6d8b8087ae..dd71b2d8e253 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820.dts +++ b/arch/arm64/boot/dts/exynos/exynos9820.dts @@ -5318,7 +5318,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/system"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait, avb"; + fsmgr_flags = "wait"; status = "disabled"; }; @@ -5327,7 +5327,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/vendor"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; @@ -5336,7 +5336,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/product"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts b/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts index 138af210d6d2..c9bec066f2e1 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts +++ b/arch/arm64/boot/dts/exynos/exynos9820_evt0.dts @@ -251,7 +251,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/system"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait, avb"; + fsmgr_flags = "wait"; status = "disabled"; }; vendor { @@ -259,7 +259,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/vendor"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; product { @@ -267,7 +267,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/product"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; }; diff --git a/arch/arm64/boot/dts/exynos/exynos9825.dts b/arch/arm64/boot/dts/exynos/exynos9825.dts index 1ee08b451abb..edfc49362474 100644 --- a/arch/arm64/boot/dts/exynos/exynos9825.dts +++ b/arch/arm64/boot/dts/exynos/exynos9825.dts @@ -5318,7 +5318,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/system"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait, avb"; + fsmgr_flags = "wait"; status = "disabled"; }; @@ -5327,7 +5327,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/vendor"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; @@ -5336,7 +5336,7 @@ dev = "/dev/block/platform/13d60000.ufs/by-name/product"; type = "ext4"; mnt_flags = "ro"; - fsmgr_flags = "wait,verify,avb"; + fsmgr_flags = "wait"; status = "okay"; }; }; From f90cee72c65bd3c96aca8bb5c53d921471bd39ed Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 Nov 2020 13:56:54 +0300 Subject: [PATCH 437/452] arm64/boot/dts: disable vbmeta Signed-off-by: Denis Efremov <efremov@linux.com> --- arch/arm64/boot/dts/exynos/exynos9820.dts | 5 ----- arch/arm64/boot/dts/exynos/exynos9825.dts | 5 ----- 2 files changed, 10 deletions(-) diff --git a/arch/arm64/boot/dts/exynos/exynos9820.dts b/arch/arm64/boot/dts/exynos/exynos9820.dts index dd71b2d8e253..51114b10a57d 100644 --- a/arch/arm64/boot/dts/exynos/exynos9820.dts +++ b/arch/arm64/boot/dts/exynos/exynos9820.dts @@ -5305,11 +5305,6 @@ android { compatible = "android,firmware"; - vbmeta { - compatible = "android,vbmeta"; - parts = "vbmeta,boot,recovery,system,vendor,product,dtb,dtbo,keystorage"; - }; - fstab { compatible = "android,fstab"; diff --git a/arch/arm64/boot/dts/exynos/exynos9825.dts 
b/arch/arm64/boot/dts/exynos/exynos9825.dts index edfc49362474..eefc4a0890da 100644 --- a/arch/arm64/boot/dts/exynos/exynos9825.dts +++ b/arch/arm64/boot/dts/exynos/exynos9825.dts @@ -5305,11 +5305,6 @@ android { compatible = "android,firmware"; - vbmeta { - compatible = "android,vbmeta"; - parts = "vbmeta,boot,recovery,system,vendor,product,dtb,dtbo,keystorage"; - }; - fstab { compatible = "android,fstab"; From afd1271376350f9be23a10c471aeedfd0551413c Mon Sep 17 00:00:00 2001 From: Andi Kleen <ak@linux.intel.com> Date: Sat, 8 Apr 2017 17:37:26 -0700 Subject: [PATCH 438/452] afs: Fix const confusion in AFS A trace point string cannot be const because the underlying special section is not marked const. An LTO build complains about the section attribute mismatch. Fix it by not marking the trace point string in afs const. Cc: dhowells@redhat.com Signed-off-by: Andi Kleen <ak@linux.intel.com> --- fs/afs/cmservice.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index c7475867a52b..fdcda968d4b6 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -31,7 +31,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *); static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); #define CM_NAME(name) \ - const char afs_SRXCB##name##_name[] __tracepoint_string = \ + char afs_SRXCB##name##_name[] __tracepoint_string = \ "CB." #name /* From f87404c7a3d367dd9014966d03417beaea62ac9c Mon Sep 17 00:00:00 2001 From: Andi Kleen <ak@linux.intel.com> Date: Fri, 10 Aug 2012 03:11:20 +0200 Subject: [PATCH 439/452] locking/spinlocks: Mark spinlocks noinline when inline spinlocks are disabled Otherwise LTO will inline them anyways and cause a large kernel text increase. Since the explicit intention here is to not inline them marking them noinline is good documentation even for the non LTO case. 
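A stand-alone sketch of the effect, with made-up names rather than the real lock functions:

    #define noinline __attribute__((noinline))

    static int counter;

    noinline void slow_path(void)   /* stays out of line even under -flto */
    {
            counter++;
    }

    void fast_path(void)
    {
            /* Without the attribute an LTO build is free to inline
             * slow_path() into every caller, duplicating its body and
             * growing .text; with it there is one shared copy, exactly
             * as in a non-LTO build.
             */
            slow_path();
    }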
Signed-off-by: Andi Kleen <ak@linux.intel.com> --- kernel/locking/spinlock.c | 56 +++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 6e40fdfba326..f32ba58fcd70 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -131,7 +131,7 @@ BUILD_LOCK_OPS(write, rwlock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK -int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) +noinline int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { return __raw_spin_trylock(lock); } @@ -139,7 +139,7 @@ EXPORT_SYMBOL(_raw_spin_trylock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH -int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) +noinline int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) { return __raw_spin_trylock_bh(lock); } @@ -147,7 +147,7 @@ EXPORT_SYMBOL(_raw_spin_trylock_bh); #endif #ifndef CONFIG_INLINE_SPIN_LOCK -void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { __raw_spin_lock(lock); } @@ -155,7 +155,7 @@ EXPORT_SYMBOL(_raw_spin_lock); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) +noinline unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) { return __raw_spin_lock_irqsave(lock); } @@ -163,7 +163,7 @@ EXPORT_SYMBOL(_raw_spin_lock_irqsave); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ -void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) { __raw_spin_lock_irq(lock); } @@ -171,7 +171,7 @@ EXPORT_SYMBOL(_raw_spin_lock_irq); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_BH -void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) { __raw_spin_lock_bh(lock); } @@ -179,7 +179,7 @@ EXPORT_SYMBOL(_raw_spin_lock_bh); #endif #ifdef CONFIG_UNINLINE_SPIN_UNLOCK -void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { __raw_spin_unlock(lock); } @@ -187,7 +187,7 @@ EXPORT_SYMBOL(_raw_spin_unlock); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) +noinline void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { __raw_spin_unlock_irqrestore(lock, flags); } @@ -195,7 +195,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ -void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) { __raw_spin_unlock_irq(lock); } @@ -203,7 +203,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_irq); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH -void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) +noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) { __raw_spin_unlock_bh(lock); } @@ -211,7 +211,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif #ifndef CONFIG_INLINE_READ_TRYLOCK -int __lockfunc _raw_read_trylock(rwlock_t *lock) +noinline int __lockfunc _raw_read_trylock(rwlock_t *lock) { return __raw_read_trylock(lock); } @@ -219,7 +219,7 @@ EXPORT_SYMBOL(_raw_read_trylock); #endif #ifndef CONFIG_INLINE_READ_LOCK -void __lockfunc _raw_read_lock(rwlock_t *lock) +noinline void __lockfunc _raw_read_lock(rwlock_t *lock) { __raw_read_lock(lock); } @@ -227,7 +227,7 @@ EXPORT_SYMBOL(_raw_read_lock); #endif #ifndef 
CONFIG_INLINE_READ_LOCK_IRQSAVE -unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) +noinline unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) { return __raw_read_lock_irqsave(lock); } @@ -235,7 +235,7 @@ EXPORT_SYMBOL(_raw_read_lock_irqsave); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQ -void __lockfunc _raw_read_lock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_read_lock_irq(rwlock_t *lock) { __raw_read_lock_irq(lock); } @@ -243,7 +243,7 @@ EXPORT_SYMBOL(_raw_read_lock_irq); #endif #ifndef CONFIG_INLINE_READ_LOCK_BH -void __lockfunc _raw_read_lock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_read_lock_bh(rwlock_t *lock) { __raw_read_lock_bh(lock); } @@ -251,7 +251,7 @@ EXPORT_SYMBOL(_raw_read_lock_bh); #endif #ifndef CONFIG_INLINE_READ_UNLOCK -void __lockfunc _raw_read_unlock(rwlock_t *lock) +noinline void __lockfunc _raw_read_unlock(rwlock_t *lock) { __raw_read_unlock(lock); } @@ -259,7 +259,7 @@ EXPORT_SYMBOL(_raw_read_unlock); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +noinline void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { __raw_read_unlock_irqrestore(lock, flags); } @@ -267,7 +267,7 @@ EXPORT_SYMBOL(_raw_read_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ -void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) { __raw_read_unlock_irq(lock); } @@ -275,7 +275,7 @@ EXPORT_SYMBOL(_raw_read_unlock_irq); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_BH -void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) { __raw_read_unlock_bh(lock); } @@ -283,7 +283,7 @@ EXPORT_SYMBOL(_raw_read_unlock_bh); #endif #ifndef CONFIG_INLINE_WRITE_TRYLOCK -int __lockfunc _raw_write_trylock(rwlock_t *lock) +noinline int __lockfunc _raw_write_trylock(rwlock_t *lock) { return __raw_write_trylock(lock); } @@ -291,7 +291,7 @@ EXPORT_SYMBOL(_raw_write_trylock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK -void __lockfunc _raw_write_lock(rwlock_t *lock) +noinline void __lockfunc _raw_write_lock(rwlock_t *lock) { __raw_write_lock(lock); } @@ -299,7 +299,7 @@ EXPORT_SYMBOL(_raw_write_lock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) +noinline unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) { return __raw_write_lock_irqsave(lock); } @@ -307,7 +307,7 @@ EXPORT_SYMBOL(_raw_write_lock_irqsave); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ -void __lockfunc _raw_write_lock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_write_lock_irq(rwlock_t *lock) { __raw_write_lock_irq(lock); } @@ -315,7 +315,7 @@ EXPORT_SYMBOL(_raw_write_lock_irq); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_BH -void __lockfunc _raw_write_lock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_write_lock_bh(rwlock_t *lock) { __raw_write_lock_bh(lock); } @@ -323,7 +323,7 @@ EXPORT_SYMBOL(_raw_write_lock_bh); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK -void __lockfunc _raw_write_unlock(rwlock_t *lock) +noinline void __lockfunc _raw_write_unlock(rwlock_t *lock) { __raw_write_unlock(lock); } @@ -331,7 +331,7 @@ EXPORT_SYMBOL(_raw_write_unlock); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +noinline void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { 
__raw_write_unlock_irqrestore(lock, flags); } @@ -339,7 +339,7 @@ EXPORT_SYMBOL(_raw_write_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ -void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) +noinline void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) { __raw_write_unlock_irq(lock); } @@ -347,7 +347,7 @@ EXPORT_SYMBOL(_raw_write_unlock_irq); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH -void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) +noinline void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) { __raw_write_unlock_bh(lock); } From e7edf0b86302e5b52e47463bc48e2e36d270e30a Mon Sep 17 00:00:00 2001 From: Andi Kleen <ak@linux.intel.com> Date: Thu, 23 Nov 2017 16:06:26 -0800 Subject: [PATCH 440/452] Fix read buffer overflow in delta-ipc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The single caller passes a string to delta_ipc_open, which copies with a fixed size larger than the string. So it copies some random data after the original string the ro segment. If the string was at the end of a page it may fault. Just copy the string with a normal strcpy after clearing the field. Found by a LTO build (which errors out) because the compiler inlines the functions and can resolve the string sizes and triggers the compile time checks in memcpy. In function ‘memcpy’, inlined from ‘delta_ipc_open.constprop’ at linux/drivers/media/platform/sti/delta/delta-ipc.c:178:0, inlined from ‘delta_mjpeg_ipc_open’ at linux/drivers/media/platform/sti/delta/delta-mjpeg-dec.c:227:0, inlined from ‘delta_mjpeg_decode’ at linux/drivers/media/platform/sti/delta/delta-mjpeg-dec.c:403:0: /home/andi/lsrc/linux/include/linux/string.h:337:0: error: call to ‘__read_overflow2’ declared with attribute error: detected read beyond size of object passed as 2nd parameter __read_overflow2(); Cc: hugues.fruchet@st.com Cc: mchehab@s-opensource.com Signed-off-by: Andi Kleen <ak@linux.intel.com> --- drivers/media/platform/sti/delta/delta-ipc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/sti/delta/delta-ipc.c index 41e4a4c259b3..b6c256e3ceb6 100644 --- a/drivers/media/platform/sti/delta/delta-ipc.c +++ b/drivers/media/platform/sti/delta/delta-ipc.c @@ -175,8 +175,8 @@ int delta_ipc_open(struct delta_ctx *pctx, const char *name, msg.ipc_buf_size = ipc_buf_size; msg.ipc_buf_paddr = ctx->ipc_buf->paddr; - memcpy(msg.name, name, sizeof(msg.name)); - msg.name[sizeof(msg.name) - 1] = 0; + memset(msg.name, 0, sizeof(msg.name)); + strcpy(msg.name, name); msg.param_size = param->size; memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size); From de36f87d39428595f00e692ada95b3dc31d2528f Mon Sep 17 00:00:00 2001 From: Andi Kleen <ak@linux.intel.com> Date: Tue, 31 Mar 2015 03:45:39 -0700 Subject: [PATCH 441/452] lto: Add __noreorder and mark initcalls __noreorder gcc 5 has a new no_reorder attribute that prevents top level reordering only for that symbol. Kernels don't like any reordering of initcalls between files, as several initcalls depend on each other. LTO previously needed to use -fno-toplevel-reordering to prevent boot failures. Add a __noreorder wrapper for the no_reorder attribute and use it for initcalls. 
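A self-contained sketch of the attribute applied to a single initcall-style definition (hypothetical driver name; the real macro change in include/linux/init.h is in the diff below):

    #define __noreorder __attribute__((no_reorder))

    typedef int (*initcall_t)(void);

    static int my_driver_init(void)
    {
            return 0;
    }

    /* no_reorder pins this entry at its source position, so the
     * .initcall6.init section keeps its ordering under -flto without
     * disabling top-level reordering for the whole file.
     */
    static initcall_t my_driver_initcall
            __attribute__((used, section(".initcall6.init")))
            __noreorder = my_driver_init;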
Signed-off-by: Andi Kleen <ak@linux.intel.com> --- include/linux/compiler-gcc.h | 5 +++++ include/linux/compiler_types.h | 3 +++ include/linux/init.h | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 6d7ead22c1b4..4a8308c74bfc 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -357,6 +357,11 @@ #define __no_sanitize_address #endif +#if __GNUC__ >= 5 +/* Avoid reordering a top level statement */ +#define __noreorder __attribute__((no_reorder)) +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index a207f820d3b0..2da2ba2fd954 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -284,6 +284,9 @@ struct ftrace_likely_data { #define __assume_aligned(a, ...) #endif +#ifndef __noreorder +#define __noreorder +#endif /* Are two types/vars the same type (ignoring qualifiers)? */ #ifndef __same_type diff --git a/include/linux/init.h b/include/linux/init.h index f5fd059a6e76..6a8fa1e0cadf 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -179,7 +179,7 @@ extern bool initcall_debug; */ #define __define_initcall(fn, id) \ - static initcall_t __initcall_name(fn, id) __used \ + static initcall_t __initcall_name(fn, id) __used __noreorder \ __attribute__((__section__(".initcall" #id ".init"))) = fn; /* From 415500758d51686fe45d7e9c1dae449dc2ad6164 Mon Sep 17 00:00:00 2001 From: Andi Kleen <ak@linux.intel.com> Date: Fri, 27 Jun 2014 23:11:22 +0200 Subject: [PATCH 442/452] lto, workaround: Disable LTO for BPF Disable LTO for the BPF interpreter. This works around a gcc bug in the LTO partitioner that partitions the jumptable used the BPF interpreter into a different LTO unit. This in term causes assembler errors because the jump table contains references to the code labels in the original file. gcc problem tracked in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=50676 Signed-off-by: Andi Kleen <ak@linux.intel.com> --- kernel/bpf/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index af3ab6164ff5..63ac9f5320b1 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -13,3 +13,8 @@ ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif obj-$(CONFIG_CGROUP_BPF) += cgroup.o + +# various version of gcc have a LTO bug where the &&labels used in the +# BPF interpreter can cause linker errors when spread incorrectly over +# partitions. Disable LTO for BPF for now +CFLAGS_core.o = $(DISABLE_LTO) From d391e1960c21b2ba83af4fafb3c50ed899e7e077 Mon Sep 17 00:00:00 2001 From: Yury Norov <ynorov@caviumnetworks.com> Date: Wed, 29 Nov 2017 17:03:03 +0300 Subject: [PATCH 443/452] arm64: cpu_ops: Add missing 'const' qualifiers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Building the kernel with an LTO-enabled GCC spits out the following "const" warning for the cpu_ops code: mm/percpu.c:2168:20: error: pcpu_fc_names causes a section type conflict with dt_supported_cpu_ops const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { ^ arch/arm64/kernel/cpu_ops.c:34:37: note: ‘dt_supported_cpu_ops’ was declared here static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = { Fix it by adding missed const qualifiers. 
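A minimal sketch of the difference the added qualifier makes (illustrative types, not the arm64 code):

    struct ops { int id; };
    static const struct ops a, b;

    /* Pointers to const data, but the array itself is writable, so it
     * cannot legally share a read-only __initconst section.
     */
    static const struct ops *mixed_table[] = { &a, &b };

    /* Array and elements both const: genuinely read-only, matching the
     * section that the __initconst annotation asks for.
     */
    static const struct ops *const ro_table[] = { &a, &b };

The iterator in cpu_get_ops() changes accordingly to a pointer to const pointer, as in the hunk below.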
Signed-off-by: Yury Norov <ynorov@caviumnetworks.com> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Will Deacon <will.deacon@arm.com> --- arch/arm64/kernel/cpu_ops.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index d16978213c5b..ea001241bdd4 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -31,13 +31,13 @@ extern const struct cpu_operations cpu_psci_ops; const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; -static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = { +static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = { &smp_spin_table_ops, &cpu_psci_ops, NULL, }; -static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = { +static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = { #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL &acpi_parking_protocol_ops, #endif @@ -47,7 +47,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = { static const struct cpu_operations * __init cpu_get_ops(const char *name) { - const struct cpu_operations **ops; + const struct cpu_operations *const *ops; ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops; From eb5d1eff797c7d2507fe413f05bc1016b0a8b48e Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 25 Nov 2020 12:55:50 +0300 Subject: [PATCH 444/452] Makefile: add mcpu option Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 3c1f63666619..d49b5b07be71 100644 --- a/Makefile +++ b/Makefile @@ -738,6 +738,12 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os endif +ifdef CONFIG_SOC_EXYNOS9820 +ifeq ($(cc-name),gcc) +KBUILD_CFLAGS += $(call cc-option,-mcpu=cortex-a75.cortex-a55,$(call cc-option,-mcpu=cortex-a55)) +endif +endif + ifeq ($(cc-name),clang) ifdef CONFIG_LLVM_POLLY KBUILD_CFLAGS += -mllvm -polly \ From a2ba77172f60f708e85217bd667574127bc33f36 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 9 Dec 2020 13:34:10 +0300 Subject: [PATCH 445/452] Kbuild: Add Link Time Optimization (LTO) support Based on Andi Kleen <ak@linux.intel.com> work. 
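For background, a two-file sketch of the cross-translation-unit optimization this enables (assumed file names, unrelated to the kernel sources):

    /* a.c */
    int square(int x)
    {
            return x * x;
    }

    /* b.c */
    int square(int);

    int main(void)
    {
            return square(3);
    }

Built conventionally, square() cannot be inlined into main() because each file is optimized in isolation; building and linking both with

    gcc -O2 -flto -c a.c b.c
    gcc -O2 -flto a.o b.o -o demo

lets the link step see both function bodies and inline the call. CONFIG_LTO_GCC applies the same cross-file visibility for inlining and unused-code removal to the whole kernel image.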
Signed-off-by: Denis Efremov <efremov@linux.com> --- Makefile | 65 ++++++++++++++++++++++++++++++++++++++-- arch/Kconfig | 60 +++++++++++++++++++++++++++++++++++++ arch/arm64/Kconfig | 1 + crypto/Makefile | 1 + lib/Kconfig.debug | 2 +- scripts/Makefile.modpost | 3 +- scripts/gcc-ld | 9 ++++-- scripts/link-vmlinux.sh | 6 ++++ 8 files changed, 141 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index d49b5b07be71..0555de65344a 100644 --- a/Makefile +++ b/Makefile @@ -713,6 +713,26 @@ LDFLAGS += -plugin LLVMgold.so LLVM_AR := llvm-ar LLVM_DIS := llvm-dis export LLVM_AR LLVM_DIS +else ifdef CONFIG_LTO_GCC +LDFLAGS_FINAL_vmlinux := -flto=jobserver -fuse-linker-plugin +LDFLAGS_FINAL_vmlinux += $(filter -g%, $(KBUILD_CFLAGS)) +LDFLAGS_FINAL_vmlinux += -fno-fat-lto-objects +LDFLAGS_FINAL_vmlinux += $(call cc-disable-warning,attribute-alias,) +LDFLAGS_FINAL_vmlinux += -Xassembler -Idrivers/misc/tzdev +ifdef CONFIG_LTO_DEBUG + LDFLAGS_FINAL_vmlinux += -fdump-ipa-cgraph -fdump-ipa-inline-details + # add for debugging compiler crashes: + # LDFLAGS_FINAL_vmlinux += -dH -save-temps +endif +ifdef CONFIG_LTO_CP_CLONE + LDFLAGS_FINAL_vmlinux += -fipa-cp-clone +endif +LDFLAGS_FINAL_vmlinux += -Wno-lto-type-mismatch -Wno-psabi +LDFLAGS_FINAL_vmlinux += -Wno-stringop-overflow -flinker-output=nolto-rel + +LDFINAL_vmlinux := ${CONFIG_SHELL} ${srctree}/scripts/gcc-ld +AR := $(CROSS_COMPILE)gcc-ar +NM := $(CROSS_COMPILE)gcc-nm endif # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default @@ -900,13 +920,23 @@ lto-clang-flags := -flto -fvisibility=hidden # allow disabling only clang LTO where needed DISABLE_LTO_CLANG := -fno-lto -fvisibility=default export DISABLE_LTO_CLANG +else ifdef CONFIG_LTO_GCC +lto-gcc-flags := -flto -fno-fat-lto-objects +lto-gcc-flags += $(call cc-disable-warning,attribute-alias,) + +ifdef CONFIG_LTO_CP_CLONE +lto-gcc-flags += -fipa-cp-clone +endif + +DISABLE_LTO_GCC := -fno-lto +export DISABLE_LTO_GCC endif ifdef CONFIG_LTO -lto-flags := $(lto-clang-flags) +lto-flags := $(lto-clang-flags) $(lto-gcc-flags) KBUILD_CFLAGS += $(lto-flags) -DISABLE_LTO := $(DISABLE_LTO_CLANG) +DISABLE_LTO := $(DISABLE_LTO_CLANG) $(DISABLE_LTO_GCC) export DISABLE_LTO # LDFINAL_vmlinux and LDFLAGS_FINAL_vmlinux can be set to override @@ -1309,6 +1339,22 @@ ifneq ($(findstring y,$(shell $(CONFIG_SHELL) \ endif endif +# Disable gcc-specific config options when using a different compiler +gcc-specific-configs := LTO_GCC + +PHONY += check-gcc-specific-options +check-gcc-specific-options: $(KCONFIG_CONFIG) FORCE +ifneq ($(cc-name),gcc) +ifneq ($(findstring y,$(shell $(CONFIG_SHELL) \ + $(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(gcc-specific-configs),-s $(c)))),) + @echo WARNING: Disabling gcc-specific options with $(cc-name) >&2 + $(Q)$(srctree)/scripts/config --file $(KCONFIG_CONFIG) \ + $(foreach c,$(gcc-specific-configs),-d $(c)) && \ + $(MAKE) -f $(srctree)/Makefile olddefconfig +endif +endif + # Check for CONFIG flags that require compiler support. Abort the build # after .config has been processed, but before the kernel build starts. 
# @@ -1327,6 +1373,21 @@ ifdef CONFIG_LTO_CLANG ifneq ($(call gold-ifversion, -ge, 112000000, y), y) @echo Cannot use CONFIG_LTO_CLANG: requires GNU gold 1.12 or later >&2 && exit 1 endif +else ifdef CONFIG_LTO_GCC + ifdef CONFIG_UBSAN + ifeq ($(call gcc-ifversion,-lt,0600,y),y) + @echo Cannot use CONFIG_LTO_GCC with UBSAN: >= gcc 6.x required >&2 && exit 1 + endif + endif + ifeq ($(shell if test `ulimit -n` -lt 4000 ; then echo yes ; fi),yes) + @echo File descriptor limit too low. Increase with ulimit -n >&2 && exit 1 + endif + ifeq ($(call gcc-ifversion, -lt, 0500,y),y) + @echo Cannot use CONFIG_LTO_GCC: requires gcc 5.0 or later >&2 && exit 1 + endif + ifeq ($(call ld-ifversion,-lt,227000000,y),y) + @echo Cannot use CONFIG_LTO_GCC: requires binutils 2.27 or later >&2 && exit 1 + endif endif # Make sure compiler supports LTO flags ifdef lto-flags diff --git a/arch/Kconfig b/arch/Kconfig index 94802f909d95..3857689caf10 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -607,6 +607,9 @@ config ARCH_SUPPORTS_LTO_CLANG - compiling inline assembly with clang's integrated assembler, - and linking with either lld or GNU gold w/ LLVMgold. +config ARCH_SUPPORTS_LTO_GCC + bool + choice prompt "Link-Time Optimization (LTO) (EXPERIMENTAL)" default LTO_NONE @@ -634,8 +637,65 @@ config LTO_CLANG 5.0 (make CC=clang) and GNU gold from binutils >= 2.27, and have the LLVMgold plug-in in LD_LIBRARY_PATH. +config LTO_GCC + bool "Enable gcc link time optimization (LTO)" + depends on ARCH_SUPPORTS_LTO_GCC + depends on !GCOV_KERNEL + depends on !MODVERSIONS + select LTO + select THIN_ARCHIVES + select LD_DEAD_CODE_DATA_ELIMINATION + # lto does not support excluding flags for specific files + # right now. Can be removed if that is fixed. + #depends on !FUNCTION_TRACER + help + With this option gcc will do whole program optimizations for + the whole kernel and module. This increases compile time, but can + lead to better code. It allows gcc to inline functions between + different files and do other optimization. It might also trigger + bugs due to more aggressive optimization. It allows gcc to drop unused + code. On smaller monolithic kernel configurations + it usually leads to smaller kernels, especially when modules + are disabled. + + With this option gcc will also do some global checking over + different source files. It also disables a number of kernel + features. + + This option is recommended for release builds. With LTO + the kernel always has to be re-optimized (but not re-parsed) + on each build. + + This requires a gcc 6.0 or later compiler. + + On larger configurations this may need more than 4GB of RAM. + It will likely not work on those with a 32bit compiler. + + When the toolchain support is not available this will (hopefully) + be automatically disabled. + + For more information see Documentation/lto-build endchoice +config LTO_DEBUG + bool "Enable LTO compile time debugging" + depends on LTO_GCC + help + Enable LTO debugging in the compiler. The compiler dumps + some log files that make it easier to figure out LTO + behavior. The log files also allow to reconstruct + the global inlining and a global callgraph. + They however add some (single threaded) cost to the + compilation. When in doubt do not enable. + +config LTO_CP_CLONE + bool "Allow aggressive cloning for function specialization" + depends on LTO_GCC + help + Allow the compiler to clone and specialize functions for specific + arguments when it determines these arguments are very commonly + called. Experimential. Will increase text size. 
+ config CFI bool diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 94683e02421d..7455241ac0b4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -25,6 +25,7 @@ config ARM64 select ARCH_USE_CMPXCHG_LOCKREF select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_LTO_CLANG + select ARCH_SUPPORTS_LTO_GCC select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_WANT_COMPAT_IPC_PARSE_VERSION diff --git a/crypto/Makefile b/crypto/Makefile index 038ed7b25dfa..afdb6e88c274 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -126,6 +126,7 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o +CFLAGS_cast_common.o += $(DISABLE_LTO) obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o obj-$(CONFIG_CRYPTO_CAST6) += cast6_generic.o diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ccd0e952f182..aaa7c5d4ae61 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -245,7 +245,7 @@ config STRIP_ASM_SYMS config READABLE_ASM bool "Generate readable assembler code" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !LTO help Disable some compiler optimizations that tend to generate human unreadable assembler output. This may make the kernel slightly slower, but it helps diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index acd7ea0b5f26..2d1b4cceb290 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -79,7 +79,8 @@ modpost = scripts/mod/modpost \ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \ $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ - $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) + $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \ + $(if $(CONFIG_LTO),-w) MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS))) diff --git a/scripts/gcc-ld b/scripts/gcc-ld index 997b818c3962..e552259bbe07 100755 --- a/scripts/gcc-ld +++ b/scripts/gcc-ld @@ -8,17 +8,20 @@ ARGS="-nostdlib" while [ "$1" != "" ] ; do case "$1" in - -save-temps|-m32|-m64) N="$1" ;; + -save-temps*|-m32|-m64) N="$1" ;; -r) N="$1" ;; + -flinker-output*) N="$1" ;; -[Wg]*) N="$1" ;; -[olv]|-[Ofd]*|-nostdlib) N="$1" ;; - --end-group|--start-group) + --end-group|--start-group|--whole-archive|--no-whole-archive) N="-Wl,$1" ;; -[RTFGhIezcbyYu]*|\ --script|--defsym|-init|-Map|--oformat|-rpath|\ -rpath-link|--sort-section|--section-start|-Tbss|-Tdata|-Ttext|\ --version-script|--dynamic-list|--version-exports-symbol|--wrap|-m) A="$1" ; shift ; N="-Wl,$A,$1" ;; + -maarch64elf) N="-Wl,$1" ;; + -Xassembler) shift ; N="-Xassembler $1" ;; -[m]*) N="$1" ;; -*) N="-Wl,$1" ;; *) N="$1" ;; @@ -27,4 +30,6 @@ while [ "$1" != "" ] ; do shift done +[ -n "$V" ] && echo >&2 $CC $ARGS + exec $CC $ARGS diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index ba25de7025fc..6591496fe845 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -120,6 +120,12 @@ modpost_link() # This might take a while, so indicate that we're doing # an LTO link info LTO vmlinux.o + elif [ -n "${CONFIG_LTO_GCC}" ]; then + if [ -n "${LDFINAL_vmlinux}" ]; then + LD=${LDFINAL_vmlinux} + LDFLAGS="${LDFLAGS_FINAL_vmlinux} ${LDFLAGS}" + fi + info LDFINAL vmlinux.o else info LD vmlinux.o fi From ed13fc02c64e3b4c04454e296e21159769d55aef Mon Sep 17 00:00:00 2001 From: Denis Efremov 
<efremov@linux.com> Date: Tue, 24 May 2022 21:35:57 +0400 Subject: [PATCH 446/452] drivers/net/wireless/broadcom: drop -Wno-sometimes-uninitialized Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd/Makefile | 2 -- drivers/net/wireless/broadcom/bcmdhd/Makefile.kk | 2 -- drivers/net/wireless/broadcom/bcmdhd/Makefile.lp | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk | 2 -- drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp | 2 -- drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile | 7 ------- 10 files changed, 25 deletions(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd/Makefile b/drivers/net/wireless/broadcom/bcmdhd/Makefile index 2b1c18d4fa02..f0e0f6240315 100644 --- a/drivers/net/wireless/broadcom/bcmdhd/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd/Makefile @@ -1366,11 +1366,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk b/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk index ac9d6438e6bb..a1f8644798dc 100644 --- a/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk +++ b/drivers/net/wireless/broadcom/bcmdhd/Makefile.kk @@ -1331,11 +1331,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp b/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp index 5ffbdc0aa5de..c9cbecb8b477 100644 --- a/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp +++ b/drivers/net/wireless/broadcom/bcmdhd/Makefile.lp @@ -1354,11 +1354,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile index 97534f4483b6..0256c044264d 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile @@ -1345,11 +1345,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk index d0ebd62b9cd0..d175529f8f46 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.kk @@ -1310,11 +1310,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq 
($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp index 89271234981f..16e7d0623472 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp +++ b/drivers/net/wireless/broadcom/bcmdhd_100_10/Makefile.lp @@ -1333,11 +1333,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile index 2660b26bfc8e..cd6983d5df6a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile @@ -1555,11 +1555,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk index 4e689aadee3f..02c7fff2a748 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.kk @@ -1520,11 +1520,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp index 5354514c6c4f..5d6a89cf451a 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp +++ b/drivers/net/wireless/broadcom/bcmdhd_100_15/Makefile.lp @@ -1543,11 +1543,9 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif # DTS Support diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index c311e7a849c8..84bcbef79a5b 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -1836,31 +1836,24 @@ ifeq ($(CONFIG_ARCH_SDM845),y) endif ifeq ($(CONFIG_SOC_EXYNOS9820),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS9830),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS2100),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS1000),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_SM8150),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_KONA),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += -Wno-sometimes-uninitialized endif ifeq ($(CONFIG_ARCH_LAHAINA),y) DHDCFLAGS += -Wno-date-time - DHDCFLAGS += 
-Wno-sometimes-uninitialized endif ifeq ($(CONFIG_SOC_EXYNOS9110),y) DHDCFLAGS += -Wno-unused-const-variable From a3be850e32398703dcf00a2d7d1dae23aadb930f Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Tue, 24 May 2022 21:36:53 +0400 Subject: [PATCH 447/452] drivers/net/wireless/broadcom/bcmdhd_101_16: drop -Werror Signed-off-by: Denis Efremov <efremov@linux.com> --- drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile index 84bcbef79a5b..c9fb91b814a4 100644 --- a/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile +++ b/drivers/net/wireless/broadcom/bcmdhd_101_16/Makefile @@ -25,7 +25,7 @@ DHDCFLAGS += -DBCMUTILS_ERR_CODES -DUSE_NEW_RSPEC_DEFS DHDCFLAGS += -Dlinux -D__linux__ -DLINUX -DHDCFLAGS += -Wall -Werror -Wstrict-prototypes -DBCMDRIVER \ +DHDCFLAGS += -Wall -Wstrict-prototypes -DBCMDRIVER \ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ -DDHDTHREAD -DDHD_BCMEVENTS -DSHOW_EVENTS -DWLP2P \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DKEEP_ALIVE -DCSCAN \ From 204d9fead683f8171c51c1df5044ca1159a10202 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Sun, 11 Apr 2021 20:22:45 +0300 Subject: [PATCH 448/452] cruelbuild: disable builds for models without HVJ5 sources Signed-off-by: Denis Efremov <efremov@linux.com> --- cruelbuild | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/cruelbuild b/cruelbuild index 9f14f439e766..ca8dc0eb449c 100755 --- a/cruelbuild +++ b/cruelbuild @@ -91,33 +91,18 @@ models = { 'G970F': { 'config': 'exynos9820-beyond0lte_defconfig' }, - 'G970N': { - 'config': 'exynos9820-beyond0lteks_defconfig' - }, 'G973F': { 'config': 'exynos9820-beyond1lte_defconfig' }, - 'G973N': { - 'config': 'exynos9820-beyond1lteks_defconfig' - }, 'G975F': { 'config': 'exynos9820-beyond2lte_defconfig' }, - 'G975N': { - 'config': 'exynos9820-beyond2lteks_defconfig' - }, 'G977B': { 'config': 'exynos9820-beyondx_defconfig' }, 'G977N': { 'config': 'exynos9820-beyondxks_defconfig' }, - 'N970F': { - 'config': 'exynos9820-d1_defconfig' - }, - 'N971N': { - 'config': 'exynos9820-d1xks_defconfig' - }, 'N975F': { 'config': 'exynos9820-d2s_defconfig' }, From d9db0726366ffbc53bd78a48c9cf73f7fdaacb45 Mon Sep 17 00:00:00 2001 From: "@alu94" <ridwansn2@gmail.com> Date: Thu, 5 Jan 2023 21:36:07 +0900 Subject: [PATCH 449/452] Update main.yml --- .github/workflows/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ffee8352331d..4a8ed84a7d53 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -13,7 +13,7 @@ jobs: strategy: fail-fast: false matrix: - model: [ "G970F,G973F,G975F", "N975F" ] + model: [ "G973F", "N976B" ] steps: - uses: actions/checkout@v3 @@ -55,7 +55,7 @@ jobs: set -e -o pipefail ./cruelbuild config \ model=${{ matrix.model }} \ - name="Cruel-devel" \ + name="4lu-Cruel" \ toolchain=$TOOLCHAIN \ +magisk \ +nohardening \ @@ -67,6 +67,7 @@ jobs: +force_dex_wqhd \ +morosound \ +boeffla_wl_blocker \ + +always_permit \ 2>&1 | tee config.info - name: Install gcc-aarch64-linux-gnu From e602e1bcaec4fe28ede4fc89b348462e11044086 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Wed, 19 Apr 2023 22:50:29 +0400 Subject: [PATCH 450/452] magisk: Support Magisk v26. 
Signed-off-by: Denis Efremov <efremov@linux.com> --- cruelbuild | 11 ++++++++++- usr/magisk/.gitignore | 1 + usr/magisk/backup_magisk | 3 --- usr/magisk/initramfs_list | 1 + usr/magisk/update_magisk.sh | 6 ++++++ 5 files changed, 18 insertions(+), 4 deletions(-) delete mode 100644 usr/magisk/backup_magisk diff --git a/cruelbuild b/cruelbuild index ca8dc0eb449c..c3bf9b91c2a8 100755 --- a/cruelbuild +++ b/cruelbuild @@ -5,6 +5,7 @@ from sys import argv, stdout, stderr import re import json import atexit +import secrets from copy import deepcopy from datetime import datetime, timedelta from subprocess import CalledProcessError, Popen, run, DEVNULL, PIPE @@ -597,7 +598,15 @@ def update_magisk(version): if version: cmd.append(version) run(cmd, check=True) - with open('usr/magisk/magisk_version', 'r') as fh: + with open('usr/magisk/backup_magisk', 'wt') as fh: + print(f""" +KEEPVERITY=true +KEEPFORCEENCRYPT=true +RECOVERYMODE=false +PREINITDEVICE=userdata +RANDOMSEED=0x{secrets.token_hex(8)} +""".strip(), file=fh) + with open('usr/magisk/magisk_version', 'rt') as fh: print('Magisk Version: ' + fh.readline()) def switch_toolchain(compiler): diff --git a/usr/magisk/.gitignore b/usr/magisk/.gitignore index 6efeeffea17a..38b93855af59 100644 --- a/usr/magisk/.gitignore +++ b/usr/magisk/.gitignore @@ -1,3 +1,4 @@ magiskinit magiskinit64 magisk_version +backup_magisk diff --git a/usr/magisk/backup_magisk b/usr/magisk/backup_magisk deleted file mode 100644 index 8070a531554f..000000000000 --- a/usr/magisk/backup_magisk +++ /dev/null @@ -1,3 +0,0 @@ -KEEPVERITY=true -KEEPFORCEENCRYPT=true -RECOVERYMODE=false diff --git a/usr/magisk/initramfs_list b/usr/magisk/initramfs_list index 6fc1b73bfd2d..a63387d9e8cb 100644 --- a/usr/magisk/initramfs_list +++ b/usr/magisk/initramfs_list @@ -5,3 +5,4 @@ dir /overlay.d 0750 0 0 dir /overlay.d/sbin 0750 0 0 file /overlay.d/sbin/magisk32.xz usr/magisk/magisk32.xz 0644 0 0 file /overlay.d/sbin/magisk64.xz usr/magisk/magisk64.xz 0644 0 0 +file /overlay.d/sbin/stub.xz usr/magisk/stub.xz 0644 0 0 diff --git a/usr/magisk/update_magisk.sh b/usr/magisk/update_magisk.sh index 7bf57a36b086..305fb68245b1 100755 --- a/usr/magisk/update_magisk.sh +++ b/usr/magisk/update_magisk.sh @@ -39,6 +39,12 @@ then mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" mv -f "$DIR/lib/armeabi-v7a/libmagisk64.so" "$DIR/magisk64" xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" + elif unzip -o "$DIR/magisk.zip" lib/arm64-v8a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/arm64-v8a/libmagisk64.so assets/stub.apk -d "$DIR"; then + mv -f "$DIR/lib/arm64-v8a/libmagiskinit.so" "$DIR/magiskinit" + mv -f "$DIR/lib/armeabi-v7a/libmagisk32.so" "$DIR/magisk32" + mv -f "$DIR/lib/arm64-v8a/libmagisk64.so" "$DIR/magisk64" + mv -f "$DIR/assets/stub.apk" "$DIR/stub" + xz --force --check=crc32 "$DIR/magisk32" "$DIR/magisk64" "$DIR/stub" else unzip -o "$DIR/magisk.zip" lib/arm64-v8a/libmagiskinit.so lib/armeabi-v7a/libmagisk32.so lib/arm64-v8a/libmagisk64.so -d "$DIR" mv -f "$DIR/lib/arm64-v8a/libmagiskinit.so" "$DIR/magiskinit" From 41aba77656e3377e8c55691b8b807d915e545343 Mon Sep 17 00:00:00 2001 From: Denis Efremov <efremov@linux.com> Date: Mon, 4 Sep 2023 20:04:21 +0400 Subject: [PATCH 451/452] magisk: Fix broken link for magisk v26.3 Signed-off-by: Denis Efremov <efremov@linux.com> --- usr/magisk/update_magisk.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/usr/magisk/update_magisk.sh b/usr/magisk/update_magisk.sh index 305fb68245b1..9d1e7e3a3919 100755 --- 
a/usr/magisk/update_magisk.sh +++ b/usr/magisk/update_magisk.sh @@ -15,12 +15,16 @@ then nver="alpha" magisk_link="https://github.com/vvb2060/magisk_files/raw/${nver}/app-release.apk" else + dash='-' if [ "x$1" = "x" ]; then nver="$(curl -s https://github.com/topjohnwu/Magisk/releases | grep -m 1 -Poe 'Magisk v[\d\.]+' | cut -d ' ' -f 2)" else nver="$1" fi - magisk_link="https://github.com/topjohnwu/Magisk/releases/download/${nver}/Magisk-${nver}.apk" + if [ "$nver" = "v26.3" ]; then + dash='.' + fi + magisk_link="https://github.com/topjohnwu/Magisk/releases/download/${nver}/Magisk${dash}${nver}.apk" fi if [ \( -n "$nver" \) -a \( "$nver" != "$ver" \) -o ! \( -f "$DIR/magiskinit" \) -o \( "$nver" = "canary" \) -o \( "$nver" = "alpha" \) ] From ba7c47e28b56e4150fc5bbba2365f4126e15f590 Mon Sep 17 00:00:00 2001 From: "@alu94" <ridwansn2@gmail.com> Date: Tue, 21 Nov 2023 17:36:19 +0900 Subject: [PATCH 452/452] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4a8ed84a7d53..53c5059a2e0e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -13,7 +13,7 @@ jobs: strategy: fail-fast: false matrix: - model: [ "G973F", "N976B" ] + model: [ "G973F", "N976N" ] steps: - uses: actions/checkout@v3