diff --git a/docs/examples/array_display.ipynb b/docs/examples/array_display.ipynb index 74544759eb7..3bf8c75f490 100644 --- a/docs/examples/array_display.ipynb +++ b/docs/examples/array_display.ipynb @@ -142,15 +142,10 @@ "ad = ArrayDisplay(subarray)\n", "ad.telescopes.set_linewidth(0) # to turn off the telescope borders\n", "\n", + "tels_with_trigger = [1, 4, 5, 6]\n", "trigger_pattern = np.zeros(subarray.n_tels)\n", - "trigger_pattern[\n", - " [\n", - " 1,\n", - " 4,\n", - " 5,\n", - " 6,\n", - " ]\n", - "] = 1\n", + "trigger_pattern[tels_with_trigger] = 1\n", + "\n", "ad.values = trigger_pattern # display certain telescopes in a color\n", "ad.add_labels()" ] @@ -184,16 +179,20 @@ }, "outputs": [], "source": [ - "plt.set_cmap(\"rainbow\") # the array display will use the current colormap for values\n", + "plt.set_cmap(\"inferno\") # the array display will use the current colormap for values\n", + "\n", "ad = ArrayDisplay(subarray)\n", "ad.telescopes.set_linewidth(0) # to turn off the telescope borders\n", + "\n", "plt.scatter(shower_impact.easting, shower_impact.northing, marker=\"+\", s=200)\n", "\n", "distances = np.hypot(\n", " subarray.tel_coords.cartesian.x - shower_impact.cartesian.x,\n", " subarray.tel_coords.cartesian.y - shower_impact.cartesian.y,\n", ")\n", + "\n", "ad.values = distances\n", + "\n", "plt.colorbar(ad.telescopes, label=\"Distance (m)\")" ] }, @@ -219,8 +218,8 @@ }, "outputs": [], "source": [ - "np.random.seed(0)\n", - "phis = np.random.uniform(0, 180.0, size=subarray.n_tels) * u.deg\n", + "rng = np.random.default_rng(0)\n", + "phis = rng.uniform(0, 180.0, size=subarray.n_tels) * u.deg\n", "rhos = np.ones(subarray.n_tels) * 200 * u.m\n", "\n", "\n", @@ -297,8 +296,8 @@ " angle_offset = event.pointing.azimuth\n", " disp = ArrayDisplay(subarray, axes=ax)\n", "\n", - " hillas_dict = {tid: tel.parameters.hillas for tid, tel in event.dl1.tel.items()}\n", - " core_dict = {tid: tel.parameters.core.psi for tid, tel in event.dl1.tel.items()}\n", + " hillas_dict = {tid: tel.dl1.parameters.hillas for tid, tel in event.tel.items()}\n", + " core_dict = {tid: tel.dl1.parameters.core.psi for tid, tel in event.tel.items()}\n", "\n", " disp.set_line_hillas(\n", " hillas_dict,\n", @@ -341,6 +340,7 @@ "execution_count": null, "id": "502fe577-3e8a-4e6d-90f3-8db8fbb57b90", "metadata": { + "scrolled": false, "tags": [] }, "outputs": [], @@ -362,14 +362,22 @@ } ], "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, "language_info": { "codemirror_mode": { - "name": "ipython" + "name": "ipython", + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", - "nbconvert_exporter": "python" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" } }, "nbformat": 4, diff --git a/docs/tutorials/calibrated_data_exploration.ipynb b/docs/tutorials/calibrated_data_exploration.ipynb index 2966094d860..b3db35fc404 100644 --- a/docs/tutorials/calibrated_data_exploration.ipynb +++ b/docs/tutorials/calibrated_data_exploration.ipynb @@ -91,7 +91,7 @@ }, "outputs": [], "source": [ - "print(event.r1)" + "event.tel[26]" ] }, { @@ -100,12 +100,11 @@ "source": [ "## Perform basic calibration:\n", "\n", - "Here we will use a `CameraCalibrator` which is just a simple wrapper that runs the three calibraraton and trace-integration phases of the pipeline, taking the data from levels:\n", + "Here we will use a `CameraCalibrator` which is just a simple wrapper that runs the two 
calibration and trace-integration phases of the pipeline, taking the data from levels:\n", "\n", - " **R0** → **R1** → **DL0** → **DL1**\n", + " **R1** → **DL0** → **DL1**\n", "\n", - "You could of course do these each separately, by using the classes `R1Calibrator`, `DL0Reducer`, and `DL1Calibrator`.\n", - "Note that we have not specified any configuration to the `CameraCalibrator`, so it will be using the default algorithms and thresholds, other than specifying that the product is a \"HESSIOR1Calibrator\" (hopefully in the near future that will be automatic)." + "Note that we have not specified any configuration to the `CameraCalibrator`, so it will be using the default algorithms and thresholds." ] }, { @@ -126,9 +125,10 @@ "source": [ "Now the *r1*, *dl0* and *dl1* containers are filled in the event\n", "\n", - "* **r1.tel[x]**: contains the \"r1-calibrated\" waveforms, after gain-selection, pedestal subtraciton, and gain-correction\n", - "* **dl0.tel[x]**: is the same but with optional data volume reduction (some pixels not filled), in this case this is not performed by default, so it is the same as r1\n", - "* **dl1.tel[x]**: contains the (possibly re-calibrated) waveforms as dl0, but also the time-integrated *image* that has been calculated using a `ImageExtractor` (a `NeighborPeakWindowSum` by default)" + "* **r0**: Contains device specific raw data. This is usually only available in simulations or in expert data. \n", + "* **r1**: contains the \"r1-calibrated\" waveforms, after gain-selection, pedestal subtraction, and gain-correction\n", + "* **dl0**: is the same but with optional data volume reduction (some pixels not filled), by default, this is not performed, so it is the same as r1\n", + "* **dl1**: contains the time-integrated *image* that has been calculated using an `ImageExtractor` (`NeighborPeakWindowSum` by default)" ] }, { @@ -137,11 +137,11 @@ "metadata": {}, "outputs": [], "source": [ - "for tel_id in event.dl1.tel:\n", + "for tel_id, tel_event in event.tel.items():\n", " print(\"TEL{:03}: {}\".format(tel_id, source.subarray.tel[tel_id]))\n", - " print(\" - r0 wave shape : {}\".format(event.r0.tel[tel_id].waveform.shape))\n", - " print(\" - r1 wave shape : {}\".format(event.r1.tel[tel_id].waveform.shape))\n", - " print(\" - dl1 image shape : {}\".format(event.dl1.tel[tel_id].image.shape))" + " print(\" - r0 wave shape : {}\".format(tel_event.r0.waveform.shape))\n", + " print(\" - r1 wave shape : {}\".format(tel_event.r1.waveform.shape))\n", + " print(\" - dl1 image shape : {}\".format(tel_event.dl1.image.shape))" ] }, { @@ -161,10 +161,10 @@ "source": [ "from ctapipe.visualization import CameraDisplay\n", "\n", - "tel_id = sorted(event.r1.tel.keys())[1]\n", + "tel_id, tel_event = next(iter(event.tel.items()))\n", "sub = source.subarray\n", "geometry = sub.tel[tel_id].camera.geometry\n", - "image = event.dl1.tel[tel_id].image" + "image = tel_event.dl1.image" ] }, { @@ -222,23 +222,36 @@ "source": [ "params = hillas_parameters(geometry, cleaned)\n", "\n", - "plt.figure(figsize=(10, 10))\n", + "plt.figure(figsize=(5, 5))\n", "disp = CameraDisplay(geometry, image=image)\n", "disp.add_colorbar()\n", - "disp.overlay_moments(params, color=\"red\", lw=3)\n", + "disp.overlay_moments(params, color=\"xkcd:light blue\", lw=3)\n", "disp.highlight_pixels(mask, color=\"white\", alpha=0.3, linewidth=2)\n", "\n", "plt.xlim(params.x.to_value(u.m) - 0.5, params.x.to_value(u.m) + 0.5)\n", "plt.ylim(params.y.to_value(u.m) - 0.5, params.y.to_value(u.m) + 0.5)" ] }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "## ImageProcessor\n", + "\n", + "The above steps can be configured and run easily using the `ImageProcessor` class:" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "source.metadata" + "from ctapipe.image import ImageProcessor\n", + "\n", + "image_processor = ImageProcessor(subarray=source.subarray, use_telescope_frame=False)\n", + "\n", + "image_processor(event)" ] }, { @@ -262,9 +275,8 @@ "metadata": {}, "outputs": [], "source": [ - "tels_in_event = set(\n", - " event.dl1.tel.keys()\n", - ") # use a set here, so we can intersect it later\n", + "# use a set here, so we can intersect it later\n", + "tels_in_event = set(event.tel.keys())\n", "tels_in_event" ] }, @@ -274,27 +286,8 @@ "metadata": {}, "outputs": [], "source": [ - "cam_ids = set(sub.get_tel_ids_for_type(\"MST_MST_NectarCam\"))\n", - "cam_ids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cams_in_event = tels_in_event.intersection(cam_ids)\n", - "first_tel_id = list(cams_in_event)[0]\n", - "tel = sub.tel[first_tel_id]\n", - "print(\"{}s in event: {}\".format(tel, cams_in_event))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, let's sum those images:" + "mst_tel_ids = set(sub.get_tel_ids_for_type(\"MST_MST_NectarCam\"))\n", + "mst_tel_ids" ] }, { @@ -303,32 +296,45 @@ "metadata": {}, "outputs": [], "source": [ - "image_sum = np.zeros_like(\n", - " tel.camera.geometry.pix_x.value\n", - ") # just make an array of 0's in the same shape as the camera\n", + "msts_in_event = list(tels_in_event.intersection(mst_tel_ids))\n", "\n", - "for tel_id in cams_in_event:\n", - " image_sum += event.dl1.tel[tel_id].image" + "tel = sub.tel[msts_in_event[0]]\n", + "print(f\"{tel} in event: {msts_in_event}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "And finally display the sum of those images" + "Now let's sum and display those images" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ - "plt.figure(figsize=(8, 8))\n", + "image_sum = np.zeros(tel.camera.geometry.n_pixels)\n", + "\n", + "\n", + "fig, ax = plt.subplots(figsize=(8, 8))\n", "\n", - "disp = CameraDisplay(tel.camera.geometry, image=image_sum)\n", - "disp.overlay_moments(params, with_label=False)\n", - "plt.title(\"Sum of {}x {}\".format(len(cams_in_event), tel))" + "disp = CameraDisplay(tel.camera.geometry, ax=ax)\n", + "\n", + "\n", + "for tel_id in msts_in_event:\n", + " dl1 = event.tel[tel_id].dl1\n", + " image_sum += dl1.image\n", + "\n", + " disp.overlay_moments(\n", + " dl1.parameters.hillas, with_label=False, keep_old=True, lw=3, n_sigma=2\n", + " )\n", + "\n", + "disp.image = image_sum\n", + "plt.title(\"Sum of {}x {}\".format(len(msts_in_event), tel))" ] }, { @@ -353,10 +359,10 @@ "metadata": {}, "outputs": [], "source": [ - "nectarcam_subarray = sub.select_subarray(cam_ids, name=\"NectarCam\")\n", + "nectarcam_subarray = sub.select_subarray(mst_tel_ids, name=\"NectarCam\")\n", "\n", "hit_pattern = np.zeros(shape=nectarcam_subarray.n_tels)\n", - "hit_pattern[[nectarcam_subarray.tel_indices[x] for x in cams_in_event]] = 100\n", + "hit_pattern[nectarcam_subarray.tel_ids_to_indices(msts_in_event)] = 1\n", "\n", "plt.set_cmap(plt.cm.Accent)\n", "plt.figure(figsize=(8, 8))\n", @@ -365,13 +371,6 @@ "ad.values = hit_pattern\n", "ad.add_labels()" ] - }, - { - "cell_type": "code", 
- "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -390,7 +389,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.16" } }, "nbformat": 4, diff --git a/docs/tutorials/coordinates_example.ipynb b/docs/tutorials/coordinates_example.ipynb index 3f415bb338d..9821e50e71f 100644 --- a/docs/tutorials/coordinates_example.ipynb +++ b/docs/tutorials/coordinates_example.ipynb @@ -123,8 +123,9 @@ }, "outputs": [], "source": [ - "print(f\"Telescope with data: {event.r1.tel.keys()}\")\n", - "tel_id = 3" + "print(f\"Telescope with data: {event.tel.keys()}\")\n", + "tel_id = 3\n", + "tel_event = event.tel[tel_id]" ] }, { @@ -141,8 +142,14 @@ "\n", "Pointing direction of telescopes or the origin of a simulated shower are described in the `AltAz` frame.\n", "This is a local, angular coordinate frame, with angles `altitude` and `azimuth`.\n", - "Altitude is the measured from the Horizon (0°) to the Zenith (90°).\n", - "For the azimuth, there are different conventions. In Astropy und thus ctapipe, Azimuth is oriented East of North (i.e., N=0°, E=90°)." + "\n", + "Altitude is measured from the Horizon (0°) to the Zenith (90°).\n", + "\n", + "For the azimuth, there are different conventions. \n", + "In Astropy und thus ctapipe, Azimuth is oriented East of North (i.e., N=0°, E=90°).\n", + "\n", + "We use *geographic* North direction, this is different e.g. from CORSIKA, which uses *magnetic* North direction\n", + "which is a couple of degrees different, depending on the time-dependent orientation of Earth's magnetic field and the observer location." ] }, { @@ -191,11 +198,11 @@ "\n", "Camera coordinate frame.\n", "\n", - "The camera frame is a 2d cartesian frame, describing position of objects in the focal plane of the telescope.\n", + "The camera frame is a 2d cartesian frame, describing positions of objects in the focal plane of the telescope.\n", "\n", - "The frame is defined as in H.E.S.S., starting at the horizon, the telescope is pointed to magnetic north in azimuth and then up to zenith.\n", + "The frame is defined as in H.E.S.S., starting at the horizon, the telescope is pointed to magnetic North in azimuth and then up to zenith.\n", "\n", - "Now, x points north and y points west, so in this orientation, the camera coordinates line up with the CORSIKA ground coordinate system.\n", + "Now, x points North and y points West, so in this orientation, the camera coordinates line up with the CORSIKA ground coordinate system.\n", "\n", "MAGIC and FACT use a different camera coordinate system: Standing at the dish, looking at the camera, x points right, y points up.\n", "To transform MAGIC/FACT to ctapipe, do x' = -y, y' = -x.\n", @@ -216,7 +223,7 @@ "geometry = source.subarray.tel[tel_id].camera.geometry\n", "pix_x = geometry.pix_x\n", "pix_y = geometry.pix_y\n", - "focal_length = source.subarray.tel[tel_id].optics.equivalent_focal_length" + "focal_length = source.subarray.tel[tel_id].optics.effective_focal_length" ] }, { @@ -230,8 +237,8 @@ "outputs": [], "source": [ "telescope_pointing = SkyCoord(\n", - " alt=event.pointing.tel[tel_id].altitude,\n", - " az=event.pointing.tel[tel_id].azimuth,\n", + " alt=tel_event.pointing.altitude,\n", + " az=tel_event.pointing.azimuth,\n", " frame=altaz,\n", ")\n", "\n", @@ -256,11 +263,16 @@ }, "outputs": [], "source": [ - "plt.scatter(cam_coords.x, cam_coords.y)\n", - "plt.title(f\"Camera type: {geometry.name}\")\n", - "plt.xlabel(f\"x / 
{cam_coords.x.unit}\")\n", - "plt.ylabel(f\"y / {cam_coords.y.unit}\")\n", - "plt.axis(\"square\");" + "fig, ax = plt.subplots()\n", + "\n", + "ax.scatter(cam_coords.x, cam_coords.y)\n", + "ax.set(\n", + " title=f\"Camera type: {geometry.name}\",\n", + " xlabel=f\"x / {cam_coords.x.unit}\",\n", + " ylabel=f\"y / {cam_coords.y.unit}\",\n", + " aspect=1,\n", + ")\n", + "None" ] }, { @@ -273,7 +285,9 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ "from ctapipe.visualization import CameraDisplay\n", @@ -297,7 +311,8 @@ "\n", "\n", "subarray = SubarrayDescription.read(\"dataset://gamma_prod5.simtel.zst\")\n", - "cam = subarray.tel[1].camera.geometry\n", + "cam = subarray.tel[tel_id].camera.geometry\n", + "\n", "fig, ax = plt.subplots()\n", "display = CameraDisplay(cam, ax=ax)\n", "\n", @@ -309,19 +324,17 @@ " star = SkyCoord.from_name(name)\n", " star_cam = star.transform_to(camera_frame)\n", "\n", + " display.overlay_coordinate(star_cam, ms=10, keep_old=True)\n", + "\n", " x = star_cam.x.to_value(u.m)\n", " y = star_cam.y.to_value(u.m)\n", - "\n", - " ax.plot(x, y, marker=\"*\", color=f\"C{i}\")\n", " ax.annotate(\n", " name,\n", " xy=(x, y),\n", " xytext=(5, 5),\n", " textcoords=\"offset points\",\n", " color=f\"C{i}\",\n", - " )\n", - "\n", - "plt.show()" + " )" ] }, { @@ -376,8 +389,9 @@ "source": [ "wrap_angle = telescope_pointing.az + 180 * u.deg\n", "\n", - "plt.axis(\"equal\")\n", - "plt.scatter(\n", + "fig, ax = plt.subplots()\n", + "\n", + "ax.scatter(\n", " telescope_coords.fov_lon.deg, telescope_coords.fov_lat.deg, alpha=0.2, color=\"gray\"\n", ")\n", "\n", @@ -386,8 +400,9 @@ " star = SkyCoord.from_name(name)\n", " star_tel = star.transform_to(telescope_frame)\n", "\n", - " plt.plot(star_tel.fov_lon.deg, star_tel.fov_lat.deg, \"*\", ms=10)\n", - " plt.annotate(\n", + " ax.plot(star_tel.fov_lon.deg, star_tel.fov_lat.deg, \"*\", ms=10)\n", + "\n", + " ax.annotate(\n", " name,\n", " xy=(star_tel.fov_lon.deg, star_tel.fov_lat.deg),\n", " xytext=(5, 5),\n", @@ -395,8 +410,11 @@ " color=f\"C{i}\",\n", " )\n", "\n", - "plt.xlabel(\"fov_lon / {}\".format(telescope_coords.altaz.az.unit))\n", - "plt.ylabel(\"fov_lat / {}\".format(telescope_coords.altaz.alt.unit))" + "ax.set(\n", + " xlabel=\"fov_lon / {}\".format(telescope_coords.altaz.az.unit),\n", + " ylabel=\"fov_lat / {}\".format(telescope_coords.altaz.alt.unit),\n", + " aspect=1,\n", + ")" ] }, { @@ -532,8 +550,8 @@ "\n", "Ground coordinate frame. The ground coordinate frame is a simple\n", " cartesian frame describing the 3 dimensional position of objects\n", - " compared to the array ground level in relation to the nomial\n", - " centre of the array. Typically this frame will be used for\n", + " compared to the array ground level in relation to the reference\n", + " location of the array. 
Typically this frame will be used for\n", " describing the position on telescopes and equipment\n", " \n", "**Typical usage**: positions of telescopes on the ground (x, y, z)" @@ -641,7 +659,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.9.16" } }, "nbformat": 4, diff --git a/docs/tutorials/ctapipe_handson.ipynb b/docs/tutorials/ctapipe_handson.ipynb index 68115dfd9f5..b723229248e 100644 --- a/docs/tutorials/ctapipe_handson.ipynb +++ b/docs/tutorials/ctapipe_handson.ipynb @@ -6,7 +6,9 @@ "source": [ "# Getting Started with ctapipe\n", "\n", - "This hands-on was presented at the Paris CTA Consoritum meeting (K. Kosack)" + "This hands-on was presented at the Paris CTA Consoritum meeting (K. Kosack).\n", + "\n", + "It has been updated since then to be compatible with the latest ctapipe version." ] }, { @@ -26,6 +28,7 @@ "from ctapipe import utils\n", "from matplotlib import pyplot as plt\n", "import numpy as np\n", + "\n", "%matplotlib inline" ] }, @@ -59,15 +62,6 @@ "event" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "event.r1" - ] - }, { "cell_type": "code", "execution_count": null, @@ -75,7 +69,7 @@ "outputs": [], "source": [ "for event in EventSource(path, max_events=5):\n", - " print(event.count, event.r1.tel.keys())" + " print(event.count, event.tel.keys())" ] }, { @@ -84,7 +78,9 @@ "metadata": {}, "outputs": [], "source": [ - "event.r0.tel[3]" + "tel_id = 119\n", + "tel_event = event.tel[tel_id]\n", + "tel_event" ] }, { @@ -93,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "r0tel = event.r0.tel[3]" + "r0tel = tel_event.r0" ] }, { @@ -146,8 +142,8 @@ "metadata": {}, "outputs": [], "source": [ - "plt.plot(r0tel.waveform[0,brightest_pixel], label=\"channel 0 (high-gain)\")\n", - "plt.plot(r0tel.waveform[1,brightest_pixel], label=\"channel 1 (low-gain)\")\n", + "plt.plot(r0tel.waveform[0, brightest_pixel], label=\"channel 0 (high-gain)\")\n", + "plt.plot(r0tel.waveform[1, brightest_pixel], label=\"channel 1 (low-gain)\")\n", "plt.legend()" ] }, @@ -159,9 +155,12 @@ "source": [ "from ipywidgets import interact\n", "\n", - "@interact\n", - "def view_waveform(chan=0, pix_id=brightest_pixel):\n", - " plt.plot(r0tel.waveform[chan, pix_id])" + "n_channels, n_pixels, n_samples = r0tel.waveform.shape\n", + "\n", + "\n", + "@interact(channel=(0, n_channels - 1), pixel=(0, n_pixels - 1))\n", + "def view_waveform(chan=0, pixel=brightest_pixel):\n", + " plt.plot(r0tel.waveform[chan, pixel])" ] }, { @@ -187,7 +186,7 @@ "metadata": {}, "outputs": [], "source": [ - "subarray = source.subarray " + "subarray = source.subarray" ] }, { @@ -223,7 +222,7 @@ "metadata": {}, "outputs": [], "source": [ - "subarray.tel[2]" + "subarray.tel[tel_id]" ] }, { @@ -232,7 +231,7 @@ "metadata": {}, "outputs": [], "source": [ - "subarray.tel[2].camera" + "subarray.tel[tel_id].camera" ] }, { @@ -241,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "subarray.tel[2].optics" + "subarray.tel[tel_id].optics" ] }, { @@ -250,7 +249,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel = subarray.tel[2]" + "tel = subarray.tel[tel_id]" ] }, { @@ -325,14 +324,16 @@ "outputs": [], "source": [ "disp = CameraDisplay(tel.camera.geometry)\n", - "disp.image = r0tel.waveform[0,:,10] # display channel 0, sample 0 (try others like 10)" + "disp.image = r0tel.waveform[\n", + " 0, :, 10\n", + "] # display channel 0, sample 0 (try others like 10)" ] }, { "cell_type": "markdown", "metadata": 
{}, "source": [ - " ** aside: ** show demo using a CameraDisplay in interactive mode in ipython rather than notebook" + " **aside:** show demo using a CameraDisplay in interactive mode in ipython rather than notebook" ] }, { @@ -367,8 +368,7 @@ "outputs": [], "source": [ "for event in EventSource(path, max_events=5):\n", - " calib(event) # fills in r1, dl0, and dl1\n", - " print(event.dl1.tel.keys())" + " calib(event) # fills in r1, dl0, and dl1" ] }, { @@ -377,7 +377,8 @@ "metadata": {}, "outputs": [], "source": [ - "event.dl1.tel[3]" + "tel_event = event.tel[tel_id]\n", + "tel_event.dl1" ] }, { @@ -386,7 +387,7 @@ "metadata": {}, "outputs": [], "source": [ - "dl1tel = event.dl1.tel[3]" + "dl1tel = tel_event.dl1" ] }, { @@ -395,7 +396,7 @@ "metadata": {}, "outputs": [], "source": [ - "dl1tel.image.shape # note this will be gain-selected in next version, so will be just 1D array of 1855" + "dl1tel.image.shape" ] }, { @@ -422,7 +423,7 @@ "metadata": {}, "outputs": [], "source": [ - "CameraDisplay(tel.camera.geometry, image=dl1tel.peak_time)" + "CameraDisplay(tel.camera.geometry, image=dl1tel.peak_time, cmap=\"RdBu_r\")" ] }, { @@ -458,17 +459,8 @@ "metadata": {}, "outputs": [], "source": [ - "CameraDisplay(tel.camera.geometry, image=mask)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cleaned = image.copy()\n", - "cleaned[~mask] = 0 " + "d = CameraDisplay(tel.camera.geometry, image=dl1tel.image)\n", + "d.highlight_pixels(mask)" ] }, { @@ -477,11 +469,12 @@ "metadata": {}, "outputs": [], "source": [ - "disp = CameraDisplay(tel.camera.geometry, image=cleaned)\n", - "disp.cmap = plt.cm.coolwarm\n", + "disp = CameraDisplay(tel.camera.geometry, image=image)\n", + "disp.highlight_pixels(mask, linewidth=3)\n", "disp.add_colorbar()\n", - "plt.xlim(0.5, 1.0)\n", - "plt.ylim(-1.0, 0.0)" + "\n", + "plt.xlim(-0.5, 0.5)\n", + "plt.ylim(-0.9, 0.0)" ] }, { @@ -490,7 +483,7 @@ "metadata": {}, "outputs": [], "source": [ - "params = hillas_parameters(tel.camera.geometry, cleaned)\n", + "params = hillas_parameters(tel.camera.geometry[mask], image[mask])\n", "print(params)" ] }, @@ -500,12 +493,13 @@ "metadata": {}, "outputs": [], "source": [ - "disp = CameraDisplay(tel.camera.geometry, image=cleaned)\n", - "disp.cmap = plt.cm.coolwarm\n", + "disp = CameraDisplay(tel.camera.geometry, image=image)\n", + "\n", "disp.add_colorbar()\n", - "plt.xlim(0.5, 1.0)\n", - "plt.ylim(-1.0, 0.0)\n", - "disp.overlay_moments(params, color='white', lw=2)" + "plt.xlim(-0.5, 0.5)\n", + "plt.ylim(-0.9, 0.0)\n", + "\n", + "disp.overlay_moments(params, color=\"white\", lw=2, n_sigma=2)" ] }, { @@ -557,8 +551,8 @@ "metadata": {}, "outputs": [], "source": [ - "data = utils.get_dataset_path(\"gamma_prod5.simtel.zst\") \n", - "source = EventSource(data) # remove the max_events limit to get more stats" + "data = utils.get_dataset_path(\"gamma_prod5.simtel.zst\")\n", + "source = EventSource(data) # remove the max_events limit to get more stats" ] }, { @@ -569,12 +563,15 @@ "source": [ "for event in source:\n", " calib(event)\n", - " \n", - " for tel_id, tel_data in event.dl1.tel.items():\n", + "\n", + " for tel_id, tel_event in event.tel.items():\n", " tel = source.subarray.tel[tel_id]\n", - " mask = tailcuts_clean(tel.camera.geometry, tel_data.image)\n", + "\n", + " mask = tailcuts_clean(tel.camera.geometry, tel_event.dl1.image)\n", " if np.count_nonzero(mask) > 0:\n", - " params = hillas_parameters(tel.camera.geometry[mask], tel_data.image[mask])" + " params = 
hillas_parameters(\n", + " tel.camera.geometry[mask], tel_event.dl1.image[mask]\n", + " )" ] }, { @@ -583,7 +580,7 @@ "metadata": {}, "outputs": [], "source": [ - "from ctapipe.io import HDF5TableWriter\n" + "from ctapipe.io import HDF5TableWriter" ] }, { @@ -592,16 +589,19 @@ "metadata": {}, "outputs": [], "source": [ - "with HDF5TableWriter(filename='hillas.h5', group_name='dl1', overwrite=True) as writer:\n", - " \n", - " source = EventSource(data, allowed_tels=[1,2,3,4], max_events=10)\n", + "with HDF5TableWriter(filename=\"hillas.h5\", group_name=\"dl1\", overwrite=True) as writer:\n", + "\n", + " source = EventSource(data, allowed_tels=[1, 2, 3, 4], max_events=10)\n", " for event in source:\n", " calib(event)\n", - " \n", - " for tel_id, tel_data in event.dl1.tel.items():\n", + "\n", + " for tel_id, tel_event in event.tel.items():\n", " tel = source.subarray.tel[tel_id]\n", - " mask = tailcuts_clean(tel.camera.geometry, tel_data.image)\n", - " params = hillas_parameters(tel.camera.geometry[mask], tel_data.image[mask])\n", + "\n", + " mask = tailcuts_clean(tel.camera.geometry, tel_event.dl1.image)\n", + " params = hillas_parameters(\n", + " tel.camera.geometry[mask], tel_event.dl1.image[mask]\n", + " )\n", " writer.write(\"hillas\", params)" ] }, @@ -627,21 +627,12 @@ "metadata": {}, "outputs": [], "source": [ - "import pandas as pd\n", + "from ctapipe.io import read_table\n", "\n", - "hillas = pd.read_hdf(\"hillas.h5\", key='/dl1/hillas')\n", + "hillas = read_table(\"hillas.h5\", \"/dl1/hillas\")\n", "hillas" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "_ = hillas.hist(figsize=(8,8))" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -666,7 +657,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.16" } }, "nbformat": 4,
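Note on the hands-on notebook above: the final cells switch from `pandas.read_hdf` to `ctapipe.io.read_table`, and the previous `hillas.hist()` overview cell is dropped. A minimal sketch of an equivalent overview plot from the returned astropy `Table` is shown below; the column names `intensity` and `length` are assumptions based on the Hillas parameter container fields actually written, so check `hillas.colnames` first.

```python
import matplotlib.pyplot as plt
import numpy as np
from ctapipe.io import read_table

# read back the table written by HDF5TableWriter in the hands-on notebook
hillas = read_table("hillas.h5", "/dl1/hillas")
print(hillas.colnames)  # verify which columns were actually written

# assumed column names; adjust to the names printed above
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].hist(np.asarray(hillas["intensity"]), bins=20)
axes[0].set_xlabel("intensity")
axes[1].hist(np.asarray(hillas["length"]), bins=20)
axes[1].set_xlabel("length")
plt.show()
```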