From d76e5a35fc833fad77dad1468131b223656056d5 Mon Sep 17 00:00:00 2001 From: Peter Teuben Date: Wed, 20 Jul 2016 20:17:05 -0400 Subject: [PATCH] new before the demo --- yt-demo/example1.ipynb | 102 ++++++++++ yt-demo/example2.ipynb | 103 ++++++++++ yt-demo/example3.ipynb | 115 +++++++++++ yt-demo/example4.ipynb | 348 ++++++++++++++++++++++++++++++++ yt-demo/parallel_time_series.py | 46 +++++ 5 files changed, 714 insertions(+) create mode 100644 yt-demo/example1.ipynb create mode 100644 yt-demo/example2.ipynb create mode 100644 yt-demo/example3.ipynb create mode 100644 yt-demo/example4.ipynb create mode 100644 yt-demo/parallel_time_series.py diff --git a/yt-demo/example1.ipynb b/yt-demo/example1.ipynb new file mode 100644 index 0000000..413349b --- /dev/null +++ b/yt-demo/example1.ipynb @@ -0,0 +1,102 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "import yt\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

1. Load the `\"virgo_novisc.0054.gdf\"` dataset from the `\"data\"` directory.
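A possible solution sketch (the `../data/` relative path is an assumption; point `yt.load` at wherever the dataset actually lives):

```python
ds = yt.load("../data/virgo_novisc.0054.gdf")
```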

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

2. Create a sphere object centered on the domain center. Use the shorthand `\"c\"` for the center. Give it a radius of 200 kpc.
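One way to do this, assuming the dataset loaded in step 1 is named `ds`:

```python
sp = ds.sphere("c", (200.0, "kpc"))
```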

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

3. Create a second sphere object at the location `[0.1, -0.2, 0.3]`. Give it a radius of 0.4 Mpc.
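For example, again assuming the dataset object is named `ds`:

```python
sp2 = ds.sphere([0.1, -0.2, 0.3], (0.4, "Mpc"))
```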

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

4. Query the sphere object for the density field and calculate its mean using `np.mean`.
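A sketch of one possible answer, assuming the sphere from step 2 is named `sp` and that the dataset exposes the usual gas "density" field:

```python
dens = sp["density"]
print(np.mean(dens))
```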

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/yt-demo/example2.ipynb b/yt-demo/example2.ipynb new file mode 100644 index 0000000..0798842 --- /dev/null +++ b/yt-demo/example2.ipynb @@ -0,0 +1,103 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "import yt\n", + "import yt.units as u\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

1. Load the Enzo dataset `\"DD0046/DD0046\"` from the `\"data\"` directory, and create a sphere with a radius of 1.0 Mpc centered on the point of maximum density, using the `\"max\"` shorthand.
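A possible solution sketch (the `../data/` relative path is an assumption; adjust it to the actual location of the dataset):

```python
ds = yt.load("../data/DD0046/DD0046")
sp = ds.sphere("max", (1.0, "Mpc"))
```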

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

2. Query the `\"velocity_x\"` field from the sphere and convert it to units of miles per hour (`\"mile/hr\"`).
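For example, using the sphere `sp` from step 1 (the query returns a unit-aware array, so the conversion is a single call):

```python
vx = sp["velocity_x"]
print(vx.in_units("mile/hr"))
```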

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

3. Use the `\"cell_mass\"` and `\"velocity_y\"` fields to compute the kinetic energy carried by the y-component of the velocity, assuming $KE_y = \\frac{1}{2}mv_y^2$. Print it in units of `\"erg\"`, `\"J\"`, `\"keV\"`, and `\"ft*lbf\"`.
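One way to sketch this, assuming the sphere from step 1 is named `sp`:

```python
# Kinetic energy in the y-component, cell by cell
ke_y = 0.5 * sp["cell_mass"] * sp["velocity_y"]**2
for unit in ["erg", "J", "keV", "ft*lbf"]:
    print(ke_y.in_units(unit))
```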

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

4. Use the `weighted_average_quantity` derived quantity to compute a weighted average of the temperature field `\"kT\"`, using `\"cell_mass\"` as the weight field.
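This mirrors the `weighted_average_quantity` call used in the parallel time-series script later in this patch; assuming the sphere is named `sp`:

```python
kT_avg = sp.quantities.weighted_average_quantity("kT", "cell_mass")
print(kT_avg)
```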

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/yt-demo/example3.ipynb b/yt-demo/example3.ipynb new file mode 100644 index 0000000..a8bff25 --- /dev/null +++ b/yt-demo/example3.ipynb @@ -0,0 +1,115 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "import yt\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

1. Load the `\"virgo_novisc.0054.gdf\"` dataset from the `\"data\"` directory.

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ds = yt.load(\"../data/virgo_novisc.0054.gdf\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

2. Create a `SlicePlot` of temperature along the y-axis, with a width of 0.4 Mpc. Change the colormap to \"algae\". Annotate the magnetic field vectors.

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "slc = yt.SlicePlot(ds, \"y\", [\"temperature\"], width=(0.4, \"Mpc\"))\n", + "slc.set_cmap(\"temperature\", \"algae\")\n", + "slc.annotate_magnetic_field()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

3. What happens if you supply a vector, say `[0.1, -0.3, 0.4]`, to the second argument of `SlicePlot`? Try it.

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "slc = yt.SlicePlot(ds, [0.1, -0.3, 0.4], [\"temperature\"], width=(0.4, \"Mpc\"))\n", + "slc.set_cmap(\"temperature\", \"algae\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

4. Now make a `ProjectionPlot` of the `\"velocity_x\"` field along the x-axis, weighted by the `\"density\"` field. Use the `set_log` method to switch the plot to linear scaling, and change the units to km/s.

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "prj = yt.ProjectionPlot(ds, 'x', [\"velocity_x\"], weight_field=\"density\", width=(0.4, \"Mpc\"))\n", + "prj.set_log(\"velocity_x\", False)\n", + "prj.set_unit(\"velocity_x\", \"km/s\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/yt-demo/example4.ipynb b/yt-demo/example4.ipynb new file mode 100644 index 0000000..dd25423 --- /dev/null +++ b/yt-demo/example4.ipynb @@ -0,0 +1,348 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Even if your data is not strictly related to fields commonly used in\n", + "astrophysical codes or your code is not supported yet, you can still feed it to\n", + "yt to use its advanced visualization and analysis facilities. The only\n", + "requirement is that your data can be represented as three-dimensional NumPy arrays with a consistent grid structure. What follows are some common examples of loading in generic array data that you may find useful. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generic Unigrid Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The simplest case is that of a single grid of data spanning the domain, with one or more fields. The data could be generated from a variety of sources; we'll just give three common examples:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Data generated \"on-the-fly\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The most common example is that of data that is generated in memory from the currently running script or notebook. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import yt\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we'll just create a 3-D array of random floating-point data using NumPy:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "arr = np.random.random(size=(64,64,64))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To load this data into yt, we need associate it with a field. The `data` dictionary consists of one or more fields, each consisting of a tuple of a NumPy array and a unit string. 
Then, we can call `load_uniform_grid`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data = dict(density = (arr, \"g/cm**3\"))\n", + "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n", + "ds = yt.load_uniform_grid(data, arr.shape, length_unit=\"Mpc\", bbox=bbox, nprocs=64)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`load_uniform_grid` takes the following arguments and optional keywords:\n", + "\n", + "* `data` : This is a dict of numpy arrays, where the keys are the field names\n", + "* `domain_dimensions` : The domain dimensions of the unigrid\n", + "* `length_unit` : The unit that corresponds to `code_length`, can be a string, tuple, or floating-point number\n", + "* `bbox` : Size of computational domain in units of `code_length`\n", + "* `nprocs` : If greater than 1, will create this number of subarrays out of data\n", + "* `sim_time` : The simulation time in seconds\n", + "* `mass_unit` : The unit that corresponds to `code_mass`, can be a string, tuple, or floating-point number\n", + "* `time_unit` : The unit that corresponds to `code_time`, can be a string, tuple, or floating-point number\n", + "* `velocity_unit` : The unit that corresponds to `code_velocity`\n", + "* `magnetic_unit` : The unit that corresponds to `code_magnetic`, i.e. the internal units used to represent magnetic field strengths.\n", + "* `periodicity` : A tuple of booleans that determines whether the data will be treated as periodic along each axis\n", + "\n", + "This example creates a yt-native dataset `ds` that will treat your array as a\n", + "density field in cubic domain of 3 Mpc edge size and simultaneously divide the \n", + "domain into `nprocs` = 64 chunks, so that you can take advantage\n", + "of the underlying parallelism. \n", + "\n", + "The optional unit keyword arguments allow for the default units of the dataset to be set. They can be:\n", + "* A string, e.g. `length_unit=\"Mpc\"`\n", + "* A tuple, e.g. `mass_unit=(1.0e14, \"Msun\")`\n", + "* A floating-point value, e.g. `time_unit=3.1557e13`\n", + "\n", + "In the latter case, the unit is assumed to be cgs. \n", + "\n", + "The resulting `ds` functions exactly like a dataset like any other yt can handle--it can be sliced, and we can show the grid boundaries:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n", + "slc.set_cmap(\"density\", \"Blues\")\n", + "slc.annotate_grids(cmap=None)\n", + "slc.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Particle fields are detected as one-dimensional fields. The number of\n", + "particles is set by the `number_of_particles` key in\n", + "`data`. 
Particle fields are then added as one-dimensional arrays in\n", + "a similar manner as the three-dimensional grid fields:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n", + "posy_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n", + "posz_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n", + "data = dict(density = (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n", + " number_of_particles = 10000,\n", + " particle_position_x = (posx_arr, 'code_length'), \n", + " particle_position_y = (posy_arr, 'code_length'),\n", + " particle_position_z = (posz_arr, 'code_length'))\n", + "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n", + "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n", + " bbox=bbox, nprocs=4)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example only the particle position fields have been assigned. `number_of_particles` must be the same size as the particle\n", + "arrays. If no particle arrays are supplied then `number_of_particles` is assumed to be zero. Take a slice, and overlay particle positions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n", + "slc.set_cmap(\"density\", \"Blues\")\n", + "slc.annotate_particles(0.25, p_size=12.0, col=\"Red\")\n", + "slc.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generic AMR Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In a similar fashion to unigrid data, data gridded into rectangular patches at varying levels of resolution may also be loaded into yt. In this case, a list of grid dictionaries should be provided, with the requisite information about each grid's properties. This example sets up two grids: a top-level grid (`level == 0`) covering the entire domain and a subgrid at `level == 1`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "grid_data = [\n", + " dict(left_edge = [0.0, 0.0, 0.0],\n", + " right_edge = [1.0, 1.0, 1.0],\n", + " level = 0,\n", + " dimensions = [32, 32, 32]), \n", + " dict(left_edge = [0.25, 0.25, 0.25],\n", + " right_edge = [0.75, 0.75, 0.75],\n", + " level = 1,\n", + " dimensions = [32, 32, 32])\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll just fill each grid with random density data, with a scaling with the grid refinement level." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "for g in grid_data: \n", + " g[\"density\"] = (np.random.random(g[\"dimensions\"]) * 2**g[\"level\"], \"g/cm**3\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Particle fields are supported by adding 1-dimensional arrays to each `grid` and\n", + "setting the `number_of_particles` key in each `grid`'s dict. 
If a grid has no particles, set `number_of_particles = 0`, but the particle fields still have to be defined since they are defined elsewhere; set them to empty NumPy arrays:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n", + "grid_data[0][\"particle_position_x\"] = (np.array([]), \"code_length\") # No particles, so set empty arrays\n", + "grid_data[0][\"particle_position_y\"] = (np.array([]), \"code_length\")\n", + "grid_data[0][\"particle_position_z\"] = (np.array([]), \"code_length\")\n", + "grid_data[1][\"number_of_particles\"] = 1000\n", + "grid_data[1][\"particle_position_x\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")\n", + "grid_data[1][\"particle_position_y\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")\n", + "grid_data[1][\"particle_position_z\"] = (np.random.uniform(low=0.25, high=0.75, size=1000), \"code_length\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, call `load_amr_grids`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ds = yt.load_amr_grids(grid_data, [32, 32, 32])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`load_amr_grids` also takes the same keywords `bbox` and `sim_time` as `load_uniform_grid`. We could have also specified the length, time, velocity, and mass units in the same manner as before. Let's take a slice:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n", + "slc.annotate_particles(0.25, p_size=15.0, col=\"Pink\")\n", + "slc.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Caveats for Loading Generic Array Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Particles may be difficult to integrate.\n", + "* Data must already reside in memory before loading it in to yt, whether it is generated at runtime or loaded from disk. \n", + "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n", + "* No consistency checks are performed on the hierarchy\n", + "* Consistency between particle positions and grids is not checked; `load_amr_grids` assumes that particle positions associated with one grid are not bounded within another grid at a higher level, so this must be ensured by the user prior to loading the grid data. " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/yt-demo/parallel_time_series.py b/yt-demo/parallel_time_series.py new file mode 100644 index 0000000..c892067 --- /dev/null +++ b/yt-demo/parallel_time_series.py @@ -0,0 +1,46 @@ +import numpy as np +import matplotlib.pyplot as plt +import yt +# If you want to run in parallel this *must* be the first +# thing after importing yt! 
+yt.enable_parallelism() + +# We first create a time series object of the data files, using a +# "wildcard" syntax + +ts = yt.DatasetSeries("../data/WindTunnel/windtunnel_4lev_hdf5_plt_cnt_*") + +# This is a dictionary object which we'll use to store the results in +my_storage = {} + +# Now we loop over the time series, calculating the average x-velocity and +# storing the result. We also make slice plots of every snapshot, +# annotating the grid lines on top. + +for sto, ds in ts.piter(storage=my_storage): + dd = ds.all_data() # This is an object giving us all the data + # This line computes the average x-velocity weighted by the + # density + vx = dd.quantities.weighted_average_quantity("velocity_x", "density") + + # We now store both the filename and + # the result in the storage. + sto.result_id = str(ds) # Taking str() of ds gives us the filename + sto.result = (ds.current_time, vx) + + # Make a slice plot, setting the z-lim of the density field, + # drawing the grid lines on top, and saving it. + slc = yt.SlicePlot(ds, "z", ["density"]) + slc.set_zlim("density", 0.8, 8.0) + slc.set_log("density", False) + slc.annotate_grids() + slc.save() + +# Now, let processor 0 gather the values of the time and velocity +# and save them to a plot. +if yt.is_root(): + time = np.array([v[0] for v in my_storage.values()]) + vx = np.array([v[1] for v in my_storage.values()]) + idxs = np.argsort(time) + plt.plot(time[idxs], vx[idxs]) + plt.savefig("vx_vs_time.png")
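+
+# Note (usage sketch, not part of the original demo): to actually exercise the
+# parallelism enabled above, launch this script under MPI, for example:
+#
+#     mpirun -np 4 python parallel_time_series.py
+#
+# This assumes mpi4py and an MPI implementation are installed; running
+# "python parallel_time_series.py" directly still works, just in serial.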