lint & format `weights/*.py` part 3 (#651)
* lint&format gabriel.py

* lint&format raster.py

* set_operations.py
jGaboardi authored Nov 14, 2023
1 parent bb16950 commit 6112e8d
Showing 3 changed files with 98 additions and 89 deletions.
44 changes: 24 additions & 20 deletions libpysal/weights/gabriel.py
@@ -1,7 +1,11 @@
from scipy.spatial import Delaunay as _Delaunay
import warnings

import numpy
import pandas
from scipy import sparse
from libpysal.weights import W, WSP
import pandas, numpy, warnings
from scipy.spatial import Delaunay as _Delaunay

from libpysal.weights import WSP, W

try:
from numba import njit
@@ -51,12 +55,13 @@ class Delaunay(W):

def __init__(self, coordinates, **kwargs):
try:
from numba import njit
from numba import njit # noqa F401
except ModuleNotFoundError:
warnings.warn(
"The numba package is used extensively in this module"
" to accelerate the computation of graphs. Without numba,"
" these computations may become unduly slow on large data."
" these computations may become unduly slow on large data.",
stacklevel=2,
)
edges, _ = self._voronoi_edges(coordinates)
ids = kwargs.get("ids")
@@ -144,7 +149,7 @@ def from_dataframe(cls, df, geom_col=None, ids=None, use_index=None, **kwargs):
f" but this delaunay triangulation is only well-defined for points."
f" Choose a method to convert your dataframe into points (like using"
f" the df.centroid) and use that to estimate this graph."
)
) from None


class Gabriel(Delaunay):
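The from_dataframe error above steers users toward converting polygon geometries into points (e.g. via df.centroid) before triangulating. A minimal sketch of that workflow, illustrative only and not part of this diff (assumes geopandas and a hypothetical polygons.shp):

import geopandas
from libpysal.weights.gabriel import Delaunay

gdf = geopandas.read_file("polygons.shp")      # hypothetical polygon data
points = gdf.set_geometry(gdf.centroid)        # the triangulation needs point geometries
w = Delaunay.from_dataframe(points)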
@@ -173,12 +178,13 @@ class Gabriel(Delaunay):

def __init__(self, coordinates, **kwargs):
try:
from numba import njit
from numba import njit # noqa F401
except ModuleNotFoundError:
warnings.warn(
"The numba package is used extensively in this module"
" to accelerate the computation of graphs. Without numba,"
" these computations may become unduly slow on large data."
" these computations may become unduly slow on large data.",
stacklevel=2,
)
edges, dt = self._voronoi_edges(coordinates)
droplist = _filter_gabriel(
@@ -198,19 +204,19 @@ def __init__(self, coordinates, **kwargs):
W.__init__(self, gabriel_neighbors, id_order=list(ids), **kwargs)


class Relative_Neighborhood(Delaunay):
class Relative_Neighborhood(Delaunay): # noqa N801
"""
Constructs the Relative Neighborhood graph from a set of points.
This graph is a subset of the Delaunay triangulation, where only
"relative neighbors" are retained. Further, it is a superset of
the Minimum Spanning Tree, with additional "relative neighbors"
introduced.
A relative neighbor pair of points i,j must be closer than the
maximum distance between i (or j) and each other point k.
This means that the points are at least as close to one another
as they are to any other point.
A relative neighbor pair of points i,j must be closer than the
maximum distance between i (or j) and each other point k.
This means that the points are at least as close to one another
as they are to any other point.
Parameters
----------
coordinates : array of points, (N,2)
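To make the relative-neighbor condition in the docstring above concrete: a pair (i, j) is kept only if no third point k is closer to both i and j than they are to each other. An illustrative check of that definition, assuming points is an (N, 2) numpy array (a sketch with a hypothetical helper name, not code from this commit):

import numpy

def is_relative_neighbor(i, j, points):
    """True if no other point k is closer to both points[i] and points[j]."""
    d_ij = numpy.linalg.norm(points[i] - points[j])
    for k in range(len(points)):
        if k in (i, j):
            continue
        d_ik = numpy.linalg.norm(points[i] - points[k])
        d_jk = numpy.linalg.norm(points[j] - points[k])
        if max(d_ik, d_jk) < d_ij:  # some k sits "between" i and j
            return False
    return True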
@@ -222,16 +228,17 @@ class Relative_Neighborhood(Delaunay):

def __init__(self, coordinates, binary=True, **kwargs):
try:
from numba import njit
from numba import njit # noqa F401
except ModuleNotFoundError:
warnings.warn(
"The numba package is used extensively in this module"
" to accelerate the computation of graphs. Without numba,"
" these computations may become unduly slow on large data."
" these computations may become unduly slow on large data.",
stacklevel=2,
)
edges, dt = self._voronoi_edges(coordinates)
output, dkmax = _filter_relativehood(edges, dt.points, return_dkmax=False)
row, col, data = zip(*output)
row, col, data = zip(*output, strict=True)
if binary:
data = numpy.ones_like(col, dtype=float)
sp = sparse.csc_matrix((data, (row, col))) # TODO: faster way than this?
@@ -288,7 +295,6 @@ def _filter_gabriel(edges, coordinates):
in order to construct the Gabriel graph.
"""
edge_pointer = 0
n = edges.max()
n_edges = len(edges)
to_drop = []
while edge_pointer < n_edges:
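For context, _filter_gabriel enforces the standard Gabriel criterion: an edge (i, j) is dropped whenever some point k falls inside the circle whose diameter is that edge. An illustrative form of the test, with a hypothetical helper name (a sketch of the definition, not this module's implementation):

import numpy

def violates_gabriel(i, j, k, points):
    """True if points[k] lies inside the circle with edge (i, j) as diameter."""
    d_ij2 = ((points[i] - points[j]) ** 2).sum()
    d_ik2 = ((points[i] - points[k]) ** 2).sum()
    d_jk2 = ((points[j] - points[k]) ** 2).sum()
    return d_ik2 + d_jk2 < d_ij2

points = numpy.array([[0.0, 0.0], [2.0, 0.0], [1.0, 0.1]])  # point 2 sits near the middle
print(violates_gabriel(0, 1, 2, points))                    # True: edge (0, 1) would be dropped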
@@ -328,9 +334,7 @@ def _filter_relativehood(edges, coordinates, return_dkmax=False):
3. for each edge of the delaunay (i,j), prune
if any dkmax is greater than d(i,j)
"""
edge_pointer = 0
n = edges.max()
n_edges = len(edges)
out = []
r = []
for edge in edges:
52 changes: 33 additions & 19 deletions libpysal/weights/raster.py
@@ -1,3 +1,5 @@
# ruff: noqa: B006, N802

from .util import lat2SW
from .weights import WSP, W
import numpy as np
@@ -18,7 +20,6 @@ def intercepted_function(f, *f_args, **f_kwargs):

return intercepted_function


else:
from ..common import jit

@@ -51,7 +52,8 @@ def da2W(
coords_labels : dictionary
Pass dimension labels for coordinates and layers if they do not
belong to default dimensions, which are (band/time, y/lat, x/lon)
e.g. coords_labels = {"y_label": "latitude", "x_label": "longitude", "z_label": "year"}
e.g. coords_labels = {"y_label": "latitude",
"x_label": "longitude", "z_label": "year"}
Default is {} empty dictionary.
k : int
Order of contiguity, this will select all neighbors upto kth order.
@@ -78,7 +80,6 @@ def da2W(
Examples
--------
>>> from libpysal.weights.raster import da2W, testDataArray
>>> da = testDataArray().rename(
{'band': 'layer', 'x': 'longitude', 'y': 'latitude'})
@@ -119,7 +120,8 @@ def da2W(
"xarray.DataArray (raster) object. This computation "
"can be very slow and not scale well. It is recommended, "
"if possible, to instead build WSP object, which is more "
"efficient and faster. You can do this by using da2WSP method."
"efficient and faster. You can do this by using da2WSP method.",
stacklevel=2,
)
wsp = da2WSP(da, criterion, z_value, coords_labels, k, include_nodata, n_jobs)
w = wsp.to_W(**kwargs)
@@ -154,7 +156,8 @@ def da2WSP(
coords_labels : dictionary
Pass dimension labels for coordinates and layers if they do not
belong to default dimensions, which are (band/time, y/lat, x/lon)
e.g. coords_labels = {"y_label": "latitude", "x_label": "longitude", "z_label": "year"}
e.g. coords_labels = {"y_label": "latitude",
"x_label": "longitude", "z_label": "year"}
Default is {} empty dictionary.
k : int
Order of contiguity, this will select all neighbors upto kth order.
@@ -225,7 +228,7 @@ def da2WSP(
da = da[slice_dict]

ser = da.to_series()
dtype = np.int32 if (shape[0] * shape[1]) < 46340 ** 2 else np.int64
dtype = np.int32 if (shape[0] * shape[1]) < 46340**2 else np.int64
if "nodatavals" in da.attrs and da.attrs["nodatavals"]:
mask = (ser != da.attrs["nodatavals"][0]).to_numpy()
ids = np.where(mask)[0]
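An aside on the dtype cutoff above (not part of the diff): 46340 is the largest integer whose square still fits in a signed 32-bit integer, so cell indices stay in int32 only while the raster's cell count cannot overflow it.

assert 46340 ** 2 == 2_147_395_600 <= 2**31 - 1  # still fits in int32
assert 46341 ** 2 == 2_147_488_281 > 2**31 - 1   # would overflow int32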
@@ -243,7 +246,8 @@ def da2WSP(
warn(
"numba cannot be imported, parallel processing "
"and include_nodata functionality will be disabled. "
"falling back to slower method"
"falling back to slower method",
stacklevel=2,
)
include_nodata = False
# Fallback method to build sparse matrix
@@ -292,7 +296,7 @@ def da2WSP(
# then eliminate zeros from the data. This changes the
# sparcity of the csr_matrix !!
if k > 1 and not include_nodata:
sw = sum(map(lambda x: sw ** x, range(1, k + 1)))
sw = sum(map(lambda x: sw**x, range(1, k + 1)))
sw.setdiag(0)
sw.eliminate_zeros()
sw.data[:] = np.ones_like(sw.data, dtype=np.int8)
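The power-sum above is the usual trick for order-k contiguity: summing the first through k-th powers of a binary adjacency matrix flags every pair reachable within k steps, after which the diagonal is zeroed and the data re-binarized. A self-contained sketch of the same idea (illustrative, not taken from this diff):

import numpy as np
from scipy import sparse

adj = sparse.csr_matrix(np.array([[0, 1, 0],
                                  [1, 0, 1],
                                  [0, 1, 0]], dtype=np.int8))
k = 2
higher = sum(adj ** x for x in range(1, k + 1))  # walks of length 1..k
higher.setdiag(0)                                # drop self-neighbors
higher.eliminate_zeros()
higher.data[:] = 1                               # binarize to 0/1 weights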
@@ -332,15 +336,15 @@ def w2da(data, w, attrs={}, coords=None):
>>> w = da2W(da, z_value=2)
>>> data = np.random.randint(0, 255, len(w.index))
>>> da1 = w2da(data, w)
"""
if not isinstance(w, W):
raise TypeError("w must be an instance of weights.W")
if hasattr(w, "index"):
da = _index2da(data, w.index, attrs, coords)
else:
raise AttributeError(
"This method requires `w` object to include `index` attribute that is built as a `pandas.MultiIndex` object."
"This method requires `w` object to include `index` "
"attribute that is built as a `pandas.MultiIndex` object."
)
return da

@@ -375,15 +379,15 @@ def wsp2da(data, wsp, attrs={}, coords=None):
>>> wsp = da2WSP(da, z_value=2)
>>> data = np.random.randint(0, 255, len(wsp.index))
>>> da1 = w2da(data, wsp)
"""
if not isinstance(wsp, WSP):
raise TypeError("wsp must be an instance of weights.WSP")
if hasattr(wsp, "index"):
da = _index2da(data, wsp.index, attrs, coords)
else:
raise AttributeError(
"This method requires `wsp` object to include `index` attribute that is built as a `pandas.MultiIndex` object."
"This method requires `wsp` object to include `index` "
"attribute that is built as a `pandas.MultiIndex` object."
)
return da

@@ -415,7 +419,9 @@ def testDataArray(shape=(3, 4, 4), time=False, rand=False, missing_vals=True):
try:
from xarray import DataArray
except ImportError:
raise ModuleNotFoundError("xarray must be installed to use this functionality")
raise ModuleNotFoundError(
"xarray must be installed to use this functionality"
) from None
if not rand:
np.random.seed(12345)
coords = {}
@@ -457,7 +463,8 @@ def _da_checker(da, z_value, coords_labels):
coords_labels : dictionary
Pass dimension labels for coordinates and layers if they do not
belong to default dimensions, which are (band/time, y/lat, x/lon)
e.g. coords_labels = {"y_label": "latitude", "x_label": "longitude", "z_label": "year"}
e.g. coords_labels = {"y_label": "latitude","
"x_label": "longitude", "z_label": "year"}
Default is {} empty dictionary.
Returns
@@ -470,7 +477,9 @@ def _da_checker(da, z_value, coords_labels):
try:
from xarray import DataArray
except ImportError:
raise ModuleNotFoundError("xarray must be installed to use this functionality")
raise ModuleNotFoundError(
"xarray must be installed to use this functionality"
) from None

if not isinstance(da, DataArray):
raise TypeError("da must be an instance of xarray.DataArray")
@@ -502,7 +511,10 @@ def _da_checker(da, z_value, coords_labels):
z_id = 1
if z_value is None:
if da.sizes[def_labels["z_label"]] != 1:
warn("Multiple layers detected. Using first layer as default.")
warn(
"Multiple layers detected. Using first layer as default.",
stacklevel=2,
)
else:
z_id += tuple(da[def_labels["z_label"]]).index(z_value)
else:
@@ -533,7 +545,9 @@ def _index2da(data, index, attrs, coords):
try:
from xarray import DataArray
except ImportError:
raise ModuleNotFoundError("xarray must be installed to use this functionality")
raise ModuleNotFoundError(
"xarray must be installed to use this functionality"
) from None

data = np.array(data).flatten()
idx = index
@@ -555,7 +569,7 @@ def _index2da(data, index, attrs, coords):
data_complete = np.empty(shape, data.dtype)
data_complete[indexer] = data
coords = {}
for dim, lev in zip(dims, idx.levels):
for dim, lev in zip(dims, idx.levels, strict=True):
coords[dim] = lev.to_numpy()
else:
fill = attrs["nodatavals"][0] if "nodatavals" in attrs else 0
@@ -851,7 +865,7 @@ def _parSWbuilder(
delayed(_compute_chunk)(nrows, ncols, *ids, id_map, criterion, k, dtype)
for ids in chunk
)
rows, cols = zip(*worker_out)
rows, cols = zip(*worker_out, strict=True)
rows = np.concatenate(rows)
cols = np.concatenate(cols)
data = np.ones_like(rows, dtype=np.int8)
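The zip/concatenate step above is the standard way to merge per-chunk worker output: each worker returns a (rows, cols) pair, zip(*...) transposes that list of pairs into one tuple of row arrays and one of column arrays, and concatenation produces the flat index arrays for the sparse matrix. A small illustration (not from this diff; strict=True needs Python 3.10+):

import numpy as np

worker_out = [(np.array([0, 1]), np.array([1, 0])),   # chunk 1: (rows, cols)
              (np.array([2]), np.array([3]))]         # chunk 2: (rows, cols)
rows, cols = zip(*worker_out, strict=True)            # transpose into two tuples
rows = np.concatenate(rows)                           # -> array([0, 1, 2])
cols = np.concatenate(cols)                           # -> array([1, 0, 3])
data = np.ones_like(rows, dtype=np.int8)              # one weight per (row, col) pair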
