diff --git a/igneous/task_creation/common.py b/igneous/task_creation/common.py
index 5600fe5..17ed399 100644
--- a/igneous/task_creation/common.py
+++ b/igneous/task_creation/common.py
@@ -226,6 +226,8 @@ def set_encoding(cv, mip, encoding, encoding_level):
   if encoding == "jpeg":
     scale["jpeg_quality"] = encoding_level
+  elif encoding == "jpegxl":
+    scale["jpegxl_quality"] = encoding_level
   elif encoding == "png":
     scale["png_level"] = encoding_level
   elif encoding == "fpzip":
diff --git a/igneous/task_creation/image.py b/igneous/task_creation/image.py
index 9e06473..94ba786 100644
--- a/igneous/task_creation/image.py
+++ b/igneous/task_creation/image.py
@@ -406,14 +406,6 @@ def create_sharded_image_info(
   # maximum amount of information in the morton codes
   grid_size = np.ceil(Vec(*dataset_size) / Vec(*chunk_size)).astype(np.int64)
   max_bits = sum([ math.ceil(math.log2(size)) for size in grid_size ])
-  if max_bits > 64:
-    raise ValueError(
-      f"{max_bits}, more than a 64-bit integer, "
-      "would be required to describe the chunk positions "
-      "in this dataset. Try increasing the chunk size or "
-      "increasing dataset bounds."
-      f"Dataset Size: {dataset_size} Chunk Size: {chunk_size}"
-    )
 
   chunks_per_shard = math.ceil(uncompressed_shard_bytesize / (chunk_voxels * byte_width))
   chunks_per_shard = 2 ** int(math.log2(chunks_per_shard))
@@ -423,7 +415,7 @@
 
   # approximate, would need to account for rounding effects to be exact
   # rounding is corrected for via max_bits - pre - mini below.
-  num_shards = num_chunks / chunks_per_shard 
+  num_shards = num_chunks / chunks_per_shard
 
   def update_bits():
     shard_bits = int(math.ceil(math.log2(num_shards)))
@@ -465,7 +457,25 @@ def update_bits():
   # in the morton codes, so if there's any slack from rounding, the
   # remainder goes into shard bits.
   preshift_bits = preshift_bits - minishard_bits
-  shard_bits = max_bits - preshift_bits - minishard_bits
+  # Flat datasets (one chunk thick in z) carry no information in the z
+  # morton bits, so widen preshift_bits, counting only the non-z (every
+  # third) bit positions, and let max_bits grow to match.
+  if dataset_size[2] == chunk_size[2]:
+    additional_bits = (preshift_bits // 3)
+    i = 0
+    while i < additional_bits:
+      max_bits += 1
+      preshift_bits += 1
+      if preshift_bits % 3 != 0:
+        i += 1
+
+  shard_bits = max(max_bits - preshift_bits - minishard_bits, 0)
+
+  if max_bits > 64:
+    raise ValueError(
+      f"{max_bits}, more than a 64-bit integer, "
+      "would be required to describe the chunk positions "
+      "in this dataset. Try increasing the chunk size or "
+      "decreasing the dataset bounds. "
+      f"Dataset Size: {dataset_size} Chunk Size: {chunk_size}"
+    )
 
   if preshift_bits < 0:
     raise ValueError(f"Preshift bits cannot be negative. ({shard_bits}, {minishard_bits}, {preshift_bits}), total info: {max_bits} bits")
diff --git a/igneous/task_creation/skeleton.py b/igneous/task_creation/skeleton.py
index bb7d4a6..e99ab06 100644
--- a/igneous/task_creation/skeleton.py
+++ b/igneous/task_creation/skeleton.py
@@ -15,7 +15,7 @@ from cloudvolume import CloudVolume
 from cloudvolume.lib import Vec, Bbox, max2, min2, xyzrange, find_closest_divisor, yellow, jsonify
 from cloudvolume.datasource.precomputed.sharding import ShardingSpecification
 
-from cloudfiles import CloudFiles
+from cloudfiles import CloudFiles, CloudFile
 
 from igneous.tasks import (
   SkeletonTask, UnshardedSkeletonMergeTask,
@@ -58,6 +58,7 @@ def bounds_from_mesh(
     bbxes.append(bounds)
 
   bounds = Bbox.expand(*bbxes)
+  bounds = bounds.expand_to_chunk_size(shape, offset=vol.voxel_offset)
   return Bbox.clamp(bounds, vol.bounds)
 
 def create_skeletonizing_tasks(
@@ -71,9 +72,11 @@
     parallel=1, fill_missing=False,
     sharded=False, frag_path=None, spatial_index=True,
     synapses=None, num_synapses=None,
-    dust_global=False,
+    dust_global=False, fix_autapses=False,
     cross_sectional_area=False,
     cross_sectional_area_smoothing_window=5,
+    timestamp=None,
+    root_ids_cloudpath=None,
   ):
   """
   Assign tasks with one voxel overlap in a regular grid
@@ -121,6 +124,17 @@
   fix_borders: Allows trivial merging of single overlap tasks. You'll
     only want to set this to false if you're working on single or
     non-overlapping volumes.
+  fix_autapses: Only possible for graphene volumes. Uses PyChunkGraph (PCG)
+    information to fix autapses (when a neuron synapses onto itself). This
+    requires splitting contacts between the edges of two touching voxels.
+    The algorithm for doing this requires much more memory.
+
+    This works by comparing the PCG L2 and root layers. L1 is watershed. L2
+    contains only the connections within an atomic chunk. The root layer
+    provides the global connectivity. Autapses can be distinguished at the
+    L2 level; above that, they may not be (and certainly not at the root
+    level). We extract the voxel connectivity graph from L2 and perform
+    the overall trace at root connectivity.
   dust_threshold: don't skeletonize labels smaller than this number of voxels
     as seen by a single task.
   dust_global: Use global voxel counts for the dust threshold instead of from
@@ -155,10 +169,24 @@
     to the total computation.)
   cross_sectional_area_smoothing_window: Perform a rolling average of the
     normal vectors across these many vectors.
+  timestamp: for graphene volumes only, you can specify the timepoint to use.
+  root_ids_cloudpath: for graphene volumes, if you have a materialized archive
+    of your desired timepoint, you can use this path for fetching root ID
+    segmentation as it is far more efficient.
   """
   shape = Vec(*shape)
   vol = CloudVolume(cloudpath, mip=mip, info=info)
 
+  if fix_autapses:
+    if vol.meta.path.format != "graphene":
+      raise ValueError("fix_autapses can only be performed on graphene volumes.")
+
+    if not np.all(shape % vol.meta.graph_chunk_size == 0):
+      raise ValueError(
+        f"shape must be a multiple of the graph chunk size. Got: {shape}, "
+        f"{vol.meta.graph_chunk_size}"
+      )
+
   if dust_threshold > 0 and dust_global:
     cf = CloudFiles(cloudpath)
     vxctfile = cf.join(vol.key, 'stats', 'voxel_counts.mb')
@@ -201,6 +229,15 @@
 
   vol.skeleton.meta.commit_info()
 
+  if frag_path:
+    frag_info_path = CloudFiles(frag_path).join(frag_path, "info")
+    frag_info = CloudFile(frag_info_path).get_json()
+    if not frag_info:
+      CloudFile(frag_info_path).put_json(vol.skeleton.meta.info)
+    elif 'scales' in frag_info:
+      frag_info_path = CloudFiles(frag_path).join(frag_path, vol.info["skeletons"], "info")
+      CloudFile(frag_info_path).put_json(vol.skeleton.meta.info)
+
   will_postprocess = bool(np.any(vol.bounds.size3() > shape))
   bounds = vol.bounds.clone()
@@ -247,8 +284,11 @@ def task(self, shape, offset):
         spatial_grid_shape=shape.clone(), # used for writing index filenames
         synapses=bbox_synapses,
         dust_global=dust_global,
+        fix_autapses=bool(fix_autapses),
+        timestamp=timestamp,
         cross_sectional_area=bool(cross_sectional_area),
         cross_sectional_area_smoothing_window=int(cross_sectional_area_smoothing_window),
+        root_ids_cloudpath=root_ids_cloudpath,
       )
 
     def synapses_for_bbox(self, shape, offset):
@@ -292,8 +332,11 @@ def on_finish(self):
       'spatial_index': bool(spatial_index),
       'synapses': bool(synapses),
       'dust_global': bool(dust_global),
+      'fix_autapses': bool(fix_autapses),
+      'timestamp': timestamp,
       'cross_sectional_area': bool(cross_sectional_area),
       'cross_sectional_area_smoothing_window': int(cross_sectional_area_smoothing_window),
+      'root_ids_cloudpath': root_ids_cloudpath,
     },
     'by': operator_contact(),
     'date': strftime('%Y-%m-%d %H:%M %Z'),
diff --git a/igneous/tasks/image/image.py b/igneous/tasks/image/image.py
index 2e72cd6..b093148 100755
--- a/igneous/tasks/image/image.py
+++ b/igneous/tasks/image/image.py
@@ -70,7 +70,7 @@ def downsample_and_upload(
   if max_mips is not None:
     factors = factors[:max_mips]
 
-  if len(factors) == 0:
+  if len(factors) == 0 and max_mips:
     print("No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}".format(
       image.shape, ds_shape, vol.volume_size, bounds)
     )
@@ -327,6 +327,9 @@ def execute(self):
       cts = np.bincount(img2d)
       levels[0:len(cts)] += cts.astype(np.uint64)
 
+    if len(bboxes) == 0:
+      return
+
    covered_area = sum([bbx.volume() for bbx in bboxes])
    bboxes = [(bbox.volume(), bbox.size3()) for bbox in bboxes]
@@ -376,7 +379,8 @@ def select_bounding_boxes(self, dataset_bounds):
       patch_start += self.offset
       bbox = Bbox(patch_start, patch_start + sample_shape.size3())
       bbox = Bbox.clamp(bbox, dataset_bounds)
-      bboxes.append(bbox)
+      if not bbox.subvoxel():
+        bboxes.append(bbox)
     return bboxes
 
   @queueable
diff --git a/igneous/tasks/skeleton.py b/igneous/tasks/skeleton.py
index cfe80f7..30fae46 100644
--- a/igneous/tasks/skeleton.py
+++ b/igneous/tasks/skeleton.py
@@ -1,4 +1,4 @@
-from typing import Optional,Sequence,Dict
+from typing import Optional, Sequence, Dict, List
 
 from functools import reduce
 import itertools
@@ -21,9 +21,10 @@ import cloudvolume
 from cloudvolume import CloudVolume, Skeleton, paths
-from cloudvolume.lib import Vec, Bbox, sip
+from cloudvolume.lib import Vec, Bbox, sip, xyzrange
 from cloudvolume.datasource.precomputed.sharding import synthesize_shard_files
 
+import cc3d
 import fastremap
 import kimimaro
@@ -39,15 +40,6 @@ def filename_to_segid(filename):
   segid, = matches.groups()
   return int(segid)
 
-def skeldir(cloudpath):
-  cf = CloudFiles(cloudpath)
-  info = cf.get_json('info')
-
-  skel_dir = 'skeletons/'
-  if 'skeletons' in info:
-    skel_dir = info['skeletons']
-  return skel_dir
-
 def strip_integer_attributes(skeletons):
   for skel in skeletons:
     skel.extra_attributes = [
@@ -89,6 +81,9 @@ def __init__(
     cross_sectional_area_shape_delta:int = 150,
     dry_run:bool = False,
     strip_integer_attributes:bool = True,
+    fix_autapses:bool = False,
+    timestamp:Optional[int] = None,
+    root_ids_cloudpath:Optional[str] = None,
   ):
     super().__init__(
       cloudpath, shape, offset, mip,
@@ -101,14 +96,25 @@
       spatial_grid_shape, synapses,
       bool(dust_global),
       bool(cross_sectional_area), int(cross_sectional_area_smoothing_window),
       int(cross_sectional_area_shape_delta),
-      bool(dry_run), bool(strip_integer_attributes)
+      bool(dry_run), bool(strip_integer_attributes),
+      bool(fix_autapses), timestamp,
+      root_ids_cloudpath,
     )
+    if isinstance(self.frag_path, str):
+      self.frag_path = cloudfiles.paths.normalize(self.frag_path)
     self.bounds = Bbox(offset, Vec(*shape) + Vec(*offset))
     self.index_bounds = Bbox(offset, Vec(*spatial_grid_shape) + Vec(*offset))
 
   def execute(self):
+    # For graphene volumes, if we've materialized the root IDs
+    # into a static archive, let's use that because it's way more
+    # efficient for fetching root IDs.
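+    # (When root_ids_cloudpath is unset, we fall back to querying the
+    # graphene server, which resolves root IDs at self.timestamp on
+    # each request.)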
+    cloudpath = self.cloudpath
+    if self.root_ids_cloudpath:
+      cloudpath = self.root_ids_cloudpath
+
     vol = CloudVolume(
-      self.cloudpath, mip=self.mip,
+      cloudpath, mip=self.mip,
       info=self.info, cdn_cache=False,
       parallel=self.parallel,
       fill_missing=self.fill_missing,
@@ -116,10 +122,24 @@ def execute(self):
     bbox = Bbox.clamp(self.bounds, vol.bounds)
     index_bbox = Bbox.clamp(self.index_bounds, vol.bounds)
 
-    path = skeldir(self.cloudpath)
-    path = os.path.join(self.cloudpath, path)
+    path = vol.info.get("skeletons", "skeletons")
+    if self.frag_path is None:
+      path = vol.meta.join(self.cloudpath, path)
+    else:
+      # if the path is to a volume root, follow the info instructions,
+      # otherwise place the files exactly where frag path says to
+      test_path = CloudFiles(self.frag_path).join(self.frag_path, "info")
+      test_info = CloudFile(test_path).get_json()
+      if test_info is not None and 'scales' in test_info:
+        path = CloudFiles(self.frag_path).join(self.frag_path, path)
+      else:
+        path = self.frag_path
 
-    all_labels = vol[ bbox.to_slices() ]
+    all_labels = vol.download(
+      bbox.to_slices(),
+      agglomerate=True,
+      timestamp=self.timestamp
+    )
     all_labels = all_labels[:,:,:,0]
 
     if self.mask_ids:
@@ -136,6 +156,10 @@
       dust_threshold = 0
       all_labels = self.apply_global_dust_threshold(vol, all_labels)
 
+    voxel_graph = None
+    if self.fix_autapses:
+      voxel_graph = self.voxel_connectivity_graph(vol, bbox, all_labels)
+
     skeletons = kimimaro.skeletonize(
       all_labels, self.teasar_params,
       object_ids=self.object_ids,
@@ -148,6 +172,7 @@
       fill_holes=self.fill_holes,
       parallel=self.parallel,
       extra_targets_after=extra_targets_after.keys(),
+      voxel_graph=voxel_graph,
     )
 
     del all_labels
@@ -180,17 +205,80 @@ def execute(self):
       return skeletons
 
     if self.sharded:
-      if self.frag_path:
-        self.upload_batch(vol, os.path.join(self.frag_path, skeldir(self.cloudpath)), index_bbox, skeletons)
-      else:
-        self.upload_batch(vol, path, index_bbox, skeletons)
+      self.upload_batch(vol, path, index_bbox, skeletons)
     else:
       self.upload_individuals(vol, path, bbox, skeletons)
 
     if self.spatial_index:
       self.upload_spatial_index(vol, path, index_bbox, skeletons)
-
+
+  def voxel_connectivity_graph(
+    self,
+    vol:CloudVolume,
+    bbox:Bbox,
+    root_labels:np.ndarray,
+  ) -> np.ndarray:
+
+    if vol.meta.path.format != "graphene":
+      vol = CloudVolume(
+        self.cloudpath, mip=self.mip,
+        info=self.info, cdn_cache=False,
+        parallel=self.parallel,
+        fill_missing=self.fill_missing,
+      )
+
+    if vol.meta.path.format != "graphene":
+      raise ValueError("Can't extract a voxel connectivity graph from non-graphene volumes.")
+
+    layer_2 = vol.download(
+      bbox,
+      stop_layer=2,
+      agglomerate=True,
+      timestamp=self.timestamp,
+    )[...,0]
+
+    graph_chunk_size = np.array(vol.meta.graph_chunk_size) / vol.meta.downsample_ratio(vol.mip)
+    graph_chunk_size = graph_chunk_size.astype(int)
+
+    shape = bbox.size()[:3]
+    sgx, sgy, sgz = list(np.ceil(shape / graph_chunk_size).astype(int))
+
+    vcg = cc3d.voxel_connectivity_graph(layer_2, connectivity=26)
+    del layer_2
+
+    # the proper way to do this would be to get the lowest of the
+    # L3..LN roots as needed, but the lazy way is to use the root labels,
+    # which will retain a few errors; overall the error rate should still
+    # be reduced by over 100x. We need to shade in the sides of the
+    # connectivity graph with edges that represent the connections
+    # between the adjacent boxes.
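+    # Concretely: compute a connectivity graph from the root labels as
+    # well, then copy the root-level values over the six one-voxel-thick
+    # faces of each graph chunk, restoring edges that the L2 labeling
+    # severs at chunk boundaries.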
+
+    root_vcg = cc3d.voxel_connectivity_graph(root_labels, connectivity=26)
+    clamp_box = Bbox([0,0,0], shape)
+
+    for gx,gy,gz in xyzrange([sgx, sgy, sgz]):
+      bbx = Bbox((gx,gy,gz), (gx+1, gy+1, gz+1))
+      bbx *= graph_chunk_size
+      bbx = Bbox.clamp(bbx, clamp_box)
+
+      slicearr = []
+      for i in range(3):
+        bbx1 = bbx.clone()
+        bbx1.maxpt[i] = bbx1.minpt[i] + 1
+        slicearr.append(bbx1)
+
+        bbx1 = bbx.clone()
+        bbx1.minpt[i] = bbx1.maxpt[i] - 1
+        slicearr.append(bbx1)
+
+      for bbx1 in slicearr:
+        vcg[bbx1.to_slices()] = root_vcg[bbx1.to_slices()]
+
+    return vcg
+
   def compute_cross_sectional_area(self, vol, bbox, skeletons):
+    if len(skeletons) == 0:
+      return skeletons
+
     # Why redownload a bigger image? In order to avoid clipping the
     # cross sectional areas on the edges.
     delta = int(self.cross_sectional_area_shape_delta)
@@ -203,9 +291,13 @@ def compute_cross_sectional_area(self, vol, bbox, skeletons):
     huge_bbox.grow(int(np.max(bbox.size()) / 2) + 1)
     huge_bbox = Bbox.clamp(huge_bbox, vol.bounds)
 
-    mem_vol = vol.image.memory_cutout(
-      huge_bbox, mip=vol.mip,
-      encoding="crackle", compress=False
+    mem_vol = vol.memory_cutout(
+      huge_bbox,
+      mip=vol.mip,
+      encoding="crackle",
+      compress=False,
+      agglomerate=True,
+      timestamp=self.timestamp,
     )
     all_labels = mem_vol[big_bbox][...,0]
@@ -266,6 +358,11 @@ def reprocess_skel(pts, skel):
       diff = bbox.minpt - skel_bbx.minpt
       skel.vertices += diff * vol.resolution
 
+      # we binarized the label for memory's sake,
+      # so we need to harmonize that with the skeleton ID
+      segid = skel.id
+      skel.id = 1
+
       kimimaro.cross_sectional_area(
         binary_image, skel,
         anisotropy=vol.resolution,
@@ -275,7 +372,7 @@
         fill_holes=self.fill_holes,
         repair_contacts=True,
       )
-
+      skel.id = segid
       skel.vertices -= diff * vol.resolution
 
     for skel in repair_skels:
diff --git a/igneous_cli/cli.py b/igneous_cli/cli.py
index 4800575..77d1540 100644
--- a/igneous_cli/cli.py
+++ b/igneous_cli/cli.py
@@ -896,7 +896,7 @@ def meshgroup():
 @meshgroup.command("xfer")
 @click.argument("src", type=CloudPath())
 @click.argument("dest", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option("--sharded", is_flag=True, default=False, help="Generate shard fragments instead of outputing mesh fragments.", show_default=True)
 @click.option("--dir", "mesh_dir", type=str, default=None, help="Write meshes into this directory instead of the one indicated in the info file.")
 @click.option('--magnitude', default=2, help="Split up the work with 10^(magnitude) prefix based tasks.", show_default=True)
@@ -933,7 +933,7 @@ def mesh_xfer(
 
 @meshgroup.command("forge")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--mip', default=0, help="Perform meshing using this level of the image pyramid.", show_default=True)
 @click.option('--shape', type=Tuple3(), default=(448, 448, 448), help="Set the task shape in voxels.", show_default=True)
 @click.option('--simplify/--skip-simplify', is_flag=True, default=True, help="Enable mesh simplification.", show_default=True)
@@ -984,7 +984,7 @@ def mesh_forge(
 
 @meshgroup.command("merge")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--magnitude', default=2, help="Split up the work with 10^(magnitude) prefix based tasks. Default: 2 (100 tasks)")
 @click.option('--nlod', default=0, help="(multires) How many extra levels of detail to create.", show_default=True)
 @click.option('--vqb', default=16, help="(multires) Vertex quantization bits for stored model representation. 10 or 16 only.", show_default=True)
@@ -1016,7 +1016,7 @@ def mesh_merge(ctx, path, queue, magnitude, nlod, vqb, dir, min_chunk_size):
 
 @meshgroup.command("merge-sharded")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--nlod', default=1, help="Number of levels of detail to create.", type=int, show_default=True)
 @click.option('--vqb', default=16, help="Vertex quantization bits. Can be 10 or 16.", type=int, show_default=True)
 @click.option('--compress-level', default=7, help="Draco compression level.", type=int, show_default=True)
@@ -1065,7 +1065,7 @@ def mesh_sharded_merge(
 
 @meshgroup.command("rm")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--magnitude', default=2, help="Split up the work with 10^(magnitude) prefix based tasks. Default: 2 (100 tasks)")
 @click.option('--dir', 'mesh_dir', default=None, help="Target this directory instead of the one indicated in the info file.")
 @click.pass_context
@@ -1114,7 +1114,7 @@ def spatialindexgroup():
 
 @spatialindexgroup.command("create")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--shape', default="448,448,448", type=Tuple3(), help="Shape in voxels of each indexing task.", show_default=True)
 @click.option('--mip', default=0, help="Perform indexing using this level of the image pyramid.", show_default=True)
 @click.option('--fill-missing', is_flag=True, default=False, help="Interpret missing image files as background instead of failing.", show_default=True)
@@ -1181,6 +1181,7 @@ def skeletongroup():
 @click.option('--fix-branching', is_flag=True, default=True, help="Trades speed for quality of branching at forks.", show_default=True)
 @click.option('--fix-borders', is_flag=True, default=True, help="Allows trivial merging of single voxel overlap tasks. Only switch off for datasets that fit in a single task.", show_default=True)
 @click.option('--fix-avocados', is_flag=True, default=False, help="Fixes somata where nuclei and cytoplasm have separate segmentations.", show_default=True)
+@click.option('--fix-autapses', is_flag=True, default=False, help="(graphene only) Fixes autapses by using the PyChunkGraph.", show_default=True)
 @click.option('--fill-holes', is_flag=True, default=False, help="Preprocess each cutout to eliminate background holes and holes caused by entirely contained inclusions. Warning: May remove labels that are considered inclusions.", show_default=True)
 @click.option('--dust-threshold', default=1000, help="Skip skeletonizing objects smaller than this number of voxels within a cutout.", type=int, show_default=True)
 @click.option('--dust-global/--dust-local', is_flag=True, default=False, help="Use global voxel counts for the dust threshold (when >0). To use this feature you must first compute the global voxel counts using the 'igneous image voxels' command.", show_default=True)
@@ -1195,14 +1196,17 @@
 @click.option('--sharded', is_flag=True, default=False, help="Generate shard fragments instead of outputing skeleton fragments.", show_default=True)
 @click.option('--labels', type=TupleN(), default=None, help="Skeletonize only this comma separated list of labels.", show_default=True)
 @click.option('--cross-section', type=int, default=0, help="Compute the cross sectional area for each skeleton vertex. May add substantial computation time. Integer value is the normal vector rolling average smoothing window over vertices. 0 means off.", show_default=True)
+@click.option('--output', '-o', type=CloudPath(), default=None, help="Output the results to a different place.", show_default=True)
+@click.option('--timestamp', type=int, default=None, help="(graphene) Use the proofreading state at this UNIX timestamp.", show_default=True)
+@click.option('--root-ids', type=CloudPath(), default=None, help="(graphene) If you have a materialization of graphene root ids for this timepoint, it's more efficient to use it than making requests to the graphene server.", show_default=True)
 @click.pass_context
 def skeleton_forge(
   ctx, path, queue, mip, shape,
   fill_missing, dust_threshold, dust_global, spatial_index,
-  fix_branching, fix_borders, fix_avocados,
+  fix_branching, fix_borders, fix_avocados, fix_autapses,
   fill_holes, scale, const, soma_detect, soma_accept,
   soma_scale, soma_const, max_paths, sharded, labels,
-  cross_section,
+  cross_section, output, timestamp, root_ids,
 ):
   """
   (1) Synthesize skeletons from segmentation cutouts.
@@ -1247,6 +1251,8 @@ def skeleton_forge(
     dust_global=dust_global,
     object_ids=labels,
     cross_sectional_area=(cross_section > 0),
     cross_sectional_area_smoothing_window=int(cross_section),
+    frag_path=output, fix_autapses=fix_autapses,
+    timestamp=timestamp, root_ids_cloudpath=root_ids,
   )
   enqueue_tasks(ctx, queue, tasks)
@@ -1283,7 +1289,7 @@ def skeleton_merge(
 
 @skeletongroup.command("merge-sharded")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--min-cable-length', default=1000, help="Skip objects smaller than this physical path length.", type=float, show_default=True)
 @click.option('--max-cable-length', default=None, help="Skip objects larger than this physical path length. Default: no limit", type=float)
 @click.option('--tick-threshold', default=0, help="Remove small \"ticks\", or branches from the main skeleton one at a time from smallest to largest. Branches larger than this are preserved. Default: no elimination", type=float)
@@ -1334,7 +1340,7 @@ def skeleton_sharded_merge(
 
 @imagegroup.command("rm")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--mip', default=0, help="Which mip level to start deleting from. Default: 0")
 @click.option('--num-mips', default=5, help="The number of mip levels to delete at once. Default: 5")
 @click.option('--shape', default=None, help="The size of each deletion task as a comma delimited list. Must be a multiple of the chunk size.", type=Tuple3())
@@ -1358,7 +1364,7 @@ def delete_images(
 
 @skeletongroup.command("rm")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--magnitude', default=2, help="Split up the work with 10^(magnitude) prefix based tasks. Default: 2 (100 tasks)")
 @click.option('--dir', 'skel_dir', default=None, help="Target this directory instead of the one indicated in the info file.")
 @click.pass_context
@@ -1376,7 +1382,7 @@ def skeleton_rm(ctx, path, queue, magnitude, skel_dir):
 @skeletongroup.command("xfer")
 @click.argument("src", type=CloudPath())
 @click.argument("dest", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option("--sharded", is_flag=True, default=False, help="Generate shard fragments instead of outputing mesh fragments.", show_default=True)
 @click.option("--dir", "skel_dir", type=str, default=None, help="Write skeletons into this directory instead of the one indicated in the info file.")
 @click.option('--magnitude', default=2, help="Split up the work with 10^(magnitude) prefix based tasks.", show_default=True)
@@ -1440,7 +1446,7 @@ def spatialindexgroupskel():
 
 @spatialindexgroupskel.command("create")
 @click.argument("path", type=CloudPath())
-@click.option('--queue', required=True, help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
+@click.option('--queue', help="AWS SQS queue or directory to be used for a task queue. e.g. sqs://my-queue or ./my-queue. See https://github.com/seung-lab/python-task-queue", type=str)
 @click.option('--shape', default="512,512,512", type=Tuple3(), help="Shape in voxels of each indexing task.", show_default=True)
 @click.option('--mip', default=0, help="Perform indexing using this level of the image pyramid.", show_default=True)
 @click.option('--fill-missing', is_flag=True, default=False, help="Interpret missing image files as background instead of failing.", show_default=True)
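---
Usage note (not part of the patch): a minimal sketch of how the new
skeletonization options compose, assuming a graphene-backed segmentation
and a materialized root ID archive. All paths, the queue, and the
timestamp below are hypothetical placeholders.

    import igneous.task_creation as tc
    from taskqueue import TaskQueue

    tasks = tc.create_skeletonizing_tasks(
      "graphene://https://example.org/segmentation/table/dummy",  # hypothetical
      mip=0,
      shape=(512, 512, 512),  # must be a multiple of the graph chunk size
      sharded=True,
      fix_autapses=True,      # graphene only; uses PCG L2 connectivity
      timestamp=1700000000,   # proofreading state at this UNIX timestamp
      root_ids_cloudpath="gs://example/materialized-roots/",  # hypothetical
      frag_path="gs://example/skeleton-frags/",               # hypothetical
    )

    TaskQueue("sqs://example-queue").insert(tasks)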