Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Scope metadata #55

Merged
merged 9 commits into from
Feb 1, 2024
12 changes: 6 additions & 6 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ jobs:

steps:
- name: Check out repo
uses: actions/checkout@v2
uses: actions/checkout@v4

- name: Set up python ${{ matrix.python-version }}
uses: actions/setup-python@v2
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

Expand All @@ -37,14 +37,14 @@ jobs:

- name: Upload test results
if: always()
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: Test Results (Python ${{ matrix.python-version }})
path: test-reports/test.xml

- name: Upload code coverage
if: matrix.python-version == '3.9'
uses: codecov/codecov-action@v2
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}

Expand All @@ -57,12 +57,12 @@ jobs:

steps:
- name: Download artifacts
uses: actions/download-artifact@v2
uses: actions/download-artifact@v4
with:
path: artifacts

- name: Publish Test Results
uses: EnricoMi/publish-unit-test-result-action@v1
uses: EnricoMi/publish-unit-test-result-action@v2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
files: artifacts/**/*.xml
86 changes: 66 additions & 20 deletions em_stitch/lens_correction/lens_correction_solver.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,39 +70,85 @@ def tilespec_input_from_metafile(
return result


def filter_match_collection(
        matches, threshold, model="Similarity",
        n_clusters=None, n_cluster_pts=20, ransacReprojThreshold=40.,
        ignore_match_indices=(),
        input_n_key="n_from_gpu", output_n_key="n_after_filter"):
    """Run pointmatch filtering over a match collection, annotating weights.

    Parameters
    ----------
    matches : list of dict
        render-style pointmatch dicts; each has m["matches"]["p"]/["q"].
    threshold : float
        currently unused — kept for interface compatibility with the
        caller (see the FIXME on make_collection_json's ``thresh``).
    model : str
        transform model name forwarded to common_utils.pointmatch_filter.
    n_clusters : int or None
        cluster count forwarded to the filter.
    n_cluster_pts : int
        points-per-cluster forwarded to the filter.
    ransacReprojThreshold : float
        RANSAC reprojection threshold forwarded to the filter.
    ignore_match_indices : iterable of int or None
        indices into ``matches`` that are still filtered (weights written,
        counts recorded) but excluded from the returned collection.
    input_n_key, output_n_key : str
        keys under which the per-match input/output counts are reported.

    Returns
    -------
    new_matches : list of dict
        the input matches (with "w" weights set in place) whose index is
        not in ``ignore_match_indices``.
    counts : list of dict
        one entry per *input* match with point counts before/after filtering.
    """
    ignore_match_indices = set(
        () if ignore_match_indices is None else ignore_match_indices)

    counts = []
    new_matches = []

    for i, m in enumerate(matches):
        input_n = len(m["matches"]["p"][0])

        # BUG FIX: model and ransacReprojThreshold were accepted by this
        # function but never forwarded, so caller overrides were silently
        # ignored (the pre-refactor code passed them explicitly).
        _, _, w, _ = common_utils.pointmatch_filter(
            m,
            n_clusters=n_clusters,
            n_cluster_pts=n_cluster_pts,
            ransacReprojThreshold=ransacReprojThreshold,
            model=model)

        m["matches"]["w"] = w.tolist()

        output_n = np.count_nonzero(w)

        counts.append({
            input_n_key: input_n,
            output_n_key: output_n})

        # original version ran pointmatch filtering for these and then
        # filtered them out of the matches; preserve that by recording
        # counts (above) before skipping the match here.
        if i in ignore_match_indices:
            continue

        new_matches.append(m)

    return new_matches, counts


def make_collection_json(
template_file,
output_dir,
thresh,
thresh, # FIXME thresh not used in this version
compress,
ignore_match_indices=None):

with open(template_file, 'r') as f:
matches = json.load(f)
template_match_md = json.load(f)

counts = []
for m in matches['collection']:
counts.append({})
ind = np.arange(len(m['matches']['p'][0]))
counts[-1]['n_from_gpu'] = ind.size
input_matches = template_match_md["collection"]

_, _, w, _ = common_utils.pointmatch_filter(
m,
n_clusters=None,
n_cluster_pts=20,
ransacReprojThreshold=40.0,
model='Similarity')
m, counts = filter_match_collection(
input_matches, thresh,
ignore_match_indices=ignore_match_indices
)

# counts = []
# for m in template_match_md['collection']:
# counts.append({})
# ind = np.arange(len(m['matches']['p'][0]))
# counts[-1]['n_from_gpu'] = ind.size

# _, _, w, _ = common_utils.pointmatch_filter(
# m,
# n_clusters=None,
# n_cluster_pts=20,
# ransacReprojThreshold=40.0,
# model='Similarity')

m['matches']['w'] = w.tolist()
# m['matches']['w'] = w.tolist()

counts[-1]['n_after_filter'] = np.count_nonzero(w)
# counts[-1]['n_after_filter'] = np.count_nonzero(w)

m = matches['collection']
# m = matches['collection']

if ignore_match_indices:
m = [match for i, match in enumerate(matches['collection'])
if i not in ignore_match_indices]
logger.warning("you are ignoring some point matches")
# if ignore_match_indices:
# m = [match for i, match in enumerate(matches['collection'])
# if i not in ignore_match_indices]
# logger.warning("you are ignoring some point matches")

collection_file = os.path.join(output_dir, "collection.json")
collection_file = jsongz.dump(m, collection_file, compress=compress)
Expand Down
69 changes: 61 additions & 8 deletions em_stitch/utils/generate_EM_tilespecs_from_metafile.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,16 +30,19 @@ def image_coords_from_stage(stage_coords, resX, resY, rotation):
return (int(x * cr + y * sr),
int(-x * sr + y * cr))

def tileId_from_basename(self, fname):
@staticmethod
def tileId_from_basename(fname):
return os.path.splitext(os.path.basename(fname))[0]

def ts_from_imgdata(self, imgdata, imgdir, x, y,
minint=0, maxint=255, maskUrl=None,
width=3840, height=3840, z=None, sectionId=None,
scopeId=None, cameraId=None, pixelsize=None):
tileId = self.tileId_from_basename(imgdata['img_path'])
sectionId = (self.sectionId_from_z(z) if sectionId is None
else sectionId)
@staticmethod
def sectionId_from_z(z):
return str(float(z))

@staticmethod
def ts_from_imgdata_tileId(imgdata, imgdir, x, y, tileId,
minint=0, maxint=255, maskUrl=None,
width=3840, height=3840, z=None, sectionId=None,
scopeId=None, cameraId=None, pixelsize=None):
raw_tforms = [renderapi.transform.AffineModel(B0=x, B1=y)]
imageUrl = pathlib.Path(
os.path.abspath(os.path.join(
Expand All @@ -63,6 +66,56 @@ def ts_from_imgdata(self, imgdata, imgdir, x, y,
stageY=imgdata['img_meta']['stage_pos'][1],
rotation=imgdata['img_meta']['angle'], pixelsize=pixelsize)

def ts_from_imgdata(self, imgdata, imgdir, x, y,
                    minint=0, maxint=255, maskUrl=None,
                    width=3840, height=3840, z=None, sectionId=None,
                    scopeId=None, cameraId=None, pixelsize=None):
    """Build a tilespec from image metadata, deriving tileId/sectionId.

    Thin wrapper around ts_from_imgdata_tileId: the tileId comes from the
    image file's base name, and sectionId defaults to the z-derived id
    when not given explicitly.
    """
    derived_tile_id = self.tileId_from_basename(imgdata['img_path'])
    if sectionId is None:
        sectionId = self.sectionId_from_z(z)
    return self.ts_from_imgdata_tileId(
        imgdata, imgdir, x, y, derived_tile_id,
        minint=minint, maxint=maxint, maskUrl=maskUrl,
        width=width, height=height, z=z, sectionId=sectionId,
        scopeId=scopeId, cameraId=cameraId, pixelsize=pixelsize)

@classmethod
def ts_from_metadata(
        cls, md, image_directory, z, sectionId=None,
        minimum_intensity=0, maximum_intensity=255, maskUrl=None):
    """Build tilespecs for every image described in acquisition metadata.

    md[0]['metadata'] holds ROI/camera/calibration info; md[1]['data'] is
    the per-image list. Stage positions are converted to image coordinates
    and shifted so the minimum lands at (0, 0).
    """
    roidata = md[0]['metadata']
    imgdata = md[1]['data']

    # stage -> image coordinates, keyed by image path
    img_coords = {}
    for img in imgdata:
        meta = img['img_meta']
        img_coords[img['img_path']] = cls.image_coords_from_stage(
            meta['stage_pos'],
            meta['pixel_size_x_move'],
            meta['pixel_size_y_move'],
            numpy.radians(meta['angle']))

    minX, minY = numpy.min(numpy.array(list(img_coords.values())), axis=0)
    # assume isotropic pixels
    pixelsize = roidata['calibration']['highmag']['x_nm_per_pix']

    shared_kwargs = {
        "minint": minimum_intensity,
        "maxint": maximum_intensity,
        "z": z,
        "sectionId": sectionId,
        "maskUrl": maskUrl,
    }

    tspecs = []
    for img in imgdata:
        cx, cy = img_coords[img['img_path']]
        tspecs.append(cls.ts_from_imgdata_tileId(
            img, image_directory,
            cx - minX,
            cy - minY,
            cls.tileId_from_basename(img["img_path"]),
            width=roidata['camera_info']['width'],
            height=roidata['camera_info']['height'],
            scopeId=roidata['temca_id'],
            cameraId=roidata['camera_info']['camera_id'],
            pixelsize=pixelsize, **shared_kwargs))
    return tspecs

def run(self):
with open(self.args['metafile'], 'r') as f:
meta = json.load(f)
Expand Down
2 changes: 1 addition & 1 deletion integration_tests/test_pointmatch_filter.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ def dummy_match(npts=100, tform_type='affine'):
np.random.rand() * oh + overlap.bounds[1])
if overlap.contains(pt):
pts.append(pt)
src = np.array([np.array(p) for p in pts])
src = np.array([np.array(p.coords[0]) for p in pts])
match = {}
for k in ['pId', 'qId', 'pGroupId', 'qGroupId']:
match[k] = k
Expand Down