diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 00000000..243e8705
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,17 @@
+[run]
+source =
+ src/imcflibs
+
+omit =
+ conftest.py
+ tests/*
+ # omit anything in a venv / venv2 directory
+ ./venv/*
+ ./venv2/*
+
+[report]
+## NOTE: `exclude_also` is only supported for coverage 7.2 and newer, which
+## won't work when testing with Python2 (coverage 5.5 is the latest one
+## supporting Python2), hence we cannot use it:
+; exclude_also =
+; if _python_platform.python_implementation\(\) == \"Jython\":
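+
+## Quick local sanity check that this configuration is picked up (assuming a
+## venv with pytest-cov installed, cf. TESTING.md):
+##   pytest --cov
+##   coverage report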
diff --git a/.github/codecov.yml b/.github/codecov.yml
new file mode 100644
index 00000000..eba40389
--- /dev/null
+++ b/.github/codecov.yml
@@ -0,0 +1,13 @@
+coverage:
+ precision: 0
+ round: nearest
+ range:
+ - 0
+ - 75
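+  # ('range' maps the red-to-green color scale onto 0-75% coverage, i.e.
+  # anything at or above 75% is already rendered fully green)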
+ # notification blocks
+ # https://docs.codecov.io/docs/codecovyml-reference#section-coverage-notify
+ # notify:
+ # status:
+ # project:
+ # patch:
+ # changes: off
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 368b50f2..de54ef54 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,30 +1,44 @@
-name: build
+name: ππ Publish to π¬β SciJava and πͺ PyPI
on:
- push:
- branches:
- - master
- tags:
- - "*-[0-9]+.*"
- pull_request:
- branches:
- - master
+
+ release:
+ types:
+ - published # A release, pre-release, or draft of a release was published.
+
+ workflow_dispatch:
+
+  ## Do NOT run on pushes or PRs, as they won't have a 'release.properties',
+ ## which is required by the build-tooling (see the comment at the "Inspect" /
+ ## "check-if-release.sh" section below for details).
+ # push:
+ # pull_request:
+
jobs:
- build:
+
+ publish-to-scijava:
+
+ name: π¬β publish to SciJava
+
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - name: Set up Java
+
+ - uses: actions/checkout@v4
+ name: π₯ Checkout repo
+
+ - name: β Set up Java
uses: actions/setup-java@v3
with:
java-version: '8'
distribution: 'zulu'
cache: 'maven'
- - name: Set up CI environment
+
+ - name: πͺ Set up CI environment
run: .github/setup.sh
- - name: Execute the build
+
+ - name: π· Build and publish on π¬β SciJava
run: .github/build.sh
env:
GPG_KEY_NAME: ${{ secrets.GPG_KEY_NAME }}
@@ -33,3 +47,137 @@ jobs:
MAVEN_PASS: ${{ secrets.MAVEN_PASS }}
OSSRH_PASS: ${{ secrets.OSSRH_PASS }}
SIGNING_ASC: ${{ secrets.SIGNING_ASC }}
+
+
+ build-via-poetry:
+
+ name: π· build via π Poetry
+
+ runs-on: ubuntu-22.04
+
+ steps:
+ - uses: actions/checkout@v4
+ name: π₯ Checkout repo
+
+ - name: π Cache π¦ APT Packages
+ uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+ with:
+ packages: xmlstarlet
+ version: 1.0
+
+ - name: π΅ Inspect if this is a proper "scijava-scripts" release
+ run: scripts/check-if-release.sh
+ # This will make sure the file 'release.properties' exists, meaning
+ # `release-version.sh` from the 'scijava-scripts' repo has been run to
+ # prepare the release and modify 'pom.xml' (which is in turn
+ # required by the local 'scripts/run-poetry.sh' script for building the
+        # Python package through Poetry).
+
+ - name: π Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: π Cache π Poetry install
+ uses: actions/cache@v4
+ with:
+ path: ~/.local
+ key: poetry-2.0.1-0
+
+ # The key configuration value here is `virtualenvs-in-project: true`: this
+      # creates the venv as a `.venv` in your testing directory, which would
+      # allow a subsequent step to cache it (not needed here, see below).
+ - name: π©π§ Install π Poetry
+ uses: snok/install-poetry@v1
+ with:
+ version: 2.0.1
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+
+      ### No dependencies and project installation required - we're just packaging.
+ # # Cache dependencies (i.e. all the stuff in your `pyproject.toml`).
+ # - name: π Cache π§Ύ Dependencies
+ # id: cache-deps
+ # uses: actions/cache@v4
+ # with:
+ # path: .venv
+ # key: pydeps-${{ hashFiles('**/poetry.lock') }}
+
+ ### No poetry-dynamic-versioning here, we're using the POM instead!
+ # - name: π Install Poetry dynamic-versioning π plugin
+ # run: poetry self add "poetry-dynamic-versioning[plugin]"
+
+      ### No dependencies and project installation required - we're just packaging.
+ # # Install dependencies. `--no-root` means "install all dependencies but
+ # # not the project itself", which is what you want to avoid caching _your_
+ # # code. The `if` statement ensures this only runs on a cache miss.
+ # - name: π Install π§Ύ Dependencies
+ # run: scripts/run-poetry.sh install --no-interaction --no-root
+ # if: steps.cache-deps.outputs.cache-hit != 'true'
+
+ # - name: π Install π project
+ # run: scripts/run-poetry.sh install --no-interaction
+
+ - name: ππ· Build π§± project
+ run: scripts/run-poetry.sh build
+ env:
+ IGNORE_DEPS_PYTHON: true # required to build "py2.py3" wheels
+
+ - name: π€ Upload build artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: release-dists
+ path: dist/
+
+
+ publish-to-pypi:
+
+ name: ππ publish to πͺ PyPI
+
+ runs-on: ubuntu-latest
+
+ needs:
+ - build-via-poetry
+
+ permissions:
+ id-token: write
+
+ environment:
+ name: release
+
+ steps:
+ - name: π₯ Retrieve release π¦ distributions
+ uses: actions/download-artifact@v4
+ with:
+ name: release-dists
+ path: dist/
+
+      - name: π€ Publish release distributions to πͺ PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ packages-dir: dist/
+
+
+ trigger-foreign-workflows:
+
+ name: π Dispatch foreign workflows
+
+ strategy:
+ matrix:
+ repo: ['imcf/imcf.github.io']
+
+ runs-on: ubuntu-latest
+
+ needs:
+ - publish-to-pypi
+
+ steps:
+ - name: πΉ Fire event on `${{ matrix.repo }}`
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.DISPATCH_DEPLOY_PAGES }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/${{ matrix.repo }}/dispatches \
+ -d '{"event_type":"dispatch-event"}'
diff --git a/.github/workflows/dispatch-deploy-pages.yml b/.github/workflows/dispatch-deploy-pages.yml
new file mode 100644
index 00000000..88feb100
--- /dev/null
+++ b/.github/workflows/dispatch-deploy-pages.yml
@@ -0,0 +1,25 @@
+name: π Dispatch foreign workflows
+
+on:
+ workflow_dispatch:
+
+jobs:
+
+ trigger-event:
+
+ strategy:
+ matrix:
+ repo: ['imcf/imcf.github.io']
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: πΉ Fire event on `${{ matrix.repo }}`
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.DISPATCH_DEPLOY_PAGES }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/${{ matrix.repo }}/dispatches \
+ -d '{"event_type":"dispatch-event"}'
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 00000000..6a7fd051
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,23 @@
+name: π Code Linting β‘
+
+on:
+ push:
+ pull_request:
+ workflow_dispatch:
+
+jobs:
+
+ lint:
+ name: Ruff β‘π΅
+
+ runs-on: ubuntu-latest
+
+ steps:
+
+ - name: π₯ Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Run Ruff checks β‘
+ uses: astral-sh/ruff-action@v3
+ with:
+ args: check
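+
+      # NOTE: the rule selection for 'ruff check' is maintained in the
+      # '[tool.ruff.lint]' section of 'pyproject.toml' and picked up there
+      # automatically.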
diff --git a/.github/workflows/pytest-poetry.yml b/.github/workflows/pytest-poetry.yml
new file mode 100644
index 00000000..a53043f8
--- /dev/null
+++ b/.github/workflows/pytest-poetry.yml
@@ -0,0 +1,107 @@
+## action file inspired by https://jacobian.org/til/github-actions-poetry/
+
+name: π§ͺ pytest (via π Poetry)
+
+on:
+ push:
+ branches:
+ - master
+ tags:
+ - run-pytest*
+ - py3-pytest*
+ - "*-[0-9]+.*"
+ pull_request:
+ branches:
+ - master
+ - devel
+
+jobs:
+ pytest-poetry:
+ # runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
+
+ steps:
+
+ - uses: actions/checkout@v4
+ name: π₯ Checkout repo
+
+ - name: π Cache π¦ APT Packages
+ uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+ with:
+ packages: xmlstarlet
+ version: 1.0
+
+      # If you wanted to use multiple Python versions, you'd have to specify a
+ # matrix in the job and reference the matrix python version here.
+ - name: π Set up Python
+ uses: actions/setup-python@v5.4.0
+ with:
+ python-version: "3.10"
+
+      # Cache the installation of Poetry itself, i.e. the next step. This
+ # prevents the workflow from installing Poetry every time, which can be
+ # slow. Note the use of the Poetry version number in the cache key, and
+ # the "-0" suffix: this allows you to invalidate the cache manually
+ # if/when you want to upgrade Poetry, or if something goes wrong (could be
+ # done mildly cleaner by using an environment variable).
+ - name: π Cache π Poetry install
+ uses: actions/cache@v4
+ with:
+ path: ~/.local
+ key: poetry-1.8.2-0
+
+ # Install Poetry. You could do this manually, or there are several actions
+ # that do this. `snok/install-poetry` seems to be minimal yet complete,
+ # and really just calls out to Poetry's default install script, which
+ # feels correct. I pin the Poetry version here because Poetry does
+ # occasionally change APIs between versions and I don't want my actions to
+ # break if it does.
+ #
+ # The key configuration value here is `virtualenvs-in-project: true`: this
+ # creates the venv as a `.venv` in your testing directory, which allows
+ # the next step to easily cache it.
+ - name: π©π§ Install π Poetry
+ uses: snok/install-poetry@v1
+ with:
+ version: 1.8.2
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+
+ # Cache your dependencies (i.e. all the stuff in your `pyproject.toml`).
+ # Note the cache key: if you're using multiple Python versions, or
+ # multiple OSes, you'd need to include them in the cache key. I'm not, so
+ # it can be simple and just depend on the poetry.lock.
+ - name: π Cache π§Ύ Dependencies
+ id: cache-deps
+ uses: actions/cache@v4
+ with:
+ path: .venv
+ key: pydeps-${{ hashFiles('**/poetry.lock') }}
+
+ ### No poetry-dynamic-versioning here, we're using the POM instead!
+ # - name: π Install Poetry dynamic-versioning π plugin
+ # run: poetry self add "poetry-dynamic-versioning[plugin]"
+
+ # Install dependencies. `--no-root` means "install all dependencies but
+      # not the project itself", which is what you want in order to avoid
+      # caching _your_ code. The `if` statement ensures this only runs on a
+      # cache miss.
+ - name: π Install π§Ύ Dependencies
+ run: scripts/run-poetry.sh install --no-interaction --no-root --verbose
+ if: steps.cache-deps.outputs.cache-hit != 'true'
+
+ # Now install _your_ project. This isn't necessary for many types of
+ # projects -- particularly things like Django apps don't need this. But
+      # it's a good idea since it fully exercises the pyproject.toml and makes
+      # sure that if you add things like console-scripts at some point, they'll
+      # be installed and working.
+ - name: π Install π project
+ run: scripts/run-poetry.sh install --no-interaction --verbose
+
+ # And finally run the tests.
+ - name: π§ͺπ Run Tests
+ run: scripts/run-poetry.sh run pytest --color=yes --cov --cov-report=xml -vv
+
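+      # '--cov-report=xml' makes pytest-cov write a 'coverage.xml' file, which
+      # the Codecov action below picks up automatically.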
+ - name: π€ Upload π coverage reports to β Codecov
+ uses: codecov/codecov-action@v5
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
\ No newline at end of file
diff --git a/.github/workflows/pytest-python2.yml b/.github/workflows/pytest-python2.yml
new file mode 100644
index 00000000..91fa23ba
--- /dev/null
+++ b/.github/workflows/pytest-python2.yml
@@ -0,0 +1,73 @@
+name: π§ͺ pytest (using π Python2)
+
+on:
+ push:
+ branches:
+ - master
+ tags:
+ - run-pytest*
+ - py2-pytest*
+ - "*-[0-9]+.*"
+ pull_request:
+ branches:
+ - master
+ - devel
+
+env:
+ PY_VERSION: 2.7.18
+
+jobs:
+ pytest-python2:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: π Cache π¦ APT Packages
+ uses: awalsh128/cache-apt-pkgs-action@v1.4.3
+ with:
+ packages:
+ xmlstarlet
+ # python3-tomli # required only for 'parse-python-deps.py'
+ version: 1.0
+
+ - name: π Cache pyenv installation
+ id: cache-pyenv
+ uses: actions/cache@v4
+ with:
+ path: /opt/hostedtoolcache/pyenv_root
+ key: "pyenv-${{ env.PY_VERSION }}-3"
+
+ - name: ππ§° Set up pyenv
+ id: setup-pyenv
+ uses: "gabrielfalcao/pyenv-action@v18"
+ with:
+ default: "${{ env.PY_VERSION }}"
+ # Store the `PYENV_ROOT` var *inside* the cached directory so it can be
+ # retrieved consistently (independent of pyenv being freshly installed
+ # or coming from the cache).
+ # NOTE: In case pyenv was extracted from the cache, it WILL NOT BE ADDED
+ # to the PATH environment variable! Any follow-up task has to take
+ # this into account and **ACTIVELY** use the PYENV_ROOT variable!
+ command: |
+ echo $PYENV_ROOT > /opt/hostedtoolcache/pyenv_root/.pyenv_root
+ if: steps.cache-pyenv.outputs.cache-hit != 'true'
+
+ - name: π΅ Identify PYENV_ROOT
+ id: pyenvroot
+ run: |
+ cat /opt/hostedtoolcache/pyenv_root/.pyenv_root
+ echo "PYENV_ROOT=$(cat /opt/hostedtoolcache/pyenv_root/.pyenv_root)" >> $GITHUB_ENV
+
+ - name: ππ Cache Python2 virtualenv
+ id: cache-py2-venv
+ uses: actions/cache@v4
+ with:
+ path: venv.py2
+ key: "venv-py-${{ env.PY_VERSION }}--${{ hashFiles('pyproject.toml') }}-2"
+
+ - name: π§ͺβ Run pytest-wrapper
+ run: scripts/py2-pytest.sh --cov -vv
+ env:
+ VENV_PATH: venv.py2
+ PY_VERSION: ${{ env.PY_VERSION }}
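+      # (The wrapper builds a py2-compatible wheel via Poetry, installs it
+      # into VENV_PATH and then launches pytest - see TESTING.md for details.)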
+
diff --git a/.pylintrc b/.pylintrc
index a02e3e5b..75b44175 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -2,5 +2,8 @@
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_,x,y,z,rm,rt
-# don't mess with black autoformatting, just let it do its job:
-disable=bad-continuation
\ No newline at end of file
+# This repo is all about Python 2.7 code, so disable a few messages. Note that
+# 'disable' may only be specified once, hence a single comma-separated list:
+disable=too-many-lines,
+    consider-using-f-string,
+    useless-object-inheritance
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf1c0100..7503eb67 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,198 @@
# Changelog π§Ύ
-
+
+## 1.5.0
+
+This release brings a lot of additions; not all changes and functions are
+explained in depth below. For detailed information, please refer to the [updated
+API documentation][apidocs].
+
+[apidocs]: https://imcf.one/apidocs/imcflibs/imcflibs.html
+
+### Added
+
+#### New functions in `imcflibs.imagej.misc`
+
+* `imcflibs.imagej.misc.send_notification_email` to send email notifications
+ upon completion of long-running scripts.
+ * Sends a mail with job details, such as recipient, file name, execution time
+ & an optional message.
+ * To enable email notifications, the following preferences must be set in
+ `~/.imagej/IJ_Prefs.txt`:
+ * `.imcf.sender_email`: sender's email address.
+ * `.imcf.smtpserver`: the SMTP server used for sending emails.
+ * If the sender email or SMTP server is not configured, the method logs a
+ message and returns.
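+  * A minimal example block for `~/.imagej/IJ_Prefs.txt` (values are
+    placeholders):
+
+    ```text
+    .imcf.sender_email=facility@example.com
+    .imcf.smtpserver=smtp.example.com
+    ```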
+* `imcflibs.imagej.misc.sanitize_image_title` to remove special chars and
+ various suffixes from an ImagePlus' window title.
+* `imcflibs.imagej.misc.subtract_images` to subtract an image from another.
+* `imcflibs.imagej.misc.close_images` for closing all ImagePluses from a list.
+* `imcflibs.imagej.misc.get_threshold_value_from_method` to get the value that a
+ selected *AutoThreshold* method would be using.
+* `imcflibs.imagej.misc.write_ordereddict_to_csv` to write data from an ordered
+ dictionary (or list of ordered dictionaries) to a CSV file.
+* `imcflibs.imagej.misc.save_image_in_format` to save an ImagePlus in a
+ specified format, such as `ImageJ-TIF` or `OME-TIFF` etc.
+* `imcflibs.imagej.misc.run_imarisconvert` to convert a given file to Imaris
+ format using the utility *ImarisConvert*. The function uses
+ `imcflibs.imagej.misc.locate_latest_imaris` to find the path to the most
+ recent Imaris installation.
+
+#### New functions in `imcflibs.imagej.labelimage`
+
+* `imcflibs.imagej.labelimage.cookie_cut_labels` to use a label image as a mask
+ for another label image. Objects might get split or merged depending on the
+ mask.
+* `imcflibs.imagej.labelimage.binary_to_label` for segmenting a binary image to
+ get a label image (2D/3D).
+* `imcflibs.imagej.labelimage.relate_label_images` to relate two label images
+ (2D/3D) using the *3D Association* plugin from the 3DImageJSuite.
+* `imcflibs.imagej.labelimage.dilate_labels_2d` to dilate a label image slice by
+ slice. Works for 2D or 3D images.
+
+#### New submodule `imcflibs.imagej.objects3d`
+
+* `imcflibs.imagej.objects3d.population3d_to_imgplus` to turn an
+ *Objects3DPopulation* into an ImagePlus (2D/3D).
+* `imcflibs.imagej.objects3d.imgplus_to_population3d` to get the
+ *Objects3DPopulation* from an ImagePlus (2D/3D).
+* `imcflibs.imagej.objects3d.segment_3d_image` to threshold an image into a
+ labeled stack.
+* `imcflibs.imagej.objects3d.get_objects_within_intensity` to filter a
+ population of 3D objects by intensity.
+* `imcflibs.imagej.objects3d.maxima_finder_3d` to find local maxima in a 3D
+ image.
+* `imcflibs.imagej.objects3d.seeded_watershed` to perform a seeded watershed
+  segmentation on a binary image using seed points.
+
+#### New submodule `imcflibs.imagej.bdv`
+
+Providing *BigDataViewer* related functionality.
+
+* Option configuration classes:
+ * `imcflibs.imagej.bdv.ProcessingOptions` to configure the options on how the
+    dataset should be processed.
+ * `imcflibs.imagej.bdv.DefinitionOptions` to hold the options on how a dataset
+ is defined.
+* `imcflibs.imagej.bdv.check_processing_input` to sanitize and clarify the
+ `acitt` input selection.
+* `imcflibs.imagej.bdv.get_processing_settings` to generate the strings needed
+ for the processing.
+* `imcflibs.imagej.bdv.backup_xml_files` to create a backup of BDV-XML files.
+* `imcflibs.imagej.bdv.define_dataset_auto` to run "*Define Multi-View Dataset*"
+ using the "*Auto-Loader*" option.
+* `imcflibs.imagej.bdv.define_dataset_manual` to run "*Define Multi-View
+ Dataset*" using the "*Manual Loader*" option.
+* `imcflibs.imagej.bdv.resave_as_h5` to resave the dataset in H5 to make it
+ compatible with BigDataViewer/BigStitcher.
+* `imcflibs.imagej.bdv.flip_axes` to call BigStitcher's "*Flip Axes*" command.
+* `imcflibs.imagej.bdv.phase_correlation_pairwise_shifts_calculation` to
+ calculate pairwise shifts using Phase Correlation.
+* `imcflibs.imagej.bdv.filter_pairwise_shifts` for filtering pairwise shifts
+ based on different thresholds.
+* `imcflibs.imagej.bdv.optimize_and_apply_shifts` to optimize shifts and apply
+ them to a dataset.
+* `imcflibs.imagej.bdv.detect_interest_points` for running the "*Detect Interest
+ Points*" command for registration.
+* `imcflibs.imagej.bdv.interest_points_registration` to run the "*Register
+ Dataset based on Interest Points*" command.
+* `imcflibs.imagej.bdv.duplicate_transformations` for duplicating / propagating
+ transformation parameters to other channels.
+* `imcflibs.imagej.bdv.fuse_dataset` to call BigStitcher's "*Fuse Multi-View
+ Dataset*" command.
+
+#### New submodule `imcflibs.imagej.trackmate`
+
+Providing helper functions to interface with *Trackmate*.
+
+* Multiple functions to set up Trackmate settings with different detectors:
+ * `imcflibs.imagej.trackmate.cellpose_detector`
+ * `imcflibs.imagej.trackmate.stardist_detector`
+ * `imcflibs.imagej.trackmate.log_detector`
+* `imcflibs.imagej.trackmate.spot_filtering` to create settings to filter
+ detected spots based on optional thresholds for quality, area, circularity &
+ intensity.
+* `imcflibs.imagej.trackmate.sparse_lap_tracker` to create default settings for
+ the sparse LAP tracker.
+* `imcflibs.imagej.trackmate.track_filtering` to create settings to filter
+  detected tracks based upon optional criteria, such as the `maximum linking`,
+  `gap closing` and `track splitting & merging` distances or the `maximum
+  frame gap`.
+* `imcflibs.imagej.trackmate.run_trackmate` to run Trackmate with given settings
+ (which can be set up with the methods in the `imcflibs.imagej.trackmate`
+ submodule) to create a label image.
+
+#### New submodule `imcflibs.imagej.omerotools`
+
+Providing helper functions to connect to *OMERO* using user credentials, fetch
+and upload images, retrieve datasets or save ROIs to OMERO.
+
+* `imcflibs.imagej.omerotools.parse_url` to parse the OMERO URL and get a list
+  of `ImageWrappers` from multiple image or dataset IDs.
+* `imcflibs.imagej.omerotools.connect` to connect to OMERO using user
+ credentials.
+* `imcflibs.imagej.omerotools.fetch_image` to fetch an image from OMERO using
+ the image ID.
+* `imcflibs.imagej.omerotools.upload_image_to_omero` to upload a local image to
+  OMERO and return the new image ID.
+* `imcflibs.imagej.omerotools.add_keyvalue_annotation` to add an annotation to
+ an OMERO object.
+* `imcflibs.imagej.omerotools.delete_keyvalue_annotations` to delete key/value
+ annotations from an OMERO object.
+* `imcflibs.imagej.omerotools.find_dataset` to find a dataset in OMERO using the
+ dataset ID.
+* `imcflibs.imagej.omerotools.get_acquisition_metadata` to get the acquisition
+ metadata from an image in OMERO.
+* `imcflibs.imagej.omerotools.get_info_from_original_metadata` to get the
+ original metadata from an image in OMERO.
+* `imcflibs.imagej.omerotools.create_table_columns` to create OMERO table
+ headings from a list of column names.
+* `imcflibs.imagej.omerotools.upload_array_as_omero_table` to upload a table to
+ OMERO.
+* `imcflibs.imagej.omerotools.save_rois_to_omero` to save ROIs to OMERO.
+
+#### New submodule `imcflibs.imagej.shading`
+
+* `imcflibs.imagej.shading.simple_flatfield_correction` to perform a simple
+ flatfield correction to an ImagePlus.
+
+#### New submodule `imcflibs.imagej.processing`
+
+Utilities for filtering and thresholding.
+
+* `imcflibs.imagej.processing.apply_filter` to apply a filter to an ImagePlus.
+* `imcflibs.imagej.processing.apply_rollingball_bg_subtraction` to apply a
+ rolling ball background subtraction to an ImagePlus.
+* `imcflibs.imagej.processing.apply_threshold` to apply a threshold method to an
+ ImagePlus.
+
+#### New functions in `imcflibs.imagej.bioformats`
+
+* `imcflibs.imagej.bioformats.export` to export an image to a given file.
+* `imcflibs.imagej.bioformats.get_metadata_from_file` to extract various
+ metadata from a given file using BioFormats.
+* `imcflibs.imagej.bioformats.get_stage_coords` to get stage coordinates and
+ calibration for one or more given images.
+
+#### Other new functions
+
+* `imcflibs.strtools.pad_number` to pad a number with leading zeros.
+* `imcflibs.pathtools.create_directory` to create a new directory at the
+ specified path if it does not exist (needed for Python 2.7).
+* `imcflibs.imagej.prefs.set_default_ij_options` to configure ImageJ default
+ options to ensure consistent behavior.
+* `imcflibs.imagej.projections.project_stack` to project a stack along a defined
+ axis using one of the available projection methods, such as `max`, `min`,
+ `mean`, `sum` or `standard_deviation`.
+
+### Changed
+
+* `imcflibs.pathtools.listdir_matching` now has an additional optional boolean
+  parameter `regex` to request that the parameter `suffix` be interpreted as a
+ regular expression for filtering.
+* `imcflibs.imagej.misc.calculate_mean_and_stdv` now has an optional parameter
+  `round_decimals` to allow for rounding the results, defaulting to `0`.
+
## 1.4.0
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index 9678b746..1d2f6f45 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -6,7 +6,7 @@ To create a new release, clone the [scijava-scripts][gh_scijava-scripts] repo
(e.g. in `/opt/imagej/`) and run the `release-version.sh` helper:
```bash
-BASE_DIR=/opt/imagej
+BASE_DIR=/opt
mkdir -pv "$BASE_DIR"
cd "$BASE_DIR"
git clone https://github.com/scijava/scijava-scripts
@@ -14,23 +14,36 @@ cd -
RELEASE_SCRIPT="$BASE_DIR/scijava-scripts/release-version.sh"
-$RELEASE_SCRIPT --skip-push --skip-gpg --skip-license-update
+############## ONLY FOR PRE-RELEASES ##############
+PRE_RELEASE="1.5.0.a17" # <-- adjust this to the desired version
+EXTRA_FLAGS="--skip-branch-check --skip-version-check $PRE_RELEASE"
+############## ONLY FOR PRE-RELEASES ##############
+
+$RELEASE_SCRIPT --skip-push --skip-gpg --skip-license-update $EXTRA_FLAGS
```
-**IMPORTANT**: after the release has been built, the corresponding tag needs to
-be pushed to github, e.g. like this:
+**IMPORTANT 1**: after the release has been built, the corresponding tag needs
+to be pushed to github, e.g. like this:
```bash
RELEASE_TAG=$(git tag -l "python-imcflibs-*" | tail -n 1)
git push origin $RELEASE_TAG
```
+**IMPORTANT 2**: in case a **pre-release** was created, the last commit needs to
+be discarded, as the _release-script_ places a wrong version / snapshot
+combination in the `pom.xml`:
+
+```bash
+git reset --hard HEAD~1
+```
+
## Build & Deploy with Maven using VS Code
Building and deploying the package can be greatly simplified using "tasks" in
[Visual Studio Code][www_vscode]. By adding the following settings to the
`.vscode/tasks.json` file, you can simply press `Ctrl+Shift+B` in VS Code and
-select the *deploy* task for running Maven and have the resulting JAR file being
+select the _deploy_ task for running Maven and have the resulting JAR file
placed in `/opt/fiji-packaging/Fiji.app/jars/` (adjust to your path as
necessary):
@@ -67,7 +80,7 @@ necessary):
## Linting Python 2.7 with VS Code
For being able to lint the old Python code properly, you'll need to set up an
-appropriate *virtualenv* with `pylint` being installed.
+appropriate _virtualenv_ with `pylint` being installed.
Using [`fish`][www_fish] and [virtualfish][www_vf], this can be done as follows:
diff --git a/README.md b/README.md
index d38bf9fa..db18ef35 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,9 @@
# IMCFlibs π β π© π§ πͺ
-[](https://github.com/imcf/python-imcflibs/actions/workflows/build.yml)
-[](https://zenodo.org/badge/latestdoi/156891364)
+[][build]
+[](https://github.com/imcf/python-imcflibs/actions/workflows/lint.yml)
+[][doi]
This package contains a diverse collection of Python functions dealing with
paths, I/O (file handles, ...), strings etc. and tons of [Fiji][fiji] /
@@ -13,13 +14,17 @@ Initially this has been a multi-purpose package where a substantial part had
been useful in **CPython** as well. However, since the latest Jython
release is still based on Python 2.7 (see the [Jython 3 roadmap][jython3] for
more info), *imcflibs* is now basically limited to the **Fiji / ImageJ2
-ecosystem** (which is also the reason why no `pip install`able package is
-provided).
+ecosystem**.
Releases are made through Maven and published to the [SciJava Maven
repository][sj_maven]. The easiest way to use the lib is by adding the **`IMCF
Uni Basel`** [update site][imcf_updsite] to your ImageJ installation.
+The [`pip install`able package][pypi] is probably only useful for two cases:
+running `pytest` (where applicable) and rendering [HTML-based API docs][apidocs]
+using [`pdoc`][pdoc]. Let us know in case you have another use case πͺ for
+it.
+
Developed and provided by the [Imaging Core Facility (IMCF)][imcf] of the
Biozentrum, University of Basel, Switzerland.
@@ -55,3 +60,8 @@ correct_and_project(raw_image, out_path, model, "Maximum", ".ics")
[imcf_updsite]: https://imagej.net/list-of-update-sites/
[script_split]: https://github.com/imcf/imcf-fiji-scripts/blob/master/src/main/resources/scripts/Plugins/IMCF_Utilities/Convert/Split_TIFFs_By_Channels_And_Slices.py
[script_fvstitch]: https://github.com/imcf/imcf-fiji-scripts/blob/master/src/main/resources/scripts/Plugins/IMCF_Utilities/Stitching_Registration/FluoView_OIF_OIB_OIR_Simple_Stitcher.py
+[doi]: https://zenodo.org/badge/latestdoi/156891364
+[build]: https://github.com/imcf/python-imcflibs/actions/workflows/build.yml
+[apidocs]: https://imcf.one/apidocs/imcflibs/imcflibs.html
+[pdoc]: https://pdoc.dev/
+[pypi]: https://pypi.org/project/imcflibs/
diff --git a/TESTING.md b/TESTING.md
index b515e21d..356d6462 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -1,41 +1,105 @@
-# Testing π§ͺ in Fiji / ImageJ2
+# Testing π§ͺπ§« in Fiji / ImageJ2
+
+## Using `pytest` ππ¬ and Python 3 for plain Python code
+
+Those parts of the package that do not interact with / depend on ImageJ objects
+can be tested via [`pytest`][pytest] up to a certain level; some (most?) of them
+should even work in a Python 3 environment.
+
+To perform those tests, the packages otherwise provided by ImageJ need to be
+mocked using the `imcf-fiji-mocks` package. For setting up a _venv_, use the
+steps described here:
+
+```bash
+# check if we're "inside" the repo already, otherwise clone it here:
+git remote -v 2>/dev/null | grep -q imcf/python-imcflibs || {
+ git clone https://github.com/imcf/python-imcflibs/
+ cd python-imcflibs
+ git checkout -b processing-options-class origin/processing-options-class
+}
+# create and activate a new venv:
+test -d "venv" || python3 -m venv venv
+source venv/bin/activate
+
+# install dependencies / requirements:
+MOCKS_REL="0.2.0"
+URL_PFX="https://github.com/imcf/imcf-fiji-mocks/releases/download/v$MOCKS_REL"
+pip install --upgrade \
+ $URL_PFX/imcf_fiji_mocks-${MOCKS_REL}-py2.py3-none-any.whl \
+ $URL_PFX/micrometa-15.2.2-py2.py3-none-any.whl \
+ $URL_PFX/sjlogging-0.5.2-py2.py3-none-any.whl \
+ olefile \
+ pytest \
+ pytest-cov \
+ pip
+
+# now install the 'imcflibs' package in editable mode:
+pip install -e .
+```
+
+Using this _venv_, tests can be triggered the usual way. To run only
+specific tests, use e.g.
+
+```bash
+pytest tests/bdv/test_processingoptions.py
+```
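+
+To additionally collect coverage (picking up the settings from `.coveragerc`),
+run e.g.
+
+```bash
+pytest --cov --cov-report=html
+```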
+
+## Using `pytest` ππ¬ and Python 2 for plain Python code
+
+For running [`pytest`][pytest] in a C-Python 2 environment, things are slightly
+more complicated than with the approach described for Python 3 above, as `pip` for
+Python 2 cannot install a project in _editable_ mode unless it has a `setup.py`
+file (which we don't have and don't want).
+
+Therefore, a wheel needs to be built (e.g. using [`poetry`][poetry]) and
+installed (every time) into the corresponding virtualenv when performing the
+tests. Assuming you have a working _poetry_ setup on your machine, you can
+simply use the provided `scripts/py2-pytest.sh` wrapper that will create the
+virtualenv, build and install the `imcflibs` wheel and launch `pytest` with the
+parameters specified, e.g.
+
+```bash
+bash scripts/py2-pytest.sh -rv --cov --cov-report html
+```
+
+## Common (interactive) testing with ImageJ2 / Fiji
Unfortunately there is nothing like `pytest` available for the parts that are
running exclusively in a ImageJ2 / Fiji context. So in order to provide at least
some basic, semi-interactive tests the following conventions are being used:
-* Each ***function*** in any of the `imcflibs.imagej` submodules should have its
- own directory underneath `/tests/imagej/`, using their fully qualified name
- as the path (only skipping the `imcflibs.` prefix). For example test scripts
- for `imcflibs.imagej.bioformats.import_image()` will be placed in the
- directory `/tests/imagej/bioformats/import_image/`.
+* Each _**function**_ in any of the `imcflibs.imagej` submodules should have its
+  own directory underneath `/tests/interactive-imagej/`, using its fully
+  qualified name as the path (only skipping the `imcflibs.` prefix). For example,
+ test scripts for `imcflibs.imagej.bioformats.import_image()` will be placed in
+ the directory `/tests/interactive-imagej/bioformats/import_image/`.
* The scripts inside those directories are intended to be run interactively /
manually in a (freshly started) Fiji instance. Yes, really. Any other
suggestions are highly welcome!
-* To facilitate this, a collection of *test images* (and possibly other input
- data) should be cloned to the local file system. Currently this `sample_data`
- repository is *NOT* publicly available due to legal β uncertainties. A repo
+* To facilitate this, a collection of _test images_ (and possibly other input
+ data) should be cloned to the local file system. Currently this `sample-data`
+ repository is _NOT_ publicly available due to legal β uncertainties. A repo
containing test data π that can be published should be assembled over time
though!
-* Any *interactive* test script should start with a header similar to the one
- described below. Paths to input data *inside* the test scripts **has** to be
- relative to the location of the `sample_data` repository mentioned above. This
+* Any _interactive_ test script should start with a header similar to the one
+  described below. Paths to input data _inside_ the test scripts **have** to be
+ relative to the location of the `sample-data` repository mentioned above. This
will allow for a fairly okayish testing workflow like this:
* Make your changes in VS Code, then trigger a build by pressing `Shift` +
- `Ctrl` + `B`. If things are configured as described in the *DEVELOPMENT*
+ `Ctrl` + `B`. If things are configured as described in the _DEVELOPMENT_
document, the resulting `.jar` file will be automatically placed in Fiji's
`jars/` folder.
* Next, start a fresh instance of the Fiji that received the newly built JAR.
* After Fiji has started, simply drag and drop the desired test script onto
- the main window. This will open the *Script Editor*, then press `Ctrl` + `R`
+ the main window. This will open the _Script Editor_, then press `Ctrl` + `R`
to launch the script.
* Only on the first run on the machine being used you will have to select the
- base location of the `sample_data` repository.
- * All subsequent runs of ***any*** test script using the defined *Script
- Parameter* `IMCF_TESTDATA` will remember this selection, so it will be
+ base location of the `sample-data` repository.
+ * All subsequent runs of _**any**_ test script using the defined _Script
+ Parameter_ `IMCF_TESTDATA` will remember this selection, so it will be
sufficient to just confirm the dialog by pressing `Enter`.
-## Quick Workflow Summary
+### Quick Workflow Summary
First, make sure to have the test data π¬πaround (or some mocks πͺ¨πͺ΅), then:
@@ -48,7 +112,7 @@ First, make sure to have the test data π¬πaround (or some mocks πͺ¨πͺ΅),
1. Inspect the output ππ
1. Repeat π
-## Test Script Template π
+### Test Script Template π
As described above, each test script should use the `IMCF_TESTDATA` parameter to
facilitate the manual testing approach. Simply use this template header for
@@ -76,3 +140,6 @@ from imcflibs.pathtools import parse_path
components = parse_path("systems/lsm700/beads/10x_phmax.czi", IMCF_TESTDATA)
assert os.path.exists(components["full"])
```
+
+[pytest]: https://pytest.org
+[poetry]: https://python-poetry.org
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 00000000..8197bee9
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,3 @@
+"""Pytest configuration."""
+
+collect_ignore = ["tests/interactive-imagej"]
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 00000000..833588fd
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,274 @@
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "coverage"
+version = "7.6.12"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"},
+ {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"},
+ {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"},
+ {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"},
+ {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"},
+ {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"},
+ {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"},
+ {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"},
+ {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"},
+ {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"},
+ {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"},
+ {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"},
+ {file = "coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"},
+ {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"},
+ {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"},
+ {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"},
+ {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"},
+ {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"},
+ {file = "coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"},
+ {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"},
+ {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"},
+ {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"},
+ {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"},
+ {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"},
+ {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"},
+ {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"},
+ {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"},
+ {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"},
+ {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"},
+ {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"},
+ {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"},
+ {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"},
+ {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"},
+ {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"},
+ {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"},
+ {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"},
+ {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"},
+ {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"},
+ {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"},
+ {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"},
+ {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"},
+ {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"},
+ {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"},
+ {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"},
+ {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"},
+ {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"},
+ {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"},
+ {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"},
+ {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"},
+ {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"},
+ {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"},
+ {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"},
+ {file = "coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"},
+ {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"},
+ {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"},
+ {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"},
+ {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"},
+ {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"},
+ {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"},
+ {file = "coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"},
+ {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"},
+ {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"},
+ {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"},
+]
+
+[package.dependencies]
+tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.2"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
+ {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "imcf-fiji-mocks"
+version = "0.10.0"
+description = "Mocks collection for Fiji-Python. Zero functional code."
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "imcf_fiji_mocks-0.10.0-py2.py3-none-any.whl", hash = "sha256:476927d82fa0e93b0b0b738f82cab60e180cf0da5b3dd09dc6a5336b08e18d2d"},
+ {file = "imcf_fiji_mocks-0.10.0.tar.gz", hash = "sha256:d1f3302031cad5f1d15388bf337025bbfb59037a04e79a102de59093e643a5f5"},
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "olefile"
+version = "0.46"
+description = "Python package to parse, read and write Microsoft OLE2 files (Structured Storage or Compound Document, Microsoft Office)"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "olefile-0.46.zip", hash = "sha256:133b031eaf8fd2c9399b78b8bc5b8fcbe4c31e85295749bb17a87cba8f3c3964"},
+]
+
+[[package]]
+name = "packaging"
+version = "24.2"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
+ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
+]
+
+[[package]]
+name = "pluggy"
+version = "1.5.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
+ {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "pytest"
+version = "8.3.4"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"},
+ {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=1.5,<2"
+tomli = {version = ">=1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-cov"
+version = "6.0.0"
+description = "Pytest plugin for measuring coverage."
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"},
+ {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"},
+]
+
+[package.dependencies]
+coverage = {version = ">=7.5", extras = ["toml"]}
+pytest = ">=4.6"
+
+[package.extras]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
+
+[[package]]
+name = "python-micrometa"
+version = "15.2.2"
+description = "Process metadata from various light-microscopy related formats."
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "python_micrometa-15.2.2-py2.py3-none-any.whl", hash = "sha256:1667dc19b08897c243356c8fda3670bcb5e8ce934fcea58ba6aa432313709a5c"},
+ {file = "python_micrometa-15.2.2.tar.gz", hash = "sha256:91a58a6d61d565a4c3d3ac639150fb4bd58473b7c6f9b50845f4cd993f5665d5"},
+]
+
+[package.dependencies]
+imcflibs = ">=1.4,<2.0"
+olefile = ">=0.46,<0.47"
+
+[[package]]
+name = "sjlogging"
+version = "0.5.4"
+description = "Jython package for using SciJava's LogService for logging."
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "sjlogging-0.5.4-py2.py3-none-any.whl", hash = "sha256:e7db0a34ac2788a0404ac02beee232132f3946330cc04d28a37eea9adb3cfd42"},
+ {file = "sjlogging-0.5.4.tar.gz", hash = "sha256:5fb1a4e6338088bdbf9d943a867bf1bc6f77031ec48dbb7dd96f09abb0aaaa92"},
+]
+
+[[package]]
+name = "tomli"
+version = "2.2.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+ {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
+ {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
+ {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
+ {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
+ {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
+ {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
+ {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
+ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
+ {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
+]
+
+[metadata]
+lock-version = "2.0"
+python-versions = ">=3.10"
+content-hash = "6b4a1828157bbddc15f61de0427a3d7970a61da1750f2cbf9e160f1b7546d7c9"
diff --git a/poetry.lock.md b/poetry.lock.md
new file mode 100644
index 00000000..2eefdcfa
--- /dev/null
+++ b/poetry.lock.md
@@ -0,0 +1,16 @@
+## Updating `poetry.lock` ππ
+
+Every time dependencies in `pyproject.toml` have been modified (e.g. when
+pulling in a newer version of the [`imcf-fiji-mocks`][1] package), [Poetry's
+lockfile][2] has to be updated (otherwise the build workflow will start to
+fail, complaining about the outdated file).
+
+To do so, it's not sufficient to simply call `poetry lock --no-update`;
+rather, the Poetry wrapper script has to be used like this:
+
+```bash
+scripts/run-poetry.sh lock --no-update
+```
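+
+To merely verify that the lockfile is still consistent with `pyproject.toml`
+without modifying anything, the same wrapper can be used (assuming a Poetry
+version that provides `check --lock`, i.e. 1.6 or newer):
+
+```bash
+scripts/run-poetry.sh check --lock
+```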
+
+[1]: https://pypi.org/project/imcf-fiji-mocks
+[2]: https://python-poetry.org/docs/basic-usage/#committing-your-poetrylock-file-to-version-control
diff --git a/pom.xml b/pom.xml
index a404225a..50e2d94e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -11,7 +11,7 @@
ch.unibas.biozentrum.imcf
python-imcflibs
- 1.4.1-SNAPSHOT
+ 1.5.1.a26-SNAPSHOT
python-imcflibs
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..31da3a7b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,62 @@
+[tool.poetry]
+authors = [
+ "Niko Ehrenfeuchter ",
+ "Laurent Guerard ",
+ "Kai Schleicher ",
+ "SΓ©bastien Herbert ",
+ "Rohan Girish ",
+]
+description = "Mostly ImageJ/Fiji-related Python helper functions."
+documentation = "https://imcf.one/apidocs/imcflibs/imcflibs.html"
+license = "GPL-3.0-or-later"
+name = "imcflibs"
+readme = "README.md"
+repository = "https://github.com/imcf/python-imcflibs"
+version = "0.0.0"
+
+# NOTE: to create an environment using 'poetry install' or similar operations,
+# the following modifications are temporarily necessary (can / should be
+# reverted after creating the env):
+# - either: python = ">=3.9" AND disable ipython in the dev dependencies
+# - or: python = ">=3.10"
+
+[tool.poetry.dependencies]
+imcf-fiji-mocks = ">=0.10.0"
+python = ">=2.7"
+python-micrometa = "^15.2.2"
+sjlogging = ">=0.5.2"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^8.0.1"
+pytest-cov = "^6.0.0"
+
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+
+[tool.ruff.lint]
+exclude = [
+ "tests/interactive-imagej/*"
+]
+
+select = [
+ "D",
+ # summary lines have to be placed on the first physical line of the docstring
+ "D212",
+ # imperative mood for all docstrings
+ "D401",
+ # summary line has to end in a punctuation mark
+ "D415",
+ # require documentation for _all_ function parameters
+ "D417",
+]
+
+ignore = [
+ # no blank lines allowed after function docstring
+ "D202",
+]
+
+
+[tool.ruff.lint.pydocstyle]
+convention = "numpy"
\ No newline at end of file
diff --git a/scripts/check-if-release.sh b/scripts/check-if-release.sh
new file mode 100755
index 00000000..c7f37934
--- /dev/null
+++ b/scripts/check-if-release.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Very basic script checking for the existence of a file 'release.properties' in
+# the root of the repository. If this is present, the state of the checkout
+# indicates this is a proper release made through the 'release-version.sh'
+# script from the 'scijava-scripts' repo. Will exit non-zero if the file is not
+# present; this can be overridden with an environment variable.
+
+# The idea is to include this in automated builds to prevent accidental
+# attempts to publish non-releases to PyPI.
+
+set -o errexit # exit on any error
+
+PROPERTIES="release.properties"
+
+test -n "$IGNORE_NO_RELEASE" &&
+ echo "β π NOT checking for file '$PROPERTIES'... β " &&
+ exit 0
+
+cd "$(dirname "$0")/.."
+
+echo "Checking if '$PROPERTIES' exists..."
+test -f "$PROPERTIES" && echo "All good β
" && exit 0
+
+echo πππ
+echo '/-----------------------------------------------------\'
+echo "| Couldn't find 'release.properties', STOPPING build! |"
+echo "| To ignore this, add this to the environment: |"
+echo "| > export IGNORE_NO_RELEASE=true |"
+echo '\-----------------------------------------------------/'
+echo πππ
+
+exit 1
diff --git a/scripts/doctest_runner.py b/scripts/doctest_runner.py
deleted file mode 100644
index a1782e0d..00000000
--- a/scripts/doctest_runner.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-
-"""Doctest runner for the imcflibs package.
-
-Needs to be run from imcflibs's parent directory.
-"""
-
-if __name__ == "__main__":
- import doctest
- import sys
-
- VERB = '-v' in sys.argv
-
- import imcflibs
- import imcflibs.pathtools
- import imcflibs.iotools
- import imcflibs.strtools
-
- doctest.testmod(imcflibs, verbose=VERB)
- doctest.testmod(imcflibs.pathtools, verbose=VERB)
- doctest.testmod(imcflibs.iotools, verbose=VERB)
- doctest.testmod(imcflibs.strtools, verbose=VERB)
diff --git a/scripts/build_and_deploy.sh b/scripts/mvn-build-deploy.sh
old mode 100644
new mode 100755
similarity index 100%
rename from scripts/build_and_deploy.sh
rename to scripts/mvn-build-deploy.sh
diff --git a/scripts/parse-python-deps.py b/scripts/parse-python-deps.py
new file mode 100644
index 00000000..2b076114
--- /dev/null
+++ b/scripts/parse-python-deps.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+
+"""Parse project dependencies and format them for usage with `pip install`."""
+
+# NOTE: requires Ubuntu package 'python3-tomli' to be installed!
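+#
+# Usage sketch (illustrative, assuming it is called from the repository root):
+#
+#   pip install $(scripts/parse-python-deps.py)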
+
+import tomli
+
+with open("pyproject.toml", "rb") as tomlfile:
+ pyproject = tomli.load(tomlfile)
+
+deps_pkg = pyproject["tool"]["poetry"]["dependencies"]
+deps_dev = pyproject["tool"]["poetry"]["group"]["dev"]["dependencies"]
+
+output = ""
+
+for deps in deps_pkg, deps_dev:
+    for pkg, ver in deps.items():
+        if pkg == "python":
+            # skip the interpreter requirement, it cannot be installed via pip:
+            continue
+        if ver[0] == "^":
+            ver = f">={ver[1:]}"
+        output = f"{output} {pkg}{ver}"
+
+print(output)
diff --git a/scripts/pom-to-pyproject.sh b/scripts/pom-to-pyproject.sh
new file mode 100755
index 00000000..54e6a9c6
--- /dev/null
+++ b/scripts/pom-to-pyproject.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -o errexit # exit on any error
+set -o pipefail
+
+echo "Propagating version information: POM.xml -> pyproject.toml / __init__.py"
+
+cd "$(dirname "$0")/.."
+
+### parse the version from 'pom.xml':
+PACKAGE_VERSION=$(xmlstarlet sel --template -m _:project -v _:version pom.xml)
+PACKAGE_NAME=$(xmlstarlet sel --template -m _:project -v _:artifactId pom.xml)
+PACKAGE_DIR="src/${PACKAGE_NAME#python-}" # strip 'python-' prefix if present
+
+echo "Package version from POM: [$PACKAGE_VERSION]"
+### make sure to have a valid Python package version:
+case $PACKAGE_VERSION in
+*-SNAPSHOT*)
+ PACKAGE_VERSION=${PACKAGE_VERSION/-SNAPSHOT/}
+ ### calculate the distance to the last release tag:
+    # use version-sort so that e.g. "1.10" ranks above "1.9":
+    LAST_TAG=$(git tag --list "${PACKAGE_NAME}-*" | sort --version-sort | tail -n1)
+ # echo "Last git tag: '$LAST_TAG'"
+ COMMITS_SINCE=$(git rev-list "${LAST_TAG}..HEAD" | wc -l)
+ # echo "Nr of commits since last tag: $COMMITS_SINCE"
+ HEAD_ID=$(git rev-parse --short HEAD)
+ # echo "HEAD commit hash: $HEAD_ID"
+ PACKAGE_VERSION="${PACKAGE_VERSION}.dev${COMMITS_SINCE}+${HEAD_ID}"
+ ;;
+esac
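+
+# Worked example (hypothetical values): a POM version of "1.5.1-SNAPSHOT" with
+# 17 commits since the last release tag and HEAD at "abc1234" will result in
+# the PEP 440 compliant version "1.5.1.dev17+abc1234".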
+
+echo "Using Python package version: [$PACKAGE_VERSION]"
+### put the version into the project file and the package source:
+sed -i "s/^version = \"0.0.0\"/version = \"${PACKAGE_VERSION}\"/" pyproject.toml
+sed -i "s/\${project.version}/${PACKAGE_VERSION}/" "${PACKAGE_DIR}/__init__.py"
diff --git a/scripts/py2-pytest.sh b/scripts/py2-pytest.sh
new file mode 100755
index 00000000..3c8557c1
--- /dev/null
+++ b/scripts/py2-pytest.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+
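+# Usage sketch (illustrative values, all environment variables are optional):
+#
+#   VENV_PATH=venv2.cache PY_VERSION=2.7.18 scripts/py2-pytest.sh -vv tests/
+#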
+set -o errexit # exit on any error
+cd "$(dirname "$0")"/..
+
+if [ -n "$VENV_PATH" ]; then
+ # can be used for GH actions to cache the venv by giving it a fixed path:
+ echo "Using venv path from envvar VENV_PATH='$VENV_PATH'."
+ VENV="$VENV_PATH"
+else
+ VENV="$(mktemp --directory --dry-run --tmpdir=. venv2.pytest-XXX)"
+fi
+
+if [ -n "$PYENV_ROOT" ]; then
+ # in case pyenv was retrieved e.g. from a GH actions cache, it will be
+ # present in the filesystem but not added to the path (this is done when
+ # the installer runs, but will not persist to subsequent runs that will get
+ # their pyenv extracted from the cache), therefore we're force-adding it:
+ echo "Found envvar PYENV_ROOT='$PYENV_ROOT', adjusting PATH..."
+ export PATH="$PYENV_ROOT/bin:$PYENV_ROOT/shims:$PATH"
+fi
+
+echo "PATH=$PATH"
+
+if [ -n "$PY_VERSION" ]; then
+ echo "Calling [pyenv local $PY_VERSION]..."
+ pyenv local "$PY_VERSION"
+fi
+
+# now we're done checking the environment, so disallow empty variables below:
+set -o nounset
+
+# NOTE: the `pip2` calls below were initially using a flag to prevent the
+# deprecation warning for Python 2.7 (`--no-python-version-warning`), alas this
+# flag only got into pip as of version 20.0, which is newer than the default one
+# provided by GitHub's "ubuntu-22.04" image, so these commands would fail -
+# therefore we'll have to live with the warnings for now.
+
+if ! [ -d "$VENV" ]; then
+ pip2 show virtualenv > /dev/null || {
+ echo "== Installing 'virtualenv' for Python2..."
+ pip2 install virtualenv
+ }
+ echo "== Creating a Python2 venv in [$VENV]..."
+ python2 -m virtualenv --always-copy "$VENV"
+ echo "== Finished creating a Python2 venv."
+fi
+
+function vpip() {
+ "$VENV/bin/pip" "$@"
+}
+
+echo
+echo "===== Using venv at: [$VENV] ====="
+"$VENV/bin/python" --version
+echo
+
+echo "== Installing local version of 'imcflibs' package..."
+# NOTE: for being able to use coverage, the package has to be installed in
+# editable mode, making it necessary to move `pyproject.toml` out of the way
+# and to create a `setup.py` for the actual installation process (this will be
+# reverted after installing):
+### parse the version from 'pom.xml':
+PACKAGE_VERSION=$(xmlstarlet sel --template -m _:project -v _:version pom.xml)
+PACKAGE_NAME=$(xmlstarlet sel --template -m _:project -v _:artifactId pom.xml)
+
+echo "Package version from POM: [$PACKAGE_VERSION]"
+### make sure to have a valid Python package version:
+case $PACKAGE_VERSION in
+*-SNAPSHOT*)
+ PACKAGE_VERSION=${PACKAGE_VERSION/-SNAPSHOT/}
+ ### calculate the distance to the last release tag:
+    # use version-sort so that e.g. "1.10" ranks above "1.9":
+    LAST_TAG=$(git tag --list "${PACKAGE_NAME}-*" | sort --version-sort | tail -n1)
+ # echo "Last git tag: '$LAST_TAG'"
+ COMMITS_SINCE=$(git rev-list "${LAST_TAG}..HEAD" | wc -l)
+ # echo "Nr of commits since last tag: $COMMITS_SINCE"
+ HEAD_ID=$(git rev-parse --short HEAD)
+ # echo "HEAD commit hash: $HEAD_ID"
+ PACKAGE_VERSION="${PACKAGE_VERSION}.dev${COMMITS_SINCE}+${HEAD_ID}"
+ ;;
+esac
+echo "== * Mocking 'setup.py' with package version $PACKAGE_VERSION"
+echo "import setuptools
+setuptools.setup(
+name='imcflibs',
+version='$PACKAGE_VERSION',
+package_dir={'': 'src'},
+)
+" > setup.py
+
+echo "== * Temporarily disabling 'pyproject.toml'..."
+mv pyproject.toml pyproject_.toml
+
+echo "== * Installing package in editable mode..."
+vpip install --editable .
+
+echo "== * Installed package version reported by pip:"
+vpip show imcflibs | grep ^Version:
+
+echo "== * Re-enabling 'pyproject.toml'..."
+mv pyproject_.toml pyproject.toml
+
+echo "== * Removing 'setup.py'..."
+rm setup.py
+
+echo "== * Installing dependencies (incl. pre-release and dev versions)..."
+vpip install --upgrade --pre \
+ imcf-fiji-mocks
+
+echo "== * Installing dependencies..."
+vpip install \
+ python-micrometa \
+ sjlogging \
+ olefile==0.46 \
+ pytest \
+ pytest-cov \
+ pip
+
+echo "== * Cleaning up egg-info..."
+# NOTE: this can only be done AFTER the pip-install from above as otherwise
+# dependency resolution won't work due to lack of package metadata:
+rm -r src/imcflibs.egg-info
+echo "== Finished installing local 'imcflibs'."
+echo
+
+echo "== Running pytest..."
+set -o xtrace
+set +o errexit # otherwise the script stops if pytest exits non-zero
+"$VENV/bin/pytest" "$@" # run pytest with the parameters given to the script
+RETVAL=$? # remember the actual exit code of pytest for returning it below!
+set +o xtrace
+
+echo
+echo "== Done. Leaving venv around: [$VENV]"
+echo
+
+exit $RETVAL # now return the exit code from running pytest
diff --git a/scripts/run-poetry.sh b/scripts/run-poetry.sh
new file mode 100755
index 00000000..a7ecdeae
--- /dev/null
+++ b/scripts/run-poetry.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
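+# Wrapper running 'poetry' with version metadata propagated from 'pom.xml'.
+# Usage sketch (all arguments are passed through to poetry, see
+# 'poetry.lock.md' for details):
+#
+#   scripts/run-poetry.sh lock --no-update
+#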
+set -o errexit # exit on any error
+
+cd "$(dirname "$0")/.."
+
+STATUS=$(git status --porcelain)
+
+if [ -z "$RUN_ON_UNCLEAN" ]; then
+ if [ -n "$STATUS" ]; then
+ echo "==== ERROR: repository unclean, stopping! ===="
+ echo
+ git status
+ echo
+ echo "--------"
+ echo "To ignore this (you have been warned!), set an environment var:"
+ echo
+ echo "> export RUN_ON_UNCLEAN=true"
+ echo
+ exit 1
+ fi
+fi
+
+### clean up old poetry artifacts:
+rm -rf dist/
+
+# adjust metadata version strings:
+scripts/pom-to-pyproject.sh
+
+# in case the project needs to be "installed" (e.g. to run pytest and generate
+# coverage reports), the Python version specified in the project's dependencies
+# needs to be set to a version satisfying the other dependencies' requirements
+# - this can be turned off, e.g. when simply packaging for PyPI.
+PYPROJ="pyproject.toml"
+DEPS_PYTHON='>=3.10'
+if [ -z "$IGNORE_DEPS_PYTHON" ]; then
+ echo "$PYPROJ: setting [python = \"$DEPS_PYTHON\"]"
+ echo "Use 'export IGNORE_DEPS_PYTHON=true' to skip this step."
+ sed -i 's/^python = ">=2.7"$/python = "'"$DEPS_PYTHON"'"/' $PYPROJ
+else
+ echo "$PYPROJ: found 'IGNORE_DEPS_PYTHON' envvar, not modifying."
+fi
+
+set +o errexit # otherwise the script stops if poetry exits non-zero
+poetry "$@" # run poetry with the parameters given to the script
+RETVAL=$? # remember the actual exit code of poetry for returning it below!
+
+### restore the files that were modified by 'pom-to-pyproject.sh':
+git restore pyproject.toml
+git restore "src/*/__init__.py"
+
+exit $RETVAL # now return the exit code from running poetry
diff --git a/src/imcflibs/__init__.py b/src/imcflibs/__init__.py
index e8055314..b5d3fe00 100644
--- a/src/imcflibs/__init__.py
+++ b/src/imcflibs/__init__.py
@@ -17,6 +17,6 @@
# check if we're running in Jython, then also import the 'imagej' submodule:
import platform as _python_platform
-if _python_platform.python_implementation() == "Jython":
+if _python_platform.python_implementation() == "Jython": # pragma: no cover
from . import imagej
del _python_platform
diff --git a/src/imcflibs/_jython_compat.py b/src/imcflibs/_jython_compat.py
new file mode 100644
index 00000000..f6b06d07
--- /dev/null
+++ b/src/imcflibs/_jython_compat.py
@@ -0,0 +1,16 @@
+"""Prevent namespace clashes / race conditions when running in Jython."""
+
+import importlib
+
+# Using `import io` will heavily depend on the state of the Python engine and
+# may fail when being used as a library inside Jython (as `io` might be shadowed
+# by the Java package with the same name then) with something like this:
+## AttributeError: 'javapackage' object has no attribute 'BufferedIOBase'
+io = importlib.import_module("io")
+
+try:
+ # Python 2: "file" is built-in
+    file_types = (file, io.IOBase)
+except NameError:
+ # Python 3: "file" fully replaced with IOBase
+ file_types = (io.IOBase,)
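+
+# Usage sketch (illustrative): check for file-like objects in a way that works
+# on Python 2 / Jython as well as on Python 3:
+#
+#   from imcflibs._jython_compat import file_types
+#   if isinstance(handle, file_types):
+#       ...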
diff --git a/src/imcflibs/imagej/_loci.py b/src/imcflibs/imagej/_loci.py
index e3f6bb0c..0c354373 100644
--- a/src/imcflibs/imagej/_loci.py
+++ b/src/imcflibs/imagej/_loci.py
@@ -1,31 +1,54 @@
"""Internal wrapper module to import from the (Java) loci package.
-This module mostly exists to work around the issue that importing the
-`ImporterOptions` class from the Java `loci` package requires syntax to be used
-that is considered invalid by any C-Python parser (but is still valid and
-working in Jython) and hence will break usage of Black, Pylint, and similar.
+This module exists to work around the issue that importing some of the classes
+from the Java `loci` package would require syntax that is considered invalid by
+any C-Python parser (but is still valid and working in Jython) and hence will
+break usage of Black, Pylint, and similar.
+
+By aggregating those "special" imports into this (private) submodule we can
+properly work around that issue by providing "dummy" objects for C-Python and
+importing the actual modules / classes when running within Jython. To avoid the
+invalid syntax issue (which would otherwise prevent C-Python-based tools like
+Black and pdoc from running) those parts are done via `importlib` calls.
+
+Other loci-related imports (i.e. those without problematic syntax) are placed in
+here simply for consistency reasons (to have everything in the same place).
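+
+Example
+-------
+Downstream code should import the names from this submodule instead of
+directly from the `loci` package, e.g.:
+
+```
+from imcflibs.imagej._loci import ImporterOptions
+```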
+"""
-By stashing this into an internal submodule only checks on this specific
-(minimalistic) file will fail but remain operational for the other code.
+#
+### *** WARNING *** ### *** WARNING *** ### *** WARNING *** ### *** WARNING ***
+#
+# Whenever an import is added here, make sure to also update the corresponding
+# part in `imcf-fiji-mocks`: https://github.com/imcf/imcf-fiji-mocks/
+#
+### *** WARNING *** ### *** WARNING *** ### *** WARNING *** ### *** WARNING ***
+#
-So why are the other imports in here then?
-This was a conscious decision as it seems to be confusing that *some* parts from
-the `loci` package need to be imported from the `imcflibs.imagej._loci`
-sub-module while others are imported directly. Instead we're simply importing
-proxy-style all loci components through the file here.
+from loci.plugins import BF
-NOTE: the actual import of `ImporterOptions` still requires the `# pdoc: skip`
-pragma to work with the documentation generation scripts, e.g.
+# dummy objects to prevent failing imports in a non-ImageJ / Jython context:
+ImporterOptions = None
+ZeissCZIReader = None
+DefaultMetadataOptions = None
+MetadataLevel = None
+DynamicMetadataOptions = None
+MetadataOptions = None
-```
-from ._loci import ImporterOptions # pdoc: skip
-```
-"""
+# perform the actual imports when running under Jython using `importlib` calls:
+import platform as _python_platform
-from loci.plugins import BF
+if _python_platform.python_implementation() == "Jython": # pragma: no cover
+ import importlib
+
+ _loci_plugins_in = importlib.import_module("loci.plugins.in")
+ ImporterOptions = _loci_plugins_in.ImporterOptions
-from loci.plugins.in import ImporterOptions # pdoc: skip
-from loci.formats.in import ZeissCZIReader, DefaultMetadataOptions, MetadataLevel, DynamicMetadataOptions, MetadataOptions # pdoc: skip
+ _loci_formats_in = importlib.import_module("loci.formats.in")
+ ZeissCZIReader = _loci_formats_in.ZeissCZIReader
+ DefaultMetadataOptions = _loci_formats_in.DefaultMetadataOptions
+ MetadataLevel = _loci_formats_in.MetadataLevel
+ DynamicMetadataOptions = _loci_formats_in.DynamicMetadataOptions
+ MetadataOptions = _loci_formats_in.MetadataOptions
+del _python_platform
-from loci.formats import ImageReader, Memoizer
\ No newline at end of file
+from loci.formats import ImageReader, Memoizer, MetadataTools
diff --git a/src/imcflibs/imagej/bdv.py b/src/imcflibs/imagej/bdv.py
new file mode 100644
index 00000000..5cc7b412
--- /dev/null
+++ b/src/imcflibs/imagej/bdv.py
@@ -0,0 +1,1650 @@
+"""BigDataViewer related functions.
+
+Mostly convenience wrappers with simplified calls and default values.
+"""
+
+# Some function names just need to be longer than 30 chars:
+# pylint: disable-msg=invalid-name
+
+# The attribute count is not really our choice:
+# pylint: disable-msg=too-many-instance-attributes
+
+import os
+import shutil
+import sys
+
+from ch.epfl.biop.scijava.command.spimdata import (
+ FuseBigStitcherDatasetIntoOMETiffCommand,
+)
+from ij import IJ
+
+from .. import pathtools
+from ..log import LOG as log
+
+
+# internal template strings used in string formatting (note: the `"""@private"""`
+# pseudo-decorator is there to instruct [pdoc] to omit those variables when generating
+# API documentation):
+SINGLE = "[Single %s (Select from List)]"
+"""@private"""
+MULTIPLE = "[Multiple %ss (Select from List)]"
+"""@private"""
+RANGE = "[Range of %ss (Specify by Name)]"
+"""@private"""
+SINGLE_FILE = "[NO (one %s)]"
+"""@private"""
+MULTI_SINGLE_FILE = "[YES (all %ss in one file)]"
+"""@private"""
+MULTI_MULTI_FILE = "[YES (one file per %s)]"
+"""@private"""
+
+
+class ProcessingOptions(object):
+ """Helper to store processing options and generate parameter strings.
+
+ Example
+ -------
+ NOTE: for readability reasons the output has been split into multiple lines
+ even though the formatters are returning a single-line string.
+
+ >>> opts = ProcessingOptions()
+ >>> opts.process_channel(2)
+ >>> opts.reference_tile(1)
+ >>> opts.treat_timepoints("compare")
+
+ >>> opts.fmt_acitt_options()
+ ... process_angle=[All angles]
+ ... process_channel=[Single channel (Select from List)]
+ ... process_illumination=[All illuminations]
+ ... process_tile=[All tiles]
+ ... process_timepoint=[All Timepoints]
+
+ >>> opts.fmt_acitt_selectors()
+ ... processing_channel=[channel 1]
+
+ >>> opts.fmt_use_acitt()
+ ... channels=[Average Channels]
+ ... illuminations=[Average Illuminations]
+ ... tiles=[use Tile 1]
+
+ >>> opts.fmt_how_to_treat()
+ ... how_to_treat_angles=[treat individually]
+ ... how_to_treat_channels=group
+ ... how_to_treat_illuminations=group
+ ... how_to_treat_tiles=group
+ ... how_to_treat_timepoints=compare
+ """
+
+ def __init__(self):
+ self._angle_processing_option = "[All angles]"
+ self._angle_select = ""
+
+ self._channel_processing_option = "[All channels]"
+ self._channel_select = ""
+
+ self._illumination_processing_option = "[All illuminations]"
+ self._illumination_select = ""
+
+ self._tile_processing_option = "[All tiles]"
+ self._tile_select = ""
+
+ self._timepoint_processing_option = "[All Timepoints]"
+ self._timepoint_select = ""
+
+ # by default `angles` is empty as the "sane" default value for
+ # "treat_angles" is "[treat individually]"
+ self._use_angle = ""
+ # all other "use" options are set to averaging by default:
+ self._use_channel = "channels=[Average Channels]"
+ self._use_illumination = "illuminations=[Average Illuminations]"
+ self._use_tile = "tiles=[Average Tiles]"
+ self._use_timepoint = "timepoints=[Average Timepoints]"
+
+ # 'treat_*' values are: "group", "compare" or "[treat individually]"
+ self._treat_angles = "[treat individually]"
+ self._treat_channels = "group"
+ self._treat_illuminations = "group"
+ self._treat_tiles = "compare"
+ self._treat_timepoints = "[treat individually]"
+
+ ### reference-X methods
+
+ def reference_angle(self, value):
+ """Set the reference angle when using *Expert Grouping Options*.
+
+ Select the angle(s) to use for the operation, by default empty (`""`).
+
+ NOTE: this value will be used to render `angles=[use Angle VALUE]` when
+ calling the `fmt_use_acitt()` method.
+
+ Parameters
+ ----------
+ value : str
+            The angle to use for the grouping.
+ """
+ self._use_angle = "angles=[use Angle %s]" % str(value)
+ log.debug("New reference angle setting: %s", self._use_angle)
+
+ def reference_channel(self, value):
+ """Set the reference channel when using *Expert Grouping Options*.
+
+ Select the channel(s) to use for the operation, by default the averaging
+ mode will be used (`channels=[Average Channels]`).
+
+ NOTE: this value will be used to render `channels=[use Channel VALUE]`
+ when calling the `fmt_use_acitt()` method.
+
+ Parameters
+ ----------
+ value : int or int-like
+ The channel number to use for the grouping.
+ """
+ # channel = int(value) - 1 # will raise a ValueError if cast fails
+ self._use_channel = "channels=[use Channel %s]" % int(value)
+ log.debug("New reference channel setting: %s", self._use_channel)
+
+ def reference_illumination(self, value):
+ """Set the reference illumination when using *Expert Grouping Options*.
+
+ Select the illumination(s) to use for the operation, by default the
+ averaging mode will be used (`illuminations=[Average Illuminations]`).
+
+ NOTE: this value will be used to render `illuminations=[use Illumination
+ VALUE]` when calling the `fmt_use_acitt()` method.
+
+ Parameters
+ ----------
+ value : int or int-like
+ The illumination number to use for the grouping.
+ """
+ self._use_illumination = "illuminations=[use Illumination %s]" % value
+ log.debug(
+ "New reference illumination setting: %s",
+ self._use_illumination,
+ )
+
+ def reference_tile(self, value):
+ """Set the reference tile when using *Expert Grouping Options*.
+
+ Select the tile(s) to use for the operation, by default the averaging
+ mode will be used (`tiles=[Average Tiles]`).
+
+ NOTE: this value will be used to render `tiles=[use Tile VALUE]` when
+ calling the `fmt_use_acitt()` method.
+
+ Parameters
+ ----------
+ value : int
+ The tile number to use for the grouping.
+ """
+ self._use_tile = "tiles=[use Tile %s]" % str(value)
+ log.debug("New reference tile setting: %s", self._use_tile)
+
+ def reference_timepoint(self, value):
+ """Set the reference timepoint when using *Expert Grouping Options*.
+
+ Select the timepoint(s) to use for the operation, by default the
+ averaging mode will be used (`timepoints=[Average Timepoints]`).
+
+ NOTE: this value will be used to render `timepoints=[use Timepoint
+ VALUE]` when calling the `fmt_use_acitt()` method.
+
+ Parameters
+ ----------
+ value : int or int-like
+ The timepoint number to use for the grouping.
+ """
+ self._use_timepoint = "timepoints=[use Timepoint %s]" % value
+ log.debug("New reference timepoint setting: %s", self._use_timepoint)
+
+ ### process-X methods
+
+ def process_angle(self, value, range_end=None):
+ """Set the processing option for angles.
+
+ Update the angle processing option and selection depending on input.
+ If the range_end is not None, it is considered as a range.
+
+ Parameters
+ ----------
+ value : str, int, list of int or list of str
+ The angle(s) to use for processing, either a single value or a list.
+ range_end : int, optional
+ Contains the end of the range, by default None.
+
+ Notes
+ -----
+ Previous function name : angle_select().
+ """
+
+ selection = check_processing_input(value, range_end)
+ processing_option, dimension_select = get_processing_settings(
+ "angle", selection, value, range_end
+ )
+
+ self._angle_processing_option = processing_option
+ self._angle_select = dimension_select
+
+ def process_channel(self, value, range_end=None):
+ """Set the processing option for channels.
+
+ Update the channel processing option and selection depending on input.
+ If the range_end is not None, it is considered as a range.
+
+ Parameters
+ ----------
+ value : str, int, list of int or list of str
+ The channel(s) to use for processing, a single value or a list.
+ range_end : int, optional
+ Contains the end of the range, by default None.
+
+ Notes
+ -----
+ Previous function name : channel_select().
+ """
+
+ selection = check_processing_input(value, range_end)
+ processing_option, dimension_select = get_processing_settings(
+ "channel", selection, value, range_end
+ )
+
+ self._channel_processing_option = processing_option
+ self._channel_select = dimension_select
+
+ def process_illumination(self, value, range_end=None):
+ """Set the processing option for illuminations.
+
+ Update the illumination processing option and selection depending on
+ input. If the range_end is not None, it is considered as a range.
+
+ Parameters
+ ----------
+ value : str, int, list of int or list of str
+ The illumination(s) to use for processing, a single value or a list.
+ range_end : int, optional
+ Contains the end of the range, by default None.
+
+ Notes
+ -----
+ Previous function name : illumination_select().
+ """
+
+ selection = check_processing_input(value, range_end)
+ processing_option, dimension_select = get_processing_settings(
+ "illumination", selection, value, range_end
+ )
+
+ self._illumination_processing_option = processing_option
+ self._illumination_select = dimension_select
+
+ def process_tile(self, value, range_end=None):
+ """Set the processing option for tiles.
+
+ Update the tile processing option and selection depending on input.
+ If the range_end is not None, it is considered as a range.
+
+ Parameters
+ ----------
+ value : str, int, list of int or list of str
+ The tile(s) to use for processing, a single value or a list.
+ range_end : int, optional
+ Contains the end of the range, by default None.
+
+ Notes
+ -----
+ Previous function name : tile_select().
+ """
+
+ selection = check_processing_input(value, range_end)
+ processing_option, dimension_select = get_processing_settings(
+ "tile", selection, value, range_end
+ )
+
+ self._tile_processing_option = processing_option
+ self._tile_select = dimension_select
+
+ def process_timepoint(self, value, range_end=None):
+ """Set the processing option for timepoints.
+
+ Update the timepoint processing option and selection depending on input.
+ If the range_end is not None, it is considered as a range.
+
+ Parameters
+ ----------
+ value : str, int, list of int or list of str
+ The timepoint(s) to use for processing, a single value or a list.
+ range_end : int, optional
+ Contains the end of the range, by default None.
+
+ Notes
+ -----
+ Previous function name : timepoint_select().
+ """
+
+ selection = check_processing_input(value, range_end)
+ processing_option, dimension_select = get_processing_settings(
+ "timepoint", selection, value, range_end
+ )
+
+ self._timepoint_processing_option = processing_option
+ self._timepoint_select = dimension_select
+
+ ### treat-X methods
+
+ def treat_angles(self, value):
+ """Set the value for the `how_to_treat_angles` option.
+
+        If the value is set to `group`, the `reference_angle` setting will
+        also be adjusted to `angles=[Average Angles]`.
+
+ The default setting is `[treat individually]`.
+
+ Parameters
+ ----------
+ value : str
+ One of `group`, `compare` or `[treat individually]`.
+ """
+ self._treat_angles = value
+ log.debug("New 'treat_angles' setting: %s", value)
+ if value == "group":
+ self._use_angle = "angles=[Average Angles]"
+ log.debug("New 'use_angle' setting: %s", self._use_angle)
+
+ def treat_channels(self, value):
+ """Set the value for the `how_to_treat_channels` option.
+
+ The default setting is `group`.
+
+ Parameters
+ ----------
+ value : str
+ One of `group`, `compare` or `[treat individually]`.
+ """
+ self._treat_channels = value
+ log.debug("New 'treat_channels' setting: %s", value)
+
+ def treat_illuminations(self, value):
+ """Set the value for the `how_to_treat_illuminations` option.
+
+ The default setting is `group`.
+
+ Parameters
+ ----------
+ value : str
+ One of `group`, `compare` or `[treat individually]`.
+ """
+ self._treat_illuminations = value
+ log.debug("New 'treat_illuminations' setting: %s", value)
+
+ def treat_tiles(self, value):
+ """Set the value for the `how_to_treat_tiles` option.
+
+ The default setting is `compare`.
+
+ Parameters
+ ----------
+ value : str
+ One of `group`, `compare` or `[treat individually]`.
+ """
+ self._treat_tiles = value
+ log.debug("New 'treat_tiles' setting: %s", value)
+
+ def treat_timepoints(self, value):
+ """Set the value for the `how_to_treat_timepoints` option.
+
+ The default setting is `[treat individually]`.
+
+ Parameters
+ ----------
+ value : str
+ One of `group`, `compare` or `[treat individually]`.
+ """
+ self._treat_timepoints = value
+ log.debug("New 'treat_timepoints' setting: %s", value)
+
+ ### formatter methods
+
+ def fmt_acitt_options(self, input="process"):
+ """Format Angle / Channel / Illumination / Tile / Timepoint options.
+
+ Build a string providing the `process_angle`, `process_channel`,
+ `process_illumination`, `process_tile` and `process_timepoint` options
+ that can be used in a BDV-related `IJ.run` call.
+
+ Returns
+ -------
+ str
+ """
+ input_type = ["process", "resave"]
+ if input not in input_type:
+ raise ValueError("Invalid input type, expected one of: %s" % input_type)
+ parameters = [
+ input + "_angle=" + self._angle_processing_option,
+ input + "_channel=" + self._channel_processing_option,
+ input + "_illumination=" + self._illumination_processing_option,
+ input + "_tile=" + self._tile_processing_option,
+ input + "_timepoint=" + self._timepoint_processing_option,
+ ]
+ parameter_string = " ".join(parameters).strip()
+ log.debug("Formatted 'process_X' options: <%s>", parameter_string)
+ return parameter_string + " "
+
+ def fmt_acitt_selectors(self):
+ """Format Angle / Channel / Illumination / Tile / Timepoint selectors.
+
+ Build a string providing the `angle_select`, `channel_select`,
+ `illumination_select`, `tile_select` and `timepoint_select` options that
+ can be used in a BDV-related `IJ.run` call. In case no selectors have
+ been chosen, nothing but a single space will be returned.
+
+ Returns
+ -------
+ str
+ The formatted selector string. Will be a single white-space in case
+ no selectors have been configured for the object.
+ """
+ parameters = [
+ self._angle_select if self._angle_select else "",
+ self._channel_select if self._channel_select else "",
+ self._illumination_select if self._illumination_select else "",
+ self._tile_select if self._tile_select else "",
+ self._timepoint_select if self._timepoint_select else "",
+ ]
+ parameter_string = " ".join(parameters).strip()
+ log.debug("Formatted 'processing_X' selectors: <%s>", parameter_string)
+ return parameter_string + " "
+
+ def fmt_how_to_treat(self):
+ """Format a parameter string with all `how_to_treat_` options.
+
+ Returns
+ -------
+ str
+ """
+ parameters = [
+ "how_to_treat_angles=" + self._treat_angles,
+ "how_to_treat_channels=" + self._treat_channels,
+ "how_to_treat_illuminations=" + self._treat_illuminations,
+ "how_to_treat_tiles=" + self._treat_tiles,
+ "how_to_treat_timepoints=" + self._treat_timepoints,
+ ]
+ parameter_string = " ".join(parameters).strip()
+ log.debug("Formatted 'how_to_treat_X' options: <%s>", parameter_string)
+ return parameter_string + " "
+
+ def fmt_use_acitt(self):
+ """Format expert grouping options, e.g. `channels=[use Channel 2]`.
+
+ Generate a parameter string using the configured expert grouping options
+ for ACITT. Please note that this may be an empty string (`""`).
+
+ Returns
+ -------
+ str
+ """
+ parameters = [
+ self._use_angle if self._treat_angles == "group" else "",
+ self._use_channel if self._treat_channels == "group" else "",
+ self._use_illumination if self._treat_illuminations == "group" else "",
+ self._use_tile if self._treat_tiles == "group" else "",
+ self._use_timepoint if self._treat_timepoints == "group" else "",
+ ]
+ parameter_string = " ".join(parameters).strip()
+ log.debug(
+ "Formatted expert grouping 'use' options: <%s>",
+ parameter_string,
+ )
+ return parameter_string + " "
+
+
+class DefinitionOptions(object):
+ """Helper to store definition options and generate parameters strings.
+
+ Example
+ -------
+ NOTE: for readability reasons the output has been split into multiple lines
+ even though the formatters are returning a single-line string.
+
+ >>> opts = DefinitionOptions()
+ >>> opts.set_angle_definition("single")
+ >>> opts.set_channel_definition("multi_single")
+
+ >>> opts.fmt_acitt_options()
+ ... multiple_angles=[NO (one angle)]
+ ... multiple_channels=[YES (all channels in one file)]
+ ... multiple_illuminations_directions=[NO (one illumination direction)]
+ ... multiple_tiles=[YES (all tiles in one file)]
+ ... multiple_timepoints=[NO (one time-point)]
+ """
+
+ def __init__(self):
+ self._angle_definition = SINGLE_FILE % "angle"
+ self._channel_definition = MULTI_SINGLE_FILE % "channel"
+ self._illumination_definition = SINGLE_FILE % "illumination direction"
+ self._tile_definition = MULTI_MULTI_FILE % "tile"
+ self._timepoint_definition = SINGLE_FILE % "time-point"
+
+ def check_definition_option(self, value):
+ """Check if the value is a valid definition option.
+
+ Parameters
+ ----------
+ value : str
+ Entered value by the user.
+
+ Returns
+ -------
+        dict(str, str)
+            Dictionary mapping each valid option name to the corresponding
+            template string.
+ """
+ valid = ["single", "multi_single", "multi_multi"]
+ if value not in valid:
+ raise ValueError("Value must be one of: %s" % valid)
+
+ return {
+ "single": SINGLE_FILE,
+ "multi_single": MULTI_SINGLE_FILE,
+ "multi_multi": MULTI_MULTI_FILE,
+ }
+
+ def check_definition_option_ang_ill(self, value):
+ """Check if the value is a valid definition option.
+
+ This is needed for angles and illuminations because support is not
+ available for multiple angles and illuminations in a single file.
+
+ Parameters
+ ----------
+ value : str
+ Entered value by the user.
+
+ Returns
+ -------
+        dict(str, str)
+            Dictionary mapping each valid option name to the corresponding
+            template string.
+ """
+ valid = ["single", "multi_multi"]
+ if value not in valid:
+ raise ValueError(
+ (
+ "Value must be one of: %s. Support for 'multi_single' is "
+ "not available for angles and illuminations."
+ )
+ % valid
+ )
+
+ return {
+ "single": SINGLE_FILE,
+ "multi_multi": MULTI_MULTI_FILE,
+ }
+
+ def set_angle_definition(self, value):
+ """Set the value for the angle definition.
+
+ Parameters
+ ----------
+ value : str
+ One of `single` or `multi_multi`.
+ """
+ choices = self.check_definition_option_ang_ill(value)
+ self._angle_definition = choices[value] % "angle"
+ log.debug("New 'angle_definition' setting: %s", self._angle_definition)
+
+ def set_channel_definition(self, value):
+ """Set the value for the channel definition.
+
+ Parameters
+ ----------
+ value : str
+ One of `single`, `multi_single` or `multi_multi`.
+ """
+ choices = self.check_definition_option(value)
+ self._channel_definition = choices[value] % "channel"
+ log.debug(
+ "New 'channel_definition' setting: %s",
+ self._channel_definition,
+ )
+
+ def set_illumination_definition(self, value):
+ """Set the value for the illumination definition.
+
+ Parameters
+ ----------
+ value : str
+ One of `single`, `multi_single` or `multi_multi`.
+ """
+ choices = self.check_definition_option_ang_ill(value)
+ self._illumination_definition = choices[value] % "illumination direction"
+ log.debug(
+ "New 'illumination_definition' setting: %s",
+ self._illumination_definition,
+ )
+
+ def set_tile_definition(self, value):
+ """Set the value for the tile_definition.
+
+ Parameters
+ ----------
+ value : str
+ One of `single`, `multi_single` or `multi_multi`.
+ """
+ choices = self.check_definition_option(value)
+ self._tile_definition = choices[value] % "tile"
+ log.debug("New 'tile_definition' setting: %s", self._tile_definition)
+
+ def set_timepoint_definition(self, value):
+ """Set the value for the time_point_definition.
+
+ Parameters
+ ----------
+ value : str
+ One of `single`, `multi_single` or `multi_multi`.
+ """
+ choices = self.check_definition_option(value)
+ self._timepoint_definition = choices[value] % "time-point"
+ log.debug(
+ "New 'timepoint_definition' setting: %s",
+ self._timepoint_definition,
+ )
+
+ def fmt_acitt_options(self):
+ """Format Angle / Channel / Illumination / Tile / Timepoint options.
+
+ Build a string providing the `multiple_angles`, `multiple_channels`,
+ `multiple_illuminations_directions`, `multiple_tiles` and
+ `multiple_timepoints` options that can be used in a BDV-related `IJ.run`
+ call.
+
+ Returns
+ -------
+ str
+ """
+ parameters = [
+ "multiple_angles=" + self._angle_definition,
+ "multiple_channels=" + self._channel_definition,
+ "multiple_illuminations_directions=" + self._illumination_definition,
+ "multiple_tiles=" + self._tile_definition,
+ "multiple_timepoints=" + self._timepoint_definition,
+ ]
+ parameter_string = " ".join(parameters).strip()
+ log.debug("Formatted 'multiple_X' options: <%s>", parameter_string)
+ return parameter_string + " "
+
+
+def check_processing_input(value, range_end):
+ """Sanitize and clarifies the acitt input selection.
+
+ Validate the input by checking the type and returning the expected output.
+
+ Parameters
+ ----------
+ value : str, int, list of int or list of str
+ Contains the list of input dimensions, the first input dimension of a
+ range or a single channel.
+ range_end : int or None
+ Contains the end of the range if need be.
+
+ Returns
+ -------
+ str
+        The type of selection: `single`, `multiple` or `range`.
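+
+    Example
+    -------
+    Illustration of the three selection modes (pure Python, no ImageJ needed):
+
+    >>> check_processing_input(1, None)
+    'single'
+    >>> check_processing_input([1, 2], None)
+    'multiple'
+    >>> check_processing_input(1, 5)
+    'range'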
+ """
+ if type(value) is not list:
+ value = [value]
+ # Check if all the elements of the value list are of the same type
+ if not all(isinstance(x, type(value[0])) for x in value):
+ raise TypeError("Invalid input, all values must be of the same type.")
+ if type(range_end) is int:
+ if type(value[0]) is not int:
+ raise TypeError("Range start needs to be an int.")
+ elif len(value) != 1:
+ raise ValueError("Range start needs to be single number.")
+ else:
+ return "range"
+ elif len(value) == 1:
+ return "single"
+ else:
+ return "multiple"
+
+
+def get_processing_settings(dimension, selection, value, range_end):
+ """Generate processing strings for selected dimension and processing mode.
+
+ Generate the processing option and dimension selection strings that
+ correspond to the selected processing mode and the given dimension
+ selection.
+
+ Parameters
+ ----------
+ dimension : {`angle`, `channel`, `illumination`, `tile`, `timepoint`}
+ The dimension selection to use.
+ selection : {`single`, `multiple`, `range`}
+ The *selector* name ("processing mode"), used to derive how the
+ generated string needs to be assembled according to the given dimension
+ and value / range settings.
+ value : str, int, list of int or list of str
+ The list of input dimensions, the first input dimension of a range or a
+ single dimension value in case `selection == "single"` (e.g. for
+ selecting a single channel).
+ range_end : int or None
+ Contains the end of the range if need be.
+
+ Returns
+ -------
+ tuple of str
+ processing_option, dimension_select
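+
+    Example
+    -------
+    Illustration using a range of channels (pure Python, no ImageJ needed):
+
+    >>> get_processing_settings("channel", "range", 1, 3)
+    ('[Range of channels (Specify by Name)]', 'process_following_channels=1-3')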
+ """
+
+ if selection == "single":
+ processing_option = SINGLE % dimension
+ dimension_select = "processing_" + dimension + "=[" + dimension + " %s]" % value
+
+ if selection == "multiple":
+ processing_option = MULTIPLE % dimension
+ dimension_list = ""
+ for dimension_name in value:
+ dimension_list += dimension + "_%s " % dimension_name
+ dimension_select = dimension_list.rstrip()
+
+ if selection == "range":
+ processing_option = RANGE % dimension
+ dimension_select = (
+ "process_following_"
+ + dimension
+ + "s=%s-%s"
+ % (
+ value,
+ range_end,
+ )
+ )
+
+ return processing_option, dimension_select
+
+
+def backup_xml_files(source_directory, subfolder_name):
+ """Create a backup of BDV-XML files inside a subfolder of `xml-backup`.
+
+ Copies all `.xml` and `.xml~` files to a subfolder with the given name
+ inside a folder called `xml-backup` in the source directory. Uses the
+ `shutil.copy2()` command, which will overwrite existing files.
+
+ Parameters
+ ----------
+ source_directory : str
+ Full path to the directory containing the xml files.
+ subfolder_name : str
+ The name of the subfolder that will be used inside `xml-backup`. Will be
+ created if necessary.
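+
+    Example
+    -------
+    Illustrative call (hypothetical path), backing up the BDV-XML files before
+    a registration step:
+
+    >>> backup_xml_files("/data/experiment", "before_registration")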
+ """
+ xml_backup_directory = os.path.join(source_directory, "xml-backup")
+ pathtools.create_directory(xml_backup_directory)
+ backup_subfolder = xml_backup_directory + "/%s" % (subfolder_name)
+ pathtools.create_directory(backup_subfolder)
+ all_xml_files = pathtools.listdir_matching(source_directory, ".*\\.xml", regex=True)
+ os.chdir(source_directory)
+ for xml_file in all_xml_files:
+ shutil.copy2(xml_file, backup_subfolder)
+
+
+def define_dataset_auto(
+ project_filename,
+ file_path,
+ bf_series_type,
+ dataset_save_path=None,
+ timepoints_per_partition=1,
+ resave="Re-save as multiresolution HDF5",
+ subsampling_factors=None,
+ hdf5_chunk_sizes=None,
+):
+ """Define a dataset using the Autoloader or Multi-View loader.
+
+    Runs the "Define Multi-View Dataset" command with the "Automatic Loader
+    (Bioformats based)" option, with the Bio-Formats series being interpreted
+    either as tiles or as angles (see `bf_series_type`).
+
+ Parameters
+ ----------
+ project_filename : str
+ Name of the project (without an `.xml` extension).
+ file_path : str
+ Path to the file, can be the first `.czi` or a regex to match all files
+ with an extension.
+    bf_series_type : {`Angles`, `Tiles`}
+        Defines how Bio-Formats interprets the series.
+    dataset_save_path : str, optional
+        Output path for the `.xml`, by default `None` which will result in a
+        subfolder of the input file's directory, named after the project.
+ timepoints_per_partition : int, optional
+ Split the output dataset by timepoints. Use `0` for no split, resulting
+ in a single HDF5 file containing all timepoints. By default `1`,
+        resulting in one HDF5 file per timepoint.
+ resave : str, optional
+ Allow the function to either re-save the images or simply create a
+ merged xml. Use `Load raw data` to avoid re-saving, by default `Re-save
+ as multiresolution HDF5` which will resave the input data.
+ subsampling_factors : str, optional
+ Specify subsampling factors explicitly, for example:
+ `[{ {1,1,1}, {2,2,1}, {4,4,2}, {8,8,4} }]`.
+ hdf5_chunk_sizes : str, optional
+ Specify hdf5_chunk_sizes factors explicitly, for example
+ `[{ {32,16,8}, {16,16,16}, {16,16,16}, {16,16,16} }]`.
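+
+    Example
+    -------
+    Illustrative call (hypothetical paths, needs to run inside Fiji / Jython):
+
+    >>> define_dataset_auto("mosaic_01", "/data/run1/mosaic_01.czi", "Tiles")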
+ """
+
+ file_info = pathtools.parse_path(file_path)
+
+ project_filename = project_filename.replace(" ", "_")
+ result_folder = pathtools.join2(file_info["path"], project_filename)
+
+ if not os.path.exists(result_folder):
+ os.makedirs(result_folder)
+
+ if not dataset_save_path:
+ dataset_save_path = result_folder
+ if subsampling_factors:
+ subsampling_factors = (
+ "manual_mipmap_setup subsampling_factors=" + subsampling_factors + " "
+ )
+ else:
+ subsampling_factors = ""
+ if hdf5_chunk_sizes:
+ hdf5_chunk_sizes = "hdf5_chunk_sizes=" + hdf5_chunk_sizes + " "
+ else:
+ hdf5_chunk_sizes = ""
+
+ if bf_series_type == "Angles":
+ angle_rotation = "apply_angle_rotation "
+ else:
+ angle_rotation = ""
+
+ options = (
+ "define_dataset=[Automatic Loader (Bioformats based)]"
+ + " "
+ + "project_filename=["
+ + project_filename
+ + ".xml"
+ + "] "
+ + "path=["
+ + file_info["full"]
+ + "] "
+ + "exclude=10 "
+ + "bioformats_series_are?="
+ + bf_series_type
+ + " "
+ + "move_tiles_to_grid_(per_angle)?=["
+ + "Do not move Tiles to Grid (use Metadata if available)] "
+ + "how_to_store_input_images=["
+ + resave
+ + "] "
+ + "load_raw_data_virtually"
+ + " "
+ + "metadata_save_path=["
+ + dataset_save_path
+ + "] "
+ + "image_data_save_path=["
+ + dataset_save_path
+ + "] "
+ + "check_stack_sizes "
+ + angle_rotation
+ + subsampling_factors
+ + hdf5_chunk_sizes
+ + "split_hdf5 "
+ + "timepoints_per_partition="
+ + str(timepoints_per_partition)
+ + " "
+ + "setups_per_partition=0 "
+ + "use_deflate_compression "
+ )
+
+ log.debug(options)
+
+ IJ.run("Define Multi-View Dataset", str(options))
+
+
+def define_dataset_manual(
+ project_filename,
+ source_directory,
+ image_file_pattern,
+ dataset_organisation,
+ definition_opts=None,
+):
+ """Run "Define Multi-View Dataset" using the "Manual Loader" option.
+
+ Parameters
+ ----------
+ project_filename : str
+ Name of the project (without an `.xml` extension).
+ source_directory : str
+ Path to the folder containing the file(s).
+ image_file_pattern : str
+ Regular expression corresponding to the names of your files and how to
+ read the different dimensions.
+ dataset_organisation : str
+ Organisation of the dataset and the dimensions to process.
+ Allows for defining the range of interest of the different dimensions.
+ Looks like "timepoints_=%s-%s channels_=0-%s tiles_=%s-%s"
+    definition_opts : imcflibs.imagej.bdv.DefinitionOptions, optional
+        The `DefinitionOptions` object describing how the files are organized
+        into the various dimensions. Will fall back to the defaults defined in
+        the corresponding class if the parameter is `None` or skipped.
+ """
+
+ xml_filename = project_filename + ".xml"
+
+ if definition_opts is None:
+ definition_opts = DefinitionOptions()
+
+
+ options = (
+ "define_dataset=[Manual Loader (Bioformats based)] "
+ + "project_filename=["
+ + xml_filename
+ + "] "
+ + "_____"
+ + definition_opts.fmt_acitt_options()
+ + " "
+ + "image_file_directory="
+ + source_directory
+ + " "
+ + "image_file_pattern="
+ + image_file_pattern
+ + dataset_organisation
+ + " "
+ + "calibration_type=[Same voxel-size for all views] "
+ + "calibration_definition=[Load voxel-size(s) from file(s)] "
+ # + "imglib2_data_container=[ArrayImg (faster)]"
+ )
+
+ log.debug("Manual dataset definition options: <%s>", options)
+ IJ.run("Define Multi-View Dataset", str(options))
+
+
+def resave_as_h5(
+ source_xml_file,
+ output_h5_file_path,
+ processing_opts=None,
+ timepoints_per_partition=1,
+ use_deflate_compression=True,
+ subsampling_factors=None,
+ hdf5_chunk_sizes=None,
+):
+ """Resave the xml dataset in a new format (either all or single timepoints).
+
+ Useful if it hasn't been done during dataset definition (see
+    `define_dataset_auto()`). Allows e.g. parallelization of HDF5 re-saving.
+
+ Parameters
+ ----------
+ source_xml_file : File or str
+ XML input file.
+ output_h5_file_path : str
+        Export path for the output file, including the `.xml` extension.
+ processing_opts : imcflibs.imagej.bdv.ProcessingOptions, optional
+ The `ProcessingOptions` object defining parameters for the run. Will
+ fall back to the defaults defined in the corresponding class if the
+ parameter is `None` or skipped.
+ timepoints_per_partition : int, optional
+ How many timepoints to export per partition, by default `1`.
+ use_deflate_compression : bool, optional
+ Run deflate compression, by default `True`.
+ subsampling_factors : str, optional
+ Specify subsampling factors explicitly, for example:
+ `[{ {1,1,1}, {2,2,1}, {4,4,2}, {8,8,4} }]`.
+ hdf5_chunk_sizes : str, optional
+ Specify hdf5_chunk_sizes factors explicitly, for example
+ `[{ {32,16,8}, {16,16,16}, {16,16,16}, {16,16,16} }]`.
+ """
+
+ if not processing_opts:
+ processing_opts = ProcessingOptions()
+
+ if use_deflate_compression:
+ use_deflate_compression_arg = "use_deflate_compression "
+ else:
+ use_deflate_compression_arg = ""
+
+ # If split_hdf5 option
+ if timepoints_per_partition != 0:
+ split_hdf5 = "split_hdf5 "
+ else:
+ split_hdf5 = ""
+
+ if subsampling_factors:
+ subsampling_factors = "subsampling_factors=" + subsampling_factors + " "
+ else:
+ subsampling_factors = " "
+ if hdf5_chunk_sizes:
+ hdf5_chunk_sizes = "hdf5_chunk_sizes=" + hdf5_chunk_sizes + " "
+ else:
+ hdf5_chunk_sizes = " "
+
+ options = (
+ "select="
+ + str(source_xml_file)
+ + " "
+ + processing_opts.fmt_acitt_options("resave")
+ + processing_opts.fmt_acitt_selectors()
+ + subsampling_factors
+ + hdf5_chunk_sizes
+ + "timepoints_per_partition="
+ + str(timepoints_per_partition)
+ + " "
+ + "setups_per_partition=0 "
+ + use_deflate_compression_arg
+ + split_hdf5
+ + "export_path="
+ + output_h5_file_path
+ )
+
+ log.debug("Resave as HDF5 options: <%s>", options)
+ IJ.run("As HDF5", str(options))
+
+
+def flip_axes(source_xml_file, x=False, y=True, z=False):
+ """Call BigStitcher's "Flip Axes" command.
+
+ Wrapper for `BigStitcher > Batch Processing > Tools > Flip Axes`. This is
+ required for some formats, for example Nikon `.nd2` files need a flip along
+ the Y-axis.
+
+ Parameters
+ ----------
+ source_xml_file : str
+ Full path to the `.xml` file.
+ x : bool, optional
+ Flip images along the X-axis, by default `False`.
+ y : bool, optional
+        Flip images along the Y-axis, by default `True`.
+ z : bool, optional
+ Flip images along the Z-axis, by default `False`.
+ """
+
+ file_info = pathtools.parse_path(source_xml_file)
+
+ axes_to_flip = ""
+ if x is True:
+ axes_to_flip += " flip_x"
+ if y is True:
+ axes_to_flip += " flip_y"
+ if z is True:
+ axes_to_flip += " flip_z"
+
+ IJ.run("Flip Axes", "select=" + source_xml_file + axes_to_flip)
+
+ backup_xml_files(file_info["path"], "flip_axes")
+
+
+def phase_correlation_pairwise_shifts_calculation(
+ project_path,
+ processing_opts=None,
+ downsampling_xyz="",
+):
+ """Calculate pairwise shifts using Phase Correlation.
+
+ Parameters
+ ----------
+ project_path : str
+ Full path to the `.xml` file.
+ processing_opts : imcflibs.imagej.bdv.ProcessingOptions, optional
+        The `ProcessingOptions` object defining parameters for the run. Will
+ fall back to the defaults defined in the corresponding class if the
+ parameter is `None` or skipped.
+ downsampling_xyz : list of int, optional
+ Downsampling factors in X, Y and Z, for example `[4,4,4]`. By default
+ empty which will result in BigStitcher choosing the factors.
+ """
+
+ if not processing_opts:
+ processing_opts = ProcessingOptions()
+
+ file_info = pathtools.parse_path(project_path)
+
+ if downsampling_xyz != "":
+ downsampling = "downsample_in_x=%s downsample_in_y=%s downsample_in_z=%s " % (
+ downsampling_xyz[0],
+ downsampling_xyz[1],
+ downsampling_xyz[2],
+ )
+ else:
+ downsampling = ""
+
+ options = (
+ "select=["
+ + project_path
+ + "] "
+ + processing_opts.fmt_acitt_options()
+ + processing_opts.fmt_acitt_selectors()
+ + " "
+ + "method=[Phase Correlation] "
+ + "show_expert_grouping_options "
+ + "show_expert_algorithm_parameters "
+ + processing_opts.fmt_use_acitt()
+ + processing_opts.fmt_how_to_treat()
+ + downsampling
+ + "subpixel_accuracy"
+ )
+
+ log.debug("Calculate pairwise shifts options: <%s>", options)
+ IJ.run("Calculate pairwise shifts ...", str(options))
+
+ backup_xml_files(file_info["path"], "phase_correlation_shift_calculation")
+
+
+def filter_pairwise_shifts(
+ project_path,
+ min_r=0.7,
+ max_r=1,
+ max_shift_xyz="",
+ max_displacement="",
+):
+ """Filter the pairwise shifts based on different thresholds.
+
+ Parameters
+ ----------
+ project_path : str
+ Path to the `.xml` on which to apply the filters.
+ min_r : float, optional
+ Minimal quality of the link to keep, by default `0.7`.
+ max_r : float, optional
+ Maximal quality of the link to keep, by default `1`.
+ max_shift_xyz : list(int), optional
+ Maximal shift in X, Y and Z (in pixels) to keep, e.g. `[10,10,10]`. By
+ default empty, meaning no filtering based on the shifts will be applied.
+ max_displacement : int, optional
+ Maximal displacement to keep. By default empty, meaning no filtering
+ based on the displacement will be applied.
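+
+    Example
+    -------
+    Illustrative call (hypothetical values), keeping only links with a quality
+    of at least 0.8 and a shift of at most 10 pixels in each dimension:
+
+    >>> filter_pairwise_shifts("/data/proj.xml", min_r=0.8, max_shift_xyz=[10, 10, 10])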
+ """
+
+ file_info = pathtools.parse_path(project_path)
+
+ if max_shift_xyz != "":
+ filter_by_max_shift = (
+ " filter_by_shift_in_each_dimension"
+ " max_shift_in_x=%s max_shift_in_y=%s max_shift_in_z=%s"
+ ) % (max_shift_xyz[0], max_shift_xyz[1], max_shift_xyz[2])
+ else:
+ filter_by_max_shift = ""
+
+ if max_displacement != "":
+ filter_by_max_displacement = (
+ " filter_by_total_shift_magnitude max_displacement=%s"
+ ) % (max_displacement)
+ else:
+ filter_by_max_displacement = ""
+
+ options = (
+ "select=["
+ + project_path
+ + "] "
+ + "filter_by_link_quality "
+ + "min_r="
+ + str(min_r)
+ + " "
+ + "max_r="
+ + str(max_r)
+ + filter_by_max_shift
+ + filter_by_max_displacement
+ )
+
+ log.debug("Filter pairwise options: <%s>", options)
+ IJ.run("Filter pairwise shifts ...", str(options))
+
+ backup_xml_files(file_info["path"], "filter_pairwise_shifts")
+
+
+def optimize_and_apply_shifts(
+ project_path,
+ processing_opts=None,
+ relative_error=2.5,
+ absolute_error=3.5,
+):
+ """Optimize the shifts and apply them to the dataset.
+
+ Parameters
+ ----------
+ project_path : str
+ Path to the `.xml` on which to optimize and apply the shifts.
+ processing_opts : imcflibs.imagej.bdv.ProcessingOptions, optional
+        The `ProcessingOptions` object defining parameters for the run. Will
+ fall back to the defaults defined in the corresponding class if the
+ parameter is `None` or skipped.
+ relative_error : float, optional
+ Relative alignment error (in px) to accept, by default `2.5`.
+ absolute_error : float, optional
+ Absolute alignment error (in px) to accept, by default `3.5`.
+ """
+
+ if not processing_opts:
+ processing_opts = ProcessingOptions()
+
+ file_info = pathtools.parse_path(project_path)
+
+ options = (
+ "select=["
+ + project_path
+ + "] "
+ + processing_opts.fmt_acitt_options()
+ + processing_opts.fmt_acitt_selectors()
+ + " "
+ + "relative="
+ + str(relative_error)
+ + " "
+ + "absolute="
+ + str(absolute_error)
+ + " "
+ + "global_optimization_strategy=[Two-Round using Metadata to align unconnected "
+ + "Tiles and iterative dropping of bad links] "
+ + "show_expert_grouping_options "
+ + processing_opts.fmt_use_acitt()
+ + processing_opts.fmt_how_to_treat()
+ )
+
+ log.debug("Optimization and shifts application options: <%s>", options)
+ IJ.run("Optimize globally and apply shifts ...", str(options))
+
+ backup_xml_files(file_info["path"], "optimize_and_apply_shifts")
+
+
+def detect_interest_points(
+ project_path,
+ processing_opts=None,
+ sigma=1.8,
+ threshold=0.008,
+ maximum_number=3000,
+):
+ """Run the "Detect Interest Points" command for registration.
+
+ Parameters
+ ----------
+ project_path : str
+ Path to the `.xml` project.
+ processing_opts : imcflibs.imagej.bdv.ProcessingOptions, optional
+ The `ProcessingOptions` object defining parameters for the run. Will
+ fall back to the defaults defined in the corresponding class if the
+ parameter is `None` or skipped.
+ sigma : float, optional
+ Minimum sigma for interest points detection, by default `1.8`.
+ threshold : float, optional
+ Threshold value for the interest point detection, by default `0.008`.
+ maximum_number : int, optional
+ Maximum number of interest points to use, by default `3000`.
+ """
+
+ if not processing_opts:
+ processing_opts = ProcessingOptions()
+
+ options = (
+ "select=["
+ + project_path
+ + "] "
+ + processing_opts.fmt_acitt_options()
+ + processing_opts.fmt_acitt_selectors()
+ + "type_of_interest_point_detection=Difference-of-Gaussian "
+ + "label_interest_points=beads "
+ + "limit_amount_of_detections "
+ + "group_tiles "
+ + "subpixel_localization=[3-dimensional quadratic fit] "
+ + "interest_point_specification=[Advanced ...] "
+ + "downsample_xy=8x "
+ + "downsample_z=2x "
+ + "sigma="
+ + str(sigma)
+ + " "
+ + "threshold="
+ + str(threshold)
+ + " "
+ + "find_maxima "
+ + "maximum_number="
+ + str(maximum_number)
+ + " "
+ + "type_of_detections_to_use=Brightest "
+ + "compute_on=[CPU (Java)]"
+ )
+
+ log.debug("Interest points detection options: <%s>", options)
+ IJ.run("Detect Interest Points for Registration", str(options))
+
+
+def interest_points_registration(
+ project_path,
+ processing_opts=None,
+ rigid_timepoints=False,
+):
+ """Run the "Register Dataset based on Interest Points" command.
+
+ Parameters
+ ----------
+ project_path : str
+ Path to the `.xml` project.
+ processing_opts : imcflibs.imagej.bdv.ProcessingOptions, optional
+ The `ProcessingOptions` object defining parameters for the run. Will
+ fall back to the defaults defined in the corresponding class if the
+ parameter is `None` or skipped. This controls which angles, channels,
+ illuminations, tiles and timepoints are processed.
+ rigid_timepoints : bool, optional
+        If set to `True`, each timepoint will be considered a rigid unit
+        (useful e.g. if spatial registration has already been performed).
+ By default `False`.
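+
+    Examples
+    --------
+    Hypothetical call on a time series, treating each timepoint as rigid:
+
+    >>> interest_points_registration("/data/dataset.xml", rigid_timepoints=True)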
+ """
+
+ if not processing_opts:
+ processing_opts = ProcessingOptions()
+
+ if rigid_timepoints:
+ rigid_timepoints_arg = "consider_each_timepoint_as_rigid_unit "
+ else:
+ rigid_timepoints_arg = " "
+
+ options = (
+ "select=["
+ + project_path
+ + "] "
+ + processing_opts.fmt_acitt_options()
+ + processing_opts.fmt_acitt_selectors()
+ + "registration_algorithm=[Precise descriptor-based (translation invariant)] "
+ + "registration_over_time=["
+ + "Match against one reference timepoint (no global optimization)] "
+ + "registration_in_between_views=["
+ + "Only compare overlapping views (according to current transformations)] "
+ + "interest_point_inclusion=[Compare all interest point of overlapping views] "
+ + "interest_points=beads "
+ + "group_tiles "
+ + "group_illuminations "
+ + "group_channels "
+ + "reference=1 "
+ + rigid_timepoints_arg
+ + "transformation=Affine "
+ + "regularize_model "
+ + "model_to_regularize_with=Affine "
+ + "lamba=0.10 "
+ + "number_of_neighbors=3 "
+ + "redundancy=1 "
+ + "significance=3 "
+ + "allowed_error_for_ransac=5 "
+ + "ransac_iterations=Normal "
+ + "global_optimization_strategy=["
+ + "Two-Round: Handle unconnected tiles, "
+ + "remove wrong links RELAXED (5.0x / 7.0px)] "
+ + "interestpoint_grouping=["
+ + "Group interest points (simply combine all in one virtual view)] "
+ + "interest=5"
+ )
+
+ log.debug("Interest points registration options: <%s>", options)
+ # register using interest points
+ IJ.run("Register Dataset based on Interest Points", options)
+
+
+def duplicate_transformations(
+ project_path,
+ transformation_type="channel",
+ channel_source=None,
+ tile_source=None,
+ transformation_to_use="[Replace all transformations]",
+):
+ """Duplicate / propagate transformation parameters to other channels.
+
+ Propagate the transformation parameters generated by a previously performed
+ registration of a single channel to the other channels.
+
+ Parameters
+ ----------
+ project_path : str
+ Path to the `.xml` project.
+ transformation_type : str, optional
+ Transformation mode, one of `channel` (to propagate from one channel to
+        all others) and `tile` (to propagate from one tile to all others).
+ channel_source : int, optional
+        Reference channel number (starting at 1), by default None.
+ tile_source : int, optional
+ Reference tile, by default None.
+ transformation_to_use : str, optional
+ One of `[Replace all transformations]` (default) and `[Add last
+ transformation only]` to specify which transformations to propagate.
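+
+    Examples
+    --------
+    Propagate the registration of channel 1 to all other channels (the
+    project path is a placeholder):
+
+    >>> duplicate_transformations(
+    ...     "/data/dataset.xml",
+    ...     transformation_type="channel",
+    ...     channel_source=1,
+    ... )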
+ """
+
+ file_info = pathtools.parse_path(project_path)
+
+ apply = ""
+ source = ""
+ target = ""
+ tile_apply = ""
+ tile_process = ""
+
+ chnl_apply = ""
+ chnl_process = ""
+
+ if transformation_type == "channel":
+ apply = "[One channel to other channels]"
+ target = "[All Channels]"
+ source = str(channel_source - 1)
+ if tile_source:
+ tile_apply = "apply_to_tile=[Single tile (Select from List)] "
+ tile_process = "processing_tile=[tile " + str(tile_source) + "] "
+ else:
+ tile_apply = "apply_to_tile=[All tiles] "
+ elif transformation_type == "tile":
+ apply = "[One tile to other tiles]"
+ target = "[All Tiles]"
+ source = str(tile_source)
+ if channel_source:
+ chnl_apply = "apply_to_channel=[Single channel (Select from List)] "
+ chnl_process = (
+ "processing_channel=[channel " + str(channel_source - 1) + "] "
+ )
+ else:
+ chnl_apply = "apply_to_channel=[All channels] "
+ else:
+ sys.exit("Issue with transformation duplication")
+
+ options = (
+ "apply="
+ + apply
+ + " "
+ + "select=["
+ + project_path
+ + "] "
+ + "apply_to_angle=[All angles] "
+ + "apply_to_illumination=[All illuminations] "
+ + tile_apply
+ + tile_process
+ + chnl_apply
+ + chnl_process
+ + "apply_to_timepoint=[All Timepoints] "
+ + "source="
+ + source
+ + " "
+ + "target="
+ + target
+ + " "
+ + "duplicate_which_transformations="
+ + transformation_to_use
+ + " "
+ )
+
+ log.debug("Transformation duplication options: <%s>", options)
+ IJ.run("Duplicate Transformations", str(options))
+
+ backup_xml_files(
+ file_info["path"],
+ "duplicate_transformation_" + transformation_type,
+ )
+
+
+def fuse_dataset(
+ project_path,
+ processing_opts=None,
+ result_path=None,
+ downsampling=1,
+ interpolation="[Linear Interpolation]",
+ pixel_type="[16-bit unsigned integer]",
+ fusion_type="Avg, Blending",
+ export="HDF5",
+ compression="Zstandard",
+):
+ """Call BigStitcher's "Fuse Dataset" command.
+
+ Wrapper to `BigStitcher > Batch Processing > Fuse Dataset`.
+
+    Depending on the export format, different inputs are required and the
+    given arguments will be assembled accordingly.
+
+ Parameters
+ ----------
+ project_path : str
+ Path to the `.xml` on which to run the fusion.
+ processing_opts : imcflibs.imagej.bdv.ProcessingOptions, optional
+        The `ProcessingOptions` object defining parameters for the run. Will
+ fall back to the defaults defined in the corresponding class if the
+ parameter is `None` or skipped.
+ result_path : str, optional
+ Path to store the resulting fused image, by default `None` which will
+ store the result in the same folder as the input project.
+ downsampling : int, optional
+ Downsampling value to use during fusion, by default `1`.
+ interpolation : str, optional
+ Interpolation to use during fusion, by default `[Linear Interpolation]`.
+ pixel_type : str, optional
+ Pixel type to use during fusion, by default `[16-bit unsigned integer]`.
+    fusion_type : str, optional
+        Type of fusion algorithm to use, by default `Avg, Blending`.
+    export : str, optional
+        Format of the output fused image, by default `HDF5`.
+    compression : str, optional
+        Compression method to use when exporting as HDF5, by default `Zstandard`.
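+
+    Examples
+    --------
+    Hypothetical calls, fusing into an HDF5 / XML pair next to the input
+    project, or into TIFF stacks in a separate folder:
+
+    >>> fuse_dataset("/data/dataset.xml", downsampling=2)
+    >>> fuse_dataset("/data/dataset.xml", result_path="/data/fused", export="TIFF")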
+ """
+
+ if processing_opts is None:
+ processing_opts = ProcessingOptions()
+
+ file_info = pathtools.parse_path(project_path)
+ if not result_path:
+ result_path = file_info["path"]
+ # if not os.path.exists(result_path):
+ # os.makedirs(result_path)
+
+ options = (
+ "select=["
+ + project_path
+ + "] "
+ + processing_opts.fmt_acitt_options()
+ + "bounding_box=[All Views] "
+ + "downsampling="
+ + str(downsampling)
+ + " "
+ + "interpolation="
+ + interpolation
+ + " "
+ + "fusion_type=["
+ + fusion_type
+ + "] "
+ + "pixel_type="
+ + pixel_type
+ + " "
+ + "interest_points_for_non_rigid=[-= Disable Non-Rigid =-] "
+ + "preserve_original "
+ + "produce=[Each timepoint & channel] "
+ )
+
+ if export == "TIFF":
+ options = (
+ options
+ + "fused_image=[Save as (compressed) TIFF stacks] "
+ + "define_input=[Auto-load from input data (values shown below)] "
+ + "output_file_directory=["
+ + result_path
+ + "/.] "
+ + "filename_addition=["
+ + file_info["basename"]
+ + "]"
+ )
+ elif export == "HDF5":
+ h5_fused_path = pathtools.join2(
+ result_path, file_info["basename"] + "_fused.h5"
+ )
+ xml_fused_path = pathtools.join2(
+ result_path, file_info["basename"] + "_fused.xml"
+ )
+
+ options = (
+ options
+ + "fused_image=[OME-ZARR/N5/HDF5 export using N5-API] "
+ + "define_input=[Auto-load from input data (values shown below)] "
+ + "export=HDF5 "
+ + "compression="
+ + compression
+ + " "
+ + "create "
+ + "create_0 "
+ + "hdf5_file=["
+ + h5_fused_path
+ + "] "
+ + "xml_output_file=["
+ + xml_fused_path
+ + "] "
+ + "show_advanced_block_size_options "
+ + "block_size_x=128 "
+ + "block_size_y=128 "
+ + "block_size_z=64 "
+ + "block_size_factor_x=1 "
+ + "block_size_factor_y=1 "
+ + "block_size_factor_z=1"
+ )
+
+ log.debug("Dataset fusion options: <%s>", options)
+ IJ.run("Image Fusion", str(options))
+
+
+def fuse_dataset_bdvp(
+ project_path,
+ command,
+ processing_opts=None,
+ result_path=None,
+ compression="LZW",
+):
+ """Export a BigDataViewer project using the BIOP Kheops exporter.
+
+ Use the BIOP Kheops exporter to convert a BigDataViewer project into
+ OME-TIFF files, with optional compression.
+
+ Parameters
+ ----------
+ project_path : str
+ Full path to the BigDataViewer XML project file.
+ command : CommandService
+ The Scijava CommandService instance to execute the export command.
+ processing_opts : ProcessingOptions, optional
+ Options defining which parts of the dataset to process. If None, default
+ processing options will be used (process all angles, channels, etc.).
+ result_path : str, optional
+ Path where to store the exported files. If None, files will be saved in
+ the same directory as the input project.
+ compression : str, optional
+ Compression method to use for the TIFF files. Default is "LZW".
+
+ Notes
+ -----
+ This function requires the PTBIOP update site to be enabled in Fiji/ImageJ.
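+
+    Examples
+    --------
+    A sketch, assuming `command` holds the SciJava CommandService (e.g. from
+    a `#@ CommandService command` script parameter):
+
+    >>> fuse_dataset_bdvp("/data/dataset.xml", command, compression="LZW")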
+ """
+ if processing_opts is None:
+ processing_opts = ProcessingOptions()
+
+ file_info = pathtools.parse_path(project_path)
+ if not result_path:
+ result_path = file_info["path"]
+ # if not os.path.exists(result_path):
+ # os.makedirs(result_path)
+
+ command.run(
+ FuseBigStitcherDatasetIntoOMETiffCommand,
+ True,
+ "image",
+ project_path,
+ "output_dir",
+ result_path,
+ "compression",
+ compression,
+ "subset_channels",
+ "",
+ "subset_slices",
+ "",
+ "subset_frames",
+ "",
+ "compress_temp_files",
+ False,
+ )
diff --git a/src/imcflibs/imagej/bioformats.py b/src/imcflibs/imagej/bioformats.py
index 037c8ff7..5ef19a6b 100644
--- a/src/imcflibs/imagej/bioformats.py
+++ b/src/imcflibs/imagej/bioformats.py
@@ -14,11 +14,160 @@
from ij import IJ
-from ._loci import ImporterOptions # pdoc: skip
-from ._loci import BF, ImageReader, Memoizer
-
-from ..pathtools import gen_name_from_orig
from ..log import LOG as log
+from ..pathtools import gen_name_from_orig
+from ._loci import (
+ BF,
+ DynamicMetadataOptions,
+ ImageReader,
+ ImporterOptions,
+ Memoizer,
+ MetadataTools,
+ ZeissCZIReader,
+)
+
+
+class ImageMetadata(object):
+ """A class to store metadata information from an image.
+
+    This class stores metadata extracted from an image file, such as image
+    dimensions, physical pixel dimensions and calibration units. The
+    `to_dict()` method converts all attributes to a dictionary.
+
+ Attributes
+ ----------
+ unit_width : float or None
+ Physical width of a pixel in the given unit.
+ unit_height : float or None
+ Physical height of a pixel in the given unit.
+ unit_depth : float or None
+        Physical depth of a voxel in the given unit.
+    unit : str or None
+        The calibration unit of the physical pixel sizes.
+ pixel_width : int or None
+ Width of the image in pixels.
+ pixel_height : int or None
+ Height of the image in pixels.
+ slice_count : int or None
+ Number of Z-slices in the image.
+ channel_count : int or None
+ Number of channels in the image.
+ timepoints_count : int or None
+ Number of timepoints in the image.
+ dimension_order : str or None
+ Order of dimensions (e.g., "XYZCT").
+ pixel_type : str or None
+ Data type of the pixel values (e.g., "uint16").
+
+    Examples
+    --------
+    >>> metadata = ImageMetadata(
+    ...     unit_width=0.1,
+    ...     unit_height=0.1,
+    ... )
+    >>> metadata.to_dict()["unit_width"]
+    0.1
+
+ """
+
+ def __init__(
+ self,
+ unit_width=None,
+ unit_height=None,
+ unit_depth=None,
+ unit=None,
+ pixel_width=None,
+ pixel_height=None,
+ slice_count=None,
+ channel_count=None,
+ timepoints_count=None,
+ dimension_order=None,
+ pixel_type=None,
+ ):
+ self.unit_width = unit_width
+ self.unit_height = unit_height
+ self.unit_depth = unit_depth
+ self.unit = unit
+ self.pixel_width = pixel_width
+ self.pixel_height = pixel_height
+ self.slice_count = slice_count
+ self.channel_count = channel_count
+ self.timepoints_count = timepoints_count
+ self.dimension_order = dimension_order
+ self.pixel_type = pixel_type
+
+ def to_dict(self):
+ """Convert the object attributes to a dictionary.
+
+ Returns
+ -------
+ dict
+ A dictionary representation of the object attributes.
+ """
+ return self.__dict__
+
+
+class StageMetadata(object):
+ """A class to store stage coordinates and calibration metadata for a set of images.
+
+ Attributes
+ ----------
+ dimensions : int
+ Number of dimensions (2D or 3D).
+ stage_coordinates_x : list of float
+ Absolute stage x-coordinates.
+ stage_coordinates_y : list of float
+ Absolute stage y-coordinates.
+ stage_coordinates_z : list of float
+ Absolute stage z-coordinates.
+ relative_coordinates_x : list of float
+ Relative stage x-coordinates in pixels.
+ relative_coordinates_y : list of float
+ Relative stage y-coordinates in pixels.
+ relative_coordinates_z : list of float
+ Relative stage z-coordinates in pixels.
+ image_calibration : list of float
+ Calibration values for x, y, and z in unit/px.
+ calibration_unit : str
+ Unit used for image calibration.
+ image_dimensions_czt : list of int
+ Number of images in dimensions (channels, z-slices, timepoints).
+ series_names : list of str
+ Names of all series in the image files.
+ max_size : list of float
+ Maximum physical size (x/y/z) across all files.
+ """
+
+ def __init__(
+ self,
+ dimensions=2,
+ stage_coordinates_x=None,
+ stage_coordinates_y=None,
+ stage_coordinates_z=None,
+ relative_coordinates_x=None,
+ relative_coordinates_y=None,
+ relative_coordinates_z=None,
+ image_calibration=None,
+ calibration_unit="unknown",
+ image_dimensions_czt=None,
+ series_names=None,
+ max_size=None,
+ ):
+ self.dimensions = dimensions
+ self.stage_coordinates_x = stage_coordinates_x or []
+ self.stage_coordinates_y = stage_coordinates_y or []
+ self.stage_coordinates_z = stage_coordinates_z or []
+ self.relative_coordinates_x = relative_coordinates_x or []
+ self.relative_coordinates_y = relative_coordinates_y or []
+ self.relative_coordinates_z = relative_coordinates_z or []
+ self.image_calibration = image_calibration or [1.0, 1.0, 1.0]
+ self.calibration_unit = calibration_unit or "unknown"
+ self.image_dimensions_czt = image_dimensions_czt or [1, 1, 1]
+ self.series_names = series_names or []
+ self.max_size = max_size or [1.0, 1.0, 1.0]
+
+ def __repr__(self):
+ """Return a string representation of the object."""
+ return "".format(
+ ", ".join("{}={}".format(k, v) for k, v in self.__dict__.items())
+ )
def import_image(
@@ -85,7 +234,7 @@ def import_image(
Returns
-------
- ij.ImagePlus[]
+ list(ij.ImagePlus)
A list of ImagePlus objects resulting from the import.
"""
options = ImporterOptions()
@@ -134,7 +283,7 @@ def import_image(
def export(imp, filename, overwrite=False):
- """Simple wrapper to export an image to a given file.
+ """Export an ImagePlus object to a given file.
Parameters
----------
@@ -195,7 +344,7 @@ def export_using_orig_name(imp, path, orig_name, tag, suffix, overwrite=False):
Returns
-------
- out_file : str
+ str
The full name of the exported file.
"""
out_file = gen_name_from_orig(path, orig_name, tag, suffix)
@@ -203,29 +352,70 @@ def export_using_orig_name(imp, path, orig_name, tag, suffix, overwrite=False):
return out_file
-def get_series_count_from_ome_metadata(path_to_file):
- """Get the Bio-Formates series count from a file on disk.
+def get_series_info_from_ome_metadata(path_to_file, skip_labels=False):
+ """Get the Bio-Formats series information from a file on disk.
- Useful to access a specific image in a container format like .czi, .nd2, .lif...
+ Useful to access specific images in container formats like .czi, .nd2, .lif...
Parameters
----------
path_to_file : str
The full path to the image file.
+ skip_labels : bool, optional
+ If True, excludes label and macro images from the series count (default: False).
Returns
-------
- int
- The number of Bio-Formats series detected in the image file metadata.
+ tuple
+ A tuple containing:
+        - int: the number of Bio-Formats series detected (excluding label
+          and macro images if `skip_labels=True`)
+        - list or range: the series indices; a filtered list if
+          `skip_labels=True`, otherwise `range(series_count)`
+
+ Examples
+ --------
+ >>> count, indices = get_series_info_from_ome_metadata("image.czi")
+ >>> count, indices = get_series_info_from_ome_metadata("image.nd2", skip_labels=True)
"""
- reader = ImageReader()
- ome_meta = MetadataTools.createOMEXMLMetadata()
- reader.setMetadataStore(ome_meta)
- reader.setId(path_to_file)
- series_count = reader.getSeriesCount()
- reader.close()
- return series_count
+ if not skip_labels:
+ reader = ImageReader()
+ reader.setFlattenedResolutions(False)
+ ome_meta = MetadataTools.createOMEXMLMetadata()
+ reader.setMetadataStore(ome_meta)
+ reader.setId(path_to_file)
+ series_count = reader.getSeriesCount()
+
+ reader.close()
+ return series_count, range(series_count)
+
+ else:
+ reader = ImageReader()
+ # reader.setFlattenedResolutions(True)
+ ome_meta = MetadataTools.createOMEXMLMetadata()
+ reader.setMetadataStore(ome_meta)
+ reader.setId(path_to_file)
+ series_count = reader.getSeriesCount()
+
+ series_ids = []
+ series_names = []
+ x = 0
+ y = 0
+ for i in range(series_count):
+ reader.setSeries(i)
+
+ if reader.getSizeX() > x and reader.getSizeY() > y:
+ name = ome_meta.getImageName(i)
+
+ if name not in ["label image", "macro image"]:
+ series_ids.append(i)
+ series_names.append(name)
+
+ x = reader.getSizeX()
+ y = reader.getSizeY()
+
+    log.debug("Series names: %s", series_names)
+ return len(series_ids), series_ids
def write_bf_memoryfile(path_to_file):
@@ -236,9 +426,204 @@ def write_bf_memoryfile(path_to_file):
Parameters
----------
- path_to_file : string
+ path_to_file : str
The full path to the image file.
"""
reader = Memoizer(ImageReader())
reader.setId(path_to_file)
reader.close()
+
+
+def get_metadata_from_file(path_to_image):
+ """Extract metadata from an image file using Bio-Formats.
+
+ This function reads an image file using the Bio-Formats library and extracts
+ various metadata properties including physical dimensions, pixel dimensions,
+ and other image characteristics.
+
+ Parameters
+ ----------
+ path_to_image : str or pathlib.Path
+ Path to the image file from which metadata should be extracted.
+
+ Returns
+ -------
+ ImageMetadata
+ An instance of `imcflibs.imagej.bioformats.ImageMetadata` containing the extracted metadata.
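+
+    Examples
+    --------
+    Hypothetical usage, reading the voxel size of a file:
+
+    >>> meta = get_metadata_from_file("/data/sample.czi")
+    >>> voxel = (meta.unit_width, meta.unit_height, meta.unit_depth)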
+ """
+
+ reader = ImageReader()
+ ome_meta = MetadataTools.createOMEXMLMetadata()
+ reader.setMetadataStore(ome_meta)
+ reader.setId(str(path_to_image))
+
+ metadata = ImageMetadata(
+ unit_width=ome_meta.getPixelsPhysicalSizeX(0).value(),
+ unit_height=ome_meta.getPixelsPhysicalSizeY(0).value(),
+ unit_depth=ome_meta.getPixelsPhysicalSizeZ(0).value(),
+ unit=ome_meta.getPixelsPhysicalSizeX(0).unit().symbol,
+ pixel_width=ome_meta.getPixelsSizeX(0),
+ pixel_height=ome_meta.getPixelsSizeY(0),
+ slice_count=ome_meta.getPixelsSizeZ(0),
+ channel_count=ome_meta.getPixelsSizeC(0),
+ timepoints_count=ome_meta.getPixelsSizeT(0),
+ dimension_order=ome_meta.getPixelsDimensionOrder(0),
+ pixel_type=ome_meta.getPixelsType(0),
+ )
+ reader.close()
+
+ return metadata
+
+
+def get_stage_coords(filenames):
+ """Get stage coordinates and calibration for a given list of images.
+
+ Parameters
+ ----------
+ filenames : list of str
+ List of image filepaths.
+
+ Returns
+ -------
+ StageMetadata
+ An object containing extracted stage metadata.
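+
+    Examples
+    --------
+    A minimal sketch (file names are placeholders):
+
+    >>> stage_meta = get_stage_coords(["/data/tile_1.czi", "/data/tile_2.czi"])
+    >>> offsets_px = stage_meta.relative_coordinates_x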
+ """
+ # Initialize lists to store stage coordinates and series names
+ stage_coordinates_x = []
+ stage_coordinates_y = []
+ stage_coordinates_z = []
+ series_names = []
+
+    # Initialize default values
+ dimensions = 2
+ image_calibration = []
+ calibration_unit = "unknown"
+ image_dimensions_czt = []
+ max_size = []
+
+ # Initialize max_size variables to track the maximums
+ max_phys_size_x = 0.0
+ max_phys_size_y = 0.0
+ max_phys_size_z = 0.0
+
+ for counter, image in enumerate(filenames):
+ reader = ImageReader()
+ reader.setFlattenedResolutions(False)
+ ome_meta = MetadataTools.createOMEXMLMetadata()
+ reader.setMetadataStore(ome_meta)
+ reader.setId(str(image))
+ series_count = reader.getSeriesCount()
+
+ # Process only the first image to get values not dependent on series
+ if counter == 0:
+ frame_size_x = reader.getSizeX()
+ frame_size_y = reader.getSizeY()
+ frame_size_z = reader.getSizeZ()
+ frame_size_c = reader.getSizeC()
+ frame_size_t = reader.getSizeT()
+
+ dimensions = 2 if frame_size_z == 1 else 3
+
+ # Retrieve physical size coordinates safely
+ phys_size_x = getattr(
+ ome_meta.getPixelsPhysicalSizeX(0), "value", lambda: 1.0
+ )()
+ phys_size_y = getattr(
+ ome_meta.getPixelsPhysicalSizeY(0), "value", lambda: 1.0
+ )()
+ phys_size_z = getattr(
+ ome_meta.getPixelsPhysicalSizeZ(0), "value", lambda: None
+ )()
+
+ z_interval = phys_size_z if phys_size_z is not None else 1.0
+
+ # Handle missing Z calibration
+ if phys_size_z is None and frame_size_z > 1:
+ first_plane = getattr(
+ ome_meta.getPlanePositionZ(0, 0), "value", lambda: 0
+ )()
+ next_plane_index = frame_size_c + frame_size_t - 1
+ second_plane = getattr(
+ ome_meta.getPlanePositionZ(0, next_plane_index), "value", lambda: 0
+ )()
+ z_interval = abs(first_plane - second_plane)
+
+ image_calibration = [phys_size_x, phys_size_y, z_interval]
+ calibration_unit = (
+ getattr(
+ ome_meta.getPixelsPhysicalSizeX(0).unit(),
+ "getSymbol",
+ lambda: "unknown",
+ )()
+ if phys_size_x
+ else "unknown"
+ )
+ image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]
+
+ reader.close()
+
+ for series in range(series_count):
+ if ome_meta.getImageName(series) == "macro image":
+ continue
+
+ if series_count > 1 and not str(image).endswith(".vsi"):
+ series_names.append(ome_meta.getImageName(series))
+ else:
+ series_names.append(str(image))
+
+ current_position_x = getattr(
+ ome_meta.getPlanePositionX(series, 0), "value", lambda: 0
+ )()
+ current_position_y = getattr(
+ ome_meta.getPlanePositionY(series, 0), "value", lambda: 0
+ )()
+ current_position_z = getattr(
+ ome_meta.getPlanePositionZ(series, 0), "value", lambda: 1.0
+ )()
+
+ max_phys_size_x = max(
+ max_phys_size_x, ome_meta.getPixelsPhysicalSizeX(series).value()
+ )
+ max_phys_size_y = max(
+ max_phys_size_y, ome_meta.getPixelsPhysicalSizeY(series).value()
+ )
+ max_phys_size_z = max(
+ max_phys_size_z,
+ ome_meta.getPixelsPhysicalSizeZ(series).value()
+ if phys_size_z
+ else z_interval,
+ )
+
+ stage_coordinates_x.append(current_position_x)
+ stage_coordinates_y.append(current_position_y)
+ stage_coordinates_z.append(current_position_z)
+
+ max_size = [max_phys_size_x, max_phys_size_y, max_phys_size_z]
+
+ relative_coordinates_x_px = [
+ (stage_coordinates_x[i] - stage_coordinates_x[0]) / (phys_size_x or 1.0)
+ for i in range(len(stage_coordinates_x))
+ ]
+ relative_coordinates_y_px = [
+ (stage_coordinates_y[i] - stage_coordinates_y[0]) / (phys_size_y or 1.0)
+ for i in range(len(stage_coordinates_y))
+ ]
+ relative_coordinates_z_px = [
+ (stage_coordinates_z[i] - stage_coordinates_z[0]) / (z_interval or 1.0)
+ for i in range(len(stage_coordinates_z))
+ ]
+
+ return StageMetadata(
+ dimensions=dimensions,
+ stage_coordinates_x=stage_coordinates_x,
+ stage_coordinates_y=stage_coordinates_y,
+ stage_coordinates_z=stage_coordinates_z,
+ relative_coordinates_x=relative_coordinates_x_px,
+ relative_coordinates_y=relative_coordinates_y_px,
+ relative_coordinates_z=relative_coordinates_z_px,
+ image_calibration=image_calibration,
+ calibration_unit=calibration_unit,
+ image_dimensions_czt=image_dimensions_czt,
+ series_names=series_names,
+ max_size=max_size,
+ )
diff --git a/src/imcflibs/imagej/gpu.py b/src/imcflibs/imagej/gpu.py
index b3b1f04c..a5711b4d 100644
--- a/src/imcflibs/imagej/gpu.py
+++ b/src/imcflibs/imagej/gpu.py
@@ -62,7 +62,7 @@ def dilate_labels(clij2_instance, label_image, dilation_radius, channel=None):
Instance of CLIJ to communicate with the GPU.
label_image : ij.ImagePlus
Label Image to be dilated.
- erosion_radius : int
+ dilation_radius : int
Radius for dilation.
channel : int, optional
Specific channel to apply dilation.
diff --git a/src/imcflibs/imagej/labelimage.py b/src/imcflibs/imagej/labelimage.py
index 5326f298..15481e14 100644
--- a/src/imcflibs/imagej/labelimage.py
+++ b/src/imcflibs/imagej/labelimage.py
@@ -1,8 +1,10 @@
+# -*- coding: utf-8 -*-
+
"""Functions to work with ImageJ label images."""
-from ij import IJ, ImagePlus, Prefs
+from ij import IJ, ImagePlus, ImageStack, Prefs
from ij.plugin import Duplicator, ImageCalculator
-from ij.plugin.filter import ImageProcessor, ThresholdToSelection
+from ij.plugin.filter import ThresholdToSelection
from ij.process import FloatProcessor, ImageProcessor
from inra.ijpb.label import LabelImages as li
from inra.ijpb.plugins import AnalyzeRegions
@@ -65,12 +67,14 @@ def label_image_to_roi_list(label_image, low_thresh=None):
return roi_list, max_value
-def relate_label_images(label_image_ref, label_image_to_relate):
+def cookie_cut_labels(label_image_ref, label_image_to_relate):
"""Relate label images, giving the same label to objects belonging together.
β NOTE: Won't work with touching labels β
- FIXME: explain with an example what the function is doing!
+    Given two label images, create a new label image that keeps the labels of
+    the reference image, but only within the shapes of the objects of the
+    second image (like a cookie cutter).
Parameters
----------
@@ -90,7 +94,58 @@ def relate_label_images(label_image_ref, label_image_to_relate):
Prefs.blackBackground = True
IJ.run(imp_dup, "Convert to Mask", "")
IJ.run(imp_dup, "Divide...", "value=255")
- return ImageCalculator.run(label_image_ref, imp_dup, "Multimage_processorly create")
+ return ImageCalculator.run(label_image_ref, imp_dup, "Multiply create")
+
+
+def relate_label_images(outer_label_imp, inner_label_imp):
+ """Relate label images, giving the same label to objects belonging together.
+
+    Given two label images, create a new label image containing the objects of
+    the inner image, re-labeled so that objects belonging together share the
+    label of the corresponding outer object. Uses the 3D Association plugin
+    from the 3D ImageJ Suite.
+
+ Parameters
+ ----------
+ outer_label_imp : ij.ImagePlus
+ The outer label image.
+ inner_label_imp : ij.ImagePlus
+ The inner label image.
+
+ Returns
+ -------
+ related_inner_imp : ij.ImagePlus
+ The related inner label image.
+
+ Notes
+ -----
+ Unlike `cookie_cut_labels`, this should work with touching labels by using
+ MereoTopology algorithms.
+ """
+
+ outer_label_imp.show()
+ inner_label_imp.show()
+
+ outer_title = outer_label_imp.getTitle()
+ inner_title = inner_label_imp.getTitle()
+
+ IJ.run(
+ "3D Association",
+ "image_a="
+ + outer_title
+ + " "
+ + "image_b="
+ + inner_title
+ + " "
+ + "method=Colocalisation min=1 max=0.000",
+ )
+
+ related_inner_imp = IJ.getImage()
+
+ outer_label_imp.hide()
+ inner_label_imp.hide()
+ related_inner_imp.hide()
+
+ return related_inner_imp
def filter_objects(label_image, table, string, min_val, max_val):
@@ -148,13 +203,12 @@ def binary_to_label(imp, title, min_thresh=1, min_vol=None, max_vol=None):
Parameters
----------
- imp : ImagePlus
+ imp : ij.ImagePlus
Binary 3D stack or 2D image.
title : str
Title of the new image.
min_thresh : int, optional
- Threshold to do segmentation, also allows for label filtering, by
- default 1.
+ Threshold to do segmentation, also allows for label filtering, by default 1.
min_vol : float, optional
Volume under which to exclude objects, by default None.
max_vol : float, optional
@@ -162,21 +216,78 @@ def binary_to_label(imp, title, min_thresh=1, min_vol=None, max_vol=None):
Returns
-------
- ImagePlus
+ ij.ImagePlus
Segmented labeled ImagePlus.
"""
+ # Get the calibration of the input ImagePlus
cal = imp.getCalibration()
+
+ # Wrap the ImagePlus in an ImageHandler
img = ImageHandler.wrap(imp)
+
+ # Threshold the image using the specified threshold value
img = img.threshold(min_thresh, False, False)
+ # Create an ImageLabeller instance
labeler = ImageLabeller()
+
+ # Set the minimum size for labeling if provided
if min_vol:
- labeler.setMinSize(min_vol)
+ labeler.setMinSizeCalibrated(min_vol)
+
+ # Set the maximum size for labeling if provided
if max_vol:
- labeler.setMaxSize(max_vol)
+        labeler.setMaxSizeCalibrated(max_vol)
+ # Get the labeled image
seg = labeler.getLabels(img)
+
+ # Set the scale of the labeled image
seg.setScale(cal.pixelWidth, cal.pixelDepth, cal.getUnits())
+
+ # Set the title of the labeled image
seg.setTitle(title)
+ # Return the segmented labeled ImagePlus
return seg.getImagePlus()
+
+
+def dilate_labels_2d(imp, dilation_radius):
+ """Dilate each label in the given ImagePlus using the specified dilation radius.
+
+ This method will use a 2D dilation to be applied to each slice of the ImagePlus
+ and return a new stack.
+
+ Parameters
+ ----------
+    imp : ij.ImagePlus
+        Input ImagePlus with the labels to dilate.
+    dilation_radius : int
+        Number of pixels to dilate each label.
+
+    Returns
+    -------
+    ij.ImagePlus
+        New ImagePlus with the dilated labels.
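+
+    Examples
+    --------
+    Hypothetical call, growing every label by 3 pixels slice by slice:
+
+    >>> dilated = dilate_labels_2d(label_imp, 3)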
+ """
+
+ # Create a list of the dilated labels
+ dilated_labels_list = []
+
+ # Iterate over each slice of the input ImagePlus
+ for i in range(1, imp.getNSlices() + 1):
+ # Duplicate the current slice
+ current_imp = Duplicator().run(imp, 1, 1, i, imp.getNSlices(), 1, 1)
+
+ # Perform a dilation of the labels in the current slice
+ dilated_labels_imp = li.dilateLabels(current_imp, dilation_radius)
+
+ # Append the dilated labels to the list
+ dilated_labels_list.append(dilated_labels_imp)
+
+ # Create a new ImagePlus with the dilated labels
+ dilated_labels_imp = ImagePlus(
+ "Dilated labels", ImageStack().create(dilated_labels_list)
+ )
+
+ return dilated_labels_imp
diff --git a/src/imcflibs/imagej/misc.py b/src/imcflibs/imagej/misc.py
index 53ed01ae..edd4509e 100644
--- a/src/imcflibs/imagej/misc.py
+++ b/src/imcflibs/imagej/misc.py
@@ -1,48 +1,80 @@
"""Miscellaneous ImageJ related functions, mostly convenience wrappers."""
+import csv
+import glob
+import os
+import smtplib
+import subprocess
import sys
import time
from ij import IJ # pylint: disable-msg=import-error
+from ij.plugin import Duplicator, ImageCalculator, StackWriter
-from . import prefs
+from .. import pathtools
from ..log import LOG as log
+from . import bioformats as bf
+from . import prefs
def show_status(msg):
- """Wrapper to update the ImageJ status bar and the log simultaneously."""
+ """Update the ImageJ status bar and issue a log message.
+
+ Parameters
+ ----------
+ msg : str
+ The message to display in the ImageJ status bar and log.
+ """
log.info(msg)
IJ.showStatus(msg)
def show_progress(cur, final):
- """Wrapper to update the progress bar and issue a log message."""
- # ij.IJ.showProgress is adding 1 to the value given as first parameter...
+ """Update ImageJ's progress bar and print the current progress to the log.
+
+ Parameters
+ ----------
+ cur : int
+ Current progress value.
+ final : int
+ Total value representing 100% completion.
+
+ Notes
+ -----
+ `ij.IJ.showProgress` internally increments the given `cur` value by 1.
+ """
log.info("Progress: %s / %s (%s)", cur + 1, final, (1.0 + cur) / final)
IJ.showProgress(cur, final)
def error_exit(msg):
- """Convenience wrapper to log an error and exit then."""
+ """Log an error message and exit.
+
+ Parameters
+ ----------
+ msg : str
+ The error message to log.
+ """
log.error(msg)
sys.exit(msg)
def elapsed_time_since(start, end=None):
- """Generate a string with the time elapsed between the two timepoints.
+ """Generate a string with the time elapsed between two timepoints.
Parameters
----------
- start : time.time
- Start time.
- end : time.time, optional
- End time. If skipped the current time will be used.
+ start : float
+ The start time, typically obtained via `time.time()`.
+ end : float, optional
+ The end time. If not given, the current time is used.
Returns
-------
str
- """
+ The elapsed time formatted as `HH:MM:SS.ss`.
+ """
if not end:
end = time.time()
@@ -57,53 +89,77 @@ def percentage(part, whole):
Parameters
----------
part : float
- Part.
+ The portion value of a total.
whole : float
- Complete size.
+ The total value.
Returns
-------
float
+ The percentage value.
"""
return 100 * float(part) / float(whole)
-def calculate_mean_and_stdv(float_values):
+def calculate_mean_and_stdv(values_list, round_decimals=0):
"""Calculate mean and standard deviation from a list of floats.
Parameters
----------
- float_values : list(float)
- List containing float numbers.
+    values_list : list of int or float
+        List containing numbers.
+    round_decimals : int, optional
+        Number of decimals to round the results to, by default 0.
Returns
-------
- [float, float]
- Mean (1st item) and standard deviation (2nd item) of the list.
+ tuple of (float, float)
+ Mean and standard deviation of the input list.
+
+ Notes
+ -----
+ Returns (0, 0) when:
+ - The input list is empty.
+ - After filtering out None values, no elements remain.
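+
+    Examples
+    --------
+    Compute mean and standard deviation of a small list:
+
+    >>> calculate_mean_and_stdv([1.0, 2.0, 3.0], round_decimals=2)
+    (2.0, 0.82)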
"""
- mean = sum(float_values) / len(float_values)
- tot = 0.0
- for x in float_values:
- tot = tot + (x - mean) ** 2
- return [mean, (tot / (len(float_values))) ** 0.5]
+ filtered_list = [x for x in values_list if x is not None]
-def find_focus(imp):
- """Find the slice of a stack that seems to bet the best focused one.
+ if not filtered_list:
+ return 0, 0
- NOTE: currently only single-channel stacks are supported.
+ mean = round(sum(filtered_list) / len(filtered_list), round_decimals)
+ variance = sum((x - mean) ** 2 for x in filtered_list) / len(filtered_list)
+ std_dev = round(variance**0.5, round_decimals)
- FIXME: explain what the function is actually doing, i.e. how does it decide
- what "the best focused one" is?
+ return mean, std_dev
+
+
+def find_focus(imp):
+ """Find the slice of a stack that is the best focused one.
+
+ First, calculate the variance of the pixel values in each slice. The slice
+ with the highest variance is considered the best focused as this typically
+ indicates more contrast and sharpness.
Parameters
----------
imp : ij.ImagePlus
- A single-channel ImagePlus.
+ A single-channel ImagePlus stack.
Returns
-------
int
+ The slice number of the best focused slice.
+
+ Raises
+ ------
+ SystemExit
+ If the image has more than one channel.
+
+ Notes
+ -----
+ Currently only single-channel stacks are supported.
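+
+    Examples
+    --------
+    Jump to the best-focused slice of a single-channel stack:
+
+    >>> imp.setSlice(find_focus(imp))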
"""
imp_dimensions = imp.getDimensions()
@@ -113,7 +169,7 @@ def find_focus(imp):
if imp_dimensions[2] != 1:
sys.exit("Image has more than one channel, please reduce dimensionality")
- # Loop through each time points
+ # Loop through each time point
for plane in range(1, imp_dimensions[4] + 1):
focused_slice = 0
norm_var = 0
@@ -136,6 +192,92 @@ def find_focus(imp):
return focused_slice
+def send_notification_email(
+ job_name, recipient, filename, total_execution_time, subject="", message=""
+):
+ """Send an email notification with optional details of the processed job.
+
+ Retrieve the sender email and SMTP server settings from ImageJ's preferences
+ and use them to send an email notification with job details.
+
+ Parameters
+ ----------
+    job_name : str
+        Job name to display in the email.
+    recipient : str
+        Recipient's email address.
+    filename : str
+        The name of the processed file to be mentioned in the email.
+    total_execution_time : str
+        The time it took to process the file, formatted as `HH:MM:SS.ss`.
+    subject : str, optional
+        Subject of the email; by default a generic "job has finished" phrase.
+    message : str, optional
+        Body of the email; by default a generic message mentioning `filename`
+        and `total_execution_time`.
+
+ Notes
+ -----
+ - The function requires two preferences to be set in `~/.imagej/IJ_Prefs.txt`:
+ - `.imcf.sender_email`: the sender's email address
+ - `.imcf.smtpserver`: the SMTP server address
+ - If these preferences are not set or if required parameters are missing,
+ the function logs a message and exits without sending an email.
+ - In case of an SMTP error, the function logs a warning.
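+
+    Examples
+    --------
+    Hypothetical call, assuming the required preferences are configured
+    (recipient and timing are placeholders):
+
+    >>> send_notification_email(
+    ...     "Stitching", "user@example.com", "sample.czi", "01:23:45.67"
+    ... )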
+ """
+
+ # Retrieve sender email and SMTP server from preferences
+ # NOTE: the leading dot "." has to be omitted in the `Prefs.get()` call,
+ # despite being present in the `IJ_Prefs.txt` file!
+ sender = prefs.Prefs.get("imcf.sender_email", "").strip()
+ server = prefs.Prefs.get("imcf.smtpserver", "").strip()
+
+ # Ensure the sender and server are configured from Prefs
+ if not sender:
+ log.info("[.imcf.sender_email] is not configured in '~/.imagej/IJ_Prefs.txt'.")
+ return
+ if not server:
+ log.info("[.imcf.smtpserver] is not configured in '~/.imagej/IJ_Prefs.txt'.")
+ return
+
+ log.debug("Using SMTP server [%s].", server)
+
+ # Ensure the recipient is provided
+ if not recipient.strip():
+ log.info("Recipient email is required, not sending email notification.")
+ return
+
+ # Form the email subject and body
+ if subject == "":
+ subject = "Your {0} job has finished".format(job_name)
+ else:
+ subject = subject
+
+ if message == "":
+ body = (
+ "Dear recipient,\n\n"
+ "This is an automated message.\n"
+ "Your workflow '{0}' has been processed "
+ "({1} [HH:MM:SS:ss]).\n\n"
+ "Kind regards.\n"
+ ).format(filename, total_execution_time)
+ else:
+ body = message
+
+ # Form the complete message
+ message = ("From: {0}\nTo: {1}\nSubject: {2}\n\n{3}").format(
+ sender, recipient, subject, body
+ )
+
+ # Try sending the email, print error message if it wasn't possible
+ try:
+ smtpObj = smtplib.SMTP(server)
+ smtpObj.sendmail(sender, recipient, message)
+ log.debug("Successfully sent email to <%s>.", recipient)
+ except smtplib.SMTPException as err:
+ log.warning("Error: Unable to send email: %s", err)
+ return
+
+
def progressbar(progress, total, line_number, prefix=""):
"""Progress bar for the IJ log window.
@@ -151,7 +293,7 @@ def progressbar(progress, total, line_number, prefix=""):
line_number : int
Number of the line to be updated.
prefix : str, optional
- Text to use before the progress bar, by default ''.
+ Text to use before the progress bar, by default an empty string.
"""
size = 20
@@ -170,20 +312,27 @@ def progressbar(progress, total, line_number, prefix=""):
def timed_log(message, as_string=False):
- """Print a message to the ImageJ log window with a timestamp added.
+ """Print a message to the ImageJ log window, prefixed with a timestamp.
+
+    If `as_string` is set to True, nothing will be printed to the log window;
+    instead, the formatted log message will be returned as a string.
Parameters
----------
message : str
Message to print
+ as_string : bool, optional
+ Flag to request the formatted string to be returned instead of printing
+ it to the log. By default False.
"""
+ formatted = time.strftime("%H:%M:%S", time.localtime()) + ": " + message + " "
if as_string:
- return time.strftime("%H:%M:%S", time.localtime()) + ": " + message + " "
- IJ.log(time.strftime("%H:%M:%S", time.localtime()) + ": " + message + " ")
+ return formatted
+ IJ.log(formatted)
def get_free_memory():
- """Get the free memory thats available to ImageJ.
+ """Get the free memory that is available to ImageJ.
Returns
-------
@@ -200,13 +349,9 @@ def get_free_memory():
def setup_clean_ij_environment(rm=None, rt=None): # pylint: disable-msg=unused-argument
"""Set up a clean and defined ImageJ environment.
- Clean active results table, roi manager and log, close any open image.
-
- "Fresh Start" is described in the ImageJ release notes [1] following a
- suggestion by Robert Haase in the Image.sc Forum [2].
-
- [1]: https://imagej.nih.gov/ij/notes.html
- [2]: https://forum.image.sc/t/fresh-start-macro-command-in-imagej-fiji/43102
+    This function clears the active results table, the ROI manager, and the log.
+ Additionally, it closes all open images and resets the ImageJ options,
+ performing a [*Fresh Start*][fresh_start].
Parameters
----------
@@ -214,6 +359,14 @@ def setup_clean_ij_environment(rm=None, rt=None): # pylint: disable-msg=unused-
Will be ignored (kept for keeping API compatibility).
rt : ResultsTable, optional
Will be ignored (kept for keeping API compatibility).
+
+ Notes
+ -----
+ "Fresh Start" is described in the [ImageJ release notes][ij_relnotes],
+ following a [suggestion by Robert Haase][fresh_start].
+
+ [ij_relnotes]: https://imagej.nih.gov/ij/notes.html
+ [fresh_start]: https://forum.image.sc/t/43102
"""
IJ.run("Fresh Start", "")
@@ -221,4 +374,330 @@ def setup_clean_ij_environment(rm=None, rt=None): # pylint: disable-msg=unused-
prefs.fix_ij_options()
- return
+
+def sanitize_image_title(imp):
+ """Remove special chars and various suffixes from the title of an ImagePlus.
+
+ Parameters
+ ----------
+ imp : ImagePlus
+ The ImagePlus to be renamed.
+
+ Notes
+ -----
+ The function removes the full path of the image file (if present), retaining
+ only the base filename using `os.path.basename()`.
+ """
+ # sometimes (unclear when) the title contains the full path, therefore we
+ # simply call `os.path.basename()` on it to remove all up to the last "/":
+ image_title = os.path.basename(imp.getTitle())
+ image_title = image_title.replace(".czi", "")
+ image_title = image_title.replace(" ", "_")
+ image_title = image_title.replace("_-_", "")
+ image_title = image_title.replace("__", "_")
+ image_title = image_title.replace("#", "Series")
+
+ imp.setTitle(image_title)
+
+
+def subtract_images(imp1, imp2):
+ """Subtract one image from the other (imp1 - imp2).
+
+ Parameters
+ ----------
+    imp1 : ij.ImagePlus
+        The ImagePlus to subtract from.
+    imp2 : ij.ImagePlus
+        The ImagePlus to be subtracted.
+
+ Returns
+ -------
+ ij.ImagePlus
+ The ImagePlus resulting from the subtraction.
+ """
+ ic = ImageCalculator()
+ subtracted = ic.run("Subtract create", imp1, imp2)
+
+ return subtracted
+
+
+def close_images(list_of_imps):
+ """Close all open ImagePlus objects given in a list.
+
+ Parameters
+ ----------
+    list_of_imps : list(ij.ImagePlus)
+ A list of open ImagePlus objects.
+ """
+ for imp in list_of_imps:
+ imp.changes = False
+ imp.close()
+
+
+def get_threshold_value_from_method(imp, method, ops):
+ """Get the value of a selected AutoThreshold method for the given ImagePlus.
+
+ This is useful to figure out which threshold value will be calculated by the
+ selected method for the given stack *without* actually having to apply it.
+
+ Parameters
+ ----------
+ imp : ij.ImagePlus
+ The image from which to get the threshold value.
+ method : {'huang', 'ij1', 'intermodes', 'isoData', 'li', 'maxEntropy',
+ 'maxLikelihood', 'mean', 'minError', 'minimum', 'moments', 'otsu',
+ 'percentile', 'renyiEntropy', 'rosin', 'shanbhag', 'triangle', 'yen'}
+ The AutoThreshold method to use.
+    ops : ops.OpService
+ The ImageJ Ops service instance, usually retrieved through a _Script
+ Parameter_ at the top of the script, as follows:
+ ```
+ #@ OpService ops
+ ```
+
+ Returns
+ -------
+ int
+ The threshold value chosen by the selected method.
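+
+    Examples
+    --------
+    A sketch, assuming `ops` was injected via a script parameter:
+
+    >>> value = get_threshold_value_from_method(imp, "otsu", ops)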
+ """
+ histogram = ops.run("image.histogram", imp)
+ threshold_value = ops.run("threshold.%s" % method, histogram)
+ threshold_value = int(round(threshold_value.get()))
+
+ return threshold_value
+
+
+def write_ordereddict_to_csv(out_file, content):
+ """Write data from a list of OrderedDicts to a CSV file.
+
+ When performing measurements in an analysis that is e.g. looping over
+ multiple files, it's useful to keep the results in `OrderedDict` objects,
+ e.g. one per analyzed file / dataset. This function can be used to create a
+ CSV file (or append to an existing one) from a list of `OrderedDict`s. The
+ structure inside the dicts is entirely up to the calling code (i.e. it's not
+ related to ImageJ's *Results* window or such), the only requirement is
+ type-consistency among all the `OrderedDict`s provided to the function.
+
+ Parameters
+ ----------
+ out_file : str
+ Path to the output CSV file.
+ content : list of OrderedDict
+ List of OrderedDict objects representing the data rows to be written.
+ All dictionaries must have the same keys.
+
+ Notes
+ -----
+    - The CSV file will use the semicolon character (`;`) as delimiter.
+ - When appending to an existing file, the column structure has to match. No
+ sanity checking is being done on this by the function!
+ - The output file is opened in binary mode for compatibility.
+
+ Examples
+ --------
+ >>> from collections import OrderedDict
+ >>> results = [
+ ... OrderedDict([('id', 1), ('name', 'Sample A'), ('value', 42.5)]),
+ ... OrderedDict([('id', 2), ('name', 'Sample B'), ('value', 37.2)])
+ ... ]
+ >>> write_ordereddict_to_csv('results.csv', results)
+
+ The resulting CSV file will have the following content:
+
+ id;name;value
+ 1;Sample A;42.5
+ 2;Sample B;37.2
+ """
+
+ # Check if the output file exists
+ if not os.path.exists(out_file):
+ # If the file does not exist, create it and write the header
+ with open(out_file, "wb") as f:
+ dict_writer = csv.DictWriter(f, content[0].keys(), delimiter=";")
+ dict_writer.writeheader()
+ dict_writer.writerows(content)
+ else:
+ # If the file exists, append the results
+ with open(out_file, "ab") as f:
+ dict_writer = csv.DictWriter(f, content[0].keys(), delimiter=";")
+ dict_writer.writerows(content)
+
+
+def save_image_in_format(imp, format, out_dir, series, pad_number, split_channels):
+ """Save an ImagePlus object in the specified format.
+
+ This function provides flexible options for saving ImageJ images in various
+ formats with customizable naming conventions. It supports different
+ Bio-Formats compatible formats as well as ImageJ-native formats, and can
+ handle multi-channel images by either saving them as a single file or
+ splitting channels into separate files.
+
+ The function automatically creates necessary directories and uses consistent
+ naming patterns with series numbers. For split channels, separate
+ subdirectories are created for each channel (C1, C2, etc.).
+
+ Parameters
+ ----------
+ imp : ij.ImagePlus
+ ImagePlus object to save.
+ format : {'ImageJ-TIF', 'ICS-1', 'ICS-2', 'OME-TIFF', 'CellH5', 'BMP'}
+ Output format to use, see Notes section below for details.
+ out_dir : str
+ Directory path where the image(s) will be saved.
+ series : int
+ Series number to append to the filename.
+ pad_number : int
+ Number of digits to use when zero-padding the series number.
+ split_channels : bool
+ If True, split channels and save them individually in separate folders
+ named "C1", "C2", etc. inside out_dir. If False, save all channels in a
+ single file.
+
+ Notes
+ -----
+ Depending on the value of the `format` parameter, one of the following
+ output formats and saving strategies will be used:
+ - Bio-Formats based formats will be produced by calling `bf.export()`, note
+ that these formats will preserve metadata (which is **not** the case for
+ the other formats using different saving strategies):
+ - `ICS-1`: Save as ICS version 1 format (a pair of `.ics` and `.ids`).
+ - `ICS-2`: Save as ICS version 2 format (single `.ics` file).
+ - `OME-TIFF`: Save in OME-TIFF format (`.ome.tif`).
+ - `CellH5`: Save as CellH5 format (`.ch5`).
+ - `ImageJ-TIF`: Save in ImageJ TIFF format (`.tif`) using `IJ.saveAs()`.
+ - `BMP`: Save in BMP format using `StackWriter.save()`, producing one `.bmp`
+ per slice in a subfolder named after the original image.
+
+ Examples
+ --------
+ Save a multichannel image as OME-TIFF without splitting channels:
+
+    >>> save_image_in_format(imp, "OME-TIFF", "/output/path", 1, 3, False)
+ ... # resulting file: /output/path/image_title_series_001.ome.tif
+
+ Save with channel splitting:
+
+    >>> save_image_in_format(imp, "OME-TIFF", "/output/path", 1, 3, True)
+ ... # resulting files: /output/path/C1/image_title_series_001.ome.tif
+ ... # /output/path/C2/image_title_series_001.ome.tif
+ """
+
+ out_ext = {}
+ out_ext["ImageJ-TIF"] = ".tif"
+ out_ext["ICS-1"] = ".ids"
+ out_ext["ICS-2"] = ".ics"
+ out_ext["OME-TIFF"] = ".ome.tif"
+ out_ext["CellH5"] = ".ch5"
+ out_ext["BMP"] = ".bmp"
+
+ imp_to_use = []
+ dir_to_save = []
+
+ if split_channels:
+ for channel in range(1, imp.getNChannels() + 1):
+ imp_to_use.append(
+ Duplicator().run(
+ imp,
+ channel,
+ channel,
+ 1,
+ imp.getNSlices(),
+ 1,
+ imp.getNFrames(),
+ )
+ )
+ dir_to_save.append(os.path.join(out_dir, "C" + str(channel)))
+ else:
+ imp_to_use.append(imp)
+ dir_to_save.append(out_dir)
+
+ for index, current_imp in enumerate(imp_to_use):
+ basename = imp.getShortTitle()
+
+ out_path = os.path.join(
+ dir_to_save[index],
+ basename + "_series_" + str(series).zfill(pad_number),
+ )
+
+ if format == "ImageJ-TIF":
+ pathtools.create_directory(dir_to_save[index])
+ IJ.saveAs(current_imp, "Tiff", out_path + ".tif")
+
+ elif format == "BMP":
+ out_folder = os.path.join(out_dir, basename + os.path.sep)
+ pathtools.create_directory(out_folder)
+ StackWriter.save(current_imp, out_folder, "format=bmp")
+
+ else:
+ bf.export(current_imp, out_path + out_ext[format])
+
+ current_imp.close()
+
+
+def locate_latest_imaris(paths_to_check=None):
+ """Find paths to latest installed Imaris or ImarisFileConverter version.
+
+ Identify the full path to the most recent (as in "version number")
+ ImarisFileConverter or Imaris installation folder with the latter one having
+ priority. In case nothing is found, an empty string is returned.
+
+ Parameters
+ ----------
+    paths_to_check : list of str, optional
+        A list of paths to scan for installations, by default `None`, which
+        falls back to Bitplane's standard installation locations.
+
+ Returns
+ -------
+ str
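+        Path to the most recent installation folder (for example something
+        like `C:\Program Files\Bitplane\Imaris 10.1.0`), or an empty string
+        if no installation was found.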
+ """
+ if not paths_to_check:
+ paths_to_check = [
+ r"C:\Program Files\Bitplane\ImarisFileConverter ",
+ r"C:\Program Files\Bitplane\Imaris ",
+ ]
+
+ imaris_paths = [""]
+
+ for check in paths_to_check:
+ hits = glob.glob(check + "*")
+ imaris_paths += sorted(
+ hits,
+ key=lambda x: float(x.replace(check, "").replace(".", "")),
+ )
+
+ return imaris_paths[-1]
+
+
+def run_imarisconvert(file_path):
+ """Convert a given file to Imaris format using ImarisConvert.
+
+ Convert the input image file to Imaris format (Imaris5) using the
+ ImarisConvert utility. The function uses the latest installed Imaris
+ application to perform the conversion via `subprocess.call()`.
+
+ Parameters
+ ----------
+ file_path : str
+ Absolute path to the input image file.
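+
+    Examples
+    --------
+    Hypothetical call (requires a local Imaris / ImarisFileConverter
+    installation; the path is a placeholder):
+
+    >>> run_imarisconvert("C:\\data\\sample.ics")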
+ """
+ # in case the given file has the suffix `.ids` (meaning it is part of an
+ # ICS-1 `.ics`+`.ids` pair), point ImarisConvert to the `.ics` file instead:
+ path_root, file_extension = os.path.splitext(file_path)
+ if file_extension == ".ids":
+ file_extension = ".ics"
+ file_path = path_root + file_extension
+
+ imaris_path = locate_latest_imaris()
+
+ command = 'ImarisConvert.exe -i "%s" -of Imaris5 -o "%s"' % (
+ file_path,
+ file_path.replace(file_extension, ".ims"),
+ )
+ log.debug("\n%s" % command)
+ IJ.log("Converting to Imaris5 .ims...")
+ result = subprocess.call(command, shell=True, cwd=imaris_path)
+ if result == 0:
+ IJ.log("Conversion to .ims is finished.")
+ else:
+ IJ.log("Conversion failed with error code: %d" % result)
diff --git a/src/imcflibs/imagej/objects3d.py b/src/imcflibs/imagej/objects3d.py
index cc0b8300..a277a9c0 100644
--- a/src/imcflibs/imagej/objects3d.py
+++ b/src/imcflibs/imagej/objects3d.py
@@ -1,6 +1,17 @@
+"""Functions to work with 3D objects.
+
+Mostly (although not exclusively) related to the [`mcib3d`][mcib3d] package.
+
+[mcib3d]: https://mcib3d.frama.io/3d-suite-imagej/
+"""
+
+from de.mpicbg.scf.imgtools.image.create.image import ImageCreationUtilities
+from de.mpicbg.scf.imgtools.image.create.labelmap import WatershedLabeling
from ij import IJ
from mcib3d.geom import Objects3DPopulation
-from mcib3d.image3d import ImageHandler
+from mcib3d.image3d import ImageHandler, ImageLabeller
+from mcib3d.image3d.processing import MaximaFinder
+from net.imglib2.img import ImagePlusAdapter
def population3d_to_imgplus(imp, population):
@@ -13,14 +24,16 @@ def population3d_to_imgplus(imp, population):
imp : ij.ImagePlus
Original ImagePlus to derive the size of the resulting ImagePlus.
population : mcib3d.geom.Objects3DPopulation
- Population to use to generate the new ImagePlus.
+ Population of 3D objects used to generate the new ImagePlus.
Returns
-------
- ImagePlus
- Newly created ImagePlus from the population.
+ ij.ImagePlus
+ A newly created ImagePlus representing the labeled population.
"""
dim = imp.getDimensions()
+
+ # Create a new 16-bit image with the same size as the original image
new_imp = IJ.createImage(
"Filtered labeled stack",
"16-bit black",
@@ -31,6 +44,8 @@ def population3d_to_imgplus(imp, population):
dim[4],
)
new_imp.setCalibration(imp.getCalibration())
+
+ # Wrap the new image in an ImageHandler and draw the population
new_img = ImageHandler.wrap(new_imp)
population.drawPopulation(new_img)
@@ -45,12 +60,182 @@ def imgplus_to_population3d(imp):
Parameters
----------
imp : ij.ImagePlus
- Labeled 3D stack or 2D image to use to get population.
+ Labeled 2D image or 3D stack used to get the population.
Returns
-------
mcib3d.geom.Objects3DPopulation
- Population from the image.
+ The extracted population from the image.
"""
img = ImageHandler.wrap(imp)
return Objects3DPopulation(img)
+
+
+def segment_3d_image(imp, title=None, min_thresh=1, min_vol=None, max_vol=None):
+ """Segment a 3D binary image to get a labelled stack.
+
+ Parameters
+ ----------
+ imp : ij.ImagePlus
+ A binary 3D stack for segmentation.
+ title : str, optional
+ Title of the new image. Defaults to None.
+ min_thresh : int, optional
+ Threshold to do segmentation, also allows for label filtering. Since the
+ segmentation is happening on a binary stack, values are either 0 or 255,
+ so using 0 allows to discard only the background. Defaults to 1.
+ min_vol : int, optional
+ Minimum volume (in voxels) under which objects get filtered.
+ Defaults to None.
+ max_vol : int, optional
+ Maximum volume (in voxels) above which objects get filtered.
+ Defaults to None.
+
+ Returns
+ -------
+ ij.ImagePlus
+ A labelled 3D ImagePlus.
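+
+    Examples
+    --------
+    Hypothetical call on a binary stack, discarding objects below a
+    calibrated volume of 100:
+
+    >>> labels = segment_3d_image(mask_imp, title="nuclei", min_vol=100)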
+ """
+ cal = imp.getCalibration()
+
+ # Wrap through ImageHandler and apply thresholding
+ img = ImageHandler.wrap(imp)
+ img = img.threshold(min_thresh, False, False)
+
+ labeler = ImageLabeller()
+ if min_vol:
+ labeler.setMinSizeCalibrated(min_vol, img)
+ if max_vol:
+ labeler.setMaxSizeCalibrated(max_vol, img)
+
+ # Generate labelled segmentation
+ seg = labeler.getLabels(img)
+ seg.setScale(cal.pixelWidth, cal.pixelDepth, cal.getUnits())
+ if title:
+ seg.setTitle(title)
+
+ return seg.getImagePlus()
+
+
+def get_objects_within_intensity(obj_pop, imp, min_intensity, max_intensity):
+ """Filter a population for objects within the given intensity range.
+
+ Parameters
+ ----------
+ obj_pop : mcib3d.geom.Objects3DPopulation
+ A population of 3D objects.
+ imp : ij.ImagePlus
+ An ImagePlus on which the population is based.
+ min_intensity : float
+ Minimum mean intensity threshold for filtering objects.
+ max_intensity : float
+ Maximum mean intensity threshold for filtering objects.
+
+ Returns
+ -------
+ Objects3DPopulation
+ New population with the objects filtered by intensity.
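+
+    Examples
+    --------
+    A sketch filtering a labeled stack by mean intensity (the images are
+    placeholders):
+
+    >>> pop = imgplus_to_population3d(label_imp)
+    >>> bright = get_objects_within_intensity(pop, intensity_imp, 500.0, 65535.0)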
+ """
+ objects_within_intensity = []
+
+ # Iterate over all objects in the population
+ for i in range(0, obj_pop.getNbObjects()):
+ obj = obj_pop.getObject(i)
+ # Calculate the mean intensity of the object
+ mean_intensity = obj.getPixMeanValue(ImageHandler.wrap(imp))
+ # Check if the object is within the specified intensity range
+ if mean_intensity >= min_intensity and mean_intensity < max_intensity:
+ objects_within_intensity.append(obj)
+
+ # Return the new population with the filtered objects
+ return Objects3DPopulation(objects_within_intensity)
+
+
+def maxima_finder_3d(imp, min_threshold=0, noise=100, rxy=1.5, rz=1.5):
+ """Find local maxima in a 3D image.
+
+    Identify local maxima in a 3D image using a specified minimum threshold
+    and noise level. The radii for the maxima detection can be set
+    independently for the x/y and z dimensions.
+
+ Parameters
+ ----------
+ imp : ij.ImagePlus
+ The input 3D image in which to find local maxima.
+ min_threshold : int, optional
+ The minimum intensity threshold for maxima detection. Default is 0.
+ noise : int, optional
+ The noise tolerance level for maxima detection. Default is 100.
+ rxy : float, optional
+ The radius for maxima detection in the x and y dimensions. Default is 1.5.
+ rz : float, optional
+ The radius for maxima detection in the z dimension. Default is 1.5.
+
+ Returns
+ -------
+ ij.ImagePlus
+ An ImagePlus object containing the detected maxima as peaks.
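+
+    Examples
+    --------
+    A sketch combining the maxima with `seeded_watershed()` (see below) to
+    split touching objects in a binary mask:
+
+    >>> peaks = maxima_finder_3d(imp, min_threshold=50, noise=80)
+    >>> labels = seeded_watershed(mask_imp, peaks)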
+ """
+ # Wrap the input ImagePlus into an ImageHandler
+ img = ImageHandler.wrap(imp)
+
+ # Duplicate the image and apply a threshold cut-off
+ thresholded = img.duplicate()
+ thresholded.thresholdCut(min_threshold, False, True)
+
+ # Initialize the MaximaFinder with the thresholded image and noise level
+ maxima_finder = MaximaFinder(thresholded, noise)
+
+ # Set the radii for maxima detection in x/y and z dimensions
+ maxima_finder.setRadii(rxy, rz)
+
+ # Retrieve the image peaks as an ImageHandler
+ img_peaks = maxima_finder.getImagePeaks()
+
+ # Convert the ImageHandler peaks to an ImagePlus
+ imp_peaks = img_peaks.getImagePlus()
+
+ # Set the calibration of the peaks image to match the input image
+ imp_peaks.setCalibration(imp.getCalibration())
+
+ # Set the title of the peaks image
+ imp_peaks.setTitle("Peaks")
+
+ return imp_peaks
+
+
+def seeded_watershed(imp_binary, imp_peaks, threshold=10):
+ """Perform a seeded watershed segmentation on a binary image using seed points.
+
+ This function applies a watershed segmentation to a binary image using seed points provided in another image.
+ An optional threshold can be specified to control the segmentation process.
+
+ Parameters
+ ----------
+ imp_binary : ij.ImagePlus
+ The binary image to segment.
+ imp_peaks : ij.ImagePlus
+ The image containing the seed points for the watershed segmentation.
+ threshold : float, optional
+ The threshold value to use for the segmentation. Default is 10.
+
+ Returns
+ -------
+ ij.ImagePlus
+ The segmented image with labels.
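+
+    Examples
+    --------
+    A sketch combining this with `maxima_finder_3d()`, assuming `imp_binary`
+    is a binary stack:
+
+    >>> peaks = maxima_finder_3d(imp_binary, noise=200)
+    >>> labels = seeded_watershed(imp_binary, peaks, threshold=10)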
+ """
+
+ img = ImagePlusAdapter.convertFloat(imp_binary)
+ img_seed = ImagePlusAdapter.convertFloat(imp_peaks).copy()
+
+ if threshold:
+ watersheded_result = WatershedLabeling.watershed(img, img_seed, threshold)
+ else:
+ watersheded_result = WatershedLabeling.watershed(img, img_seed)
+
+ return ImageCreationUtilities.convertImgToImagePlus(
+ watersheded_result,
+ "Label image",
+ "",
+ imp_binary.getDimensions(),
+ imp_binary.getCalibration(),
+ )
diff --git a/src/imcflibs/imagej/omerotools.py b/src/imcflibs/imagej/omerotools.py
new file mode 100644
index 00000000..31f4bdd4
--- /dev/null
+++ b/src/imcflibs/imagej/omerotools.py
@@ -0,0 +1,422 @@
+"""Functions allowing to interact with an OMERO server.
+
+Contains helpers to parse URLs and / or OMERO image IDs, connect to OMERO and
+fetch images from the server.
+
+Requires both the [`simple-omero-client`][simple-omero-client] and the
+[`omero-insight`][omero-insight] JARs to be installed.
+
+Most of the functions use the [`simple-omero-client`][simple-omero-client] to
+interact with the OMERO server. However, some still require the
+[`omero-insight`][omero-insight] plugin to read metadata.
+
+[simple-omero-client]: https://github.com/GReD-Clermont/simple-omero-client
+[omero-insight]: https://github.com/ome/omero-insight
+"""
+
+from fr.igred.omero import Client
+from fr.igred.omero.annotations import (
+ MapAnnotationWrapper,
+ TableWrapper,
+)
+from fr.igred.omero.roi import ROIWrapper
+from java.lang import Long
+from java.text import SimpleDateFormat
+from java.util import ArrayList
+from omero.cmd import OriginalMetadataRequest
+from omero.gateway.model import TableData, TableDataColumn
+
+
+def parse_url(client, omero_str):
+ """Parse an OMERO URL / image IDs into the respective ImageWrapper objects.
+
+ Assemble a list of ImageWrapper objects from one of the following inputs:
+
+ - An OMERO URL (as retrieved e.g. from OMERO.web).
+ - One or more OMERO image IDs.
+ - An OMERO Dataset ID.
+
+ Parameters
+ ----------
+ client : fr.igred.omero.Client
+ Client used for login to OMERO.
+ omero_str : str
+        Either a URL from OMERO or image IDs separated by commas.
+
+ Returns
+ -------
+ list(fr.igred.omero.repository.ImageWrapper)
+ List of ImageWrappers parsed from the string.
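+
+    Examples
+    --------
+    Hypothetical calls, assuming `client` is a connected client and the
+    given IDs exist on the server:
+
+    >>> parse_url(client, "66,67")  # comma-separated image IDs
+    >>> parse_url(client, "https://omero.example.org/webclient/?show=image-66")
+    >>> parse_url(client, "https://omero.example.org/webclient/?show=dataset-87")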
+ """
+ image_ids = []
+ dataset_ids = []
+ image_wpr_list = []
+
+    # Check if the string refers to one or more datasets
+    if "dataset-" in omero_str:
+        # Account for multiple datasets separated by "|"
+        parts = omero_str.split("|") if "|" in omero_str else [omero_str]
+        for part in parts:
+            if "dataset-" in part:
+                dataset_ids.append(Long(part.split("dataset-")[1].split("/")[0]))
+
+        # Collect the images from all datasets
+        for dataset_id in dataset_ids:
+            dataset_wpr = client.getDataset(dataset_id)
+            image_wpr_list.extend(dataset_wpr.getImages())
+
+        return image_wpr_list
+
+    # Check if the string refers to one or more images
+    elif "image-" in omero_str:
+        image_ids = omero_str.split("image-")
+        image_ids.pop(0)
+        image_ids = [s.split("%")[0].replace("|", "") for s in image_ids]
+    else:
+        # Otherwise treat the input as a comma-separated list of image IDs
+        image_ids = omero_str.split(",")
+
+ return [client.getImage(Long(image_id)) for image_id in image_ids]
+
+
+def connect(host, port, username, password):
+ """Connect to OMERO using the credentials provided.
+
+ Parameters
+ ----------
+ host : str
+ The address (FQDN or IP) of the OMERO server.
+ port : int
+ The port number for the OMERO server.
+ username : str
+ The username for authentication.
+ password : str
+ The password for authentication.
+
+ Returns
+ -------
+ fr.igred.omero.Client
+ A Client object representing the connection to the OMERO server.
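+
+    Examples
+    --------
+    A sketch with placeholder credentials:
+
+    >>> client = connect("omero.example.org", 4064, "username", "password")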
+ """
+ # Create a new OMERO client
+ client = Client()
+
+ # Connect to the OMERO server using provided credentials
+ client.connect(host, port, username, password)
+
+ # Return the connected client
+ return client
+
+
+def fetch_image(client, image_id):
+ """Fetch an image from an OMERO server and open it as an ImagePlus.
+
+ Parameters
+ ----------
+ client : fr.igred.omero.Client
+ The client object used to connect to the OMERO server.
+ image_id : int
+ The ID of the image to fetch.
+
+ Returns
+ -------
+ ij.ImagePlus
+ The fetched image as an ImagePlus.
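+
+    Examples
+    --------
+    Assuming `client` is a connected client and the ID exists:
+
+    >>> imp = fetch_image(client, 123456)
+    >>> imp.show()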
+ """
+
+ # Fetch the image from the OMERO server
+ image_wpr = client.getImage(Long(image_id))
+
+ # Convert the image to an ImagePlus
+ return image_wpr.toImagePlus()
+
+
+def upload_image_to_omero(user_client, path, dataset_id):
+ """Upload an image to OMERO.
+
+ Parameters
+ ----------
+ user_client : fr.igred.omero.Client
+ The client object used to connect to the OMERO server.
+ path : str
+ Path of the file to upload back to OMERO.
+ dataset_id : Long
+ ID of the dataset where to upload the file.
+
+ Returns
+ -------
+    Long
+        ID of the uploaded image.
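+
+    Examples
+    --------
+    A sketch with a hypothetical file path and dataset ID:
+
+    >>> image_id = upload_image_to_omero(client, "/data/result.tif", 987)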
+ """
+ return user_client.getDataset(Long(dataset_id)).importImage(user_client, path)[0]
+
+
+def add_keyvalue_annotation(client, repository_wpr, annotations, header):
+ """Add an annotation to an OMERO object.
+
+ Parameters
+ ----------
+ client : fr.igred.omero.Client
+ The client object used to connect to the OMERO server.
+    repository_wpr : fr.igred.omero.repository.GenericRepositoryObjectWrapper
+        Wrapper of the object to annotate.
+ annotations : dict
+ Dictionary with the annotation to add.
+ header : str
+ Name for the annotation header.
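+
+    Examples
+    --------
+    A sketch assuming `client` and `image_wpr` have been set up before:
+
+    >>> annotations = {"condition": "control", "staining": "DAPI"}
+    >>> add_keyvalue_annotation(client, image_wpr, annotations, "acquisition")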
+ """
+ map_annotation_wpr = MapAnnotationWrapper(annotations)
+ map_annotation_wpr.setNameSpace(header)
+ repository_wpr.addMapAnnotation(client, map_annotation_wpr)
+
+
+def delete_keyvalue_annotations(user_client, object_wrapper):
+ """Delete annotations linked to object.
+
+ Parameters
+ ----------
+ user_client : fr.igred.omero.Client
+ Client used for login to OMERO.
+    object_wrapper : fr.igred.omero.repository.GenericRepositoryObjectWrapper
+        Wrapper of the object whose annotations should be deleted.
+
+ """
+ kv_pairs = object_wrapper.getMapAnnotations(user_client)
+ user_client.delete(kv_pairs)
+
+
+def find_dataset(client, dataset_id):
+ """Retrieve a dataset (wrapper) from the OMERO server.
+
+ Parameters
+ ----------
+ client : fr.igred.omero.Client
+ The client object used to connect to the OMERO server.
+ dataset_id : int
+ The ID of the dataset to retrieve.
+
+ Returns
+ -------
+    fr.igred.omero.repository.DatasetWrapper
+ The dataset wrapper retrieved from the server.
+ """
+ # Fetch the dataset from the OMERO server using the provided dataset ID
+ return client.getDataset(Long(dataset_id))
+
+
+def get_acquisition_metadata(user_client, image_wpr):
+ """Get acquisition metadata from OMERO based on an image wrapper.
+
+ Parameters
+ ----------
+ user_client : fr.igred.omero.Client
+        Client used for login to OMERO.
+    image_wpr : fr.igred.omero.repository.ImageWrapper
+        Wrapper to the image for which to fetch the metadata.
+
+ Returns
+ -------
+    dict
+        Dictionary with the acquisition metadata:
+
+        {
+            "objective_magnification": float,
+            "objective_na": float,
+            "acquisition_date": str,
+            "acquisition_date_number": int,
+        }
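+
+    Examples
+    --------
+    Assuming `client` is a connected client and `image_wpr` an ImageWrapper:
+
+    >>> metadata = get_acquisition_metadata(client, image_wpr)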
+ """
+ ctx = user_client.getCtx()
+ instrument_data = (
+ user_client.getGateway()
+ .getMetadataService(ctx)
+ .loadInstrument(image_wpr.asDataObject().getInstrumentId())
+ )
+ objective_data = instrument_data.copyObjective().get(0)
+ metadata = {}
+
+ metadata["objective_magnification"] = (
+ objective_data.getNominalMagnification().getValue()
+ if objective_data.getNominalMagnification() is not None
+ else 0
+ )
+ metadata["objective_na"] = (
+ objective_data.getLensNA().getValue()
+ if objective_data.getLensNA() is not None
+ else 0
+ )
+
+ if image_wpr.getAcquisitionDate() is None:
+ if image_wpr.asDataObject().getFormat() == "ZeissCZI":
+ field = "Information|Document|CreationDate"
+ date_field = get_info_from_original_metadata(user_client, image_wpr, field)
+ metadata["acquisition_date"] = date_field.split("T")[0]
+ metadata["acquisition_date_number"] = int(
+ metadata["acquisition_date"].replace("-", "")
+ )
+ else:
+ metadata["acquisition_date"] = "NA"
+ metadata["acquisition_date_number"] = 0
+ else:
+ sdf = SimpleDateFormat("yyyy-MM-dd")
+ metadata["acquisition_date"] = sdf.format(image_wpr.getAcquisitionDate())
+ metadata["acquisition_date_number"] = int(
+ metadata["acquisition_date"].replace("-", "")
+ )
+
+ return metadata
+
+
+def get_info_from_original_metadata(user_client, image_wpr, field):
+ """Retrieve information from the original metadata (as opposed to OME-MD).
+
+ In some cases not all information is parsed correctly by BF and has to be
+ recovered / identified directly from the *original* metadata. This function
+ extracts the corresponding value based on the field identifier.
+
+ Parameters
+ ----------
+ user_client : fr.igred.omero.Client
+        Client used for login to OMERO.
+    image_wpr : fr.igred.omero.repository.ImageWrapper
+        Wrapper to the image.
+    field : str
+        Field to look for in the original metadata. The exact field
+        identifier needs to be looked up beforehand.
+
+ Returns
+ -------
+    str
+        Value of the requested field.
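+
+    Examples
+    --------
+    Retrieving the creation date of a Zeiss CZI file (assuming `client` and
+    `image_wpr` have been set up before):
+
+    >>> field = "Information|Document|CreationDate"
+    >>> creation_date = get_info_from_original_metadata(client, image_wpr, field)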
+ """
+ omr = OriginalMetadataRequest(Long(image_wpr.getId()))
+ cmd = user_client.getGateway().submit(user_client.getCtx(), omr)
+ rsp = cmd.loop(5, 500)
+ gm = rsp.globalMetadata
+ return gm.get(field).getValue()
+
+
+def create_table_columns(headings):
+ """Create OMERO table headings from a list of column names.
+
+ Parameters
+ ----------
+    headings : dict
+        Dictionary mapping column names (str) to their Java types, e.g.
+        `java.lang.Double` or `java.lang.String`.
+
+ Returns
+ -------
+ list(omero.gateway.model.TableDataColumn)
+ List of columns formatted to be uploaded to OMERO.
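+
+    Examples
+    --------
+    >>> from java.lang import Double, String
+    >>> table_columns = create_table_columns({"Label": String, "Area": Double})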
+ """
+    table_columns = []
+    # Populate the column definitions from the name -> type mapping
+    for index in range(len(headings)):
+        heading = headings.keys()[index]
+        col_type = headings.values()[index]
+        # OMERO.tables queries don't handle whitespace well
+        heading = heading.replace(" ", "_")
+        table_columns.append(TableDataColumn(heading, index, col_type))
+    return table_columns
+
+
+def upload_array_as_omero_table(user_client, table_title, data, columns, image_wpr):
+ """Upload a table to OMERO from a list of lists.
+
+ Parameters
+ ----------
+    user_client : fr.igred.omero.Client
+        Client used for login to OMERO.
+    table_title : str
+        Title of the table to be uploaded.
+    data : list(list)
+        List of result rows to upload, each row being a list of values.
+    columns : dict
+        Dictionary mapping column names (str) to their Java types (see
+        `create_table_columns()`).
+    image_wpr : fr.igred.omero.repository.ImageWrapper
+        Wrapper to the image the table will be linked to.
+
+ Examples
+ --------
+ >>> from fr.igred.omero import Client
+ >>> from java.lang import String, Double, Long
+ ...
+ >>> client = Client() # connect to OMERO
+ >>> client.connect("omero.example.org", 4064, "username", "password")
+ ...
+ >>> image_wpr = client.getImage(Long(123456)) # get an image
+ ...
+ >>> columns = { # prepare column definitions (name-type pairs)
+ ... "Row_ID": Long,
+ ... "Cell_Area": Double,
+ ... "Cell_Type": String,
+ ... }
+ ...
+ >>> data = [ # prepare data (list of rows, each row is a list of values)
+ ... [1, 250.5, "Neuron"],
+ ... [2, 180.2, "Astrocyte"],
+ ... [3, 310.7, "Neuron"],
+ ... ]
+ ...
+ >>> upload_array_as_omero_table(
+ ... client, "Cell Measurements", data, columns, image_wpr
+ ... )
+ """
+
+ dataset_wpr = image_wpr.getDatasets(user_client)[0]
+
+ table_columns = create_table_columns(columns)
+ table_data = TableData(table_columns, data)
+ table_wpr = TableWrapper(table_data)
+ table_wpr.setName(table_title)
+ dataset_wpr.addTable(user_client, table_wpr)
+
+
+def save_rois_to_omero(user_client, image_wpr, rm):
+ """Save ROIs to OMERO linked to the image.
+
+ Parameters
+ ----------
+    user_client : fr.igred.omero.Client
+        Client used for login to OMERO.
+    image_wpr : fr.igred.omero.repository.ImageWrapper
+        Wrapper to the image the ROIs are linked to.
+    rm : ij.plugin.frame.RoiManager
+        ROI Manager containing the ROIs to save.
+
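+    Examples
+    --------
+    >>> from ij.plugin.frame import RoiManager
+    >>> rm = RoiManager.getInstance()
+    >>> save_rois_to_omero(client, image_wpr, rm)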
+ """
+ rois_list = rm.getRoisAsArray()
+ rois_arraylist = ArrayList(len(rois_list))
+ for roi in rois_list:
+ rois_arraylist.add(roi)
+ rois_to_upload = ROIWrapper.fromImageJ(rois_arraylist)
+ image_wpr.saveROIs(user_client, rois_to_upload)
diff --git a/src/imcflibs/imagej/prefs.py b/src/imcflibs/imagej/prefs.py
index 0e3bde9a..5162e89d 100644
--- a/src/imcflibs/imagej/prefs.py
+++ b/src/imcflibs/imagej/prefs.py
@@ -4,7 +4,7 @@
def debug_mode():
- """Wrapper to check if 'imcf.debugging' is enabled.
+ """Check if the 'imcf.debugging' setting is enabled.
This is a workaround for a Jython issue in ImageJ with values that are
stored in the "IJ_Prefs.txt" file being cast to the wrong types and / or
@@ -20,19 +20,28 @@ def debug_mode():
return debug == "true"
-def fix_ij_options():
- """Wrapper to setup ImageJ default options."""
+def set_default_ij_options():
+ """Configure ImageJ default options for consistency.
- # disable inverting LUT
- IJ.run("Appearance...", " menu=0 16-bit=Automatic")
- # set foreground color to be white, background black
+ Set the following options:
+ - Ensure ImageJ appearance settings are default values.
+ - Set foreground color to white and background to black.
+ - Set black background for binary images.
+ - Set default file saving format to .txt files.
+ - Ensure images are scaled appropriately when converting between different bit depths.
+ """
+
+ # Set all appearance settings to default values (untick all options)
+ IJ.run("Appearance...", " ")
+
+ # Set foreground color to be white and background black
IJ.run("Colors...", "foreground=white background=black selection=red")
- # black BG for binary images and pad edges when eroding
- IJ.run("Options...", "black pad")
- # set saving format to .txt files
+
+    # Set black background for binary images and enable "pad edges" to
+    # prevent erosion at the image edges
+ IJ.run("Options...", "iterations=1 count=1 black pad")
+
+ # Set default saving format to .txt files
IJ.run("Input/Output...", "file=.txt save_column save_row")
- # ============= DON'T MOVE UPWARDS =============
- # set "Black Background" in "Binary Options"
- IJ.run("Options...", "black")
- # scale when converting = checked
+
+ # Scale when converting = checked
IJ.run("Conversions...", "scale")
diff --git a/src/imcflibs/imagej/processing.py b/src/imcflibs/imagej/processing.py
new file mode 100644
index 00000000..0e3d2224
--- /dev/null
+++ b/src/imcflibs/imagej/processing.py
@@ -0,0 +1,136 @@
+"""ImageJ processing utilities for filtering and thresholding images.
+
+This module provides functions to apply various image processing operations
+using ImageJ, including filters, background subtraction, and thresholding.
+"""
+
+from ij import IJ
+
+from ..log import LOG as log
+
+
+def apply_filter(imp, filter_method, filter_radius, do_3d=False):
+ """Make a specific filter followed by a threshold method of choice.
+
+ Parameters
+ ----------
+    imp : ij.ImagePlus
+        Input ImagePlus to be filtered.
+ filter_method : str
+ Name of the filter method to use. Must be one of:
+ - Median
+ - Mean
+ - Gaussian Blur
+ - Minimum
+ - Maximum
+    filter_radius : int
+        Radius (or sigma, in case of "Gaussian Blur") of the filter to use.
+ do_3d : bool, optional
+ If set to True, will do a 3D filtering, by default False
+
+ Returns
+ -------
+ ij.ImagePlus
+ Filtered ImagePlus
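+
+    Examples
+    --------
+    A usage sketch, assuming `imp` is an open ImagePlus:
+
+    >>> filtered = apply_filter(imp, "Median", 3, do_3d=True)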
+ """
+ log.info("Applying filter %s with radius %d" % (filter_method, filter_radius))
+
+ if filter_method not in [
+ "Median",
+ "Mean",
+ "Gaussian Blur",
+ "Minimum",
+ "Maximum",
+ ]:
+ raise ValueError(
+ "filter_method must be one of: Median, Mean, Gaussian Blur, Minimum, Maximum"
+ )
+
+    if do_3d:
+        filter_name = filter_method + " 3D..."
+    else:
+        filter_name = filter_method + "..."
+
+    # NOTE: "Gaussian Blur" expects a "sigma" parameter instead of "radius"
+    options = (
+        "sigma=" + str(filter_radius) + " stack"
+        if filter_method == "Gaussian Blur"
+        else "radius=" + str(filter_radius) + " stack"
+    )
+
+    log.debug("Filter: <%s> with options <%s>" % (filter_name, options))
+
+    imageplus = imp.duplicate()
+    IJ.run(imageplus, filter_name, options)
+
+ return imageplus
+
+
+def apply_rollingball_bg_subtraction(imp, rolling_ball_radius, do_3d=False):
+ """Perform background subtraction using a rolling ball method.
+
+ Parameters
+ ----------
+    imp : ij.ImagePlus
+        Input ImagePlus on which to perform the background subtraction.
+ rolling_ball_radius : int
+ Radius of the rolling ball filter to use
+ do_3d : bool, optional
+ If set to True, will do a 3D filtering, by default False
+
+ Returns
+ -------
+ ij.ImagePlus
+ Filtered ImagePlus
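+
+    Examples
+    --------
+    A usage sketch, assuming `imp` is an open ImagePlus:
+
+    >>> subtracted = apply_rollingball_bg_subtraction(imp, 50)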
+ """
+ log.info("Applying rolling ball with radius %d" % rolling_ball_radius)
+
+ options = "rolling=" + str(rolling_ball_radius) + " stack" if do_3d else ""
+
+ log.debug("Background subtraction options: %s" % options)
+
+ imageplus = imp.duplicate()
+ IJ.run(imageplus, "Substract Background...", options)
+
+ return imageplus
+
+
+def apply_threshold(imp, threshold_method, do_3d=True):
+ """Apply a threshold method to the input ImagePlus.
+
+ Parameters
+ ----------
+    imp : ij.ImagePlus
+        Input ImagePlus to be thresholded.
+ threshold_method : str
+ Name of the threshold method to use
+ do_3d : bool, optional
+ If set to True, the automatic threshold will be done on a 3D stack, by default True
+
+ Returns
+ -------
+ ij.ImagePlus
+ Thresholded ImagePlus
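+
+    Examples
+    --------
+    A usage sketch, assuming `imp` is an open ImagePlus:
+
+    >>> mask = apply_threshold(imp, "Otsu", do_3d=True)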
+ """
+
+ log.info("Applying threshold method %s" % threshold_method)
+
+ imageplus = imp.duplicate()
+
+    auto_threshold_options = threshold_method + " dark" + (" stack" if do_3d else "")
+
+ log.debug("Auto threshold options: %s" % auto_threshold_options)
+
+ IJ.setAutoThreshold(imageplus, auto_threshold_options)
+
+    convert_to_binary_options = "method=" + threshold_method + " background=Dark black"
+
+ log.debug("Convert to binary options: %s" % convert_to_binary_options)
+
+ IJ.run(imageplus, "Convert to Mask", convert_to_binary_options)
+
+ return imageplus
diff --git a/src/imcflibs/imagej/projections.py b/src/imcflibs/imagej/projections.py
index 1a6d0501..7aea535f 100644
--- a/src/imcflibs/imagej/projections.py
+++ b/src/imcflibs/imagej/projections.py
@@ -1,13 +1,18 @@
-"""Functions for creating Z projections."""
+"""Functions for creating projections."""
from ij.plugin import ZProjector # pylint: disable-msg=E0401
from .bioformats import export_using_orig_name # pylint: disable-msg=E0401
from ..log import LOG as log
+from net.imagej.axis import Axes
+from net.imagej.ops import Ops
+from ij import ImagePlus, IJ
+from net.imagej import Dataset
+
def average(imp):
- """Create an average intensity projection.
+ """Create an average intensity Z projection.
Parameters
----------
@@ -23,13 +28,13 @@ def average(imp):
log.warn("ImagePlus is not a z-stack, not creating a projection!")
return imp
- log.debug("Creating average projection...")
+ log.debug("Creating average Z projection...")
proj = ZProjector.run(imp, "avg")
return proj
def maximum(imp):
- """Create a maximum intensity projection.
+ """Create a maximum intensity Z projection.
Parameters
----------
@@ -45,13 +50,13 @@ def maximum(imp):
log.warn("ImagePlus is not a z-stack, not creating a projection!")
return imp
- log.debug("Creating maximum intensity projection...")
+ log.debug("Creating maximum intensity Z projection...")
proj = ZProjector.run(imp, "max")
return proj
def create_and_save(imp, projections, path, filename, export_format):
- """Wrapper to create one or more projections and export the results.
+ """Create one or more projections and export (save) them.
Parameters
----------
@@ -100,3 +105,77 @@ def create_and_save(imp, projections, path, filename, export_format):
proj.close()
return True
+
+
+def project_stack(imp, projected_dimension, projection_type, ops, ds, cs):
+ """Project along a defined axis using the given projection type.
+
+ Parameters
+ ----------
+ imp : ImagePlus
+ The input image to be projected.
+ projected_dimension : str
+ The dimension along which to project the data. Must be one of {"X", "Y", "Z",
+ "TIME", "CHANNEL"}.
+ projection_type : str
+ The type of projection to perform. Must be one of {"Max", "Mean", "Median",
+ "Min", "StdDev", "Sum"}.
+ ops : OpService
+ The service used to access image processing operations. Use e.g. from script
+ parameter: `#@ OpService ops`
+ ds : DatasetService
+ The service used to create new datasets. Use e.g. from script parameter:
+ `#@ DatasetService ds`
+ cs : ConvertService
+ The service used to convert between formats. Use e.g. from script parameter:
+ `#@ ConvertService cs`
+
+ Returns
+ -------
+ ImagePlus
+ The resulting projected image as an ImagePlus object.
+
+ Raises
+ ------
+ Exception
+ If the specified dimension is not found or if the dimension has only one frame.
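+
+    Examples
+    --------
+    In an ImageJ script, with the required services declared as script
+    parameters at the top of the file:
+
+    >>> #@ OpService ops
+    >>> #@ DatasetService ds
+    >>> #@ ConvertService cs
+    >>> t_max = project_stack(imp, "TIME", "Max", ops, ds, cs)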
+ """
+ bit_depth = imp.getBitDepth()
+ data = cs.convert(imp, Dataset)
+ # Select which dimension to project
+ dim = data.dimensionIndex(getattr(Axes, projected_dimension))
+ if dim == -1:
+ raise Exception("%s dimension not found." % projected_dimension)
+ if data.dimension(dim) < 2:
+ raise Exception("%s dimension has only one frame." % projected_dimension)
+
+ # Write the output dimensions
+ new_dimensions = [
+ data.dimension(d) for d in range(0, data.numDimensions()) if d != dim
+ ]
+
+ # Create the output image
+ projected = ops.create().img(new_dimensions)
+
+ # Create the op and run it
+ proj_op = ops.op(getattr(Ops.Stats, projection_type), data)
+ ops.transform().project(projected, data, proj_op, dim)
+
+ # Create the output Dataset and convert to ImagePlus
+ output = ds.create(projected)
+ output_imp = cs.convert(output, ImagePlus)
+ output_imp = output_imp.duplicate()
+ output_imp.setTitle("%s %s projection" % (projected_dimension, projection_type))
+ IJ.run(output_imp, "Enhance Contrast", "saturated=0.35")
+
+ # Rescale bit depth if possible
+ if projection_type in ["Max", "Min", "Median"]:
+ IJ.run("Conversions...", " ")
+ if bit_depth in [8, 16]:
+ IJ.run(output_imp, str(bit_depth) + "-bit", "")
+ if bit_depth == 12:
+ IJ.run(output_imp, "16-bit", "")
+
+ IJ.run("Conversions...", "scale")
+
+ return output_imp
diff --git a/src/imcflibs/imagej/shading.py b/src/imcflibs/imagej/shading.py
index 291842c1..1e3a62fc 100644
--- a/src/imcflibs/imagej/shading.py
+++ b/src/imcflibs/imagej/shading.py
@@ -3,7 +3,9 @@
import os
import ij # pylint: disable-msg=import-error
-
+from ij import IJ
+from ij.plugin import ImageCalculator
+from ij.process import StackStatistics
from ..imagej import bioformats # pylint: disable-msg=no-name-in-module
from ..imagej import misc, projections
from ..log import LOG as log
@@ -179,3 +181,33 @@ def process_files(files, outpath, model_file, fmt):
if model:
model.close()
+
+
+def simple_flatfield_correction(imp, sigma=20.0):
+ """Perform a simple flatfield correction to a given ImagePlus stack.
+
+ Parameters
+ ----------
+    imp : ij.ImagePlus
+        The input stack to be corrected.
+    sigma : float, optional
+        The sigma value for the Gaussian blur, default=20.0.
+
+ Returns
+ -------
+ ij.ImagePlus
+ The 32-bit image resulting from the flatfield correction.
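+
+    Examples
+    --------
+    A usage sketch, assuming `imp` is an open stack:
+
+    >>> corrected = simple_flatfield_correction(imp, sigma=40.0)
+    >>> corrected.show()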
+ """
+ flatfield = imp.duplicate()
+ sigma_str = "sigma=" + str(sigma)
+
+ IJ.run(flatfield, "Gaussian Blur...", sigma_str)
+ stats = StackStatistics(flatfield)
+
+ # Normalize image to the highest value of original (requires 32-bit image)
+ IJ.run(flatfield, "32-bit", "")
+ IJ.run(flatfield, "Divide...", "value=" + str(stats.max))
+ ic = ImageCalculator()
+ flatfield_corrected = ic.run("Divide create", imp, flatfield)
+
+ return flatfield_corrected
diff --git a/src/imcflibs/imagej/split.py b/src/imcflibs/imagej/split.py
index 5184e91b..0d3112c8 100644
--- a/src/imcflibs/imagej/split.py
+++ b/src/imcflibs/imagej/split.py
@@ -8,7 +8,7 @@
def split_by_c_and_z(log, dname, imgf, skip_top, skip_bottom):
- """Helper function to open, split and save a file.
+ """Open a file, split by Z and C and save the result into individual TIFFs.
Load the file specified, split by channels and z-slices, create a directory
for each channel using the channel number as a name suffix and export
diff --git a/src/imcflibs/imagej/stitching.py b/src/imcflibs/imagej/stitching.py
index a82a66d2..6f7ee7aa 100644
--- a/src/imcflibs/imagej/stitching.py
+++ b/src/imcflibs/imagej/stitching.py
@@ -7,7 +7,7 @@
import micrometa # pylint: disable-msg=import-error
import ij # pylint: disable-msg=import-error
-from imcflibs.imagej.misc import show_status, show_progress, error_exit
+from .misc import show_status, show_progress, error_exit
from ..strtools import flatten
from ..log import LOG as log
diff --git a/src/imcflibs/imagej/trackmate.py b/src/imcflibs/imagej/trackmate.py
new file mode 100644
index 00000000..4433a8b7
--- /dev/null
+++ b/src/imcflibs/imagej/trackmate.py
@@ -0,0 +1,387 @@
+"""Functions working with [TrackMate].
+
+[TrackMate]: https://imagej.net/plugins/trackmate/
+"""
+
+import os
+import sys
+
+from fiji.plugin.trackmate import Logger, Model, SelectionModel, Settings, TrackMate
+from fiji.plugin.trackmate.action import LabelImgExporter
+from fiji.plugin.trackmate.action.LabelImgExporter import LabelIdPainting
+from fiji.plugin.trackmate.cellpose import CellposeDetectorFactory
+from fiji.plugin.trackmate.cellpose.CellposeSettings import PretrainedModel
+from fiji.plugin.trackmate.detection import LogDetectorFactory
+from fiji.plugin.trackmate.features import FeatureFilter
+from fiji.plugin.trackmate.stardist import StarDistDetectorFactory
+from fiji.plugin.trackmate.tracking.jaqaman import SparseLAPTrackerFactory
+from ij import IJ
+from java.lang import Double
+
+from .. import pathtools
+
+
+def cellpose_detector(
+ imageplus,
+ cellpose_env_path,
+ model_to_use,
+ obj_diameter,
+ target_channel,
+ optional_channel=0,
+ use_gpu=True,
+ simplify_contours=True,
+):
+ """Create a dictionary with all settings for TrackMate using Cellpose.
+
+ Parameters
+ ----------
+ imageplus : ij.ImagePlus
+ ImagePlus on which to apply the detector.
+ cellpose_env_path : str
+ Path to the Cellpose environment.
+ model_to_use : str
+ Name of the model to use for the segmentation (CYTO, NUCLEI, CYTO2).
+ obj_diameter : float
+ Diameter of the objects to detect in the image.
+ This will be calibrated to the unit used in the image.
+ target_channel : int
+ Index of the channel to use for segmentation.
+ optional_channel : int, optional
+ Index of the secondary channel to use for segmentation, by default 0.
+ use_gpu : bool, optional
+ Boolean for GPU usage, by default True.
+ simplify_contours : bool, optional
+ Boolean for simplifying the contours, by default True.
+
+ Returns
+ -------
+ fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
+
+    Examples
+    --------
+ >>> settings = cellpose_detector(
+ ... imageplus=imp,
+ ... cellpose_env_path="D:/CondaEnvs/cellpose",
+ ... model_to_use="NUCLEI",
+ ... obj_diameter=23.0,
+ ... target_channel=1,
+ ... optional_channel=0
+ ... )
+ """
+ settings = Settings(imageplus)
+
+ settings.detectorFactory = CellposeDetectorFactory()
+ settings.detectorSettings["TARGET_CHANNEL"] = target_channel
+    # index of an optional second channel (0 means unused):
+ settings.detectorSettings["OPTIONAL_CHANNEL_2"] = optional_channel
+
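+    # NOTE: this assumes a Windows-style Cellpose environment, i.e. a
+    # "python.exe" inside the env and models in "%USERPROFILE%/.cellpose":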
+ settings.detectorSettings["CELLPOSE_PYTHON_FILEPATH"] = pathtools.join2(
+ cellpose_env_path, "python.exe"
+ )
+ settings.detectorSettings["CELLPOSE_MODEL_FILEPATH"] = os.path.join(
+ os.environ["USERPROFILE"], ".cellpose", "models"
+ )
+ input_to_model = {
+ "nuclei": PretrainedModel.NUCLEI,
+ "cyto": PretrainedModel.CYTO,
+ "cyto2": PretrainedModel.CYTO2,
+ }
+ if model_to_use.lower() in input_to_model:
+ selected_model = input_to_model[model_to_use.lower()]
+    else:
+        raise ValueError("Unknown Cellpose model: %s" % model_to_use)
+
+ settings.detectorSettings["CELLPOSE_MODEL"] = selected_model
+ settings.detectorSettings["CELL_DIAMETER"] = obj_diameter
+ settings.detectorSettings["USE_GPU"] = use_gpu
+ settings.detectorSettings["SIMPLIFY_CONTOURS"] = simplify_contours
+
+ return settings
+
+
+def stardist_detector(imageplus, target_chnl):
+ """Create a dictionary with all settings for TrackMate using StarDist.
+
+ Parameters
+ ----------
+ imageplus : ij.ImagePlus
+ Image on which to do the segmentation.
+ target_chnl : int
+ Index of the channel on which to do the segmentation.
+
+ Returns
+ -------
+ fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
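+
+    Examples
+    --------
+    A usage sketch, assuming `imp` is an open ImagePlus:
+
+    >>> settings = stardist_detector(imp, target_chnl=1)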
+ """
+
+ settings = Settings(imageplus)
+ settings.detectorFactory = StarDistDetectorFactory()
+ settings.detectorSettings["TARGET_CHANNEL"] = target_chnl
+
+ return settings
+
+
+def log_detector(
+ imageplus,
+ radius,
+ target_channel,
+ quality_threshold=0.0,
+ median_filtering=True,
+ subpix_localization=True,
+):
+ """Create a dictionary with all settings for TrackMate using the LogDetector.
+
+ Parameters
+ ----------
+ imageplus : ij.ImagePlus
+ Image on which to do the segmentation.
+ radius : float
+ Radius of the objects to detect.
+ target_channel : int
+ Index of the channel on which to do the segmentation.
+    quality_threshold : float, optional
+        Threshold to use for excluding the spots by quality, by default 0.0.
+ median_filtering : bool, optional
+ Boolean to do median filtering, by default True.
+ subpix_localization : bool, optional
+ Boolean to do subpixel localization, by default True.
+
+ Returns
+ -------
+ fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
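+
+    Examples
+    --------
+    A usage sketch, assuming `imp` is an open ImagePlus:
+
+    >>> settings = log_detector(imp, radius=2.5, target_channel=1)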
+ """
+
+ settings = Settings(imageplus)
+ settings.detectorFactory = LogDetectorFactory()
+
+ settings.detectorSettings["RADIUS"] = Double(radius)
+ settings.detectorSettings["TARGET_CHANNEL"] = target_channel
+ settings.detectorSettings["THRESHOLD"] = Double(quality_threshold)
+ settings.detectorSettings["DO_MEDIAN_FILTERING"] = median_filtering
+ settings.detectorSettings["DO_SUBPIXEL_LOCALIZATION"] = subpix_localization
+
+ return settings
+
+
+def spot_filtering(
+ settings,
+ quality_thresh=None,
+ area_thresh=None,
+ circularity_thresh=None,
+ intensity_dict_thresh=None,
+):
+ """Add spot filtering for different features to the settings dictionary.
+
+ Parameters
+ ----------
+ settings : fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
+ quality_thresh : float, optional
+ Threshold to use for quality filtering of the spots, by default None.
+ If the threshold is positive, will exclude everything below the value.
+ If the threshold is negative, will exclude everything above the value.
+    area_thresh : float, optional
+        Threshold to use for area filtering of the spots (keep None with the
+        LoG detector), by default None.
+        If the threshold is positive, will exclude everything below the value.
+        If the threshold is negative, will exclude everything above the value.
+    circularity_thresh : float, optional
+        Threshold to use for circularity filtering (needs to be between 0 and
+        1, keep None with the LoG detector), by default None.
+        If the threshold is positive, will exclude everything below the value.
+        If the threshold is negative, will exclude everything above the value.
+ If the threshold is positive, will exclude everything below the value.
+ If the threshold is negative, will exclude everything above the value.
+ intensity_dict_thresh : dict, optional
+ Threshold to use for intensity filtering of the spots, by default None.
+ Dictionary needs to contain the channel index as key and the filter as value.
+ If the threshold is positive, will exclude everything below the value.
+ If the threshold is negative, will exclude everything above the value.
+
+ Returns
+ -------
+ fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
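+
+    Examples
+    --------
+    Exclude spots below quality 10.0 and, in channel 1, spots with a mean
+    intensity above 200.0 (note the negative value inverting the filter):
+
+    >>> settings = spot_filtering(
+    ...     settings, quality_thresh=10.0, intensity_dict_thresh={1: -200.0}
+    ... )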
+ """
+
+ settings.initialSpotFilterValue = -1.0
+ settings.addAllAnalyzers()
+
+ # Here 'true' takes everything ABOVE the mean_int value
+ if quality_thresh:
+ filter_spot = FeatureFilter(
+ "QUALITY",
+ Double(abs(quality_thresh)),
+ quality_thresh >= 0,
+ )
+ settings.addSpotFilter(filter_spot)
+ if area_thresh: # Keep none for log detector
+ filter_spot = FeatureFilter("AREA", Double(abs(area_thresh)), area_thresh >= 0)
+ settings.addSpotFilter(filter_spot)
+ if circularity_thresh: # has to be between 0 and 1, keep none for log detector
+ filter_spot = FeatureFilter(
+ "CIRCULARITY", Double(abs(circularity_thresh)), circularity_thresh >= 0
+ )
+ settings.addSpotFilter(filter_spot)
+ if intensity_dict_thresh:
+ for key, value in intensity_dict_thresh.items():
+            filter_spot = FeatureFilter(
+                "MEAN_INTENSITY_CH" + str(key), Double(abs(value)), value >= 0
+            )
+ settings.addSpotFilter(filter_spot)
+
+ return settings
+
+
+def sparse_lap_tracker(settings):
+ """Create a sparse LAP tracker with default settings.
+
+ Parameters
+ ----------
+ settings : fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
+
+ Returns
+ -------
+ fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
+ """
+
+ settings.trackerFactory = SparseLAPTrackerFactory()
+ settings.trackerSettings = settings.trackerFactory.getDefaultSettings()
+
+ return settings
+
+
+def track_filtering(
+ settings,
+ link_max_dist=15.0,
+ gap_closing_dist=15.0,
+ max_frame_gap=3,
+ track_splitting_max_dist=None,
+ track_merging_max_distance=None,
+):
+ """Add track filtering for different features to the settings dictionary.
+
+ Parameters
+ ----------
+ settings : fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
+    link_max_dist : float, optional
+        Maximal displacement of the spots, by default 15.0.
+    gap_closing_dist : float, optional
+        Maximal distance for gap closing, by default 15.0.
+    max_frame_gap : int, optional
+        Maximal frame interval between spots to be bridged, by default 3.
+    track_splitting_max_dist : float, optional
+        Maximal distance for track splitting, by default None.
+    track_merging_max_distance : float, optional
+        Maximal distance for track merging, by default None.
+
+ Returns
+ -------
+ fiji.plugin.trackmate.Settings
+ Dictionary containing all the settings to use for TrackMate.
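+
+    Examples
+    --------
+    Allow gaps of up to 2 frames and track splitting within 5 units:
+
+    >>> settings = track_filtering(
+    ...     settings, max_frame_gap=2, track_splitting_max_dist=5.0
+    ... )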
+ """
+ # NOTE: `link_max_dist` and `gap_closing_dist` must be double!
+ settings.trackerSettings["LINKING_MAX_DISTANCE"] = link_max_dist
+ settings.trackerSettings["GAP_CLOSING_MAX_DISTANCE"] = gap_closing_dist
+ settings.trackerSettings["MAX_FRAME_GAP"] = max_frame_gap
+ if track_splitting_max_dist:
+ settings.trackerSettings["ALLOW_TRACK_SPLITTING"] = True
+ settings.trackerSettings["SPLITTING_MAX_DISTANCE"] = track_splitting_max_dist
+ if track_merging_max_distance:
+ settings.trackerSettings["ALLOW_TRACK_MERGING"] = True
+ settings.trackerSettings["MERGING_MAX_DISTANCE"] = track_merging_max_distance
+
+ return settings
+
+
+def run_trackmate(
+ implus,
+ settings,
+ crop_roi=None,
+):
+ # sourcery skip: merge-else-if-into-elif, swap-if-else-branches
+ """Run TrackMate on an open ImagePlus object.
+
+ Parameters
+ ----------
+ implus : ij.ImagePlus
+        ImagePlus image on which to run TrackMate.
+ settings : fiji.plugin.trackmate.Settings
+ Settings to use for TrackMate, see detector methods for different settings.
+ crop_roi : ij.gui.Roi, optional
+ ROI to crop on the image, by default None.
+
+ Returns
+ -------
+ ij.ImagePlus
+ Labeled image with all the objects belonging to the same tracks having
+ the same label.
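+
+    Examples
+    --------
+    A typical detect / filter / track pipeline sketch:
+
+    >>> settings = stardist_detector(imp, target_chnl=1)
+    >>> settings = spot_filtering(settings, quality_thresh=10.0)
+    >>> settings = track_filtering(settings, max_frame_gap=2)
+    >>> label_imp = run_trackmate(imp, settings)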
+ """
+
+ dims = implus.getDimensions()
+ cal = implus.getCalibration()
+
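+    # NOTE: if the input is a Z stack, swap the Z and T dimensions so that
+    # TrackMate tracks along Z (the original order is restored on return):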
+ if implus.getNSlices() > 1:
+ implus.setDimensions(dims[2], dims[4], dims[3])
+
+ if crop_roi is not None:
+ implus.setRoi(crop_roi)
+
+ model = Model()
+
+ model.setLogger(Logger.IJTOOLBAR_LOGGER)
+
+ # Configure tracker
+ # settings.addTrackAnalyzer(TrackDurationAnalyzer())
+ settings.initialSpotFilterValue = -1.0
+
+ trackmate = TrackMate(model, settings)
+ trackmate.computeSpotFeatures(True)
+ trackmate.computeTrackFeatures(True)
+
+ if not settings.trackerFactory:
+ # Create a Sparse LAP Tracker if no Tracker has been created
+ settings = sparse_lap_tracker(settings)
+
+ ok = trackmate.checkInput()
+ if not ok:
+ sys.exit(str(trackmate.getErrorMessage()))
+
+ ok = trackmate.process()
+ if not ok:
+ if "[SparseLAPTracker] The spot collection is empty." in str(
+ trackmate.getErrorMessage()
+ ):
+ new_imp = IJ.createImage(
+ "Untitled",
+ str(implus.getBitDepth()) + "-bit black",
+ implus.getWidth(),
+ implus.getHeight(),
+ implus.getNFrames(),
+ )
+ new_imp.setCalibration(cal)
+
+ return new_imp
+
+ else:
+ sys.exit(str(trackmate.getErrorMessage()))
+
+ SelectionModel(model)
+
+ exportSpotsAsDots = False
+ exportTracksOnly = False
+ labelIdPainting = LabelIdPainting.LABEL_IS_TRACK_ID
+ # implus2.close()
+ label_imp = LabelImgExporter.createLabelImagePlus(
+ trackmate, exportSpotsAsDots, exportTracksOnly, labelIdPainting
+ )
+ label_imp.setCalibration(cal)
+ label_imp.setDimensions(dims[2], dims[3], dims[4])
+ implus.setDimensions(dims[2], dims[3], dims[4])
+
+ return label_imp
diff --git a/src/imcflibs/iotools.py b/src/imcflibs/iotools.py
index c5da0cd2..1891545e 100644
--- a/src/imcflibs/iotools.py
+++ b/src/imcflibs/iotools.py
@@ -1,14 +1,14 @@
"""I/O related functions."""
-import glob
import zipfile
-import os
from os.path import splitext, join
from .log import LOG as log
from .strtools import flatten
+from ._jython_compat import file_types
+
def filehandle(fname, mode="r"):
"""Make sure a variable is either a filehandle or create one from it.
@@ -22,6 +22,7 @@ def filehandle(fname, mode="r"):
Parameters
----------
fname : str or filehandle
+ The object to ensure it is a file handle, or to create one from it.
mode : str
The desired mode of the filehandle (default=read).
@@ -44,13 +45,13 @@ def filehandle(fname, mode="r"):
"""
log.debug(type(fname))
- if type(fname).__name__ == "str":
+ if isinstance(fname, str):
try:
return open(fname, mode)
except IOError as err:
message = "can't open '%s': %s"
raise SystemExit(message % (fname, err))
- elif type(fname).__name__ == "file":
+ elif isinstance(fname, file_types):
if fname.mode != mode:
message = "mode mismatch: %s != %s"
raise IOError(message % (fname.mode, mode))
@@ -101,6 +102,10 @@ def readtxt(fname, path="", flat=False):
else:
fin = open(join(path, fname), "r")
txt = fin.readlines() # returns file as a list, one entry per line
+ try:
+ txt = [x.decode("utf-8") for x in txt]
+ except AttributeError:
+ pass # in Python2 decoding isn't necessary
if flat:
txt = flatten(txt)
fin.close()
diff --git a/src/imcflibs/pathtools.py b/src/imcflibs/pathtools.py
index e169f4bc..166a5186 100644
--- a/src/imcflibs/pathtools.py
+++ b/src/imcflibs/pathtools.py
@@ -2,6 +2,7 @@
import os.path
import platform
+import re
from os import sep
from . import strtools
@@ -19,6 +20,11 @@ def parse_path(path, prefix=""):
*Script Parameter* `#@ File`) for either of the parameters, so it is safe to
use this in ImageJ Python scripts without additional measures.
+ **WARNING**: when passing in **Windows paths** literally, make sure to
+ declare them as **raw strings** using the `r""` notation, otherwise
+ unexpected things might happen if the path contains sections that Python
+ will interpret as escape sequences (e.g. `\n`, `\t`, `\u2324`, ...).
+
Parameters
----------
path : str or str-like
@@ -32,10 +38,12 @@ def parse_path(path, prefix=""):
dict
The parsed (and possibly combined) path split into its components, with
the following keys:
+
- `orig` : The full string as passed into this function (possibly
combined with the prefix in case one was specified).
- `full` : The same as `orig` with separators adjusted to the current
platform.
+ - `parent` : The parent folder of the selected file.
- `path` : The same as `full`, up to (including) the last separator.
- `dname` : The segment between the last two separators (directory).
- `fname` : The segment after the last separator (filename).
@@ -49,50 +57,64 @@ def parse_path(path, prefix=""):
Examples
--------
-
POSIX-style path to a file with a suffix:
>>> parse_path('/tmp/foo/file.suffix')
- {'dname': 'foo',
- 'ext': '',
- 'fname': 'file',
- 'full': '/tmp/foo/file',
- 'basename': 'file',
- 'orig': '/tmp/foo/file',
- 'path': '/tmp/foo/'}
+ {
+ "dname": "foo",
+ "ext": "",
+ "fname": "file",
+ "full": "/tmp/foo/file",
+ "basename": "file",
+ "orig": "/tmp/foo/file",
+ "parent": "/tmp/",
+ "path": "/tmp/foo/",
+ }
+
POSIX-style path to a directory:
>>> parse_path('/tmp/foo/')
- {'dname': 'foo',
- 'ext': '',
- 'fname': '',
- 'full': '/tmp/foo/',
- 'basename': '',
- 'orig': '/tmp/foo/',
- 'path': '/tmp/foo/'}
+ {
+ "dname": "foo",
+ "ext": "",
+ "fname": "",
+ "full": "/tmp/foo/",
+ "basename": "",
+ "orig": "/tmp/foo/",
+ "parent": "/tmp/",
+ "path": "/tmp/foo/",
+ }
+
Windows-style path to a file:
- >>> parse_path('C:\\Temp\\foo\\file.ext')
- {'dname': 'foo',
- 'ext': '.ext',
- 'fname': 'file.ext',
- 'full': 'C:/Temp/foo/file.ext',
- 'basename': 'file',
- 'orig': 'C:\\Temp\\foo\\file.ext',
- 'path': 'C:/Temp/foo/'}
+ >>> parse_path(r'C:\Temp\new\file.ext')
+ {
+ "dname": "new",
+ "ext": ".ext",
+ "fname": "file.ext",
+ "full": "C:/Temp/new/file.ext",
+ "basename": "file",
+ "orig": "C:\\Temp\\new\\file.ext",
+ "parent": "C:/Temp",
+ "path": "C:/Temp/new/",
+ }
+
Special treatment for *OME-TIFF* suffixes:
>>> parse_path("/path/to/some/nice.OME.tIf")
- {'basename': 'nice',
- 'dname': 'some',
- 'ext': '.OME.tIf',
- 'fname': 'nice.OME.tIf',
- 'full': '/path/to/some/nice.OME.tIf',
- 'orig': '/path/to/some/nice.OME.tIf',
- 'path': '/path/to/some/'}
+ {
+ "basename": "nice",
+ "dname": "some",
+ "ext": ".OME.tIf",
+ "fname": "nice.OME.tIf",
+ "full": "/path/to/some/nice.OME.tIf",
+ "orig": "/path/to/some/nice.OME.tIf",
+ "parent": "/path/to/",
+ "path": "/path/to/some/",
+ }
"""
path = str(path)
if prefix:
@@ -104,7 +126,9 @@ def parse_path(path, prefix=""):
parsed["orig"] = path
path = path.replace("\\", sep)
parsed["full"] = path
- parsed["path"] = os.path.dirname(path) + sep
+ folder = os.path.dirname(path)
+ parsed["path"] = folder + sep
+ parsed["parent"] = os.path.dirname(folder)
parsed["fname"] = os.path.basename(path)
parsed["dname"] = os.path.basename(os.path.dirname(parsed["path"]))
base, ext = os.path.splitext(parsed["fname"])
@@ -118,7 +142,7 @@ def parse_path(path, prefix=""):
def join2(path1, path2):
- """Join two paths into one, much like os.path.join().
+ r"""Join two paths into one, much like os.path.join().
The main difference is that `join2()` takes exactly two arguments, but they
can be non-str (as long as they're having a `__str__()` method), so this is
@@ -144,7 +168,7 @@ def join2(path1, path2):
def jython_fiji_exists(path):
- """Wrapper to work around problems with Jython 2.7 in Fiji.
+ """Work around problems with `os.path.exists()` in Jython 2.7 in Fiji.
In current Fiji, the Jython implementation of os.path.exists(path) raises a
java.lang.AbstractMethodError iff 'path' doesn't exist. This function
@@ -152,11 +176,11 @@ def jython_fiji_exists(path):
"""
try:
return os.path.exists(path)
- except java.lang.AbstractMethodError:
+ except java.lang.AbstractMethodError: # pragma: no cover
return False
-def listdir_matching(path, suffix, fullpath=False, sort=False):
+def listdir_matching(path, suffix, fullpath=False, sort=False, regex=False):
"""Get a list of files in a directory matching a given suffix.
Parameters
@@ -172,6 +196,9 @@ def listdir_matching(path, suffix, fullpath=False, sort=False):
sort : bool, optional
If set to True, the returned list will be sorted using
`imcflibs.strtools.sort_alphanumerically()`.
+ regex : bool, optional
+ If set to True, uses the suffix-string as regular expression to match
+ filenames. By default False.
Returns
-------
@@ -180,12 +207,17 @@ def listdir_matching(path, suffix, fullpath=False, sort=False):
"""
matching_files = list()
for candidate in os.listdir(path):
- if candidate.lower().endswith(suffix.lower()):
+ if not regex and candidate.lower().endswith(suffix.lower()):
# log.debug("Found file %s", candidate)
if fullpath:
matching_files.append(os.path.join(path, candidate))
else:
matching_files.append(candidate)
+ if regex and re.match(suffix.lower(), candidate.lower()):
+ if fullpath:
+ matching_files.append(os.path.join(path, candidate))
+ else:
+ matching_files.append(candidate)
if sort:
matching_files = strtools.sort_alphanumerically(matching_files)
@@ -203,6 +235,7 @@ def image_basename(orig_name):
Parameters
----------
orig_name : str
+ The original name, possibly containing paths and filename suffix.
Examples
--------
@@ -325,10 +358,26 @@ def folder_size(source):
return total_size
+def create_directory(new_path):
+ """Create a new directory at the specified path.
+
+ This is a workaround for Python 2.7 where `os.makedirs()` is lacking
+ the `exist_ok` parameter that is present in Python 3.2 and newer.
+
+ Parameters
+ ----------
+ new_path : str
+ Path where the new directory should be created.
+ """
+
+ if not os.path.exists(new_path):
+ os.makedirs(new_path)
+
+
# pylint: disable-msg=C0103
# we use the variable name 'exists' in its common spelling (lowercase), so
# removing this workaround will be straightforward at a later point
-if platform.python_implementation() == "Jython":
+if platform.python_implementation() == "Jython": # pragma: no cover
# pylint: disable-msg=F0401
# java.lang is only importable within Jython, pylint would complain
import java.lang
diff --git a/src/imcflibs/strtools.py b/src/imcflibs/strtools.py
index f59143e2..d522c460 100644
--- a/src/imcflibs/strtools.py
+++ b/src/imcflibs/strtools.py
@@ -2,6 +2,8 @@
import re
+from ._jython_compat import file_types
+
# this is taken from numpy's iotools:
def _is_string_like(obj):
@@ -12,6 +14,11 @@ def _is_string_like(obj):
instance of it (or a subclass thereof). So it's more generic than using
isinstance(obj, str).
+ Parameters
+ ----------
+ obj : any
+ The object to be checked for being string-like.
+
Example
-------
>>> _is_string_like('foo')
@@ -39,6 +46,7 @@ def filename(name):
Parameters
----------
name : str or filehandle or java.io.File
+ The object to retrieve the filename from.
Returns
-------
@@ -62,7 +70,7 @@ def filename(name):
# likely we are not running under Jython
pass
- if isinstance(name, file):
+ if isinstance(name, file_types):
return name.name
elif _is_string_like(name):
return name
@@ -76,6 +84,7 @@ def flatten(lst):
Parameters
----------
lst : list(str)
+ The list of strings to be flattened.
Returns
-------
@@ -141,3 +150,28 @@ def alphanum_key(key):
return [convert(c) for c in re.split("([0-9]+)", key)]
return sorted(data, key=alphanum_key)
+
+
+def pad_number(index, pad_length=2):
+ """Pad a number with leading zeros to a specified length.
+
+ Parameters
+ ----------
+ index : int or str
+ The number to be padded
+ pad_length : int, optional
+ The total length of the resulting string after padding, by default 2
+
+ Returns
+ -------
+ str
+ The padded number as a string
+
+ Examples
+ --------
+ >>> pad_number(7)
+ '07'
+ >>> pad_number(42, 4)
+ '0042'
+ """
+ return str(index).zfill(pad_length)
diff --git a/tests/bdv/test_define_dataset_auto.py b/tests/bdv/test_define_dataset_auto.py
new file mode 100644
index 00000000..c10b973c
--- /dev/null
+++ b/tests/bdv/test_define_dataset_auto.py
@@ -0,0 +1,175 @@
+"""Tests for the automatic dataset definition functionality in the BDV module."""
+
+import logging
+
+from imcflibs import pathtools
+from imcflibs.imagej import bdv
+
+
+def set_default_values(project_filename, file_path, series_type="Tiles"):
+ """Set the default values for dataset definitions.
+
+ Parameters
+ ----------
+ project_filename : str
+ Name of the project
+ file_path : pathlib.Path
+ Path to a temporary folder
+ series_type : str, optional
+ Type of Bioformats series (default is "Tiles")
+
+ Returns
+ -------
+ str
+ Start of the options for dataset definitions.
+ """
+ # Additional settings
+ file_info = pathtools.parse_path(file_path)
+
+ options = (
+ "define_dataset=[Automatic Loader (Bioformats based)] "
+ + "project_filename=["
+ + project_filename
+ + ".xml"
+ + "] "
+ + "path=["
+ + file_info["full"]
+ + "] "
+ + "exclude=10 "
+ + "bioformats_series_are?="
+ + series_type
+ + " "
+ + "move_tiles_to_grid_(per_angle)?=[Do not move Tiles to Grid "
+ + "(use Metadata if available)] "
+ )
+
+ return options
+
+
+def test_define_dataset_auto_tile(tmp_path, caplog):
+ """Test automatic dataset definition method for tile series.
+
+ Parameters
+ ----------
+ tmp_path : pytest.fixture
+ Temporary path for the test.
+ caplog : pytest.fixture
+ Log capturing fixture.
+ """
+
+ # Set the logging level to capture warnings
+ caplog.set_level(logging.WARNING)
+ # Clear the log
+ caplog.clear()
+
+ # Define the project and file names
+ project_filename = "proj_name"
+ file_path = tmp_path
+ file_info = pathtools.parse_path(file_path)
+
+ # Define the result and dataset save paths
+ result_folder = pathtools.join2(file_info["path"], project_filename)
+
+ # Default settings
+
+ # Define the type of Bio-Formats series
+ bf_series_type = "Tiles"
+
+ # Define the ImageJ command
+ cmd = "Define Multi-View Dataset"
+
+ # Set the default values for dataset definitions
+ options = set_default_values(project_filename, file_info["path"])
+
+ # Construct the options for dataset definitions
+ options = (
+ options
+ + "how_to_store_input_images=["
+ + "Re-save as multiresolution HDF5"
+ + "] "
+ + "load_raw_data_virtually "
+ + "metadata_save_path=["
+ + result_folder
+ + "] "
+ + "image_data_save_path=["
+ + result_folder
+ + "] "
+ + "check_stack_sizes "
+ + "split_hdf5 "
+ + "timepoints_per_partition=1 "
+ + "setups_per_partition=0 "
+ + "use_deflate_compression "
+ )
+
+ # Construct the final call to ImageJ
+ final_call = "IJ.run(cmd=[%s], params=[%s])" % (cmd, options)
+
+ # Define the dataset using the "Auto-Loader" option
+ bdv.define_dataset_auto(project_filename, file_info["path"], bf_series_type)
+ # Check if the final call is in the log
+ assert final_call == caplog.messages[0]
+
+
+def test_define_dataset_auto_angle(tmp_path, caplog):
+ """Test automatic dataset definition method for angle series.
+
+ Parameters
+ ----------
+ tmp_path : pytest.fixture
+ Temporary path for the test.
+ caplog : pytest.fixture
+ Log capturing fixture.
+ """
+
+ # Set the logging level to capture warnings
+ caplog.set_level(logging.WARNING)
+ # Clear the log
+ caplog.clear()
+
+ # Define the project and file names
+ project_filename = "proj_name"
+ file_path = tmp_path
+ file_info = pathtools.parse_path(file_path)
+
+ # Define the result and dataset save paths
+ result_folder = pathtools.join2(file_info["path"], project_filename)
+
+ # Default settings
+
+ # Define the type of Bio-Formats series
+ bf_series_type = "Angles"
+
+ # Define the ImageJ command
+ cmd = "Define Multi-View Dataset"
+
+ # Set the default values for dataset definitions
+ options = set_default_values(project_filename, file_info["path"], bf_series_type)
+
+ # Construct the options for dataset definitions
+ options = (
+ options
+ + "how_to_store_input_images=["
+ + "Re-save as multiresolution HDF5"
+ + "] "
+ + "load_raw_data_virtually "
+ + "metadata_save_path=["
+ + result_folder
+ + "] "
+ + "image_data_save_path=["
+ + result_folder
+ + "] "
+ + "check_stack_sizes "
+ + "apply_angle_rotation "
+ + "split_hdf5 "
+ + "timepoints_per_partition=1 "
+ + "setups_per_partition=0 "
+ + "use_deflate_compression "
+ )
+
+ # Construct the final call to ImageJ
+ final_call = "IJ.run(cmd=[%s], params=[%s])" % (cmd, options)
+
+ # Define the dataset using the "Auto-Loader" option
+ bdv.define_dataset_auto(project_filename, file_info["path"], bf_series_type)
+ # Check if the final call is in the log
+ assert final_call == caplog.messages[0]
diff --git a/tests/bdv/test_definitionoptions.py b/tests/bdv/test_definitionoptions.py
new file mode 100644
index 00000000..74d4ce99
--- /dev/null
+++ b/tests/bdv/test_definitionoptions.py
@@ -0,0 +1,87 @@
+"""Tests for the imcflibs.imagej.bdv.DefinitionOptions class."""
+
+import pytest
+
+from imcflibs.imagej.bdv import DefinitionOptions
+
+
+def test_defaults():
+ """Test the default options by calling all formatters on a "raw" objects."""
+ acitt_options = (
+ "multiple_angles=[NO (one angle)] "
+ "multiple_channels=[YES (all channels in one file)] "
+ "multiple_illuminations_directions=[NO (one illumination direction)] "
+ "multiple_tiles=[YES (one file per tile)] "
+ "multiple_timepoints=[NO (one time-point)] "
+ )
+
+ def_opts = DefinitionOptions()
+
+ assert def_opts.fmt_acitt_options() == acitt_options
+
+
+def test__definition_option():
+ """Test an example with wrong setting for definition option."""
+
+ test_value = "Multiple"
+
+ def_opts = DefinitionOptions()
+ with pytest.raises(ValueError) as excinfo:
+ def_opts.set_angle_definition(test_value)
+ assert str(excinfo.value) == (
+ "Value must be one of: ['single', 'multi_multi']. Support for "
+ "'multi_single' is not available for angles and illuminations."
+ )
+
+
+def test__multiple_timepoints_files():
+ """Test an example setting how to treat multiple time-points."""
+
+ acitt_options = (
+ "multiple_angles=[NO (one angle)] "
+ "multiple_channels=[YES (all channels in one file)] "
+ "multiple_illuminations_directions=[NO (one illumination direction)] "
+ "multiple_tiles=[YES (one file per tile)] "
+ "multiple_timepoints=[YES (one file per time-point)] "
+ )
+
+ def_opts = DefinitionOptions()
+ def_opts.set_timepoint_definition("multi_multi")
+
+ assert def_opts.fmt_acitt_options() == acitt_options
+
+
+def test__multiple_channels_files_multiple_timepoints():
+ """Test an example setting how to treat multiple channels and multiple time-points."""
+
+ acitt_options = (
+ "multiple_angles=[NO (one angle)] "
+ "multiple_channels=[YES (one file per channel)] "
+ "multiple_illuminations_directions=[NO (one illumination direction)] "
+ "multiple_tiles=[YES (one file per tile)] "
+ "multiple_timepoints=[YES (all time-points in one file)] "
+ )
+
+ def_opts = DefinitionOptions()
+ def_opts.set_channel_definition("multi_multi")
+ def_opts.set_timepoint_definition("multi_single")
+
+ assert def_opts.fmt_acitt_options() == acitt_options
+
+
+def test_single_tile_multiple_angles_files():
+ """Test an example on with one tile and multiple angle files."""
+
+ acitt_options = (
+ "multiple_angles=[YES (one file per angle)] "
+ "multiple_channels=[YES (all channels in one file)] "
+ "multiple_illuminations_directions=[NO (one illumination direction)] "
+ "multiple_tiles=[NO (one tile)] "
+ "multiple_timepoints=[NO (one time-point)] "
+ )
+
+ def_opts = DefinitionOptions()
+ def_opts.set_angle_definition("multi_multi")
+ def_opts.set_tile_definition("single")
+
+ assert def_opts.fmt_acitt_options() == acitt_options
diff --git a/tests/bdv/test_processingoptions.py b/tests/bdv/test_processingoptions.py
new file mode 100644
index 00000000..218d3f31
--- /dev/null
+++ b/tests/bdv/test_processingoptions.py
@@ -0,0 +1,105 @@
+"""Tests for the ProcessingOptions class from the imcflibs.imagej.bdv module."""
+
+from imcflibs.imagej.bdv import ProcessingOptions
+
+
+def test_defaults():
+ """Test the default options by calling all formatters on a "raw" object."""
+ acitt_options = (
+ "process_angle=[All angles] "
+ "process_channel=[All channels] "
+ "process_illumination=[All illuminations] "
+ "process_tile=[All tiles] "
+ "process_timepoint=[All Timepoints] "
+ )
+ acitt_selectors = " "
+ how_to_treat = (
+ "how_to_treat_angles=[treat individually] "
+ "how_to_treat_channels=group "
+ "how_to_treat_illuminations=group "
+ "how_to_treat_tiles=compare "
+ "how_to_treat_timepoints=[treat individually] "
+ )
+ use_acitt = (
+ "channels=[Average Channels] "
+ "illuminations=[Average Illuminations] "
+ )
+
+ proc_opts = ProcessingOptions()
+
+ assert proc_opts.fmt_acitt_options() == acitt_options
+ assert proc_opts.fmt_acitt_selectors() == acitt_selectors
+ assert proc_opts.fmt_how_to_treat() == how_to_treat
+ assert proc_opts.fmt_use_acitt() == use_acitt
+
+
+def test__treat_tc_ti__ref_c1():
+ """Test an example setting how to treat components using a reference channel."""
+ # refers to "Example 1" from the BDV TODO list
+ # FIXME: what are the actual inputs and the correct output string??
+ acitt_options = (
+ "process_angle=[All angles] "
+ "process_channel=[All channels] "
+ "process_illumination=[All illuminations] "
+ "process_tile=[All tiles] "
+ "process_timepoint=[All Timepoints] "
+ )
+ acitt_selectors = " "
+ how_to_treat = (
+ "how_to_treat_angles=[treat individually] "
+ "how_to_treat_channels=group "
+ "how_to_treat_illuminations=group "
+ "how_to_treat_tiles=compare "
+ "how_to_treat_timepoints=[treat individually] "
+ )
+ use_acitt = (
+ "channels=[use Channel 1] "
+ "illuminations=[Average Illuminations] "
+ )
+
+ proc_opts = ProcessingOptions()
+ proc_opts.treat_tiles("compare")
+ proc_opts.treat_timepoints("[treat individually]")
+ proc_opts.reference_channel(1)
+
+ assert proc_opts.fmt_acitt_options() == acitt_options
+ assert proc_opts.fmt_acitt_selectors() == acitt_selectors
+ assert proc_opts.fmt_use_acitt() == use_acitt
+ assert proc_opts.fmt_how_to_treat() == how_to_treat
+
+
+def test__process_c1_treat_tg_ti_use_t3():
+ """Test an example setting using a reference channel and a reference tile."""
+
+ acitt_options = (
+ "process_angle=[All angles] "
+ "process_channel=[Single channel (Select from List)] "
+ "process_illumination=[All illuminations] "
+ "process_tile=[All tiles] "
+ "process_timepoint=[All Timepoints] "
+ )
+
+ acitt_selectors = "processing_channel=[channel 1] "
+ how_to_treat = (
+ "how_to_treat_angles=[treat individually] "
+ "how_to_treat_channels=group "
+ "how_to_treat_illuminations=group "
+ "how_to_treat_tiles=group "
+ "how_to_treat_timepoints=[treat individually] "
+ )
+ use_acitt = (
+ "channels=[Average Channels] "
+ "illuminations=[Average Illuminations] "
+ "tiles=[use Tile 3] "
+ )
+
+ proc_opts = ProcessingOptions()
+ proc_opts.process_channel(1)
+ # proc_opts.treat_timepoints("[treat individually]")
+ proc_opts.treat_tiles("group")
+ proc_opts.reference_tile(3)
+
+ assert proc_opts.fmt_acitt_options() == acitt_options
+ assert proc_opts.fmt_acitt_selectors() == acitt_selectors
+ assert proc_opts.fmt_use_acitt() == use_acitt
+ assert proc_opts.fmt_how_to_treat() == how_to_treat
diff --git a/tests/bdv/test_processingoptions_example3.py b/tests/bdv/test_processingoptions_example3.py
new file mode 100644
index 00000000..7aec8699
--- /dev/null
+++ b/tests/bdv/test_processingoptions_example3.py
@@ -0,0 +1,33 @@
+"""Tests for ProcessingOptions class with multiple reference channels configuration."""
+
+from imcflibs.imagej.bdv import ProcessingOptions
+
+
+def test__process_c1c2_treat_tc_ti():
+ """Test an example setting using 2 reference channels."""
+
+ acitt_options = (
+ "process_angle=[All angles] "
+ "process_channel=[Multiple channels (Select from List)] "
+ "process_illumination=[All illuminations] "
+ "process_tile=[All tiles] "
+ "process_timepoint=[All Timepoints] "
+ )
+
+ acitt_selectors = "channel_1 channel_2 "
+ how_to_treat = (
+ "how_to_treat_angles=[treat individually] "
+ "how_to_treat_channels=group "
+ "how_to_treat_illuminations=group "
+ "how_to_treat_tiles=compare "
+ "how_to_treat_timepoints=[treat individually] "
+ )
+ use_acitt = "channels=[Average Channels] illuminations=[Average Illuminations] "
+
+ proc_opts = ProcessingOptions()
+ proc_opts.process_channel([1, 2])
+
+ assert proc_opts.fmt_acitt_options() == acitt_options
+ assert proc_opts.fmt_acitt_selectors() == acitt_selectors
+ assert proc_opts.fmt_use_acitt() == use_acitt
+ assert proc_opts.fmt_how_to_treat() == how_to_treat
diff --git a/tests/bdv/test_processingoptions_example4.py b/tests/bdv/test_processingoptions_example4.py
new file mode 100644
index 00000000..4331384f
--- /dev/null
+++ b/tests/bdv/test_processingoptions_example4.py
@@ -0,0 +1,32 @@
+"""Tests for the ProcessingOptions class handling channel specific selection."""
+
+from imcflibs.imagej.bdv import ProcessingOptions
+
+
+def test__process_c1c3():
+ """Test an example setting to process only 2 channels."""
+
+ acitt_options = (
+ "process_angle=[All angles] "
+ "process_channel=[Range of channels (Specify by Name)] "
+ "process_illumination=[All illuminations] "
+ "process_tile=[All tiles] "
+ "process_timepoint=[All Timepoints] "
+ )
+ acitt_selectors = "process_following_channels=1-3 "
+ how_to_treat = (
+ "how_to_treat_angles=[treat individually] "
+ "how_to_treat_channels=group "
+ "how_to_treat_illuminations=group "
+ "how_to_treat_tiles=compare "
+ "how_to_treat_timepoints=[treat individually] "
+ )
+ use_acitt = "channels=[Average Channels] illuminations=[Average Illuminations] "
+
+ proc_opts = ProcessingOptions()
+ proc_opts.process_channel(1, 3)
+
+ assert proc_opts.fmt_acitt_options() == acitt_options
+ assert proc_opts.fmt_acitt_selectors() == acitt_selectors
+ assert proc_opts.fmt_use_acitt() == use_acitt
+ assert proc_opts.fmt_how_to_treat() == how_to_treat
diff --git a/tests/imagej/bioformats/import_image/test_01.py b/tests/interactive-imagej/bioformats/import_image/test_01.py
similarity index 100%
rename from tests/imagej/bioformats/import_image/test_01.py
rename to tests/interactive-imagej/bioformats/import_image/test_01.py
diff --git a/tests/interactive-imagej/bioformats/metadata/test_metadata.md b/tests/interactive-imagej/bioformats/metadata/test_metadata.md
new file mode 100644
index 00000000..a6328955
--- /dev/null
+++ b/tests/interactive-imagej/bioformats/metadata/test_metadata.md
@@ -0,0 +1,37 @@
+The following is a test script for the metadata retrieval methods in `imcflibs.imagej.bioformats`.
+
+Copy the code below into a Fiji installation that has the release `python-imcflibs-1.5.0.jar` in its `jars/` directory.
+
+Adjust the source folder and the file names in the corresponding lines, then run the script. If the metadata is printed to the Fiji output window, the methods are working as intended.
+
+```python
+# @ File (label="IMCF testdata location", style="directory") IMCF_TESTDATA
+
+import os
+from imcflibs.pathtools import join2
+from imcflibs.imagej import bioformats
+from ij import IJ
+
+# Testing for the metadata retrieval through Bioformats
+
+# Add directory path here that contains the files you wish to test for
+
+file_path_1 = join2(IMCF_TESTDATA, "bioformats-multiposition/DON_25922_20250201_25922_2_01.vsi")
+file_path_2 = join2(IMCF_TESTDATA, "bioformats-multiposition/DON_25922_20250201_25922_2_02.vsi")
+file_path_3 = join2(IMCF_TESTDATA, "bioformats-multiposition/DON_25922_20250201_25922_2_03.vsi")
+
+metadata = bioformats.get_metadata_from_file(file_path_1)
+print(metadata.unit_width)
+print(metadata.unit)
+print(metadata.channel_count)
+
+# Stage metadata and coordinates test for a list of vsi files
+fnames = [file_path_1, file_path_2, file_path_3]
+
+metadata_stage = bioformats.get_stage_coords(fnames)
+
+print(metadata_stage.image_calibration)
+print(metadata_stage.stage_coordinates_x)
+print(metadata_stage.stage_coordinates_y)
+print(metadata_stage.stage_coordinates_z)
+```
\ No newline at end of file
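+
+As a quick plausibility check, one can additionally verify that one set of
+stage coordinates was retrieved per input file. This is a minimal sketch,
+assuming the `stage_coordinates_*` attributes are list-like with one entry per
+file (an assumption, not verified against the implementation):
+
+```python
+# hypothetical sanity check, assuming list-like coordinate attributes:
+assert len(metadata_stage.stage_coordinates_x) == len(fnames)
+assert len(metadata_stage.stage_coordinates_y) == len(fnames)
+assert len(metadata_stage.stage_coordinates_z) == len(fnames)
+print("retrieved coordinates for %d files" % len(fnames))
+```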
diff --git a/tests/imagej/bioformats/write_bf_memoryfile/test_01.py b/tests/interactive-imagej/bioformats/write_bf_memoryfile/test_01.py
similarity index 100%
rename from tests/imagej/bioformats/write_bf_memoryfile/test_01.py
rename to tests/interactive-imagej/bioformats/write_bf_memoryfile/test_01.py
diff --git a/tests/interactive-imagej/send-notification-email.md b/tests/interactive-imagej/send-notification-email.md
new file mode 100644
index 00000000..eb81e6e9
--- /dev/null
+++ b/tests/interactive-imagej/send-notification-email.md
@@ -0,0 +1,39 @@
+# Test the send_notification_email function
+
+```python
+from imcflibs.imagej.misc import send_notification_email
+
+from imcflibs.log import LOG as log
+from imcflibs.log import enable_console_logging
+from imcflibs.log import set_loglevel
+
+
+"""
+Usage:
+BEFORE starting Fiji, add the following lines to IJ_Prefs.txt:
+
+.imcf.sender_email=imcf@unibas.ch
+.imcf.smtpserver=smtp.unibas.ch
+
+Linux/Mac: ~/.imagej/IJ_Prefs.txt
+Windows: C:\Users\<username>\.imagej\IJ_Prefs.txt
+"""
+
+
+enable_console_logging()
+set_loglevel(2)
+
+
+# see if logging works:
+log.warn("warn")
+log.debug("DEBUG")
+
+send_notification_email(
+ job_name="my job",
+ recipient="nikolaus.ehrenfeuchter@unibas.ch",
+ filename="magic-segmentation.py",
+ total_execution_time="5 years",
+)
+
+log.info("DONE")
+```
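+
+To verify that Fiji actually picked up the preference values, they can be
+inspected through ImageJ's `Prefs` API. A minimal sketch, assuming the keys
+are queried without the leading dot (as is usual for `IJ_Prefs.txt` entries):
+
+```python
+from ij import Prefs
+
+# keys from IJ_Prefs.txt are queried without their leading dot:
+print(Prefs.get("imcf.sender_email", "<not set>"))
+print(Prefs.get("imcf.smtpserver", "<not set>"))
+```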
diff --git a/tests/interactive-imagej/shading-test.md b/tests/interactive-imagej/shading-test.md
new file mode 100644
index 00000000..9371be38
--- /dev/null
+++ b/tests/interactive-imagej/shading-test.md
@@ -0,0 +1,18 @@
+### ----------------------
+
+The following code block is a Python script to be used in a Fiji installation that has the shading branch's `.jar` already copied into its `jars/` directory.
+
+It is recommended to open an image you wish to test on (e.g. `Shaded-blobs.png`), then drag this script into Fiji and run it.
+If a resulting image pops up (when using the flatfield method), everything works as intended.
+### ----------------------
+
+```python
+from imcflibs.imagej import shading
+# import imcflibs.imagej
+import ij
+from ij import IJ
+
+imp = IJ.getImage()
+imcf_shading = shading.simple_flatfield_correction(imp)
+# Or any other method in class shading
+imcf_shading.show()
+```
\ No newline at end of file
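+
+The corrected image can also be written to disk for later inspection using the
+standard ImageJ API. A minimal sketch (the output path is a placeholder):
+
+```python
+from ij import IJ
+
+# save the corrected image for later inspection (placeholder path):
+IJ.saveAs(imcf_shading, "Tiff", "/tmp/flatfield-corrected.tif")
+```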
diff --git a/tests/interactive-imagej/test_trackmate.md b/tests/interactive-imagej/test_trackmate.md
new file mode 100644
index 00000000..4561e790
--- /dev/null
+++ b/tests/interactive-imagej/test_trackmate.md
@@ -0,0 +1,27 @@
+This is a test file for the trackmate branch and for the `trackmate` Python module.
+
+The following Fiji script requires a `.jar` built from the trackmate branch to be installed in Fiji.
+
+You can open the blobs sample image (`CTRL+SHIFT+B`) and then run the following script:
+
+```python
+from imcflibs.imagej import trackmate
+from ij import IJ
+
+imp = IJ.getImage()
+# Detector
+settings = trackmate.log_detector(imp, 5, 1, 0)
+# settings = trackmate.cellpose_detector(imp, r"S:\cellpose_env", "NUCLEI", 23.0, 1, 0) # WORKS, tested
+# settings = trackmate.stardist_detector(imp, 1) # WORKS, tested
+
+# Manual tracker addition, run_trackmate does this otherwise
+# settings = trackmate.sparseLAP_tracker(settings)
+
+# Spot and track filtering
+# settings = trackmate.spot_filtering(settings, None, 1.0, None, None)
+# settings = trackmate.track_filtering(settings, 15.0, 15.0, 3, 1, 1)
+
+res_img = trackmate.run_trackmate(imp, settings)
+res_img.show()
+
+```
\ No newline at end of file
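+
+For a full pipeline including spot and track filtering, the commented calls
+above can be combined. This is a sketch reusing exactly the calls and
+parameter values shown above (their meanings are not documented here):
+
+```python
+from imcflibs.imagej import trackmate
+from ij import IJ
+
+imp = IJ.getImage()
+settings = trackmate.log_detector(imp, 5, 1, 0)
+# spot and track filtering, values taken from the comments above:
+settings = trackmate.spot_filtering(settings, None, 1.0, None, None)
+settings = trackmate.track_filtering(settings, 15.0, 15.0, 3, 1, 1)
+
+res_img = trackmate.run_trackmate(imp, settings)
+res_img.show()
+```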
diff --git a/tests/test_iotools.py b/tests/test_iotools.py
index 12084766..bf8c6aae 100644
--- a/tests/test_iotools.py
+++ b/tests/test_iotools.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+"""Tests for `imcflibs.iotools`."""
# -*- coding: utf-8 -*-
import pytest
@@ -6,52 +6,73 @@
from os.path import join
+import io
+
from imcflibs.iotools import filehandle
from imcflibs.iotools import readtxt
-__author__ = "Niko Ehrenfeuchter"
-__copyright__ = "Niko Ehrenfeuchter"
-__license__ = "gpl3"
+try:
+ # Python 2: "file" is built-in
+ file_types = file, io.IOBase
+except NameError:
+ # Python 3: "file" fully replaced with IOBase
+ file_types = (io.IOBase,)
+
def test_filehandle(tmpdir):
- tmpfile = tmpdir.join('testfile')
+ """Test instance types of objects returned by `filehandle()`."""
+ tmpfile = tmpdir.join("testfile")
tmpname = str(tmpfile)
- tmphandle = open(str(tmpfile), 'w')
+ # print(tmpname)
+ tmphandle = open(str(tmpfile), "w")
print(type(tmphandle))
- assert type(tmpname) is str
- assert type(tmphandle) is file
- assert type(filehandle(tmpname)) is file
- assert type(filehandle(tmphandle, 'w')) is file
+
+ assert isinstance(tmpname, str)
+ print("tmpname is str-like π¦")
+
+ assert isinstance(tmphandle, file_types)
+ print("tmphandle is file/io-like π¦")
+
+ assert isinstance(filehandle(tmpname), file_types)
+ print("filehandle(tmpname) is file/io-like π¦")
+
+ assert isinstance(filehandle(tmphandle, "w"), file_types)
+ print("filehandle(tmphandle) is file/io-like π¦")
def test_readtxt(tmpdir):
+ """Test the `readtxt()` function.
+
+ Read text from a regular file as well as from a zip file, both straight and
+ using the `flat` option.
+ """
content = [
- u'lorem\n',
- u'ipsum\n',
- u'and some more\n',
- u'dummy text\n',
+ "lorem\n",
+ "ipsum\n",
+ "and some more\n",
+ "dummy text\n",
]
fh = tmpdir.mkdir("readtxt").join("content.txt")
- fh.write(u''.join(content))
+ fh.write("".join(content))
print(fh.basename)
print(fh.dirname)
- with zipfile.ZipFile(join(fh.dirname, 'archive.zip'), 'w') as zf:
- zf.write(str(fh), arcname='content.txt')
+ with zipfile.ZipFile(join(fh.dirname, "archive.zip"), "w") as zf:
+ zf.write(str(fh), arcname="content.txt")
print("wrote [%s] into [%s]" % (str(fh), zf.filename))
-
+
print(content)
fromfile = readtxt(str(fh))
print(fromfile)
assert fromfile == content
fromfile_flat = readtxt(str(fh), flat=True)
- assert fromfile_flat == ''.join(content)
+ assert fromfile_flat == "".join(content)
- print(join(fh.dirname, 'archive.zip'))
- fromzip = readtxt('content.txt', join(fh.dirname, 'archive.zip'))
+ print(join(fh.dirname, "archive.zip"))
+ fromzip = readtxt("content.txt", join(fh.dirname, "archive.zip"))
print(fromzip)
assert fromzip == content
- fromzip_flat = readtxt('content.txt', join(fh.dirname, 'archive.zip'), flat=True)
- assert fromzip_flat == ''.join(content)
+ fromzip_flat = readtxt("content.txt", join(fh.dirname, "archive.zip"), flat=True)
+ assert fromzip_flat == "".join(content)
diff --git a/tests/test_pathtools.py b/tests/test_pathtools.py
index fb81e811..2a0338e6 100644
--- a/tests/test_pathtools.py
+++ b/tests/test_pathtools.py
@@ -1,54 +1,116 @@
-#!/usr/bin/env python
+"""Tests for `imcflibs.pathtools`."""
# -*- coding: utf-8 -*-
-import pytest
from imcflibs.pathtools import parse_path
from imcflibs.pathtools import jython_fiji_exists
from imcflibs.pathtools import image_basename
-
-__author__ = "Niko Ehrenfeuchter"
-__copyright__ = "Niko Ehrenfeuchter"
-__license__ = "gpl3"
+from imcflibs.pathtools import gen_name_from_orig
+from imcflibs.pathtools import derive_out_dir
def test_parse_path():
- path = '/tmp/foo/'
+ """Tests using regular POSIX-style paths."""
+ path = "/tmp/foo/"
path_to_dir = parse_path(path)
- path_to_file = parse_path(path + 'file.ext')
- path_to_file_noext = parse_path(path + 'file')
+ path_to_file = parse_path(path + "file.ext")
+ path_to_file_noext = parse_path(path + "file")
+
+ assert path_to_file["orig"] == path + "file.ext"
+ assert path_to_file["path"] == path
+ assert path_to_file["dname"] == "foo"
+ assert path_to_file["fname"] == "file.ext"
+ assert path_to_file["ext"] == ".ext"
+
+ assert path_to_file_noext["ext"] == ""
+ assert path_to_file_noext["fname"] == "file"
+ assert path_to_file_noext["dname"] == "foo"
+ assert path_to_file_noext["path"] == path
- assert path_to_file['orig'] == path + 'file.ext'
- assert path_to_file['path'] == path
- assert path_to_file['dname'] == 'foo'
- assert path_to_file['fname'] == 'file.ext'
- assert path_to_file['ext'] == '.ext'
+ assert path_to_dir["path"] == path
+ assert path_to_dir["fname"] == ""
+ assert path_to_dir["dname"] == "foo"
+ assert path_to_dir["ext"] == ""
- assert path_to_file_noext['ext'] == ''
- assert path_to_file_noext['fname'] == 'file'
- assert path_to_file_noext['dname'] == 'foo'
- assert path_to_file_noext['path'] == path
- assert path_to_dir['path'] == path
- assert path_to_dir['fname'] == ''
- assert path_to_dir['dname'] == 'foo'
- assert path_to_dir['ext'] == ''
+def test_parse_path_with_prefix():
+ """Test parse_path with a prefix parameter."""
+ exp_full = "/FOO/BAR/tmp/foo/file.suffix"
+ prefix = "/FOO/BAR/"
+ path = "/tmp/foo/file.suffix"
+ assert parse_path(path, prefix)["full"] == exp_full
+
+ # test again without trailing / leading slashes:
+ prefix = "/FOO/BAR"
+ path = "tmp/foo/file.suffix"
+ assert parse_path(path, prefix)["full"] == exp_full
def test_parse_path_windows():
- path = r'C:\foo\bar'
+ """Test using a Windows-style path."""
+ path = r"C:\Foo\Bar"
parsed = parse_path(path)
- assert parsed['orig'] == path
- assert parsed['full'] == r'C:/foo/bar'
- assert parsed['fname'] == 'bar'
- assert parsed['dname'] == 'foo'
+ assert parsed["orig"] == path
+ assert parsed["full"] == "C:/Foo/Bar"
+ assert parsed["fname"] == "Bar"
+ assert parsed["dname"] == "Foo"
+
+
+def test_parse_path_windows_newline_tab():
+ """Test a Windows path with newline and tab sequences as raw string."""
+ path = r"C:\Temp\new\file.ext"
+ parsed = parse_path(path)
+
+ assert parsed == {
+ "dname": "new",
+ "ext": ".ext",
+ "fname": "file.ext",
+ "full": "C:/Temp/new/file.ext",
+ "basename": "file",
+ "orig": "C:\\Temp\\new\\file.ext",
+ "parent": "C:/Temp",
+ "path": "C:/Temp/new/",
+ }
+
+
+def test_parse_path_windows_nonraw():
+ r"""Test non-raw string containing newline `\n` and tab `\t` sequences.
+
+ As `parse_path()` cannot work on non-raw strings containing escape
+ sequences, the parsed result will not be the expected one.
+ """
+ path = "C:\new_folder\test"
+ parsed = parse_path(path)
+
+ assert parsed["full"] != r"C:\new_folder\test"
+ assert parsed["fname"] != "test"
def test_jython_fiji_exists(tmpdir):
+ """Test the Jython/Fiji `os.path.exists()` workaround."""
assert jython_fiji_exists(str(tmpdir)) == True
def test_image_basename():
- assert image_basename('/path/to/image_file_01.png') == 'image_file_01'
- assert image_basename('more-complex-stack.ome.tif') == 'more-complex-stack'
- assert image_basename('/tmp/FoObAr.OMe.tIf') == 'FoObAr'
+ """Test basename extraction for various image file names."""
+ assert image_basename("/path/to/image_file_01.png") == "image_file_01"
+ assert image_basename("more-complex-stack.ome.tif") == "more-complex-stack"
+ assert image_basename("/tmp/FoObAr.OMe.tIf") == "FoObAr"
+
+
+def test_gen_name_from_orig():
+ """Test assembling an output name from input, tag and suffix."""
+ outpath = "/outpath"
+ inpath = "/inpath/to/foobar.tif"
+ tag = "-avg"
+ suffix = ".h5"
+ generated = gen_name_from_orig(outpath, inpath, tag, suffix)
+ assert generated == "/outpath/foobar-avg.h5"
+
+
+def test_derive_out_dir():
+ """Test derive_out_dir() using various parameter combinations."""
+ assert derive_out_dir("/foo", "-") == "/foo"
+ assert derive_out_dir("/foo", "none") == "/foo"
+ assert derive_out_dir("/foo", "NONE") == "/foo"
+ assert derive_out_dir("/foo", "/bar") == "/bar"
diff --git a/tests/test_strtools.py b/tests/test_strtools.py
index 823b5615..87cf17c5 100644
--- a/tests/test_strtools.py
+++ b/tests/test_strtools.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+"""Tests for `imcflibs.strtools`."""
# -*- coding: utf-8 -*-
import pytest
@@ -9,30 +9,31 @@
from imcflibs.strtools import flatten
from imcflibs.strtools import strip_prefix
-__author__ = "Niko Ehrenfeuchter"
-__copyright__ = "Niko Ehrenfeuchter"
-__license__ = "gpl3"
-
def test__is_string_like():
- assert _is_string_like('foo') == True
+ """Test `_is_string_like()`."""
+ assert _is_string_like("foo") == True
assert _is_string_like(12345) == False
def test_filename_from_string():
- assert filename('test_file_name') == 'test_file_name'
+ """Test `filename()` using a string."""
+ assert filename("test_file_name") == "test_file_name"
def test_filename_from_handle(tmpdir):
+ """Test `filename()` using a file handle."""
path = str(tmpdir)
- fhandle = tmpdir.join('foo.txt')
- assert filename(fhandle) == os.path.join(path, 'foo.txt')
+ fhandle = tmpdir.join("foo.txt")
+ assert filename(fhandle) == os.path.join(path, "foo.txt")
def test_flatten():
- assert flatten(('foo', 'bar')) == 'foobar'
+ """Test `flatten()` using a tuple."""
+ assert flatten(("foo", "bar")) == "foobar"
def test_strip_prefix():
- assert strip_prefix('foobar', 'foo') == 'bar'
- assert strip_prefix('foobar', 'bar') == 'foobar'
\ No newline at end of file
+ """Test `strip_prefix()`."""
+ assert strip_prefix("foobar", "foo") == "bar"
+ assert strip_prefix("foobar", "bar") == "foobar"