From 33573d5645265e77c079fe9c232587deb9e5fc7d Mon Sep 17 00:00:00 2001 From: Adam Tyson Date: Wed, 22 Apr 2020 10:03:57 +0100 Subject: [PATCH] bump (#27) * points to brainrender * update testing * refactor * refactor * badges * notes * initial inj finder commit (#11) * formatting with black * add .DS_store to gitignore * typo * update imports * update imports * deal with no obj path given * error typo * move utils too imlib * remove amap imports * optional logging to file * tidy * registration moved to imlib * keep largest object * reduce RAM usage * remove unnecessary logging args * adds lesion and track estimation and generic tools * add missing dir support for cell export * add missing dir support for cell export * reduce mem requirements * remove old code * remove old code * refactor scaling * fix failing heatmap test * update docs * ensure output directory exists * make pixel sizes mandatory * make pixel sizes mandatory (#17) * move functionality to imlib * remove hard coded smoothing * Refactor (#19) * make pixel sizes mandatory * move functionality to imlib * remove hard coded smoothing * update docs * Injectionsite (#20) * refactors code from SL to import from neuro and imlib only * moves amap_vis to neuro and removes amap and cellfinder dependencies; * format with black * amap_vis * bump * remove cellfinder dependency * cli entry point for fibre track segmentation * refactor seg tools * start manual seg * remove hardocoding in manual region segmentation * refactor paths * bump * update docs * update docs * update docs * update docs * support multiple objects * color objects * bump * update reqs * improve rendering of flat objects * bump * prevent saving unnecessary common volume * allow ROIs to be edited * label regions * bump * bump Co-authored-by: Federico Claudi Co-authored-by: stephen --- .../user_guide/tools/manual_segmentation.md | 19 +- docs/.buildinfo | 2 +- .../tools/manual_segmentation.md.txt | 19 +- docs/_static/basic.css | 2 +- docs/_static/doctools.js | 7 +- docs/_static/documentation_options.js | 1 + docs/_static/language_data.js | 2 +- docs/_static/searchtools.js | 25 ++- docs/genindex.html | 19 +- docs/index.html | 36 ++-- docs/main/dev/CONTRIBUTING.html | 139 ++++++-------- .../tools/cells_to_brainrender.html | 116 +++++------ docs/main/user_guide/tools/heatmap.html | 127 +++++------- .../user_guide/tools/manual_segmentation.html | 181 ++++++++---------- docs/objects.inv | Bin 356 -> 327 bytes docs/search.html | 21 +- docs/searchindex.js | 2 +- neuro/brain_render_tools.py | 23 +-- .../manual_region_segmentation/segment.py | 100 ++++++---- setup.py | 4 +- 20 files changed, 382 insertions(+), 463 deletions(-) diff --git a/doc_build/main/user_guide/tools/manual_segmentation.md b/doc_build/main/user_guide/tools/manual_segmentation.md index b7fe006..3d0c061 100644 --- a/doc_build/main/user_guide/tools/manual_segmentation.md +++ b/doc_build/main/user_guide/tools/manual_segmentation.md @@ -36,8 +36,6 @@ Run `manual_region_seg -h` to see all options. `/home/analysis/cellfinder_output/registration/`) ##### The following options may also need to be used: -* `--save-image` Store the resulting segmented region image (e.g. for -inspecting in 2D. (default: False) * `--preview` Preview the segmented regions in brainrender (default:False) * `--debug` Debug mode. Will increase verbosity of logging and save all intermediate files for diagnosis of software issues. 
(default: False) @@ -51,17 +49,15 @@ take a few minutes) and then display the image in a manual_seg_window ##### To segment regions: -* Ensure that the "Regions" tab is selected (left hand side) +* Ensure that the "new_region" tab is selected (left hand side) +* Rename this region (by selecting the "new_region" text) * Navigate to where you want to draw your region of interest. * Use the scroll bar at the bottom (or left/right keys) to navigate through the image stack * Use the mouse scrollwheel to zoom in or out * Drag with the mouse the pan the view -* Select a label ID (by pressing the `+` button in the `label` row, top-left), -the ID is not important, but `0` refers to no label, so you may as well start -from 1. -* Choose a brush size (also in top left box) +* Choose a brush size (top left box) * Activate painting mode (by selecting the paintbrush, top left). You can go back to the navigation mode by selecting the magnifying glass. * Colour in your region that you want to segment, ensuring that you make a @@ -69,7 +65,16 @@ solid object. * Selecting the `ndim` toggle in the top left will extend the brush size in three dimensions (so it will colour in multiple layers). +* To add a new region press `Control+N` * Repeat above for each region you wish to segment. + * Press `Control+S` on your keyboard to save the regions. If you used the `--preview flag`, once they are saved, they will be displayed in a brainrender window. + + +##### Editing regions: +If you have already run `manual_region_seg`, and run it again, the segmented +regions will be shown. You can edit them, and press `Control+S` to resave them. +If you don't want to save any changes, press `Control+X` to exit. The regions + will still be previewed if you have selected that option. \ No newline at end of file diff --git a/docs/.buildinfo b/docs/.buildinfo index eaba643..279e694 100644 --- a/docs/.buildinfo +++ b/docs/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: ed007dbdf8172460ccc81a16ac8bf976 +config: 21f1b720085db93f2bf3fb4298a3bb15 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_sources/main/user_guide/tools/manual_segmentation.md.txt b/docs/_sources/main/user_guide/tools/manual_segmentation.md.txt index b7fe006..3d0c061 100644 --- a/docs/_sources/main/user_guide/tools/manual_segmentation.md.txt +++ b/docs/_sources/main/user_guide/tools/manual_segmentation.md.txt @@ -36,8 +36,6 @@ Run `manual_region_seg -h` to see all options. `/home/analysis/cellfinder_output/registration/`) ##### The following options may also need to be used: -* `--save-image` Store the resulting segmented region image (e.g. for -inspecting in 2D. (default: False) * `--preview` Preview the segmented regions in brainrender (default:False) * `--debug` Debug mode. Will increase verbosity of logging and save all intermediate files for diagnosis of software issues. (default: False) @@ -51,17 +49,15 @@ take a few minutes) and then display the image in a manual_seg_window ##### To segment regions: -* Ensure that the "Regions" tab is selected (left hand side) +* Ensure that the "new_region" tab is selected (left hand side) +* Rename this region (by selecting the "new_region" text) * Navigate to where you want to draw your region of interest. 
* Use the scroll bar at the bottom (or left/right keys) to navigate through the image stack * Use the mouse scrollwheel to zoom in or out * Drag with the mouse the pan the view -* Select a label ID (by pressing the `+` button in the `label` row, top-left), -the ID is not important, but `0` refers to no label, so you may as well start -from 1. -* Choose a brush size (also in top left box) +* Choose a brush size (top left box) * Activate painting mode (by selecting the paintbrush, top left). You can go back to the navigation mode by selecting the magnifying glass. * Colour in your region that you want to segment, ensuring that you make a @@ -69,7 +65,16 @@ solid object. * Selecting the `ndim` toggle in the top left will extend the brush size in three dimensions (so it will colour in multiple layers). +* To add a new region press `Control+N` * Repeat above for each region you wish to segment. + * Press `Control+S` on your keyboard to save the regions. If you used the `--preview flag`, once they are saved, they will be displayed in a brainrender window. + + +##### Editing regions: +If you have already run `manual_region_seg`, and run it again, the segmented +regions will be shown. You can edit them, and press `Control+S` to resave them. +If you don't want to save any changes, press `Control+X` to exit. The regions + will still be previewed if you have selected that option. \ No newline at end of file diff --git a/docs/_static/basic.css b/docs/_static/basic.css index b04360d..0119285 100644 --- a/docs/_static/basic.css +++ b/docs/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ diff --git a/docs/_static/doctools.js b/docs/_static/doctools.js index b33f87f..daccd20 100644 --- a/docs/_static/doctools.js +++ b/docs/_static/doctools.js @@ -4,7 +4,7 @@ * * Sphinx JavaScript utilities for all documentation. * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -283,10 +283,11 @@ var Documentation = { }, initOnKeyListeners: function() { - $(document).keyup(function(event) { + $(document).keydown(function(event) { var activeElementType = document.activeElement.tagName; // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' + && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) { switch (event.keyCode) { case 37: // left var prevHref = $('link[rel="prev"]').prop('href'); diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js index 4790c4d..2fa8c97 100644 --- a/docs/_static/documentation_options.js +++ b/docs/_static/documentation_options.js @@ -5,6 +5,7 @@ var DOCUMENTATION_OPTIONS = { COLLAPSE_INDEX: false, BUILDER: 'html', FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: '.txt', NAVIGATION_WITH_KEYS: false diff --git a/docs/_static/language_data.js b/docs/_static/language_data.js index 5266fb1..d2b4ee9 100644 --- a/docs/_static/language_data.js +++ b/docs/_static/language_data.js @@ -5,7 +5,7 @@ * This script contains the language-specific data used by searchtools.js, * namely the list of stopwords, stemmer, scorer and splitter. * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ diff --git a/docs/_static/searchtools.js b/docs/_static/searchtools.js index ad84587..ab56499 100644 --- a/docs/_static/searchtools.js +++ b/docs/_static/searchtools.js @@ -4,7 +4,7 @@ * * Sphinx JavaScript utilities for the full-text search. * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -63,6 +63,11 @@ var Search = { htmlElement.innerHTML = htmlString; $(htmlElement).find('.headerlink').remove(); docContent = $(htmlElement).find('[role=main]')[0]; + if(docContent === undefined) { + console.warn("Content block not found. Sphinx search tries to obtain it " + + "via '[role=main]'. Could you check your theme or template."); + return ""; + } return docContent.textContent || docContent.innerText; }, @@ -245,6 +250,8 @@ var Search = { if (results.length) { var item = results.pop(); var listItem = $('
  • '); + var requestUrl = ""; + var linkUrl = ""; if (DOCUMENTATION_OPTIONS.BUILDER === 'dirhtml') { // dirhtml builder var dirname = item[0] + '/'; @@ -253,15 +260,17 @@ var Search = { } else if (dirname == 'index/') { dirname = ''; } - listItem.append($('').attr('href', - DOCUMENTATION_OPTIONS.URL_ROOT + dirname + - highlightstring + item[2]).html(item[1])); + requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + dirname; + linkUrl = requestUrl; + } else { // normal html builders - listItem.append($('').attr('href', - item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX + - highlightstring + item[2]).html(item[1])); + requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX; + linkUrl = item[0] + DOCUMENTATION_OPTIONS.LINK_SUFFIX; } + listItem.append($('').attr('href', + linkUrl + + highlightstring + item[2]).html(item[1])); if (item[3]) { listItem.append($(' (' + item[3] + ')')); Search.output.append(listItem); @@ -269,7 +278,7 @@ var Search = { displayNextItem(); }); } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) { - $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX, + $.ajax({url: requestUrl, dataType: "text", complete: function(jqxhr, textstatus) { var data = jqxhr.responseText; diff --git a/docs/genindex.html b/docs/genindex.html index ec8ff79..99a9189 100644 --- a/docs/genindex.html +++ b/docs/genindex.html @@ -22,10 +22,10 @@ - - - - + + + + @@ -79,15 +79,8 @@ - - - + +
[The following docs/ pages are regenerated Sphinx HTML build output; only their file headers are kept here. The rebuilt pages mirror the Markdown edits above: the contributing and tool pages now carry the title "<no title> — neuro documentation" with their bodies rendered as raw Markdown, and the navigation sidebars and script references are updated.]
diff --git a/docs/index.html b/docs/index.html
index bc7a199..6432952 100644
diff --git a/docs/main/dev/CONTRIBUTING.html b/docs/main/dev/CONTRIBUTING.html
index f66a39e..d054ff7 100644
diff --git a/docs/main/user_guide/tools/cells_to_brainrender.html b/docs/main/user_guide/tools/cells_to_brainrender.html
index 7e97a65..9f5446a 100644
diff --git a/docs/main/user_guide/tools/heatmap.html b/docs/main/user_guide/tools/heatmap.html
index acfd0a9..3820e15 100644
diff --git a/docs/main/user_guide/tools/manual_segmentation.html b/docs/main/user_guide/tools/manual_segmentation.html
index 174fba6..68f9b1d 100644
diff --git a/docs/objects.inv b/docs/objects.inv
index 08e62ba96d03afe754775448eea1393439f3dd4f..ee718729c4acae5393d826ba8d4600c5001312d3 100644
GIT binary patch (Bin 356 -> 327 bytes)
diff --git a/docs/search.html b/docs/search.html
index ac61305..67a6106 100644
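The manual-segmentation page among those rebuilt above documents the `--preview` option, which displays the saved regions in brainrender. A rough sketch of doing the same preview by hand is below; it is based on the `segment.py` changes later in this patch, which write one `.obj` mesh per region into a `segmented_regions/` folder inside the registration directory. The path used here is only an example taken from the docs, not a fixed location.

```python
from glob import glob

from brainrender.scene import Scene

# example amap/cellfinder registration directory (substitute your own)
regions_dir = "/home/analysis/cellfinder_output/registration/segmented_regions"

scene = Scene()
for obj_file in glob(regions_dir + "/*.obj"):
    # one mesh per region saved by manual_region_seg (Control+S)
    scene.add_from_file(obj_file, alpha=0.8)
scene.render()
```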
            diff --git a/docs/searchindex.js b/docs/searchindex.js index 75e087c..417047d 100644 --- a/docs/searchindex.js +++ b/docs/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["index","main/dev/CONTRIBUTING","main/user_guide/tools/cells_to_brainrender","main/user_guide/tools/heatmap","main/user_guide/tools/manual_segmentation"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["index.rst","main/dev/CONTRIBUTING.md","main/user_guide/tools/cells_to_brainrender.md","main/user_guide/tools/heatmap.md","main/user_guide/tools/manual_segmentation.md"],objects:{},objnames:{},objtypes:{},terms:{"case":1,"default":[2,4],"function":1,"import":[2,4],"new":1,"true":[1,2],"try":1,These:1,Use:4,Used:3,Will:4,abov:4,activ:4,add:1,add_cells_from_fil:2,added:3,adding:1,all:[1,2,3,4],allen:4,along:4,alreadi:4,amap:[0,1,3,4],amap_download:4,analysi:[0,4],ani:[1,3],anoth:1,approv:1,area:3,atla:[2,3,4],automat:[1,3],back:4,bar:4,base:4,befor:[1,4],bin:3,black:1,bottom:4,boundari:[],box:4,brain:[0,3,4],brainrend:[0,4],branch:1,brush:4,build:1,button:4,call:2,can:[2,4],cell:[0,3],cell_classif:3,cellfind:[0,1,3,4],cellfinder_output:4,cells_in_standard_spac:2,centr:0,chang:[1,2],channel:[3,4],check:1,choos:4,classifi:3,clean:1,cli:1,clone:1,code:1,collect:0,colour:4,com:1,command:1,commit:1,companion:1,consist:1,contain:[2,3],contribut:0,control:4,convert:0,creat:[2,3],cube:3,current:4,data:[0,2,3,4],date:1,debug:4,defin:[2,3],depend:4,design:1,detect:3,dev:1,develop:1,diagnosi:4,dimens:[2,3,4],directori:[2,3,4],displai:4,distribut:3,doesn:[2,3],don:[3,4],downsampl:4,drag:4,draw:4,each:[3,4],easier:1,edg:3,edit:1,either:1,end:[2,3],ensur:[1,4],error:1,exist:[2,3],exit:1,exported_cel:2,extend:4,extent:2,extract:1,fals:4,fetch:1,few:4,fig:3,figur:3,file:[3,4],filenam:[2,3],find:3,first:[2,3],flag:4,fork:1,format:0,from:[0,3,4],gaussian:3,gener:0,git:1,github:1,glass:4,hand:4,has:2,have:4,hdf:2,heatmap:0,henc:1,histogram:3,histori:1,home:4,hook:1,http:1,identifi:2,imag:[0,3,4],imlib:1,increas:4,individu:3,initi:1,inspect:4,instal:[1,4],instruct:4,interest:4,intermedi:4,intuit:3,invers:1,issu:[1,4],jupyt:2,just:3,keep:1,kei:[2,4],keyboard:4,label:4,layer:4,left:4,line:1,linux:1,log:4,magnifi:4,mai:1,main:1,maintain:1,make:[1,4],manual:0,manual_region_seg:4,mask:3,master:1,max:2,maximum:2,merg:1,minut:4,mode:4,more:3,mous:4,multipl:4,must:[2,4],name_of_downsampled_imag:4,navig:4,ndim:4,necessari:1,neuro:1,nii:[3,4],now:4,object:4,onc:4,onli:4,other:4,out:4,output:[2,3],outsid:3,overlai:3,packag:[1,4],paint:4,paintbrush:4,pan:4,particularli:0,path:3,pip:[0,1,4],pixel:[2,3],pleas:[1,4],points_to_brainrend:2,pre:1,press:4,prevent:1,preview:4,process:0,push:1,py37:1,pytest:1,raw:3,raw_data:3,refer:4,region:[],regist:[3,4],registered_atla:3,registr:4,registration_directori:4,relev:1,remot:1,remov:3,render:2,repeat:4,repositori:1,result:4,right:4,row:4,run:[1,2,3,4],sainsburi:0,sainsburywellcomecentr:1,sampl:4,save:4,scene:2,scroll:4,scrollwheel:4,second:[2,3],see:[2,3,4],seg:4,segent:3,segment:0,select:4,separ:3,set:1,shape:3,should:[1,2,3],show:3,side:4,sigma:3,singl:3,size:[2,3,4],smooth:3,softwar:[1,4],solid:4,space:[2,3,4],specifi:[2,3],squash:1,stack:4,standard:4,start:4,store:4,string:4,style:1,submit:1,support:4,tab:4,tag:1,take:4,target:1,thei:4,thi:[1,4],third:[2,3],three:4,
through:4,toggl:4,tool:[1,4],top:4,transform:4,typic:3,unit_test:1,upon:1,upstream:1,use:1,uses:1,using:4,usual:3,verbos:4,version:1,via:1,view:4,viewer:4,visualis:4,wai:3,want:4,well:4,wellcom:0,where:4,which:3,whole:0,window:4,wish:4,work:1,workflow:1,xml:[2,3],you:4,your:[1,4],your_usernam:1,zoom:4},titles:["neuro","Contributing","Converting cells from cellfinder to brainrender format","Heatmap generation","Manual segmentation"],titleterms:{The:[2,3,4],also:[2,3,4],argument:[2,3,4],brainrend:2,cell:2,cellfind:2,command:4,contribut:1,convert:2,depend:1,develop:0,file:2,follow:[2,3,4],format:[1,2],from:2,gener:3,gui:4,heatmap:3,instal:0,introduct:0,keyword:3,line:4,mai:[2,3,4],manual:4,napari:4,need:[2,3,4],neuro:0,option:[2,3,4],posit:[2,3,4],prerequisit:4,pull:1,region:4,releas:1,request:1,segment:4,setup:1,test:1,thi:2,tool:0,travi:1,usag:[2,3,4],used:[2,3,4],visualis:2}}) \ No newline at end of file +Search.setIndex({docnames:["index","main/dev/CONTRIBUTING","main/user_guide/tools/cells_to_brainrender","main/user_guide/tools/heatmap","main/user_guide/tools/manual_segmentation"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["index.rst","main/dev/CONTRIBUTING.md","main/user_guide/tools/cells_to_brainrender.md","main/user_guide/tools/heatmap.md","main/user_guide/tools/manual_segmentation.md"],objects:{},objnames:{},objtypes:{},terms:{"case":1,"default":[2,4],"function":1,"import":2,"new":[1,4],"true":[1,2],"try":1,The:[2,3,4],These:1,Use:4,Used:3,Will:4,abov:4,activ:4,adamltyson:1,add:[1,4],add_cells_from_fil:2,added:3,adding:1,again:4,all:[1,2,3,4],allen:4,along:4,alreadi:4,also:[2,3,4],alt:[3,4],amap:[0,1,3,4],amap_download:4,analysi:[0,4],ani:[1,3,4],anoth:1,approv:1,area:3,argument:[2,3,4],atla:[2,3,4],atlassian:1,automat:[1,3],back:4,bar:4,base:4,bash:[2,3,4],befor:[1,4],bin:3,black:1,bottom:4,box:4,brain:[0,3,4],brainrend:[2,4],branch:1,brancolab:[2,4],brush:4,build:1,button:[],call:2,can:[2,4],cell:[2,3],cell_classif:3,cellfind:[0,1,2,3,4],cellfinder_output:4,cells_in_standard_spac:2,centr:0,chang:[1,2,4],channel:[3,4],check:1,choos:4,classifi:3,clean:1,cli:1,clone:1,code:1,collect:0,colour:4,com:[1,2,3,4],command:[1,4],commit:1,companion:1,consist:1,contain:[2,3],contribut:1,control:4,convert:2,creat:[2,3],cube:3,current:4,data:[0,2,3,4],date:1,debug:4,defin:[2,3],depend:[1,4],design:1,detect:3,dev:1,develop:1,diagnosi:4,dimens:[2,3,4],directori:[2,3,4],displai:4,distribut:3,doc:1,doesn:[2,3],don:[3,4],downsampl:4,drag:4,draw:4,each:[3,4],easier:1,edg:3,edit:[1,4],either:1,end:[2,3],ensur:[1,4],error:1,exist:[2,3],exit:[1,4],exported_cel:2,extend:4,extent:2,extract:1,fals:4,fetch:1,few:4,fig:3,figur:3,file:[2,3,4],filenam:[2,3],find:3,first:[2,3],flag:4,follow:[2,3,4],fork:1,format:[1,2],from:[0,2,3],gaussian:3,gener:3,git:1,github:[1,2,3,4],githubusercont:[3,4],glass:4,gui:4,hand:4,has:2,have:4,hdf:2,heatmap:3,henc:1,histogram:3,histori:1,home:4,hook:1,http:[1,2,3,4],identifi:2,imag:[0,3,4],img:[3,4],imlib:1,increas:4,individu:3,initi:1,inspect:[],instal:[1,4],instruct:4,interest:4,intermedi:4,intuit:3,invers:1,issu:[1,4],jupyt:2,just:3,keep:1,kei:[2,4],keyboard:4,keyword:3,label:[],latest:1,layer:4,left:4,line:[1,4],linux:1,log:4,magnifi:4,mai:[1,2,3,4],main:1,maintain:1,make:[1,4],manual:4,manual_region_seg:4,manual_se
g_window:4,manual_segmentation_window:4,map:4,mask:3,master:[1,3,4],max:2,maximum:2,merg:1,minut:4,mode:4,more:3,mous:4,multipl:4,must:[2,4],name_of_downsampled_imag:4,napari:4,navig:4,ndim:4,necessari:1,need:[2,3,4],neuro:[1,4],new_region:4,nii:[3,4],now:4,object:4,onc:4,onli:4,option:[2,3,4],org:[1,4],other:4,out:4,output:[2,3],outsid:3,overlai:3,packag:[1,4],paint:4,paintbrush:4,pan:4,particularli:0,path:3,pip:[0,1,4],pixel:[2,3],pleas:[1,4],png:[3,4],points_to_brainrend:2,posit:[2,3,4],pre:1,prerequisit:4,press:4,prevent:1,preview:4,process:0,pull:1,push:1,py37:1,pytest:1,python:[1,2,4],raw:[3,4],raw_data:3,refer:4,region:4,regist:[3,4],registered_atla:3,registr:4,registration_directori:4,releas:1,relev:1,remot:1,remov:3,renam:4,render:2,repeat:4,repositori:1,request:1,resav:4,resourc:[3,4],result:[],right:4,row:[],run:[1,2,3,4],sainsburi:0,sainsburywellcomecentr:[1,2,3,4],sampl:4,save:4,scene:2,scroll:4,scrollwheel:4,second:[2,3],see:[2,3,4],segent:3,segment:4,select:4,separ:3,set:1,setup:1,shape:3,should:[1,2,3],show:3,shown:4,side:4,sigma:3,singl:3,size:[2,3,4],smooth:3,softwar:[1,4],solid:4,space:[2,3,4],specifi:[2,3],squash:1,src:[3,4],stack:4,standard:4,start:[],still:4,store:[],string:4,style:1,submit:1,support:4,tab:4,tag:1,take:4,target:1,test:1,text:4,thei:4,them:4,thi:[1,2,4],third:[2,3],three:4,through:4,toggl:4,tool:[1,4],top:4,transform:4,travi:1,tutori:1,typic:3,unit_test:1,upon:1,upstream:1,usag:[2,3,4],use:1,used:[2,3,4],uses:1,using:4,usual:3,verbos:4,version:1,via:1,view:4,viewer:4,visualis:[2,4],wai:3,want:4,well:[],wellcom:0,where:4,which:3,whole:0,width:[3,4],window:4,wish:4,work:1,workflow:1,www:1,xml:[2,3],you:4,your:[1,4],your_usernam:1,zoom:4},titles:["neuro","<no title>","<no title>","<no title>","<no title>"],titleterms:{develop:0,instal:0,introduct:0,neuro:0,tool:0}}) \ No newline at end of file diff --git a/neuro/brain_render_tools.py b/neuro/brain_render_tools.py index 0d4cd4a..1c45399 100644 --- a/neuro/brain_render_tools.py +++ b/neuro/brain_render_tools.py @@ -58,17 +58,18 @@ def volume_to_vector_array_to_obj_file( # if deal_with_regions_separately: for label_id in np.unique(oriented_binary): - filename = append_to_pathlib_stem( - Path(output_path), "_" + str(label_id) - ) - image = oriented_binary == label_id - extract_and_save_object( - image, - filename, - voxel_size=voxel_size, - threshold=threshold, - step_size=step_size, - ) + if label_id != 0: + filename = append_to_pathlib_stem( + Path(output_path), "_" + str(label_id) + ) + image = oriented_binary == label_id + extract_and_save_object( + image, + filename, + voxel_size=voxel_size, + threshold=threshold, + step_size=step_size, + ) else: extract_and_save_object( oriented_binary, diff --git a/neuro/segmentation/manual_region_segmentation/segment.py b/neuro/segmentation/manual_region_segmentation/segment.py index f5eba8a..05067d9 100644 --- a/neuro/segmentation/manual_region_segmentation/segment.py +++ b/neuro/segmentation/manual_region_segmentation/segment.py @@ -7,7 +7,11 @@ from PySide2.QtWidgets import QApplication from brainrender.scene import Scene -from imlib.general.system import delete_temp, ensure_directory_exists +from imlib.general.system import ( + delete_temp, + ensure_directory_exists, + delete_directory_contents, +) from imlib.plotting.colors import get_random_vtkplotter_color @@ -18,6 +22,9 @@ from neuro.visualise.vis_tools import display_channel, prepare_load_nii from neuro.brain_render_tools import volume_to_vector_array_to_obj_file +num_colors = 10 +brush_size = 30 + class 
Paths: """ @@ -32,12 +39,6 @@ def __init__(self, registration_output_folder, downsampled_image): self.regions_directory = self.join("segmented_regions") - self.regions_object_file_basename = ( - self.regions_directory / "region.obj" - ) - - self.regions_image_file = self.regions_directory / "regions.nii" - self.tmp__inverse_transformed_image = self.join( "image_standard_space.nii" ) @@ -48,21 +49,12 @@ def __init__(self, registration_output_folder, downsampled_image): "inverse_transform_error.txt" ) - self.prep() - def join(self, filename): return self.registration_output_folder / filename - def prep(self): - ensure_directory_exists(self.regions_directory) - def run( - image, - registration_directory, - save_segmented_image=False, - preview=False, - debug=False, + image, registration_directory, preview=False, debug=False, ): paths = Paths(registration_directory, image) registration_directory = Path(registration_directory) @@ -81,7 +73,7 @@ def run( registered_image = prepare_load_nii( paths.tmp__inverse_transformed_image, memory=False ) - labels = np.empty_like(registered_image) + print("\nLoading manual segmentation GUI.\n ") print( "Please 'colour in' the regions you would like to segment. \n " @@ -97,26 +89,55 @@ def run( registration_directory, paths.tmp__inverse_transformed_image, ) - labels_layer = viewer.add_labels(labels, num_colors=20, name="Regions") - @viewer.bind_key("Control-S") + global label_layers + label_layers = [] + + if paths.regions_directory.exists(): + label_files = glob(str(paths.regions_directory) + "/*.nii") + label_layers = [] + for label_file in label_files: + label_file = Path(label_file) + labels = prepare_load_nii(label_file) + label_layer = viewer.add_labels( + labels, num_colors=num_colors, name=label_file.stem, + ) + label_layer.selected_label = 1 + label_layer.brush_size = brush_size + label_layers.append(label_layer) + else: + add_new_label_layer(viewer, registered_image, name="region") + + @viewer.bind_key("Control-N") def add_region(viewer): + print("\nAdding new region") + add_new_label_layer(viewer, registered_image, name="new_region") + + @viewer.bind_key("Control-X") + def close_viewer(viewer): + print("\nClosing viewer") + QApplication.closeAllWindows() + + @viewer.bind_key("Control-S") + def save_regions(viewer): print(f"\nSaving regions to: {paths.regions_directory}") # return image back to original orientation (reoriented for napari) - data = np.swapaxes(labels_layer.data, 2, 0) - - volume_to_vector_array_to_obj_file( - data, - paths.regions_object_file_basename, - deal_with_regions_separately=True, - ) - if save_segmented_image: + ensure_directory_exists(paths.regions_directory) + delete_directory_contents(str(paths.regions_directory)) + + for label_layer in label_layers: + data = np.swapaxes(label_layer.data, 2, 0) + name = label_layer.name + filename = paths.regions_directory / (name + ".obj") + volume_to_vector_array_to_obj_file( + data, filename, + ) + filename = paths.regions_directory / (name + ".nii") save_brain( - data, paths.downsampled_image, paths.regions_image_file, + data, paths.downsampled_image, filename, ) - print("\nClosing viewer") - QApplication.closeAllWindows() + close_viewer(viewer) if not debug: print("Deleting tempory files") @@ -131,9 +152,18 @@ def add_region(viewer): act = scene.add_from_file( obj_file, c=get_random_vtkplotter_color(), alpha=0.8 ) + act.GetProperty().SetInterpolationToFlat() scene.render() +def add_new_label_layer(viewer, base_image, name="region"): + labels = np.empty_like(base_image) + label_layer 
= viewer.add_labels(labels, num_colors=num_colors, name=name,) + label_layer.selected_label = 1 + label_layer.brush_size = brush_size + label_layers.append(label_layer) + + def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter @@ -148,13 +178,6 @@ def get_parser(): type=str, help="amap/cellfinder registration output directory", ) - parser.add_argument( - "--save-image", - dest="save_image", - action="store_true", - help="Store the resulting segmented region image (e.g. for inspecting " - "in 2D.", - ) parser.add_argument( "--preview", dest="preview", @@ -176,7 +199,6 @@ def main(): run( args.image, args.registration_directory, - save_segmented_image=args.save_image, preview=args.preview, debug=args.debug, ) diff --git a/setup.py b/setup.py index 984c83c..a8e9f16 100644 --- a/setup.py +++ b/setup.py @@ -6,14 +6,14 @@ "pandas<=0.25.3,>=0.25.1", "napari", "brainrender", - "imlib >= 0.0.18", + "imlib >= 0.0.20", "brainio", ] setup( name="neuro", - version="0.0.8rc1", + version="0.0.8", description="Visualisation and analysis of brain imaging data", install_requires=requirements, extras_require={
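The `segment.py` changes above replace the single shared labels layer with one napari labels layer per region, added with Control+N, painted with the brush, and written out with Control+S. Below is a minimal, self-contained sketch of that layer-per-region pattern. It reuses the calls that appear in the diff (`add_labels`, `bind_key`, `selected_label`, `brush_size`); the random test volume and the zero-initialised label array are stand-ins for illustration, not part of the package.

```python
import numpy as np
import napari

num_colors = 10
brush_size = 30
label_layers = []


def add_new_label_layer(viewer, base_image, name="new_region"):
    # empty integer volume with the same shape as the image, ready for painting
    labels = np.zeros(base_image.shape, dtype=np.int32)
    layer = viewer.add_labels(labels, num_colors=num_colors, name=name)
    layer.selected_label = 1
    layer.brush_size = brush_size
    label_layers.append(layer)
    return layer


with napari.gui_qt():
    viewer = napari.Viewer()
    # stand-in for the image transformed into standard space
    base_image = np.random.random((50, 200, 200))
    viewer.add_image(base_image, name="image_standard_space")
    add_new_label_layer(viewer, base_image)

    @viewer.bind_key("Control-N")
    def add_region(viewer):
        add_new_label_layer(viewer, base_image, name="new_region")

    @viewer.bind_key("Control-S")
    def save_regions(viewer):
        # manual_region_seg saves each layer as <name>.obj and <name>.nii;
        # here we just report what was painted
        for layer in label_layers:
            print(layer.name, np.count_nonzero(layer.data), "painted voxels")
```

Each added layer keeps its own name, which manual_region_seg later uses as the filename stem when saving that region.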