Skip to content

Commit

Permalink
Merge branch 'master' into patch-1
Browse files Browse the repository at this point in the history
  • Loading branch information
constantinpape authored Oct 17, 2024
2 parents 2853952 + 7ff36af commit 2d8e5cc
Show file tree
Hide file tree
Showing 9 changed files with 143 additions and 61 deletions.
24 changes: 8 additions & 16 deletions .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,32 +15,24 @@ jobs:
fail-fast: false
matrix:
os: [macos-latest, windows-latest, ubuntu-latest]
python-version: [3.7, 3.8, 3.9]
python-version: [3.10, 3.11]

steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4

- name: Setup micromamba
uses: mamba-org/setup-micromamba@v1
with:
environment-file: .github/workflows/environment.yaml
python-version: ${{ matrix.python-version }}

# this will set the system compiler;
# I don't know how to set the conda compilers for windows
- name: Set windows env
if: matrix.os == 'windows-latest'
uses: ilammy/msvc-dev-cmd@v1

- name: Setup miniconda
uses: conda-incubator/setup-miniconda@v2
with:
activate-environment: nifty-build-env
auto-update-conda: true
channels: conda-forge
environment-file: .github/workflows/environment.yaml
python-version: ${{ matrix.python-version }}
use-mamba: true
mamba-version: "*"
auto-activate-base: false
env:
ACTIONS_ALLOW_UNSECURE_COMMANDS: true

- name: Build linux
if: matrix.os == 'ubuntu-latest'
shell: bash -l {0}
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/environment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dependencies:
- h5py
- nlohmann_json
- scikit-image
- xtensor>=0.21,<0.22
- xtensor-python>=0.24,<0.25
- xtensor
- xtensor-python
- vigra
- z5py
Original file line number Diff line number Diff line change
Expand Up @@ -40,60 +40,60 @@ void accumulateAffninitiesWithAccChain(const RAG & rag,
auto nThreads = threadpool.nThreads();
ThreadAccChainVectorType edgeAccumulators(nThreads);

vigra::HistogramOptions histogram_opt;
histogram_opt = histogram_opt.setMinMax(accOptions.minVal, accOptions.maxVal);

parallel::parallel_foreach(threadpool, nThreads,
[&](int tid, int threadId){
auto & thisAccumulators = edgeAccumulators[threadId];
thisAccumulators = AccChainVectorType(nEdges);
// set the histogram options
if(accOptions.setMinMax){
// set the histogram options
if(accOptions.setMinMax){
vigra::HistogramOptions histogram_opt;
histogram_opt = histogram_opt.setMinMax(accOptions.minVal, accOptions.maxVal);
parallel::parallel_foreach(threadpool, nThreads, [&](int tid, int threadId){
auto & thisAccumulators = edgeAccumulators[threadId];
thisAccumulators = AccChainVectorType(nEdges);
for(std::size_t edgeId; edgeId < nEdges; ++edgeId) {
thisAccumulators[edgeId].setHistogramOptions(histogram_opt);
}
}
});
});
}

int pass = 1;
const auto passesRequired = edgeAccumulators.front().front().passesRequired();

// iterate over all affinity links and accumulate the associated
// affinity edges
tools::parallelForEachCoordinate(threadpool, affShape, [&](int tid, const Coord4 & affCoord) {
for(int pass = 1; pass <= passesRequired; ++pass) {
// iterate over all affinity links and accumulate the associated
// affinity edges
tools::parallelForEachCoordinate(threadpool, affShape, [&](int tid, const Coord4 & affCoord) {

Coord3 cU, cV;
VigraCoord vc;
const auto & offset = offsets[affCoord[0]];
Coord3 cU, cV;
VigraCoord vc;
const auto & offset = offsets[affCoord[0]];

for(int d = 0; d < DIM; ++d) {
cU[d] = affCoord[d+1];
cV[d] = affCoord[d+1] + offset[d];
// range check
if(cV[d] < 0 || cV[d] >= shape[d]) {
return;
for(int d = 0; d < DIM; ++d) {
cU[d] = affCoord[d+1];
cV[d] = affCoord[d+1] + offset[d];
// range check
if(cV[d] < 0 || cV[d] >= shape[d]) {
return;
}
}
}

const auto u = xtensor::read(labels, cU.asStdArray());
const auto v = xtensor::read(labels, cV.asStdArray());
const auto u = xtensor::read(labels, cU.asStdArray());
const auto v = xtensor::read(labels, cV.asStdArray());

// only do stuff if the labels are different
if(u != v) {
// only do stuff if the labels are different
if(u != v) {

auto & thisAccumulators = edgeAccumulators[tid];
// we just update the vigra coord of label u
for(int d = 0; d < DIM; ++d) {
vc = cU[d];
}
auto & thisAccumulators = edgeAccumulators[tid];
// we just update the vigra coord of label u
for(int d = 0; d < DIM; ++d) {
vc = cU[d];
}

const double val = xtensor::read(affinities, affCoord.asStdArray());
const int64_t e = rag.findEdge(u, v);
// For long range affinities, edge might not be in the rag
if(e != -1) {
thisAccumulators[e].updatePassN(val, vc, pass);
const double val = xtensor::read(affinities, affCoord.asStdArray());
const int64_t e = rag.findEdge(u, v);
// For long range affinities, edge might not be in the rag
if(e != -1) {
thisAccumulators[e].updatePassN(val, vc, pass);
}
}
}
});
});
}

// merge accumulators
auto & resultAccVec = edgeAccumulators.front();
Expand Down
2 changes: 0 additions & 2 deletions include/nifty/graph/rag/grid_rag_accumulate.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,6 @@ namespace graph{
});

if(accOptions.setMinMax){
vigra::HistogramOptions histogram_opt;
histogram_opt = histogram_opt.setMinMax(accOptions.minVal, accOptions.maxVal);
parallel::parallel_foreach(threadpool, actualNumberOfThreads,
[&](int tid, int i){

Expand Down
37 changes: 37 additions & 0 deletions include/nifty/tools/rle.hxx
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#pragma once
#include <vector>

#include "nifty/xtensor/xtensor.hxx"


namespace nifty{
namespace tools{


// Compute the run-length encoding of a flattened binary array.
//
// Counts alternate between runs of 0s and runs of 1s, always starting
// with the length of the leading 0-run (COCO-style RLE convention):
// if the data begins with a 1, a zero-length 0-run is emitted first.
//
// @param dataExp  flattened input array; values are compared for
//                 equality, expected to be 0/1 (e.g. a boolean mask)
// @param counts   output vector; run lengths are appended in order
template<class ARRAY>
void computeRLE(xt::xexpression<ARRAY> & dataExp, std::vector<int> & counts){
    auto & data = dataExp.derived_cast();

    // Guard: reading data[0] below would be undefined for an empty array.
    if(data.size() == 0) {
        return;
    }

    auto val = data[0];
    // The encoding must start with the length of the initial 0-run;
    // if the data starts with a 1, that run has length 0.
    if(val == 1) {
        counts.push_back(0);
    }

    int count = 0;
    for(const auto & m : data) {
        if(val == m) {
            ++count;
        } else {
            // value changed: close the previous run, start a new one
            // (the current element already counts, hence count = 1)
            val = m;
            counts.push_back(count);
            count = 1;
        }
    }
    // flush the final run
    counts.push_back(count);
}


}
}
1 change: 1 addition & 0 deletions src/python/lib/tools/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ addPythonModule(
map_dict.cxx
merge_helper.cxx
label_multiset.cxx
rle.cxx
LIBRRARIES
${HDF5_LIBRARIES}
${Z5_COMPRESSION_LIBRARIES}
Expand Down
40 changes: 40 additions & 0 deletions src/python/lib/tools/rle.cxx
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#include <pybind11/pybind11.h>
#include <iostream>
#include <sstream>
#include <pybind11/numpy.h>
#include <pybind11/stl.h>

#include <typeinfo> // to debug atm

#include "xtensor-python/pyarray.hpp"
#include "nifty/tools/rle.hxx"

namespace py = pybind11;



namespace nifty{
namespace tools{

// Export a computeRLE binding for element type T.
// The returned std::vector<int> is converted by pybind11 into a
// Python list of run lengths.
template<class T>
void exportComputeRLET(py::module & toolsModule) {

    toolsModule.def("computeRLE",
        [](xt::pyarray<T> & data){
            std::vector<int> runLengths;
            {
                // The computation itself touches no Python objects,
                // so the GIL can be released while it runs.
                py::gil_scoped_release allowThreads;
                tools::computeRLE(data, runLengths);
            }
            return runLengths;
        });
}


// Register the Python-facing computeRLE binding on the tools module.
// NOTE(review): only boolean (mask) arrays are instantiated here;
// supporting other dtypes would need further exportComputeRLET<T> calls.
void exportComputeRLE(py::module & toolsModule) {
    exportComputeRLET<bool>(toolsModule);
}

}
}
4 changes: 4 additions & 0 deletions src/python/lib/tools/tools.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ namespace tools{
void exportMapDictionaryToArray(py::module &);
void exportMergeHelper(py::module &);
void exportLabelMultiset(py::module &);
void exportComputeRLE(py::module &);

}
}

Expand All @@ -49,4 +51,6 @@ PYBIND11_MODULE(_tools, toolsModule) {
exportMapDictionaryToArray(toolsModule);
exportMergeHelper(toolsModule);
exportLabelMultiset(toolsModule);
exportComputeRLE(toolsModule);

}
10 changes: 10 additions & 0 deletions src/python/test/graph/rag/test_accumulate.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import platform
import unittest

import numpy as np
Expand All @@ -23,6 +24,15 @@ def test_accumulate_mean_and_length_3d(self):
res = nrag.accumulateEdgeMeanAndLength(rag, data)
self.assertTrue(np.sum(res) != 0)

@unittest.skipIf(platform.system() in ("Darwin", "Windows"), "Test fails on Mac and Windows")
def test_accumulate_affinities(self):
    # Smoke test: accumulate affinity features over a RAG built from
    # random labels and check the result is non-trivial (non-zero sum).
    labels = np.random.randint(0, 100, size=self.shape_2d, dtype='uint32')
    rag = nrag.gridRag(labels, numberOfLabels=100)
    # Nearest-neighbour plus longer-range (stride-3) 2D offsets; the
    # affinity array has one leading channel per offset.
    offsets = [[-1, 0], [0, -1], [-3, 0], [0, -3]]
    aff_shape = (len(offsets),) + labels.shape
    data = np.random.random_sample(aff_shape).astype('float32')
    # NOTE(review): "Standart" is the spelling of the actual exported
    # binding name — do not "correct" it here.
    res = nrag.accumulateAffinityStandartFeatures(rag, data, offsets, 0.0, 1.0)
    self.assertTrue(np.sum(res) != 0)


if __name__ == '__main__':
Expand Down

0 comments on commit 2d8e5cc

Please sign in to comment.