V0.2.0 #13

Merged 10 commits on Apr 16, 2024
7 changes: 5 additions & 2 deletions .github/workflows/tests.yml
@@ -11,14 +11,17 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-22.04, windows-2022]
os: [ubuntu-22.04, windows-2022, macos-14]
python-version: ['3.9', '3.10', '3.11', '3.12']
exclude:
- os: macos-14
python-version: '3.9' #macos-14 & py39 not supported by setup-python@v5
steps:
- uses: actions/checkout@v4
with:
submodules: True
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
4 changes: 2 additions & 2 deletions .github/workflows/wheels.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-22.04, windows-2022, macos-12]
os: [ubuntu-22.04, windows-2022, macos-14]
fail-fast: false

steps:
@@ -24,7 +24,7 @@
submodules: True

# Used to host cibuildwheel
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5

- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.15
4 changes: 2 additions & 2 deletions include/nn_search.hpp
@@ -32,7 +32,7 @@ template <typename real_t>
static std::pair<nb::ndarray<nb::numpy, uint32_t, nb::ndim<2>>, nb::ndarray<nb::numpy, real_t, nb::ndim<2>>>
nanoflann_knn_search(RefCloud<real_t> data, RefCloud<real_t> query, const uint32_t knn)
{
using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<RefCloud<real_t>, 3, nanoflann::metric_L2>;
using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<RefCloud<real_t>, 3, nanoflann::metric_L2_Simple>;

if (knn > data.rows()) { throw std::invalid_argument("knn size is greater than the data point cloud size"); }

@@ -87,7 +87,7 @@ static std::pair<nb::ndarray<nb::numpy, int32_t, nb::ndim<2>>, nb::ndarray<nb::numpy, real_t, nb::ndim<2>>>
nanoflann_radius_search(
RefCloud<real_t> data, RefCloud<real_t> query, const real_t search_radius, const uint32_t max_knn)
{
using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<RefCloud<real_t>, 3, nanoflann::metric_L2>;
using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<RefCloud<real_t>, 3, nanoflann::metric_L2_Simple>;

if (max_knn > data.rows())
{
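
For context on the metric swap above: nanoflann's `metric_L2_Simple` is the plain squared-Euclidean functor recommended for low-dimensional (2D/3D) data, whereas `metric_L2` unrolls the accumulation loop, which only pays off at higher dimensions. Below is a minimal standalone sketch of the same adaptor plus a knn query. It is not pgeof code; it assumes Eigen and a recent nanoflann release in which the adaptor exposes an `index_` member, as the diff in this PR does.

```cpp
#include <Eigen/Dense>
#include <nanoflann.hpp>

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    using Cloud = Eigen::Matrix<float, Eigen::Dynamic, 3>;
    // metric_L2_Simple: plain squared Euclidean distance, the usual choice for 3D clouds.
    using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<Cloud, 3, nanoflann::metric_L2_Simple>;

    const Cloud data = Cloud::Random(1000, 3);
    kd_tree_t kd_tree(3, data, 10);  // dimensionality 3, leaf size 10, as in nn_search.hpp

    const uint32_t knn = 5;
    const float query_pt[3] = {0.0f, 0.0f, 0.0f};
    std::vector<Eigen::Index> indices(knn);
    std::vector<float> sq_dists(knn);

    // Fill a fixed-size result set with the knn nearest neighbors of the query point.
    nanoflann::KNNResultSet<float, Eigen::Index> result_set(knn);
    result_set.init(indices.data(), sq_dists.data());
    kd_tree.index_->findNeighbors(result_set, query_pt);

    for (uint32_t i = 0; i < knn; ++i)
        std::printf("neighbor %u: index %ld, squared distance %f\n", i, static_cast<long>(indices[i]), sq_dists[i]);
    return 0;
}
```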
25 changes: 16 additions & 9 deletions include/pca.hpp
@@ -27,9 +27,12 @@ using DRefMatrixCloud = nb::DRef<const MatrixCloud<real_t>>;

// epsilon definition, for now same for float an double
// the eps is meant to stabilize the division when the cloud's 3rd eigenvalue is near 0
template <typename real_t> constexpr real_t epsilon;
template <> constexpr float epsilon<float> = 1e-3f;
template <> constexpr double epsilon<double> = 1e-3;
template <typename real_t>
constexpr real_t epsilon;
template <>
constexpr float epsilon<float> = 1e-3f;
template <>
constexpr double epsilon<double> = 1e-3;

template <typename real_t>
struct PCAResult
@@ -70,10 +70,10 @@ static inline PCAResult<real_t> pca_from_pointcloud(const PointCloud<real_t>& cloud)
{
// Compute the (3, 3) covariance matrix
const PointCloud<real_t> centered_cloud = cloud.rowwise() - cloud.colwise().mean();
const Eigen::Matrix<real_t, 3, 3> cov = (centered_cloud.adjoint() * centered_cloud) / real_t(cloud.rows());
const Eigen::Matrix<real_t, 3, 3> cov = (centered_cloud.transpose() * centered_cloud) / real_t(cloud.rows());

// Compute the eigenvalues and eigenvectors of the covariance
Eigen::EigenSolver<Eigen::Matrix<real_t, 3, 3>> es(cov);
Eigen::SelfAdjointEigenSolver<Eigen::Matrix<real_t, 3, 3>> es(cov);

// Sort the values and vectors in order of increasing eigenvalue
const auto ev = es.eigenvalues().real();
@@ -146,7 +149,9 @@ static inline real_t compute_eigentropy(const PCAResult<real_t>& pca)
// http://lareg.ensg.eu/labos/matis/pdf/articles_revues/2015/isprs_wjhm_15.pdf
const real_t val_sum = pca.val.sum() + epsilon<real_t>;
const Vec3<real_t> e = pca.val / val_sum;
return (-e(0) * std::log(e(0) + epsilon<real_t>) - e(1) * std::log(e(1) + epsilon<real_t>) - e(2) * std::log(e(2) + epsilon<real_t>));
return (
-e(0) * std::log(e(0) + epsilon<real_t>) - e(1) * std::log(e(1) + epsilon<real_t>) -
e(2) * std::log(e(2) + epsilon<real_t>));
};

/**
@@ -222,8 +227,8 @@ void compute_selected_features(
const real_t val2 = std::sqrt(pca.val(2));
const real_t val0_fact = real_t(1.0) / (val0 + epsilon<real_t>);

const auto compute_feature = [val0, val1, val2, val0_fact, &pca](
const EFeatureID feature_id, const size_t output_id, auto* feature_results)
const auto compute_feature =
[val0, val1, val2, val0_fact, &pca](const EFeatureID feature_id, const size_t output_id, auto* feature_results)
{
switch (feature_id)
{
@@ -253,7 +258,7 @@ void compute_selected_features(
break;
case EFeatureID::Volume:
feature_results[output_id] = std::pow(
val0 * val1 * val2 + real_t(1e-8),
val0 * val1 * val2 + real_t(1e-9),
real_t(1.) / real_t(3.)); // 1e-9 eps is a too small value for float32 so we fallback to 1e-8
break;
case EFeatureID::Curvature:
@@ -284,8 +289,10 @@ void compute_selected_features(
// The verticality as defined in most of the papers
// http://lareg.ensg.eu/labos/matis/pdf/articles_revues/2015/isprs_wjhm_15.pdf
feature_results[output_id] = real_t(1.0) - std::abs(pca.v2(2));
break;
case EFeatureID::Eigentropy:
feature_results[output_id] = compute_eigentropy(pca);
break;
default:
break;
}
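
Two of the pca.hpp changes are worth spelling out: for a real-valued matrix, `transpose()` and `adjoint()` are the same operation, and because the covariance is symmetric, `Eigen::SelfAdjointEigenSolver` is the appropriate solver, returning real eigenvalues already sorted in increasing order (no `.real()` extraction or manual sorting). A small self-contained illustration, using Eigen only and not taken from pgeof itself:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Toy cloud: one 3D point per row, analogous to PointCloud<real_t> in pca.hpp.
    const Eigen::MatrixX3d cloud = Eigen::MatrixX3d::Random(100, 3);

    // Center the cloud and form the 3x3 covariance; transpose() equals adjoint()
    // for real scalars, so the change in the diff does not alter the result.
    const Eigen::MatrixX3d centered = cloud.rowwise() - cloud.colwise().mean();
    const Eigen::Matrix3d cov = (centered.transpose() * centered) / double(cloud.rows());

    // The covariance is symmetric, so SelfAdjointEigenSolver applies: eigenvalues
    // come back real and in increasing order.
    const Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(cov);
    std::cout << "eigenvalues:  " << es.eigenvalues().transpose() << "\n";
    std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";
    return 0;
}
```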
52 changes: 26 additions & 26 deletions include/pgeof.hpp
@@ -321,49 +321,49 @@ static nb::ndarray<nb::numpy, real_t, nb::shape<nb::any, nb::any>> compute_geometric_features_selected(
RefCloud<real_t> xyz, const real_t search_radius, const uint32_t max_knn,
const std::vector<EFeatureID>& selected_features)
{
using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<RefCloud<real_t>, 3, nanoflann::metric_L2>;
using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<RefCloud<real_t>, 3, nanoflann::metric_L2_Simple>;
// TODO: where knn < num of points
kd_tree_t kd_tree(3, xyz, 10);
const real_t sq_search_radius = search_radius * search_radius;
const size_t feature_count = selected_features.size();

const Eigen::Index n_points = xyz.rows();
real_t* features = (real_t*)calloc(n_points * feature_count, sizeof(real_t));
std::fill(features, features + (n_points * feature_count), real_t(0.0));
kd_tree_t kd_tree(3, xyz, 10);
const size_t feature_count = selected_features.size();
const Eigen::Index n_points = xyz.rows();
real_t sq_search_radius = search_radius * search_radius;

real_t* features = (real_t*)calloc(n_points * feature_count, sizeof(real_t));
nb::capsule owner_features(features, [](void* f) noexcept { delete[] (real_t*)f; });

tf::Executor executor;
tf::Taskflow taskflow;

taskflow.for_each_index(
Eigen::Index(0), n_points, Eigen::Index(1),
[&](Eigen::Index point_id)
{
nanoflann::RKNNResultSet<real_t, int32_t, uint32_t> result_set(max_knn, sq_search_radius);
std::vector<nanoflann::ResultItem<Eigen::Index, real_t>> result_set;

std::vector<int32_t> indices(max_knn, -1); // TODO: maybe int64
std::vector<real_t> sqr_dist(max_knn, real_t(0.0));
nanoflann::RadiusResultSet<real_t, Eigen::Index> radius_result_set(sq_search_radius, result_set);
const auto num_found =
kd_tree.index_->radiusSearchCustomCallback(xyz.row(point_id).data(), radius_result_set);

result_set.init(&indices[0], &sqr_dist[0]);
kd_tree.index_->findNeighbors(result_set, xyz.row(point_id).data());
// not enough point, no feature computation
if (num_found < 2) return;

std::vector<int32_t> final_indices;
for (size_t i = 0; i < max_knn; ++i)
// partial sort for max_knn
if (num_found > max_knn)
{
if (indices[i] == -1) { break; }
final_indices.push_back(indices[i]);
std::partial_sort(
result_set.begin(), result_set.begin() + max_knn, result_set.end(), nanoflann::IndexDist_Sorter());
}

if (final_indices.size() > 2)
{
const PointCloud<real_t> cloud = xyz(final_indices, Eigen::all);
const PCAResult<real_t> pca = pca_from_pointcloud(cloud);
compute_selected_features(pca, selected_features, &features[point_id * feature_count]);
}
},
tf::StaticPartitioner(0));
const size_t num_nn = std::min(static_cast<uint32_t>(num_found), max_knn);

PointCloud<real_t> cloud(num_nn, 3);
for (size_t id = 0; id < num_nn; ++id) { cloud.row(id) = xyz.row(result_set[id].first); }
const PCAResult<real_t> pca = pca_from_pointcloud(cloud);
compute_selected_features(pca, selected_features, &features[point_id * feature_count]);
});
executor.run(taskflow).get();

const size_t shape[2] = {static_cast<size_t>(n_points), feature_count};
return nb::ndarray<nb::numpy, real_t, nb::shape<nb::any, nb::any>>(features, 2, shape, owner_features);
return nb::ndarray<nb::numpy, real_t, nb::shape<nb::any, nb::any>>(features, {static_cast<size_t>(n_points), feature_count}, owner_features);
}
} // namespace pgeof
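
The rewritten loop body replaces the bounded `RKNNResultSet` query with a true radius search followed by a partial sort that keeps only the `max_knn` closest hits. Below is a standalone sketch of that pattern for a single query point; it is not pgeof code, and it assumes Eigen plus a recent nanoflann that provides `ResultItem`, `RadiusResultSet`, `radiusSearchCustomCallback`, and `IndexDist_Sorter`, the same calls used in the diff.

```cpp
#include <Eigen/Dense>
#include <nanoflann.hpp>

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    using Cloud = Eigen::Matrix<float, Eigen::Dynamic, 3>;
    using kd_tree_t = nanoflann::KDTreeEigenMatrixAdaptor<Cloud, 3, nanoflann::metric_L2_Simple>;

    const Cloud xyz = Cloud::Random(10000, 3);
    kd_tree_t kd_tree(3, xyz, 10);

    const float search_radius = 0.2f;
    const float sq_search_radius = search_radius * search_radius;  // L2 metrics work on squared distances
    const uint32_t max_knn = 50;
    const float query_pt[3] = {0.0f, 0.0f, 0.0f};

    // Gather every neighbor inside the sphere...
    std::vector<nanoflann::ResultItem<Eigen::Index, float>> result_set;
    nanoflann::RadiusResultSet<float, Eigen::Index> radius_result_set(sq_search_radius, result_set);
    const auto num_found = kd_tree.index_->radiusSearchCustomCallback(query_pt, radius_result_set);

    // ...then keep only the max_knn closest hits, as the new loop body does.
    if (num_found > max_knn)
    {
        std::partial_sort(
            result_set.begin(), result_set.begin() + max_knn, result_set.end(), nanoflann::IndexDist_Sorter());
        result_set.resize(max_knn);
    }
    std::printf("kept %zu of %zu neighbors\n", result_set.size(), static_cast<size_t>(num_found));
    return 0;
}
```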
8 changes: 2 additions & 6 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build"

[project]
name = "pgeof"
version = "0.1.0"
version = "0.2.0"
readme = "README.md"
description = "Compute the geometric features associated with each point's neighborhood:"
requires-python = ">=3.8,<3.13"
@@ -58,11 +58,7 @@ commands = pytest --basetemp="{envtmpdir}" {posargs}

[testenv:bench]
# globs/wildcards do not work with tox
commands = pytest --basetemp="{envtmpdir}" {posargs:tests/bench_knn.py tests/bench_jakteristics.py}

[testenv:ls]
skipbuild=true
commands = ls {envtmpdir}
commands = pytest -s --basetemp="{envtmpdir}" {posargs:tests/bench_knn.py tests/bench_jakteristics.py}
"""

[tool.cibuildwheel]
6 changes: 3 additions & 3 deletions src/pgeof_ext.cpp
@@ -130,7 +130,7 @@ NB_MODULE(pgeof_ext, m)
)");
m.def(
"radius_search", &pgeof::nanoflann_radius_search<float>, "data"_a.noconvert(), "query"_a.noconvert(),
"search_radius"_a, "knn"_a = 200, R"(
"search_radius"_a, "max_knn"_a, R"(
Search for the points within a specified sphere in a point cloud.

It could be a fallback replacement for FRNN into SuperPointTransformer code base.
@@ -148,7 +148,7 @@ NB_MODULE(pgeof_ext, m)
)");
m.def(
"compute_features_selected", &pgeof::compute_geometric_features_selected<double>, "xyz"_a.noconvert(),
"search_radius"_a, "knn"_a, "selected_features"_a, R"(
"search_radius"_a, "max_knn"_a, "selected_features"_a, R"(
Compute a selected set of geometric features for a point cloud via radius search.

This function aims to mimick the behavior of jakteristics and provide an efficient way
@@ -163,7 +163,7 @@ NB_MODULE(pgeof_ext, m)
)");
m.def(
"compute_features_selected", &pgeof::compute_geometric_features_selected<float>, "xyz"_a.noconvert(),
"search_radius"_a, "knn"_a, "selected_features"_a, R"(
"search_radius"_a, "max_knn"_a, "selected_features"_a, R"(
Compute a selected set of geometric features for a point cloud via radius search.

This function aims to mimic the behavior of jakteristics and provide an efficient way
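
The binding changes above only rename the keyword argument to `max_knn` and drop the `= 200` default from `radius_search`. For readers unfamiliar with nanobind's `_a` literals, here is a minimal, hypothetical module (the module and function names are invented for illustration) showing how keyword names, defaults, and docstrings are attached:

```cpp
#include <nanobind/nanobind.h>

namespace nb = nanobind;
using namespace nb::literals;

static int clamp_knn(int knn, int max_knn) { return knn < max_knn ? knn : max_knn; }

NB_MODULE(example_ext, m)
{
    // "knn"_a exposes the Python keyword name; "max_knn"_a = 200 also attaches a
    // default value, which is the mechanism radius_search previously used for knn.
    m.def(
        "clamp_knn", &clamp_knn, "knn"_a, "max_knn"_a = 200,
        "Clamp a neighbor count to an upper bound.");
}
```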
2 changes: 1 addition & 1 deletion tests/bench_knn.py
@@ -7,7 +7,7 @@

@pytest.fixture
def random_point_cloud():
return np.random.rand(10000000, 3).astype("float32")
return np.random.rand(100000, 3).astype("float32")


@pytest.mark.benchmark(group="knn", disable_gc=True, warmup=True)
2 changes: 2 additions & 0 deletions tests/test_pgeof.py
@@ -14,6 +14,7 @@ def test_knn():
k_new, _ = pgeof.knn_search(xyz, xyz, knn)
np.testing.assert_equal(k_legacy, k_new)


def test_radius_search():
knn = 10
radius = 0.2
@@ -25,6 +26,7 @@ def test_radius_search():
k_new, _ = pgeof.radius_search(xyz, xyz, radius, knn)
np.testing.assert_equal(k_legacy, k_new)


def test_pgeof_multiscale():
# Generate a random synthetic point cloud and NNs
xyz, nn, nn_ptr = random_nn(10000, 50)