Skip to content

Commit 7dbc51c

Browse files
kgajdamo and rusty1s
authored
Replace unordered_map with a faster version (rusty1s#254)
* Replace unordered_map with a faster version
* Clone repo recursively when testing

Co-authored-by: Matthias Fey <[email protected]>
1 parent 63b75d8 commit 7dbc51c

File tree

6 files changed

+17
-3
lines changed

6 files changed

+17
-3
lines changed

.github/workflows/testing.yml

+3
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@ jobs:
2020

2121
steps:
2222
- uses: actions/checkout@v2
23+
with:
24+
submodules: 'recursive'
25+
2326
- name: Set up Python ${{ matrix.python-version }}
2427
uses: actions/setup-python@v2
2528
with:

.gitmodules

+3
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
[submodule "third_party/parallel-hashmap"]
2+
path = third_party/parallel-hashmap
3+
url = https://github.com/greg7mdp/parallel-hashmap.git

CMakeLists.txt

+3
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,9 @@ target_include_directories(${PROJECT_NAME} INTERFACE
3939
include(GNUInstallDirs)
4040
include(CMakePackageConfigHelpers)
4141

42+
set(PHMAP_DIR third_party/parallel-hashmap)
43+
target_include_directories(${PROJECT_NAME} PRIVATE ${PHMAP_DIR})
44+
4245
set(TORCHSPARSE_CMAKECONFIG_INSTALL_DIR "share/cmake/TorchSparse" CACHE STRING "install path for TorchSparseConfig.cmake")
4346

4447
configure_package_config_file(cmake/TorchSparseConfig.cmake.in

csrc/cpu/neighbor_sample_cpu.cpp

+4-2
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@
22

33
#include "utils.h"
44

5+
#include "parallel_hashmap/phmap.h"
6+
57
#ifdef _WIN32
68
#include <process.h>
79
#endif
@@ -17,7 +19,7 @@ sample(const torch::Tensor &colptr, const torch::Tensor &row,
1719

1820
// Initialize some data structures for the sampling process:
1921
vector<int64_t> samples;
20-
unordered_map<int64_t, int64_t> to_local_node;
22+
phmap::flat_hash_map<int64_t, int64_t> to_local_node;
2123

2224
auto *colptr_data = colptr.data_ptr<int64_t>();
2325
auto *row_data = row.data_ptr<int64_t>();
@@ -93,7 +95,7 @@ sample(const torch::Tensor &colptr, const torch::Tensor &row,
9395
}
9496

9597
if (!directed) {
96-
unordered_map<int64_t, int64_t>::iterator iter;
98+
phmap::flat_hash_map<int64_t, int64_t>::iterator iter;
9799
for (int64_t i = 0; i < (int64_t)samples.size(); i++) {
98100
const auto &w = samples[i];
99101
const auto &col_start = colptr_data[w];

setup.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -103,11 +103,13 @@ def get_extensions():
103103
if suffix == 'cuda' and osp.exists(path):
104104
sources += [path]
105105

106+
phmap_dir = "third_party/parallel-hashmap"
107+
106108
Extension = CppExtension if suffix == 'cpu' else CUDAExtension
107109
extension = Extension(
108110
f'torch_sparse._{name}_{suffix}',
109111
sources,
110-
include_dirs=[extensions_dir],
112+
include_dirs=[extensions_dir, phmap_dir],
111113
define_macros=define_macros,
112114
extra_compile_args=extra_compile_args,
113115
extra_link_args=extra_link_args,

third_party/parallel-hashmap

Submodule parallel-hashmap added at 01ea809

0 commit comments

Comments (0)