Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Avoid converting Decimal32/Decimal64 in to_arrow and from_arrow APIs #17422

Merged
merged 39 commits into branch-25.02 from decimal32-decimal64
Jan 29, 2025
Merged
Show file tree
Hide file tree
Changes from 35 commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
0724fd2
Fix Debug-mode failing Arrow test
zeroshade Nov 21, 2024
3a45ca0
remove unused iostream
zeroshade Nov 21, 2024
6cab3fe
Merge branch 'rapidsai:branch-25.02' into branch-25.02
zeroshade Jan 17, 2025
67da453
Avoid converting decimal32/decimal64
zeroshade Nov 22, 2024
eab0b1e
style fix and cleanup DeviceType
zeroshade Dec 4, 2024
f2f63ae
updates from comments
zeroshade Dec 9, 2024
5fe9658
add comment to explain max precision subtraction
zeroshade Dec 9, 2024
bd73d6a
shift lambda to its own func
zeroshade Dec 10, 2024
7f89cdf
convert to device_storage_type_t
zeroshade Dec 10, 2024
b3e75ea
shift around and fix includes
zeroshade Dec 10, 2024
a2ca1a8
ran pre-commit
zeroshade Dec 10, 2024
e5863a7
update deps and fix compile issue
zeroshade Jan 17, 2025
7102a6d
Merge branch 'branch-25.02' into decimal32-decimal64
zeroshade Jan 17, 2025
033d472
Update cpp/src/interop/to_arrow_host.cu
zeroshade Jan 17, 2025
92be030
Update cpp/src/interop/from_arrow_host.cu
zeroshade Jan 17, 2025
aa68c51
use get_decimal_precision
zeroshade Jan 17, 2025
7a35ad2
use helper function in other spots
zeroshade Jan 17, 2025
1ef986f
ran pre-commit for linting checks
zeroshade Jan 17, 2025
37609d0
Merge branch 'branch-25.02' into decimal32-decimal64
zeroshade Jan 17, 2025
0457765
missed one
zeroshade Jan 17, 2025
5e74516
pre-commit is mean
zeroshade Jan 17, 2025
4fced62
fix tests
zeroshade Jan 19, 2025
e7d44b3
Merge branch 'branch-25.02' into decimal32-decimal64
zeroshade Jan 19, 2025
2bff31a
skip decimal32 and decimal64 python tests if using pyarrow < 19
zeroshade Jan 20, 2025
22dffe3
Merge branch 'branch-25.02' into decimal32-decimal64
zeroshade Jan 20, 2025
83aa9ad
skip decimal64 java tests until arrow-java supports it
zeroshade Jan 20, 2025
74e9119
Merge branch 'branch-25.02' into decimal32-decimal64
davidwendt Jan 22, 2025
e9d3ae8
Update dependencies.yaml
zeroshade Jan 22, 2025
570e416
Update dependencies.yaml
zeroshade Jan 22, 2025
a23fc13
Merge branch 'branch-25.02' into decimal32-decimal64
davidwendt Jan 22, 2025
ca848d6
fixing style issues
zeroshade Jan 22, 2025
be95a39
fix styles
zeroshade Jan 22, 2025
8a9579d
skip failing test
zeroshade Jan 24, 2025
162636a
Merge branch 'branch-25.02' into decimal32-decimal64
zeroshade Jan 24, 2025
c704ecd
Merge branch 'branch-25.02' into decimal32-decimal64
galipremsagar Jan 25, 2025
843c36f
Apply suggestions from code review
galipremsagar Jan 25, 2025
e625228
Update python/cudf/cudf/tests/test_scalar.py
galipremsagar Jan 25, 2025
f6b627b
Merge branch 'branch-25.02' into decimal32-decimal64
galipremsagar Jan 25, 2025
23a8204
Merge branch 'branch-25.02' into decimal32-decimal64
bdice Jan 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion conda/recipes/cudf/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ requirements:
- numba-cuda >=0.2.0,<0.3.0a0
- numba >=0.59.1,<0.61.0a0
- numpy >=1.23,<3.0a0
- pyarrow>=14.0.0,<18.0.0a0
- pyarrow>=14.0.0,<=19.0.0
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
- libcudf ={{ version }}
- pylibcudf ={{ version }}
- {{ pin_compatible('rmm', max_pin='x.x') }}
Expand Down
2 changes: 1 addition & 1 deletion conda/recipes/pylibcudf/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ requirements:
- typing_extensions >=4.0.0
- pandas >=2.0,<2.2.4dev0
- numpy >=1.23,<3.0a0
- pyarrow>=14.0.0,<18.0.0a0
- pyarrow>=14.0.0,<=19.0.0
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
- {{ pin_compatible('rmm', max_pin='x.x') }}
- fsspec >=0.6.0
{% if cuda_major == "11" %}
Expand Down
4 changes: 2 additions & 2 deletions cpp/cmake/thirdparty/get_arrow.cmake
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# =============================================================================
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
# Copyright (c) 2020-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
Expand Down Expand Up @@ -347,7 +347,7 @@ if(NOT DEFINED CUDF_VERSION_Arrow)
set(CUDF_VERSION_Arrow
# This version must be kept in sync with the libarrow version pinned for builds in
# dependencies.yaml.
16.1.0
19.0.0
CACHE STRING "The version of Arrow to find (or build)"
)
endif()
Expand Down
6 changes: 3 additions & 3 deletions cpp/cmake/thirdparty/get_nanoarrow.cmake
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# =============================================================================
# Copyright (c) 2024, NVIDIA CORPORATION.
# Copyright (c) 2024-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
Expand All @@ -23,11 +23,11 @@ function(find_and_configure_nanoarrow)
# Currently we need to always build nanoarrow so we don't pickup a previous installed version
set(CPM_DOWNLOAD_nanoarrow ON)
rapids_cpm_find(
nanoarrow 0.6.0.dev
nanoarrow 0.7.0.dev
GLOBAL_TARGETS nanoarrow
CPM_ARGS
GIT_REPOSITORY https://github.com/apache/arrow-nanoarrow.git
GIT_TAG 1e2664a70ec14907409cadcceb14d79b9670bcdb
GIT_TAG 4bf5a9322626e95e3717e43de7616c0a256179eb
GIT_SHALLOW FALSE
OPTIONS "BUILD_SHARED_LIBS OFF" "NANOARROW_NAMESPACE cudf" ${_exclude_from_all}
)
Expand Down
18 changes: 18 additions & 0 deletions cpp/cmake/thirdparty/patches/nanoarrow_override.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@

{
"packages" : {
"nanoarrow" : {
"version" : "0.7.0.dev",
"git_url" : "https://github.com/apache/arrow-nanoarrow.git",
"git_tag" : "4bf5a9322626e95e3717e43de7616c0a256179eb",
"git_shallow" : false,
"patches" : [
{
"file" : "${current_json_dir}/nanoarrow_clang_tidy_compliance.diff",
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this patch still relevant? I don’t see it here. Can this override be removed?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@zeroshade Can we remove this? I'm okay with merging this PR and opening a follow-up for cleanup, since CI has passed.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Looks like #17618 removed this patch. I'll merge this PR and file a follow-up to delete this override.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I realized we are missing some reviews. If we need to push any changes to this branch for other reviews, we should go ahead and delete this. Otherwise I think merging this and removing the overrides in a follow-up is the best case scenario.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes this patch should be removed, but I agree that it's not worth rerunning CI just for that. If there are no other changes here we can do it in a follow-up just fine.

"issue" : "https://github.com/apache/arrow-nanoarrow/issues/537",
"fixed_in" : ""
}
]
}
}
}
6 changes: 5 additions & 1 deletion cpp/src/interop/arrow_utilities.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -63,6 +63,8 @@ data_type arrow_to_cudf_type(ArrowSchemaView const* arrow_view)
default: CUDF_FAIL("Unsupported duration unit in arrow", cudf::data_type_error);
}
}
case NANOARROW_TYPE_DECIMAL32: return data_type{type_id::DECIMAL32, -arrow_view->decimal_scale};
case NANOARROW_TYPE_DECIMAL64: return data_type{type_id::DECIMAL64, -arrow_view->decimal_scale};
case NANOARROW_TYPE_DECIMAL128:
return data_type{type_id::DECIMAL128, -arrow_view->decimal_scale};
default: CUDF_FAIL("Unsupported type_id conversion to cudf", cudf::data_type_error);
Expand All @@ -84,6 +86,8 @@ ArrowType id_to_arrow_type(cudf::type_id id)
case cudf::type_id::FLOAT32: return NANOARROW_TYPE_FLOAT;
case cudf::type_id::FLOAT64: return NANOARROW_TYPE_DOUBLE;
case cudf::type_id::TIMESTAMP_DAYS: return NANOARROW_TYPE_DATE32;
case cudf::type_id::DECIMAL32: return NANOARROW_TYPE_DECIMAL32;
case cudf::type_id::DECIMAL64: return NANOARROW_TYPE_DECIMAL64;
case cudf::type_id::DECIMAL128: return NANOARROW_TYPE_DECIMAL128;
default: CUDF_FAIL("Unsupported type_id conversion to arrow type", cudf::data_type_error);
}
Expand Down
18 changes: 1 addition & 17 deletions cpp/src/interop/arrow_utilities.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
* Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -70,21 +70,5 @@ ArrowType id_to_arrow_storage_type(cudf::type_id id);
*/
int initialize_array(ArrowArray* arr, ArrowType storage_type, cudf::column_view column);

/**
* @brief Helper to convert decimal values to 128-bit versions for Arrow compatibility
*
* The template parameter should be the underlying type of the data (e.g. int32_t for
* 32-bit decimal and int64_t for 64-bit decimal).
*
* @param input column_view of the data
* @param stream cuda stream to perform the operations on
* @param mr memory resource to allocate the returned device_uvector with
* @return unique_ptr to a device_buffer containing the upcasted data
*/
template <typename DeviceType>
std::unique_ptr<rmm::device_buffer> decimals_to_arrow(cudf::column_view input,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr);

} // namespace detail
} // namespace cudf
9 changes: 3 additions & 6 deletions cpp/src/interop/from_arrow_device.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
* Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -49,9 +49,7 @@ namespace {
using dispatch_tuple_t = std::tuple<column_view, owned_columns_t>;

struct dispatch_from_arrow_device {
template <typename T,
CUDF_ENABLE_IF(not is_rep_layout_compatible<T>() &&
!std::is_same_v<T, numeric::decimal128>)>
template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>() && !is_fixed_point<T>())>
dispatch_tuple_t operator()(ArrowSchemaView*,
ArrowArray const*,
data_type,
Expand All @@ -62,8 +60,7 @@ struct dispatch_from_arrow_device {
CUDF_FAIL("Unsupported type in from_arrow_device", cudf::data_type_error);
}

template <typename T,
CUDF_ENABLE_IF(is_rep_layout_compatible<T>() || std::is_same_v<T, numeric::decimal128>)>
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>() || is_fixed_point<T>())>
dispatch_tuple_t operator()(ArrowSchemaView* schema,
ArrowArray const* input,
data_type type,
Expand Down
11 changes: 4 additions & 7 deletions cpp/src/interop/from_arrow_host.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
* Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -69,22 +69,19 @@ struct dispatch_copy_from_arrow_host {
return mask;
}

template <typename T,
CUDF_ENABLE_IF(not is_rep_layout_compatible<T>() &&
!std::is_same_v<T, numeric::decimal128>)>
template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>() && !is_fixed_point<T>())>
std::unique_ptr<column> operator()(ArrowSchemaView*, ArrowArray const*, data_type, bool)
{
CUDF_FAIL("Unsupported type in copy_from_arrow_host.");
}

template <typename T,
CUDF_ENABLE_IF(is_rep_layout_compatible<T>() || std::is_same_v<T, numeric::decimal128>)>
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>() || is_fixed_point<T>())>
std::unique_ptr<column> operator()(ArrowSchemaView* schema,
ArrowArray const* input,
data_type type,
bool skip_mask)
{
using DeviceType = std::conditional_t<std::is_same_v<T, numeric::decimal128>, __int128_t, T>;
using DeviceType = device_storage_type_t<T>;

size_type const num_rows = input->length;
size_type const offset = input->offset;
Expand Down
102 changes: 8 additions & 94 deletions cpp/src/interop/to_arrow_device.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
* Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -92,13 +92,15 @@ int set_buffer(std::unique_ptr<T> device_buf, int64_t i, ArrowArray* out)
}

struct dispatch_to_arrow_device {
template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>())>
template <typename T,
CUDF_ENABLE_IF(not is_rep_layout_compatible<T>() and not is_fixed_point<T>())>
int operator()(cudf::column&&, rmm::cuda_stream_view, rmm::device_async_resource_ref, ArrowArray*)
{
CUDF_FAIL("Unsupported type for to_arrow_device", cudf::data_type_error);
}

template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
// cover rep layout compatible and decimal types
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>() or is_fixed_point<T>())>
int operator()(cudf::column&& column,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr,
Expand Down Expand Up @@ -132,64 +134,6 @@ struct dispatch_to_arrow_device {
}
};

template <typename DeviceType>
int construct_decimals(cudf::column_view input,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr,
ArrowArray* out)
{
nanoarrow::UniqueArray tmp;
NANOARROW_RETURN_NOT_OK(initialize_array(tmp.get(), NANOARROW_TYPE_DECIMAL128, input));

auto buf = detail::convert_decimals_to_decimal128<DeviceType>(input, stream, mr);
// Synchronize stream here to ensure the decimal128 buffer is ready.
stream.synchronize();
NANOARROW_RETURN_NOT_OK(set_buffer(std::move(buf), fixed_width_data_buffer_idx, tmp.get()));

ArrowArrayMove(tmp.get(), out);
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device::operator()<numeric::decimal32>(cudf::column&& column,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr,
ArrowArray* out)
{
using DeviceType = int32_t;
NANOARROW_RETURN_NOT_OK(construct_decimals<DeviceType>(column.view(), stream, mr, out));
auto contents = column.release();
NANOARROW_RETURN_NOT_OK(set_null_mask(contents, out));
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device::operator()<numeric::decimal64>(cudf::column&& column,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr,
ArrowArray* out)
{
using DeviceType = int64_t;
NANOARROW_RETURN_NOT_OK(construct_decimals<DeviceType>(column.view(), stream, mr, out));
auto contents = column.release();
NANOARROW_RETURN_NOT_OK(set_null_mask(contents, out));
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device::operator()<numeric::decimal128>(cudf::column&& column,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr,
ArrowArray* out)
{
nanoarrow::UniqueArray tmp;
NANOARROW_RETURN_NOT_OK(initialize_array(tmp.get(), NANOARROW_TYPE_DECIMAL128, column));
auto contents = column.release();
NANOARROW_RETURN_NOT_OK(set_contents(contents, tmp.get()));
ArrowArrayMove(tmp.get(), out);
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device::operator()<bool>(cudf::column&& column,
rmm::cuda_stream_view stream,
Expand Down Expand Up @@ -350,13 +294,14 @@ struct dispatch_to_arrow_device_view {
rmm::cuda_stream_view stream;
rmm::device_async_resource_ref mr;

template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>())>
template <typename T,
CUDF_ENABLE_IF(not is_rep_layout_compatible<T>() and not is_fixed_point<T>())>
int operator()(ArrowArray*) const
{
CUDF_FAIL("Unsupported type for to_arrow_device", cudf::data_type_error);
}

template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>() or is_fixed_point<T>())>
int operator()(ArrowArray* out) const
{
nanoarrow::UniqueArray tmp;
Expand Down Expand Up @@ -404,37 +349,6 @@ struct dispatch_to_arrow_device_view {
}
};

template <>
int dispatch_to_arrow_device_view::operator()<numeric::decimal32>(ArrowArray* out) const
{
using DeviceType = int32_t;
NANOARROW_RETURN_NOT_OK(construct_decimals<DeviceType>(column, stream, mr, out));
NANOARROW_RETURN_NOT_OK(set_null_mask(column, out));
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device_view::operator()<numeric::decimal64>(ArrowArray* out) const
{
using DeviceType = int64_t;
NANOARROW_RETURN_NOT_OK(construct_decimals<DeviceType>(column, stream, mr, out));
NANOARROW_RETURN_NOT_OK(set_null_mask(column, out));
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device_view::operator()<numeric::decimal128>(ArrowArray* out) const
{
nanoarrow::UniqueArray tmp;

NANOARROW_RETURN_NOT_OK(initialize_array(tmp.get(), NANOARROW_TYPE_DECIMAL128, column));
NANOARROW_RETURN_NOT_OK(set_null_mask(column, tmp.get()));
NANOARROW_RETURN_NOT_OK(set_view_to_buffer(column, tmp.get()));

ArrowArrayMove(tmp.get(), out);
return NANOARROW_OK;
}

template <>
int dispatch_to_arrow_device_view::operator()<bool>(ArrowArray* out) const
{
Expand Down
33 changes: 3 additions & 30 deletions cpp/src/interop/to_arrow_host.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
* Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -115,8 +115,7 @@ struct dispatch_to_arrow_host {
CUDF_FAIL("Unsupported type for to_arrow_host", cudf::data_type_error);
}

template <typename T,
CUDF_ENABLE_IF(is_rep_layout_compatible<T>() || std::is_same_v<T, numeric::decimal128>)>
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>() || is_fixed_point<T>())>
int operator()(ArrowArray* out) const
{
nanoarrow::UniqueArray tmp;
Expand All @@ -125,40 +124,14 @@ struct dispatch_to_arrow_host {
NANOARROW_RETURN_NOT_OK(initialize_array(tmp.get(), storage_type, column));

NANOARROW_RETURN_NOT_OK(populate_validity_bitmap(ArrowArrayValidityBitmap(tmp.get())));
using DataType = std::conditional_t<std::is_same_v<T, numeric::decimal128>, __int128_t, T>;
using DataType = device_storage_type_t<T>;
NANOARROW_RETURN_NOT_OK(
populate_data_buffer(device_span<DataType const>(column.data<DataType>(), column.size()),
ArrowArrayBuffer(tmp.get(), fixed_width_data_buffer_idx)));

ArrowArrayMove(tmp.get(), out);
return NANOARROW_OK;
}

// convert decimal types from libcudf to arrow where those types are not directly
// supported by Arrow. These types must be fit into 128 bits, the smallest
// decimal resolution supported by Arrow
template <typename T,
CUDF_ENABLE_IF(!is_rep_layout_compatible<T>() &&
(std::is_same_v<T, numeric::decimal32> ||
std::is_same_v<T, numeric::decimal64>))>
int operator()(ArrowArray* out) const
{
using DeviceType = std::conditional_t<std::is_same_v<T, numeric::decimal32>, int32_t, int64_t>;
nanoarrow::UniqueArray tmp;
NANOARROW_RETURN_NOT_OK(initialize_array(tmp.get(), NANOARROW_TYPE_DECIMAL128, column));

NANOARROW_RETURN_NOT_OK(populate_validity_bitmap(ArrowArrayValidityBitmap(tmp.get())));
auto buf = detail::convert_decimals_to_decimal128<DeviceType>(column, stream, mr);
// No need to synchronize stream here as populate_data_buffer uses the same stream to copy data
// to host.
NANOARROW_RETURN_NOT_OK(
populate_data_buffer(device_span<__int128_t const>(
reinterpret_cast<const __int128_t*>(buf->data()), column.size()),
ArrowArrayBuffer(tmp.get(), fixed_width_data_buffer_idx)));

ArrowArrayMove(tmp.get(), out);
return NANOARROW_OK;
}
};

int get_column(cudf::column_view column,
Expand Down
Loading
Loading