
Commit 216f4b9

Remove usage of GradedUnitRanges (#93)
1 parent 24d8cec commit 216f4b9

12 files changed: +194, -714 lines

Project.toml

+2-7
@@ -1,7 +1,7 @@
 name = "BlockSparseArrays"
 uuid = "2c9a651f-6452-4ace-a6ac-809f4280fbb4"
 authors = ["ITensor developers <[email protected]> and contributors"]
-version = "0.3.9"
+version = "0.4.0"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
@@ -12,7 +12,6 @@ DiagonalArrays = "74fd4be6-21e2-4f6f-823a-4360d37c7a77"
 Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
 GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
-GradedUnitRanges = "e2de450a-8a67-46c7-b59c-01d5a3d041c5"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
 MapBroadcast = "ebd9b9da-f48d-417c-9660-449667d60261"
@@ -22,11 +21,9 @@ TypeParameterAccessors = "7e5a90cf-f82e-492e-a09b-e3e26432c138"
 
 [weakdeps]
 TensorAlgebra = "68bd88dc-f39d-4e12-b2ca-f046b68fcc6a"
-TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"
 
 [extensions]
-BlockSparseArraysGradedUnitRangesExt = "GradedUnitRanges"
-BlockSparseArraysTensorAlgebraExt = ["TensorProducts", "TensorAlgebra"]
+BlockSparseArraysTensorAlgebraExt = "TensorAlgebra"
 
 [compat]
 Adapt = "4.1.1"
@@ -38,14 +35,12 @@ DiagonalArrays = "0.3"
 Dictionaries = "0.4.3"
 FillArrays = "1.13.0"
 GPUArraysCore = "0.1.0, 0.2"
-GradedUnitRanges = "0.2.2"
 LinearAlgebra = "1.10"
 MacroTools = "0.5.13"
 MapBroadcast = "0.1.5"
 SparseArraysBase = "0.5"
 SplitApplyCombine = "1.2.3"
 TensorAlgebra = "0.2.4"
-TensorProducts = "0.1.2"
 Test = "1.10"
 TypeParameterAccessors = "0.2.0, 0.3"
 julia = "1.10"

docs/Project.toml

+1-1
@@ -6,6 +6,6 @@ Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
 
 [compat]
 BlockArrays = "1"
-BlockSparseArrays = "0.3"
+BlockSparseArrays = "0.4"
 Documenter = "1"
 Literate = "2"

examples/Project.toml

+1-1
@@ -5,5 +5,5 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [compat]
 BlockArrays = "1"
-BlockSparseArrays = "0.3"
+BlockSparseArrays = "0.4"
 Test = "1"

ext/BlockSparseArraysGradedUnitRangesExt/BlockSparseArraysGradedUnitRangesExt.jl

-105
This file was deleted.
ext/BlockSparseArraysTensorAlgebraExt/BlockSparseArraysTensorAlgebraExt.jl

+2-99
@@ -1,10 +1,8 @@
 module BlockSparseArraysTensorAlgebraExt
-using BlockArrays: AbstractBlockedUnitRange
-
-using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion
-using TensorProducts: OneToOne
 
+using BlockArrays: AbstractBlockedUnitRange
 using BlockSparseArrays: AbstractBlockSparseArray, blockreshape
+using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion
 
 TensorAlgebra.FusionStyle(::AbstractBlockedUnitRange) = BlockReshapeFusion()
 
@@ -20,99 +18,4 @@ function TensorAlgebra.splitdims(
   return blockreshape(a, axes)
 end
 
-using BlockArrays:
-  AbstractBlockVector,
-  AbstractBlockedUnitRange,
-  Block,
-  BlockIndexRange,
-  blockedrange,
-  blocks
-using BlockSparseArrays:
-  BlockSparseArrays,
-  AbstractBlockSparseArray,
-  AbstractBlockSparseArrayInterface,
-  AbstractBlockSparseMatrix,
-  BlockSparseArray,
-  BlockSparseArrayInterface,
-  BlockSparseMatrix,
-  BlockSparseVector,
-  block_merge
-using DerivableInterfaces: @interface
-using GradedUnitRanges:
-  GradedUnitRanges,
-  AbstractGradedUnitRange,
-  blockmergesortperm,
-  blocksortperm,
-  dual,
-  invblockperm,
-  nondual,
-  unmerged_tensor_product
-using LinearAlgebra: Adjoint, Transpose
-using TensorAlgebra:
-  TensorAlgebra, FusionStyle, BlockReshapeFusion, SectorFusion, fusedims, splitdims
-
-# TODO: Make a `ReduceWhile` library.
-include("reducewhile.jl")
-
-TensorAlgebra.FusionStyle(::AbstractGradedUnitRange) = SectorFusion()
-
-# TODO: Need to implement this! Will require implementing
-# `block_merge(a::AbstractUnitRange, blockmerger::BlockedUnitRange)`.
-function BlockSparseArrays.block_merge(
-  a::AbstractGradedUnitRange, blockmerger::AbstractBlockedUnitRange
-)
-  return a
-end
-
-# Sort the blocks by sector and then merge the common sectors.
-function block_mergesort(a::AbstractArray)
-  I = blockmergesortperm.(axes(a))
-  return a[I...]
-end
-
-function TensorAlgebra.fusedims(
-  ::SectorFusion, a::AbstractArray, merged_axes::AbstractUnitRange...
-)
-  # First perform a fusion using a block reshape.
-  # TODO avoid groupreducewhile. Require refactor of fusedims.
-  unmerged_axes = groupreducewhile(
-    unmerged_tensor_product, axes(a), length(merged_axes); init=OneToOne()
-  ) do i, axis
-    return length(axis) ≤ length(merged_axes[i])
-  end
-
-  a_reshaped = fusedims(BlockReshapeFusion(), a, unmerged_axes...)
-  # Sort the blocks by sector and merge the equivalent sectors.
-  return block_mergesort(a_reshaped)
-end
-
-function TensorAlgebra.splitdims(
-  ::SectorFusion, a::AbstractArray, split_axes::AbstractUnitRange...
-)
-  # First, fuse axes to get `blockmergesortperm`.
-  # Then unpermute the blocks.
-  axes_prod = groupreducewhile(
-    unmerged_tensor_product, split_axes, ndims(a); init=OneToOne()
-  ) do i, axis
-    return length(axis) ≤ length(axes(a, i))
-  end
-  blockperms = blocksortperm.(axes_prod)
-  sorted_axes = map((r, I) -> only(axes(r[I])), axes_prod, blockperms)
-
-  # TODO: This is doing extra copies of the blocks,
-  # use `@view a[axes_prod...]` instead.
-  # That will require implementing some reindexing logic
-  # for this combination of slicing.
-  a_unblocked = a[sorted_axes...]
-  a_blockpermed = a_unblocked[invblockperm.(blockperms)...]
-  return splitdims(BlockReshapeFusion(), a_blockpermed, split_axes...)
-end
-
-# TODO: Handle this through some kind of trait dispatch, maybe
-# a `SymmetryStyle`-like trait to check if the block sparse
-# matrix has graded axes.
-function Base.axes(a::Adjoint{<:Any,<:AbstractBlockSparseMatrix})
-  return dual.(reverse(axes(a')))
-end
-
 end
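
What remains of the extension after this commit is the block-reshape fusion path for ordinary blocked axes. A short sketch using only names that appear in the diff (blockedrange from BlockArrays, FusionStyle and BlockReshapeFusion from TensorAlgebra); treat it as an assumption about usage, not output verified against the commit:

using BlockArrays: blockedrange
using BlockSparseArrays          # parent package, so the extension can load
using TensorAlgebra: FusionStyle, BlockReshapeFusion

r = blockedrange([2, 3])         # a plain blocked axis, no grading attached
# The extension defines FusionStyle for AbstractBlockedUnitRange, so plain
# blocked axes use fusion by block reshape:
FusionStyle(r) isa BlockReshapeFusion   # expected: true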

ext/BlockSparseArraysTensorAlgebraExt/reducewhile.jl

-34
This file was deleted.

src/BlockArraysExtensions/BlockArraysExtensions.jl

-1
@@ -20,7 +20,6 @@ using BlockArrays:
   findblock,
   findblockindex
 using Dictionaries: Dictionary, Indices
-using GradedUnitRanges: blockedunitrange_getindices, to_blockindices
 using SparseArraysBase:
   SparseArraysBase,
   eachstoredindex,
