File tree Expand file tree Collapse file tree 3 files changed +11
-9
lines changed
src/java/org/apache/cassandra
test/unit/org/apache/cassandra/db/compaction/unified Expand file tree Collapse file tree 3 files changed +11
-9
lines changed Original file line number Diff line number Diff line change @@ -1012,6 +1012,16 @@ public enum CassandraRelevantProperties
10121012 /** Set this property to true in order to use DSE-like histogram bucket boundaries and behaviour */
10131013 USE_DSE_COMPATIBLE_HISTOGRAM_BOUNDARIES ("cassandra.use_dse_compatible_histogram_boundaries" , "false" ),
10141014 USE_DYNAMIC_SNITCH_FOR_COUNTER_LEADER ("cassandra.counter_leader.use_dynamic_snitch" , "false" ),
1015+
1016+
1017+ /**
1018+ * Whether to use factorization-based shard count growth for a smoother progression when base_shard_count is not a power of 2.
1019+ * When enabled (default: true), instead of using power-of-two jumps like 1→2→8→1000, the system will
1020+ * use prime factorization to create smooth sequences like 1→5→25→125→250→500→1000 for num_shards=1000.
1021+ * This prevents the large jumps that were involved in the data loss incident caused by HCD-130.
1022+ */
1023+ USE_FACTORIZATION_SHARD_COUNT_GROWTH ("use_factorization_shard_count_growth" , "true" ),
1024+
10151025 /** Set this property to true in order to switch to micrometer metrics */
10161026 USE_MICROMETER ("cassandra.use_micrometer_metrics" , "false" ),
10171027 /** When enabled, recursive directory deletion will be executed using a unix command `rm -rf` instead of traversing
Original file line number Diff line number Diff line change @@ -322,14 +322,7 @@ public abstract class Controller
322322 static final String MAX_SSTABLES_PER_SHARD_FACTOR_OPTION = "max_sstables_per_shard_factor" ;
323323 static final double DEFAULT_MAX_SSTABLES_PER_SHARD_FACTOR = UCS_MAX_SSTABLES_PER_SHARD_FACTOR .getDoubleWithLegacyFallback ();
324324
325- /**
326- * Whether to use factorization-based shard count growth for smoother progression when base_shard_count is not power of 2.
327- * When enabled (default: true), instead of using power-of-two jumps like 1→2→8→1000, the system will
328- * use prime factorization to create smooth sequences like 1→5→25→125→250→500→1000 for num_shards=1000.
329- * This prevents the large jumps that were involved in the data loss incident caused by HCD-130
330- * <p>
331- */
332- static final boolean USE_FACTORIZATION_SHARD_COUNT_GROWTH = Boolean .parseBoolean (System .getProperty ("use_factorization_shard_count_growth" , "true" ));
325+ static final boolean USE_FACTORIZATION_SHARD_COUNT_GROWTH = CassandraRelevantProperties .USE_FACTORIZATION_SHARD_COUNT_GROWTH .getBoolean ();
333326
334327 protected final MonotonicClock clock ;
335328 protected final Environment env ;
Original file line number Diff line number Diff line change 1818
1919import java .util .Arrays ;
2020import java .util .HashMap ;
21- import java .util .List ;
2221import java .util .Map ;
2322import java .util .concurrent .ScheduledExecutorService ;
2423import java .util .concurrent .ScheduledFuture ;
You can’t perform that action at this time.
0 commit comments