Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[FLINK-37475] Drop ChangelogNormalize for piping from upsert source to sink #26306

Merged
merged 1 commit into from
Mar 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
import java.util.EnumSet;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

/**
* The set of changes contained in a changelog.
Expand All @@ -43,6 +44,15 @@ public final class ChangelogMode {
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.UPDATE_AFTER)
.addContainedKind(RowKind.DELETE)
.keyOnlyDeletes(true)
.build();

private static final ChangelogMode UPSERT_WITH_FULL_DELETES =
ChangelogMode.newBuilder()
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.UPDATE_AFTER)
.addContainedKind(RowKind.DELETE)
.keyOnlyDeletes(false)
.build();

private static final ChangelogMode ALL =
Expand All @@ -54,11 +64,13 @@ public final class ChangelogMode {
.build();

private final Set<RowKind> kinds;
private final boolean keyOnlyDeletes;

private ChangelogMode(Set<RowKind> kinds) {
private ChangelogMode(Set<RowKind> kinds, boolean keyOnlyDeletes) {
Preconditions.checkArgument(
kinds.size() > 0, "At least one kind of row should be contained in a changelog.");
this.kinds = Collections.unmodifiableSet(kinds);
this.keyOnlyDeletes = keyOnlyDeletes;
}

/** Shortcut for a simple {@link RowKind#INSERT}-only changelog. */
Expand All @@ -71,7 +83,21 @@ public static ChangelogMode insertOnly() {
* contain {@link RowKind#UPDATE_BEFORE} rows.
*/
public static ChangelogMode upsert() {
return UPSERT;
return upsert(true);
}

/**
 * Shortcut for an upsert changelog that models idempotent updates on a key and therefore never
 * contains {@link RowKind#UPDATE_BEFORE} rows.
 *
 * @param keyOnlyDeletes Tells the system the DELETEs contain just the key.
 */
public static ChangelogMode upsert(boolean keyOnlyDeletes) {
    // Both variants are pre-built singletons; just pick the matching one.
    return keyOnlyDeletes ? UPSERT : UPSERT_WITH_FULL_DELETES;
}

/** Shortcut for a changelog that can contain all {@link RowKind}s. */
Expand All @@ -96,6 +122,10 @@ public boolean containsOnly(RowKind kind) {
return kinds.size() == 1 && kinds.contains(kind);
}

/**
 * Returns whether DELETE operations in this changelog carry only the key fields of the row;
 * non-key fields may be absent in such rows.
 */
public boolean keyOnlyDeletes() {
return keyOnlyDeletes;
}

@Override
public boolean equals(Object o) {
if (this == o) {
Expand All @@ -115,7 +145,13 @@ public int hashCode() {

@Override
public String toString() {
return kinds.toString();
if (!keyOnlyDeletes) {
return kinds.toString();
} else {
return kinds.stream()
.map(kind -> kind == RowKind.DELETE ? "~D" : kind.toString())
.collect(Collectors.joining(", ", "[", "]"));
}
}

// --------------------------------------------------------------------------------------------
Expand All @@ -125,6 +161,7 @@ public String toString() {
public static class Builder {

private final Set<RowKind> kinds = EnumSet.noneOf(RowKind.class);
private boolean keyOnlyDeletes = false;

private Builder() {
// default constructor to allow a fluent definition
Expand All @@ -135,8 +172,13 @@ public Builder addContainedKind(RowKind kind) {
return this;
}

/**
 * Sets whether DELETE operations in the changelog contain just the key fields of the row.
 *
 * @param flag true if DELETEs carry only the key, false if they carry the full row
 * @return this builder for fluent definition
 */
public Builder keyOnlyDeletes(boolean flag) {
this.keyOnlyDeletes = flag;
return this;
}

public ChangelogMode build() {
return new ChangelogMode(kinds);
return new ChangelogMode(kinds, keyOnlyDeletes);
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,179 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.flink.table.planner.plan.optimize;

import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata;
import org.apache.flink.table.planner.connectors.DynamicSourceUtils;
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalCalcBase;
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalChangelogNormalize;
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalExchange;
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalTableSourceScan;
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalWatermarkAssigner;
import org.apache.flink.table.planner.plan.optimize.program.FlinkChangelogModeInferenceProgram;
import org.apache.flink.table.planner.plan.schema.TableSourceTable;
import org.apache.flink.table.planner.plan.trait.UpdateKindTrait$;
import org.apache.flink.table.planner.plan.trait.UpdateKindTraitDef$;
import org.apache.flink.table.planner.plan.utils.FlinkRelUtil;

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.SingleRel;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.util.ImmutableBitSet;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
* Checks if it is safe to remove ChangelogNormalize as part of {@link
* FlinkChangelogModeInferenceProgram}. It checks:
*
* <ul>
* <li>if there is no filter pushed into the changelog normalize
* <li>if we don't need to produce UPDATE_BEFORE
* <li>we don't access any metadata columns
* </ul>
*/
public class ChangelogNormalizeRequirementResolver {

    /**
     * Returns {@code true} if the given ChangelogNormalize is required and must NOT be dropped.
     *
     * <p>The node is required if any of the following holds:
     *
     * <ul>
     *   <li>a filter condition has been pushed into the changelog normalize
     *   <li>downstream operators require {@code UPDATE_BEFORE} rows (i.e. the update kind trait is
     *       not ONLY_UPDATE_AFTER)
     *   <li>a metadata column of the source is accessed (metadata may be absent in key-only
     *       deletes, so normalization is needed to fill it)
     * </ul>
     */
    public static boolean isRequired(StreamPhysicalChangelogNormalize normalize) {
        if (normalize.filterCondition() != null) {
            // a pushed-down filter must be evaluated on the normalized (full) rows
            return true;
        }
        if (!Objects.equals(
                normalize.getTraitSet().getTrait(UpdateKindTraitDef$.MODULE$.INSTANCE()),
                UpdateKindTrait$.MODULE$.ONLY_UPDATE_AFTER())) {
            // UPDATE_BEFORE rows can only be produced by the normalization step
            return true;
        }

        // check if metadata columns are accessed anywhere between source and normalize
        final RelNode input = normalize.getInput();

        return visit(input, bitSetForAllOutputColumns(input));
    }

    /** Returns a bit set marking every output column of the given node as required. */
    private static ImmutableBitSet bitSetForAllOutputColumns(RelNode input) {
        return ImmutableBitSet.builder().set(0, input.getRowType().getFieldCount()).build();
    }

    /**
     * Recursively walks the input tree of the changelog normalize, tracking which columns are
     * required, and returns {@code true} if a metadata column is accessed (which makes the
     * changelog normalize required).
     *
     * @param rel the node to visit
     * @param requiredColumns columns of {@code rel}'s output that are consumed downstream
     */
    private static boolean visit(final RelNode rel, final ImmutableBitSet requiredColumns) {
        if (rel instanceof StreamPhysicalCalcBase) {
            return visitCalc((StreamPhysicalCalcBase) rel, requiredColumns);
        } else if (rel instanceof StreamPhysicalTableSourceScan) {
            return visitTableSourceScan((StreamPhysicalTableSourceScan) rel, requiredColumns);
        } else if (rel instanceof StreamPhysicalWatermarkAssigner
                || rel instanceof StreamPhysicalExchange) {
            // these nodes pass rows through unchanged; require all input columns
            final RelNode input = ((SingleRel) rel).getInput();
            return visit(input, bitSetForAllOutputColumns(input));
        } else {
            // these nodes should not be in an input of a changelog normalize
            // StreamPhysicalChangelogNormalize
            // StreamPhysicalDropUpdateBefore
            // StreamPhysicalUnion
            // StreamPhysicalSort
            // StreamPhysicalLimit
            // StreamPhysicalSortLimit
            // StreamPhysicalTemporalSort
            // StreamPhysicalWindowTableFunction
            // StreamPhysicalWindowRank
            // StreamPhysicalWindowDeduplicate
            // StreamPhysicalRank
            // StreamPhysicalOverAggregateBase
            // CommonPhysicalJoin
            // StreamPhysicalMatch
            // StreamPhysicalMiniBatchAssigner
            // StreamPhysicalExpand
            // StreamPhysicalWindowAggregateBase
            // StreamPhysicalGroupAggregateBase
            // StreamPhysicalSink
            // StreamPhysicalLegacySink
            // StreamPhysicalCorrelateBase
            // StreamPhysicalLookupJoin
            // StreamPhysicalValues
            // StreamPhysicalDataStreamScan
            // StreamPhysicalLegacyTableSourceScan
            throw new UnsupportedOperationException(
                    String.format(
                            "Unsupported to visit node %s. The node either should not be pushed"
                                    + " through the changelog normalize or is not supported yet.",
                            rel.getClass().getSimpleName()));
        }
    }

    /**
     * Returns {@code true} if any required column of the scan is a metadata column of a source
     * implementing {@link SupportsReadingMetadata}.
     */
    private static boolean visitTableSourceScan(
            StreamPhysicalTableSourceScan tableScan, ImmutableBitSet requiredColumns) {
        if (!(tableScan.tableSource() instanceof SupportsReadingMetadata)) {
            // source does not have metadata, no need to check
            return false;
        }
        // explicit null check instead of `assert`: assertions are disabled by default at runtime
        final TableSourceTable sourceTable =
                Objects.requireNonNull(
                        tableScan.getTable().unwrap(TableSourceTable.class),
                        "TableSourceTable must not be null for a table source scan");
        // check if requiredColumns contain a metadata column
        final List<Column.MetadataColumn> metadataColumns =
                DynamicSourceUtils.extractMetadataColumns(
                        sourceTable.contextResolvedTable().getResolvedSchema());
        final Set<String> metaColumnSet =
                metadataColumns.stream().map(Column::getName).collect(Collectors.toSet());
        final List<String> columns = tableScan.getRowType().getFieldNames();
        for (int index = 0; index < columns.size(); index++) {
            String column = columns.get(index);
            if (metaColumnSet.contains(column) && requiredColumns.get(index)) {
                // we require a metadata column, therefore, we cannot remove the changelog
                // normalize
                return true;
            }
        }

        return false;
    }

    /**
     * Maps the required output columns of the calc back to the input columns they are computed
     * from and continues the visit on the calc's input with that reduced column set.
     */
    private static boolean visitCalc(StreamPhysicalCalcBase calc, ImmutableBitSet requiredColumns) {
        // evaluate required columns from input
        final List<RexNode> projects =
                calc.getProgram().getProjectList().stream()
                        .map(expr -> calc.getProgram().expandLocalRef(expr))
                        .collect(Collectors.toList());
        // output position -> list of input positions referenced by that projection
        final Map<Integer, List<Integer>> outFromSourcePos =
                FlinkRelUtil.extractSourceMapping(projects);
        final List<Integer> conv2Inputs =
                requiredColumns.toList().stream()
                        .map(
                                out ->
                                        Optional.ofNullable(outFromSourcePos.get(out))
                                                .orElseThrow(
                                                        () ->
                                                                new IllegalStateException(
                                                                        String.format(
                                                                                "Invalid pos:%d over projection:%s",
                                                                                out,
                                                                                calc
                                                                                        .getProgram()))))
                        .flatMap(Collection::stream)
                        // -1 marks projections with no input reference (e.g. constants) —
                        // TODO confirm against FlinkRelUtil.extractSourceMapping
                        .filter(index -> index != -1)
                        .distinct()
                        .collect(Collectors.toList());
        return visit(calc.getInput(), ImmutableBitSet.of(conv2Inputs));
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@
import org.apache.flink.table.planner.plan.schema.TableSourceTable;
import org.apache.flink.table.planner.plan.utils.ChangelogPlanUtils;
import org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil;
import org.apache.flink.table.planner.plan.utils.FlinkRelUtil;
import org.apache.flink.table.planner.plan.utils.FlinkRexUtil;
import org.apache.flink.table.planner.plan.utils.JoinUtil;
import org.apache.flink.table.planner.plan.utils.OverAggregateUtil;
Expand All @@ -78,7 +79,6 @@
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexProgram;
import org.apache.calcite.rex.RexSlot;
import org.apache.calcite.sql.SqlExplainLevel;
import org.apache.calcite.util.ImmutableBitSet;

Expand Down Expand Up @@ -297,7 +297,8 @@ private StreamPhysicalRel visitCalc(
calc.getProgram().getProjectList().stream()
.map(expr -> calc.getProgram().expandLocalRef(expr))
.collect(Collectors.toList());
Map<Integer, List<Integer>> outFromSourcePos = extractSourceMapping(projects);
Map<Integer, List<Integer>> outFromSourcePos =
FlinkRelUtil.extractSourceMapping(projects);
List<Integer> conv2Inputs =
requireDeterminism.toList().stream()
.map(
Expand Down Expand Up @@ -812,22 +813,6 @@ private StreamPhysicalRel visitJoinChild(
return transmitDeterminismRequirement(rel, inputRequireDeterminism);
}

/** Extracts the out from source field index mapping of the given projects. */
private Map<Integer, List<Integer>> extractSourceMapping(final List<RexNode> projects) {
Map<Integer, List<Integer>> mapOutFromInPos = new HashMap<>();

for (int index = 0; index < projects.size(); index++) {
RexNode expr = projects.get(index);
mapOutFromInPos.put(
index,
FlinkRexUtil.findAllInputRefs(expr).stream()
.mapToInt(RexSlot::getIndex)
.boxed()
.collect(Collectors.toList()));
}
return mapOutFromInPos;
}

private void checkNonDeterministicRexProgram(
final ImmutableBitSet requireDeterminism,
final RexProgram program,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.flink.table.planner.plan.trait;

/** Lists all kinds of {@link ModifyKind#DELETE} operation. */
public enum DeleteKind {

/** This kind indicates that operators do not emit {@link ModifyKind#DELETE} operation. */
NONE,

/**
 * This kind indicates that operators can emit deletes that contain only the key fields. The
 * non-key fields of the row may not be present.
 */
DELETE_BY_KEY,

/** This kind indicates that operators should emit deletes with the full row. */
FULL_DELETE
}
Loading