diff --git a/Plot/src/main/java/io/deephaven/plot/AxesImpl.java b/Plot/src/main/java/io/deephaven/plot/AxesImpl.java index f3e063e8884..9d7b70d8df7 100644 --- a/Plot/src/main/java/io/deephaven/plot/AxesImpl.java +++ b/Plot/src/main/java/io/deephaven/plot/AxesImpl.java @@ -281,7 +281,7 @@ private static SelectableDataSet getAggregatedSelectableDataSet(final Selectable final Collection aggs = aggSupplier.get(); final Collection columnNames = ColumnName.from(cols); final Function applyAggs = t -> t.aggBy(aggs, columnNames); - return sds.transform(MemoizedOperationKey.aggBy(aggs, columnNames), applyAggs); + return sds.transform(MemoizedOperationKey.aggBy(aggs, false, null, columnNames), applyAggs); } private static SelectableDataSet getLastBySelectableDataSet(final SelectableDataSet sds, final String... columns) { diff --git a/engine/api/src/main/java/io/deephaven/engine/table/ColumnSource.java b/engine/api/src/main/java/io/deephaven/engine/table/ColumnSource.java index 0793f4a8305..351d3667fae 100644 --- a/engine/api/src/main/java/io/deephaven/engine/table/ColumnSource.java +++ b/engine/api/src/main/java/io/deephaven/engine/table/ColumnSource.java @@ -110,11 +110,6 @@ default void releaseCachedResources() { ColumnSource reinterpret( @NotNull final Class alternateDataType) throws IllegalArgumentException; - @Override - default List getColumnSources() { - return Collections.singletonList(this); - } - @Override default T createTuple(final long rowKey) { return get(rowKey); diff --git a/engine/api/src/main/java/io/deephaven/engine/table/PartitionedTable.java b/engine/api/src/main/java/io/deephaven/engine/table/PartitionedTable.java index 882aee45f49..6d78dfa0e3f 100644 --- a/engine/api/src/main/java/io/deephaven/engine/table/PartitionedTable.java +++ b/engine/api/src/main/java/io/deephaven/engine/table/PartitionedTable.java @@ -9,6 +9,7 @@ import io.deephaven.base.log.LogOutputAppendable; import io.deephaven.engine.liveness.LivenessNode; import io.deephaven.engine.liveness.LivenessReferent; +import io.deephaven.engine.updategraph.ConcurrentMethod; import io.deephaven.util.annotations.FinalDefault; import org.jetbrains.annotations.NotNull; @@ -62,6 +63,7 @@ interface Proxy extends TableOperations> { * * @return The underlying {@link Table partitioned table} */ + @ConcurrentMethod Table table(); /** @@ -70,6 +72,7 @@ interface Proxy extends TableOperations> { * * @return The key column names */ + @ConcurrentMethod Set keyColumnNames(); /** @@ -82,6 +85,7 @@ interface Proxy extends TableOperations> { * * @return Whether the keys in the underlying partitioned table are unique */ + @ConcurrentMethod boolean uniqueKeys(); /** @@ -89,6 +93,7 @@ interface Proxy extends TableOperations> { * * @return The constituent column name */ + @ConcurrentMethod String constituentColumnName(); /** @@ -98,6 +103,7 @@ interface Proxy extends TableOperations> { * * @return The constituent definition */ + @ConcurrentMethod TableDefinition constituentDefinition(); /** @@ -115,6 +121,7 @@ interface Proxy extends TableOperations> { * * @return Whether the constituents of the underlying partitioned table can change */ + @ConcurrentMethod boolean constituentChangesPermitted(); /** @@ -125,6 +132,7 @@ interface Proxy extends TableOperations> { * @see #proxy(boolean, boolean) */ @FinalDefault + @ConcurrentMethod default Proxy proxy() { return proxy(true, true); } @@ -147,6 +155,7 @@ default Proxy proxy() { * @return A proxy that allows {@link TableOperations table operations} to be applied to the constituent tables of * this 
PartitionedTable */ + @ConcurrentMethod Proxy proxy(boolean requireMatchingKeys, boolean sanityCheckJoinOperations); /** @@ -169,6 +178,7 @@ default Proxy proxy() { * @param filters The filters to apply. Must not reference the constituent column. * @return The filtered PartitionedTable */ + @ConcurrentMethod PartitionedTable filter(Collection filters); /** @@ -180,6 +190,7 @@ default Proxy proxy() { * @param sortColumns The columns to sort by. Must not reference the constituent column. * @return The sorted PartitionedTable */ + @ConcurrentMethod PartitionedTable sort(Collection sortColumns); /** @@ -242,6 +253,7 @@ PartitionedTable partitionedTransform( * @return The {@link Table constituent} at the single row in {@link #table()} matching the {@code keyColumnValues}, * or {@code null} if no matches were found */ + @ConcurrentMethod Table constituentFor(@NotNull Object... keyColumnValues); /** @@ -255,5 +267,6 @@ PartitionedTable partitionedTransform( * * @return An array of all current {@link Table constituents} */ + @ConcurrentMethod Table[] constituents(); } diff --git a/engine/api/src/main/java/io/deephaven/engine/table/Table.java b/engine/api/src/main/java/io/deephaven/engine/table/Table.java index 6605df24864..d8ff5cec549 100644 --- a/engine/api/src/main/java/io/deephaven/engine/table/Table.java +++ b/engine/api/src/main/java/io/deephaven/engine/table/Table.java @@ -142,8 +142,10 @@ public interface Table extends * all rows: *
 * <ol>
 * <li>{@link #groupBy} is unsupported</li>
- * <li>{@link #aggBy} is unsupported if {@link Aggregation#AggGroup(String...)} is used</li>
 * <li>{@link #partitionBy} is unsupported</li>
+ * <li>{@link #partitionedAggBy(Collection, boolean, Table, String...) partitionedAggBy} is unsupported</li>
+ * <li>{@link #aggBy} is unsupported if either of {@link io.deephaven.api.agg.spec.AggSpecGroup group} or
+ * {@link io.deephaven.api.agg.Partition partition} are used</li>
 * <li>{@link #rollup(Collection, boolean, ColumnName...) rollup()} is unsupported if
 * {@code includeConstituents == true}</li>
 * <li>{@link #treeTable(String, String) treeTable()} is unsupported</li>
  12. @@ -995,6 +997,14 @@ Table join(Table rightTable, Collection columnsToMatch, @ConcurrentMethod Table aggBy(Aggregation aggregation); + @Override + @ConcurrentMethod + Table aggBy(Collection aggregations); + + @Override + @ConcurrentMethod + Table aggBy(Collection aggregations, boolean preserveEmpty); + @Override @ConcurrentMethod Table aggBy(Aggregation aggregation, String... groupByColumns); @@ -1005,15 +1015,16 @@ Table join(Table rightTable, Collection columnsToMatch, @Override @ConcurrentMethod - Table aggBy(Collection aggregations, Collection groupByColumns); + Table aggBy(Collection aggregations, String... groupByColumns); @Override @ConcurrentMethod - Table aggBy(Collection aggregations, String... groupByColumns); + Table aggBy(Collection aggregations, Collection groupByColumns); @Override @ConcurrentMethod - Table aggBy(Collection aggregations); + Table aggBy(Collection aggregations, boolean preserveEmpty, Table initialGroups, + Collection groupByColumns); Table headBy(long nRows, Collection groupByColumnNames); @@ -1602,6 +1613,29 @@ Table join(Table rightTable, Collection columnsToMatch, @ConcurrentMethod PartitionedTable partitionBy(String... keyColumnNames); + /** + * Convenience method that performs an {@link #aggBy(Collection, boolean, Table, Collection)} and wraps the result + * in a {@link PartitionedTable}. If {@code aggregations} does not include a {@link io.deephaven.api.agg.Partition + * partition}, one will be added automatically with the default constituent column name and behavior used in + * {@link #partitionBy(String...)}. + * + * @param aggregations The {@link Aggregation aggregations} to apply + * @param preserveEmpty Whether to keep result rows for groups that are initially empty or become empty as a result + * of updates. Each aggregation operator defines its own value for empty groups. + * @param initialGroups A table whose distinct combinations of values for the {@code groupByColumns} should be used + * to create an initial set of aggregation groups. All other columns are ignored. This is useful in + * combination with {@code preserveEmpty == true} to ensure that particular groups appear in the result + * table, or with {@code preserveEmpty == false} to control the encounter order for a collection of groups + * and thus their relative order in the result. Changes to {@code initialGroups} are not expected or handled; + * if {@code initialGroups} is a refreshing table, only its contents at instantiation time will be used. If + * {@code initialGroups == null}, the result will be the same as if a table with no rows was supplied. + * @param keyColumnNames The names of the key columns to aggregate by + * @return A {@link PartitionedTable} keyed by {@code keyColumnNames} + */ + @ConcurrentMethod + PartitionedTable partitionedAggBy(Collection aggregations, boolean preserveEmpty, + Table initialGroups, String... keyColumnNames); + // ----------------------------------------------------------------------------------------------------------------- // Hierarchical table operations (rollup and treeTable). 
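// Usage sketch (illustrative only, not part of this diff) for the aggBy and partitionedAggBy overloads
// introduced above. The table handles ("trades", "allSymbols") and the column names ("Sym", "Size") are
// hypothetical placeholders; only the method signatures and the preserveEmpty / initialGroups semantics
// come from the javadoc in this change.

import io.deephaven.api.ColumnName;
import io.deephaven.api.agg.Aggregation;
import io.deephaven.engine.table.PartitionedTable;
import io.deephaven.engine.table.Table;
import java.util.List;

public class PartitionedAggByExample {

    // Keep one result row per symbol in allSymbols, even for symbols with no trades yet
    // (preserveEmpty == true), and seed the group encounter order from allSymbols (initialGroups).
    public static Table totalsBySymbol(Table trades, Table allSymbols) {
        return trades.aggBy(
                List.of(Aggregation.AggSum("TotalSize=Size"), Aggregation.AggCount("TradeCount")),
                true, // preserveEmpty
                allSymbols, // initialGroups
                ColumnName.from("Sym"));
    }

    // The same keys wrapped as a PartitionedTable with one constituent table per symbol; per the javadoc,
    // a Partition aggregation is appended automatically when none is supplied in the collection.
    public static PartitionedTable perSymbol(Table trades, Table allSymbols) {
        return trades.partitionedAggBy(List.of(Aggregation.AggSum("TotalSize=Size")), true, allSymbols, "Sym");
    }
}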
// ----------------------------------------------------------------------------------------------------------------- diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractColumnSource.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractColumnSource.java index d1e4e51e8b6..d1626313b8d 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractColumnSource.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractColumnSource.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; public abstract class AbstractColumnSource implements @@ -42,6 +43,7 @@ public abstract class AbstractColumnSource implements protected final Class componentType; protected volatile Map groupToRange; + protected volatile List rowSetIndexerKey; protected AbstractColumnSource(@NotNull final Class type) { this(type, Object.class); @@ -102,6 +104,19 @@ public ColumnSource getPrevSource() { return new PrevColumnSource<>(this); } + @Override + public List getColumnSources() { + List localRowSetIndexerKey; + if ((localRowSetIndexerKey = rowSetIndexerKey) == null) { + synchronized (this) { + if ((localRowSetIndexerKey = rowSetIndexerKey) == null) { + rowSetIndexerKey = localRowSetIndexerKey = Collections.singletonList(this); + } + } + } + return localRowSetIndexerKey; + } + @Override public Map getGroupToRange() { return groupToRange; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/GroupingUtils.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/GroupingUtils.java index 209e2cf25d0..b38d0cf2389 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/GroupingUtils.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/GroupingUtils.java @@ -10,8 +10,10 @@ import io.deephaven.engine.rowset.TrackingWritableRowSet; import io.deephaven.engine.rowset.WritableRowSet; import io.deephaven.engine.table.ColumnSource; +import io.deephaven.engine.table.WritableColumnSource; import io.deephaven.engine.table.impl.indexer.RowSetIndexer; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; +import io.deephaven.engine.table.impl.sources.InMemoryColumnSource; import io.deephaven.engine.table.impl.sources.ObjectArraySource; import org.apache.commons.lang3.mutable.MutableInt; import org.jetbrains.annotations.NotNull; @@ -158,4 +160,25 @@ public static Pair, ObjectArraySource(resultKeyColumnSource, resultIndexColumnSource); } + + /** + * Convert a group-to-RowSet map to a flat, immutable, in-memory column of keys. 
+ * + * @param originalKeyColumnSource The key column source whose contents are reflected by the group-to-RowSet map + * (used for typing, only) + * @param groupToRowSet The group-to-RowSet map to convert + * @return A flat, immutable, in-memory column of keys + */ + public static WritableColumnSource groupingKeysToImmutableFlatSource( + @NotNull final ColumnSource originalKeyColumnSource, + @NotNull final Map groupToRowSet) { + final WritableColumnSource destination = InMemoryColumnSource.makeImmutableSource( + originalKeyColumnSource.getType(), originalKeyColumnSource.getComponentType()); + destination.ensureCapacity(groupToRowSet.size()); + int ri = 0; + for (final TYPE key : groupToRowSet.keySet()) { + destination.set(ri++, key); + } + return destination; + } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/HierarchicalTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/HierarchicalTable.java index 67507196fc7..145a27fda7e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/HierarchicalTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/HierarchicalTable.java @@ -160,11 +160,6 @@ public Table join(Table rightTable, MatchPair[] columnsToMatch, MatchPair[] colu return throwUnsupported("join()"); } - @Override - public Table countBy(String countColumnName, ColumnName... groupByColumns) { - return throwUnsupported("countBy()"); - } - @Override public Table ungroup(boolean nullFill, String... columnsToUngroup) { return throwUnsupported("ungroup()"); @@ -186,7 +181,7 @@ public Table aggAllBy(AggSpec spec, ColumnName... groupByColumns) { } @Override - public Table aggBy(Collection aggregations, + public Table aggBy(Collection aggregations, boolean preserveEmpty, Table initialGroups, Collection groupByColumns) { return throwUnsupported("aggBy()"); } @@ -251,6 +246,12 @@ public PartitionedTable partitionBy(boolean dropKeys, String... keyColumnNames) return throwUnsupported("partitionBy()"); } + @Override + public PartitionedTable partitionedAggBy(Collection aggregations, boolean preserveEmpty, + Table initialGroups, String... keyColumnNames) { + return throwUnsupported("partitionedAggBy()"); + } + @Override public Table rollup(Collection aggregations, boolean includeConstituents, ColumnName... 
groupByColumns) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/MemoizedOperationKey.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/MemoizedOperationKey.java index 9260173b844..884897dd3ec 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/MemoizedOperationKey.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/MemoizedOperationKey.java @@ -99,9 +99,12 @@ public static MemoizedOperationKey treeTable(String idColumn, String parentColum return new TreeTable(idColumn, parentColumn); } - public static MemoizedOperationKey aggBy(Collection aggregations, + public static MemoizedOperationKey aggBy( + Collection aggregations, + boolean preserveEmpty, + Table initialGroups, Collection groupByColumns) { - return new AggBy(new ArrayList<>(aggregations), new ArrayList<>(groupByColumns)); + return new AggBy(new ArrayList<>(aggregations), preserveEmpty, initialGroups, new ArrayList<>(groupByColumns)); } public static MemoizedOperationKey partitionBy(boolean dropKeys, Collection groupByColumns) { @@ -110,7 +113,7 @@ public static MemoizedOperationKey partitionBy(boolean dropKeys, Collection aggregations, Collection groupByColumns, boolean includeConstituents) { - return new Rollup(new AggBy(new ArrayList<>(aggregations), new ArrayList<>(groupByColumns)), + return new Rollup(new AggBy(new ArrayList<>(aggregations), false, null, new ArrayList<>(groupByColumns)), includeConstituents); } @@ -335,11 +338,27 @@ public int hashCode() { private static class AggBy extends AttributeAgnosticMemoizedOperationKey { private final List aggregations; + private final boolean preserveEmpty; + private final WeakReference initialGroups; private final List groupByColumns; - private AggBy(List aggregations, List groupByColumns) { + private final int cachedHashCode; + + private AggBy( + List aggregations, + boolean preserveEmpty, + Table initialGroups, + List groupByColumns) { this.aggregations = aggregations; + this.preserveEmpty = preserveEmpty; + this.initialGroups = initialGroups == null ? 
null : new WeakReference<>(initialGroups); this.groupByColumns = groupByColumns; + + int hash = aggregations.hashCode(); + hash = 31 * hash + Boolean.hashCode(preserveEmpty); + hash = 31 * hash + System.identityHashCode(initialGroups); + hash = 31 * hash + groupByColumns.hashCode(); + this.cachedHashCode = hash; } @Override @@ -351,14 +370,15 @@ public boolean equals(Object o) { return false; } AggBy aggBy = (AggBy) o; - return aggregations.equals(aggBy.aggregations) && groupByColumns.equals(aggBy.groupByColumns); + return aggregations.equals(aggBy.aggregations) + && preserveEmpty == aggBy.preserveEmpty + && equalWeakRefsByReferentIdentity(initialGroups, aggBy.initialGroups) + && groupByColumns.equals(aggBy.groupByColumns); } @Override public int hashCode() { - int result = aggregations.hashCode(); - result = 31 * result + groupByColumns.hashCode(); - return result; + return cachedHashCode; } @Override @@ -530,11 +550,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; final CrossJoin crossJoin = (CrossJoin) o; - final Table rTable = rightTableCandidate.get(); - final Table oTable = crossJoin.rightTableCandidate.get(); - if (rTable == null || oTable == null) - return false; - return rTable == oTable && + return equalWeakRefsByReferentIdentity(rightTableCandidate, crossJoin.rightTableCandidate) && numRightBitsToReserve == crossJoin.numRightBitsToReserve && Arrays.equals(columnsToMatch, crossJoin.columnsToMatch) && Arrays.equals(columnsToAdd, crossJoin.columnsToAdd); @@ -555,4 +571,19 @@ public static CrossJoin crossJoin(final Table rightTableCandidate, final MatchPa final MatchPair[] columnsToAdd, final int numRightBitsToReserve) { return new CrossJoin(rightTableCandidate, columnsToMatch, columnsToAdd, numRightBitsToReserve); } + + private static boolean equalWeakRefsByReferentIdentity(final WeakReference r1, final WeakReference r2) { + if (r1 == r2) { + return true; + } + if (r1 == null || r2 == null) { + return false; + } + final Object t1 = r1.get(); + final Object t2 = r2.get(); + if (t1 == null || t2 == null) { + return false; + } + return t1 == t2; + } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index 43645615a35..d30bd7c66c7 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -477,6 +477,35 @@ public PartitionedTable partitionBy(final boolean dropKeys, final String... keyC }); } + @Override + public PartitionedTable partitionedAggBy(final Collection aggregations, + final boolean preserveEmpty, @Nullable final Table initialGroups, final String... keyColumnNames) { + if (isStream()) { + throw streamUnsupported("partitionedAggBy"); + } + final Optional includedPartition = aggregations.stream() + .filter(agg -> agg instanceof Partition) + .map(agg -> (Partition) agg) + .findFirst(); + final Partition partition = includedPartition.orElseGet(() -> Partition.of(CONSTITUENT)); + final Collection aggregationsToUse = includedPartition.isPresent() + ? 
aggregations + : Stream.concat(aggregations.stream(), Stream.of(partition)).collect(Collectors.toList()); + final Table aggregated = + aggBy(aggregationsToUse, preserveEmpty, initialGroups, ColumnName.from(keyColumnNames)); + final Set keyColumnNamesSet = + Arrays.stream(keyColumnNames).collect(Collectors.toCollection(LinkedHashSet::new)); + final TableDefinition constituentDefinition; + if (partition.includeGroupByColumns()) { + constituentDefinition = definition; + } else { + constituentDefinition = TableDefinition.of(definition.getColumnStream() + .filter(cd -> !keyColumnNamesSet.contains(cd.getName())).toArray(ColumnDefinition[]::new)); + } + return new PartitionedTableImpl(aggregated, keyColumnNamesSet, true, partition.column().name(), + constituentDefinition, isRefreshing(), false); + } + @Override public Table rollup(Collection aggregations, boolean includeConstituents, ColumnName... groupByColumns) { @@ -597,8 +626,7 @@ public Table aggAllBy(AggSpec spec, ColumnName... groupByColumns) { } } final List groupByList = Arrays.asList(groupByColumns); - final List tableColumns = - definition.getColumnNames().stream().map(ColumnName::of).collect(Collectors.toList()); + final List tableColumns = definition.getTypedColumnNames(); final Optional agg = AggregateAllByTable.singleAggregation(spec, groupByList, tableColumns); if (agg.isEmpty()) { throw new IllegalArgumentException( @@ -606,9 +634,10 @@ public Table aggAllBy(AggSpec spec, ColumnName... groupByColumns) { } final QueryTable tableToUse = (QueryTable) AggAllByUseTable.of(this, spec); final List aggs = List.of(agg.get()); - final MemoizedOperationKey aggKey = MemoizedOperationKey.aggBy(aggs, groupByList); + final MemoizedOperationKey aggKey = MemoizedOperationKey.aggBy(aggs, false, null, groupByList); return tableToUse.memoizeResult(aggKey, () -> { - final QueryTable result = tableToUse.aggNoMemo(AggregationProcessor.forAggregation(aggs), groupByList); + final QueryTable result = + tableToUse.aggNoMemo(AggregationProcessor.forAggregation(aggs), false, null, groupByList); spec.walk(new AggAllByCopyAttributes(this, result)); return result; }); @@ -617,17 +646,19 @@ public Table aggAllBy(AggSpec spec, ColumnName... groupByColumns) { @Override public Table aggBy( final Collection aggregations, + final boolean preserveEmpty, + final Table initialGroups, final Collection groupByColumns) { if (aggregations.isEmpty()) { throw new IllegalArgumentException( "aggBy must have at least one aggregation, none specified. groupByColumns=" + toString(groupByColumns)); } - final List optimized = AggregationOptimizer.of(aggregations); - final MemoizedOperationKey aggKey = MemoizedOperationKey.aggBy(optimized, groupByColumns); - final Table aggregationTable = - memoizeResult(aggKey, () -> aggNoMemo(AggregationProcessor.forAggregation(optimized), groupByColumns)); + final MemoizedOperationKey aggKey = + MemoizedOperationKey.aggBy(optimized, preserveEmpty, initialGroups, groupByColumns); + final Table aggregationTable = memoizeResult(aggKey, () -> aggNoMemo( + AggregationProcessor.forAggregation(optimized), preserveEmpty, initialGroups, groupByColumns)); final List optimizedOrder = AggregationPairs.outputsOf(optimized).collect(Collectors.toList()); final List userOrder = AggregationPairs.outputsOf(aggregations).collect(Collectors.toList()); @@ -641,19 +672,16 @@ public Table aggBy( return aggregationTable.view(resultOrder); } - @Override - public Table countBy(String countColumnName, ColumnName... 
groupByColumns) { - return QueryPerformanceRecorder.withNugget( - "countBy(" + countColumnName + "," + Arrays.toString(groupByColumns) + ")", sizeForInstrumentation(), - () -> aggBy(Aggregation.AggCount(countColumnName), Arrays.asList(groupByColumns))); - } - - private QueryTable aggNoMemo(@NotNull final AggregationContextFactory aggregationContextFactory, + private QueryTable aggNoMemo( + @NotNull final AggregationContextFactory aggregationContextFactory, + final boolean preserveEmpty, + @Nullable final Table initialGroups, @NotNull final Collection groupByColumns) { final String description = "aggregation(" + aggregationContextFactory + ", " + groupByColumns + ")"; return QueryPerformanceRecorder.withNugget(description, sizeForInstrumentation(), - () -> ChunkedOperatorAggregationHelper.aggregation(aggregationContextFactory, this, groupByColumns)); + () -> ChunkedOperatorAggregationHelper.aggregation( + aggregationContextFactory, this, preserveEmpty, initialGroups, groupByColumns)); } private static UnsupportedOperationException streamUnsupported(@NotNull final String operationName) { @@ -2863,9 +2891,9 @@ public Table selectDistinct(Collection columns) { return view(columns).selectDistinct(); } final MemoizedOperationKey aggKey = - MemoizedOperationKey.aggBy(Collections.emptyList(), columnNames); + MemoizedOperationKey.aggBy(Collections.emptyList(), false, null, columnNames); return memoizeResult(aggKey, - () -> aggNoMemo(AggregationProcessor.forSelectDistinct(), columnNames)); + () -> aggNoMemo(AggregationProcessor.forSelectDistinct(), false, null, columnNames)); }); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/TableWithDefaults.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/TableWithDefaults.java index dc0bbe6784f..5c25019800e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/TableWithDefaults.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/TableWithDefaults.java @@ -662,6 +662,18 @@ default Table aggBy(Aggregation aggregation) { return aggBy(List.of(aggregation)); } + @Override + @ConcurrentMethod + default Table aggBy(Collection aggregations) { + return aggBy(aggregations, Collections.emptyList()); + } + + @Override + @ConcurrentMethod + default Table aggBy(Collection aggregations, boolean preserveEmpty) { + return aggBy(aggregations, preserveEmpty, null, Collections.emptyList()); + } + @Override @ConcurrentMethod default Table aggBy(Aggregation aggregation, String... groupByColumns) { @@ -682,8 +694,9 @@ default Table aggBy(Collection aggregations, String... gr @Override @ConcurrentMethod - default Table aggBy(Collection aggregations) { - return aggBy(aggregations, Collections.emptyList()); + default Table aggBy(Collection aggregations, + Collection groupByColumns) { + return aggBy(aggregations, false, null, groupByColumns); } @Override @@ -697,6 +710,7 @@ default Table tailBy(long nRows, Collection groupByColumnNames) { } @Override + @ConcurrentMethod default Table applyToAllBy(String formulaColumn, String columnParamName, Collection groupByColumns) { return aggAllBy(AggSpec.formula(formulaColumn, columnParamName), groupByColumns.toArray(ColumnName[]::new)); @@ -1002,6 +1016,12 @@ default Table medianBy() { return medianBy(ZERO_LENGTH_COLUMNNAME_ARRAY); } + @Override + @ConcurrentMethod + default Table countBy(String countColumnName, ColumnName... 
groupByColumns) { + return aggBy(Aggregation.AggCount(countColumnName), Arrays.asList(groupByColumns)); + } + @Override @ConcurrentMethod default Table countBy(String countColumnName, String... groupByColumns) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/UncoalescedTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/UncoalescedTable.java index 56663b5b2b8..b98c9f15557 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/UncoalescedTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/UncoalescedTable.java @@ -343,12 +343,6 @@ public Table join(Table rightTable, MatchPair[] columnsToMatch, MatchPair[] colu return coalesce().join(rightTable, columnsToMatch, columnsToAdd, numRightBitsToReserve); } - @Override - @ConcurrentMethod - public Table groupBy(Collection groupByColumns) { - return coalesce().groupBy(groupByColumns); - } - @Override @ConcurrentMethod public Table aggAllBy(AggSpec spec, ColumnName... groupByColumns) { @@ -357,9 +351,9 @@ public Table aggAllBy(AggSpec spec, ColumnName... groupByColumns) { @Override @ConcurrentMethod - public Table aggBy(Collection aggregations, + public Table aggBy(Collection aggregations, boolean preserveEmpty, Table initialGroups, Collection groupByColumns) { - return coalesce().aggBy(aggregations, groupByColumns); + return coalesce().aggBy(aggregations, preserveEmpty, initialGroups, groupByColumns); } @Override @@ -372,91 +366,6 @@ public Table tailBy(long nRows, String... groupByColumnNames) { return coalesce().tailBy(nRows, groupByColumnNames); } - @Override - @ConcurrentMethod - public Table applyToAllBy(String formulaColumn, String columnParamName, - Collection groupByColumns) { - return coalesce().applyToAllBy(formulaColumn, columnParamName, groupByColumns); - } - - @Override - @ConcurrentMethod - public Table sumBy(ColumnName... groupByColumns) { - return coalesce().sumBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table absSumBy(ColumnName... groupByColumns) { - return coalesce().absSumBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table avgBy(ColumnName... groupByColumns) { - return coalesce().avgBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table wavgBy(String weightColumn, ColumnName... groupByColumns) { - return coalesce().wavgBy(weightColumn, groupByColumns); - } - - @Override - @ConcurrentMethod - public Table wsumBy(String weightColumn, ColumnName... groupByColumns) { - return coalesce().wsumBy(weightColumn, groupByColumns); - } - - @Override - @ConcurrentMethod - public Table stdBy(ColumnName... groupByColumns) { - return coalesce().stdBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table varBy(ColumnName... groupByColumns) { - return coalesce().varBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table lastBy(ColumnName... groupByColumns) { - return coalesce().lastBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table firstBy(ColumnName... groupByColumns) { - return coalesce().firstBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table minBy(ColumnName... groupByColumns) { - return coalesce().minBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table maxBy(ColumnName... groupByColumns) { - return coalesce().maxBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table medianBy(ColumnName... 
groupByColumns) { - return coalesce().medianBy(groupByColumns); - } - - @Override - @ConcurrentMethod - public Table countBy(String countColumnName, ColumnName... groupByColumns) { - return coalesce().countBy(countColumnName, groupByColumns); - } - @Override public Table ungroup(boolean nullFill, String... columnsToUngroup) { return coalesce().ungroup(nullFill, columnsToUngroup); @@ -468,6 +377,13 @@ public PartitionedTable partitionBy(boolean dropKeys, String... keyColumnNames) return coalesce().partitionBy(dropKeys, keyColumnNames); } + @Override + @ConcurrentMethod + public PartitionedTable partitionedAggBy(Collection aggregations, boolean preserveEmpty, + Table initialGroups, String... keyColumnNames) { + return coalesce().partitionedAggBy(aggregations, preserveEmpty, initialGroups, keyColumnNames); + } + @Override @ConcurrentMethod public Table rollup(Collection aggregations, boolean includeConstituents, diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContext.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContext.java index 5a52d0b9137..683a46afce9 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContext.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContext.java @@ -3,6 +3,7 @@ */ package io.deephaven.engine.table.impl.by; +import io.deephaven.base.verify.Assert; import io.deephaven.engine.liveness.LivenessReferent; import io.deephaven.engine.table.*; import io.deephaven.engine.table.impl.TableUpdateImpl; @@ -175,6 +176,19 @@ void startTrackingPrevValues() { } } + /** + * Get any single {@link StateChangeRecorder} present in the {@code operators} array. + * + * @return Any single {@link StateChangeRecorder} present in the {@code operators} array + * @throws io.deephaven.base.verify.AssertionFailure If there is no state change recorder present + */ + StateChangeRecorder getStateChangeRecorder() { + return (StateChangeRecorder) Arrays.stream(operators) + .filter(op -> op instanceof StateChangeRecorder) + .findAny() + .orElseThrow(Assert::statementNeverExecuted); + } + /** * The helper passes in the result column source map, which contains the key columns if any. The context is * responsible for filling in the columns generated by the operators or transformations. diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContextFactory.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContextFactory.java index 9c549639c31..207b0b7706f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContextFactory.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationContextFactory.java @@ -16,8 +16,11 @@ public interface AggregationContextFactory { * Make an {@link AggregationContext} for this aggregation. * * @param table The source {@link Table} to aggregate + * @param requireStateChangeRecorder Whether the resulting context is required to have an operator that extends + * {@link StateChangeRecorder} * @param groupByColumns The key column names * @return A new or safely reusable {@link AggregationContext} */ - AggregationContext makeAggregationContext(@NotNull Table table, @NotNull String... groupByColumns); + AggregationContext makeAggregationContext( + @NotNull Table table, boolean requireStateChangeRecorder, @NotNull String... 
groupByColumns); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationControl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationControl.java index a8d90fbcc1d..aafb062e870 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationControl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationControl.java @@ -15,6 +15,11 @@ @VisibleForTesting public class AggregationControl { + private static final int CHUNK_SIZE = ChunkedOperatorAggregationHelper.CHUNK_SIZE; + private static final int MINIMUM_INITIAL_HASH_SIZE = CHUNK_SIZE; + private static final double DEFAULT_MAX_LOAD_FACTOR = 0.75; + private static final double DEFAULT_TARGET_LOAD_FACTOR = 0.70; + public static final AggregationControl DEFAULT = new AggregationControl(); public static final AggregationControl DEFAULT_FOR_OPERATOR = new AggregationControl() { @Override @@ -25,15 +30,15 @@ public boolean considerGrouping(@NotNull Table table, @NotNull ColumnSource[] public int initialHashTableSize(@NotNull final Table inputTable) { // TODO: This approach relies on rehash. Maybe we should consider sampling instead. - return IncrementalChunkedOperatorAggregationStateManager.MINIMUM_INITIAL_HASH_SIZE; + return MINIMUM_INITIAL_HASH_SIZE; } public double getTargetLoadFactor() { - return IncrementalChunkedOperatorAggregationStateManager.DEFAULT_TARGET_LOAD_FACTOR; + return DEFAULT_TARGET_LOAD_FACTOR; } public double getMaximumLoadFactor() { - return IncrementalChunkedOperatorAggregationStateManager.DEFAULT_MAX_LOAD_FACTOR; + return DEFAULT_MAX_LOAD_FACTOR; } public boolean considerGrouping(@NotNull final Table inputTable, @NotNull final ColumnSource[] sources) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationProcessor.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationProcessor.java index 8494dd4a0bc..39427a84a73 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationProcessor.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/AggregationProcessor.java @@ -273,17 +273,19 @@ public String toString() { // ----------------------------------------------------------------------------------------------------------------- @Override - public AggregationContext makeAggregationContext(@NotNull final Table table, + public AggregationContext makeAggregationContext( + @NotNull final Table table, + final boolean requireStateChangeRecorder, @NotNull final String... 
groupByColumnNames) { switch (type) { case NORMAL: - return new NormalConverter(table, groupByColumnNames).build(); + return new NormalConverter(table, requireStateChangeRecorder, groupByColumnNames).build(); case ROLLUP_BASE: - return new RollupBaseConverter(table, groupByColumnNames).build(); + return new RollupBaseConverter(table, requireStateChangeRecorder, groupByColumnNames).build(); case ROLLUP_REAGGREGATED: - return new RollupReaggregatedConverter(table, groupByColumnNames).build(); + return new RollupReaggregatedConverter(table, requireStateChangeRecorder, groupByColumnNames).build(); case SELECT_DISTINCT: - return makeEmptyAggregationContext(); + return makeEmptyAggregationContext(requireStateChangeRecorder); default: throw new UnsupportedOperationException("Unsupported type " + type); } @@ -300,6 +302,7 @@ public AggregationContext makeAggregationContext(@NotNull final Table table, private abstract class Converter implements Aggregation.Visitor, AggSpec.Visitor { final QueryTable table; + private final boolean requireStateChangeRecorder; final String[] groupByColumnNames; final boolean isAddOnly; @@ -315,8 +318,12 @@ private abstract class Converter implements Aggregation.Visitor, AggSpec.Visitor int trackedFirstOrLastIndex = -1; boolean partitionFound; - private Converter(@NotNull final Table table, @NotNull final String... groupByColumnNames) { + private Converter( + @NotNull final Table table, + final boolean requireStateChangeRecorder, + @NotNull final String... groupByColumnNames) { this.table = (QueryTable) table.coalesce(); + this.requireStateChangeRecorder = requireStateChangeRecorder; this.groupByColumnNames = groupByColumnNames; isAddOnly = this.table.isAddOnly(); isStream = this.table.isStream(); @@ -335,6 +342,9 @@ final void walkAllAggregations() { @NotNull final AggregationContext makeAggregationContext() { + if (requireStateChangeRecorder && operators.stream().noneMatch(op -> op instanceof StateChangeRecorder)) { + addNoInputOperator(new CountAggregationOperator(null)); + } // noinspection unchecked return new AggregationContext( operators.toArray(IterativeChunkedAggregationOperator[]::new), @@ -642,8 +652,11 @@ final void addWeightedAvgOrSumOperator(@NotNull final String weightName, final b */ private final class NormalConverter extends Converter { - private NormalConverter(@NotNull final Table table, @NotNull final String... groupByColumnNames) { - super(table, groupByColumnNames); + private NormalConverter( + @NotNull final Table table, + final boolean requireStateChangeRecorder, + @NotNull final String... groupByColumnNames) { + super(table, requireStateChangeRecorder, groupByColumnNames); } // ------------------------------------------------------------------------------------------------------------- @@ -896,8 +909,11 @@ private final class RollupBaseConverter extends Converter private int nextColumnIdentifier = 0; - private RollupBaseConverter(@NotNull final Table table, @NotNull final String... groupByColumnNames) { - super(table, groupByColumnNames); + private RollupBaseConverter( + @NotNull final Table table, + final boolean requireStateChangeRecorder, + @NotNull final String... groupByColumnNames) { + super(table, requireStateChangeRecorder, groupByColumnNames); } @Override @@ -1034,8 +1050,11 @@ private final class RollupReaggregatedConverter extends Converter private int nextColumnIdentifier = 0; - private RollupReaggregatedConverter(@NotNull final Table table, @NotNull final String... 
groupByColumnNames) { - super(table, groupByColumnNames); + private RollupReaggregatedConverter( + @NotNull final Table table, + final boolean requireStateChangeRecorder, + @NotNull final String... groupByColumnNames) { + super(table, requireStateChangeRecorder, groupByColumnNames); } // ------------------------------------------------------------------------------------------------------------- @@ -1327,7 +1346,14 @@ private OP_TYPE getAndAddB // Basic Helpers // ----------------------------------------------------------------------------------------------------------------- - private static AggregationContext makeEmptyAggregationContext() { + private static AggregationContext makeEmptyAggregationContext(final boolean requireStateChangeRecorder) { + if (requireStateChangeRecorder) { + // noinspection unchecked + return new AggregationContext( + new IterativeChunkedAggregationOperator[] {new CountAggregationOperator(null)}, + new String[][] {ZERO_LENGTH_STRING_ARRAY}, + new ChunkSource.WithPrev[] {null}); + } // noinspection unchecked return new AggregationContext( ZERO_LENGTH_ITERATIVE_CHUNKED_AGGREGATION_OPERATOR_ARRAY, diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseAddOnlyFirstOrLastChunkedOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseAddOnlyFirstOrLastChunkedOperator.java index f6fc5fcac4f..b1c88357e58 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseAddOnlyFirstOrLastChunkedOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseAddOnlyFirstOrLastChunkedOperator.java @@ -18,7 +18,10 @@ import java.util.LinkedHashMap; import java.util.Map; -abstract class BaseAddOnlyFirstOrLastChunkedOperator implements IterativeChunkedAggregationOperator { +abstract class BaseAddOnlyFirstOrLastChunkedOperator + extends NoopStateChangeRecorder // We can never empty or reincarnate states since we're add-only + implements IterativeChunkedAggregationOperator { + final boolean isFirst; final LongArraySource redirections; private final LongColumnSourceWritableRowRedirection rowRedirection; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseStreamFirstOrLastChunkedOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseStreamFirstOrLastChunkedOperator.java index 49c3b435037..e19e0e6759d 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseStreamFirstOrLastChunkedOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BaseStreamFirstOrLastChunkedOperator.java @@ -22,7 +22,9 @@ /** * Base class with shared boilerplate for {@link StreamFirstChunkedOperator} and {@link StreamLastChunkedOperator}. 
*/ -public abstract class BaseStreamFirstOrLastChunkedOperator implements IterativeChunkedAggregationOperator { +public abstract class BaseStreamFirstOrLastChunkedOperator + extends NoopStateChangeRecorder // We can never empty or reincarnate states since we ignore removes + implements IterativeChunkedAggregationOperator { protected static final int COPY_CHUNK_SIZE = ArrayBackedColumnSource.BLOCK_SIZE; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BasicStateChangeRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BasicStateChangeRecorder.java new file mode 100644 index 00000000000..4b50e7238d0 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/BasicStateChangeRecorder.java @@ -0,0 +1,48 @@ +package io.deephaven.engine.table.impl.by; + +import java.util.function.LongConsumer; + +/** + * Re-usable support for recording reincarnated and emptied states in incremental aggregation processing. + */ +class BasicStateChangeRecorder implements StateChangeRecorder { + + private LongConsumer reincarnatedDestinationCallback; + private LongConsumer emptiedDestinationCallback; + + @Override + public final void startRecording( + final LongConsumer reincarnatedDestinationCallback, + final LongConsumer emptiedDestinationCallback) { + this.reincarnatedDestinationCallback = reincarnatedDestinationCallback; + this.emptiedDestinationCallback = emptiedDestinationCallback; + } + + @Override + public final void finishRecording() { + reincarnatedDestinationCallback = null; + emptiedDestinationCallback = null; + } + + /** + * Record a reincarnated {@code destination}. + * + * @param destination The destination slot that has been reincarnated + */ + final void onReincarnated(final long destination) { + if (reincarnatedDestinationCallback != null) { + reincarnatedDestinationCallback.accept(destination); + } + } + + /** + * Record an emptied {@code destination}. 
+ * + * @param destination The destination slot that has been emptied + */ + final void onEmptied(final long destination) { + if (emptiedDestinationCallback != null) { + emptiedDestinationCallback.accept(destination); + } + } +} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/ChunkedOperatorAggregationHelper.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/ChunkedOperatorAggregationHelper.java index 63b16cc1984..97b4826d52e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/ChunkedOperatorAggregationHelper.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/ChunkedOperatorAggregationHelper.java @@ -22,6 +22,7 @@ import io.deephaven.engine.table.impl.indexer.RowSetIndexer; import io.deephaven.engine.rowset.chunkattributes.OrderedRowKeys; import io.deephaven.engine.rowset.chunkattributes.RowKeys; +import io.deephaven.engine.table.impl.remote.ConstructSnapshot; import io.deephaven.time.DateTime; import io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; @@ -47,6 +48,8 @@ import java.util.*; import java.util.function.Consumer; +import java.util.function.LongFunction; +import java.util.function.Supplier; import java.util.function.UnaryOperator; @SuppressWarnings("rawtypes") @@ -58,65 +61,101 @@ public class ChunkedOperatorAggregationHelper { Configuration.getInstance().getBooleanWithDefault("ChunkedOperatorAggregationHelper.skipRunFind", false); static final boolean HASHED_RUN_FIND = Configuration.getInstance().getBooleanWithDefault("ChunkedOperatorAggregationHelper.hashedRunFind", true); - static boolean USE_TYPED_STATE_MANAGER = - Configuration.getInstance().getBooleanWithDefault("ChunkedOperatorAggregationHelper.useTypedStateManager", - false); static boolean USE_OPEN_ADDRESSED_STATE_MANAGER = Configuration.getInstance().getBooleanWithDefault( "ChunkedOperatorAggregationHelper.useOpenAddressedStateManager", true); - static boolean USE_BITMAP_MODIFIED_STATES_BUILDER = - Configuration.getInstance().getBooleanWithDefault( - "ChunkedOperatorAggregationHelper.useBitmapModifiedStatesBuilder", - true); - public static QueryTable aggregation(AggregationContextFactory aggregationContextFactory, QueryTable queryTable, - Collection groupByColumns) { - return aggregation(AggregationControl.DEFAULT_FOR_OPERATOR, aggregationContextFactory, queryTable, - groupByColumns); + public static QueryTable aggregation( + @NotNull final AggregationContextFactory aggregationContextFactory, + @NotNull final QueryTable input, + final boolean preserveEmpty, + @Nullable final Table initialKeys, + @NotNull final Collection groupByColumns) { + return aggregation(AggregationControl.DEFAULT_FOR_OPERATOR, + aggregationContextFactory, input, preserveEmpty, initialKeys, groupByColumns); } @VisibleForTesting - public static QueryTable aggregation(AggregationControl control, - AggregationContextFactory aggregationContextFactory, QueryTable queryTable, - Collection groupByColumns) { + public static QueryTable aggregation( + @NotNull final AggregationControl control, + @NotNull final AggregationContextFactory aggregationContextFactory, + @NotNull final QueryTable input, + final boolean preserveEmpty, + @Nullable final Table initialKeys, + @NotNull final Collection groupByColumns) { + final String[] keyNames = groupByColumns.stream().map(ColumnName::name).toArray(String[]::new); + if (!input.hasColumns(keyNames)) { + throw new IllegalArgumentException("aggregation: not all group-by columns " + Arrays.toString(keyNames) + + " 
are present in input table with columns " + + Arrays.toString(input.getDefinition().getColumnNamesArray())); + } + if (initialKeys != null) { + if (keyNames.length == 0) { + throw new IllegalArgumentException( + "aggregation: initial groups must not be specified if no group-by columns are specified"); + } + if (!initialKeys.hasColumns(keyNames)) { + throw new IllegalArgumentException("aggregation: not all group-by columns " + Arrays.toString(keyNames) + + " are present in initial groups table with columns " + + Arrays.toString(initialKeys.getDefinition().getColumnNamesArray())); + } + for (final String keyName : keyNames) { + final ColumnDefinition inputDef = input.getDefinition().getColumn(keyName); + final ColumnDefinition initialKeysDef = initialKeys.getDefinition().getColumn(keyName); + if (!inputDef.isCompatible(initialKeysDef)) { + throw new IllegalArgumentException( + "aggregation: column definition mismatch between input table and initial groups table for " + + keyName + " input has " + inputDef.describeForCompatibility() + + ", initial groups has " + initialKeysDef.describeForCompatibility()); + } + } + } final Mutable resultHolder = new MutableObject<>(); - final SwapListener swapListener = queryTable.createSwapListenerIfRefreshing(SwapListener::new); + final SwapListener swapListener = input.createSwapListenerIfRefreshing(SwapListener::new); BaseTable.initializeWithSnapshot( "by(" + aggregationContextFactory + ", " + groupByColumns + ")", swapListener, (usePrev, beforeClockValue) -> { - resultHolder.setValue(aggregation(control, swapListener, aggregationContextFactory, queryTable, - groupByColumns, usePrev)); + resultHolder.setValue(aggregation(control, swapListener, aggregationContextFactory, + input, preserveEmpty, initialKeys, keyNames, usePrev)); return true; }); return resultHolder.getValue(); } - private static QueryTable aggregation(AggregationControl control, SwapListener swapListener, - AggregationContextFactory aggregationContextFactory, QueryTable withView, - Collection groupByColumns, - boolean usePrev) { - if (groupByColumns.isEmpty()) { - return noKeyAggregation(swapListener, aggregationContextFactory, withView, usePrev); + private static QueryTable aggregation( + @NotNull final AggregationControl control, + @Nullable final SwapListener swapListener, + @NotNull final AggregationContextFactory aggregationContextFactory, + @NotNull final QueryTable input, + final boolean preserveEmpty, + @Nullable final Table initialKeys, + @NotNull final String[] keyNames, + final boolean usePrev) { + if (keyNames.length == 0) { + // This should be checked before this method is called, but let's verify here in case an additional + // entry point is added incautiously. 
+ Assert.eqNull(initialKeys, "initialKeys"); + return noKeyAggregation(swapListener, aggregationContextFactory, input, preserveEmpty, usePrev); } - final String[] keyNames = groupByColumns.stream().map(ColumnName::name).toArray(String[]::new); final ColumnSource[] keySources = - Arrays.stream(keyNames).map(withView::getColumnSource).toArray(ColumnSource[]::new); + Arrays.stream(keyNames).map(input::getColumnSource).toArray(ColumnSource[]::new); final ColumnSource[] reinterpretedKeySources = Arrays.stream(keySources) .map(ReinterpretUtils::maybeConvertToPrimitive).toArray(ColumnSource[]::new); - final AggregationContext ac = aggregationContextFactory.makeAggregationContext(withView, keyNames); + final AggregationContext ac = aggregationContextFactory.makeAggregationContext( + input, input.isRefreshing() && !preserveEmpty, keyNames); final PermuteKernel[] permuteKernels = ac.makePermuteKernels(); final boolean useGrouping; - if (control.considerGrouping(withView, keySources)) { + if (control.considerGrouping(input, keySources)) { Assert.eq(keySources.length, "keySources.length", 1); - final boolean hasGrouping = RowSetIndexer.of(withView.getRowSet()).hasGrouping(keySources[0]); - if (!withView.isRefreshing() && hasGrouping) { - return staticGroupedAggregation(withView, keyNames[0], keySources[0], ac); + final boolean hasGrouping = RowSetIndexer.of(input.getRowSet()).hasGrouping(keySources[0]); + if (!input.isRefreshing() && hasGrouping && initialKeys == null) { + return staticGroupedAggregation(input, keyNames[0], keySources[0], ac); } // we have no hasPrevGrouping method useGrouping = !usePrev && hasGrouping && Arrays.equals(reinterpretedKeySources, keySources); @@ -124,54 +163,25 @@ private static QueryTable aggregation(AggregationControl control, SwapListener s useGrouping = false; } + final MutableInt outputPosition = new MutableInt(); + final Supplier stateManagerSupplier = + () -> makeStateManager(control, input, keySources, reinterpretedKeySources, ac); final OperatorAggregationStateManager stateManager; - final IncrementalOperatorAggregationStateManager incrementalStateManager; - if (withView.isRefreshing()) { - if (USE_OPEN_ADDRESSED_STATE_MANAGER) { - stateManager = incrementalStateManager = TypedHasherFactory.make( - IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.class, - reinterpretedKeySources, - keySources, control.initialHashTableSize(withView), control.getMaximumLoadFactor(), - control.getTargetLoadFactor()); - } else if (USE_TYPED_STATE_MANAGER) { - stateManager = incrementalStateManager = TypedHasherFactory.make( - IncrementalChunkedOperatorAggregationStateManagerTypedBase.class, reinterpretedKeySources, - keySources, control.initialHashTableSize(withView), control.getMaximumLoadFactor(), - control.getTargetLoadFactor()); - } else { - stateManager = incrementalStateManager = new IncrementalChunkedOperatorAggregationStateManager( - reinterpretedKeySources, control.initialHashTableSize(withView), control.getMaximumLoadFactor(), - control.getTargetLoadFactor()); - } + if (initialKeys == null) { + stateManager = stateManagerSupplier.get(); } else { - if (USE_OPEN_ADDRESSED_STATE_MANAGER) { - stateManager = TypedHasherFactory.make( - StaticChunkedOperatorAggregationStateManagerOpenAddressedBase.class, reinterpretedKeySources, - keySources, control.initialHashTableSize(withView), control.getMaximumLoadFactor(), - control.getTargetLoadFactor()); - } else if (USE_TYPED_STATE_MANAGER) { - stateManager = TypedHasherFactory.make( - 
StaticChunkedOperatorAggregationStateManagerTypedBase.class, reinterpretedKeySources, - keySources, control.initialHashTableSize(withView), control.getMaximumLoadFactor(), - control.getTargetLoadFactor()); - } else { - stateManager = new StaticChunkedOperatorAggregationStateManager(reinterpretedKeySources, - control.initialHashTableSize(withView), control.getMaximumLoadFactor(), - control.getTargetLoadFactor()); - } - incrementalStateManager = null; + stateManager = initialKeyTableAddition(control, initialKeys, keyNames, ac, outputPosition, + stateManagerSupplier); } - setReverseLookupFunction(keySources, ac, stateManager); - - final MutableInt outputPosition = new MutableInt(); + final RowSetBuilderRandom initialRowsBuilder = + initialKeys != null && !preserveEmpty ? new BitmapRandomBuilder(stateManager.maxTableSize() - 1) : null; if (useGrouping) { - // This must be incremental, otherwise we would have done this earlier - initialGroupedKeyAddition(withView, reinterpretedKeySources, ac, incrementalStateManager, outputPosition, - usePrev); + initialGroupedKeyAddition(input, reinterpretedKeySources, ac, stateManager, outputPosition, + initialRowsBuilder, usePrev); } else { - initialBucketedKeyAddition(withView, reinterpretedKeySources, ac, permuteKernels, stateManager, - outputPosition, usePrev); + initialBucketedKeyAddition(input, reinterpretedKeySources, ac, permuteKernels, stateManager, + outputPosition, initialRowsBuilder, usePrev); } // Construct and return result table @@ -181,7 +191,7 @@ private static QueryTable aggregation(AggregationControl control, SwapListener s // Gather the result key columns final ColumnSource[] keyColumnsRaw = new ColumnSource[keyHashTableSources.length]; final ArrayBackedColumnSource[] keyColumnsCopied = - withView.isRefreshing() ? new ArrayBackedColumnSource[keyHashTableSources.length] : null; + input.isRefreshing() ? new ArrayBackedColumnSource[keyHashTableSources.length] : null; for (int kci = 0; kci < keyHashTableSources.length; ++kci) { ColumnSource resultKeyColumnSource = keyHashTableSources[kci]; if (keySources[kci] != reinterpretedKeySources[kci]) { @@ -189,7 +199,7 @@ private static QueryTable aggregation(AggregationControl control, SwapListener s ReinterpretUtils.convertToOriginal(keySources[kci].getType(), resultKeyColumnSource); } keyColumnsRaw[kci] = resultKeyColumnSource; - if (withView.isRefreshing()) { + if (input.isRefreshing()) { // noinspection ConstantConditions,unchecked keyColumnsCopied[kci] = ArrayBackedColumnSource.getMemoryColumnSource(outputPosition.intValue(), keyColumnsRaw[kci].getType()); @@ -200,9 +210,10 @@ private static QueryTable aggregation(AggregationControl control, SwapListener s } ac.getResultColumns(resultColumnSourceMap); - final TrackingWritableRowSet resultRowSet = - RowSetFactory.flat(outputPosition.intValue()).toTracking(); - if (withView.isRefreshing()) { + final TrackingWritableRowSet resultRowSet = (initialRowsBuilder == null + ? 
RowSetFactory.flat(outputPosition.intValue()) + : initialRowsBuilder.build()).toTracking(); + if (input.isRefreshing()) { copyKeyColumns(keyColumnsRaw, keyColumnsCopied, resultRowSet); } @@ -210,24 +221,29 @@ private static QueryTable aggregation(AggregationControl control, SwapListener s final QueryTable result = new QueryTable(resultRowSet, resultColumnSourceMap); ac.propagateInitialStateToOperators(result); - if (withView.isRefreshing()) { + if (input.isRefreshing()) { assert keyColumnsCopied != null; ac.startTrackingPrevValues(); + final IncrementalOperatorAggregationStateManager incrementalStateManager = + (IncrementalOperatorAggregationStateManager) stateManager; incrementalStateManager.startTrackingPrevValues(); - final boolean isStream = withView.isStream(); + final boolean isStream = input.isStream(); final TableUpdateListener listener = - new BaseTable.ListenerImpl("by(" + aggregationContextFactory + ")", withView, result) { + new BaseTable.ListenerImpl("by(" + aggregationContextFactory + ")", input, result) { @ReferentialIntegrity final SwapListener swapListenerHardReference = swapListener; - final ModifiedColumnSet keysUpstreamModifiedColumnSet = withView.newModifiedColumnSet(keyNames); + final ModifiedColumnSet keysUpstreamModifiedColumnSet = input.newModifiedColumnSet(keyNames); final ModifiedColumnSet[] operatorInputModifiedColumnSets = - ac.getInputModifiedColumnSets(withView); + ac.getInputModifiedColumnSets(input); final UnaryOperator[] resultModifiedColumnSetFactories = ac.initializeRefreshing(result, this); + final StateChangeRecorder stateChangeRecorder = + preserveEmpty ? null : ac.getStateChangeRecorder(); + @Override public void onUpdate(@NotNull final TableUpdate upstream) { incrementalStateManager.beginUpdateCycle(); @@ -239,9 +255,9 @@ public void onUpdate(@NotNull final TableUpdate upstream) { final TableUpdate downstream; try (final KeyedUpdateContext kuc = new KeyedUpdateContext(ac, incrementalStateManager, reinterpretedKeySources, permuteKernels, keysUpstreamModifiedColumnSet, - operatorInputModifiedColumnSets, - upstreamToUse, outputPosition)) { - downstream = kuc.computeDownstreamIndicesAndCopyKeys(withView.getRowSet(), + operatorInputModifiedColumnSets, stateChangeRecorder, upstreamToUse, + outputPosition)) { + downstream = kuc.computeDownstreamIndicesAndCopyKeys(input.getRowSet(), keyColumnsRaw, keyColumnsCopied, result.getModifiedColumnSetForUpdates(), resultModifiedColumnSetFactories); @@ -276,6 +292,41 @@ public void onFailureInternal(@NotNull final Throwable originalException, Entry return ac.transformResult(result); } + private static OperatorAggregationStateManager makeStateManager( + @NotNull final AggregationControl control, @NotNull final QueryTable input, + @NotNull final ColumnSource[] keySources, @NotNull final ColumnSource[] reinterpretedKeySources, + @NotNull final AggregationContext ac) { + final OperatorAggregationStateManager stateManager; + if (input.isRefreshing()) { + if (USE_OPEN_ADDRESSED_STATE_MANAGER) { + stateManager = TypedHasherFactory.make( + IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.class, + reinterpretedKeySources, + keySources, control.initialHashTableSize(input), control.getMaximumLoadFactor(), + control.getTargetLoadFactor()); + } else { + stateManager = TypedHasherFactory.make( + IncrementalChunkedOperatorAggregationStateManagerTypedBase.class, reinterpretedKeySources, + keySources, control.initialHashTableSize(input), control.getMaximumLoadFactor(), + control.getTargetLoadFactor()); + } + } else 
{ + if (USE_OPEN_ADDRESSED_STATE_MANAGER) { + stateManager = TypedHasherFactory.make( + StaticChunkedOperatorAggregationStateManagerOpenAddressedBase.class, reinterpretedKeySources, + keySources, control.initialHashTableSize(input), control.getMaximumLoadFactor(), + control.getTargetLoadFactor()); + } else { + stateManager = TypedHasherFactory.make( + StaticChunkedOperatorAggregationStateManagerTypedBase.class, reinterpretedKeySources, + keySources, control.initialHashTableSize(input), control.getMaximumLoadFactor(), + control.getTargetLoadFactor()); + } + } + setReverseLookupFunction(keySources, ac, stateManager); + return stateManager; + } + private static TableUpdate adjustForStreaming(@NotNull final TableUpdate upstream) { // Streaming aggregations never have modifies or shifts from their parent: Assert.assertion(upstream.modified().isEmpty() && upstream.shifted().empty(), @@ -335,6 +386,7 @@ private static class KeyedUpdateContext implements SafeCloseable { private final IncrementalOperatorAggregationStateManager incrementalStateManager; private final ColumnSource[] reinterpretedKeySources; private final PermuteKernel[] permuteKernels; + private final StateChangeRecorder stateChangeRecorder; private final TableUpdate upstream; // Not to be mutated private final MutableInt outputPosition; @@ -344,9 +396,9 @@ private static class KeyedUpdateContext implements SafeCloseable { private final boolean processShifts; private final OperatorDivision od; + private final RowSetBuilderRandom reincarnatedStatesBuilder; private final RowSetBuilderRandom emptiedStatesBuilder; private final RowSetBuilderRandom modifiedStatesBuilder; - private final RowSetBuilderRandom reincarnatedStatesBuilder; private final boolean[] modifiedOperators; private final SafeCloseableList toClose; @@ -382,11 +434,9 @@ private static class KeyedUpdateContext implements SafeCloseable { private final SafeCloseable bc; private final int buildChunkSize; - private final WritableIntChunk reincarnatedSlots; private final SafeCloseable pc; private final int probeChunkSize; - private final WritableIntChunk emptiedSlots; private KeyedUpdateContext(@NotNull final AggregationContext ac, @NotNull final IncrementalOperatorAggregationStateManager incrementalStateManager, @@ -394,12 +444,14 @@ private KeyedUpdateContext(@NotNull final AggregationContext ac, @NotNull final PermuteKernel[] permuteKernels, @NotNull final ModifiedColumnSet keysUpstreamModifiedColumnSet, @NotNull final ModifiedColumnSet[] operatorInputUpstreamModifiedColumnSets, + @Nullable final StateChangeRecorder stateChangeRecorder, @NotNull final TableUpdate upstream, @NotNull final MutableInt outputPosition) { this.ac = ac; this.incrementalStateManager = incrementalStateManager; this.reinterpretedKeySources = reinterpretedKeySources; this.permuteKernels = permuteKernels; + this.stateChangeRecorder = stateChangeRecorder; this.upstream = upstream; this.outputPosition = outputPosition; @@ -425,13 +477,15 @@ private KeyedUpdateContext(@NotNull final AggregationContext ac, probeChunkSize = chunkSize(probeSize); final int chunkSize = Math.max(buildChunkSize, probeChunkSize); - emptiedStatesBuilder = RowSetFactory.builderRandom(); - if (USE_BITMAP_MODIFIED_STATES_BUILDER) { - modifiedStatesBuilder = new BitmapRandomBuilder(outputPosition.intValue()); + if (stateChangeRecorder != null) { + reincarnatedStatesBuilder = RowSetFactory.builderRandom(); + emptiedStatesBuilder = RowSetFactory.builderRandom(); + stateChangeRecorder.startRecording(reincarnatedStatesBuilder::addKey, 
emptiedStatesBuilder::addKey); } else { - modifiedStatesBuilder = RowSetFactory.builderRandom(); + reincarnatedStatesBuilder = new EmptyRandomBuilder(); + emptiedStatesBuilder = new EmptyRandomBuilder(); } - reincarnatedStatesBuilder = RowSetFactory.builderRandom(); + modifiedStatesBuilder = new BitmapRandomBuilder(outputPosition.intValue()); modifiedOperators = new boolean[ac.size()]; toClose = new SafeCloseableList(); @@ -487,21 +541,14 @@ private KeyedUpdateContext(@NotNull final AggregationContext ac, if (buildSize > 0) { bc = toClose.add( incrementalStateManager.makeAggregationStateBuildContext(reinterpretedKeySources, buildSize)); - reincarnatedSlots = toClose.add(WritableIntChunk.makeWritableChunk(buildChunkSize)); } else { bc = null; - reincarnatedSlots = null; } if (probeSize > 0) { pc = toClose.add(incrementalStateManager.makeProbeContext(reinterpretedKeySources, probeSize)); } else { pc = null; } - if (upstream.removed().isNonempty() || keysModified) { - emptiedSlots = toClose.add(WritableIntChunk.makeWritableChunk(probeChunkSize)); - } else { - emptiedSlots = null; - } } @Override @@ -607,10 +654,13 @@ private TableUpdate computeDownstreamIndicesAndCopyKeys( doInserts(upstream.added(), true); } + if (stateChangeRecorder != null) { + stateChangeRecorder.finishRecording(); + } final TableUpdateImpl downstream = new TableUpdateImpl(); downstream.shifted = RowSetShiftData.EMPTY; - try (final RowSet newStates = makeNewStatesIndex(previousLastState, outputPosition.intValue() - 1)) { + try (final RowSet newStates = makeNewStatesRowSet(previousLastState, outputPosition.intValue() - 1)) { downstream.added = reincarnatedStatesBuilder.build(); downstream.removed = emptiedStatesBuilder.build(); @@ -649,10 +699,7 @@ private void doRemoves(@NotNull final RowSequence keyIndicesToRemove) { } private void doRemovesForChunk(@NotNull final RowSequence keyIndicesToRemoveChunk) { - - incrementalStateManager.remove(pc, keyIndicesToRemoveChunk, reinterpretedKeySources, slots, emptiedSlots); - emptiedStatesBuilder.addRowKeysChunk(emptiedSlots); - + incrementalStateManager.remove(pc, keyIndicesToRemoveChunk, reinterpretedKeySources, slots); propagateRemovesToOperators(keyIndicesToRemoveChunk, slots); } @@ -722,13 +769,11 @@ private void doInserts(@NotNull final RowSequence keyIndicesToInsert, final bool private void doInsertsForChunk(@NotNull final RowSequence keyIndicesToInsertChunk, final boolean addToStateManager) { if (addToStateManager) { - incrementalStateManager.addForUpdate(bc, keyIndicesToInsertChunk, reinterpretedKeySources, - outputPosition, slots, reincarnatedSlots); - reincarnatedStatesBuilder.addRowKeysChunk(reincarnatedSlots); + incrementalStateManager.add(bc, keyIndicesToInsertChunk, reinterpretedKeySources, outputPosition, + slots); } else { incrementalStateManager.findModifications(pc, keyIndicesToInsertChunk, reinterpretedKeySources, slots); } - propagateInsertsToOperators(keyIndicesToInsertChunk, slots); } @@ -1120,12 +1165,9 @@ private ModifySplitResult splitKeyModificationsAndDoKeyChangeRemoves() { shifted ? 
modifiedPostShiftIterator.getNextRowSequenceWithLength(CHUNK_SIZE) : modifiedPreShiftChunk; - incrementalStateManager.remove(pc, modifiedPreShiftChunk, reinterpretedKeySources, slots, - emptiedSlots); - emptiedStatesBuilder.addRowKeysChunk(emptiedSlots); - incrementalStateManager.addForUpdate(bc, modifiedPostShiftChunk, reinterpretedKeySources, - outputPosition, postSlots, reincarnatedSlots); - reincarnatedStatesBuilder.addRowKeysChunk(reincarnatedSlots); + incrementalStateManager.remove(pc, modifiedPreShiftChunk, reinterpretedKeySources, slots); + incrementalStateManager.add(bc, modifiedPostShiftChunk, reinterpretedKeySources, outputPosition, + postSlots); final LongChunk preShiftIndices = modifiedPreShiftChunk.asRowKeyChunk(); final LongChunk postShiftIndices = @@ -1547,7 +1589,7 @@ private static QueryTable staticGroupedAggregation(QueryTable withView, String k resultColumnSourceMap.put(keyName, groupKeyIndexTable.first); ac.getResultColumns(resultColumnSourceMap); - doGroupedAddition(ac, groupKeyIndexTable, responsiveGroups); + doGroupedAddition(ac, groupKeyIndexTable.second::get, responsiveGroups, CHUNK_SIZE); final QueryTable result = new QueryTable(RowSetFactory.flat(responsiveGroups).toTracking(), resultColumnSourceMap); @@ -1559,8 +1601,11 @@ private static QueryTable staticGroupedAggregation(QueryTable withView, String k return ac.transformResult(result); } - private static void doGroupedAddition(AggregationContext ac, - Pair> groupKeyIndexTable, int responsiveGroups) { + private static void doGroupedAddition( + @NotNull final AggregationContext ac, + @NotNull final LongFunction groupIndexToRowSet, + final int responsiveGroups, + final int chunkSize) { final boolean indicesRequired = ac.requiresIndices(); final ColumnSource.GetContext[] getContexts = new ColumnSource.GetContext[ac.size()]; @@ -1570,31 +1615,28 @@ private static void doGroupedAddition(AggregationContext ac, final SafeCloseable ignored2 = new SafeCloseableArray<>(operatorContexts); final SharedContext sharedContext = SharedContext.makeSharedContext()) { ac.ensureCapacity(responsiveGroups); - // we don't know how many things are in the groups, so we have to allocate a large chunk - ac.initializeGetContexts(sharedContext, getContexts, CHUNK_SIZE); - ac.initializeSingletonContexts(operatorContexts, CHUNK_SIZE); + ac.initializeGetContexts(sharedContext, getContexts, chunkSize); + ac.initializeSingletonContexts(operatorContexts, chunkSize); final boolean unchunked = !ac.requiresInputs() && ac.unchunkedIndices(); if (unchunked) { for (int ii = 0; ii < responsiveGroups; ++ii) { - final RowSet rowSet = groupKeyIndexTable.second.get(ii); + final RowSet rowSet = groupIndexToRowSet.apply(ii); for (int oi = 0; oi < ac.size(); ++oi) { ac.operators[oi].addRowSet(operatorContexts[oi], rowSet, ii); } } } else { + // noinspection unchecked + final Chunk[] workingChunks = new Chunk[ac.size()]; for (int ii = 0; ii < responsiveGroups; ++ii) { - final RowSet rowSet = groupKeyIndexTable.second.get(ii); - // noinspection ConstantConditions + final RowSet rowSet = groupIndexToRowSet.apply(ii); try (final RowSequence.Iterator rsIt = rowSet.getRowSequenceIterator()) { - // noinspection unchecked - final Chunk[] workingChunks = new Chunk[ac.size()]; - - while (rsIt.hasMore()) { - final RowSequence chunkOk = rsIt.getNextRowSequenceWithLength(CHUNK_SIZE); - final int chunkSize = chunkOk.intSize(); + do { + final RowSequence chunkRows = rsIt.getNextRowSequenceWithLength(chunkSize); + final int chunkRowsSize = chunkRows.intSize(); final 
LongChunk keyIndices = - indicesRequired ? chunkOk.asRowKeyChunk() : null; + indicesRequired ? chunkRows.asRowKeyChunk() : null; sharedContext.reset(); Arrays.fill(workingChunks, null); @@ -1603,24 +1645,113 @@ private static void doGroupedAddition(AggregationContext ac, final int inputSlot = ac.inputSlot(oi); if (inputSlot == oi) { workingChunks[inputSlot] = ac.inputColumns[oi] == null ? null - : ac.inputColumns[oi].getChunk(getContexts[oi], chunkOk); + : ac.inputColumns[oi].getChunk(getContexts[oi], chunkRows); } - ac.operators[oi].addChunk(operatorContexts[oi], chunkSize, + ac.operators[oi].addChunk(operatorContexts[oi], chunkRowsSize, inputSlot < 0 ? null : workingChunks[inputSlot], keyIndices, ii); } - } + } while (rsIt.hasMore()); } } } } } - private static void initialBucketedKeyAddition(QueryTable withView, + private static OperatorAggregationStateManager initialKeyTableAddition( + @NotNull final AggregationControl control, + @NotNull final Table initialKeys, + @NotNull final String[] keyColumnNames, + @NotNull final AggregationContext ac, + @NotNull final MutableInt outputPosition, + @NotNull final Supplier stateManagerSupplier) { + // This logic is duplicative of the logic in the main aggregation function, but it's hard to consolidate + // further. A better strategy might be to do a selectDistinct first, but that would result in more hash table + // inserts. + final ColumnSource[] keySources = Arrays.stream(keyColumnNames) + .map(initialKeys::getColumnSource) + .toArray(ColumnSource[]::new); + final ColumnSource[] reinterpretedKeySources = Arrays.stream(keyColumnNames) + .map(initialKeys::getColumnSource) + .map(ReinterpretUtils::maybeConvertToPrimitive) + .toArray(ColumnSource[]::new); + final boolean useGroupingAllowed = control.considerGrouping(initialKeys, keySources) + && keySources.length == 1 + && reinterpretedKeySources[0] == keySources[0]; + + final OperatorAggregationStateManager stateManager; + if (initialKeys.isRefreshing()) { + final MutableObject stateManagerHolder = new MutableObject<>(); + ConstructSnapshot.callDataSnapshotFunction( + "InitialKeyTableSnapshot-" + System.identityHashCode(initialKeys) + ": ", + ConstructSnapshot.makeSnapshotControl(false, true, (NotificationStepSource) initialKeys), + (final boolean usePrev, final long beforeClockValue) -> { + stateManagerHolder.setValue(makeInitializedStateManager(initialKeys, reinterpretedKeySources, + ac, outputPosition, stateManagerSupplier, useGroupingAllowed, usePrev)); + return true; + }); + stateManager = stateManagerHolder.getValue(); + } else { + stateManager = makeInitializedStateManager(initialKeys, reinterpretedKeySources, + ac, outputPosition, stateManagerSupplier, useGroupingAllowed, false); + } + try (final RowSet empty = RowSetFactory.empty()) { + doGroupedAddition(ac, gi -> empty, outputPosition.intValue(), 0); + } + return stateManager; + } + + private static OperatorAggregationStateManager makeInitializedStateManager( + @NotNull final Table initialKeys, + @NotNull ColumnSource[] reinterpretedKeySources, + @NotNull final AggregationContext ac, + @NotNull final MutableInt outputPosition, + @NotNull final Supplier stateManagerSupplier, + final boolean useGroupingAllowed, + final boolean usePrev) { + outputPosition.setValue(0); + final OperatorAggregationStateManager stateManager = stateManagerSupplier.get(); + + final ColumnSource[] keyColumnsToInsert; + final boolean closeRowsToInsert; + final RowSequence rowsToInsert; + final RowSetIndexer groupingIndexer = useGroupingAllowed && 
(!initialKeys.isRefreshing() || !usePrev) + ? RowSetIndexer.of(initialKeys.getRowSet()) + : null; + if (groupingIndexer != null && groupingIndexer.hasGrouping(reinterpretedKeySources[0])) { + final ColumnSource groupedSource = reinterpretedKeySources[0]; + final Map grouping = groupingIndexer.getGrouping(groupedSource); + // noinspection unchecked + keyColumnsToInsert = new ColumnSource[] { + GroupingUtils.groupingKeysToImmutableFlatSource(groupedSource, grouping)}; + closeRowsToInsert = true; + // noinspection resource + rowsToInsert = RowSequenceFactory.forRange(0, grouping.size() - 1); + } else { + keyColumnsToInsert = reinterpretedKeySources; + closeRowsToInsert = usePrev; + rowsToInsert = usePrev ? initialKeys.getRowSet().copyPrev() : initialKeys.getRowSet(); + } + + final int chunkSize = chunkSize(rowsToInsert.size()); + try (final SafeCloseable ignored = closeRowsToInsert ? rowsToInsert : null; + final SafeCloseable bc = stateManager.makeAggregationStateBuildContext(keyColumnsToInsert, chunkSize); + final RowSequence.Iterator rowsIterator = rowsToInsert.getRowSequenceIterator(); + final WritableIntChunk outputPositions = WritableIntChunk.makeWritableChunk(chunkSize)) { + while (rowsIterator.hasMore()) { + final RowSequence chunkRows = rowsIterator.getNextRowSequenceWithLength(chunkSize); + stateManager.add(bc, chunkRows, keyColumnsToInsert, outputPosition, outputPositions); + } + } + return stateManager; + } + + private static void initialBucketedKeyAddition(QueryTable input, ColumnSource[] reinterpretedKeySources, AggregationContext ac, PermuteKernel[] permuteKernels, OperatorAggregationStateManager stateManager, MutableInt outputPosition, + RowSetBuilderRandom initialRowsBuilder, boolean usePrev) { final boolean findRuns = ac.requiresRunFinds(SKIP_RUN_FIND); @@ -1639,7 +1770,7 @@ private static void initialBucketedKeyAddition(QueryTable withView, buildSources = reinterpretedKeySources; } - final RowSet rowSet = usePrev ? withView.getRowSet().copyPrev() : withView.getRowSet(); + final RowSet rowSet = usePrev ? input.getRowSet().copyPrev() : input.getRowSet(); if (rowSet.isEmpty()) { return; @@ -1677,6 +1808,9 @@ private static void initialBucketedKeyAddition(QueryTable withView, sharedContext.reset(); stateManager.add(bc, chunkOk, buildSources, outputPosition, outputPositions); + if (initialRowsBuilder != null) { + initialRowsBuilder.addRowKeysChunk(outputPositions); + } ac.ensureCapacity(outputPosition.intValue()); @@ -1714,14 +1848,15 @@ private static void initialBucketedKeyAddition(QueryTable withView, } } - private static void initialGroupedKeyAddition(QueryTable withView, + private static void initialGroupedKeyAddition(QueryTable input, ColumnSource[] reinterpretedKeySources, AggregationContext ac, - IncrementalOperatorAggregationStateManager stateManager, + OperatorAggregationStateManager stateManager, MutableInt outputPosition, + RowSetBuilderRandom initialRowsBuilder, boolean usePrev) { final Pair> groupKeyIndexTable; - final RowSetIndexer indexer = RowSetIndexer.of(withView.getRowSet()); + final RowSetIndexer indexer = RowSetIndexer.of(input.getRowSet()); final Map grouping = usePrev ? 
indexer.getPrevGrouping(reinterpretedKeySources[0]) : indexer.getGrouping(reinterpretedKeySources[0]); // noinspection unchecked @@ -1746,22 +1881,18 @@ private static void initialGroupedKeyAddition(QueryTable withView, while (rsIt.hasMore()) { final RowSequence chunkOk = rsIt.getNextRowSequenceWithLength(CHUNK_SIZE); stateManager.add(bc, chunkOk, groupedFlatKeySource, outputPosition, outputPositions); + if (initialRowsBuilder != null) { + initialRowsBuilder.addRowKeysChunk(outputPositions); + } } Assert.eq(outputPosition.intValue(), "outputPosition.intValue()", responsiveGroups, "responsiveGroups"); } - for (int ii = 0; ii < responsiveGroups; ++ii) { - // noinspection ConstantConditions - final long groupSize = groupKeyIndexTable.second.get(ii).size(); - stateManager.setRowSize(ii, groupSize); - } - - doGroupedAddition(ac, groupKeyIndexTable, responsiveGroups); + doGroupedAddition(ac, groupKeyIndexTable.second::get, responsiveGroups, CHUNK_SIZE); } - private static RowSet makeNewStatesIndex(final int first, final int last) { - return first > last ? RowSetFactory.empty() - : RowSetFactory.fromRange(first, last); + private static RowSet makeNewStatesRowSet(final int first, final int last) { + return first > last ? RowSetFactory.empty() : RowSetFactory.fromRange(first, last); } private static void copyKeyColumns(ColumnSource[] keyColumnsRaw, WritableColumnSource[] keyColumnsCopied, @@ -1794,10 +1925,14 @@ private static void copyKeyColumns(ColumnSource[] keyColumnsRaw, WritableColu } } - private static QueryTable noKeyAggregation(SwapListener swapListener, - AggregationContextFactory aggregationContextFactory, QueryTable table, boolean usePrev) { + private static QueryTable noKeyAggregation( + @Nullable final SwapListener swapListener, + @NotNull final AggregationContextFactory aggregationContextFactory, + @NotNull final QueryTable table, + final boolean preserveEmpty, + final boolean usePrev) { - final AggregationContext ac = aggregationContextFactory.makeAggregationContext(table); + final AggregationContext ac = aggregationContextFactory.makeAggregationContext(table, false); final Map> resultColumnSourceMap = new LinkedHashMap<>(); ac.getResultColumns(resultColumnSourceMap); @@ -1815,7 +1950,7 @@ private static QueryTable noKeyAggregation(SwapListener swapListener, final int initialResultSize; try (final SafeCloseable ignored1 = new SafeCloseableArray<>(opContexts); final SafeCloseable ignored2 = usePrev ? rowSet : null) { - initialResultSize = rowSet.size() == 0 ? 0 : 1; + initialResultSize = preserveEmpty || rowSet.size() != 0 ? 1 : 0; ac.initializeSingletonContexts(opContexts, rowSet.size()); doNoKeyAddition(rowSet, ac, opContexts, allColumns, usePrev, allColumns); } @@ -1932,7 +2067,8 @@ private void processNoKeyUpdate(@NotNull final TableUpdate upstream) { modifiedOperators); } - final int newResultSize = (!isStream || lastSize == 0) && table.size() == 0 ? 0 : 1; + final int newResultSize = + preserveEmpty || (isStream && lastSize != 0) || table.size() != 0 ? 
1 : 0; final TableUpdateImpl downstream = new TableUpdateImpl(); downstream.shifted = RowSetShiftData.EMPTY; if ((lastSize == 0 && newResultSize == 1)) { @@ -1958,7 +2094,7 @@ private void processNoKeyUpdate(@NotNull final TableUpdate upstream) { final int newStatesCreated = Math.max(statesCreated, newResultSize); try (final RowSet newStates = - makeNewStatesIndex(statesCreated, newStatesCreated - 1)) { + makeNewStatesRowSet(statesCreated, newStatesCreated - 1)) { ac.propagateChangesToOperators(downstream, newStates); } statesCreated = newStatesCreated; @@ -2237,6 +2373,30 @@ public static int chunkSize(long size) { return (int) Math.min(size, CHUNK_SIZE); } + /** + * {@link RowSetBuilderRandom} that ignores added keys and always {@link RowSetBuilderRandom#build() builds} an + * {@link RowSetFactory#empty() empty} result. + */ + private static class EmptyRandomBuilder implements RowSetBuilderRandom { + + @Override + public WritableRowSet build() { + return RowSetFactory.empty(); + } + + @Override + public void addKey(long rowKey) { + // This class expects to never process any adds. + throw new UnsupportedOperationException(); + } + + @Override + public void addRange(final long firstRowKey, final long lastRowKey) { + // This class expects to never process any adds. + throw new UnsupportedOperationException(); + } + } + /** * The output RowSet of an aggregation is fairly special. It is always from zero to the number of output rows, and * while modifying states we randomly add rows to it, potentially touching the same state many times. The normal @@ -2253,9 +2413,25 @@ public static int chunkSize(long size) { * for the builder to the maximum output position without loss of fidelity. */ private static class BitmapRandomBuilder implements RowSetBuilderRandom { + + /** + * An upper bound on {@code lastUsed}. That is, the highest bit index that may be used in {@code bitset}. + */ final int maxKey; + + /** + * The lowest set bit index in {@code bitset}. + */ int firstUsed = Integer.MAX_VALUE; + + /** + * The highest set bit index in {@code bitset}. + */ int lastUsed = -1; + + /** + * The bitset itself. + */ long[] bitset; private BitmapRandomBuilder(int maxKey) { @@ -2285,7 +2461,7 @@ public WritableRowSet build() { } @Override - public void addKey(long rowKey) { + public void addKey(final long rowKey) { if (rowKey >= maxKey) { return; } @@ -2303,7 +2479,8 @@ public void addKey(long rowKey) { } @Override - public void addRange(long firstRowKey, long lastRowKey) { + public void addRange(final long firstRowKey, final long lastRowKey) { + // This class is used only with aggregation state managers, which never call addRange. 
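The BitmapRandomBuilder above exploits the fact that aggregation output row keys are dense positions in [0, maxKey), so repeatedly touching the same state costs a single bit set rather than growing a general-purpose builder. The following standalone sketch of the same idea uses hypothetical names and plain JDK types only; it is an illustration under those assumptions, not the Deephaven implementation.

import java.util.ArrayList;
import java.util.List;

// Hypothetical, simplified analogue of a bitmap-backed random builder: repeated adds of the
// same dense output position are idempotent, and buildRanges() emits coalesced [first, last] ranges.
final class BitmapPositionBuilderSketch {
    private final long[] words; // one bit per possible output position in [0, maxPosition)

    BitmapPositionBuilderSketch(final int maxPosition) {
        words = new long[(maxPosition + 63) >> 6];
    }

    void addPosition(final int position) {
        words[position >> 6] |= 1L << (position & 63);
    }

    List<long[]> buildRanges() {
        final List<long[]> ranges = new ArrayList<>();
        long rangeStart = -1;
        final int maxBits = words.length << 6;
        for (int pos = 0; pos < maxBits; ++pos) {
            final boolean set = (words[pos >> 6] & (1L << (pos & 63))) != 0;
            if (set && rangeStart < 0) {
                rangeStart = pos;
            } else if (!set && rangeStart >= 0) {
                ranges.add(new long[] {rangeStart, pos - 1});
                rangeStart = -1;
            }
        }
        if (rangeStart >= 0) {
            ranges.add(new long[] {rangeStart, maxBits - 1});
        }
        return ranges;
    }
}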
throw new UnsupportedOperationException(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/CountAggregationOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/CountAggregationOperator.java index f87968b38e8..6481d112a59 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/CountAggregationOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/CountAggregationOperator.java @@ -6,24 +6,49 @@ import io.deephaven.chunk.attributes.ChunkLengths; import io.deephaven.chunk.attributes.ChunkPositions; import io.deephaven.chunk.attributes.Values; -import io.deephaven.engine.util.NullSafeAddition; +import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.sources.LongArraySource; import io.deephaven.chunk.*; import io.deephaven.engine.rowset.chunkattributes.RowKeys; +import org.jetbrains.annotations.Nullable; import java.util.Collections; import java.util.Map; -class CountAggregationOperator implements IterativeChunkedAggregationOperator { +class CountAggregationOperator extends BasicStateChangeRecorder implements IterativeChunkedAggregationOperator { + private final String resultName; private final LongArraySource countColumnSource; - CountAggregationOperator(String resultName) { + /** + * Construct a count aggregation operator. + * + * @param resultName The name of the result column if this operator should expose its results, else {@code null} + */ + CountAggregationOperator(@Nullable final String resultName) { this.resultName = resultName; this.countColumnSource = new LongArraySource(); } + private boolean exposesResult() { + return resultName != null; + } + + private void recordAdd(final long destination, final long rowsAdded) { + final long oldCount = countColumnSource.getAndAddUnsafe(destination, rowsAdded); + if (oldCount == 0) { + onReincarnated(destination); + } + } + + private void recordRemove(final long destination, final long rowsRemoved) { + final long oldCount = countColumnSource.getAndAddUnsafe(destination, -rowsRemoved); + if (oldCount == rowsRemoved) { + onEmptied(destination); + } + } + @Override public void addChunk(BucketedContext context, Chunk values, LongChunk inputRowKeys, IntChunk destinations, @@ -32,10 +57,12 @@ public void addChunk(BucketedContext context, Chunk values, for (int ii = 0; ii < startPositions.size(); ++ii) { final int startPosition = startPositions.get(ii); final long destination = destinations.get(startPosition); - final long newCount = length.get(ii); - countColumnSource.getAndAddUnsafe(destination, newCount); + final long rowsAdded = length.get(ii); + recordAdd(destination, rowsAdded); + } + if (exposesResult()) { + stateModified.fillWithValue(0, startPositions.size(), true); } - stateModified.fillWithValue(0, startPositions.size(), true); } @Override @@ -46,23 +73,39 @@ public void removeChunk(BucketedContext context, Chunk values, for (int ii = 0; ii < startPositions.size(); ++ii) { final int startPosition = startPositions.get(ii); final long destination = destinations.get(startPosition); - final long newCount = length.get(ii); - countColumnSource.getAndAddUnsafe(destination, -newCount); + final long rowsRemoved = length.get(ii); + recordRemove(destination, rowsRemoved); + } + if (exposesResult()) { + stateModified.fillWithValue(0, startPositions.size(), true); } - stateModified.fillWithValue(0, startPositions.size(), true); } @Override public boolean addChunk(SingletonContext context, 
int chunkSize, Chunk values, LongChunk inputRowKeys, long destination) { - countColumnSource.getAndAddUnsafe(destination, chunkSize); + recordAdd(destination, chunkSize); return true; } @Override public boolean removeChunk(SingletonContext context, int chunkSize, Chunk values, LongChunk inputRowKeys, long destination) { - countColumnSource.getAndAddUnsafe(destination, -chunkSize); + recordRemove(destination, chunkSize); + return true; + } + + @Override + public boolean unchunkedRowSet() { + // Optimize initial grouped addition by accepting un-chunked row sets in lieu of iterative calls to + // addChunk with null values and null inputRowKeys. + // NB: Count is unusual in allowing this while returning false for requiresRowKeys(). + return true; + } + + @Override + public boolean addRowSet(SingletonContext context, RowSet rowSet, long destination) { + recordAdd(destination, rowSet.size()); return true; } @@ -71,13 +114,15 @@ public void modifyChunk(BucketedContext context, Chunk previou Chunk newValues, LongChunk postShiftRowKeys, IntChunk destinations, IntChunk startPositions, IntChunk length, WritableBooleanChunk stateModified) { - stateModified.fillWithValue(0, startPositions.size(), false); + // We have no inputs, so we should never get here. + throw new IllegalStateException(); } @Override public boolean modifyChunk(SingletonContext context, int chunkSize, Chunk previousValues, Chunk newValues, LongChunk postShiftRowKeys, long destination) { - return false; + // We have no inputs, so we should never get here. + throw new IllegalStateException(); } @Override @@ -87,11 +132,13 @@ public void ensureCapacity(long tableSize) { @Override public Map> getResultColumns() { - return Collections.singletonMap(resultName, countColumnSource); + return exposesResult() ? 
Collections.singletonMap(resultName, countColumnSource) : Collections.emptyMap(); } @Override public void startTrackingPrevValues() { - countColumnSource.startTrackingPrevValues(); + if (exposesResult()) { + countColumnSource.startTrackingPrevValues(); + } } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FirstOrLastChunkedOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FirstOrLastChunkedOperator.java index d697820c0df..2769566928d 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FirstOrLastChunkedOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FirstOrLastChunkedOperator.java @@ -24,7 +24,10 @@ import java.util.LinkedHashMap; import java.util.Map; -public class FirstOrLastChunkedOperator implements IterativeChunkedAggregationOperator { +public class FirstOrLastChunkedOperator + extends BasicStateChangeRecorder + implements IterativeChunkedAggregationOperator { + private final boolean isFirst; private final LongArraySource redirections; private final ObjectArraySource rowSets; @@ -202,7 +205,12 @@ private boolean hasRedirection(LongChunk indices, long redire private boolean addChunk(LongChunk indices, int start, int length, long destination) { final WritableRowSet rowSet = rowSetForSlot(destination); + + final boolean wasEmpty = rowSet.isEmpty(); rowSet.insert(indices, start, length); + if (wasEmpty && rowSet.isNonempty()) { + onReincarnated(destination); + } return updateRedirections(destination, rowSet); } @@ -214,7 +222,11 @@ public boolean addRowSet(SingletonContext context, RowSet addRowSet, long destin } final WritableRowSet rowSet = rowSetForSlot(destination); + final boolean wasEmpty = rowSet.isEmpty(); rowSet.insert(addRowSet); + if (wasEmpty && rowSet.isNonempty()) { + onReincarnated(destination); + } return updateRedirections(destination, rowSet); } @@ -229,7 +241,12 @@ private WritableRowSet rowSetForSlot(long destination) { private boolean removeChunk(LongChunk indices, int start, int length, long destination) { final WritableRowSet rowSet = rowSetForSlot(destination); + + final boolean wasNonEmpty = rowSet.isNonempty(); rowSet.remove(indices, start, length); + if (wasNonEmpty && rowSet.isEmpty()) { + onEmptied(destination); + } return updateRedirections(destination, rowSet); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FormulaChunkedOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FormulaChunkedOperator.java index 915e498fc78..edf352154c2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FormulaChunkedOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/FormulaChunkedOperator.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; +import java.util.function.LongConsumer; import java.util.function.UnaryOperator; import static io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource.BLOCK_SIZE; @@ -35,7 +36,7 @@ /** * An {@link IterativeChunkedAggregationOperator} used in the implementation of {@link Table#applyToAllBy}. 
*/ -class FormulaChunkedOperator implements IterativeChunkedAggregationOperator { +class FormulaChunkedOperator implements StateChangeRecorder, IterativeChunkedAggregationOperator { private final GroupByChunkedOperator groupBy; private final boolean delegateToBy; @@ -100,6 +101,16 @@ class FormulaChunkedOperator implements IterativeChunkedAggregationOperator { } } + @Override + public void startRecording(LongConsumer reincarnatedDestinationCallback, LongConsumer emptiedDestinationCallback) { + groupBy.startRecording(reincarnatedDestinationCallback, emptiedDestinationCallback); + } + + @Override + public void finishRecording() { + groupBy.finishRecording(); + } + @Override public void addChunk(final BucketedContext bucketedContext, final Chunk values, @NotNull final LongChunk inputRowKeys, @@ -336,10 +347,7 @@ public void propagateUpdates(@NotNull final TableUpdate downstream, modifiesToProcess ? makeModifiedColumnsMask(resultModifiedColumnSet) : null; final boolean[] columnsToFillMask = addsToProcess ? makeAllColumnsMask() : removesToProcess ? makeObjectOrModifiedColumnsMask(resultModifiedColumnSet) : modifiedColumnsMask; - final boolean[] columnsToGetMask = addsToProcess ? columnsToFillMask /* - * This is the result of - * makeAllColumnsMask() on the line above - */ : modifiedColumnsMask; + final boolean[] columnsToGetMask = addsToProcess ? columnsToFillMask : modifiedColumnsMask; try (final DataCopyContext dataCopyContext = new DataCopyContext(columnsToFillMask, columnsToGetMask)) { if (removesToProcess) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/GroupByChunkedOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/GroupByChunkedOperator.java index 5ad50c23703..eed663374c5 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/GroupByChunkedOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/GroupByChunkedOperator.java @@ -33,7 +33,9 @@ * An {@link IterativeChunkedAggregationOperator} used in the implementation of {@link Table#groupBy}, * {@link io.deephaven.api.agg.spec.AggSpecGroup}, and {@link io.deephaven.api.agg.Aggregation#AggGroup(String...)}. 
*/ -public final class GroupByChunkedOperator implements IterativeChunkedAggregationOperator { +public final class GroupByChunkedOperator + extends BasicStateChangeRecorder + implements IterativeChunkedAggregationOperator { private final QueryTable inputTable; private final boolean registeredWithHelper; @@ -213,17 +215,30 @@ public boolean modifyRowKeys(final SingletonContext context, @NotNull final Long private void addChunk(@NotNull final LongChunk indices, final int start, final int length, final long destination) { final WritableRowSet rowSet = rowSetForSlot(destination); + final boolean wasEmpty = rowSet.isEmpty(); rowSet.insert(indices, start, length); + if (wasEmpty && rowSet.isNonempty()) { + onReincarnated(destination); + } } private void addRowsToSlot(@NotNull final RowSet addRowSet, final long destination) { - rowSetForSlot(destination).insert(addRowSet); + final WritableRowSet rowSet = rowSetForSlot(destination); + final boolean wasEmpty = rowSet.isEmpty(); + rowSet.insert(addRowSet); + if (wasEmpty && rowSet.isNonempty()) { + onReincarnated(destination); + } } private void removeChunk(@NotNull final LongChunk indices, final int start, final int length, final long destination) { final WritableRowSet rowSet = rowSetForSlot(destination); + final boolean wasNonEmpty = rowSet.isNonempty(); rowSet.remove(indices, start, length); + if (wasNonEmpty && rowSet.isEmpty()) { + onEmptied(destination); + } } private void doShift(@NotNull final LongChunk preShiftIndices, diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManager.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManager.java deleted file mode 100644 index 4bfd834bdd7..00000000000 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManager.java +++ /dev/null @@ -1,1791 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.table.impl.by; - -import io.deephaven.base.verify.Require; -import io.deephaven.base.verify.Assert; -import io.deephaven.chunk.*; -import io.deephaven.chunk.attributes.Any; -import io.deephaven.chunk.attributes.ChunkPositions; -import io.deephaven.chunk.attributes.HashCodes; -import io.deephaven.chunk.attributes.Values; -import io.deephaven.engine.rowset.*; -import io.deephaven.engine.table.*; -import io.deephaven.engine.rowset.chunkattributes.OrderedRowKeys; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; -import io.deephaven.util.QueryConstants; -import io.deephaven.chunk.util.hashing.*; -// this is ugly to have twice, but we do need it twice for replication -// @StateChunkIdentityName@ from \QIntChunk\E -import io.deephaven.chunk.util.hashing.IntChunkEquals; -import io.deephaven.engine.table.impl.sort.permute.PermuteKernel; -import io.deephaven.engine.table.impl.sort.timsort.LongIntTimsortKernel; -import io.deephaven.engine.table.impl.sources.*; -import io.deephaven.engine.table.impl.util.*; - -// mixin rehash -import java.util.Arrays; -import io.deephaven.engine.table.impl.sort.permute.IntPermuteKernel; -// @StateChunkTypeEnum@ from \QInt\E -import io.deephaven.engine.table.impl.sort.permute.IntPermuteKernel; -import io.deephaven.engine.table.impl.util.compact.IntCompactKernel; -import io.deephaven.engine.table.impl.util.compact.LongCompactKernel; -// endmixin rehash - -import io.deephaven.util.SafeCloseableArray; -import org.jetbrains.annotations.NotNull; 
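CountAggregationOperator, FirstOrLastChunkedOperator, and GroupByChunkedOperator above all follow the same pattern: after updating per-destination state, they report a destination whose row count went from zero to non-zero as reincarnated and one that dropped to zero as emptied, so the aggregation can add or remove result rows when empty states are not preserved. A minimal dependency-free sketch of that recording pattern, with hypothetical names rather than the actual BasicStateChangeRecorder API, might look like this.

import java.util.function.LongConsumer;

// Hypothetical sketch: notifies callbacks when a destination transitions between empty and
// non-empty, mirroring the reincarnated/emptied reporting used by the operators above.
final class StateChangeRecorderSketch {
    private LongConsumer reincarnatedCallback;
    private LongConsumer emptiedCallback;

    void startRecording(final LongConsumer reincarnated, final LongConsumer emptied) {
        reincarnatedCallback = reincarnated;
        emptiedCallback = emptied;
    }

    void finishRecording() {
        reincarnatedCallback = null;
        emptiedCallback = null;
    }

    // Operators call this after adjusting a destination's row count.
    void recordCountChange(final long destination, final long oldCount, final long newCount) {
        if (reincarnatedCallback != null && oldCount == 0 && newCount > 0) {
            reincarnatedCallback.accept(destination);
        } else if (emptiedCallback != null && oldCount > 0 && newCount == 0) {
            emptiedCallback.accept(destination);
        }
    }
}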
- -// region extra imports -import io.deephaven.engine.table.impl.HashTableAnnotations; -import io.deephaven.util.SafeCloseable; -import org.apache.commons.lang3.mutable.MutableInt; -import org.jetbrains.annotations.Nullable; - -import java.util.Objects; -// endregion extra imports - -import static io.deephaven.util.SafeCloseable.closeArray; - -// region class visibility -public -// endregion class visibility -class IncrementalChunkedOperatorAggregationStateManager - // region extensions - implements IncrementalOperatorAggregationStateManager - // endregion extensions -{ - // region constants - public static final int CHUNK_SIZE = ChunkedOperatorAggregationHelper.CHUNK_SIZE; - public static final int MINIMUM_INITIAL_HASH_SIZE = CHUNK_SIZE; - private static final long MAX_TABLE_SIZE = HashTableColumnSource.MINIMUM_OVERFLOW_HASH_SLOT; - // endregion constants - - // mixin rehash - static final double DEFAULT_MAX_LOAD_FACTOR = 0.75; - static final double DEFAULT_TARGET_LOAD_FACTOR = 0.70; - // endmixin rehash - - // region preamble variables - // endregion preamble variables - - @HashTableAnnotations.EmptyStateValue - // @NullStateValue@ from \QQueryConstants.NULL_INT\E, @StateValueType@ from \Qint\E - private static final int EMPTY_RIGHT_VALUE = QueryConstants.NULL_INT; - - // mixin getStateValue - // region overflow pivot - // endregion overflow pivot - // endmixin getStateValue - - // the number of slots in our table - // mixin rehash - private int tableSize; - // endmixin rehash - // altmixin rehash: private final int tableSize; - - // how many key columns we have - private final int keyColumnCount; - - // mixin rehash - private long numEntries = 0; - - /** Our table size must be 2^L (i.e. a power of two); and the pivot is between 2^(L-1) and 2^L. - * - *
When hashing a value, if hashCode % 2^L < tableHashPivot; then the destination location is hashCode % 2^L. - * If hashCode % 2^L >= tableHashPivot, then the destination location is hashCode % 2^(L-1). Once the pivot reaches - * the table size, we can simply double the table size and repeat the process. - * - * This has the effect of only using hash table locations < hashTablePivot. When we want to expand the table - * we can move some of the entries from the location {@code tableHashPivot - 2^(L-1)} to tableHashPivot. This - * provides for incremental expansion of the hash table, without the need for a full rehash.
    - */ - private int tableHashPivot; - - // the table will be rehashed to a load factor of targetLoadFactor if our loadFactor exceeds maximumLoadFactor - // or if it falls below minimum load factor we will instead contract the table - private double targetLoadFactor = DEFAULT_TARGET_LOAD_FACTOR; - private double maximumLoadFactor = DEFAULT_MAX_LOAD_FACTOR; - // TODO: We do not yet support contraction - // private final double minimumLoadFactor = 0.5; - - private final IntegerArraySource freeOverflowLocations = new IntegerArraySource(); - private int freeOverflowCount = 0; - // endmixin rehash - - // the keys for our hash entries - private final ArrayBackedColumnSource[] keySources; - // the location of any overflow entry in this bucket - private final IntegerArraySource overflowLocationSource = new IntegerArraySource(); - - // we are going to also reuse this for our state entry, so that we do not need additional storage - @HashTableAnnotations.StateColumnSource - // @StateColumnSourceType@ from \QIntegerArraySource\E - private final IntegerArraySource stateSource - // @StateColumnSourceConstructor@ from \QIntegerArraySource()\E - = new IntegerArraySource(); - - // the keys for overflow - private int nextOverflowLocation = 0; - private final ArrayBackedColumnSource [] overflowKeySources; - // the location of the next key in an overflow bucket - private final IntegerArraySource overflowOverflowLocationSource = new IntegerArraySource(); - // the overflow buckets for the state source - @HashTableAnnotations.OverflowStateColumnSource - // @StateColumnSourceType@ from \QIntegerArraySource\E - private final IntegerArraySource overflowStateSource - // @StateColumnSourceConstructor@ from \QIntegerArraySource()\E - = new IntegerArraySource(); - - // the type of each of our key chunks - private final ChunkType[] keyChunkTypes; - - // the operators for hashing and various equality methods - private final ChunkHasher[] chunkHashers; - private final ChunkEquals[] chunkEquals; - private final PermuteKernel[] chunkCopiers; - - // mixin rehash - // If we have objects in our key columns, then we should null them out if we delete an overflow row, this only - // applies to ObjectArraySources, for primitives we are content to leave the dead entries in the tables, because - // they will not affect GC. 
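The tableHashPivot comment above describes a linear-hashing style scheme: a key's bucket is its slot modulo the full table size when that slot is below the pivot, and modulo half the table size otherwise, so raising the pivot grows the usable table one bucket at a time. A small self-contained illustration, assuming power-of-two sizes and a non-negative hash code (names here are hypothetical):

// Illustrative only: bucket selection under the incremental-expansion scheme described above.
// tableSize is 2^L and pivot lies in [2^(L-1), 2^L]; incrementing the pivot opens one more slot,
// moving only the entries whose full-size slot equals the newly opened location.
final class LinearHashingSketch {
    static int bucket(final int nonNegativeHash, final int tableSize, final int pivot) {
        final int fullSlot = nonNegativeHash & (tableSize - 1);      // hash % 2^L
        if (fullSlot < pivot) {
            return fullSlot;
        }
        return nonNegativeHash & ((tableSize >> 1) - 1);             // hash % 2^(L-1)
    }

    public static void main(final String[] args) {
        // With tableSize = 8 and pivot = 6, slots 6 and 7 are not yet open, so keys hashing
        // there fall back to slots 2 and 3; slot 5 is below the pivot and used directly.
        System.out.println(bucket(6, 8, 6)); // 2
        System.out.println(bucket(7, 8, 6)); // 3
        System.out.println(bucket(5, 8, 6)); // 5
    }
}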
- private final ObjectArraySource[] overflowKeyColumnsToNull; - // endmixin rehash - - // region extra variables - // in position space - private final LongArraySource rowCountSource = new LongArraySource(); - - private final IntegerArraySource outputPositionToHashSlot = new IntegerArraySource(); - private final WritableRowRedirection resultIndexToHashSlot = new IntColumnSourceWritableRowRedirection(outputPositionToHashSlot); - // endregion extra variables - - // region constructor visibility - // endregion constructor visibility - IncrementalChunkedOperatorAggregationStateManager(ColumnSource[] tableKeySources - , int tableSize - // region constructor arguments - , double maximumLoadFactor - , double targetLoadFactor - // endregion constructor arguments - ) { - // region super - // endregion super - keyColumnCount = tableKeySources.length; - - this.tableSize = tableSize; - Require.leq(tableSize, "tableSize", MAX_TABLE_SIZE); - Require.gtZero(tableSize, "tableSize"); - Require.eq(Integer.bitCount(tableSize), "Integer.bitCount(tableSize)", 1); - // mixin rehash - this.tableHashPivot = tableSize; - // endmixin rehash - - overflowKeySources = new ArrayBackedColumnSource[keyColumnCount]; - keySources = new ArrayBackedColumnSource[keyColumnCount]; - - keyChunkTypes = new ChunkType[keyColumnCount]; - chunkHashers = new ChunkHasher[keyColumnCount]; - chunkEquals = new ChunkEquals[keyColumnCount]; - chunkCopiers = new PermuteKernel[keyColumnCount]; - - for (int ii = 0; ii < keyColumnCount; ++ii) { - // the sources that we will use to store our hash table - keySources[ii] = ArrayBackedColumnSource.getMemoryColumnSource(tableSize, tableKeySources[ii].getType()); - keyChunkTypes[ii] = tableKeySources[ii].getChunkType(); - - overflowKeySources[ii] = ArrayBackedColumnSource.getMemoryColumnSource(CHUNK_SIZE, tableKeySources[ii].getType()); - - chunkHashers[ii] = ChunkHasher.makeHasher(keyChunkTypes[ii]); - chunkEquals[ii] = ChunkEquals.makeEqual(keyChunkTypes[ii]); - chunkCopiers[ii] = PermuteKernel.makePermuteKernel(keyChunkTypes[ii]); - } - - // mixin rehash - overflowKeyColumnsToNull = Arrays.stream(overflowKeySources).filter(x -> x instanceof ObjectArraySource).map(x -> (ObjectArraySource)x).toArray(ObjectArraySource[]::new); - // endmixin rehash - - // region constructor - this.maximumLoadFactor = maximumLoadFactor; - this.targetLoadFactor = targetLoadFactor; - // endregion constructor - - ensureCapacity(tableSize); - } - - private void ensureCapacity(int tableSize) { - stateSource.ensureCapacity(tableSize); - overflowLocationSource.ensureCapacity(tableSize); - for (int ii = 0; ii < keyColumnCount; ++ii) { - keySources[ii].ensureCapacity(tableSize); - } - // region ensureCapacity - // endregion ensureCapacity - } - - private void ensureOverflowCapacity(WritableIntChunk chunkPositionsToInsertInOverflow) { - final int locationsToAllocate = chunkPositionsToInsertInOverflow.size(); - // mixin rehash - if (freeOverflowCount >= locationsToAllocate) { - return; - } - final int newCapacity = nextOverflowLocation + locationsToAllocate - freeOverflowCount; - // endmixin rehash - // altmixin rehash: final int newCapacity = nextOverflowLocation + locationsToAllocate; - overflowOverflowLocationSource.ensureCapacity(newCapacity); - overflowStateSource.ensureCapacity(newCapacity); - //noinspection ForLoopReplaceableByForEach - for (int ii = 0; ii < overflowKeySources.length; ++ii) { - overflowKeySources[ii].ensureCapacity(newCapacity); - } - // region ensureOverflowCapacity - // endregion ensureOverflowCapacity - 
} - - // region build wrappers - @Override - public void beginUpdateCycle() { - } - - @Override - public void add(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, MutableInt nextOutputPosition, WritableIntChunk outputPositions) { - if (rowSequence.isEmpty()) { - return; - } - buildTable((BuildContext) bc, rowSequence, sources, nextOutputPosition, outputPositions, null); - } - - @Override - public void addForUpdate(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, MutableInt nextOutputPosition, WritableIntChunk outputPositions, WritableIntChunk reincarnatedPositions) { - if (rowSequence.isEmpty()) { - return; - } - buildTable((BuildContext) bc, rowSequence, sources, nextOutputPosition, outputPositions, reincarnatedPositions); - } - - @Override - public BuildContext makeAggregationStateBuildContext(ColumnSource[] buildSources, long maxSize) { - return makeBuildContext(buildSources, maxSize); - } - // endregion build wrappers - - class BuildContext implements Context { - final int chunkSize; - - final LongIntTimsortKernel.LongIntSortKernelContext sortContext; - final ColumnSource.FillContext stateSourceFillContext; - // mixin rehash - final ColumnSource.FillContext overflowStateSourceFillContext; - // endmixin rehash - final ColumnSource.FillContext overflowFillContext; - final ColumnSource.FillContext overflowOverflowFillContext; - - // the chunk of hashcodes - final WritableIntChunk hashChunk; - // the chunk of positions within our table - final WritableLongChunk tableLocationsChunk; - - final ResettableWritableChunk[] writeThroughChunks = getResettableWritableKeyChunks(); - final WritableIntChunk sourcePositions; - final WritableIntChunk destinationLocationPositionInWriteThrough; - - final WritableBooleanChunk filledValues; - final WritableBooleanChunk equalValues; - - // the overflow locations that we need to get from the overflowLocationSource (or overflowOverflowLocationSource) - final WritableLongChunk overflowLocationsToFetch; - // the overflow position in the working key chunks, parallel to the overflowLocationsToFetch - final WritableIntChunk overflowPositionInSourceChunk; - - // the position with our hash table that we should insert a value into - final WritableLongChunk insertTableLocations; - // the position in our chunk, parallel to the workingChunkInsertTablePositions - final WritableIntChunk insertPositionsInSourceChunk; - - // we sometimes need to check two positions within a single chunk for equality, this contains those positions as pairs - final WritableIntChunk chunkPositionsToCheckForEquality; - // While processing overflow insertions, parallel to the chunkPositions to check for equality, the overflow location that - // is represented by the first of the pairs in chunkPositionsToCheckForEquality - final WritableLongChunk overflowLocationForEqualityCheck; - - // the chunk of state values that we read from the hash table - // @WritableStateChunkType@ from \QWritableIntChunk\E - final WritableIntChunk workingStateEntries; - - // the chunks for getting key values from the hash table - final WritableChunk[] workingKeyChunks; - final WritableChunk[] overflowKeyChunks; - - // when fetching from the overflow, we record which chunk position we are fetching for - final WritableIntChunk chunkPositionsForFetches; - // which positions in the chunk we are inserting into the overflow - final WritableIntChunk chunkPositionsToInsertInOverflow; - // which table locations we are inserting into the overflow - final WritableLongChunk 
tableLocationsToInsertInOverflow; - - // values we have read from the overflow locations sources - final WritableIntChunk overflowLocations; - - // mixin rehash - final WritableLongChunk rehashLocations; - final WritableIntChunk overflowLocationsToMigrate; - final WritableLongChunk overflowLocationsAsKeyIndices; - final WritableBooleanChunk shouldMoveBucket; - - final ResettableWritableLongChunk overflowLocationForPromotionLoop = ResettableWritableLongChunk.makeResettableChunk(); - - // mixin allowUpdateWriteThroughState - // @WritableStateChunkType@ from \QWritableIntChunk\E, @WritableStateChunkName@ from \QWritableIntChunk\E - final ResettableWritableIntChunk writeThroughState = ResettableWritableIntChunk.makeResettableChunk(); - // endmixin allowUpdateWriteThroughState - final ResettableWritableIntChunk writeThroughOverflowLocations = ResettableWritableIntChunk.makeResettableChunk(); - // endmixin rehash - - final SharedContext sharedFillContext; - final ColumnSource.FillContext[] workingFillContexts; - final SharedContext sharedOverflowContext; - final ColumnSource.FillContext[] overflowContexts; - final SharedContext sharedBuildContext; - final ChunkSource.GetContext[] buildContexts; - - // region build context - final WritableIntChunk duplicatePositions; - final WritableLongChunk addedSlotsByPosition; - // endregion build context - - final boolean haveSharedContexts; - - private BuildContext(ColumnSource[] buildSources, - int chunkSize - // region build context constructor args - // endregion build context constructor args - ) { - Assert.gtZero(chunkSize, "chunkSize"); - this.chunkSize = chunkSize; - haveSharedContexts = buildSources.length > 1; - if (haveSharedContexts) { - sharedFillContext = SharedContext.makeSharedContext(); - sharedOverflowContext = SharedContext.makeSharedContext(); - sharedBuildContext = SharedContext.makeSharedContext(); - } else { - // no point in the additional work implied by these not being null. 
- sharedFillContext = null; - sharedOverflowContext = null; - sharedBuildContext = null; - } - workingFillContexts = makeFillContexts(keySources, sharedFillContext, chunkSize); - overflowContexts = makeFillContexts(overflowKeySources, sharedOverflowContext, chunkSize); - buildContexts = makeGetContexts(buildSources, sharedBuildContext, chunkSize); - // region build context constructor - duplicatePositions = WritableIntChunk.makeWritableChunk(chunkSize * 2); - addedSlotsByPosition = WritableLongChunk.makeWritableChunk(chunkSize); - // endregion build context constructor - sortContext = LongIntTimsortKernel.createContext(chunkSize); - stateSourceFillContext = stateSource.makeFillContext(chunkSize); - overflowFillContext = overflowLocationSource.makeFillContext(chunkSize); - overflowOverflowFillContext = overflowOverflowLocationSource.makeFillContext(chunkSize); - hashChunk = WritableIntChunk.makeWritableChunk(chunkSize); - tableLocationsChunk = WritableLongChunk.makeWritableChunk(chunkSize); - sourcePositions = WritableIntChunk.makeWritableChunk(chunkSize); - destinationLocationPositionInWriteThrough = WritableIntChunk.makeWritableChunk(chunkSize); - filledValues = WritableBooleanChunk.makeWritableChunk(chunkSize); - equalValues = WritableBooleanChunk.makeWritableChunk(chunkSize); - overflowLocationsToFetch = WritableLongChunk.makeWritableChunk(chunkSize); - overflowPositionInSourceChunk = WritableIntChunk.makeWritableChunk(chunkSize); - insertTableLocations = WritableLongChunk.makeWritableChunk(chunkSize); - insertPositionsInSourceChunk = WritableIntChunk.makeWritableChunk(chunkSize); - chunkPositionsToCheckForEquality = WritableIntChunk.makeWritableChunk(chunkSize * 2); - overflowLocationForEqualityCheck = WritableLongChunk.makeWritableChunk(chunkSize); - // @WritableStateChunkName@ from \QWritableIntChunk\E - workingStateEntries = WritableIntChunk.makeWritableChunk(chunkSize); - workingKeyChunks = getWritableKeyChunks(chunkSize); - overflowKeyChunks = getWritableKeyChunks(chunkSize); - chunkPositionsForFetches = WritableIntChunk.makeWritableChunk(chunkSize); - chunkPositionsToInsertInOverflow = WritableIntChunk.makeWritableChunk(chunkSize); - tableLocationsToInsertInOverflow = WritableLongChunk.makeWritableChunk(chunkSize); - overflowLocations = WritableIntChunk.makeWritableChunk(chunkSize); - // mixin rehash - rehashLocations = WritableLongChunk.makeWritableChunk(chunkSize); - overflowStateSourceFillContext = overflowStateSource.makeFillContext(chunkSize); - overflowLocationsToMigrate = WritableIntChunk.makeWritableChunk(chunkSize); - overflowLocationsAsKeyIndices = WritableLongChunk.makeWritableChunk(chunkSize); - shouldMoveBucket = WritableBooleanChunk.makeWritableChunk(chunkSize); - // endmixin rehash - } - - private void resetSharedContexts() { - if (!haveSharedContexts) { - return; - } - sharedFillContext.reset(); - sharedOverflowContext.reset(); - sharedBuildContext.reset(); - } - - private void closeSharedContexts() { - if (!haveSharedContexts) { - return; - } - sharedFillContext.close(); - sharedOverflowContext.close(); - sharedBuildContext.close(); - } - - @Override - public void close() { - sortContext.close(); - stateSourceFillContext.close(); - // mixin rehash - overflowStateSourceFillContext.close(); - // endmixin rehash - overflowFillContext.close(); - overflowOverflowFillContext.close(); - closeArray(workingFillContexts); - closeArray(overflowContexts); - closeArray(buildContexts); - - hashChunk.close(); - tableLocationsChunk.close(); - closeArray(writeThroughChunks); - - 
sourcePositions.close(); - destinationLocationPositionInWriteThrough.close(); - filledValues.close(); - equalValues.close(); - overflowLocationsToFetch.close(); - overflowPositionInSourceChunk.close(); - insertTableLocations.close(); - insertPositionsInSourceChunk.close(); - chunkPositionsToCheckForEquality.close(); - overflowLocationForEqualityCheck.close(); - workingStateEntries.close(); - closeArray(workingKeyChunks); - closeArray(overflowKeyChunks); - chunkPositionsForFetches.close(); - chunkPositionsToInsertInOverflow.close(); - tableLocationsToInsertInOverflow.close(); - overflowLocations.close(); - // mixin rehash - rehashLocations.close(); - overflowLocationsToMigrate.close(); - overflowLocationsAsKeyIndices.close(); - shouldMoveBucket.close(); - overflowLocationForPromotionLoop.close(); - // mixin allowUpdateWriteThroughState - writeThroughState.close(); - // endmixin allowUpdateWriteThroughState - writeThroughOverflowLocations.close(); - // endmixin rehash - // region build context close - duplicatePositions.close(); - addedSlotsByPosition.close(); - // endregion build context close - closeSharedContexts(); - } - - } - - public BuildContext makeBuildContext(ColumnSource[] buildSources, - long maxSize - // region makeBuildContext args - // endregion makeBuildContext args - ) { - return new BuildContext(buildSources, (int)Math.min(CHUNK_SIZE, maxSize) - // region makeBuildContext arg pass - // endregion makeBuildContext arg pass - ); - } - - private void buildTable(final BuildContext bc, - final RowSequence buildIndex, - ColumnSource[] buildSources - // region extra build arguments - , final MutableInt nextOutputPosition - , final WritableIntChunk outputPositions - , @Nullable final WritableIntChunk reincarnatedPositions - // endregion extra build arguments - ) { - long hashSlotOffset = 0; - // region build start - - outputPositions.setSize(buildIndex.intSize()); - int maxAddedPosition = -1; - bc.addedSlotsByPosition.setSize(outputPositions.size()); - bc.addedSlotsByPosition.fillWithValue(0, bc.addedSlotsByPosition.size(), RowSequence.NULL_ROW_KEY); - bc.duplicatePositions.setSize(0); - - if (reincarnatedPositions != null) { - reincarnatedPositions.setSize(0); - } - - // endregion build start - - try (final RowSequence.Iterator rsIt = buildIndex.getRowSequenceIterator(); - // region build initialization try - // endregion build initialization try - ) { - // region build initialization - // endregion build initialization - - // chunks to write through to the table key sources - - - //noinspection unchecked - final Chunk [] sourceKeyChunks = new Chunk[buildSources.length]; - - while (rsIt.hasMore()) { - // we reset early to avoid carrying around state for old RowSequence which can't be reused. 
- bc.resetSharedContexts(); - - final RowSequence chunkOk = rsIt.getNextRowSequenceWithLength(bc.chunkSize); - - getKeyChunks(buildSources, bc.buildContexts, sourceKeyChunks, chunkOk); - hashKeyChunks(bc.hashChunk, sourceKeyChunks); - - // region build loop initialization - rowCountSource.ensureCapacity(nextOutputPosition.intValue() + chunkOk.size()); - // endregion build loop initialization - - // turn hash codes into indices within our table - convertHashToTableLocations(bc.hashChunk, bc.tableLocationsChunk); - - // now fetch the values from the table, note that we do not order these fetches - fillKeys(bc.workingFillContexts, bc.workingKeyChunks, bc.tableLocationsChunk); - - // and the corresponding states, if a value is null, we've found our insertion point - stateSource.fillChunkUnordered(bc.stateSourceFillContext, bc.workingStateEntries, bc.tableLocationsChunk); - - // find things that exist - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(bc.workingStateEntries, EMPTY_RIGHT_VALUE, bc.filledValues); - - // to be equal, the location must exist; and each of the keyChunks must match - bc.equalValues.setSize(bc.filledValues.size()); - bc.equalValues.copyFromChunk(bc.filledValues, 0, 0, bc.filledValues.size()); - checkKeyEquality(bc.equalValues, bc.workingKeyChunks, sourceKeyChunks); - - bc.overflowPositionInSourceChunk.setSize(0); - bc.overflowLocationsToFetch.setSize(0); - bc.insertPositionsInSourceChunk.setSize(0); - bc.insertTableLocations.setSize(0); - - for (int ii = 0; ii < bc.equalValues.size(); ++ii) { - final long tableLocation = bc.tableLocationsChunk.get(ii); - if (bc.equalValues.get(ii)) { - // region build found main - final int foundPosition = bc.workingStateEntries.get(ii); - outputPositions.set(ii, foundPosition); - - final long oldRowCount = rowCountSource.getUnsafe(foundPosition); - Assert.geqZero(oldRowCount, "oldRowCount"); - if (reincarnatedPositions != null && oldRowCount == 0) { - reincarnatedPositions.add(foundPosition); - } - rowCountSource.set(foundPosition, oldRowCount + 1); - // endregion build found main - } else if (bc.filledValues.get(ii)) { - // we must handle this as part of the overflow bucket - bc.overflowPositionInSourceChunk.add(ii); - bc.overflowLocationsToFetch.add(tableLocation); - } else { - // for the values that are empty, we record them in the insert chunks - bc.insertPositionsInSourceChunk.add(ii); - bc.insertTableLocations.add(tableLocation); - } - } - - // we first sort by position; so that we'll not insert things into the table twice or overwrite - // collisions - LongIntTimsortKernel.sort(bc.sortContext, bc.insertPositionsInSourceChunk, bc.insertTableLocations); - - // the first and last valid table location in our writeThroughChunks - long firstBackingChunkLocation = -1; - long lastBackingChunkLocation = -1; - - bc.chunkPositionsToCheckForEquality.setSize(0); - bc.destinationLocationPositionInWriteThrough.setSize(0); - bc.sourcePositions.setSize(0); - - for (int ii = 0; ii < bc.insertPositionsInSourceChunk.size(); ) { - final int firstChunkPositionForHashLocation = bc.insertPositionsInSourceChunk.get(ii); - final long currentHashLocation = bc.insertTableLocations.get(ii); - - // region main insert - stateSource.set(currentHashLocation, chunkPositionToPendingState(firstChunkPositionForHashLocation)); - - bc.addedSlotsByPosition.set(firstChunkPositionForHashLocation, currentHashLocation); - maxAddedPosition = Math.max(maxAddedPosition, firstChunkPositionForHashLocation); - // endregion main insert - // mixin rehash - 
numEntries++; - // endmixin rehash - - if (currentHashLocation > lastBackingChunkLocation) { - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, currentHashLocation, keySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - } - - bc.sourcePositions.add(firstChunkPositionForHashLocation); - bc.destinationLocationPositionInWriteThrough.add((int)(currentHashLocation - firstBackingChunkLocation)); - - final int currentHashValue = bc.hashChunk.get(firstChunkPositionForHashLocation); - - while (++ii < bc.insertTableLocations.size() && bc.insertTableLocations.get(ii) == currentHashLocation) { - // if this thing is equal to the first one; we should mark the appropriate slot, we don't - // know the types and don't want to make the virtual calls, so we need to just accumulate - // the things to check for equality afterwards - final int chunkPosition = bc.insertPositionsInSourceChunk.get(ii); - if (bc.hashChunk.get(chunkPosition) != currentHashValue) { - // we must be an overflow - bc.overflowPositionInSourceChunk.add(chunkPosition); - bc.overflowLocationsToFetch.add(currentHashLocation); - } else { - // we need to check equality, equal things are the same slot; unequal things are overflow - bc.chunkPositionsToCheckForEquality.add(firstChunkPositionForHashLocation); - bc.chunkPositionsToCheckForEquality.add(chunkPosition); - } - } - } - - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - - checkPairEquality(bc.chunkPositionsToCheckForEquality, sourceKeyChunks, bc.equalValues); - - for (int ii = 0; ii < bc.equalValues.size(); ii++) { - final int chunkPosition = bc.chunkPositionsToCheckForEquality.get(ii * 2 + 1); - final long tableLocation = bc.tableLocationsChunk.get(chunkPosition); - - if (bc.equalValues.get(ii)) { - // region build main duplicate - bc.duplicatePositions.add(chunkPosition); - bc.duplicatePositions.add(bc.chunkPositionsToCheckForEquality.get(ii * 2)); - // endregion build main duplicate - } else { - // we are an overflow element - bc.overflowPositionInSourceChunk.add(chunkPosition); - bc.overflowLocationsToFetch.add(tableLocation); - } - } - - // now handle overflow - if (bc.overflowPositionInSourceChunk.size() > 0) { - // on the first pass we fill from the table's locations - overflowLocationSource.fillChunkUnordered(bc.overflowFillContext, bc.overflowLocations, bc.overflowLocationsToFetch); - bc.chunkPositionsToInsertInOverflow.setSize(0); - bc.tableLocationsToInsertInOverflow.setSize(0); - - // overflow slots now contains the positions in the overflow columns - - while (bc.overflowPositionInSourceChunk.size() > 0) { - // now we have the overflow slot for each of the things we are interested in. - // if the slot is null, then we can insert it and we are complete. 
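The overflow handling that follows relies on two parallel integer mappings: a per-bucket head pointer (overflowLocationSource) and a per-overflow-slot next pointer (overflowOverflowLocationSource), with QueryConstants.NULL_INT as the end-of-chain sentinel. Below is a simplified, standalone Java sketch of a lookup over such chains, not the Deephaven code; the class name OverflowChainSketch and the plain long keys are invented, and NULL_INT here is just Integer.MIN_VALUE standing in for the real sentinel.

    /** Standalone sketch: overflow chains stored as "head per bucket" plus "next per overflow slot". */
    final class OverflowChainSketch {
        static final int NULL_INT = Integer.MIN_VALUE;  // end-of-chain sentinel

        final int[] bucketOverflowHead;  // per main bucket: first overflow slot, or NULL_INT
        final int[] overflowNext;        // per overflow slot: next overflow slot, or NULL_INT
        final long[] overflowKeys;       // per overflow slot: the key stored there
        final int[] overflowState;       // per overflow slot: the output position stored there

        OverflowChainSketch(final int[] heads, final int[] next, final long[] keys, final int[] state) {
            bucketOverflowHead = heads;
            overflowNext = next;
            overflowKeys = keys;
            overflowState = state;
        }

        /** Walk one bucket's chain; return the state for key, or NULL_INT if the chain has no match. */
        int findInOverflow(final int bucket, final long key) {
            int slot = bucketOverflowHead[bucket];
            while (slot != NULL_INT) {
                if (overflowKeys[slot] == key) {
                    return overflowState[slot];
                }
                slot = overflowNext[slot];
            }
            return NULL_INT;
        }
    }

The removed code does the same walk a whole chunk at a time: it fetches the head pointers for every colliding row with fillChunkUnordered, compares keys in bulk, keeps only the rows that are still unresolved, and follows their next pointers through overflowOverflowLocationSource on the following pass; new entries are pushed onto the head of the chain.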
- - bc.overflowLocationsToFetch.setSize(0); - bc.chunkPositionsForFetches.setSize(0); - - // TODO: Crunch it down - for (int ii = 0; ii < bc.overflowLocations.size(); ++ii) { - final int overflowLocation = bc.overflowLocations.get(ii); - final int chunkPosition = bc.overflowPositionInSourceChunk.get(ii); - if (overflowLocation == QueryConstants.NULL_INT) { - // insert me into overflow in the next free overflow slot - bc.chunkPositionsToInsertInOverflow.add(chunkPosition); - bc.tableLocationsToInsertInOverflow.add(bc.tableLocationsChunk.get(chunkPosition)); - } else { - // add to the key positions to fetch - bc.chunkPositionsForFetches.add(chunkPosition); - bc.overflowLocationsToFetch.add(overflowLocation); - } - } - - // if the slot is non-null, then we need to fetch the overflow values for comparison - fillOverflowKeys(bc.overflowContexts, bc.overflowKeyChunks, bc.overflowLocationsToFetch); - - // now compare the value in our overflowKeyChunk to the value in the sourceChunk - checkLhsPermutedEquality(bc.chunkPositionsForFetches, sourceKeyChunks, bc.overflowKeyChunks, bc.equalValues); - - int writePosition = 0; - for (int ii = 0; ii < bc.equalValues.size(); ++ii) { - final int chunkPosition = bc.chunkPositionsForFetches.get(ii); - final long overflowLocation = bc.overflowLocationsToFetch.get(ii); - if (bc.equalValues.get(ii)) { - // region build overflow found - final int position = overflowStateSource.getUnsafe(overflowLocation); - outputPositions.set(chunkPosition, position); - - final long oldRowCount = rowCountSource.getUnsafe(position); - Assert.geqZero(oldRowCount, "oldRowCount"); - if (reincarnatedPositions != null && oldRowCount == 0) { - reincarnatedPositions.add(position); - } - rowCountSource.set(position, oldRowCount + 1); - // endregion build overflow found - } else { - // otherwise, we need to repeat the overflow calculation, with our next overflow fetch - bc.overflowLocationsToFetch.set(writePosition, overflowLocation); - bc.overflowPositionInSourceChunk.set(writePosition++, chunkPosition); - } - } - bc.overflowLocationsToFetch.setSize(writePosition); - bc.overflowPositionInSourceChunk.setSize(writePosition); - - // on subsequent iterations, we are following the overflow chains, so we fill from the overflowOverflowLocationSource - if (bc.overflowPositionInSourceChunk.size() > 0) { - overflowOverflowLocationSource.fillChunkUnordered(bc.overflowOverflowFillContext, bc.overflowLocations, bc.overflowLocationsToFetch); - } - } - - // make sure we actually have enough room to insert stuff where we would like - ensureOverflowCapacity(bc.chunkPositionsToInsertInOverflow); - - firstBackingChunkLocation = -1; - lastBackingChunkLocation = -1; - bc.destinationLocationPositionInWriteThrough.setSize(0); - bc.sourcePositions.setSize(0); - - // do the overflow insertions, one per table position at a time; until we have no insertions left - while (bc.chunkPositionsToInsertInOverflow.size() > 0) { - // sort by table position - LongIntTimsortKernel.sort(bc.sortContext, bc.chunkPositionsToInsertInOverflow, bc.tableLocationsToInsertInOverflow); - - bc.chunkPositionsToCheckForEquality.setSize(0); - bc.overflowLocationForEqualityCheck.setSize(0); - - for (int ii = 0; ii < bc.chunkPositionsToInsertInOverflow.size(); ) { - final long tableLocation = bc.tableLocationsToInsertInOverflow.get(ii); - final int chunkPosition = bc.chunkPositionsToInsertInOverflow.get(ii); - - final int allocatedOverflowLocation = allocateOverflowLocation(); - - // we are inserting into the head of the list, so we move the 
existing overflow into our overflow - overflowOverflowLocationSource.set(allocatedOverflowLocation, overflowLocationSource.getUnsafe(tableLocation)); - // and we point the overflow at our slot - overflowLocationSource.set(tableLocation, allocatedOverflowLocation); - - // region build overflow insert - overflowStateSource.set(allocatedOverflowLocation, chunkPositionToPendingState(chunkPosition)); - bc.addedSlotsByPosition.set(chunkPosition, overflowLocationToHashLocation(allocatedOverflowLocation)); - maxAddedPosition = Math.max(maxAddedPosition, chunkPosition); - // endregion build overflow insert - - // mixin rehash - numEntries++; - // endmixin rehash - - // get the backing chunk from the overflow keys - if (allocatedOverflowLocation > lastBackingChunkLocation || allocatedOverflowLocation < firstBackingChunkLocation) { - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, allocatedOverflowLocation, overflowKeySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - } - - // now we must set all of our key values in the overflow - bc.sourcePositions.add(chunkPosition); - bc.destinationLocationPositionInWriteThrough.add((int)(allocatedOverflowLocation - firstBackingChunkLocation)); - - while (++ii < bc.tableLocationsToInsertInOverflow.size() && bc.tableLocationsToInsertInOverflow.get(ii) == tableLocation) { - bc.overflowLocationForEqualityCheck.add(allocatedOverflowLocation); - bc.chunkPositionsToCheckForEquality.add(chunkPosition); - bc.chunkPositionsToCheckForEquality.add(bc.chunkPositionsToInsertInOverflow.get(ii)); - } - } - - // now we need to do the equality check; so that we can mark things appropriately - int remainingInserts = 0; - - checkPairEquality(bc.chunkPositionsToCheckForEquality, sourceKeyChunks, bc.equalValues); - for (int ii = 0; ii < bc.equalValues.size(); ii++) { - final int chunkPosition = bc.chunkPositionsToCheckForEquality.get(ii * 2 + 1); - final long tableLocation = bc.tableLocationsChunk.get(chunkPosition); - - if (bc.equalValues.get(ii)) { - final long insertedOverflowLocation = bc.overflowLocationForEqualityCheck.get(ii); - // region build overflow duplicate - bc.duplicatePositions.add(chunkPosition); - bc.duplicatePositions.add(bc.chunkPositionsToCheckForEquality.get(ii * 2)); - // endregion build overflow duplicate - } else { - // we need to try this element again in the next round - bc.chunkPositionsToInsertInOverflow.set(remainingInserts, chunkPosition); - bc.tableLocationsToInsertInOverflow.set(remainingInserts++, tableLocation); - } - } - - bc.chunkPositionsToInsertInOverflow.setSize(remainingInserts); - bc.tableLocationsToInsertInOverflow.setSize(remainingInserts); - } - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - // mixin rehash - // region post-build rehash - doRehash(bc); - // endregion post-build rehash - // endmixin rehash - } - - // region copy hash slots - outputPositionToHashSlot.ensureCapacity(nextOutputPosition.intValue() + maxAddedPosition + 1); - for (int ii = 0; ii <= maxAddedPosition; ++ii) { - final long longSlot = bc.addedSlotsByPosition.get(ii); - if (longSlot != RowSequence.NULL_ROW_KEY) { - final int intSlot = (int) longSlot; - - outputPositions.set(ii, nextOutputPosition.intValue()); - if (isOverflowLocation(intSlot)) { - 
overflowStateSource.set(hashLocationToOverflowLocation(intSlot), nextOutputPosition.intValue()); - } else { - stateSource.set(intSlot, nextOutputPosition.intValue()); - } - rowCountSource.set(nextOutputPosition.intValue(), 1L); - - outputPositionToHashSlot.set(nextOutputPosition.intValue(), intSlot); - nextOutputPosition.increment(); - } - } - - for (int ii = 0; ii < bc.duplicatePositions.size(); ii += 2) { - final int position = outputPositions.get(bc.duplicatePositions.get(ii + 1)); - outputPositions.set(bc.duplicatePositions.get(ii), position); - rowCountSource.set(position, rowCountSource.getUnsafe(position) + 1L); - } - // endregion copy hash slots - hashSlotOffset += chunkOk.size(); - } - // region post build loop - // endregion post build loop - } - } - - // mixin rehash - public void doRehash(BuildContext bc - // region extra rehash arguments - // endregion extra rehash arguments - ) { - long firstBackingChunkLocation; - long lastBackingChunkLocation;// mixin rehash - // region rehash start - // endregion rehash start - while (rehashRequired()) { - // region rehash loop start - // endregion rehash loop start - if (tableHashPivot == tableSize) { - tableSize *= 2; - ensureCapacity(tableSize); - // region rehash ensure capacity - // endregion rehash ensure capacity - } - - final long targetBuckets = Math.min(MAX_TABLE_SIZE, (long)(numEntries / targetLoadFactor)); - final int bucketsToAdd = Math.max(1, (int)Math.min(Math.min(targetBuckets, tableSize) - tableHashPivot, bc.chunkSize)); - - initializeRehashLocations(bc.rehashLocations, bucketsToAdd); - - // fill the overflow bucket locations - overflowLocationSource.fillChunk(bc.overflowFillContext, bc.overflowLocations, RowSequenceFactory.wrapRowKeysChunkAsRowSequence(LongChunk.downcast(bc.rehashLocations))); - // null out the overflow locations in the table - setOverflowLocationsToNull(tableHashPivot - (tableSize >> 1), bucketsToAdd); - - while (bc.overflowLocations.size() > 0) { - // figure out which table location each overflow location maps to - compactOverflowLocations(bc.overflowLocations, bc.overflowLocationsToFetch); - if (bc.overflowLocationsToFetch.size() == 0) { - break; - } - - fillOverflowKeys(bc.overflowContexts, bc.workingKeyChunks, bc.overflowLocationsToFetch); - hashKeyChunks(bc.hashChunk, bc.workingKeyChunks); - convertHashToTableLocations(bc.hashChunk, bc.tableLocationsChunk, tableHashPivot + bucketsToAdd); - - // read the next chunk of overflow locations, which we will be overwriting in the next step - overflowOverflowLocationSource.fillChunkUnordered(bc.overflowOverflowFillContext, bc.overflowLocations, bc.overflowLocationsToFetch); - - // swap the table's overflow pointer with our location - swapOverflowPointers(bc.tableLocationsChunk, bc.overflowLocationsToFetch); - } - - // now rehash the main entries - - stateSource.fillChunkUnordered(bc.stateSourceFillContext, bc.workingStateEntries, bc.rehashLocations); - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(bc.workingStateEntries, EMPTY_RIGHT_VALUE, bc.shouldMoveBucket); - - // crush down things that don't exist - LongCompactKernel.compact(bc.rehashLocations, bc.shouldMoveBucket); - - // get the keys from the table - fillKeys(bc.workingFillContexts, bc.workingKeyChunks, bc.rehashLocations); - hashKeyChunks(bc.hashChunk, bc.workingKeyChunks); - convertHashToTableLocations(bc.hashChunk, bc.tableLocationsChunk, tableHashPivot + bucketsToAdd); - - // figure out which ones must move - LongChunkEquals.notEqual(bc.tableLocationsChunk, 
bc.rehashLocations, bc.shouldMoveBucket); - - firstBackingChunkLocation = -1; - lastBackingChunkLocation = -1; - // flushWriteThrough will have zero-ed out the sourcePositions and destinationLocationPositionInWriteThrough size - - int moves = 0; - for (int ii = 0; ii < bc.shouldMoveBucket.size(); ++ii) { - if (bc.shouldMoveBucket.get(ii)) { - moves++; - final long newHashLocation = bc.tableLocationsChunk.get(ii); - final long oldHashLocation = bc.rehashLocations.get(ii); - - if (newHashLocation > lastBackingChunkLocation) { - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, newHashLocation, keySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - } - - // @StateValueType@ from \Qint\E - final int stateValueToMove = stateSource.getUnsafe(oldHashLocation); - stateSource.set(newHashLocation, stateValueToMove); - stateSource.set(oldHashLocation, EMPTY_RIGHT_VALUE); - // region rehash move values - if (isPendingState(stateValueToMove)) { - bc.addedSlotsByPosition.set(pendingStateToChunkPosition(stateValueToMove), newHashLocation); - } else { - outputPositionToHashSlot.set(stateValueToMove, (int) newHashLocation); - } - // endregion rehash move values - - bc.sourcePositions.add(ii); - bc.destinationLocationPositionInWriteThrough.add((int)(newHashLocation - firstBackingChunkLocation)); - } - } - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - - // everything has been rehashed now, but we have some table locations that might have an overflow, - // without actually having a main entry. 
We walk through the empty main entries, pulling non-empty - // overflow locations into the main table - - // figure out which of the two possible locations is empty, because (1) we moved something from it - // or (2) we did not move something to it - bc.overflowLocationsToFetch.setSize(bc.shouldMoveBucket.size()); - final int totalPromotionsToProcess = bc.shouldMoveBucket.size(); - createOverflowPartitions(bc.overflowLocationsToFetch, bc.rehashLocations, bc.shouldMoveBucket, moves); - - for (int loop = 0; loop < 2; loop++) { - final boolean firstLoop = loop == 0; - - if (firstLoop) { - bc.overflowLocationForPromotionLoop.resetFromTypedChunk(bc.overflowLocationsToFetch, 0, moves); - } else { - bc.overflowLocationForPromotionLoop.resetFromTypedChunk(bc.overflowLocationsToFetch, moves, totalPromotionsToProcess - moves); - } - - overflowLocationSource.fillChunk(bc.overflowFillContext, bc.overflowLocations, RowSequenceFactory.wrapRowKeysChunkAsRowSequence(bc.overflowLocationForPromotionLoop)); - IntChunkEquals.notEqual(bc.overflowLocations, QueryConstants.NULL_INT, bc.shouldMoveBucket); - - // crunch the chunk down to relevant locations - LongCompactKernel.compact(bc.overflowLocationForPromotionLoop, bc.shouldMoveBucket); - IntCompactKernel.compact(bc.overflowLocations, bc.shouldMoveBucket); - - IntToLongCast.castInto(IntChunk.downcast(bc.overflowLocations), bc.overflowLocationsAsKeyIndices); - - // now fetch the overflow key values - fillOverflowKeys(bc.overflowContexts, bc.workingKeyChunks, bc.overflowLocationsAsKeyIndices); - // and their state values - overflowStateSource.fillChunkUnordered(bc.overflowStateSourceFillContext, bc.workingStateEntries, bc.overflowLocationsAsKeyIndices); - // and where their next pointer is - overflowOverflowLocationSource.fillChunkUnordered(bc.overflowOverflowFillContext, bc.overflowLocationsToMigrate, bc.overflowLocationsAsKeyIndices); - - // we'll have two sorted regions intermingled in the overflowLocationsToFetch, one of them is before the pivot, the other is after the pivot - // so that we can use our write through chunks, we first process the things before the pivot; then have a separate loop for those - // that go after - firstBackingChunkLocation = -1; - lastBackingChunkLocation = -1; - - for (int ii = 0; ii < bc.overflowLocationForPromotionLoop.size(); ++ii) { - final long tableLocation = bc.overflowLocationForPromotionLoop.get(ii); - if ((firstLoop && tableLocation < tableHashPivot) || (!firstLoop && tableLocation >= tableHashPivot)) { - if (tableLocation > lastBackingChunkLocation) { - if (bc.sourcePositions.size() > 0) { - // the permutes here are flushing the write through for the state and overflow locations - - // mixin allowUpdateWriteThroughState - // @StateChunkTypeEnum@ from \QInt\E - IntPermuteKernel.permute(bc.sourcePositions, bc.workingStateEntries, bc.destinationLocationPositionInWriteThrough, bc.writeThroughState); - // endmixin allowUpdateWriteThroughState - IntPermuteKernel.permute(bc.sourcePositions, bc.overflowLocationsToMigrate, bc.destinationLocationPositionInWriteThrough, bc.writeThroughOverflowLocations); - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - } - - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, tableLocation, keySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - // mixin allowUpdateWriteThroughState - updateWriteThroughState(bc.writeThroughState, 
firstBackingChunkLocation, lastBackingChunkLocation); - // endmixin allowUpdateWriteThroughState - updateWriteThroughOverflow(bc.writeThroughOverflowLocations, firstBackingChunkLocation, lastBackingChunkLocation); - } - bc.sourcePositions.add(ii); - bc.destinationLocationPositionInWriteThrough.add((int)(tableLocation - firstBackingChunkLocation)); - // region promotion move - final long overflowLocation = bc.overflowLocationsAsKeyIndices.get(ii); - final int positionForSlot = overflowStateSource.getUnsafe(overflowLocation); - if (isPendingState(positionForSlot)) { - bc.addedSlotsByPosition.set(pendingStateToChunkPosition(positionForSlot), tableLocation); - } else { - outputPositionToHashSlot.set(positionForSlot, (int) tableLocation); - } - // endregion promotion move - } - } - - // the permutes are completing the state and overflow promotions write through - // mixin allowUpdateWriteThroughState - // @StateChunkTypeEnum@ from \QInt\E - IntPermuteKernel.permute(bc.sourcePositions, bc.workingStateEntries, bc.destinationLocationPositionInWriteThrough, bc.writeThroughState); - // endmixin allowUpdateWriteThroughState - IntPermuteKernel.permute(bc.sourcePositions, bc.overflowLocationsToMigrate, bc.destinationLocationPositionInWriteThrough, bc.writeThroughOverflowLocations); - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - - // now mark these overflow locations as free, so that we can reuse them - freeOverflowLocations.ensureCapacity(freeOverflowCount + bc.overflowLocations.size()); - // by sorting them, they will be more likely to be in the same write through chunk when we pull them from the free list - bc.overflowLocations.sort(); - for (int ii = 0; ii < bc.overflowLocations.size(); ++ii) { - freeOverflowLocations.set(freeOverflowCount++, bc.overflowLocations.get(ii)); - } - nullOverflowObjectSources(bc.overflowLocations); - } - - tableHashPivot += bucketsToAdd; - // region rehash loop end - // endregion rehash loop end - } - // region rehash final - // endregion rehash final - } - - public boolean rehashRequired() { - return numEntries > (tableHashPivot * maximumLoadFactor) && tableHashPivot < MAX_TABLE_SIZE; - } - - /** - * This function can be stuck in for debugging if you are breaking the table to make sure each slot still corresponds - * to the correct location. 
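The rehash machinery above grows the table incrementally rather than all at once: tableHashPivot marks how many buckets of the doubled table are already in use, and hashToTableLocation (further down in this file) folds any location at or beyond the pivot back into the lower half. The sketch below restates just that arithmetic as standalone Java; the surrounding fields are invented for illustration, but the two method bodies mirror hashToTableLocation and rehashRequired from the removed code.

    /** Standalone sketch of the pivot-based ("linear hashing" style) slot calculation and rehash trigger. */
    final class PivotHashingSketch {
        int tableSize;        // always a power of two; the upper half is populated as the pivot advances
        int tableHashPivot;   // buckets below the pivot have already been split into the doubled table
        int numEntries;       // occupied states (main plus overflow)
        double maximumLoadFactor = 0.75;

        /** Map a hash code to a main-table location, honouring the split pivot. */
        int hashToTableLocation(final int pivotPoint, final int hash) {
            int location = hash & (tableSize - 1);
            if (location >= pivotPoint) {
                // this bucket has not been split yet, so its entries still live in the lower half
                location -= (tableSize >> 1);
            }
            return location;
        }

        /** Split more buckets once the entry count outruns the pivot, until the maximum size is reached. */
        boolean rehashRequired(final int maxTableSize) {
            return numEntries > (tableHashPivot * maximumLoadFactor) && tableHashPivot < maxTableSize;
        }
    }

Each pass of doRehash then advances the pivot by bucketsToAdd, moving the entries (and promoting the overflow chains) whose new location falls in the freshly opened upper half.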
- */ - @SuppressWarnings({"unused", "unchecked"}) - private void verifyKeyHashes() { - final int maxSize = tableHashPivot; - - final ChunkSource.FillContext [] keyFillContext = makeFillContexts(keySources, SharedContext.makeSharedContext(), maxSize); - final WritableChunk [] keyChunks = getWritableKeyChunks(maxSize); - - try (final WritableLongChunk positions = WritableLongChunk.makeWritableChunk(maxSize); - final WritableBooleanChunk exists = WritableBooleanChunk.makeWritableChunk(maxSize); - final WritableIntChunk hashChunk = WritableIntChunk.makeWritableChunk(maxSize); - final WritableLongChunk tableLocationsChunk = WritableLongChunk.makeWritableChunk(maxSize); - final SafeCloseableArray ignored = new SafeCloseableArray<>(keyFillContext); - final SafeCloseableArray ignored2 = new SafeCloseableArray<>(keyChunks); - // @StateChunkName@ from \QIntChunk\E - final WritableIntChunk stateChunk = WritableIntChunk.makeWritableChunk(maxSize); - final ChunkSource.FillContext fillContext = stateSource.makeFillContext(maxSize)) { - - stateSource.fillChunk(fillContext, stateChunk, RowSetFactory.flat(tableHashPivot)); - - ChunkUtils.fillInOrder(positions); - - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(stateChunk, EMPTY_RIGHT_VALUE, exists); - - // crush down things that don't exist - LongCompactKernel.compact(positions, exists); - - // get the keys from the table - fillKeys(keyFillContext, keyChunks, positions); - hashKeyChunks(hashChunk, keyChunks); - convertHashToTableLocations(hashChunk, tableLocationsChunk, tableHashPivot); - - for (int ii = 0; ii < positions.size(); ++ii) { - if (tableLocationsChunk.get(ii) != positions.get(ii)) { - throw new IllegalStateException(); - } - } - } - } - - void setTargetLoadFactor(final double targetLoadFactor) { - this.targetLoadFactor = targetLoadFactor; - } - - void setMaximumLoadFactor(final double maximumLoadFactor) { - this.maximumLoadFactor = maximumLoadFactor; - } - - private void createOverflowPartitions(WritableLongChunk overflowLocationsToFetch, WritableLongChunk rehashLocations, WritableBooleanChunk shouldMoveBucket, int moves) { - int startWritePosition = 0; - int endWritePosition = moves; - for (int ii = 0; ii < shouldMoveBucket.size(); ++ii) { - if (shouldMoveBucket.get(ii)) { - final long oldHashLocation = rehashLocations.get(ii); - // this needs to be promoted, because we moved it - overflowLocationsToFetch.set(startWritePosition++, oldHashLocation); - } else { - // we didn't move anything into the destination slot; so we need to promote its overflow - final long newEmptyHashLocation = rehashLocations.get(ii) + (tableSize >> 1); - overflowLocationsToFetch.set(endWritePosition++, newEmptyHashLocation); - } - } - } - - private void setOverflowLocationsToNull(long start, int count) { - for (int ii = 0; ii < count; ++ii) { - overflowLocationSource.set(start + ii, QueryConstants.NULL_INT); - } - } - - private void initializeRehashLocations(WritableLongChunk rehashLocations, int bucketsToAdd) { - rehashLocations.setSize(bucketsToAdd); - for (int ii = 0; ii < bucketsToAdd; ++ii) { - rehashLocations.set(ii, tableHashPivot + ii - (tableSize >> 1)); - } - } - - private void compactOverflowLocations(IntChunk overflowLocations, WritableLongChunk overflowLocationsToFetch) { - overflowLocationsToFetch.setSize(0); - for (int ii = 0; ii < overflowLocations.size(); ++ii) { - final int overflowLocation = overflowLocations.get(ii); - if (overflowLocation != QueryConstants.NULL_INT) { - overflowLocationsToFetch.add(overflowLocation); - } - 
} - } - - private void swapOverflowPointers(LongChunk tableLocationsChunk, LongChunk overflowLocationsToFetch) { - for (int ii = 0; ii < overflowLocationsToFetch.size(); ++ii) { - final long newLocation = tableLocationsChunk.get(ii); - final int existingOverflow = overflowLocationSource.getUnsafe(newLocation); - final long overflowLocation = overflowLocationsToFetch.get(ii); - overflowOverflowLocationSource.set(overflowLocation, existingOverflow); - overflowLocationSource.set(newLocation, (int)overflowLocation); - } - } - - // mixin allowUpdateWriteThroughState - // @WritableStateChunkType@ from \QWritableIntChunk\E - private void updateWriteThroughState(ResettableWritableIntChunk writeThroughState, long firstPosition, long expectedLastPosition) { - final long firstBackingChunkPosition = stateSource.resetWritableChunkToBackingStore(writeThroughState, firstPosition); - if (firstBackingChunkPosition != firstPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - if (firstBackingChunkPosition + writeThroughState.size() - 1 != expectedLastPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - } - // endmixin allowUpdateWriteThroughState - - private void updateWriteThroughOverflow(ResettableWritableIntChunk writeThroughOverflow, long firstPosition, long expectedLastPosition) { - final long firstBackingChunkPosition = overflowLocationSource.resetWritableChunkToBackingStore(writeThroughOverflow, firstPosition); - if (firstBackingChunkPosition != firstPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - if (firstBackingChunkPosition + writeThroughOverflow.size() - 1 != expectedLastPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - } - - // endmixin rehash - - private int allocateOverflowLocation() { - // mixin rehash - if (freeOverflowCount > 0) { - return freeOverflowLocations.getUnsafe(--freeOverflowCount); - } - // endmixin rehash - return nextOverflowLocation++; - } - - private static long updateWriteThroughChunks(ResettableWritableChunk[] writeThroughChunks, long currentHashLocation, ArrayBackedColumnSource[] sources) { - final long firstBackingChunkPosition = sources[0].resetWritableChunkToBackingStore(writeThroughChunks[0], currentHashLocation); - for (int jj = 1; jj < sources.length; ++jj) { - if (sources[jj].resetWritableChunkToBackingStore(writeThroughChunks[jj], currentHashLocation) != firstBackingChunkPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - if (writeThroughChunks[jj].size() != writeThroughChunks[0].size()) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - } - return firstBackingChunkPosition; - } - - private void flushWriteThrough(WritableIntChunk sourcePositions, Chunk[] sourceKeyChunks, WritableIntChunk destinationLocationPositionInWriteThrough, WritableChunk[] writeThroughChunks) { - if (sourcePositions.size() < 0) { - return; - } - for (int jj = 0; jj < keySources.length; ++jj) { - chunkCopiers[jj].permute(sourcePositions, sourceKeyChunks[jj], destinationLocationPositionInWriteThrough, writeThroughChunks[jj]); - } - sourcePositions.setSize(0); - destinationLocationPositionInWriteThrough.setSize(0); - } - - // mixin rehash - private void nullOverflowObjectSources(IntChunk locationsToNull) { - for (ObjectArraySource objectArraySource : 
overflowKeyColumnsToNull) { - for (int ii = 0; ii < locationsToNull.size(); ++ii) { - objectArraySource.set(locationsToNull.get(ii), null); - } - } - // region nullOverflowObjectSources - // endregion nullOverflowObjectSources - } - // endmixin rehash - - private void checkKeyEquality(WritableBooleanChunk equalValues, WritableChunk[] workingKeyChunks, Chunk[] sourceKeyChunks) { - for (int ii = 0; ii < sourceKeyChunks.length; ++ii) { - chunkEquals[ii].andEqual(workingKeyChunks[ii], sourceKeyChunks[ii], equalValues); - } - } - - private void checkLhsPermutedEquality(WritableIntChunk chunkPositionsForFetches, Chunk[] sourceKeyChunks, WritableChunk[] overflowKeyChunks, WritableBooleanChunk equalValues) { - chunkEquals[0].equalLhsPermuted(chunkPositionsForFetches, sourceKeyChunks[0], overflowKeyChunks[0], equalValues); - for (int ii = 1; ii < overflowKeySources.length; ++ii) { - chunkEquals[ii].andEqualLhsPermuted(chunkPositionsForFetches, sourceKeyChunks[ii], overflowKeyChunks[ii], equalValues); - } - } - - private void checkPairEquality(WritableIntChunk chunkPositionsToCheckForEquality, Chunk[] sourceKeyChunks, WritableBooleanChunk equalPairs) { - chunkEquals[0].equalPairs(chunkPositionsToCheckForEquality, sourceKeyChunks[0], equalPairs); - for (int ii = 1; ii < keyColumnCount; ++ii) { - chunkEquals[ii].andEqualPairs(chunkPositionsToCheckForEquality, sourceKeyChunks[ii], equalPairs); - } - } - - private void fillKeys(ColumnSource.FillContext[] fillContexts, WritableChunk[] keyChunks, WritableLongChunk tableLocationsChunk) { - fillKeys(keySources, fillContexts, keyChunks, tableLocationsChunk); - } - - private void fillOverflowKeys(ColumnSource.FillContext[] fillContexts, WritableChunk[] keyChunks, WritableLongChunk overflowLocationsChunk) { - fillKeys(overflowKeySources, fillContexts, keyChunks, overflowLocationsChunk); - } - - private static void fillKeys(ArrayBackedColumnSource[] keySources, ColumnSource.FillContext[] fillContexts, WritableChunk[] keyChunks, WritableLongChunk keyIndices) { - for (int ii = 0; ii < keySources.length; ++ii) { - keySources[ii].fillChunkUnordered(fillContexts[ii], keyChunks[ii], keyIndices); - } - } - - private void hashKeyChunks(WritableIntChunk hashChunk, Chunk[] sourceKeyChunks) { - chunkHashers[0].hashInitial(sourceKeyChunks[0], hashChunk); - for (int ii = 1; ii < sourceKeyChunks.length; ++ii) { - chunkHashers[ii].hashUpdate(sourceKeyChunks[ii], hashChunk); - } - } - - private void getKeyChunks(ColumnSource[] sources, ColumnSource.GetContext[] contexts, Chunk[] chunks, RowSequence rowSequence) { - for (int ii = 0; ii < chunks.length; ++ii) { - chunks[ii] = sources[ii].getChunk(contexts[ii], rowSequence); - } - } - - // mixin prev - private void getPrevKeyChunks(ColumnSource[] sources, ColumnSource.GetContext[] contexts, Chunk[] chunks, RowSequence rowSequence) { - for (int ii = 0; ii < chunks.length; ++ii) { - chunks[ii] = sources[ii].getPrevChunk(contexts[ii], rowSequence); - } - } - // endmixin prev - - // region probe wrappers - @Override - public void remove(final SafeCloseable pc, RowSequence rowSequence, ColumnSource [] sources, WritableIntChunk outputPositions, WritableIntChunk emptiedPositions) { - if (rowSequence.isEmpty()) { - return; - } - decorationProbe((ProbeContext)pc, rowSequence, sources, true, true, outputPositions, emptiedPositions); - } - - @Override - public void findModifications(final SafeCloseable pc, RowSequence rowSequence, ColumnSource [] sources, WritableIntChunk outputPositions) { - if (rowSequence.isEmpty()) { - return; - } - 
decorationProbe((ProbeContext)pc, rowSequence, sources, false, false, outputPositions, null); - } - // endregion probe wrappers - - // mixin decorationProbe - class ProbeContext implements Context { - final int chunkSize; - - final ColumnSource.FillContext stateSourceFillContext; - final ColumnSource.FillContext overflowFillContext; - final ColumnSource.FillContext overflowOverflowFillContext; - - final SharedContext sharedFillContext; - final ColumnSource.FillContext[] workingFillContexts; - final SharedContext sharedOverflowContext; - final ColumnSource.FillContext[] overflowContexts; - - // the chunk of hashcodes - final WritableIntChunk hashChunk; - // the chunk of positions within our table - final WritableLongChunk tableLocationsChunk; - - // the chunk of right indices that we read from the hash table, the empty right index is used as a sentinel that the - // state exists; otherwise when building from the left it is always null - // @WritableStateChunkType@ from \QWritableIntChunk\E - final WritableIntChunk workingStateEntries; - - // the overflow locations that we need to get from the overflowLocationSource (or overflowOverflowLocationSource) - final WritableLongChunk overflowLocationsToFetch; - // the overflow position in the working keychunks, parallel to the overflowLocationsToFetch - final WritableIntChunk overflowPositionInWorkingChunk; - // values we have read from the overflow locations sources - final WritableIntChunk overflowLocations; - // when fetching from the overflow, we record which chunk position we are fetching for - final WritableIntChunk chunkPositionsForFetches; - - final WritableBooleanChunk equalValues; - final WritableChunk[] workingKeyChunks; - - final SharedContext sharedProbeContext; - // the contexts for filling from our key columns - final ChunkSource.GetContext[] probeContexts; - - // region probe context - // endregion probe context - final boolean haveSharedContexts; - - private ProbeContext(ColumnSource[] probeSources, - int chunkSize - // region probe context constructor args - // endregion probe context constructor args - ) { - Assert.gtZero(chunkSize, "chunkSize"); - this.chunkSize = chunkSize; - haveSharedContexts = probeSources.length > 1; - if (haveSharedContexts) { - sharedFillContext = SharedContext.makeSharedContext(); - sharedOverflowContext = SharedContext.makeSharedContext(); - sharedProbeContext = SharedContext.makeSharedContext(); - } else { - // No point in the additional work implied by these being non null. 
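The comment above is the key to this constructor: a shared context only pays off when several column sources can reuse work within one chunk, so with a single probe source the assignments just below leave every shared context null, and resetSharedContexts()/closeSharedContexts() check haveSharedContexts before touching them. As a loose, standalone illustration of the idea (not the Deephaven SharedContext API, whose keys and lifecycle are richer), the sketch below shows a per-chunk scratch cache that several readers can share and that must be reset between chunks; the class name SharedScratch and the string keys are invented.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    /** Standalone sketch: a per-chunk scratch cache shared by several readers and reset between chunks. */
    final class SharedScratch {
        private final Map<String, Object> cache = new HashMap<>();

        /** Compute a shared intermediate result once per chunk; later callers get the cached value. */
        @SuppressWarnings("unchecked")
        <T> T computeIfAbsent(final String key, final Supplier<T> compute) {
            return (T) cache.computeIfAbsent(key, k -> compute.get());
        }

        /** Called between chunks so stale results are never carried into the next chunk. */
        void reset() {
            cache.clear();
        }
    }

With only one reader there is nothing worth sharing, which is exactly why the removed constructor derives haveSharedContexts from probeSources.length > 1 and otherwise skips the shared contexts entirely.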
- sharedFillContext = null; - sharedOverflowContext = null; - sharedProbeContext = null; - } - workingFillContexts = makeFillContexts(keySources, sharedFillContext, chunkSize); - overflowContexts = makeFillContexts(overflowKeySources, sharedOverflowContext, chunkSize); - probeContexts = makeGetContexts(probeSources, sharedProbeContext, chunkSize); - // region probe context constructor - // endregion probe context constructor - stateSourceFillContext = stateSource.makeFillContext(chunkSize); - overflowFillContext = overflowLocationSource.makeFillContext(chunkSize); - overflowOverflowFillContext = overflowOverflowLocationSource.makeFillContext(chunkSize); - hashChunk = WritableIntChunk.makeWritableChunk(chunkSize); - tableLocationsChunk = WritableLongChunk.makeWritableChunk(chunkSize); - // @WritableStateChunkName@ from \QWritableIntChunk\E - workingStateEntries = WritableIntChunk.makeWritableChunk(chunkSize); - overflowLocationsToFetch = WritableLongChunk.makeWritableChunk(chunkSize); - overflowPositionInWorkingChunk = WritableIntChunk.makeWritableChunk(chunkSize); - overflowLocations = WritableIntChunk.makeWritableChunk(chunkSize); - chunkPositionsForFetches = WritableIntChunk.makeWritableChunk(chunkSize); - equalValues = WritableBooleanChunk.makeWritableChunk(chunkSize); - workingKeyChunks = getWritableKeyChunks(chunkSize); - } - - private void resetSharedContexts() { - if (!haveSharedContexts) { - return; - } - sharedFillContext.reset(); - sharedOverflowContext.reset(); - sharedProbeContext.reset(); - } - - private void closeSharedContexts() { - if (!haveSharedContexts) { - return; - } - sharedFillContext.close(); - sharedOverflowContext.close(); - sharedProbeContext.close(); - } - - @Override - public void close() { - stateSourceFillContext.close(); - overflowFillContext.close(); - overflowOverflowFillContext.close(); - closeArray(workingFillContexts); - closeArray(overflowContexts); - closeArray(probeContexts); - hashChunk.close(); - tableLocationsChunk.close(); - workingStateEntries.close(); - overflowLocationsToFetch.close(); - overflowPositionInWorkingChunk.close(); - overflowLocations.close(); - chunkPositionsForFetches.close(); - equalValues.close(); - closeArray(workingKeyChunks); - closeSharedContexts(); - // region probe context close - // endregion probe context close - closeSharedContexts(); - } - } - - public ProbeContext makeProbeContext(ColumnSource[] probeSources, - long maxSize - // region makeProbeContext args - // endregion makeProbeContext args - ) { - return new ProbeContext(probeSources, (int)Math.min(maxSize, CHUNK_SIZE) - // region makeProbeContext arg pass - // endregion makeProbeContext arg pass - ); - } - - private void decorationProbe(ProbeContext pc - , RowSequence probeIndex - , final ColumnSource[] probeSources - // mixin prev - , boolean usePrev - // endmixin prev - // region additional probe arguments - , boolean remove - , WritableIntChunk outputPositions - , WritableIntChunk emptiedPositions - // endregion additional probe arguments - ) { - // region probe start - outputPositions.setSize(probeIndex.intSize()); - if (remove) { - emptiedPositions.setSize(0); - } - // endregion probe start - long hashSlotOffset = 0; - - try (final RowSequence.Iterator rsIt = probeIndex.getRowSequenceIterator(); - // region probe additional try resources - // endregion probe additional try resources - ) { - //noinspection unchecked - final Chunk [] sourceKeyChunks = new Chunk[keyColumnCount]; - - // region probe initialization - // endregion probe initialization - - while 
(rsIt.hasMore()) { - // we reset shared contexts early to avoid carrying around state that can't be reused. - pc.resetSharedContexts(); - final RowSequence chunkOk = rsIt.getNextRowSequenceWithLength(pc.chunkSize); - final int chunkSize = chunkOk.intSize(); - - // region probe loop initialization - // endregion probe loop initialization - - // get our keys, hash them, and convert them to table locations - // mixin prev - if (usePrev) { - getPrevKeyChunks(probeSources, pc.probeContexts, sourceKeyChunks, chunkOk); - } else { - // endmixin prev - getKeyChunks(probeSources, pc.probeContexts, sourceKeyChunks, chunkOk); - // mixin prev - } - // endmixin prev - hashKeyChunks(pc.hashChunk, sourceKeyChunks); - convertHashToTableLocations(pc.hashChunk, pc.tableLocationsChunk); - - // get the keys from the table - fillKeys(pc.workingFillContexts, pc.workingKeyChunks, pc.tableLocationsChunk); - - // and the corresponding states - // - if a value is empty; we don't care about it - // - otherwise we check for equality; if we are equal, we have found our thing to set - // (or to complain if we are already set) - // - if we are not equal, then we are an overflow block - stateSource.fillChunkUnordered(pc.stateSourceFillContext, pc.workingStateEntries, pc.tableLocationsChunk); - - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(pc.workingStateEntries, EMPTY_RIGHT_VALUE, pc.equalValues); - checkKeyEquality(pc.equalValues, pc.workingKeyChunks, sourceKeyChunks); - - pc.overflowPositionInWorkingChunk.setSize(0); - pc.overflowLocationsToFetch.setSize(0); - - for (int ii = 0; ii < pc.equalValues.size(); ++ii) { - if (pc.equalValues.get(ii)) { - // region probe main found - final long tableLocation = pc.tableLocationsChunk.get(ii); - final int outputPosition = pc.workingStateEntries.get(ii); - outputPositions.set(ii, outputPosition); - if (remove) { - // decrement the row count - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - rowCountSource.set(outputPosition, oldRowCount - 1); - } - // endregion probe main found - } else if (pc.workingStateEntries.get(ii) != EMPTY_RIGHT_VALUE) { - // we must handle this as part of the overflow bucket - pc.overflowPositionInWorkingChunk.add(ii); - pc.overflowLocationsToFetch.add(pc.tableLocationsChunk.get(ii)); - } else { - // region probe main not found - throw new IllegalStateException("Failed to find main aggregation slot for key " + ChunkUtils.extractKeyStringFromChunks(keyChunkTypes, sourceKeyChunks, ii)); - // endregion probe main not found - } - } - - overflowLocationSource.fillChunkUnordered(pc.overflowFillContext, pc.overflowLocations, pc.overflowLocationsToFetch); - - while (pc.overflowLocationsToFetch.size() > 0) { - pc.overflowLocationsToFetch.setSize(0); - pc.chunkPositionsForFetches.setSize(0); - for (int ii = 0; ii < pc.overflowLocations.size(); ++ii) { - final int overflowLocation = pc.overflowLocations.get(ii); - final int chunkPosition = pc.overflowPositionInWorkingChunk.get(ii); - - // if the overflow slot is null, this state is not responsive to the join so we can ignore it - if (overflowLocation != QueryConstants.NULL_INT) { - pc.overflowLocationsToFetch.add(overflowLocation); - pc.chunkPositionsForFetches.add(chunkPosition); - } else { - // region probe overflow not found - throw new IllegalStateException("Failed to find overflow aggregation slot for key " + 
ChunkUtils.extractKeyStringFromChunks(keyChunkTypes, sourceKeyChunks, chunkPosition)); - // endregion probe overflow not found - } - } - - // if the slot is non-null, then we need to fetch the overflow values for comparison - fillOverflowKeys(pc.overflowContexts, pc.workingKeyChunks, pc.overflowLocationsToFetch); - - // region probe overflow state source fill - // endregion probe overflow state source fill - - // now compare the value in our workingKeyChunks to the value in the sourceChunk - checkLhsPermutedEquality(pc.chunkPositionsForFetches, sourceKeyChunks, pc.workingKeyChunks, pc.equalValues); - - // we write back into the overflowLocationsToFetch, so we can't set its size to zero. Instead - // we overwrite the elements in the front of the chunk referenced by a position cursor - int overflowPosition = 0; - for (int ii = 0; ii < pc.equalValues.size(); ++ii) { - final long overflowLocation = pc.overflowLocationsToFetch.get(ii); - final int chunkPosition = pc.chunkPositionsForFetches.get(ii); - - if (pc.equalValues.get(ii)) { - // region probe overflow found - final int outputPosition = overflowStateSource.getUnsafe(overflowLocation); - outputPositions.set(chunkPosition, outputPosition); - if (remove) { - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - rowCountSource.set(outputPosition, oldRowCount - 1); - } - // endregion probe overflow found - } else { - // otherwise, we need to repeat the overflow calculation, with our next overflow fetch - pc.overflowLocationsToFetch.set(overflowPosition, overflowLocation); - pc.overflowPositionInWorkingChunk.set(overflowPosition, chunkPosition); - overflowPosition++; - } - } - pc.overflowLocationsToFetch.setSize(overflowPosition); - pc.overflowPositionInWorkingChunk.setSize(overflowPosition); - - overflowOverflowLocationSource.fillChunkUnordered(pc.overflowOverflowFillContext, pc.overflowLocations, pc.overflowLocationsToFetch); - } - - // region probe complete - // endregion probe complete - hashSlotOffset += chunkSize; - } - - // region probe cleanup - // endregion probe cleanup - } - // region probe final - // endregion probe final - } - // endmixin decorationProbe - - private void convertHashToTableLocations(WritableIntChunk hashChunk, WritableLongChunk tablePositionsChunk) { - // mixin rehash - // NOTE that this mixin section is a bit ugly, we are spanning the two functions so that we can avoid using tableHashPivot and having the unused pivotPoint parameter - convertHashToTableLocations(hashChunk, tablePositionsChunk, tableHashPivot); - } - - private void convertHashToTableLocations(WritableIntChunk hashChunk, WritableLongChunk tablePositionsChunk, int pivotPoint) { - // endmixin rehash - - // turn hash codes into indices within our table - for (int ii = 0; ii < hashChunk.size(); ++ii) { - final int hash = hashChunk.get(ii); - // mixin rehash - final int location = hashToTableLocation(pivotPoint, hash); - // endmixin rehash - // altmixin rehash: final int location = hashToTableLocation(hash); - tablePositionsChunk.set(ii, location); - } - tablePositionsChunk.setSize(hashChunk.size()); - } - - private int hashToTableLocation( - // mixin rehash - int pivotPoint, - // endmixin rehash - int hash) { - // altmixin rehash: final \ - int location = hash & (tableSize - 1); - // mixin rehash - if (location >= pivotPoint) { - location -= (tableSize >> 1); - } - // endmixin rehash - return location; - } - - // region 
extraction functions - @Override - public ColumnSource[] getKeyHashTableSources() { - final ColumnSource[] keyHashTableSources = new ColumnSource[keyColumnCount]; - for (int kci = 0; kci < keyColumnCount; ++kci) { - // noinspection unchecked - keyHashTableSources[kci] = new RedirectedColumnSource(resultIndexToHashSlot, new HashTableColumnSource(keySources[kci], overflowKeySources[kci])); - } - return keyHashTableSources; - } - - @Override - public int findPositionForKey(Object key) { - int hash; - if (chunkHashers.length == 1) { - hash = chunkHashers[0].hashInitial(key); - } else { - final Object [] values = (Object[])key; - hash = chunkHashers[0].hashInitial(values[0]); - for (int ii = 1; ii < chunkHashers.length; ++ii) { - hash = chunkHashers[ii].hashUpdate(hash, values[ii]); - } - } - - final int location = hashToTableLocation(tableHashPivot, hash); - - final int positionValue = stateSource.getUnsafe(location); - if (positionValue == EMPTY_RIGHT_VALUE) { - return -1; - } - - if (checkKeyEquality(keySources, key, location)) { - return positionValue; - } - - int overflowLocation = overflowLocationSource.getUnsafe(location); - while (overflowLocation != QueryConstants.NULL_INT) { - if (checkKeyEquality(overflowKeySources, key, overflowLocation)) { - return overflowStateSource.getUnsafe(overflowLocation); - } - overflowLocation = overflowOverflowLocationSource.getUnsafe(overflowLocation); - } - - return -1; - } - - private boolean checkKeyEquality(ArrayBackedColumnSource[] keySources, Object key, int location) { - if (keySources.length == 1) { - return Objects.equals(key, keySources[0].get(location)); - } - final Object [] keyValues = (Object[]) key; - for (int ii = 0; ii < keySources.length; ++ii) { - if (!Objects.equals(keyValues[ii], keySources[ii].get(location))) { - return false; - } - } - return true; - } - - @Override - public void startTrackingPrevValues() { - resultIndexToHashSlot.startTrackingPrevValues(); - } - - @Override - public void setRowSize(int outputPosition, long size) { - rowCountSource.set(outputPosition, size); - } - // endregion extraction functions - - @NotNull - private static ColumnSource.FillContext[] makeFillContexts(ColumnSource[] keySources, final SharedContext sharedContext, int chunkSize) { - final ColumnSource.FillContext[] workingFillContexts = new ColumnSource.FillContext[keySources.length]; - for (int ii = 0; ii < keySources.length; ++ii) { - workingFillContexts[ii] = keySources[ii].makeFillContext(chunkSize, sharedContext); - } - return workingFillContexts; - } - - private static ColumnSource.GetContext[] makeGetContexts(ColumnSource [] sources, final SharedContext sharedState, int chunkSize) { - final ColumnSource.GetContext[] contexts = new ColumnSource.GetContext[sources.length]; - for (int ii = 0; ii < sources.length; ++ii) { - contexts[ii] = sources[ii].makeGetContext(chunkSize, sharedState); - } - return contexts; - } - - @NotNull - private WritableChunk[] getWritableKeyChunks(int chunkSize) { - //noinspection unchecked - final WritableChunk[] workingKeyChunks = new WritableChunk[keyChunkTypes.length]; - for (int ii = 0; ii < keyChunkTypes.length; ++ii) { - workingKeyChunks[ii] = keyChunkTypes[ii].makeWritableChunk(chunkSize); - } - return workingKeyChunks; - } - - @NotNull - private ResettableWritableChunk[] getResettableWritableKeyChunks() { - //noinspection unchecked - final ResettableWritableChunk[] workingKeyChunks = new ResettableWritableChunk[keyChunkTypes.length]; - for (int ii = 0; ii < keyChunkTypes.length; ++ii) { - workingKeyChunks[ii] 
= keyChunkTypes[ii].makeResettableWritableChunk(); - } - return workingKeyChunks; - } - - // region getStateValue - // endregion getStateValue - - // region overflowLocationToHashLocation - private static int overflowLocationToHashLocation(final int overflowSlot) { - return HashTableColumnSource.overflowLocationToHashLocation(overflowSlot); - } - - private static int hashLocationToOverflowLocation(final int hashLocation) { - return HashTableColumnSource.hashLocationToOverflowLocation(hashLocation); - } - - private static boolean isOverflowLocation(final long slot) { - return HashTableColumnSource.isOverflowLocation(slot); - } - - private static int chunkPositionToPendingState(final int position) { - return -position - 1; - } - - private static int pendingStateToChunkPosition(final int pendingState) { - return -pendingState - 1; - } - - private static boolean isPendingState(final int position) { - return position < 0; - } - // endregion overflowLocationToHashLocation - - - static int hashTableSize(long initialCapacity) { - return (int)Math.max(MINIMUM_INITIAL_HASH_SIZE, Math.min(MAX_TABLE_SIZE, Long.highestOneBit(initialCapacity) * 2)); - } - -} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.java index ff4b059500e..8c6f5fd55a2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.java @@ -11,7 +11,6 @@ import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.alternatingcolumnsource.AlternatingColumnSource; import io.deephaven.engine.table.impl.sources.IntegerArraySource; -import io.deephaven.engine.table.impl.sources.LongArraySource; import io.deephaven.engine.table.impl.sources.RedirectedColumnSource; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; import io.deephaven.engine.table.impl.util.IntColumnSourceWritableRowRedirection; @@ -20,39 +19,47 @@ import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; import org.apache.commons.lang3.mutable.MutableInt; +import org.jetbrains.annotations.NotNull; public abstract class IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase extends OperatorAggregationStateManagerOpenAddressedAlternateBase implements IncrementalOperatorAggregationStateManager { - // our state value used when nothing is there + + /** Our state value used when nothing is there. */ protected static final int EMPTY_OUTPUT_POSITION = QueryConstants.NULL_INT; - // the state value for the bucket, parallel to mainKeySources (the state is an output row key for the aggregation) + /** + * The state value for the bucket, parallel to mainKeySources (the state is an output row key for the aggregation). + */ protected ImmutableIntArraySource mainOutputPosition = new ImmutableIntArraySource(); - // the state value for the bucket, parallel to alternateKeySources (the state is an output row key for the - // aggregation) + /** + * The state value for the bucket, parallel to alternateKeySources (the state is an output row key for the + * aggregation). 
+ */ protected ImmutableIntArraySource alternateOutputPosition; - // used as a row redirection for the output key sources, updated using the mainInsertMask to identify the main vs. - // alternate values + /** + * Used as a row redirection for the output key sources, updated using the mainInsertMask to identify the main vs. + * alternate values. + */ protected final IntegerArraySource outputPositionToHashSlot = new IntegerArraySource(); - // how many values are in each state, addressed by output row key - protected final LongArraySource rowCountSource = new LongArraySource(); - - // state variables that exist as part of the update + /** State variables that exist as part of the update. */ protected MutableInt nextOutputPosition; protected WritableIntChunk outputPositions; - // output alternating column sources + /** Output alternating column sources. */ protected AlternatingColumnSource[] alternatingColumnSources; - // the mask for insertion into the main table (this tells our alternating column sources which of the two sources - // to access for a given key) + /** + * The mask for insertion into the main table (this tells our alternating column sources which of the two sources to + * access for a given key). + */ protected int mainInsertMask = 0; - protected IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase(ColumnSource[] tableKeySources, + protected IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase( + ColumnSource[] tableKeySources, int tableSize, double maximumLoadFactor) { super(tableKeySources, tableSize, maximumLoadFactor); @@ -93,15 +100,19 @@ public SafeCloseable makeAggregationStateBuildContext(ColumnSource[] buildSou } @Override - public void add(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, - MutableInt nextOutputPosition, WritableIntChunk outputPositions) { + public void add( + @NotNull final SafeCloseable bc, + @NotNull final RowSequence rowSequence, + @NotNull final ColumnSource[] sources, + @NotNull final MutableInt nextOutputPosition, + @NotNull final WritableIntChunk outputPositions) { outputPositions.setSize(rowSequence.intSize()); if (rowSequence.isEmpty()) { return; } this.nextOutputPosition = nextOutputPosition; this.outputPositions = outputPositions; - buildTable((BuildContext) bc, rowSequence, sources, true, this::build); + buildTable((BuildContext) bc, rowSequence, sources, this::build); this.outputPositions = null; this.nextOutputPosition = null; } @@ -109,7 +120,6 @@ public void add(final SafeCloseable bc, RowSequence rowSequence, ColumnSource @Override public void onNextChunk(int size) { outputPositionToHashSlot.ensureCapacity(nextOutputPosition.intValue() + size, false); - rowCountSource.ensureCapacity(nextOutputPosition.intValue() + size, false); } @Override @@ -138,71 +148,47 @@ public ColumnSource[] getKeyHashTableSources() { @Override public void beginUpdateCycle() { - // at the beginning of the update cycle, we always want to do some rehash work so that we can eventually ditch - // the alternate table + // Once we're past initial state processing, we want to rehash incrementally. + fullRehash = false; + // At the beginning of the update cycle, we always want to do some rehash work so that we can eventually ditch + // the alternate table. 
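beginUpdateCycle, just below, leans on the main/alternate pairing that the fields above describe: new entries go into the doubled main table, while the previous table lives on as the alternate until its contents have been migrated, a bounded amount per update cycle, at which point it can be dropped. The sketch below is a simplified, standalone illustration of that idea only; it is not the Deephaven implementation (which probes open-addressed slots, distinguishes the two tables via mainInsertMask, and exposes them through AlternatingColumnSource), and the class name AlternatingTableSketch, the plain long keys, and the migration direction are invented, with collision probing omitted for brevity.

    /** Standalone sketch: bounded per-cycle migration from an old "alternate" table into the doubled "main" table. */
    final class AlternatingTableSketch {
        static final int EMPTY = Integer.MIN_VALUE;

        final long[] mainKeys;        // doubled table currently being filled
        final int[] mainState;
        final long[] alternateKeys;   // previous table, drained a little on every update cycle
        final int[] alternateState;
        int rehashPointer;            // number of alternate buckets still awaiting migration

        AlternatingTableSketch(final int mainSize, final int alternateSize) {
            // both sizes are assumed to be powers of two so that (hash & (size - 1)) is a valid index
            mainKeys = new long[mainSize];
            mainState = new int[mainSize];
            alternateKeys = new long[alternateSize];
            alternateState = new int[alternateSize];
            java.util.Arrays.fill(mainState, EMPTY);
            java.util.Arrays.fill(alternateState, EMPTY);
            rehashPointer = alternateSize;
        }

        /** Lookups consult the main table first, then fall back to the not-yet-migrated part of the alternate. */
        int find(final long key) {
            final int mainLoc = Long.hashCode(key) & (mainKeys.length - 1);
            if (mainState[mainLoc] != EMPTY && mainKeys[mainLoc] == key) {
                return mainState[mainLoc];
            }
            final int altLoc = Long.hashCode(key) & (alternateKeys.length - 1);
            if (altLoc < rehashPointer && alternateState[altLoc] != EMPTY && alternateKeys[altLoc] == key) {
                return alternateState[altLoc];
            }
            return EMPTY;
        }

        /** Migrate at most maxBuckets alternate buckets; once rehashPointer reaches 0 the alternate table can go. */
        void rehashPartial(int maxBuckets) {
            while (rehashPointer > 0 && maxBuckets-- > 0) {
                final int source = --rehashPointer;
                if (alternateState[source] != EMPTY) {
                    final int dest = Long.hashCode(alternateKeys[source]) & (mainKeys.length - 1);
                    // the sketch simply overwrites the destination; the real code probes for a free slot
                    mainKeys[dest] = alternateKeys[source];
                    mainState[dest] = alternateState[source];
                    alternateState[source] = EMPTY;
                }
            }
        }
    }

In the diff this bounded migration is what the rehashPointer check and the rehashInternalPartial(CHUNK_SIZE) call in beginUpdateCycle accomplish, while fullRehash = false switches the manager from the bulk initial build over to this incremental mode.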
if (rehashPointer > 0) { rehashInternalPartial(CHUNK_SIZE); } } - @Override - public void addForUpdate(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, - MutableInt nextOutputPosition, WritableIntChunk outputPositions, - final WritableIntChunk reincarnatedPositions) { - outputPositions.setSize(rowSequence.intSize()); - reincarnatedPositions.setSize(0); - if (rowSequence.isEmpty()) { - return; - } - this.nextOutputPosition = nextOutputPosition; - this.outputPositions = outputPositions; - buildTable((BuildContext) bc, rowSequence, sources, false, - ((chunkOk, sourceKeyChunks) -> buildForUpdate(chunkOk, sourceKeyChunks, reincarnatedPositions))); - this.outputPositions = null; - this.nextOutputPosition = null; - } - - protected abstract void buildForUpdate(RowSequence chunkOk, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions); + protected abstract void probe(RowSequence chunkOk, Chunk[] sourceKeyChunks); @Override - public void remove(final SafeCloseable pc, RowSequence rowSequence, ColumnSource[] sources, - WritableIntChunk outputPositions, final WritableIntChunk emptiedPositions) { + public void remove( + @NotNull final SafeCloseable pc, + @NotNull final RowSequence rowSequence, + @NotNull final ColumnSource[] sources, + @NotNull final WritableIntChunk outputPositions) { outputPositions.setSize(rowSequence.intSize()); - emptiedPositions.setSize(0); if (rowSequence.isEmpty()) { return; } this.outputPositions = outputPositions; - probeTable((ProbeContext) pc, rowSequence, true, sources, - (chunkOk, sourceKeyChunks) -> IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.this - .doRemoveProbe(chunkOk, sourceKeyChunks, emptiedPositions)); + probeTable((ProbeContext) pc, rowSequence, true, sources, this::probe); this.outputPositions = null; } - protected abstract void doRemoveProbe(RowSequence chunkOk, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions); - @Override - public void findModifications(final SafeCloseable pc, RowSequence rowSequence, ColumnSource[] sources, - WritableIntChunk outputPositions) { + public void findModifications( + @NotNull final SafeCloseable pc, + @NotNull final RowSequence rowSequence, + @NotNull final ColumnSource[] sources, + @NotNull final WritableIntChunk outputPositions) { outputPositions.setSize(rowSequence.intSize()); if (rowSequence.isEmpty()) { return; } this.outputPositions = outputPositions; - probeTable((ProbeContext) pc, rowSequence, false, sources, - IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase.this::doModifyProbe); + probeTable((ProbeContext) pc, rowSequence, false, sources, this::probe); this.outputPositions = null; } - protected abstract void doModifyProbe(RowSequence chunkOk, Chunk[] sourceKeyChunks); - @Override public void startTrackingPrevValues() {} - - @Override - public void setRowSize(int outputPosition, long size) { - rowCountSource.set(outputPosition, size); - } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerTypedBase.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerTypedBase.java index f923e32e346..f6ce5d37929 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerTypedBase.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManagerTypedBase.java @@ -3,13 +3,11 @@ */ package 
io.deephaven.engine.table.impl.by; -import io.deephaven.base.verify.Assert; import io.deephaven.chunk.WritableIntChunk; import io.deephaven.engine.rowset.RowSequence; import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.sources.IntegerArraySource; -import io.deephaven.engine.table.impl.sources.LongArraySource; import io.deephaven.engine.table.impl.sources.RedirectedColumnSource; import io.deephaven.engine.table.impl.util.IntColumnSourceWritableRowRedirection; import io.deephaven.engine.table.impl.util.RowRedirection; @@ -18,6 +16,7 @@ import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; import org.apache.commons.lang3.mutable.MutableInt; +import org.jetbrains.annotations.NotNull; /** * Incremental aggregation state manager that is extended by code generated typed hashers. @@ -38,12 +37,8 @@ public abstract class IncrementalChunkedOperatorAggregationStateManagerTypedBase // used as a row redirection for the output key sources private final IntegerArraySource outputPositionToHashSlot = new IntegerArraySource(); - // how many values are in each state, addressed by output row key - private final LongArraySource rowCountSource = new LongArraySource(); - - // handlers for use during updates - private final AddInitialHandler addInitialHandler = new AddInitialHandler(); - private final AddUpdateHandler addUpdateHandler = new AddUpdateHandler(); + // handlers for use during initialization and updates + private final AddHandler addHandler = new AddHandler(); private final RemoveHandler removeHandler = new RemoveHandler(); private final ModifyHandler modifyHandler = new ModifyHandler(); @@ -59,15 +54,19 @@ public SafeCloseable makeAggregationStateBuildContext(ColumnSource[] buildSou } @Override - public void add(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, - MutableInt nextOutputPosition, WritableIntChunk outputPositions) { + public void add( + @NotNull final SafeCloseable bc, + @NotNull final RowSequence rowSequence, + @NotNull final ColumnSource[] sources, + @NotNull final MutableInt nextOutputPosition, + @NotNull final WritableIntChunk outputPositions) { outputPositions.setSize(rowSequence.intSize()); if (rowSequence.isEmpty()) { return; } - addInitialHandler.reset(nextOutputPosition, outputPositions); - buildTable(addInitialHandler, (BuildContext) bc, rowSequence, sources); - addInitialHandler.reset(); + addHandler.reset(nextOutputPosition, outputPositions); + buildTable(addHandler, (BuildContext) bc, rowSequence, sources); + addHandler.reset(); } @Override @@ -90,42 +89,26 @@ public void startTrackingPrevValues() {} public void beginUpdateCycle() {} @Override - public void setRowSize(int outputPosition, long size) { - rowCountSource.set(outputPosition, size); - } - - @Override - public void addForUpdate(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, - MutableInt nextOutputPosition, WritableIntChunk outputPositions, - WritableIntChunk reincarnatedPositions) { - outputPositions.setSize(rowSequence.intSize()); - reincarnatedPositions.setSize(0); - if (rowSequence.isEmpty()) { - return; - } - addUpdateHandler.reset(nextOutputPosition, outputPositions, reincarnatedPositions); - buildTable(addUpdateHandler, (BuildContext) bc, - rowSequence, sources); - addUpdateHandler.reset(); - } - - @Override - public void remove(final SafeCloseable pc, RowSequence rowSequence, ColumnSource[] sources, - WritableIntChunk outputPositions, 
WritableIntChunk emptiedPositions) { + public void remove( + @NotNull final SafeCloseable pc, + @NotNull final RowSequence rowSequence, + @NotNull final ColumnSource[] sources, + @NotNull final WritableIntChunk outputPositions) { outputPositions.setSize(rowSequence.intSize()); - emptiedPositions.setSize(0); if (rowSequence.isEmpty()) { return; } - removeHandler.reset(outputPositions, emptiedPositions); - probeTable(removeHandler, (ProbeContext) pc, rowSequence, true, - sources); + removeHandler.reset(outputPositions); + probeTable(removeHandler, (ProbeContext) pc, rowSequence, true, sources); removeHandler.reset(); } @Override - public void findModifications(final SafeCloseable pc, RowSequence rowSequence, ColumnSource[] sources, - WritableIntChunk outputPositions) { + public void findModifications( + @NotNull final SafeCloseable pc, + @NotNull final RowSequence rowSequence, + @NotNull final ColumnSource[] sources, + @NotNull final WritableIntChunk outputPositions) { outputPositions.setSize(rowSequence.intSize()); if (rowSequence.isEmpty()) { return; @@ -136,20 +119,23 @@ public void findModifications(final SafeCloseable pc, RowSequence rowSequence, C } @Override - protected void ensureMainState(int tableSize) { + protected void ensureMainState(final int tableSize) { mainOutputPosition.ensureCapacity(tableSize); } @Override - protected void ensureOverflowState(int newCapacity) { + protected void ensureOverflowState(final int newCapacity) { overflowOutputPosition.ensureCapacity(newCapacity); } - private abstract class AddHandler extends HashHandler.BuildHandler { + private class AddHandler extends HashHandler.BuildHandler { + MutableInt outputPosition; WritableIntChunk outputPositions; - void reset(MutableInt nextOutputPosition, WritableIntChunk outputPositions) { + void reset( + @NotNull final MutableInt nextOutputPosition, + @NotNull final WritableIntChunk outputPositions) { this.outputPosition = nextOutputPosition; this.outputPositions = outputPositions; } @@ -160,159 +146,89 @@ void reset() { } @Override - public void doMainInsert(int tableLocation, int chunkPosition) { + public void doMainInsert(final int tableLocation, final int chunkPosition) { final int nextOutputPosition = outputPosition.getAndIncrement(); outputPositions.set(chunkPosition, nextOutputPosition); mainOutputPosition.set(tableLocation, nextOutputPosition); outputPositionToHashSlot.set(nextOutputPosition, tableLocation); - rowCountSource.set(nextOutputPosition, 1L); } @Override - public void doMoveMain(int oldTableLocation, int newTableLocation) { + public void doMainFound(final int tableLocation, final int chunkPosition) { + final int outputPosition = mainOutputPosition.getUnsafe(tableLocation); + outputPositions.set(chunkPosition, outputPosition); + } + + @Override + public void doMoveMain(final int oldTableLocation, final int newTableLocation) { final int outputPosition = mainOutputPosition.getUnsafe(newTableLocation); outputPositionToHashSlot.set(outputPosition, newTableLocation); } @Override - public void doPromoteOverflow(int overflowLocation, int mainInsertLocation) { + public void doPromoteOverflow(final int overflowLocation, final int mainInsertLocation) { outputPositionToHashSlot.set(mainOutputPosition.getUnsafe(mainInsertLocation), mainInsertLocation); } @Override - public void onNextChunk(int size) { + public void onNextChunk(final int size) { outputPositionToHashSlot.ensureCapacity(outputPosition.intValue() + size); - rowCountSource.ensureCapacity(outputPosition.intValue() + size); } @Override - public void 
doOverflowInsert(int overflowLocation, int chunkPosition) { + public void doOverflowInsert(final int overflowLocation, final int chunkPosition) { final int nextOutputPosition = outputPosition.getAndIncrement(); outputPositions.set(chunkPosition, nextOutputPosition); overflowOutputPosition.set(overflowLocation, nextOutputPosition); outputPositionToHashSlot.set(nextOutputPosition, HashTableColumnSource.overflowLocationToHashLocation(overflowLocation)); - rowCountSource.set(nextOutputPosition, 1L); - } - } - - class AddInitialHandler extends AddHandler { - @Override - public void doMainFound(int tableLocation, int chunkPosition) { - final int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - outputPositions.set(chunkPosition, outputPosition); - - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.gtZero(oldRowCount, "oldRowCount"); - rowCountSource.set(outputPosition, oldRowCount + 1); } @Override - public void doOverflowFound(int overflowLocation, int chunkPosition) { + public void doOverflowFound(final int overflowLocation, final int chunkPosition) { final int outputPosition = overflowOutputPosition.getUnsafe(overflowLocation); outputPositions.set(chunkPosition, outputPosition); - - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.gtZero(oldRowCount, "oldRowCount"); - rowCountSource.set(outputPosition, oldRowCount + 1); - } - } - - class AddUpdateHandler extends AddHandler { - private WritableIntChunk reincarnatedPositions; - - public void reset(MutableInt nextOutputPosition, WritableIntChunk outputPositions, - WritableIntChunk reincarnatedPositions) { - super.reset(nextOutputPosition, outputPositions); - this.reincarnatedPositions = reincarnatedPositions; - } - - void reset() { - super.reset(); - reincarnatedPositions = null; - } - - @Override - public void doMainFound(int tableLocation, int chunkPosition) { - final int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - outputPositions.set(chunkPosition, outputPosition); - - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.geqZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - rowCountSource.set(outputPosition, oldRowCount + 1); - } - - @Override - public void doOverflowFound(int overflowLocation, int chunkPosition) { - final int outputPosition = overflowOutputPosition.getUnsafe(overflowLocation); - outputPositions.set(chunkPosition, outputPosition); - - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.geqZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - rowCountSource.set(outputPosition, oldRowCount + 1); } } class RemoveHandler extends HashHandler.ProbeHandler { + private WritableIntChunk outputPositions; - private WritableIntChunk emptiedPositions; - public void reset(WritableIntChunk outputPositions, WritableIntChunk emptiedPositions) { + public void reset(@NotNull final WritableIntChunk outputPositions) { this.outputPositions = outputPositions; - this.emptiedPositions = emptiedPositions; } public void reset() { this.outputPositions = null; - this.emptiedPositions = null; } @Override - public void doMainFound(int tableLocation, int chunkPosition) { + public void doMainFound(final int tableLocation, final int chunkPosition) { final int outputPosition = mainOutputPosition.getUnsafe(tableLocation); outputPositions.set(chunkPosition, outputPosition); - - // decrement the row count - final 
long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - rowCountSource.set(outputPosition, oldRowCount - 1); } @Override - public void doOverflowFound(int overflowLocation, int chunkPosition) { + public void doOverflowFound(final int overflowLocation, final int chunkPosition) { final int outputPosition = overflowOutputPosition.getUnsafe(overflowLocation); outputPositions.set(chunkPosition, outputPosition); - - final long oldRowCount = rowCountSource.getUnsafe(outputPosition); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - rowCountSource.set(outputPosition, oldRowCount - 1); } @Override - public void onNextChunk(int size) {} + public void onNextChunk(final int size) {} @Override - public void doMissing(int chunkPosition) { + public void doMissing(final int chunkPosition) { throw new IllegalStateException(); } } class ModifyHandler extends HashHandler.ProbeHandler { + private WritableIntChunk outputPositions; - public void reset(WritableIntChunk outputPositions) { + public void reset(@NotNull final WritableIntChunk outputPositions) { this.outputPositions = outputPositions; } @@ -321,22 +237,22 @@ public void reset() { } @Override - public void doMainFound(int tableLocation, int chunkPosition) { + public void doMainFound(final int tableLocation, final int chunkPosition) { final int outputPosition = mainOutputPosition.getUnsafe(tableLocation); outputPositions.set(chunkPosition, outputPosition); } @Override - public void doOverflowFound(int overflowLocation, int chunkPosition) { + public void doOverflowFound(final int overflowLocation, final int chunkPosition) { final int outputPosition = overflowOutputPosition.getUnsafe(overflowLocation); outputPositions.set(chunkPosition, outputPosition); } @Override - public void onNextChunk(int size) {} + public void onNextChunk(final int size) {} @Override - public void doMissing(int chunkPosition) { + public void doMissing(final int chunkPosition) { throw new IllegalStateException(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalOperatorAggregationStateManager.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalOperatorAggregationStateManager.java index 350c8dc1c2b..366e46214cb 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalOperatorAggregationStateManager.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalOperatorAggregationStateManager.java @@ -8,7 +8,6 @@ import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.util.SafeCloseable; -import org.apache.commons.lang3.mutable.MutableInt; /** * Interface for ChunkedOperatorAggregationHelper to process incremental updates. 
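The hunk below slims IncrementalOperatorAggregationStateManager: addForUpdate and setRowSize disappear, and remove/findModifications no longer emit emptiedPositions/reincarnatedPositions chunks. Empty/non-empty transitions are instead reported through the StateChangeRecorder callbacks introduced further down in this patch (see NoopStateChangeRecorder, and the onReincarnated/onEmptied calls added to SortedFirstOrLastChunkedOperator). The BasicStateChangeRecorder base that operator extends is not included in this section, so the following is only a hedged sketch, under the assumption that such a base simply forwards to the registered LongConsumers:

```java
import java.util.function.LongConsumer;

// Hedged sketch only: the real BasicStateChangeRecorder referenced by
// SortedFirstOrLastChunkedOperator is not part of this diff. This shows how a base class
// matching the StateChangeRecorder contract (startRecording/finishRecording) could forward
// onReincarnated/onEmptied calls from operator code to the registered callbacks.
class BasicStateChangeRecorderSketch {

    private LongConsumer reincarnatedDestinationCallback;
    private LongConsumer emptiedDestinationCallback;

    public void startRecording(
            final LongConsumer reincarnatedDestinationCallback,
            final LongConsumer emptiedDestinationCallback) {
        this.reincarnatedDestinationCallback = reincarnatedDestinationCallback;
        this.emptiedDestinationCallback = emptiedDestinationCallback;
    }

    public void finishRecording() {
        reincarnatedDestinationCallback = null;
        emptiedDestinationCallback = null;
    }

    // Operators call this when a destination transitions from empty to non-empty.
    protected final void onReincarnated(final long destination) {
        if (reincarnatedDestinationCallback != null) {
            reincarnatedDestinationCallback.accept(destination);
        }
    }

    // Operators call this when a destination transitions from non-empty to empty.
    protected final void onEmptied(final long destination) {
        if (emptiedDestinationCallback != null) {
            emptiedDestinationCallback.accept(destination);
        }
    }
}
```

An operator that never processes removes would use the no-op variant instead, which is exactly what the new NoopStateChangeRecorder in this patch provides.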
@@ -25,11 +24,9 @@ public interface IncrementalOperatorAggregationStateManager extends OperatorAggr void startTrackingPrevValues(); - void setRowSize(int outputPosition, long size); + void remove(SafeCloseable pc, RowSequence rowSequence, ColumnSource [] sources, + WritableIntChunk outputPositions); - void addForUpdate(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, MutableInt nextOutputPosition, WritableIntChunk outputPositions, WritableIntChunk reincarnatedPositions); - - void remove(final SafeCloseable pc, RowSequence rowSequence, ColumnSource [] sources, WritableIntChunk outputPositions, WritableIntChunk emptiedPositions); - - void findModifications(final SafeCloseable pc, RowSequence rowSequence, ColumnSource [] sources, WritableIntChunk outputPositions); + void findModifications(SafeCloseable pc, RowSequence rowSequence, ColumnSource [] sources, + WritableIntChunk outputPositions); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/NoopStateChangeRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/NoopStateChangeRecorder.java new file mode 100644 index 00000000000..ca0ba2a349f --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/NoopStateChangeRecorder.java @@ -0,0 +1,18 @@ +package io.deephaven.engine.table.impl.by; + +import java.util.function.LongConsumer; + +/** + * Re-usable support for not recording reincarnated and emptied states in incremental aggregation processing, + * for operators that never process any removes. + */ +class NoopStateChangeRecorder implements StateChangeRecorder { + + @Override + public final void startRecording( + final LongConsumer reincarnatedDestinationCallback, + final LongConsumer emptiedDestinationCallback) {} + + @Override + public final void finishRecording() {} +} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManager.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManager.java index a6853eb3973..a41ff6edcd4 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManager.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManager.java @@ -12,6 +12,8 @@ interface OperatorAggregationStateManager { + int maxTableSize(); + SafeCloseable makeAggregationStateBuildContext(ColumnSource[] buildSources, long maxSize); void add(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, MutableInt nextOutputPosition, WritableIntChunk outputPositions); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedAlternateBase.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedAlternateBase.java index 230c95d7041..9ee4eac83b8 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedAlternateBase.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedAlternateBase.java @@ -14,32 +14,39 @@ import org.apache.commons.lang3.mutable.MutableInt; import static io.deephaven.engine.table.impl.util.TypedHasherUtil.*; -import static io.deephaven.util.SafeCloseable.closeArray; public abstract class OperatorAggregationStateManagerOpenAddressedAlternateBase implements OperatorAggregationStateManager { public static final int CHUNK_SIZE = 
ChunkedOperatorAggregationHelper.CHUNK_SIZE; private static final long MAX_TABLE_SIZE = 1 << 30; // maximum array size - // the number of slots in our table + /** The number of slots in our table. */ protected int tableSize; - // the number of slots in our alternate table, to start with "1" is a lie, but rehashPointer is zero; so our - // location value is positive and can be compared against rehashPointer safely + + /** + * The number of slots in our alternate table, to start with "1" is a lie, but rehashPointer is zero; so our + * location value is positive and can be compared against rehashPointer safely + */ protected int alternateTableSize = 1; - // how much of the alternate sources are necessary to rehash? + /** Should we rehash the entire table fully ({@code true}) or incrementally ({@code false})? */ + protected boolean fullRehash = true; + + /** How much of the alternate sources are necessary to rehash? */ protected int rehashPointer = 0; protected long numEntries = 0; - // the table will be rehashed to a load factor of targetLoadFactor if our loadFactor exceeds maximumLoadFactor - // or if it falls below minimum load factor we will instead contract the table + /** + * The table will be rehashed to a load factor of targetLoadFactor if our loadFactor exceeds maximumLoadFactor or if + * it falls below minimum load factor we will instead contract the table. + */ private final double maximumLoadFactor; - // the keys for our hash entries + /** The keys for our hash entries. */ protected final WritableColumnSource[] mainKeySources; - // the keys for our hash entries, for the old alternative smaller table + /** The keys for our hash entries, for the old alternative smaller table. */ protected final ColumnSource[] alternateKeySources; protected OperatorAggregationStateManagerOpenAddressedAlternateBase(ColumnSource[] tableKeySources, @@ -61,6 +68,11 @@ protected OperatorAggregationStateManagerOpenAddressedAlternateBase(ColumnSource this.maximumLoadFactor = maximumLoadFactor; } + @Override + public final int maxTableSize() { + return Math.toIntExact(MAX_TABLE_SIZE); + } + protected abstract void build(RowSequence rowSequence, Chunk[] sourceKeyChunks); public static class BuildContext extends BuildOrProbeContext { @@ -85,7 +97,6 @@ protected void buildTable( final BuildContext bc, final RowSequence buildRows, final ColumnSource[] buildSources, - final boolean fullRehash, final BuildHandler buildHandler) { try (final RowSequence.Iterator rsIt = buildRows.getRowSequenceIterator()) { // noinspection unchecked @@ -95,7 +106,7 @@ protected void buildTable( final RowSequence chunkOk = rsIt.getNextRowSequenceWithLength(bc.chunkSize); final int nextChunkSize = chunkOk.intSize(); onNextChunk(nextChunkSize); - while (doRehash(fullRehash, bc.rehashCredits, nextChunkSize)) { + while (doRehash(bc.rehashCredits, nextChunkSize)) { migrateFront(); } @@ -152,12 +163,11 @@ public interface BuildHandler { } /** - * @param fullRehash should we rehash the entire table (if false, we rehash incrementally) * @param rehashCredits the number of entries this operation has rehashed (input/output) * @param nextChunkSize the size of the chunk we are processing * @return true if a front migration is required */ - public boolean doRehash(boolean fullRehash, MutableInt rehashCredits, int nextChunkSize) { + public boolean doRehash(MutableInt rehashCredits, int nextChunkSize) { if (rehashPointer > 0) { final int requiredRehash = nextChunkSize - rehashCredits.intValue(); if (requiredRehash <= 0) { diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedBase.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedBase.java index 0cb7b3bcc99..aa363f5fe02 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedBase.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerOpenAddressedBase.java @@ -48,6 +48,11 @@ protected OperatorAggregationStateManagerOpenAddressedBase(ColumnSource[] tab this.maximumLoadFactor = maximumLoadFactor; } + @Override + public final int maxTableSize() { + return Math.toIntExact(MAX_TABLE_SIZE); + } + protected abstract void build(RowSequence rowSequence, Chunk[] sourceKeyChunks); BuildContext makeBuildContext(ColumnSource[] buildSources, long maxSize) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerTypedBase.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerTypedBase.java index 211249f3944..125572d1796 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerTypedBase.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/OperatorAggregationStateManagerTypedBase.java @@ -20,6 +20,11 @@ public abstract class OperatorAggregationStateManagerTypedBase public static final int CHUNK_SIZE = ChunkedOperatorAggregationHelper.CHUNK_SIZE; private static final long MAX_TABLE_SIZE = HashTableColumnSource.MINIMUM_OVERFLOW_HASH_SLOT; + @Override + public final int maxTableSize() { + return Math.toIntExact(MAX_TABLE_SIZE); + } + // the number of slots in our table private int tableSize; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/SortedFirstOrLastChunkedOperator.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/SortedFirstOrLastChunkedOperator.java index d3520401a1b..daa30528726 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/SortedFirstOrLastChunkedOperator.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/SortedFirstOrLastChunkedOperator.java @@ -6,6 +6,7 @@ import io.deephaven.chunk.attributes.ChunkLengths; import io.deephaven.chunk.attributes.ChunkPositions; import io.deephaven.chunk.attributes.Values; +import io.deephaven.engine.rowset.RowSequence; import io.deephaven.engine.table.impl.SortingOrder; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.MatchPair; @@ -26,7 +27,9 @@ import java.util.Map; import java.util.function.Supplier; -public class SortedFirstOrLastChunkedOperator implements IterativeChunkedAggregationOperator { +public class SortedFirstOrLastChunkedOperator + extends BasicStateChangeRecorder + implements IterativeChunkedAggregationOperator { private final ChunkType chunkType; private final boolean isFirst; private final Supplier ssaFactory; @@ -416,6 +419,9 @@ private boolean addSortedChunk(Chunk values, LongChunk indices, ssa.insert(values, indices); final long newValue = isFirst ? 
ssa.getFirst() : ssa.getLast(); final long oldValue = redirections.getAndSetUnsafe(destination, newValue); + if (oldValue == RowSequence.NULL_ROW_KEY && newValue != RowSequence.NULL_ROW_KEY) { + onReincarnated(destination); + } return oldValue != newValue; } @@ -432,6 +438,9 @@ private boolean removeSortedChunk(Chunk values, LongChunk indic ssa.remove(values, indices); final long newValue = isFirst ? ssa.getFirst() : ssa.getLast(); final long oldValue = redirections.getAndSetUnsafe(destination, newValue); + if (oldValue != RowSequence.NULL_ROW_KEY && newValue == RowSequence.NULL_ROW_KEY) { + onEmptied(destination); + } return oldValue != newValue; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/StateChangeRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/StateChangeRecorder.java new file mode 100644 index 00000000000..7e55434ba74 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/StateChangeRecorder.java @@ -0,0 +1,24 @@ +package io.deephaven.engine.table.impl.by; + +import java.util.function.LongConsumer; + +/** + * Interface for recording reincarnated and emptied states in incremental aggregation processing. + */ +interface StateChangeRecorder { + + /** + * Set {@link LongConsumer callbacks} that should be used to record destinations that have transitioned from empty + * to non-empty ({@code reincarnatedDestinationCallback}) or non-empty to empty + * ({@code emptiedDestinationCallback}). + * + * @param reincarnatedDestinationCallback Consumer for destinations that have gone from empty to non-empty + * @param emptiedDestinationCallback Consumer for destinations that have gone from non-empty to empty + */ + void startRecording(LongConsumer reincarnatedDestinationCallback, LongConsumer emptiedDestinationCallback); + + /** + * Remove callbacks and stop state change recording. 
+ */ + void finishRecording(); +} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/StaticChunkedOperatorAggregationStateManager.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/StaticChunkedOperatorAggregationStateManager.java deleted file mode 100644 index 7f6f1f7352a..00000000000 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/StaticChunkedOperatorAggregationStateManager.java +++ /dev/null @@ -1,1414 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.table.impl.by; - -import io.deephaven.base.verify.Require; -import io.deephaven.base.verify.Assert; -import io.deephaven.chunk.*; -import io.deephaven.chunk.attributes.Any; -import io.deephaven.chunk.attributes.ChunkPositions; -import io.deephaven.chunk.attributes.HashCodes; -import io.deephaven.chunk.attributes.Values; -import io.deephaven.engine.rowset.*; -import io.deephaven.engine.table.*; -import io.deephaven.engine.rowset.chunkattributes.OrderedRowKeys; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; -import io.deephaven.util.QueryConstants; -import io.deephaven.chunk.util.hashing.*; -// this is ugly to have twice, but we do need it twice for replication -// @StateChunkIdentityName@ from \QIntChunk\E -import io.deephaven.chunk.util.hashing.IntChunkEquals; -import io.deephaven.engine.table.impl.sort.permute.PermuteKernel; -import io.deephaven.engine.table.impl.sort.timsort.LongIntTimsortKernel; -import io.deephaven.engine.table.impl.sources.*; -import io.deephaven.engine.table.impl.util.*; - -// mixin rehash -import java.util.Arrays; -import io.deephaven.engine.table.impl.sort.permute.IntPermuteKernel; -// @StateChunkTypeEnum@ from \QInt\E -import io.deephaven.engine.table.impl.sort.permute.IntPermuteKernel; -import io.deephaven.engine.table.impl.util.compact.IntCompactKernel; -import io.deephaven.engine.table.impl.util.compact.LongCompactKernel; -// endmixin rehash - -import io.deephaven.util.SafeCloseableArray; -import org.jetbrains.annotations.NotNull; - -// region extra imports -import io.deephaven.engine.table.impl.HashTableAnnotations; -import io.deephaven.util.SafeCloseable; -import org.apache.commons.lang3.mutable.MutableInt; - -import java.util.Objects; -// endregion extra imports - -import static io.deephaven.util.SafeCloseable.closeArray; - -// region class visibility -public -// endregion class visibility -class StaticChunkedOperatorAggregationStateManager - // region extensions - implements OperatorAggregationStateManager - // endregion extensions -{ - // region constants - public static final int CHUNK_SIZE = ChunkedOperatorAggregationHelper.CHUNK_SIZE; - private static final int MINIMUM_INITIAL_HASH_SIZE = CHUNK_SIZE; - private static final long MAX_TABLE_SIZE = HashTableColumnSource.MINIMUM_OVERFLOW_HASH_SLOT; - // endregion constants - - // mixin rehash - static final double DEFAULT_MAX_LOAD_FACTOR = 0.75; - static final double DEFAULT_TARGET_LOAD_FACTOR = 0.70; - // endmixin rehash - - // region preamble variables - // endregion preamble variables - - @HashTableAnnotations.EmptyStateValue - // @NullStateValue@ from \QQueryConstants.NULL_INT\E, @StateValueType@ from \Qint\E - private static final int EMPTY_RIGHT_VALUE = QueryConstants.NULL_INT; - - // mixin getStateValue - // region overflow pivot - // endregion overflow pivot - // endmixin getStateValue - - // the number of slots in our table - // mixin rehash - private int tableSize; - // endmixin rehash - // altmixin rehash: 
private final int tableSize; - - // how many key columns we have - private final int keyColumnCount; - - // mixin rehash - private long numEntries = 0; - - /** Our table size must be 2^L (i.e. a power of two); and the pivot is between 2^(L-1) and 2^L. - * - *

    When hashing a value, if hashCode % 2^L < tableHashPivot; then the destination location is hashCode % 2^L. - * If hashCode % 2^L >= tableHashPivot, then the destination location is hashCode % 2^(L-1). Once the pivot reaches - * the table size, we can simply double the table size and repeat the process.

    - * - *

    This has the effect of only using hash table locations < hashTablePivot. When we want to expand the table - * we can move some of the entries from the location {@code tableHashPivot - 2^(L-1)} to tableHashPivot. This - * provides for incremental expansion of the hash table, without the need for a full rehash.

    - */ - private int tableHashPivot; - - // the table will be rehashed to a load factor of targetLoadFactor if our loadFactor exceeds maximumLoadFactor - // or if it falls below minimum load factor we will instead contract the table - private double targetLoadFactor = DEFAULT_TARGET_LOAD_FACTOR; - private double maximumLoadFactor = DEFAULT_MAX_LOAD_FACTOR; - // TODO: We do not yet support contraction - // private final double minimumLoadFactor = 0.5; - - private final IntegerArraySource freeOverflowLocations = new IntegerArraySource(); - private int freeOverflowCount = 0; - // endmixin rehash - - // the keys for our hash entries - private final ArrayBackedColumnSource[] keySources; - // the location of any overflow entry in this bucket - private final IntegerArraySource overflowLocationSource = new IntegerArraySource(); - - // we are going to also reuse this for our state entry, so that we do not need additional storage - @HashTableAnnotations.StateColumnSource - // @StateColumnSourceType@ from \QIntegerArraySource\E - private final IntegerArraySource stateSource - // @StateColumnSourceConstructor@ from \QIntegerArraySource()\E - = new IntegerArraySource(); - - // the keys for overflow - private int nextOverflowLocation = 0; - private final ArrayBackedColumnSource [] overflowKeySources; - // the location of the next key in an overflow bucket - private final IntegerArraySource overflowOverflowLocationSource = new IntegerArraySource(); - // the overflow buckets for the state source - @HashTableAnnotations.OverflowStateColumnSource - // @StateColumnSourceType@ from \QIntegerArraySource\E - private final IntegerArraySource overflowStateSource - // @StateColumnSourceConstructor@ from \QIntegerArraySource()\E - = new IntegerArraySource(); - - // the type of each of our key chunks - private final ChunkType[] keyChunkTypes; - - // the operators for hashing and various equality methods - private final ChunkHasher[] chunkHashers; - private final ChunkEquals[] chunkEquals; - private final PermuteKernel[] chunkCopiers; - - // mixin rehash - // If we have objects in our key columns, then we should null them out if we delete an overflow row, this only - // applies to ObjectArraySources, for primitives we are content to leave the dead entries in the tables, because - // they will not affect GC. 
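The tableHashPivot javadoc above describes incremental expansion: a hash whose full-table bucket lies below the pivot uses the 2^L modulus, anything at or above the pivot still uses the smaller 2^(L-1) modulus, and growing the table only requires splitting the bucket at {@code tableHashPivot - 2^(L-1)} and advancing the pivot. As a hedged illustration only (the deleted class folds the equivalent work into convertHashToTableLocations), bucket selection under that scheme looks roughly like:

```java
// Hedged illustration of the pivot addressing described in the javadoc above; not the deleted
// class's actual code. Assumes tableSize == 2^L is a power of two and hashCode has already been
// made non-negative, so masking is equivalent to the modulus in the comment.
static int destinationLocation(final int hashCode, final int tableSize, final int tableHashPivot) {
    final int fullLocation = hashCode & (tableSize - 1);      // hashCode % 2^L
    if (fullLocation < tableHashPivot) {
        return fullLocation;                                  // this bucket has already been split
    }
    return hashCode & ((tableSize >> 1) - 1);                 // hashCode % 2^(L-1): not yet split
}
```

Once the pivot reaches tableSize the table doubles and the process repeats, which matches what the deleted doRehash loop further down does before migrating overflow entries.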
- private final ObjectArraySource[] overflowKeyColumnsToNull; - // endmixin rehash - - // region extra variables - private final IntegerArraySource outputPositionToHashSlot = new IntegerArraySource(); - // endregion extra variables - - // region constructor visibility - // endregion constructor visibility - StaticChunkedOperatorAggregationStateManager(ColumnSource[] tableKeySources - , int tableSize - // region constructor arguments - , double maximumLoadFactor - , double targetLoadFactor - // endregion constructor arguments - ) { - // region super - // endregion super - keyColumnCount = tableKeySources.length; - - this.tableSize = tableSize; - Require.leq(tableSize, "tableSize", MAX_TABLE_SIZE); - Require.gtZero(tableSize, "tableSize"); - Require.eq(Integer.bitCount(tableSize), "Integer.bitCount(tableSize)", 1); - // mixin rehash - this.tableHashPivot = tableSize; - // endmixin rehash - - overflowKeySources = new ArrayBackedColumnSource[keyColumnCount]; - keySources = new ArrayBackedColumnSource[keyColumnCount]; - - keyChunkTypes = new ChunkType[keyColumnCount]; - chunkHashers = new ChunkHasher[keyColumnCount]; - chunkEquals = new ChunkEquals[keyColumnCount]; - chunkCopiers = new PermuteKernel[keyColumnCount]; - - for (int ii = 0; ii < keyColumnCount; ++ii) { - // the sources that we will use to store our hash table - keySources[ii] = ArrayBackedColumnSource.getMemoryColumnSource(tableSize, tableKeySources[ii].getType()); - keyChunkTypes[ii] = tableKeySources[ii].getChunkType(); - - overflowKeySources[ii] = ArrayBackedColumnSource.getMemoryColumnSource(CHUNK_SIZE, tableKeySources[ii].getType()); - - chunkHashers[ii] = ChunkHasher.makeHasher(keyChunkTypes[ii]); - chunkEquals[ii] = ChunkEquals.makeEqual(keyChunkTypes[ii]); - chunkCopiers[ii] = PermuteKernel.makePermuteKernel(keyChunkTypes[ii]); - } - - // mixin rehash - overflowKeyColumnsToNull = Arrays.stream(overflowKeySources).filter(x -> x instanceof ObjectArraySource).map(x -> (ObjectArraySource)x).toArray(ObjectArraySource[]::new); - // endmixin rehash - - // region constructor - this.maximumLoadFactor = maximumLoadFactor; - this.targetLoadFactor = targetLoadFactor; - // endregion constructor - - ensureCapacity(tableSize); - } - - private void ensureCapacity(int tableSize) { - stateSource.ensureCapacity(tableSize); - overflowLocationSource.ensureCapacity(tableSize); - for (int ii = 0; ii < keyColumnCount; ++ii) { - keySources[ii].ensureCapacity(tableSize); - } - // region ensureCapacity - // endregion ensureCapacity - } - - private void ensureOverflowCapacity(WritableIntChunk chunkPositionsToInsertInOverflow) { - final int locationsToAllocate = chunkPositionsToInsertInOverflow.size(); - // mixin rehash - if (freeOverflowCount >= locationsToAllocate) { - return; - } - final int newCapacity = nextOverflowLocation + locationsToAllocate - freeOverflowCount; - // endmixin rehash - // altmixin rehash: final int newCapacity = nextOverflowLocation + locationsToAllocate; - overflowOverflowLocationSource.ensureCapacity(newCapacity); - overflowStateSource.ensureCapacity(newCapacity); - //noinspection ForLoopReplaceableByForEach - for (int ii = 0; ii < overflowKeySources.length; ++ii) { - overflowKeySources[ii].ensureCapacity(newCapacity); - } - // region ensureOverflowCapacity - // endregion ensureOverflowCapacity - } - - // region build wrappers - - @Override - public void add(final SafeCloseable bc, RowSequence rowSequence, ColumnSource[] sources, MutableInt nextOutputPosition, WritableIntChunk outputPositions) { - if (rowSequence.isEmpty()) { 
- return; - } - buildTable((BuildContext)bc, rowSequence, sources, nextOutputPosition, outputPositions); - } - - @Override - public SafeCloseable makeAggregationStateBuildContext(ColumnSource[] buildSources, long maxSize) { - return makeBuildContext(buildSources, maxSize); - } - - // endregion build wrappers - - class BuildContext implements Context { - final int chunkSize; - - final LongIntTimsortKernel.LongIntSortKernelContext sortContext; - final ColumnSource.FillContext stateSourceFillContext; - // mixin rehash - final ColumnSource.FillContext overflowStateSourceFillContext; - // endmixin rehash - final ColumnSource.FillContext overflowFillContext; - final ColumnSource.FillContext overflowOverflowFillContext; - - // the chunk of hashcodes - final WritableIntChunk hashChunk; - // the chunk of positions within our table - final WritableLongChunk tableLocationsChunk; - - final ResettableWritableChunk[] writeThroughChunks = getResettableWritableKeyChunks(); - final WritableIntChunk sourcePositions; - final WritableIntChunk destinationLocationPositionInWriteThrough; - - final WritableBooleanChunk filledValues; - final WritableBooleanChunk equalValues; - - // the overflow locations that we need to get from the overflowLocationSource (or overflowOverflowLocationSource) - final WritableLongChunk overflowLocationsToFetch; - // the overflow position in the working key chunks, parallel to the overflowLocationsToFetch - final WritableIntChunk overflowPositionInSourceChunk; - - // the position with our hash table that we should insert a value into - final WritableLongChunk insertTableLocations; - // the position in our chunk, parallel to the workingChunkInsertTablePositions - final WritableIntChunk insertPositionsInSourceChunk; - - // we sometimes need to check two positions within a single chunk for equality, this contains those positions as pairs - final WritableIntChunk chunkPositionsToCheckForEquality; - // While processing overflow insertions, parallel to the chunkPositions to check for equality, the overflow location that - // is represented by the first of the pairs in chunkPositionsToCheckForEquality - final WritableLongChunk overflowLocationForEqualityCheck; - - // the chunk of state values that we read from the hash table - // @WritableStateChunkType@ from \QWritableIntChunk\E - final WritableIntChunk workingStateEntries; - - // the chunks for getting key values from the hash table - final WritableChunk[] workingKeyChunks; - final WritableChunk[] overflowKeyChunks; - - // when fetching from the overflow, we record which chunk position we are fetching for - final WritableIntChunk chunkPositionsForFetches; - // which positions in the chunk we are inserting into the overflow - final WritableIntChunk chunkPositionsToInsertInOverflow; - // which table locations we are inserting into the overflow - final WritableLongChunk tableLocationsToInsertInOverflow; - - // values we have read from the overflow locations sources - final WritableIntChunk overflowLocations; - - // mixin rehash - final WritableLongChunk rehashLocations; - final WritableIntChunk overflowLocationsToMigrate; - final WritableLongChunk overflowLocationsAsKeyIndices; - final WritableBooleanChunk shouldMoveBucket; - - final ResettableWritableLongChunk overflowLocationForPromotionLoop = ResettableWritableLongChunk.makeResettableChunk(); - - // mixin allowUpdateWriteThroughState - // @WritableStateChunkType@ from \QWritableIntChunk\E, @WritableStateChunkName@ from \QWritableIntChunk\E - final ResettableWritableIntChunk 
writeThroughState = ResettableWritableIntChunk.makeResettableChunk(); - // endmixin allowUpdateWriteThroughState - final ResettableWritableIntChunk writeThroughOverflowLocations = ResettableWritableIntChunk.makeResettableChunk(); - // endmixin rehash - - final SharedContext sharedFillContext; - final ColumnSource.FillContext[] workingFillContexts; - final SharedContext sharedOverflowContext; - final ColumnSource.FillContext[] overflowContexts; - final SharedContext sharedBuildContext; - final ChunkSource.GetContext[] buildContexts; - - // region build context - final WritableIntChunk duplicatePositions; - final WritableLongChunk addedSlotsByPosition; - // endregion build context - - final boolean haveSharedContexts; - - private BuildContext(ColumnSource[] buildSources, - int chunkSize - // region build context constructor args - // endregion build context constructor args - ) { - Assert.gtZero(chunkSize, "chunkSize"); - this.chunkSize = chunkSize; - haveSharedContexts = buildSources.length > 1; - if (haveSharedContexts) { - sharedFillContext = SharedContext.makeSharedContext(); - sharedOverflowContext = SharedContext.makeSharedContext(); - sharedBuildContext = SharedContext.makeSharedContext(); - } else { - // no point in the additional work implied by these not being null. - sharedFillContext = null; - sharedOverflowContext = null; - sharedBuildContext = null; - } - workingFillContexts = makeFillContexts(keySources, sharedFillContext, chunkSize); - overflowContexts = makeFillContexts(overflowKeySources, sharedOverflowContext, chunkSize); - buildContexts = makeGetContexts(buildSources, sharedBuildContext, chunkSize); - // region build context constructor - duplicatePositions = WritableIntChunk.makeWritableChunk(chunkSize * 2); - addedSlotsByPosition = WritableLongChunk.makeWritableChunk(chunkSize); - // endregion build context constructor - sortContext = LongIntTimsortKernel.createContext(chunkSize); - stateSourceFillContext = stateSource.makeFillContext(chunkSize); - overflowFillContext = overflowLocationSource.makeFillContext(chunkSize); - overflowOverflowFillContext = overflowOverflowLocationSource.makeFillContext(chunkSize); - hashChunk = WritableIntChunk.makeWritableChunk(chunkSize); - tableLocationsChunk = WritableLongChunk.makeWritableChunk(chunkSize); - sourcePositions = WritableIntChunk.makeWritableChunk(chunkSize); - destinationLocationPositionInWriteThrough = WritableIntChunk.makeWritableChunk(chunkSize); - filledValues = WritableBooleanChunk.makeWritableChunk(chunkSize); - equalValues = WritableBooleanChunk.makeWritableChunk(chunkSize); - overflowLocationsToFetch = WritableLongChunk.makeWritableChunk(chunkSize); - overflowPositionInSourceChunk = WritableIntChunk.makeWritableChunk(chunkSize); - insertTableLocations = WritableLongChunk.makeWritableChunk(chunkSize); - insertPositionsInSourceChunk = WritableIntChunk.makeWritableChunk(chunkSize); - chunkPositionsToCheckForEquality = WritableIntChunk.makeWritableChunk(chunkSize * 2); - overflowLocationForEqualityCheck = WritableLongChunk.makeWritableChunk(chunkSize); - // @WritableStateChunkName@ from \QWritableIntChunk\E - workingStateEntries = WritableIntChunk.makeWritableChunk(chunkSize); - workingKeyChunks = getWritableKeyChunks(chunkSize); - overflowKeyChunks = getWritableKeyChunks(chunkSize); - chunkPositionsForFetches = WritableIntChunk.makeWritableChunk(chunkSize); - chunkPositionsToInsertInOverflow = WritableIntChunk.makeWritableChunk(chunkSize); - tableLocationsToInsertInOverflow = 
WritableLongChunk.makeWritableChunk(chunkSize); - overflowLocations = WritableIntChunk.makeWritableChunk(chunkSize); - // mixin rehash - rehashLocations = WritableLongChunk.makeWritableChunk(chunkSize); - overflowStateSourceFillContext = overflowStateSource.makeFillContext(chunkSize); - overflowLocationsToMigrate = WritableIntChunk.makeWritableChunk(chunkSize); - overflowLocationsAsKeyIndices = WritableLongChunk.makeWritableChunk(chunkSize); - shouldMoveBucket = WritableBooleanChunk.makeWritableChunk(chunkSize); - // endmixin rehash - } - - private void resetSharedContexts() { - if (!haveSharedContexts) { - return; - } - sharedFillContext.reset(); - sharedOverflowContext.reset(); - sharedBuildContext.reset(); - } - - private void closeSharedContexts() { - if (!haveSharedContexts) { - return; - } - sharedFillContext.close(); - sharedOverflowContext.close(); - sharedBuildContext.close(); - } - - @Override - public void close() { - sortContext.close(); - stateSourceFillContext.close(); - // mixin rehash - overflowStateSourceFillContext.close(); - // endmixin rehash - overflowFillContext.close(); - overflowOverflowFillContext.close(); - closeArray(workingFillContexts); - closeArray(overflowContexts); - closeArray(buildContexts); - - hashChunk.close(); - tableLocationsChunk.close(); - closeArray(writeThroughChunks); - - sourcePositions.close(); - destinationLocationPositionInWriteThrough.close(); - filledValues.close(); - equalValues.close(); - overflowLocationsToFetch.close(); - overflowPositionInSourceChunk.close(); - insertTableLocations.close(); - insertPositionsInSourceChunk.close(); - chunkPositionsToCheckForEquality.close(); - overflowLocationForEqualityCheck.close(); - workingStateEntries.close(); - closeArray(workingKeyChunks); - closeArray(overflowKeyChunks); - chunkPositionsForFetches.close(); - chunkPositionsToInsertInOverflow.close(); - tableLocationsToInsertInOverflow.close(); - overflowLocations.close(); - // mixin rehash - rehashLocations.close(); - overflowLocationsToMigrate.close(); - overflowLocationsAsKeyIndices.close(); - shouldMoveBucket.close(); - overflowLocationForPromotionLoop.close(); - // mixin allowUpdateWriteThroughState - writeThroughState.close(); - // endmixin allowUpdateWriteThroughState - writeThroughOverflowLocations.close(); - // endmixin rehash - // region build context close - duplicatePositions.close(); - addedSlotsByPosition.close(); - // endregion build context close - closeSharedContexts(); - } - - } - - public BuildContext makeBuildContext(ColumnSource[] buildSources, - long maxSize - // region makeBuildContext args - // endregion makeBuildContext args - ) { - return new BuildContext(buildSources, (int)Math.min(CHUNK_SIZE, maxSize) - // region makeBuildContext arg pass - // endregion makeBuildContext arg pass - ); - } - - private void buildTable(final BuildContext bc, - final RowSequence buildIndex, - ColumnSource[] buildSources - // region extra build arguments - , final MutableInt outputPosition - , final WritableIntChunk outputPositions - // endregion extra build arguments - ) { - long hashSlotOffset = 0; - // region build start - outputPositions.setSize(buildIndex.intSize()); - int maxAddedPosition = -1; - bc.addedSlotsByPosition.setSize(outputPositions.size()); - bc.addedSlotsByPosition.fillWithValue(0, bc.addedSlotsByPosition.size(), RowSequence.NULL_ROW_KEY); - bc.duplicatePositions.setSize(0); - // endregion build start - - try (final RowSequence.Iterator rsIt = buildIndex.getRowSequenceIterator(); - // region build initialization try - // 
endregion build initialization try - ) { - // region build initialization - // endregion build initialization - - // chunks to write through to the table key sources - - - //noinspection unchecked - final Chunk [] sourceKeyChunks = new Chunk[buildSources.length]; - - while (rsIt.hasMore()) { - // we reset early to avoid carrying around state for old RowSequence which can't be reused. - bc.resetSharedContexts(); - - final RowSequence chunkOk = rsIt.getNextRowSequenceWithLength(bc.chunkSize); - - getKeyChunks(buildSources, bc.buildContexts, sourceKeyChunks, chunkOk); - hashKeyChunks(bc.hashChunk, sourceKeyChunks); - - // region build loop initialization - // endregion build loop initialization - - // turn hash codes into indices within our table - convertHashToTableLocations(bc.hashChunk, bc.tableLocationsChunk); - - // now fetch the values from the table, note that we do not order these fetches - fillKeys(bc.workingFillContexts, bc.workingKeyChunks, bc.tableLocationsChunk); - - // and the corresponding states, if a value is null, we've found our insertion point - stateSource.fillChunkUnordered(bc.stateSourceFillContext, bc.workingStateEntries, bc.tableLocationsChunk); - - // find things that exist - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(bc.workingStateEntries, EMPTY_RIGHT_VALUE, bc.filledValues); - - // to be equal, the location must exist; and each of the keyChunks must match - bc.equalValues.setSize(bc.filledValues.size()); - bc.equalValues.copyFromChunk(bc.filledValues, 0, 0, bc.filledValues.size()); - checkKeyEquality(bc.equalValues, bc.workingKeyChunks, sourceKeyChunks); - - bc.overflowPositionInSourceChunk.setSize(0); - bc.overflowLocationsToFetch.setSize(0); - bc.insertPositionsInSourceChunk.setSize(0); - bc.insertTableLocations.setSize(0); - - for (int ii = 0; ii < bc.equalValues.size(); ++ii) { - final long tableLocation = bc.tableLocationsChunk.get(ii); - if (bc.equalValues.get(ii)) { - // region build found main - outputPositions.set(ii, bc.workingStateEntries.get(ii)); - // endregion build found main - } else if (bc.filledValues.get(ii)) { - // we must handle this as part of the overflow bucket - bc.overflowPositionInSourceChunk.add(ii); - bc.overflowLocationsToFetch.add(tableLocation); - } else { - // for the values that are empty, we record them in the insert chunks - bc.insertPositionsInSourceChunk.add(ii); - bc.insertTableLocations.add(tableLocation); - } - } - - // we first sort by position; so that we'll not insert things into the table twice or overwrite - // collisions - LongIntTimsortKernel.sort(bc.sortContext, bc.insertPositionsInSourceChunk, bc.insertTableLocations); - - // the first and last valid table location in our writeThroughChunks - long firstBackingChunkLocation = -1; - long lastBackingChunkLocation = -1; - - bc.chunkPositionsToCheckForEquality.setSize(0); - bc.destinationLocationPositionInWriteThrough.setSize(0); - bc.sourcePositions.setSize(0); - - for (int ii = 0; ii < bc.insertPositionsInSourceChunk.size(); ) { - final int firstChunkPositionForHashLocation = bc.insertPositionsInSourceChunk.get(ii); - final long currentHashLocation = bc.insertTableLocations.get(ii); - - // region main insert - stateSource.set(currentHashLocation, chunkPositionToPendingState(firstChunkPositionForHashLocation)); - bc.addedSlotsByPosition.set(firstChunkPositionForHashLocation, currentHashLocation); - maxAddedPosition = Math.max(maxAddedPosition, firstChunkPositionForHashLocation); - // endregion main insert - // mixin rehash - numEntries++; - 
// endmixin rehash - - if (currentHashLocation > lastBackingChunkLocation) { - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, currentHashLocation, keySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - } - - bc.sourcePositions.add(firstChunkPositionForHashLocation); - bc.destinationLocationPositionInWriteThrough.add((int)(currentHashLocation - firstBackingChunkLocation)); - - final int currentHashValue = bc.hashChunk.get(firstChunkPositionForHashLocation); - - while (++ii < bc.insertTableLocations.size() && bc.insertTableLocations.get(ii) == currentHashLocation) { - // if this thing is equal to the first one; we should mark the appropriate slot, we don't - // know the types and don't want to make the virtual calls, so we need to just accumulate - // the things to check for equality afterwards - final int chunkPosition = bc.insertPositionsInSourceChunk.get(ii); - if (bc.hashChunk.get(chunkPosition) != currentHashValue) { - // we must be an overflow - bc.overflowPositionInSourceChunk.add(chunkPosition); - bc.overflowLocationsToFetch.add(currentHashLocation); - } else { - // we need to check equality, equal things are the same slot; unequal things are overflow - bc.chunkPositionsToCheckForEquality.add(firstChunkPositionForHashLocation); - bc.chunkPositionsToCheckForEquality.add(chunkPosition); - } - } - } - - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - - checkPairEquality(bc.chunkPositionsToCheckForEquality, sourceKeyChunks, bc.equalValues); - - for (int ii = 0; ii < bc.equalValues.size(); ii++) { - final int chunkPosition = bc.chunkPositionsToCheckForEquality.get(ii * 2 + 1); - final long tableLocation = bc.tableLocationsChunk.get(chunkPosition); - - if (bc.equalValues.get(ii)) { - // region build main duplicate - bc.duplicatePositions.add(chunkPosition); - bc.duplicatePositions.add(bc.chunkPositionsToCheckForEquality.get(ii * 2)); - // endregion build main duplicate - } else { - // we are an overflow element - bc.overflowPositionInSourceChunk.add(chunkPosition); - bc.overflowLocationsToFetch.add(tableLocation); - } - } - - // now handle overflow - if (bc.overflowPositionInSourceChunk.size() > 0) { - // on the first pass we fill from the table's locations - overflowLocationSource.fillChunkUnordered(bc.overflowFillContext, bc.overflowLocations, bc.overflowLocationsToFetch); - bc.chunkPositionsToInsertInOverflow.setSize(0); - bc.tableLocationsToInsertInOverflow.setSize(0); - - // overflow slots now contains the positions in the overflow columns - - while (bc.overflowPositionInSourceChunk.size() > 0) { - // now we have the overflow slot for each of the things we are interested in. - // if the slot is null, then we can insert it and we are complete. 
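The comments in this loop describe following the per-bucket overflow chain: the first fetch reads the chain head from overflowLocationSource, and subsequent passes follow overflowOverflowLocationSource until either a key match or a NULL_INT terminator. A scalar, hedged sketch of that probe (the real code does it chunk-wise; keyMatchesAt below is a hypothetical stand-in for the chunked key-equality checks):

```java
import io.deephaven.engine.table.impl.sources.IntegerArraySource;
import io.deephaven.util.QueryConstants;
import java.util.function.IntPredicate;

final class OverflowProbeSketch {
    // Hedged scalar sketch of the chained-overflow probe performed in bulk by the loop here.
    static int probeOverflowChain(
            final IntegerArraySource overflowLocationSource,         // head of each bucket's chain
            final IntegerArraySource overflowOverflowLocationSource, // next-pointers within a chain
            final IntegerArraySource overflowStateSource,            // output position per overflow slot
            final long tableLocation,
            final IntPredicate keyMatchesAt) {                       // hypothetical key-equality test
        int overflowLocation = overflowLocationSource.getUnsafe(tableLocation);
        while (overflowLocation != QueryConstants.NULL_INT) {
            if (keyMatchesAt.test(overflowLocation)) {
                return overflowStateSource.getUnsafe(overflowLocation); // existing output position
            }
            overflowLocation = overflowOverflowLocationSource.getUnsafe(overflowLocation);
        }
        return QueryConstants.NULL_INT; // EMPTY_RIGHT_VALUE: caller inserts at the chain head
    }
}
```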
- - bc.overflowLocationsToFetch.setSize(0); - bc.chunkPositionsForFetches.setSize(0); - - // TODO: Crunch it down - for (int ii = 0; ii < bc.overflowLocations.size(); ++ii) { - final int overflowLocation = bc.overflowLocations.get(ii); - final int chunkPosition = bc.overflowPositionInSourceChunk.get(ii); - if (overflowLocation == QueryConstants.NULL_INT) { - // insert me into overflow in the next free overflow slot - bc.chunkPositionsToInsertInOverflow.add(chunkPosition); - bc.tableLocationsToInsertInOverflow.add(bc.tableLocationsChunk.get(chunkPosition)); - } else { - // add to the key positions to fetch - bc.chunkPositionsForFetches.add(chunkPosition); - bc.overflowLocationsToFetch.add(overflowLocation); - } - } - - // if the slot is non-null, then we need to fetch the overflow values for comparison - fillOverflowKeys(bc.overflowContexts, bc.overflowKeyChunks, bc.overflowLocationsToFetch); - - // now compare the value in our overflowKeyChunk to the value in the sourceChunk - checkLhsPermutedEquality(bc.chunkPositionsForFetches, sourceKeyChunks, bc.overflowKeyChunks, bc.equalValues); - - int writePosition = 0; - for (int ii = 0; ii < bc.equalValues.size(); ++ii) { - final int chunkPosition = bc.chunkPositionsForFetches.get(ii); - final long overflowLocation = bc.overflowLocationsToFetch.get(ii); - if (bc.equalValues.get(ii)) { - // region build overflow found - final int position = overflowStateSource.getUnsafe(overflowLocation); - outputPositions.set(chunkPosition, position); - // endregion build overflow found - } else { - // otherwise, we need to repeat the overflow calculation, with our next overflow fetch - bc.overflowLocationsToFetch.set(writePosition, overflowLocation); - bc.overflowPositionInSourceChunk.set(writePosition++, chunkPosition); - } - } - bc.overflowLocationsToFetch.setSize(writePosition); - bc.overflowPositionInSourceChunk.setSize(writePosition); - - // on subsequent iterations, we are following the overflow chains, so we fill from the overflowOverflowLocationSource - if (bc.overflowPositionInSourceChunk.size() > 0) { - overflowOverflowLocationSource.fillChunkUnordered(bc.overflowOverflowFillContext, bc.overflowLocations, bc.overflowLocationsToFetch); - } - } - - // make sure we actually have enough room to insert stuff where we would like - ensureOverflowCapacity(bc.chunkPositionsToInsertInOverflow); - - firstBackingChunkLocation = -1; - lastBackingChunkLocation = -1; - bc.destinationLocationPositionInWriteThrough.setSize(0); - bc.sourcePositions.setSize(0); - - // do the overflow insertions, one per table position at a time; until we have no insertions left - while (bc.chunkPositionsToInsertInOverflow.size() > 0) { - // sort by table position - LongIntTimsortKernel.sort(bc.sortContext, bc.chunkPositionsToInsertInOverflow, bc.tableLocationsToInsertInOverflow); - - bc.chunkPositionsToCheckForEquality.setSize(0); - bc.overflowLocationForEqualityCheck.setSize(0); - - for (int ii = 0; ii < bc.chunkPositionsToInsertInOverflow.size(); ) { - final long tableLocation = bc.tableLocationsToInsertInOverflow.get(ii); - final int chunkPosition = bc.chunkPositionsToInsertInOverflow.get(ii); - - final int allocatedOverflowLocation = allocateOverflowLocation(); - - // we are inserting into the head of the list, so we move the existing overflow into our overflow - overflowOverflowLocationSource.set(allocatedOverflowLocation, overflowLocationSource.getUnsafe(tableLocation)); - // and we point the overflow at our slot - overflowLocationSource.set(tableLocation, 
allocatedOverflowLocation); - - // region build overflow insert - overflowStateSource.set(allocatedOverflowLocation, chunkPositionToPendingState(chunkPosition)); - bc.addedSlotsByPosition.set(chunkPosition, overflowLocationToHashLocation(allocatedOverflowLocation)); - maxAddedPosition = Math.max(maxAddedPosition, chunkPosition); - // endregion build overflow insert - - // mixin rehash - numEntries++; - // endmixin rehash - - // get the backing chunk from the overflow keys - if (allocatedOverflowLocation > lastBackingChunkLocation || allocatedOverflowLocation < firstBackingChunkLocation) { - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, allocatedOverflowLocation, overflowKeySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - } - - // now we must set all of our key values in the overflow - bc.sourcePositions.add(chunkPosition); - bc.destinationLocationPositionInWriteThrough.add((int)(allocatedOverflowLocation - firstBackingChunkLocation)); - - while (++ii < bc.tableLocationsToInsertInOverflow.size() && bc.tableLocationsToInsertInOverflow.get(ii) == tableLocation) { - bc.overflowLocationForEqualityCheck.add(allocatedOverflowLocation); - bc.chunkPositionsToCheckForEquality.add(chunkPosition); - bc.chunkPositionsToCheckForEquality.add(bc.chunkPositionsToInsertInOverflow.get(ii)); - } - } - - // now we need to do the equality check; so that we can mark things appropriately - int remainingInserts = 0; - - checkPairEquality(bc.chunkPositionsToCheckForEquality, sourceKeyChunks, bc.equalValues); - for (int ii = 0; ii < bc.equalValues.size(); ii++) { - final int chunkPosition = bc.chunkPositionsToCheckForEquality.get(ii * 2 + 1); - final long tableLocation = bc.tableLocationsChunk.get(chunkPosition); - - if (bc.equalValues.get(ii)) { - final long insertedOverflowLocation = bc.overflowLocationForEqualityCheck.get(ii); - // region build overflow duplicate - bc.duplicatePositions.add(chunkPosition); - bc.duplicatePositions.add(bc.chunkPositionsToCheckForEquality.get(ii * 2)); - // endregion build overflow duplicate - } else { - // we need to try this element again in the next round - bc.chunkPositionsToInsertInOverflow.set(remainingInserts, chunkPosition); - bc.tableLocationsToInsertInOverflow.set(remainingInserts++, tableLocation); - } - } - - bc.chunkPositionsToInsertInOverflow.setSize(remainingInserts); - bc.tableLocationsToInsertInOverflow.setSize(remainingInserts); - } - flushWriteThrough(bc.sourcePositions, sourceKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - // mixin rehash - // region post-build rehash - doRehash(bc); - // endregion post-build rehash - // endmixin rehash - } - - // region copy hash slots - outputPositionToHashSlot.ensureCapacity(outputPosition.intValue() + maxAddedPosition + 1); - for (int ii = 0; ii <= maxAddedPosition; ++ii) { - final long longSlot = bc.addedSlotsByPosition.get(ii); - if (longSlot != RowSequence.NULL_ROW_KEY) { - final int intSlot = (int) longSlot; - - outputPositions.set(ii, outputPosition.intValue()); - if (isOverflowLocation(intSlot)) { - overflowStateSource.set(hashLocationToOverflowLocation(intSlot), outputPosition.intValue()); - } else { - stateSource.set(intSlot, outputPosition.intValue()); - } - - outputPositionToHashSlot.set(outputPosition.intValue(), intSlot); - outputPosition.increment(); - } - } - - for 
(int ii = 0; ii < bc.duplicatePositions.size(); ii += 2) { - outputPositions.set(bc.duplicatePositions.get(ii), outputPositions.get(bc.duplicatePositions.get(ii + 1))); - } - // endregion copy hash slots - hashSlotOffset += chunkOk.size(); - } - // region post build loop - // endregion post build loop - } - } - - // mixin rehash - public void doRehash(BuildContext bc - // region extra rehash arguments - // endregion extra rehash arguments - ) { - long firstBackingChunkLocation; - long lastBackingChunkLocation;// mixin rehash - // region rehash start - // endregion rehash start - while (rehashRequired()) { - // region rehash loop start - // endregion rehash loop start - if (tableHashPivot == tableSize) { - tableSize *= 2; - ensureCapacity(tableSize); - // region rehash ensure capacity - // endregion rehash ensure capacity - } - - final long targetBuckets = Math.min(MAX_TABLE_SIZE, (long)(numEntries / targetLoadFactor)); - final int bucketsToAdd = Math.max(1, (int)Math.min(Math.min(targetBuckets, tableSize) - tableHashPivot, bc.chunkSize)); - - initializeRehashLocations(bc.rehashLocations, bucketsToAdd); - - // fill the overflow bucket locations - overflowLocationSource.fillChunk(bc.overflowFillContext, bc.overflowLocations, RowSequenceFactory.wrapRowKeysChunkAsRowSequence(LongChunk.downcast(bc.rehashLocations))); - // null out the overflow locations in the table - setOverflowLocationsToNull(tableHashPivot - (tableSize >> 1), bucketsToAdd); - - while (bc.overflowLocations.size() > 0) { - // figure out which table location each overflow location maps to - compactOverflowLocations(bc.overflowLocations, bc.overflowLocationsToFetch); - if (bc.overflowLocationsToFetch.size() == 0) { - break; - } - - fillOverflowKeys(bc.overflowContexts, bc.workingKeyChunks, bc.overflowLocationsToFetch); - hashKeyChunks(bc.hashChunk, bc.workingKeyChunks); - convertHashToTableLocations(bc.hashChunk, bc.tableLocationsChunk, tableHashPivot + bucketsToAdd); - - // read the next chunk of overflow locations, which we will be overwriting in the next step - overflowOverflowLocationSource.fillChunkUnordered(bc.overflowOverflowFillContext, bc.overflowLocations, bc.overflowLocationsToFetch); - - // swap the table's overflow pointer with our location - swapOverflowPointers(bc.tableLocationsChunk, bc.overflowLocationsToFetch); - } - - // now rehash the main entries - - stateSource.fillChunkUnordered(bc.stateSourceFillContext, bc.workingStateEntries, bc.rehashLocations); - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(bc.workingStateEntries, EMPTY_RIGHT_VALUE, bc.shouldMoveBucket); - - // crush down things that don't exist - LongCompactKernel.compact(bc.rehashLocations, bc.shouldMoveBucket); - - // get the keys from the table - fillKeys(bc.workingFillContexts, bc.workingKeyChunks, bc.rehashLocations); - hashKeyChunks(bc.hashChunk, bc.workingKeyChunks); - convertHashToTableLocations(bc.hashChunk, bc.tableLocationsChunk, tableHashPivot + bucketsToAdd); - - // figure out which ones must move - LongChunkEquals.notEqual(bc.tableLocationsChunk, bc.rehashLocations, bc.shouldMoveBucket); - - firstBackingChunkLocation = -1; - lastBackingChunkLocation = -1; - // flushWriteThrough will have zero-ed out the sourcePositions and destinationLocationPositionInWriteThrough size - - int moves = 0; - for (int ii = 0; ii < bc.shouldMoveBucket.size(); ++ii) { - if (bc.shouldMoveBucket.get(ii)) { - moves++; - final long newHashLocation = bc.tableLocationsChunk.get(ii); - final long oldHashLocation = 
bc.rehashLocations.get(ii); - - if (newHashLocation > lastBackingChunkLocation) { - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, newHashLocation, keySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - } - - // @StateValueType@ from \Qint\E - final int stateValueToMove = stateSource.getUnsafe(oldHashLocation); - stateSource.set(newHashLocation, stateValueToMove); - stateSource.set(oldHashLocation, EMPTY_RIGHT_VALUE); - // region rehash move values - if (isPendingState(stateValueToMove)) { - bc.addedSlotsByPosition.set(pendingStateToChunkPosition(stateValueToMove), newHashLocation); - } else { - outputPositionToHashSlot.set(stateValueToMove, (int) newHashLocation); - } - // endregion rehash move values - - bc.sourcePositions.add(ii); - bc.destinationLocationPositionInWriteThrough.add((int)(newHashLocation - firstBackingChunkLocation)); - } - } - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - - // everything has been rehashed now, but we have some table locations that might have an overflow, - // without actually having a main entry. We walk through the empty main entries, pulling non-empty - // overflow locations into the main table - - // figure out which of the two possible locations is empty, because (1) we moved something from it - // or (2) we did not move something to it - bc.overflowLocationsToFetch.setSize(bc.shouldMoveBucket.size()); - final int totalPromotionsToProcess = bc.shouldMoveBucket.size(); - createOverflowPartitions(bc.overflowLocationsToFetch, bc.rehashLocations, bc.shouldMoveBucket, moves); - - for (int loop = 0; loop < 2; loop++) { - final boolean firstLoop = loop == 0; - - if (firstLoop) { - bc.overflowLocationForPromotionLoop.resetFromTypedChunk(bc.overflowLocationsToFetch, 0, moves); - } else { - bc.overflowLocationForPromotionLoop.resetFromTypedChunk(bc.overflowLocationsToFetch, moves, totalPromotionsToProcess - moves); - } - - overflowLocationSource.fillChunk(bc.overflowFillContext, bc.overflowLocations, RowSequenceFactory.wrapRowKeysChunkAsRowSequence(bc.overflowLocationForPromotionLoop)); - IntChunkEquals.notEqual(bc.overflowLocations, QueryConstants.NULL_INT, bc.shouldMoveBucket); - - // crunch the chunk down to relevant locations - LongCompactKernel.compact(bc.overflowLocationForPromotionLoop, bc.shouldMoveBucket); - IntCompactKernel.compact(bc.overflowLocations, bc.shouldMoveBucket); - - IntToLongCast.castInto(IntChunk.downcast(bc.overflowLocations), bc.overflowLocationsAsKeyIndices); - - // now fetch the overflow key values - fillOverflowKeys(bc.overflowContexts, bc.workingKeyChunks, bc.overflowLocationsAsKeyIndices); - // and their state values - overflowStateSource.fillChunkUnordered(bc.overflowStateSourceFillContext, bc.workingStateEntries, bc.overflowLocationsAsKeyIndices); - // and where their next pointer is - overflowOverflowLocationSource.fillChunkUnordered(bc.overflowOverflowFillContext, bc.overflowLocationsToMigrate, bc.overflowLocationsAsKeyIndices); - - // we'll have two sorted regions intermingled in the overflowLocationsToFetch, one of them is before the pivot, the other is after the pivot - // so that we can use our write through chunks, we first process the things before the pivot; then have a separate loop for those - // that go after - 
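The rehash loop above recomputes bucket addresses against a moving pivot (tableHashPivot + bucketsToAdd). As a reference, this is a minimal sketch of that linear-hashing style address calculation, mirroring the hashToTableLocation logic that appears later in this class; the method name here is illustrative.

// tableSize is a power of two. Addresses below `pivot` are live in the expanded table;
// anything at or above the pivot has not been split yet, so it folds back into the lower half.
static int bucketFor(final int hash, final int tableSize, final int pivot) {
    int location = hash & (tableSize - 1);
    if (location >= pivot) {
        location -= tableSize >> 1;
    }
    return location;
}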
firstBackingChunkLocation = -1; - lastBackingChunkLocation = -1; - - for (int ii = 0; ii < bc.overflowLocationForPromotionLoop.size(); ++ii) { - final long tableLocation = bc.overflowLocationForPromotionLoop.get(ii); - if ((firstLoop && tableLocation < tableHashPivot) || (!firstLoop && tableLocation >= tableHashPivot)) { - if (tableLocation > lastBackingChunkLocation) { - if (bc.sourcePositions.size() > 0) { - // the permutes here are flushing the write through for the state and overflow locations - - // mixin allowUpdateWriteThroughState - // @StateChunkTypeEnum@ from \QInt\E - IntPermuteKernel.permute(bc.sourcePositions, bc.workingStateEntries, bc.destinationLocationPositionInWriteThrough, bc.writeThroughState); - // endmixin allowUpdateWriteThroughState - IntPermuteKernel.permute(bc.sourcePositions, bc.overflowLocationsToMigrate, bc.destinationLocationPositionInWriteThrough, bc.writeThroughOverflowLocations); - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - } - - firstBackingChunkLocation = updateWriteThroughChunks(bc.writeThroughChunks, tableLocation, keySources); - lastBackingChunkLocation = firstBackingChunkLocation + bc.writeThroughChunks[0].size() - 1; - // mixin allowUpdateWriteThroughState - updateWriteThroughState(bc.writeThroughState, firstBackingChunkLocation, lastBackingChunkLocation); - // endmixin allowUpdateWriteThroughState - updateWriteThroughOverflow(bc.writeThroughOverflowLocations, firstBackingChunkLocation, lastBackingChunkLocation); - } - bc.sourcePositions.add(ii); - bc.destinationLocationPositionInWriteThrough.add((int)(tableLocation - firstBackingChunkLocation)); - // region promotion move - final long overflowLocation = bc.overflowLocationsAsKeyIndices.get(ii); - final int positionForSlot = overflowStateSource.getUnsafe(overflowLocation); - if (isPendingState(positionForSlot)) { - bc.addedSlotsByPosition.set(pendingStateToChunkPosition(positionForSlot), tableLocation); - } else { - outputPositionToHashSlot.set(positionForSlot, (int) tableLocation); - } - // endregion promotion move - } - } - - // the permutes are completing the state and overflow promotions write through - // mixin allowUpdateWriteThroughState - // @StateChunkTypeEnum@ from \QInt\E - IntPermuteKernel.permute(bc.sourcePositions, bc.workingStateEntries, bc.destinationLocationPositionInWriteThrough, bc.writeThroughState); - // endmixin allowUpdateWriteThroughState - IntPermuteKernel.permute(bc.sourcePositions, bc.overflowLocationsToMigrate, bc.destinationLocationPositionInWriteThrough, bc.writeThroughOverflowLocations); - flushWriteThrough(bc.sourcePositions, bc.workingKeyChunks, bc.destinationLocationPositionInWriteThrough, bc.writeThroughChunks); - - // now mark these overflow locations as free, so that we can reuse them - freeOverflowLocations.ensureCapacity(freeOverflowCount + bc.overflowLocations.size()); - // by sorting them, they will be more likely to be in the same write through chunk when we pull them from the free list - bc.overflowLocations.sort(); - for (int ii = 0; ii < bc.overflowLocations.size(); ++ii) { - freeOverflowLocations.set(freeOverflowCount++, bc.overflowLocations.get(ii)); - } - nullOverflowObjectSources(bc.overflowLocations); - } - - tableHashPivot += bucketsToAdd; - // region rehash loop end - // endregion rehash loop end - } - // region rehash final - // endregion rehash final - } - - public boolean rehashRequired() { - return numEntries > (tableHashPivot * maximumLoadFactor) && 
tableHashPivot < MAX_TABLE_SIZE; - } - - /** - * This function can be stuck in for debugging if you are breaking the table to make sure each slot still corresponds - * to the correct location. - */ - @SuppressWarnings({"unused", "unchecked"}) - private void verifyKeyHashes() { - final int maxSize = tableHashPivot; - - final ChunkSource.FillContext [] keyFillContext = makeFillContexts(keySources, SharedContext.makeSharedContext(), maxSize); - final WritableChunk [] keyChunks = getWritableKeyChunks(maxSize); - - try (final WritableLongChunk positions = WritableLongChunk.makeWritableChunk(maxSize); - final WritableBooleanChunk exists = WritableBooleanChunk.makeWritableChunk(maxSize); - final WritableIntChunk hashChunk = WritableIntChunk.makeWritableChunk(maxSize); - final WritableLongChunk tableLocationsChunk = WritableLongChunk.makeWritableChunk(maxSize); - final SafeCloseableArray ignored = new SafeCloseableArray<>(keyFillContext); - final SafeCloseableArray ignored2 = new SafeCloseableArray<>(keyChunks); - // @StateChunkName@ from \QIntChunk\E - final WritableIntChunk stateChunk = WritableIntChunk.makeWritableChunk(maxSize); - final ChunkSource.FillContext fillContext = stateSource.makeFillContext(maxSize)) { - - stateSource.fillChunk(fillContext, stateChunk, RowSetFactory.flat(tableHashPivot)); - - ChunkUtils.fillInOrder(positions); - - // @StateChunkIdentityName@ from \QIntChunk\E - IntChunkEquals.notEqual(stateChunk, EMPTY_RIGHT_VALUE, exists); - - // crush down things that don't exist - LongCompactKernel.compact(positions, exists); - - // get the keys from the table - fillKeys(keyFillContext, keyChunks, positions); - hashKeyChunks(hashChunk, keyChunks); - convertHashToTableLocations(hashChunk, tableLocationsChunk, tableHashPivot); - - for (int ii = 0; ii < positions.size(); ++ii) { - if (tableLocationsChunk.get(ii) != positions.get(ii)) { - throw new IllegalStateException(); - } - } - } - } - - void setTargetLoadFactor(final double targetLoadFactor) { - this.targetLoadFactor = targetLoadFactor; - } - - void setMaximumLoadFactor(final double maximumLoadFactor) { - this.maximumLoadFactor = maximumLoadFactor; - } - - private void createOverflowPartitions(WritableLongChunk overflowLocationsToFetch, WritableLongChunk rehashLocations, WritableBooleanChunk shouldMoveBucket, int moves) { - int startWritePosition = 0; - int endWritePosition = moves; - for (int ii = 0; ii < shouldMoveBucket.size(); ++ii) { - if (shouldMoveBucket.get(ii)) { - final long oldHashLocation = rehashLocations.get(ii); - // this needs to be promoted, because we moved it - overflowLocationsToFetch.set(startWritePosition++, oldHashLocation); - } else { - // we didn't move anything into the destination slot; so we need to promote its overflow - final long newEmptyHashLocation = rehashLocations.get(ii) + (tableSize >> 1); - overflowLocationsToFetch.set(endWritePosition++, newEmptyHashLocation); - } - } - } - - private void setOverflowLocationsToNull(long start, int count) { - for (int ii = 0; ii < count; ++ii) { - overflowLocationSource.set(start + ii, QueryConstants.NULL_INT); - } - } - - private void initializeRehashLocations(WritableLongChunk rehashLocations, int bucketsToAdd) { - rehashLocations.setSize(bucketsToAdd); - for (int ii = 0; ii < bucketsToAdd; ++ii) { - rehashLocations.set(ii, tableHashPivot + ii - (tableSize >> 1)); - } - } - - private void compactOverflowLocations(IntChunk overflowLocations, WritableLongChunk overflowLocationsToFetch) { - overflowLocationsToFetch.setSize(0); - for (int ii = 0; ii < 
overflowLocations.size(); ++ii) { - final int overflowLocation = overflowLocations.get(ii); - if (overflowLocation != QueryConstants.NULL_INT) { - overflowLocationsToFetch.add(overflowLocation); - } - } - } - - private void swapOverflowPointers(LongChunk tableLocationsChunk, LongChunk overflowLocationsToFetch) { - for (int ii = 0; ii < overflowLocationsToFetch.size(); ++ii) { - final long newLocation = tableLocationsChunk.get(ii); - final int existingOverflow = overflowLocationSource.getUnsafe(newLocation); - final long overflowLocation = overflowLocationsToFetch.get(ii); - overflowOverflowLocationSource.set(overflowLocation, existingOverflow); - overflowLocationSource.set(newLocation, (int)overflowLocation); - } - } - - // mixin allowUpdateWriteThroughState - // @WritableStateChunkType@ from \QWritableIntChunk\E - private void updateWriteThroughState(ResettableWritableIntChunk writeThroughState, long firstPosition, long expectedLastPosition) { - final long firstBackingChunkPosition = stateSource.resetWritableChunkToBackingStore(writeThroughState, firstPosition); - if (firstBackingChunkPosition != firstPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - if (firstBackingChunkPosition + writeThroughState.size() - 1 != expectedLastPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - } - // endmixin allowUpdateWriteThroughState - - private void updateWriteThroughOverflow(ResettableWritableIntChunk writeThroughOverflow, long firstPosition, long expectedLastPosition) { - final long firstBackingChunkPosition = overflowLocationSource.resetWritableChunkToBackingStore(writeThroughOverflow, firstPosition); - if (firstBackingChunkPosition != firstPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - if (firstBackingChunkPosition + writeThroughOverflow.size() - 1 != expectedLastPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - } - - // endmixin rehash - - private int allocateOverflowLocation() { - // mixin rehash - if (freeOverflowCount > 0) { - return freeOverflowLocations.getUnsafe(--freeOverflowCount); - } - // endmixin rehash - return nextOverflowLocation++; - } - - private static long updateWriteThroughChunks(ResettableWritableChunk[] writeThroughChunks, long currentHashLocation, ArrayBackedColumnSource[] sources) { - final long firstBackingChunkPosition = sources[0].resetWritableChunkToBackingStore(writeThroughChunks[0], currentHashLocation); - for (int jj = 1; jj < sources.length; ++jj) { - if (sources[jj].resetWritableChunkToBackingStore(writeThroughChunks[jj], currentHashLocation) != firstBackingChunkPosition) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - if (writeThroughChunks[jj].size() != writeThroughChunks[0].size()) { - throw new IllegalStateException("ArrayBackedColumnSources have different block sizes!"); - } - } - return firstBackingChunkPosition; - } - - private void flushWriteThrough(WritableIntChunk sourcePositions, Chunk[] sourceKeyChunks, WritableIntChunk destinationLocationPositionInWriteThrough, WritableChunk[] writeThroughChunks) { - if (sourcePositions.size() < 0) { - return; - } - for (int jj = 0; jj < keySources.length; ++jj) { - chunkCopiers[jj].permute(sourcePositions, sourceKeyChunks[jj], destinationLocationPositionInWriteThrough, writeThroughChunks[jj]); - } - sourcePositions.setSize(0); - 
destinationLocationPositionInWriteThrough.setSize(0); - } - - // mixin rehash - private void nullOverflowObjectSources(IntChunk locationsToNull) { - for (ObjectArraySource objectArraySource : overflowKeyColumnsToNull) { - for (int ii = 0; ii < locationsToNull.size(); ++ii) { - objectArraySource.set(locationsToNull.get(ii), null); - } - } - // region nullOverflowObjectSources - // endregion nullOverflowObjectSources - } - // endmixin rehash - - private void checkKeyEquality(WritableBooleanChunk equalValues, WritableChunk[] workingKeyChunks, Chunk[] sourceKeyChunks) { - for (int ii = 0; ii < sourceKeyChunks.length; ++ii) { - chunkEquals[ii].andEqual(workingKeyChunks[ii], sourceKeyChunks[ii], equalValues); - } - } - - private void checkLhsPermutedEquality(WritableIntChunk chunkPositionsForFetches, Chunk[] sourceKeyChunks, WritableChunk[] overflowKeyChunks, WritableBooleanChunk equalValues) { - chunkEquals[0].equalLhsPermuted(chunkPositionsForFetches, sourceKeyChunks[0], overflowKeyChunks[0], equalValues); - for (int ii = 1; ii < overflowKeySources.length; ++ii) { - chunkEquals[ii].andEqualLhsPermuted(chunkPositionsForFetches, sourceKeyChunks[ii], overflowKeyChunks[ii], equalValues); - } - } - - private void checkPairEquality(WritableIntChunk chunkPositionsToCheckForEquality, Chunk[] sourceKeyChunks, WritableBooleanChunk equalPairs) { - chunkEquals[0].equalPairs(chunkPositionsToCheckForEquality, sourceKeyChunks[0], equalPairs); - for (int ii = 1; ii < keyColumnCount; ++ii) { - chunkEquals[ii].andEqualPairs(chunkPositionsToCheckForEquality, sourceKeyChunks[ii], equalPairs); - } - } - - private void fillKeys(ColumnSource.FillContext[] fillContexts, WritableChunk[] keyChunks, WritableLongChunk tableLocationsChunk) { - fillKeys(keySources, fillContexts, keyChunks, tableLocationsChunk); - } - - private void fillOverflowKeys(ColumnSource.FillContext[] fillContexts, WritableChunk[] keyChunks, WritableLongChunk overflowLocationsChunk) { - fillKeys(overflowKeySources, fillContexts, keyChunks, overflowLocationsChunk); - } - - private static void fillKeys(ArrayBackedColumnSource[] keySources, ColumnSource.FillContext[] fillContexts, WritableChunk[] keyChunks, WritableLongChunk keyIndices) { - for (int ii = 0; ii < keySources.length; ++ii) { - keySources[ii].fillChunkUnordered(fillContexts[ii], keyChunks[ii], keyIndices); - } - } - - private void hashKeyChunks(WritableIntChunk hashChunk, Chunk[] sourceKeyChunks) { - chunkHashers[0].hashInitial(sourceKeyChunks[0], hashChunk); - for (int ii = 1; ii < sourceKeyChunks.length; ++ii) { - chunkHashers[ii].hashUpdate(sourceKeyChunks[ii], hashChunk); - } - } - - private void getKeyChunks(ColumnSource[] sources, ColumnSource.GetContext[] contexts, Chunk[] chunks, RowSequence rowSequence) { - for (int ii = 0; ii < chunks.length; ++ii) { - chunks[ii] = sources[ii].getChunk(contexts[ii], rowSequence); - } - } - - - // region probe wrappers - // endregion probe wrappers - - - private void convertHashToTableLocations(WritableIntChunk hashChunk, WritableLongChunk tablePositionsChunk) { - // mixin rehash - // NOTE that this mixin section is a bit ugly, we are spanning the two functions so that we can avoid using tableHashPivot and having the unused pivotPoint parameter - convertHashToTableLocations(hashChunk, tablePositionsChunk, tableHashPivot); - } - - private void convertHashToTableLocations(WritableIntChunk hashChunk, WritableLongChunk tablePositionsChunk, int pivotPoint) { - // endmixin rehash - - // turn hash codes into indices within our table - for (int ii = 0; ii 
< hashChunk.size(); ++ii) { - final int hash = hashChunk.get(ii); - // mixin rehash - final int location = hashToTableLocation(pivotPoint, hash); - // endmixin rehash - // altmixin rehash: final int location = hashToTableLocation(hash); - tablePositionsChunk.set(ii, location); - } - tablePositionsChunk.setSize(hashChunk.size()); - } - - private int hashToTableLocation( - // mixin rehash - int pivotPoint, - // endmixin rehash - int hash) { - // altmixin rehash: final \ - int location = hash & (tableSize - 1); - // mixin rehash - if (location >= pivotPoint) { - location -= (tableSize >> 1); - } - // endmixin rehash - return location; - } - - // region extraction functions - @Override - public ColumnSource[] getKeyHashTableSources() { - final WritableRowRedirection resultIndexToHashSlot = new IntColumnSourceWritableRowRedirection(outputPositionToHashSlot); - final ColumnSource[] keyHashTableSources = new ColumnSource[keyColumnCount]; - for (int kci = 0; kci < keyColumnCount; ++kci) { - // noinspection unchecked - keyHashTableSources[kci] = new RedirectedColumnSource(resultIndexToHashSlot, new HashTableColumnSource(keySources[kci], overflowKeySources[kci])); - } - return keyHashTableSources; - } - - @Override - public int findPositionForKey(Object key) { - int hash; - if (chunkHashers.length == 1) { - hash = chunkHashers[0].hashInitial(key); - } else { - final Object [] values = (Object[])key; - hash = chunkHashers[0].hashInitial(values[0]); - for (int ii = 1; ii < chunkHashers.length; ++ii) { - hash = chunkHashers[ii].hashUpdate(hash, values[ii]); - } - } - - final int location = hashToTableLocation(tableHashPivot, hash); - - final int positionValue = stateSource.getUnsafe(location); - if (positionValue == EMPTY_RIGHT_VALUE) { - return -1; - } - - if (checkKeyEquality(keySources, key, location)) { - return positionValue; - } - - int overflowLocation = overflowLocationSource.getUnsafe(location); - while (overflowLocation != QueryConstants.NULL_INT) { - if (checkKeyEquality(overflowKeySources, key, overflowLocation)) { - return overflowStateSource.getUnsafe(overflowLocation); - } - overflowLocation = overflowOverflowLocationSource.getUnsafe(overflowLocation); - } - - return -1; - } - - private boolean checkKeyEquality(ArrayBackedColumnSource[] keySources, Object key, int location) { - if (keySources.length == 1) { - return Objects.equals(key, keySources[0].get(location)); - } - final Object [] keyValues = (Object[]) key; - for (int ii = 0; ii < keySources.length; ++ii) { - if (!Objects.equals(keyValues[ii], keySources[ii].get(location))) { - return false; - } - } - return true; - } - // endregion extraction functions - - @NotNull - private static ColumnSource.FillContext[] makeFillContexts(ColumnSource[] keySources, final SharedContext sharedContext, int chunkSize) { - final ColumnSource.FillContext[] workingFillContexts = new ColumnSource.FillContext[keySources.length]; - for (int ii = 0; ii < keySources.length; ++ii) { - workingFillContexts[ii] = keySources[ii].makeFillContext(chunkSize, sharedContext); - } - return workingFillContexts; - } - - private static ColumnSource.GetContext[] makeGetContexts(ColumnSource [] sources, final SharedContext sharedState, int chunkSize) { - final ColumnSource.GetContext[] contexts = new ColumnSource.GetContext[sources.length]; - for (int ii = 0; ii < sources.length; ++ii) { - contexts[ii] = sources[ii].makeGetContext(chunkSize, sharedState); - } - return contexts; - } - - @NotNull - private WritableChunk[] getWritableKeyChunks(int chunkSize) { - 
//noinspection unchecked - final WritableChunk[] workingKeyChunks = new WritableChunk[keyChunkTypes.length]; - for (int ii = 0; ii < keyChunkTypes.length; ++ii) { - workingKeyChunks[ii] = keyChunkTypes[ii].makeWritableChunk(chunkSize); - } - return workingKeyChunks; - } - - @NotNull - private ResettableWritableChunk[] getResettableWritableKeyChunks() { - //noinspection unchecked - final ResettableWritableChunk[] workingKeyChunks = new ResettableWritableChunk[keyChunkTypes.length]; - for (int ii = 0; ii < keyChunkTypes.length; ++ii) { - workingKeyChunks[ii] = keyChunkTypes[ii].makeResettableWritableChunk(); - } - return workingKeyChunks; - } - - // region getStateValue - // endregion getStateValue - - // region overflowLocationToHashLocation - private static int overflowLocationToHashLocation(final int overflowSlot) { - return HashTableColumnSource.overflowLocationToHashLocation(overflowSlot); - } - - private static int hashLocationToOverflowLocation(final int hashLocation) { - return HashTableColumnSource.hashLocationToOverflowLocation(hashLocation); - } - - private static boolean isOverflowLocation(final long slot) { - return HashTableColumnSource.isOverflowLocation(slot); - } - - private static int chunkPositionToPendingState(final int position) { - return -position - 1; - } - - private static int pendingStateToChunkPosition(final int pendingState) { - return -pendingState - 1; - } - - private static boolean isPendingState(final int position) { - return position < 0; - } - // endregion overflowLocationToHashLocation - - - static int hashTableSize(long initialCapacity) { - return (int)Math.max(MINIMUM_INITIAL_HASH_SIZE, Math.min(MAX_TABLE_SIZE, Long.highestOneBit(initialCapacity) * 2)); - } - -} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedAggregationFactory.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedAggregationFactory.java index 7f01f9f7433..691d321b617 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedAggregationFactory.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedAggregationFactory.java @@ -24,23 +24,12 @@ public static void buildInsert(HasherConfig hasherConfig, CodeBlock.Builder b public static void buildInsertIncremental(HasherConfig hasherConfig, CodeBlock.Builder builder) { buildInsertCommon(hasherConfig, builder); builder.addStatement("outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation)"); - builder.addStatement("rowCountSource.set(outputPosition, 1L)"); } public static void probeFound(HasherConfig hasherConfig, boolean alternate, CodeBlock.Builder builder) { builder.addStatement("outputPositions.set(chunkPosition, outputPosition)"); } - public static void removeProbeFound(HasherConfig hasherConfig, boolean alternate, CodeBlock.Builder builder) { - probeFound(hasherConfig, alternate, builder); - - builder.addStatement("final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1)"); - builder.addStatement("Assert.gtZero(oldRowCount, \"oldRowCount\")"); - builder.beginControlFlow("if (oldRowCount == 1)"); - builder.addStatement("emptiedPositions.add(outputPosition)"); - builder.endControlFlow(); - } - public static void probeMissing(CodeBlock.Builder builder) { builder.addStatement("throw new IllegalStateException($S)", "Missing value in probe"); } @@ -49,26 +38,6 @@ static void buildFound(HasherConfig hasherConfig, boolean alternate, CodeBloc 
builder.addStatement("outputPositions.set(chunkPosition, outputPosition)"); } - private static void buildFoundIncremental(HasherConfig hasherConfig, boolean alternate, - CodeBlock.Builder builder) { - buildFound(hasherConfig, alternate, builder); - builder.addStatement("final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1)"); - } - - static void buildFoundIncrementalInitial(HasherConfig hasherConfig, boolean alternate, - CodeBlock.Builder builder) { - buildFoundIncremental(hasherConfig, alternate, builder); - builder.addStatement("Assert.gtZero(oldRowCount, \"oldRowCount\")"); - } - - static void buildFoundIncrementalUpdate(HasherConfig hasherConfig, boolean alternate, - CodeBlock.Builder builder) { - buildFoundIncremental(hasherConfig, alternate, builder); - builder.beginControlFlow("if (oldRowCount == 0)"); - builder.addStatement("reincarnatedPositions.add(outputPosition)"); - builder.endControlFlow(); - } - private static void buildInsertCommon(HasherConfig hasherConfig, CodeBlock.Builder builder) { builder.addStatement("outputPosition = nextOutputPosition.getAndIncrement()"); builder.addStatement("outputPositions.set(chunkPosition, outputPosition)"); @@ -84,7 +53,6 @@ static void incAggMoveMain(CodeBlock.Builder builder) { "outputPositionToHashSlot.set(currentStateValue, mainInsertMask | destinationTableLocation)"); } - @NotNull public static MethodSpec createFindPositionForKey(HasherConfig hasherConfig, ChunkType[] chunkTypes) { MethodSpec.Builder builder = MethodSpec.methodBuilder("findPositionForKey").addParameter(Object.class, "key") diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedHasherFactory.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedHasherFactory.java index 1b5a2b923fc..568a1fafa69 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedHasherFactory.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/TypedHasherFactory.java @@ -33,6 +33,7 @@ import io.deephaven.util.compare.CharComparisons; import org.apache.commons.lang3.mutable.MutableInt; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import javax.lang.model.element.Modifier; import java.lang.reflect.Constructor; @@ -103,26 +104,12 @@ public static HasherConfig hasherConfigForBase(Class baseClass) { final ClassName rowKeyType = ClassName.get(RowKeys.class); final ParameterizedTypeName emptiedChunkType = ParameterizedTypeName.get(ClassName.get(WritableIntChunk.class), rowKeyType); - final ParameterSpec emptiedPositions = ParameterSpec.builder(emptiedChunkType, "emptiedPositions").build(); - - builder.addProbe(new HasherConfig.ProbeSpec("doRemoveProbe", "outputPosition", - false, TypedAggregationFactory::removeProbeFound, - TypedAggregationFactory::probeMissing, emptiedPositions)); - builder.addProbe( - new HasherConfig.ProbeSpec("doModifyProbe", "outputPosition", false, - TypedAggregationFactory::probeFound, - TypedAggregationFactory::probeMissing)); - - builder.addBuild(new HasherConfig.BuildSpec("build", "outputPosition", - false, true, TypedAggregationFactory::buildFoundIncrementalInitial, - TypedAggregationFactory::buildInsertIncremental)); - - final ParameterSpec reincarnatedPositions = - ParameterSpec.builder(emptiedChunkType, "reincarnatedPositions").build(); - builder.addBuild( - new HasherConfig.BuildSpec("buildForUpdate", "outputPosition", - false, true, TypedAggregationFactory::buildFoundIncrementalUpdate, - 
TypedAggregationFactory::buildInsertIncremental, reincarnatedPositions)); + + builder.addProbe(new HasherConfig.ProbeSpec("probe", "outputPosition", false, + TypedAggregationFactory::probeFound, TypedAggregationFactory::probeMissing)); + + builder.addBuild(new HasherConfig.BuildSpec("build", "outputPosition", false, true, + TypedAggregationFactory::buildFound, TypedAggregationFactory::buildInsertIncremental)); } else if (baseClass.equals(StaticNaturalJoinStateManagerTypedBase.class)) { builder.classPrefix("StaticNaturalJoinHasher").packageGroup("naturaljoin").packageMiddle("staticopen") .openAddressedAlternate(false) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByte.java index bc4c0f565b2..0772e8a5449 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByte.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if 
(outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = 
alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteByte.java index 1ea8ac77e74..c9637a4a7f8 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteByte.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - 
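The buildForUpdate and doRemoveProbe bodies deleted from each generated hasher repeat the same bookkeeping: increment a per-output-position row count on build, decrement it on remove, and report positions that transition to or from zero. A minimal standalone sketch of that pattern follows, with hypothetical names (SlotRowCounts, onAdd, onRemove) in place of the generated rowCountSource fields, assuming counts fit in a plain long array.

final class SlotRowCounts {
    private long[] counts = new long[16];

    // a row was added to this output position; report slots that come back from empty
    void onAdd(final int outputPosition, final java.util.List<Integer> reincarnatedPositions) {
        if (outputPosition >= counts.length) {
            counts = java.util.Arrays.copyOf(counts, Math.max(counts.length * 2, outputPosition + 1));
        }
        final long oldRowCount = counts[outputPosition]++;
        if (oldRowCount == 0) {
            reincarnatedPositions.add(outputPosition);
        }
    }

    // a row was removed from this output position; report slots that just became empty
    void onRemove(final int outputPosition, final java.util.List<Integer> emptiedPositions) {
        final long oldRowCount = counts[outputPosition]--;
        if (oldRowCount <= 0) {
            throw new IllegalStateException("row count must be positive before a removal");
        }
        if (oldRowCount == 1) {
            emptiedPositions.add(outputPosition);
        }
    }
}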
final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int 
alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteChar.java index 69e94a90fba..d9d6223de90 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - 
Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if 
(eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteDouble.java index a90f731a86a..3a38c6910e0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - 
Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, 
Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteFloat.java index 44fee989c10..f477593cca0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import 
io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, 
outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteInt.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteInt.java index 59ac60ae777..eef7cdf9c56 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && 
eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - 
Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteLong.java index fe5a66637e8..e536d3eab3f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash 
= hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int 
alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteObject.java index dbc5d3bf827..90dbe74a612 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - 
Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if 
(eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteShort.java index 7cdbc12d496..fce710276e6 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherByteShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - 
Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, 
Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final byte k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ByteChunk keyChunk0 = sourceKeyChunks[0].asByteChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherChar.java index 20006a40075..c6a700f87f1 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherChar.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import 
io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - 
tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharByte.java index e2aceb4357a..b2cbeb6bee0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import 
io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - 
outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharChar.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharChar.java index 7ec6794d9ba..9ec16557224 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharChar.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - 
outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", 
firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharDouble.java index 7dd950b04b0..fffc1c78feb 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int 
firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = 
firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharFloat.java index c8e528daa09..7f4af99aba3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, 
"oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && 
eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharInt.java index e6b6a59d6a3..87c979d9ce1 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { 
alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk 
keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharLong.java index 9f04e78011c..c3a1e72fd28 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import 
io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - 
rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharObject.java index dd53f5ddee3..6cbad7f82c7 100644 --- 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if 
(!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharShort.java index e04759ff361..359b266903a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherCharShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - 
MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final char k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = 
alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final CharChunk keyChunk0 = sourceKeyChunks[0].asCharChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDouble.java index eb4d5483050..46864f2dcea 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDouble.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] 
sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation 
= firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleByte.java index d70b4e92115..06f5e3805d2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = 
nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), 
k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleChar.java index c81731a084f..a3bda5b4b14 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = 
alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = 
sourceKeyChunks[0].asDoubleChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleDouble.java index f2b9ea9cf75..3757bee8d05 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleDouble.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import 
io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - 
break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleFloat.java index c919606b9bf..292002a0fd0 100644 --- 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if 
(!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleInt.java index d9e0e7b8c31..421e64894a5 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = 
firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = 
alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleLong.java index a9789c2d1fd..774955e030c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = 
nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), 
k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleObject.java index e248f9f610c..25e726ee4ea 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { 
alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - 
final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleShort.java index 2dff13acb00..f3cd7ff5fea 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherDoubleShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import 
io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - 
outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final double k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final DoubleChunk keyChunk0 = sourceKeyChunks[0].asDoubleChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloat.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloat.java index dc4368157a5..c3a4d449aaf 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloat.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = 
alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatByte.java index 0480e438a9f..32d1fade69a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == 
EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - 
alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatChar.java index a92c4c32175..0eb07bbccfc 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; 
++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = 
hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatDouble.java index 318efd597db..5b7433a582e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { 
outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int 
outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatFloat.java index 7482d58bfab..bfe3296ac80 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatFloat.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount 
= rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - 
protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatInt.java index e50486c8f0f..f1aae7905bb 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import 
io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, 
outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatLong.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatLong.java index f8366f8d604..409ce852d53 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && 
eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - 
Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatObject.java index 81b2b58fb4b..79d3a3d507c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final Object k1 = 
keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if 
(firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatShort.java index b7f6ce2929e..b0fe924f800 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherFloatShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount 
= rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final float k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) 
!= EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final FloatChunk keyChunk0 = sourceKeyChunks[0].asFloatChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherInt.java index 304edb41516..bd028c56dab 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherInt.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = 
alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = 
firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntByte.java index ed9d33ba88c..5f121014049 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected 
void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntChar.java index 8ddede36862..7afb789f677 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import 
io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, 
outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntDouble.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntDouble.java index a96c7f15c93..03be2c51c3e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && 
eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - 
Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntFloat.java index 624b42f23ec..0b25e88f31f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int 
hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int 
alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntInt.java index 0826da629b6..5e60c7f1588 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntInt.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = 
nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - 
outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntLong.java index 167d7d304bb..1e688aa87aa 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = 
alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = 
sourceKeyChunks[0].asIntChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntObject.java index 4b8b7040c7e..d4d73ebe543 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import 
io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - 
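The buildForUpdate and doRemoveProbe bodies being deleted here carried one extra responsibility on top of key lookup: they maintained a per-slot row count (rowCountSource), flagging a slot as reincarnated when its count rose from 0 and as emptied when it fell to 0, so that downstream aggregation processing could react to states appearing and disappearing. A minimal, self-contained sketch of that reference-counting idea follows; SlotCounts and its methods are illustrative names, not classes from this change.

    import java.util.ArrayList;
    import java.util.List;

    // Per-slot row reference counting: a slot whose count rises from 0 is
    // "reincarnated", one whose count falls to 0 is "emptied".
    final class SlotCounts {
        private final long[] counts;

        SlotCounts(int numSlots) {
            counts = new long[numSlots];
        }

        /** Record added rows; returns slots that came back to life. */
        List<Integer> addRows(int[] slots) {
            final List<Integer> reincarnated = new ArrayList<>();
            for (int slot : slots) {
                if (counts[slot]++ == 0) {
                    reincarnated.add(slot);
                }
            }
            return reincarnated;
        }

        /** Record removed rows; returns slots that became empty. */
        List<Integer> removeRows(int[] slots) {
            final List<Integer> emptied = new ArrayList<>();
            for (int slot : slots) {
                if (counts[slot] <= 0) {
                    throw new IllegalStateException("row count underflow at slot " + slot);
                }
                if (--counts[slot] == 0) {
                    emptied.add(slot);
                }
            }
            return emptied;
        }
    }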
rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntShort.java index 196d13a2147..4506fd94e30 100644 --- 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherIntShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final int k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - 
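Each specialization hashes its composite key by combining per-column hashes (via the per-type ChunkHasher classes imported at the top of every file) into a single int before mapping it to a table location. The sketch below shows only that general shape of composite hashing and slot selection; the mixing constant and helper names are assumptions made for the example rather than values taken from the generated code.

    // Composite-key hashing sketch: each key column contributes a per-type hash
    // and the results are mixed into one table hash. The constant and mixing
    // below are made up for the example, not taken from this diff.
    final class CompositeHashSketch {
        private static final int MIX = 0x9E3779B9; // assumed golden-ratio constant

        static int hash(long k0, int k1) {
            int h = Long.hashCode(k0);          // column 0 contribution
            h = h * MIX + Integer.hashCode(k1); // fold in column 1
            return h ^ (h >>> 16);              // final spread before masking
        }

        /** Map a hash to a slot in a power-of-two sized table. */
        static int slot(int hash, int tableSizeMask) {
            return hash & tableSizeMask;
        }
    }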
throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final IntChunk keyChunk0 = sourceKeyChunks[0].asIntChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLong.java index 83e035ee197..666d1013c61 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLong.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableLongArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - 
outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new 
IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongByte.java index cedc112f9f8..f0209a0650f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - 
int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != 
EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongChar.java index 765fc1c8f83..0748d2cc334 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void 
build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long 
oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongDouble.java index 9ce9b0d095d..81535ad0839 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void 
build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final DoubleChunk keyChunk1 = 
sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongFloat.java index c296d811623..bf3a0e482aa 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import 
io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } 
else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongInt.java index d4695e75e4d..a023272c293 100644 --- 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - 
throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongLong.java index 7b2c7b08832..c8cac1c977f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongLong.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableLongArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = 
mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if 
(eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongObject.java index e2361ecbda8..13499ea8da0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableLongArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, 
Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongShort.java index 8472b0ea0a4..315cfc0d7ff 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherLongShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableLongArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence 
rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final 
int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final long k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final LongChunk keyChunk0 = sourceKeyChunks[0].asLongChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObject.java index 5614939eb1f..40df04f989c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObject.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import 
io.deephaven.engine.table.impl.sources.immutable.ImmutableObjectArraySource; @@ -60,8 +58,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -74,12 +70,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -89,112 +82,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - 
- protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectByte.java index 6b6c9f046fd..6fefe2ed297 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import 
io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - 
} else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectChar.java index eab30e68083..4c716646748 100644 --- 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = 
rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if 
(!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectDouble.java index 92c1549bedd..4c6a5382a11 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int 
tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = 
alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectFloat.java index d269e2d2fe8..67a132cce1b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation 
= nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && 
eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectInt.java index 06e999e4e0a..dc087128109 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } 
else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - 
final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectLong.java index 4d3c1901d65..d329413349e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import 
io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableLongArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - 
outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectObject.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectObject.java index b107b9e0acf..0c49e5cb3ba 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectObject.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableObjectArraySource; @@ -68,8 +66,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -83,12 +79,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -98,117 +91,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && 
eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - 
Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectShort.java index 4fe37e65490..0e66907078b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherObjectShort.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableObjectArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final short k1 = 
keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final Object k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if 
(firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ObjectChunk keyChunk0 = sourceKeyChunks[0].asObjectChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShort.java index 266d7ad6332..807e9bb5cb3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShort.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableShortArraySource; @@ -62,8 +60,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -76,12 +72,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -91,112 +84,7 @@ protected 
void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final int hash = hash(k0); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = 
hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final int chunkSize = keyChunk0.size(); for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortByte.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortByte.java index 3d8f4edde1c..c0f6a93e26c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortByte.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortByte.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.ByteChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ByteChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableByteArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long 
oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final byte k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = 
mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final ByteChunk keyChunk1 = sourceKeyChunks[1].asByteChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortChar.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortChar.java index 3dcf5680485..8df06410271 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortChar.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortChar.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.CharChunk; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.CharChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableCharArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long 
oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } 
- } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final char k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final CharChunk keyChunk1 = sourceKeyChunks[1].asCharChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortDouble.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortDouble.java index 94645069ac1..f08dbc36e8f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortDouble.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortDouble.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.DoubleChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.DoubleChunkHasher; import 
io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableDoubleArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - 
outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final double k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final DoubleChunk keyChunk1 = sourceKeyChunks[1].asDoubleChunk(); final int chunkSize = keyChunk0.size(); diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortFloat.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortFloat.java index 4d478cbd3ed..976f1ec9298 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortFloat.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortFloat.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.FloatChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.FloatChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableFloatArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if 
(outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final float k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - 
emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final FloatChunk keyChunk1 = sourceKeyChunks[1].asFloatChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortInt.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortInt.java index 2c980d20a13..cfe361c9e11 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortInt.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortInt.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.IntChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.IntChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableIntArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int 
chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final int k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final 
int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final IntChunk keyChunk1 = sourceKeyChunks[1].asIntChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortLong.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortLong.java index bb1af7d7034..528067cee79 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortLong.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortLong.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.LongChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.LongChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableLongArraySource; @@ -75,8 +73,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -90,12 +86,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && 
eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -105,117 +98,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final long k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int 
tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final LongChunk keyChunk1 = sourceKeyChunks[1].asLongChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortObject.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortObject.java index 06f966caffb..00cbe84fd4e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortObject.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortObject.java @@ -10,12 +10,10 @@ import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ObjectChunk; import io.deephaven.chunk.ShortChunk; -import io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ObjectChunkHasher; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableObjectArraySource; @@ -74,8 +72,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && 
eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -89,12 +85,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -104,117 +97,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); - outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation 
= nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final Object k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final ObjectChunk keyChunk1 = sourceKeyChunks[1].asObjectChunk(); final int chunkSize = keyChunk0.size(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortShort.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortShort.java index 689294ffd97..805d7e72c6b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortShort.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/by/typed/incopenagg/gen/IncrementalAggOpenHasherShortShort.java @@ -8,11 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.chunk.Chunk; import io.deephaven.chunk.ShortChunk; -import 
io.deephaven.chunk.WritableIntChunk; import io.deephaven.chunk.attributes.Values; import io.deephaven.chunk.util.hashing.ShortChunkHasher; import io.deephaven.engine.rowset.RowSequence; -import io.deephaven.engine.rowset.chunkattributes.RowKeys; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.table.impl.by.IncrementalChunkedOperatorAggregationStateManagerOpenAddressedBase; import io.deephaven.engine.table.impl.sources.immutable.ImmutableShortArraySource; @@ -70,8 +68,6 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { break; } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break MAIN_SEARCH; } else { alternateTableLocation = alternateNextTableLocation(alternateTableLocation); @@ -85,12 +81,9 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { outputPositions.set(chunkPosition, outputPosition); mainOutputPosition.set(tableLocation, outputPosition); outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); break; } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - Assert.gtZero(oldRowCount, "oldRowCount"); break; } else { tableLocation = nextTableLocation(tableLocation); @@ -100,117 +93,7 @@ protected void build(RowSequence rowSequence, Chunk[] sourceKeyChunks) { } } - protected void buildForUpdate(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk reincarnatedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - int tableLocation = firstTableLocation; - MAIN_SEARCH: while (true) { - int outputPosition = mainOutputPosition.getUnsafe(tableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - int alternateTableLocation = firstAlternateTableLocation; - while (alternateTableLocation < rehashPointer) { - outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation); - if (outputPosition == EMPTY_OUTPUT_POSITION) { - break; - } else if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break MAIN_SEARCH; - } else { - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - numEntries++; - mainKeySource0.set(tableLocation, k0); - mainKeySource1.set(tableLocation, k1); 
- outputPosition = nextOutputPosition.getAndIncrement(); - outputPositions.set(chunkPosition, outputPosition); - mainOutputPosition.set(tableLocation, outputPosition); - outputPositionToHashSlot.set(outputPosition, mainInsertMask | tableLocation); - rowCountSource.set(outputPosition, 1L); - break; - } else if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, 1); - if (oldRowCount == 0) { - reincarnatedPositions.add(outputPosition); - } - break; - } else { - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - } - } - } - - protected void doRemoveProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks, - WritableIntChunk emptiedPositions) { - final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); - final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); - final int chunkSize = keyChunk0.size(); - for (int chunkPosition = 0; chunkPosition < chunkSize; ++chunkPosition) { - final short k0 = keyChunk0.get(chunkPosition); - final short k1 = keyChunk1.get(chunkPosition); - final int hash = hash(k0, k1); - final int firstTableLocation = hashToTableLocation(hash); - boolean found = false; - int tableLocation = firstTableLocation; - int outputPosition; - while ((outputPosition = mainOutputPosition.getUnsafe(tableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(mainKeySource0.getUnsafe(tableLocation), k0) && eq(mainKeySource1.getUnsafe(tableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - found = true; - break; - } - tableLocation = nextTableLocation(tableLocation); - Assert.neq(tableLocation, "tableLocation", firstTableLocation, "firstTableLocation"); - } - if (!found) { - final int firstAlternateTableLocation = hashToTableLocationAlternate(hash); - boolean alternateFound = false; - if (firstAlternateTableLocation < rehashPointer) { - int alternateTableLocation = firstAlternateTableLocation; - while ((outputPosition = alternateOutputPosition.getUnsafe(alternateTableLocation)) != EMPTY_OUTPUT_POSITION) { - if (eq(alternateKeySource0.getUnsafe(alternateTableLocation), k0) && eq(alternateKeySource1.getUnsafe(alternateTableLocation), k1)) { - outputPositions.set(chunkPosition, outputPosition); - final long oldRowCount = rowCountSource.getAndAddUnsafe(outputPosition, -1); - Assert.gtZero(oldRowCount, "oldRowCount"); - if (oldRowCount == 1) { - emptiedPositions.add(outputPosition); - } - alternateFound = true; - break; - } - alternateTableLocation = alternateNextTableLocation(alternateTableLocation); - Assert.neq(alternateTableLocation, "alternateTableLocation", firstAlternateTableLocation, "firstAlternateTableLocation"); - } - } - if (!alternateFound) { - throw new IllegalStateException("Missing value in probe"); - } - } - } - } - - protected void doModifyProbe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { + protected void probe(RowSequence rowSequence, Chunk[] sourceKeyChunks) { final ShortChunk keyChunk0 = sourceKeyChunks[0].asShortChunk(); final ShortChunk keyChunk1 = sourceKeyChunks[1].asShortChunk(); final int chunkSize = keyChunk0.size(); diff --git 
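The hunks above collapse the generated hashers' buildForUpdate/doRemoveProbe/doModifyProbe variants into plain build and probe methods and drop the per-slot rowCountSource bookkeeping along with its Assert checks; row-count tracking evidently moves out of the generated code. For readers who do not want to trace the generated boilerplate, the sketch below is a hypothetical, stripped-down illustration of the two-table lookup those probe methods perform: hash to a main-table slot, probe linearly, then fall back to the not-yet-rehashed alternate slots below rehashPointer. Names echo the generated sources, but this is not the real state manager and it handles only a single short key column.

// Hypothetical, simplified illustration of the probe pattern in the generated hashers above.
final class ProbeSketch {
    static final int EMPTY_OUTPUT_POSITION = -1;

    private final short[] mainKeySource0;
    private final int[] mainOutputPosition;
    private final short[] alternateKeySource0;
    private final int[] alternateOutputPosition;
    // Alternate slots below this index have not yet been migrated by the in-flight rehash.
    private final int rehashPointer;

    ProbeSketch(short[] mainKeys, int[] mainPositions, short[] alternateKeys,
            int[] alternatePositions, int rehashPointer) {
        this.mainKeySource0 = mainKeys;
        this.mainOutputPosition = mainPositions;
        this.alternateKeySource0 = alternateKeys;
        this.alternateOutputPosition = alternatePositions;
        this.rehashPointer = rehashPointer;
    }

    // Table sizes are assumed to be powers of two, so masking stands in for modulo.
    private int nextTableLocation(int tableLocation) {
        return (tableLocation + 1) & (mainOutputPosition.length - 1);
    }

    private int alternateNextTableLocation(int tableLocation) {
        return (tableLocation + 1) & (alternateOutputPosition.length - 1);
    }

    // Returns the output position for k0, or EMPTY_OUTPUT_POSITION if the key is absent.
    int probe(short k0, int hash) {
        int tableLocation = hash & (mainOutputPosition.length - 1);
        int outputPosition;
        while ((outputPosition = mainOutputPosition[tableLocation]) != EMPTY_OUTPUT_POSITION) {
            if (mainKeySource0[tableLocation] == k0) {
                return outputPosition;
            }
            tableLocation = nextTableLocation(tableLocation);
        }
        // Not found in the main table; consult the alternate slots left from an in-flight rehash.
        int alternateTableLocation = hash & (alternateOutputPosition.length - 1);
        while (alternateTableLocation < rehashPointer
                && (outputPosition = alternateOutputPosition[alternateTableLocation]) != EMPTY_OUTPUT_POSITION) {
            if (alternateKeySource0[alternateTableLocation] == k0) {
                return outputPosition;
            }
            alternateTableLocation = alternateNextTableLocation(alternateTableLocation);
        }
        return EMPTY_OUTPUT_POSITION;
    }
}

In the real generated probe used for removes, a key that is found in neither table is an error (the code throws IllegalStateException), whereas the simplified sketch just reports absence.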
a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java index fa2ca627268..e054cd2f56e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java @@ -25,6 +25,7 @@ import io.deephaven.engine.table.impl.sources.NullValueColumnSource; import io.deephaven.engine.table.impl.sources.UnionSourceManager; import io.deephaven.engine.table.iterators.ObjectColumnIterator; +import io.deephaven.engine.updategraph.ConcurrentMethod; import io.deephaven.engine.updategraph.UpdateGraphProcessor; import io.deephaven.util.SafeCloseable; import org.apache.commons.lang3.mutable.MutableInt; @@ -93,36 +94,43 @@ public String toString() { return "PartitionedTable for " + table.getDescription(); } + @ConcurrentMethod @Override public Table table() { return table; } + @ConcurrentMethod @Override public Set keyColumnNames() { return keyColumnNames; } + @ConcurrentMethod @Override public boolean uniqueKeys() { return uniqueKeys; } + @ConcurrentMethod @Override public String constituentColumnName() { return constituentColumnName; } + @ConcurrentMethod @Override public TableDefinition constituentDefinition() { return constituentDefinition; } + @ConcurrentMethod @Override public boolean constituentChangesPermitted() { return constituentChangesPermitted; } + @ConcurrentMethod @Override public PartitionedTable.Proxy proxy(final boolean requireMatchingKeys, final boolean sanityCheckJoinOperations) { return PartitionedTableProxyImpl.of(this, requireMatchingKeys, sanityCheckJoinOperations); @@ -187,6 +195,7 @@ private Map computeSharedAttributes(@NotNull final Iterator filters) { final WhereFilter[] whereFilters = WhereFilter.from(filters); @@ -208,6 +217,7 @@ public PartitionedTableImpl filter(@NotNull final Collection f false); } + @ConcurrentMethod @Override public PartitionedTable sort(@NotNull final Collection sortColumns) { final boolean invalidSortColumn = sortColumns.stream() @@ -227,6 +237,7 @@ public PartitionedTable sort(@NotNull final Collection sortColumns) false); } + @ConcurrentMethod @Override public PartitionedTableImpl transform(@NotNull final UnaryOperator<Table>
    transformer) { final Table resultTable; @@ -302,6 +313,7 @@ public PartitionedTableImpl partitionedTransform( // TODO (https://github.com/deephaven/deephaven-core/issues/2368): Consider adding transformWithKeys support + @ConcurrentMethod @Override public Table constituentFor(@NotNull final Object... keyColumnValues) { if (keyColumnValues.length != keyColumnNames.size()) { @@ -334,6 +346,7 @@ public Table constituentFor(@NotNull final Object... keyColumnValues) { } } + @ConcurrentMethod @Override public Table[] constituents() { final LivenessManager enclosingLivenessManager = LivenessScopeStack.peek(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java index 0568476fa6c..914d877cef9 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java @@ -649,18 +649,23 @@ public PartitionedTable.Proxy aggBy(Aggregation aggregation) { } @Override - public PartitionedTable.Proxy aggBy(Aggregation aggregation, String... groupByColumns) { - return basicTransform(ct -> ct.aggBy(aggregation, groupByColumns)); + public PartitionedTable.Proxy aggBy(Collection aggregations) { + return basicTransform(ct -> ct.aggBy(aggregations)); } @Override - public PartitionedTable.Proxy aggBy(Aggregation aggregation, Collection groupByColumns) { + public PartitionedTable.Proxy aggBy(Collection aggregations, boolean preserveEmpty) { + return basicTransform(ct -> ct.aggBy(aggregations, preserveEmpty)); + } + + @Override + public PartitionedTable.Proxy aggBy(Aggregation aggregation, String... 
groupByColumns) { return basicTransform(ct -> ct.aggBy(aggregation, groupByColumns)); } @Override - public PartitionedTable.Proxy aggBy(Collection aggregations) { - return basicTransform(ct -> ct.aggBy(aggregations)); + public PartitionedTable.Proxy aggBy(Aggregation aggregation, Collection groupByColumns) { + return basicTransform(ct -> ct.aggBy(aggregation, groupByColumns)); } @Override @@ -674,6 +679,13 @@ public PartitionedTable.Proxy aggBy(Collection aggregatio return basicTransform(ct -> ct.aggBy(aggregations, groupByColumns)); } + @Override + public PartitionedTable.Proxy aggBy(Collection aggregations, boolean preserveEmpty, + TableOperations initialGroups, Collection groupByColumns) { + return complexTransform(initialGroups, (ct, ot) -> ct.aggBy(aggregations, preserveEmpty, ot, groupByColumns), + null); + } + @Override public PartitionedTable.Proxy updateBy(UpdateByOperation operation) { return basicTransform(ct -> ct.updateBy(operation)); diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java index 5c1b23b94f0..632541f4ef0 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java @@ -32,6 +32,7 @@ import java.util.*; import java.util.function.Function; +import java.util.stream.IntStream; import java.util.stream.LongStream; import org.apache.commons.lang3.mutable.MutableLong; @@ -89,29 +90,35 @@ public void testMergeSimple() { } public void testMergePopulate() { - // TODO (https://github.com/deephaven/deephaven-core/issues/2416): Re-implement once populate keys is replaced - /* - * final QueryTable queryTable = TstUtils.testRefreshingTable(i(1, 2, 4, 6).toTracking(), c("Sym", "aa", "bb", - * "aa", "bb"), c("intCol", 10, 20, 40, 60), c("doubleCol", 0.1, 0.2, 0.4, 0.6)); - * - * final Table withK = queryTable.update("K=k"); - * - * final PartitionedTable partitionedTable = withK.partitionBy("Sym"); partitionedTable.populateKeys("cc", - * "dd"); - * - * final Table merged = partitionedTable.merge(); final Table mergedByK = merged.sort("K"); - * - * if (printTableUpdates) { TableTools.show(withK); TableTools.show(mergedByK); } - * - * assertEquals("", TableTools.diff(mergedByK, withK, 10)); - * - * UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { addToTable(queryTable, i(3, 9), c("Sym", "cc", - * "cc"), c("intCol", 30, 90), c("doubleCol", 2.3, 2.9)); queryTable.notifyListeners(i(3, 9), i(), i()); }); - * - * if (printTableUpdates) { TableTools.show(withK); TableTools.show(mergedByK); } - * - * assertEquals("", TableTools.diff(mergedByK, withK, 10)); - */ + final QueryTable queryTable = TstUtils.testRefreshingTable(i(1, 2, 4, 6).toTracking(), + c("Sym", "aa", "bb", "aa", "bb"), c("intCol", 10, 20, 40, 60), c("doubleCol", 0.1, 0.2, 0.4, 0.6)); + + final Table withK = queryTable.update("K=k"); + + final QueryTable keyTable = TstUtils.testTable(c("Sym", "cc", "dd")); + final PartitionedTable partitionedTable = withK.partitionedAggBy(List.of(), true, keyTable, "Sym"); + + final Table merged = partitionedTable.merge(); + final Table mergedByK = merged.sort("K"); + + if (printTableUpdates) { + TableTools.show(withK); + TableTools.show(mergedByK); + } + + assertEquals("", TableTools.diff(mergedByK, withK, 10)); + + UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { + addToTable(queryTable, i(3, 9), c("Sym", "cc", "cc"), 
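The rewritten testMergePopulate above seeds partitions from a key table via partitionedAggBy(List.of(), true, keyTable, "Sym") rather than the removed populateKeys(...) call. Below is a minimal sketch of that pattern, assuming the partitionedAggBy(aggregations, preserveEmpty, initialGroups, keyColumnNames) signature exercised by the test and a running Deephaven engine context; the table contents and class name are hypothetical.

// Hedged sketch: pre-seeding partition keys with an initial-groups key table.
import java.util.List;
import io.deephaven.engine.table.PartitionedTable;
import io.deephaven.engine.table.Table;
import io.deephaven.engine.util.TableTools;

public class PartitionedAggByExample {
    public static void main(String[] args) {
        final Table source = TableTools.newTable(
                TableTools.stringCol("Sym", "aa", "bb", "aa"),
                TableTools.intCol("intCol", 10, 20, 40));
        // Key table seeds constituents for "cc" and "dd" even though no source rows exist yet.
        final Table keyTable = TableTools.newTable(TableTools.stringCol("Sym", "cc", "dd"));
        final PartitionedTable byAllSyms = source.partitionedAggBy(List.of(), true, keyTable, "Sym");
        // Merging the constituents back should reproduce the source rows.
        TableTools.show(byAllSyms.merge());
    }
}

With preserveEmpty set to true, the seeded constituents exist immediately (initially empty), so rows later added for those keys flow into existing constituents; that is what lets the rewritten test add "cc" rows inside the unit-test cycle and still diff cleanly against the source.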
c("intCol", 30, 90), c("doubleCol", 2.3, 2.9)); + queryTable.notifyListeners(i(3, 9), i(), i()); + }); + + if (printTableUpdates) { + TableTools.show(withK); + TableTools.show(mergedByK); + } + + assertEquals("", TableTools.diff(mergedByK, withK, 10)); } public void testMergeIncremental() { @@ -133,16 +140,18 @@ private void testMergeIncremental(int seed) { new TstUtils.IntGenerator(0, 20), new TstUtils.DoubleGenerator(0, 100))); - // TODO (https://github.com/deephaven/deephaven-core/issues/2416): Re-add key initialization final EvalNugget en[] = new EvalNugget[] { new EvalNugget() { public Table e() { - return table.partitionBy("Sym").merge().sort("Sym"); + return table.partitionedAggBy(List.of(), true, testTable(c("Sym", syms)), "Sym") + .merge().sort("Sym"); } }, new EvalNugget() { public Table e() { - return table.partitionBy("intCol").merge().sort("intCol"); + return table.partitionedAggBy(List.of(), true, + testTable(intCol("intCol", IntStream.rangeClosed(0, 20).toArray())), "intCol") + .merge().sort("intCol"); } }, }; @@ -195,21 +204,19 @@ public void testProxy() { new TstUtils.SetGenerator<>("aa", "bb", "cc", "dd"), new TstUtils.IntGenerator(100, 200))); - final PartitionedTable leftPT = withK.partitionBy("Sym"); - // TODO (https://github.com/deephaven/deephaven-core/issues/2416): Re-add key initialization - // leftPT.populateKeys("aa", "bb", "cc", "dd"); + final PartitionedTable leftPT = + withK.partitionedAggBy(List.of(), true, testTable(c("Sym", "aa", "bb", "cc", "dd")), "Sym"); final PartitionedTable.Proxy leftProxy = leftPT.proxy(false, false); - final PartitionedTable rightPT = rightTable.partitionBy("Sym"); - // TODO (https://github.com/deephaven/deephaven-core/issues/2416): Re-add key initialization - // rightPT.populateKeys("aa", "bb", "cc", "dd"); + final PartitionedTable rightPT = + rightTable.partitionedAggBy(List.of(), true, testTable(c("Sym", "aa", "bb", "cc", "dd")), "Sym"); final PartitionedTable.Proxy rightProxy = rightPT.proxy(false, false); final EvalNuggetInterface[] en = new EvalNuggetInterface[] { new EvalNugget() { public Table e() { - return table.update("K=Indices").partitionBy("Sym") - // .populateKeys("aa", "bb", "cc", "dd") + return table.update("K=Indices") + .partitionedAggBy(List.of(), true, testTable(c("Sym", "aa", "bb", "cc", "dd")), "Sym") .proxy(false, false) .update("K2=Indices*2") .select("K", "K2", "Half=doubleCol/2", "Sq=doubleCol*doubleCol", diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableAggregationTest.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableAggregationTest.java index e4f247c628e..dae6a527b27 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableAggregationTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableAggregationTest.java @@ -6,11 +6,13 @@ import io.deephaven.api.ColumnName; import io.deephaven.api.Selectable; import io.deephaven.api.agg.Aggregation; +import io.deephaven.api.agg.Count; import io.deephaven.api.agg.spec.AggSpec; import io.deephaven.datastructures.util.CollectionUtil; import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.rowset.RowSetFactory; import io.deephaven.engine.rowset.RowSetShiftData; +import io.deephaven.engine.rowset.TrackingWritableRowSet; import io.deephaven.engine.table.*; import io.deephaven.engine.table.impl.indexer.RowSetIndexer; import io.deephaven.qst.table.AggregateAllByTable; @@ -62,6 +64,7 @@ import static io.deephaven.api.agg.spec.AggSpec.percentile; import 
static io.deephaven.engine.util.TableTools.*; import static io.deephaven.engine.table.impl.TstUtils.*; +import static io.deephaven.util.QueryConstants.*; @Category(OutOfBandTest.class) public class QueryTableAggregationTest { @@ -124,7 +127,7 @@ private static Table individualStaticByTest(@NotNull final Table input, final Table aggregatedInput = ChunkedOperatorAggregationHelper.aggregation( aggregationControl == null ? AggregationControl.DEFAULT : aggregationControl, makeGroupByACF(adjustedInput, keyColumns), - (QueryTable) adjustedInput, ColumnName.from(keyColumns)); + (QueryTable) adjustedInput, false, null, ColumnName.from(keyColumns)); actualKeys = keyColumns.length == 0 ? aggregatedInput.dropColumns(aggregatedInput.getDefinition().getColumnNamesArray()) : aggregatedInput.view(keyColumns); @@ -236,10 +239,11 @@ private IncrementalFirstStaticAfterByResultSupplier(@NotNull final AggregationCo public final Table get() { if (firstTime.compareAndSet(true, false)) { return ChunkedOperatorAggregationHelper - .aggregation(control, acf, input, ColumnName.from(columns)).sort(columns); + .aggregation(control, acf, input, false, null, ColumnName.from(columns)).sort(columns); } return ChunkedOperatorAggregationHelper - .aggregation(control, acf, (QueryTable) input.silent(), ColumnName.from(columns)).sort(columns); + .aggregation(control, acf, (QueryTable) input.silent(), false, null, ColumnName.from(columns)) + .sort(columns); } } @@ -1687,7 +1691,7 @@ public void testAbsSumBySimple() { BigInteger expected = BigInteger.valueOf(6); TestCase.assertEquals(expected, absSum); TestCase.assertEquals(expected.doubleValue(), absSumDouble); - TestCase.assertEquals(QueryConstants.NULL_LONG, result.getColumn("BoolCol").getLong(0)); + TestCase.assertEquals(NULL_LONG, result.getColumn("BoolCol").getLong(0)); UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { TstUtils.addToTable(table, i(8), col("BigI", BigInteger.valueOf(5)), col("DoubleCol", 5.0), @@ -1766,14 +1770,14 @@ public void testAbsSumBySimple() { @Test public void testAbsSumByNull() { final QueryTable table = TstUtils.testRefreshingTable(i(2).toTracking(), - intCol("IntCol", QueryConstants.NULL_INT), + intCol("IntCol", NULL_INT), floatCol("FloatCol", QueryConstants.NULL_FLOAT)); final Table result = table.absSumBy(); TableTools.show(result); TestCase.assertEquals(1, result.size()); long absSum = result.getColumn("IntCol").getLong(0); - TestCase.assertEquals(QueryConstants.NULL_LONG, absSum); + TestCase.assertEquals(NULL_LONG, absSum); float absSumF = result.getColumn("FloatCol").getFloat(0); TestCase.assertEquals(QueryConstants.NULL_FLOAT, absSumF); @@ -1794,14 +1798,14 @@ public void testAbsSumByNull() { show(result); absSum = result.getColumn("IntCol").getLong(0); absSumF = result.getColumn("FloatCol").getFloat(0); - TestCase.assertEquals(QueryConstants.NULL_LONG, absSum); + TestCase.assertEquals(NULL_LONG, absSum); TestCase.assertEquals(QueryConstants.NULL_FLOAT, absSumF); } @Test public void testAvgInfinities() { final QueryTable table = TstUtils.testRefreshingTable(i(2).toTracking(), - intCol("IntCol", QueryConstants.NULL_INT), + intCol("IntCol", NULL_INT), floatCol("FloatCol", QueryConstants.NULL_FLOAT)); final Table result = table.avgBy(); @@ -1879,7 +1883,7 @@ public void testAvgInfinities() { @Test public void testVarInfinities() { final QueryTable table = TstUtils.testRefreshingTable(i(2).toTracking(), - intCol("IntCol", QueryConstants.NULL_INT), + intCol("IntCol", NULL_INT), floatCol("FloatCol", QueryConstants.NULL_FLOAT)); final 
Table result = table.varBy(); @@ -3585,4 +3589,111 @@ public void testIds7553() { final Table prevResult = prevTableColumnSources(result); assertTableEquals(result, prevResult); } + + @Test + public void testInitialGroupsOrdering() { + // Tests bucketed addition for static tables and static initial groups + + final Table data = testTable(c("S", "A", "B", "C", "D"), c("I", 10, 20, 30, 40)); + final Table distinct = data.selectDistinct(); + assertTableEquals(data, distinct); + + final Table reversed = data.reverse(); + final Table initializedDistinct = + data.aggBy(List.of(Count.of("C")), false, reversed, ColumnName.from("S", "I")).dropColumns("C"); + assertTableEquals(reversed, initializedDistinct); + } + + @Test + public void testInitialGroupsWithGrouping() { + // Tests grouped addition for static tables and static initial groups + + final Table data = testTable(c("S", "A", "A", "B", "B"), c("I", 10, 20, 30, 40)); + final RowSetIndexer dataIndexer = RowSetIndexer.of(data.getRowSet()); + dataIndexer.getGrouping(data.getColumnSource("S")); + final Table distinct = data.selectDistinct("S"); + assertTableEquals(testTable(c("S", "A", "B")), distinct); + + final Table reversed = data.reverse(); + final RowSetIndexer reversedIndexer = RowSetIndexer.of(reversed.getRowSet()); + reversedIndexer.getGrouping(reversed.getColumnSource("S")); + final Table initializedDistinct = + data.aggBy(List.of(Count.of("C")), false, reversed, ColumnName.from("S")).dropColumns("C"); + assertTableEquals(testTable(c("S", "B", "A")), initializedDistinct); + } + + @Test + public void testInitialGroupsRefreshing() { + // Tests bucketed addition for refreshing tables and refreshing initial groups + + final Collection aggs = List.of( + AggCount("Count"), + AggSum("SumI=I"), + AggMax("MaxI=I"), + AggMin("MinI=I"), + AggGroup("GroupS=S")); + + final TrackingWritableRowSet inputRows = ir(0, 9).toTracking(); + final QueryTable input = testRefreshingTable(inputRows, + c("S", "A", "B", "C", "D", "E", "F", "G", "H", "I", "K"), + c("C", 'A', 'A', 'B', 'B', 'C', 'C', 'D', 'D', 'E', 'E'), + c("I", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); + inputRows.removeRange(0, 8); + + final Table initialKeys = testRefreshingTable(c("C", 'A', 'B', 'C', 'D', 'E')); + + final Table aggregated = input.aggBy(aggs, true, initialKeys, ColumnName.from("C")); + final Table initialState = emptyTable(0).snapshot(aggregated); + TestCase.assertEquals(5, aggregated.size()); + + UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { + inputRows.insertRange(0, 8); + input.notifyListeners(ir(0, 8), i(), i()); + }); + TestCase.assertEquals(5, aggregated.size()); + + UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { + inputRows.removeRange(0, 8); + input.notifyListeners(i(), ir(0, 8), i()); + }); + TestCase.assertEquals(5, aggregated.size()); + + assertTableEquals(initialState, aggregated); + } + + @Test + public void testPreserveEmptyNoKey() { + final Collection aggs = List.of( + AggCount("Count"), + AggSum("SumI=I"), + AggMax("MaxI=I"), + AggMin("MinI=I")); + + final Table expectedEmpty = testTable( + c("Count", 0L), c("SumI", NULL_LONG_BOXED), c("MaxI", NULL_INT_BOXED), c("MinI", NULL_INT_BOXED)); + + final TrackingWritableRowSet inputRows = ir(0, 9).toTracking(); + final QueryTable input = testRefreshingTable(inputRows, + c("S", "A", "B", "C", "D", "E", "F", "G", "H", "I", "K"), + c("C", 'A', 'A', 'B', 'B', 'C', 'C', 'D', 'D', 'E', 'E'), + c("I", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); + inputRows.removeRange(0, 9); + + final Table aggregated = input.aggBy(aggs, 
true); + TestCase.assertEquals(1, aggregated.size()); + assertTableEquals(expectedEmpty, aggregated); + + UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { + inputRows.insertRange(0, 9); + input.notifyListeners(ir(0, 9), i(), i()); + }); + TestCase.assertEquals(1, aggregated.size()); + + UpdateGraphProcessor.DEFAULT.runWithinUnitTestCycle(() -> { + inputRows.removeRange(0, 9); + input.notifyListeners(i(), ir(0, 9), i()); + }); + TestCase.assertEquals(1, aggregated.size()); + assertTableEquals(expectedEmpty, aggregated); + } } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/TestPartitionBy.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/TestPartitionBy.java index 5db9d072370..ff1bae7034d 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/TestPartitionBy.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/TestPartitionBy.java @@ -441,23 +441,24 @@ public void testReleaseRaceRollup() { } public void testPopulateKeysStatic() { - // TODO (https://github.com/deephaven/deephaven-core/issues/2416): Re-implement once populate keys is replaced - /* - * final Table table = emptyTable(1).update("USym=`AAPL`", "Value=1"); final PartitionedTable map = - * table.partitionBy("USym"); map.populateKeys("SPY"); System.out.println(Arrays.toString(map.getKeySet())); - * assertEquals(map.getKeySet(), new String[] {"AAPL", "SPY"}); assertFalse(((TableMapImpl) - * map).isRefreshing()); - */ + testPopulateKeys(false); } public void testPopulateKeysRefreshing() { - // TODO (https://github.com/deephaven/deephaven-core/issues/2416): Re-implement once populate keys is replaced - /* - * final Table table = emptyTable(1).update("USym=`AAPL`", "Value=1"); ((BaseTable) table).setRefreshing(true); - * final TableMap map = table.partitionBy("USym"); map.populateKeys("SPY"); - * System.out.println(Arrays.toString(map.getKeySet())); assertEquals(map.getKeySet(), new String[] {"AAPL", - * "SPY"}); assertTrue(((TableMapImpl) map).isRefreshing()); - */ + testPopulateKeys(true); + } + + private void testPopulateKeys(final boolean refreshing) { + final Table table = emptyTable(1).update("USym=`AAPL`", "Value=1"); + if (refreshing) { + table.setRefreshing(true); + } + final PartitionedTable pt = table.partitionedAggBy(List.of(), true, testTable(c("USym", "SPY")), "USym"); + final String keyColumnName = pt.keyColumnNames().stream().findFirst().get(); + final String[] keys = (String[]) pt.table().getColumn(keyColumnName).getDirect(); + System.out.println(Arrays.toString(keys)); + assertEquals(keys, new String[] {"SPY", "AAPL"}); + assertEquals(pt.table().isRefreshing(), refreshing); } public void testPartitionByWithShifts() { diff --git a/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java b/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java index 1da15dd762e..8a7f91ab76c 100644 --- a/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java +++ b/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java @@ -400,6 +400,10 @@ public void visit(AggregateAllByTable aggAllByTable) { @Override public void visit(AggregationTable aggregationTable) { + if (aggregationTable.preserveEmpty() || aggregationTable.initialGroups().isPresent()) { + throw new UnsupportedOperationException( + "TODO(deephaven-core#991): TableService aggregation coverage, https://github.com/deephaven/deephaven-core/issues/991"); + } out = 
op(Builder::setComboAggregate, aggBy(aggregationTable)); } diff --git a/qst/src/main/java/io/deephaven/qst/TableAdapterImpl.java b/qst/src/main/java/io/deephaven/qst/TableAdapterImpl.java index b0fa1a9efbc..b9eff7fbc46 100644 --- a/qst/src/main/java/io/deephaven/qst/TableAdapterImpl.java +++ b/qst/src/main/java/io/deephaven/qst/TableAdapterImpl.java @@ -257,9 +257,11 @@ public void visit(AggregateAllByTable aggAllByTable) { @Override public void visit(AggregationTable aggregationTable) { if (aggregationTable.groupByColumns().isEmpty()) { - addOp(aggregationTable, parentOps(aggregationTable).aggBy(aggregationTable.aggregations())); + addOp(aggregationTable, ops(aggregationTable.parent()).aggBy(aggregationTable.aggregations(), + aggregationTable.preserveEmpty())); } else { - addOp(aggregationTable, parentOps(aggregationTable).aggBy(aggregationTable.aggregations(), + addOp(aggregationTable, ops(aggregationTable.parent()).aggBy(aggregationTable.aggregations(), + aggregationTable.preserveEmpty(), aggregationTable.initialGroups().map(this::table).orElse(null), aggregationTable.groupByColumns())); } } diff --git a/qst/src/main/java/io/deephaven/qst/table/AggregateAllByTable.java b/qst/src/main/java/io/deephaven/qst/table/AggregateAllByTable.java index 6a5e8c55186..135a2467e43 100644 --- a/qst/src/main/java/io/deephaven/qst/table/AggregateAllByTable.java +++ b/qst/src/main/java/io/deephaven/qst/table/AggregateAllByTable.java @@ -17,7 +17,7 @@ @Immutable @NodeStyle -public abstract class AggregateAllByTable extends ByTableBase { +public abstract class AggregateAllByTable extends ByTableBase implements SingleParentTable { public static Builder builder() { return ImmutableAggregateAllByTable.builder(); diff --git a/qst/src/main/java/io/deephaven/qst/table/AggregationTable.java b/qst/src/main/java/io/deephaven/qst/table/AggregationTable.java index ddaddcd9e81..6f8fb8feefe 100644 --- a/qst/src/main/java/io/deephaven/qst/table/AggregationTable.java +++ b/qst/src/main/java/io/deephaven/qst/table/AggregationTable.java @@ -5,11 +5,13 @@ import io.deephaven.annotations.NodeStyle; import io.deephaven.api.agg.Aggregation; +import org.immutables.value.Value; import org.immutables.value.Value.Check; import org.immutables.value.Value.Immutable; import java.util.Collection; import java.util.List; +import java.util.Optional; /** * @see io.deephaven.api.TableOperations#aggBy(Collection, Collection) @@ -24,6 +26,13 @@ public static Builder builder() { public abstract List aggregations(); + @Value.Default + public boolean preserveEmpty() { + return false; + } + + public abstract Optional initialGroups(); + @Override public final V walk(V visitor) { visitor.visit(this); @@ -37,11 +46,26 @@ final void checkNumAggs() { } } + @Check + final void checkInitialGroups() { + if (groupByColumns().isEmpty() && initialGroups().isPresent()) { + throw new IllegalArgumentException("InitialGroups must not be set if GroupByColumns is empty"); + } + } + + public interface Builder extends ByTableBase.Builder { Builder addAggregations(Aggregation element); Builder addAggregations(Aggregation... 
elements); Builder addAllAggregations(Iterable elements); + + Builder preserveEmpty(boolean preserveEmpty); + + Builder initialGroups(TableSpec initialGroups); + + Builder initialGroups( + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") Optional initialGroups); } } diff --git a/qst/src/main/java/io/deephaven/qst/table/ByTableBase.java b/qst/src/main/java/io/deephaven/qst/table/ByTableBase.java index 9ec4c94b06f..8333be7eac6 100644 --- a/qst/src/main/java/io/deephaven/qst/table/ByTableBase.java +++ b/qst/src/main/java/io/deephaven/qst/table/ByTableBase.java @@ -7,7 +7,7 @@ import java.util.List; -public abstract class ByTableBase extends TableBase implements SingleParentTable { +public abstract class ByTableBase extends TableBase { public abstract TableSpec parent(); diff --git a/qst/src/main/java/io/deephaven/qst/table/CountByTable.java b/qst/src/main/java/io/deephaven/qst/table/CountByTable.java index aedae251aad..17e256fb1cd 100644 --- a/qst/src/main/java/io/deephaven/qst/table/CountByTable.java +++ b/qst/src/main/java/io/deephaven/qst/table/CountByTable.java @@ -9,7 +9,7 @@ @Immutable @NodeStyle -public abstract class CountByTable extends ByTableBase { +public abstract class CountByTable extends ByTableBase implements SingleParentTable { public static Builder builder() { return ImmutableCountByTable.builder(); diff --git a/qst/src/main/java/io/deephaven/qst/table/ParentsVisitor.java b/qst/src/main/java/io/deephaven/qst/table/ParentsVisitor.java index 82e0f447b53..f6d455d7cad 100644 --- a/qst/src/main/java/io/deephaven/qst/table/ParentsVisitor.java +++ b/qst/src/main/java/io/deephaven/qst/table/ParentsVisitor.java @@ -254,7 +254,11 @@ public void visit(AggregateAllByTable aggAllByTable) { @Override public void visit(AggregationTable aggregationTable) { - out = single(aggregationTable); + if (aggregationTable.initialGroups().isPresent()) { + out = Stream.of(aggregationTable.initialGroups().get(), aggregationTable.parent()); + } else { + out = Stream.of(aggregationTable.parent()); + } } @Override diff --git a/qst/src/main/java/io/deephaven/qst/table/TableBase.java b/qst/src/main/java/io/deephaven/qst/table/TableBase.java index c2b0f93f401..b70c5ed9981 100644 --- a/qst/src/main/java/io/deephaven/qst/table/TableBase.java +++ b/qst/src/main/java/io/deephaven/qst/table/TableBase.java @@ -21,6 +21,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Optional; import java.util.stream.Collectors; public abstract class TableBase implements TableSpec { @@ -434,6 +435,19 @@ public final AggregationTable aggBy(Aggregation aggregation) { return AggregationTable.builder().parent(this).addAggregations(aggregation).build(); } + @Override + public final AggregationTable aggBy(Collection aggregations) { + return AggregationTable.builder().parent(this).addAllAggregations(aggregations).build(); + } + + @Override + public TableSpec aggBy(Collection aggregations, boolean preserveEmpty) { + return AggregationTable.builder().parent(this) + .addAllAggregations(aggregations) + .preserveEmpty(preserveEmpty) + .build(); + } + @Override public final AggregationTable aggBy(Aggregation aggregation, String... 
groupByColumns) { final AggregationTable.Builder builder = AggregationTable.builder().parent(this); @@ -449,11 +463,6 @@ public final AggregationTable aggBy(Aggregation aggregation, Collection aggregations) { - return AggregationTable.builder().parent(this).addAllAggregations(aggregations).build(); - } - @Override public final AggregationTable aggBy(Collection aggregations, String... groupByColumns) { final AggregationTable.Builder builder = AggregationTable.builder().parent(this); @@ -470,6 +479,17 @@ public final AggregationTable aggBy(Collection aggregatio .addAllAggregations(aggregations).build(); } + @Override + public TableSpec aggBy(Collection aggregations, boolean preserveEmpty, + TableSpec initialGroups, Collection groupByColumns) { + return AggregationTable.builder().parent(this) + .addAllGroupByColumns(groupByColumns) + .addAllAggregations(aggregations) + .preserveEmpty(preserveEmpty) + .initialGroups(Optional.ofNullable(initialGroups)) + .build(); + } + @Override public final UpdateByTable updateBy(UpdateByOperation operation) { return UpdateByTable.builder() diff --git a/qst/src/main/java/io/deephaven/qst/table/UpdateByTable.java b/qst/src/main/java/io/deephaven/qst/table/UpdateByTable.java index cca47da33af..309bfe46242 100644 --- a/qst/src/main/java/io/deephaven/qst/table/UpdateByTable.java +++ b/qst/src/main/java/io/deephaven/qst/table/UpdateByTable.java @@ -14,7 +14,7 @@ @Immutable @NodeStyle -public abstract class UpdateByTable extends ByTableBase { +public abstract class UpdateByTable extends ByTableBase implements SingleParentTable { public static Builder builder() { return ImmutableUpdateByTable.builder(); diff --git a/replication/reflective/src/main/java/io/deephaven/replicators/ReplicateHashTable.java b/replication/reflective/src/main/java/io/deephaven/replicators/ReplicateHashTable.java index 33f0578c8f9..9be1b0d6c4b 100644 --- a/replication/reflective/src/main/java/io/deephaven/replicators/ReplicateHashTable.java +++ b/replication/reflective/src/main/java/io/deephaven/replicators/ReplicateHashTable.java @@ -117,27 +117,6 @@ public static void main(String[] args) throws IOException, ClassNotFoundExceptio "engine/table/src/main/java/io/deephaven/engine/table/impl/IncrementalChunkedNaturalJoinStateManager.java", "engine/table/src/main/java/io/deephaven/engine/table/impl/StaticChunkedCrossJoinStateManager.java", allowMissingDestinations, Arrays.asList("dumpTable", "prev")); - - // Incremental NJ -> Static Operator Aggregations - doReplicate( - "engine/table/src/main/java/io/deephaven/engine/table/impl/IncrementalChunkedNaturalJoinStateManager.java", - "engine/table/src/main/java/io/deephaven/engine/table/impl/by/StaticChunkedOperatorAggregationStateManager.java", - allowMissingDestinations, Arrays.asList("dumpTable", "prev", "decorationProbe")); - // Incremental NJ -> Incremental Operator Aggregations - doReplicate( - "engine/table/src/main/java/io/deephaven/engine/table/impl/IncrementalChunkedNaturalJoinStateManager.java", - "engine/table/src/main/java/io/deephaven/engine/table/impl/by/IncrementalChunkedOperatorAggregationStateManager.java", - allowMissingDestinations, Collections.singletonList("dumpTable")); - - // Incremental NJ -> AddOnly updateBy - doReplicate( - "engine/table/src/main/java/io/deephaven/engine/table/impl/IncrementalChunkedNaturalJoinStateManager.java", - "engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/hashing/AddOnlyUpdateByStateManager.java", - allowMissingDestinations, Arrays.asList("dumpTable", "prev", "decorationProbe")); - 
doReplicate( - "engine/table/src/main/java/io/deephaven/engine/table/impl/IncrementalChunkedNaturalJoinStateManager.java", - "engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/hashing/IncrementalUpdateByStateManager.java", - allowMissingDestinations, Arrays.asList("dumpTable", "allowUpdateWriteThroughState")); } private static class RegionedFile { diff --git a/table-api/src/main/java/io/deephaven/api/TableOperations.java b/table-api/src/main/java/io/deephaven/api/TableOperations.java index 8fb20074641..c64cf5452e4 100644 --- a/table-api/src/main/java/io/deephaven/api/TableOperations.java +++ b/table-api/src/main/java/io/deephaven/api/TableOperations.java @@ -511,18 +511,107 @@ TOPS raj(TABLE rightTable, Collection columnsToMatch, // ------------------------------------------------------------------------------------------- + /** + * Produce an aggregated result by grouping all rows from {@code this} into a single group of rows and applying + * {@code aggregation} to the result. The result table will have one row if {@code this} has one or more rows, or + * else zero rows. + * + * @param aggregation The {@link Aggregation aggregation} to apply + * @return A new table aggregating the rows of {@code this} + */ TOPS aggBy(Aggregation aggregation); + /** + * Produce an aggregated result by grouping all rows from {@code this} into a single group of rows and applying + * {@code aggregations} to the result. The result table will have one row if {@code this} has one or more rows, or + * else zero rows. + * + * @param aggregations The {@link Aggregation aggregations} to apply + * @return A new table aggregating the rows of {@code this} + */ + TOPS aggBy(Collection aggregations); + + /** + * Produce an aggregated result by grouping all rows from {@code this} into a single group of rows and applying + * {@code aggregations} to the result. + * + * @param aggregations The {@link Aggregation aggregations} to apply + * @param preserveEmpty If {@code preserveEmpty == true}, the result table will always have one row. Otherwise, the + * result table will have one row if {@code this} has one or more rows, or else zero rows. + * @return A new table aggregating the rows of {@code this} + */ + TOPS aggBy(Collection aggregations, boolean preserveEmpty); + + /** + * Produce an aggregated result by grouping {@code this} according to the {@code groupByColumns} and applying + * {@code aggregation} to each resulting group of rows. The result table will have one row per group, ordered by the + * encounter order within {@code this}, thereby ensuring that the row key for a given group never changes. + * Groups that become empty will be removed from the result. + * + * @param aggregation The {@link Aggregation aggregation} to apply + * @param groupByColumns The columns to group by + * @return A new table aggregating the rows of {@code this} + */ TOPS aggBy(Aggregation aggregation, String... groupByColumns); + /** + * Produce an aggregated result by grouping {@code this} according to the {@code groupByColumns} and applying + * {@code aggregation} to each resulting group of rows. The result table will have one row per group, ordered by the + * encounter order within {@code this}, thereby ensuring that the row key for a given group never changes. + * Groups that become empty will be removed from the result. 
+ * + * @param aggregation The {@link Aggregation aggregation} to apply + * @param groupByColumns The {@link ColumnName columns} to group by + * @return A new table aggregating the rows of {@code this} + */ TOPS aggBy(Aggregation aggregation, Collection groupByColumns); - TOPS aggBy(Collection aggregations); - + /** + * Produce an aggregated result by grouping {@code this} according to the {@code groupByColumns} and applying + * {@code aggregations} to each resulting group of rows. The result table will have one row per group, ordered by + * the encounter order within {@code this}, thereby ensuring that the row key for a given group never + * changes. Groups that become empty will be removed from the result. + * + * @param aggregations The {@link Aggregation aggregations} to apply + * @param groupByColumns The columns to group by + * @return A new table aggregating the rows of {@code this} + */ TOPS aggBy(Collection aggregations, String... groupByColumns); + /** + * Produce an aggregated result by grouping {@code this} according to the {@code groupByColumns} and applying + * {@code aggregations} to each resulting group of rows. The result table will have one row per group, ordered by + * the encounter order within {@code this}, thereby ensuring that the row key for a given group never + * changes. Groups that become empty will be removed from the result. + * + * @param aggregations The {@link Aggregation aggregations} to apply + * @param groupByColumns The {@link ColumnName columns} to group by + * @return A new table aggregating the rows of {@code this} + */ TOPS aggBy(Collection aggregations, Collection groupByColumns); + /** + * Produce an aggregated result by grouping {@code this} according to the {@code groupByColumns} and applying + * {@code aggregations} to each resulting group of rows. The result table will have one row per group, ordered by + * the encounter order within {@code this}, thereby ensuring that the row key for a given group never + * changes. + * + * @param aggregations The {@link Aggregation aggregations} to apply + * @param preserveEmpty Whether to keep result rows for groups that are initially empty or become empty as a result + * of updates. Each aggregation operator defines its own value for empty groups. + * @param initialGroups A table whose distinct combinations of values for the {@code groupByColumns} should be used + * to create an initial set of aggregation groups. All other columns are ignored. This is useful in + * combination with {@code preserveEmpty == true} to ensure that particular groups appear in the result + * table, or with {@code preserveEmpty == false} to control the encounter order for a collection of groups + * and thus their relative order in the result. Changes to {@code initialGroups} are not expected or handled; + * if {@code initialGroups} is a refreshing table, only its contents at instantiation time will be used. If + * {@code initialGroups == null}, the result will be the same as if a table with no rows was supplied. 
+ * @param groupByColumns The {@link ColumnName columns} to group by + * @return A new table aggregating the rows of {@code this} + */ + TOPS aggBy(Collection aggregations, boolean preserveEmpty, TABLE initialGroups, + Collection groupByColumns); + // ------------------------------------------------------------------------------------------- TOPS updateBy(UpdateByOperation operation); diff --git a/table-api/src/main/java/io/deephaven/api/TableOperationsAdapter.java b/table-api/src/main/java/io/deephaven/api/TableOperationsAdapter.java index 632fc7f696a..40e4a9973d5 100644 --- a/table-api/src/main/java/io/deephaven/api/TableOperationsAdapter.java +++ b/table-api/src/main/java/io/deephaven/api/TableOperationsAdapter.java @@ -301,18 +301,23 @@ public final TOPS_1 aggBy(Aggregation aggregation) { } @Override - public final TOPS_1 aggBy(Aggregation aggregation, String... groupByColumns) { - return adapt(delegate.aggBy(aggregation, groupByColumns)); + public final TOPS_1 aggBy(Collection aggregations) { + return adapt(delegate.aggBy(aggregations)); } @Override - public final TOPS_1 aggBy(Aggregation aggregation, Collection groupByColumns) { + public TOPS_1 aggBy(Collection aggregations, boolean preserveEmpty) { + return adapt(delegate.aggBy(aggregations, preserveEmpty)); + } + + @Override + public final TOPS_1 aggBy(Aggregation aggregation, String... groupByColumns) { return adapt(delegate.aggBy(aggregation, groupByColumns)); } @Override - public final TOPS_1 aggBy(Collection aggregations) { - return adapt(delegate.aggBy(aggregations)); + public final TOPS_1 aggBy(Aggregation aggregation, Collection groupByColumns) { + return adapt(delegate.aggBy(aggregation, groupByColumns)); } @Override @@ -364,6 +369,13 @@ public final TOPS_1 updateBy(UpdateByControl control, Collection aggregations, boolean preserveEmpty, TABLE_1 initialGroups, + Collection groupByColumns) { + return adapt(delegate.aggBy(aggregations, preserveEmpty, initialGroups == null ? null : adapt(initialGroups), + groupByColumns)); + } + @Override public final TOPS_1 selectDistinct() { return adapt(delegate.selectDistinct());
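
Editor's note: the snippet below is an illustrative usage sketch, not part of the patch above. It shows how the new aggBy overload documented in TableOperations.java might be called from user code. The class name, column names, and sample data are hypothetical; it assumes only the Aggregation and TableTools static factories that already exist in deephaven-core and the aggBy(Collection, boolean, Table, Collection) overload this patch introduces.

    // Hypothetical example, not part of the patch: exercises aggBy with preserveEmpty and initialGroups.
    import static io.deephaven.api.agg.Aggregation.AggCount;
    import static io.deephaven.api.agg.Aggregation.AggSum;
    import static io.deephaven.engine.util.TableTools.intCol;
    import static io.deephaven.engine.util.TableTools.newTable;
    import static io.deephaven.engine.util.TableTools.stringCol;

    import io.deephaven.api.ColumnName;
    import io.deephaven.engine.table.Table;

    import java.util.List;

    public class AggByInitialGroupsExample {
        public static void main(String[] args) {
            final Table data = newTable(
                    stringCol("Sym", "AAPL", "AAPL", "SPY"),
                    intCol("Size", 100, 200, 300));

            // Groups that should appear in the result even if they have no rows in `data`.
            final Table initialGroups = newTable(stringCol("Sym", "AAPL", "SPY", "QQQ"));

            // With preserveEmpty == true, the "QQQ" group is kept with each operator's
            // empty-group value (0 for AggCount, null for AggSum), and result rows follow
            // the encounter order, with initialGroups encountered first.
            final Table bySym = data.aggBy(
                    List.of(AggCount("Count"), AggSum("TotalSize=Size")),
                    true,
                    initialGroups,
                    ColumnName.from("Sym"));
        }
    }

With preserveEmpty == false, the "QQQ" row would be dropped once it has no matching rows, while initialGroups would still fix the relative order of the remaining groups; this mirrors the behavior exercised by testInitialGroupsOrdering and testPreserveEmptyNoKey in the test changes above.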