diff --git a/content/browser/attribution_reporting/attribution_storage_sql.cc b/content/browser/attribution_reporting/attribution_storage_sql.cc index 7666a0be3f8c4d..d42334d08f3315 100644 --- a/content/browser/attribution_reporting/attribution_storage_sql.cc +++ b/content/browser/attribution_reporting/attribution_storage_sql.cc @@ -68,11 +68,11 @@ namespace content { // Version number of the database. // TODO: remove the active_unattributed_sources_by_site_reporting_origin index // during the next DB migration. -const int AttributionStorageSql::kCurrentVersionNumber = 42; +const int AttributionStorageSql::kCurrentVersionNumber = 43; // Earliest version which can use a |kCurrentVersionNumber| database // without failing. -const int AttributionStorageSql::kCompatibleVersionNumber = 42; +const int AttributionStorageSql::kCompatibleVersionNumber = 43; // Latest version of the database that cannot be upgraded to // |kCurrentVersionNumber| without razing the database. diff --git a/content/browser/attribution_reporting/attribution_storage_sql_migrations.cc b/content/browser/attribution_reporting/attribution_storage_sql_migrations.cc index ca5ce480aa293b..fd7ed6e4d91a29 100644 --- a/content/browser/attribution_reporting/attribution_storage_sql_migrations.cc +++ b/content/browser/attribution_reporting/attribution_storage_sql_migrations.cc @@ -583,6 +583,34 @@ bool MigrateToVersion42(sql::Database* db, sql::MetaTable* meta_table) { return transaction.Commit(); } +bool MigrateToVersion43(sql::Database* db, sql::MetaTable* meta_table) { + // Wrap each migration in its own transaction. See comment in + // `MigrateToVersion34`. 
+ sql::Transaction transaction(db); + if (!transaction.Begin()) { + return false; + } + + static constexpr char kRenameExpiryTimeSql[] = + "ALTER TABLE rate_limits " + "RENAME COLUMN expiry_time TO source_expiry_or_attribution_time"; + if (!db->Execute(kRenameExpiryTimeSql)) { + return false; + } + + static_assert(static_cast<int>(RateLimitTable::Scope::kAttribution) == 1); + + static constexpr char kSetAttributionTimeSql[] = + "UPDATE rate_limits " + "SET source_expiry_or_attribution_time=time WHERE scope=1"; + if (!db->Execute(kSetAttributionTimeSql)) { + return false; + } + + meta_table->SetVersionNumber(43); + return transaction.Commit(); +} + } // namespace bool UpgradeAttributionStorageSqlSchema(sql::Database* db, @@ -640,6 +668,11 @@ bool UpgradeAttributionStorageSqlSchema(sql::Database* db, return false; } } + if (meta_table->GetVersionNumber() == 42) { + if (!MigrateToVersion43(db, meta_table)) { + return false; + } + } // Add similar if () blocks for new versions here. if (base::ThreadTicks::IsSupported()) { diff --git a/content/browser/attribution_reporting/attribution_storage_sql_migrations_unittest.cc b/content/browser/attribution_reporting/attribution_storage_sql_migrations_unittest.cc index 584899d694053c..a7969741108de7 100644 --- a/content/browser/attribution_reporting/attribution_storage_sql_migrations_unittest.cc +++ b/content/browser/attribution_reporting/attribution_storage_sql_migrations_unittest.cc @@ -280,8 +280,8 @@ TEST_F(AttributionStorageSqlMigrationsTest, MigrateVersion34ToCurrent) { NormalizeSchema(db.GetSchema())); // Verify that data is preserved across the migration. 
- sql::Statement s( - db.GetUniqueStatement("SELECT expiry_time FROM rate_limits")); + sql::Statement s(db.GetUniqueStatement( + "SELECT source_expiry_or_attribution_time FROM rate_limits")); ASSERT_TRUE(s.Step()); ASSERT_EQ(7, s.ColumnInt64(0)); // with matching source @@ -289,7 +289,7 @@ TEST_F(AttributionStorageSqlMigrationsTest, MigrateVersion34ToCurrent) { EXPECT_EQ(9 + base::Days(30).InMicroseconds(), s.ColumnInt64(0)); // without matching source ASSERT_TRUE(s.Step()); - EXPECT_EQ(0, s.ColumnInt64(0)); // for attribution + EXPECT_EQ(9, s.ColumnInt64(0)); // for attribution ASSERT_FALSE(s.Step()); } @@ -655,4 +655,57 @@ TEST_F(AttributionStorageSqlMigrationsTest, MigrateVersion41ToCurrent) { histograms.ExpectTotalCount("Conversions.Storage.MigrationTime", 1); } +TEST_F(AttributionStorageSqlMigrationsTest, MigrateVersion42ToCurrent) { + base::HistogramTester histograms; + LoadDatabase(GetVersionFilePath(42), DbPath()); + + // Verify pre-conditions. + { + sql::Database db; + ASSERT_TRUE(db.Open(DbPath())); + ASSERT_TRUE(db.DoesColumnExist("rate_limits", "expiry_time")); + ASSERT_FALSE( + db.DoesColumnExist("rate_limits", "source_expiry_or_attribution_time")); + + static constexpr char kSql[] = "SELECT expiry_time FROM rate_limits"; + sql::Statement s(db.GetUniqueStatement(kSql)); + + ASSERT_TRUE(s.Step()); + ASSERT_EQ(7, s.ColumnInt64(0)); + ASSERT_TRUE(s.Step()); + ASSERT_EQ(10, s.ColumnInt64(0)); + ASSERT_FALSE(s.Step()); + } + + MigrateDatabase(); + + // Verify schema is current. + { + sql::Database db; + ASSERT_TRUE(db.Open(DbPath())); + + // Check version. + EXPECT_EQ(AttributionStorageSql::kCurrentVersionNumber, + VersionFromDatabase(&db)); + + // Compare normalized schemas + EXPECT_EQ(NormalizeSchema(GetCurrentSchema()), + NormalizeSchema(db.GetSchema())); + + // Verify that data is preserved across the migration. 
+ sql::Statement s(db.GetUniqueStatement( + "SELECT source_expiry_or_attribution_time FROM rate_limits")); + + ASSERT_TRUE(s.Step()); + ASSERT_EQ(7, s.ColumnInt64(0)); // unchanged + ASSERT_TRUE(s.Step()); + ASSERT_EQ(9, s.ColumnInt64(0)); // from time + ASSERT_FALSE(s.Step()); + } + + // DB creation histograms should be recorded. + histograms.ExpectTotalCount("Conversions.Storage.CreationTime", 0); + histograms.ExpectTotalCount("Conversions.Storage.MigrationTime", 1); +} + } // namespace content diff --git a/content/browser/attribution_reporting/attribution_storage_sql_query_plans_unittest.cc b/content/browser/attribution_reporting/attribution_storage_sql_query_plans_unittest.cc index 993648f4d725fe..3889da0e774043 100644 --- a/content/browser/attribution_reporting/attribution_storage_sql_query_plans_unittest.cc +++ b/content/browser/attribution_reporting/attribution_storage_sql_query_plans_unittest.cc @@ -213,12 +213,14 @@ TEST_F(AttributionSqlQueryPlanTest, kRateLimitSelectReportingOriginsSql) { TEST_F(AttributionSqlQueryPlanTest, kDeleteRateLimitRangeSql) { EXPECT_THAT(GetPlan(attribution_queries::kDeleteRateLimitRangeSql), - UsesIndex("rate_limit_time_idx")); + AllOf(UsesIndex("rate_limit_time_idx"), + UsesIndex("rate_limit_reporting_origin_idx"))); } TEST_F(AttributionSqlQueryPlanTest, kSelectRateLimitsForDeletionSql) { EXPECT_THAT(GetPlan(attribution_queries::kSelectRateLimitsForDeletionSql), - UsesIndex("rate_limit_time_idx")); + AllOf(UsesIndex("rate_limit_time_idx"), + UsesIndex("rate_limit_reporting_origin_idx"))); } TEST_F(AttributionSqlQueryPlanTest, kDeleteExpiredRateLimitsSql) { diff --git a/content/browser/attribution_reporting/rate_limit_table.cc b/content/browser/attribution_reporting/rate_limit_table.cc index f4afebb66b58e8..1de5c0ceeff150 100644 --- a/content/browser/attribution_reporting/rate_limit_table.cc +++ b/content/browser/attribution_reporting/rate_limit_table.cc @@ -48,11 +48,9 @@ bool RateLimitTable::CreateTable(sql::Database* db) { // 
|context_origin| is the source origin for `kSource` or the destination // origin for `kAttribution`. // |reporting_origin| is the reporting origin of the impression/conversion. - // |time| is the time of either the source registration or the attribution - // trigger, depending on |scope|. - // |expiry_time| is only meaningful when |scope| is - // `RateLimitTable::Scope::kSource` and contains the source's expiry time, - // otherwise it is set to `base::Time()`. + // |time| is the time of the source registration. + // |source_expiry_or_attribution_time| is either the source's expiry time or + // the attribution time, depending on |scope|. static constexpr char kRateLimitTableSql[] = "CREATE TABLE rate_limits(" "id INTEGER PRIMARY KEY NOT NULL," @@ -63,7 +61,7 @@ bool RateLimitTable::CreateTable(sql::Database* db) { "context_origin TEXT NOT NULL," "reporting_origin TEXT NOT NULL," "time INTEGER NOT NULL," - "expiry_time INTEGER NOT NULL)"; + "source_expiry_or_attribution_time INTEGER NOT NULL)"; if (!db->Execute(kRateLimitTableSql)) { return false; } @@ -106,19 +104,19 @@ bool RateLimitTable::CreateTable(sql::Database* db) { bool RateLimitTable::AddRateLimitForSource(sql::Database* db, const StoredSource& source) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); - return AddRateLimit(db, Scope::kSource, source); + return AddRateLimit(db, source, /*trigger_time=*/absl::nullopt); } bool RateLimitTable::AddRateLimitForAttribution( sql::Database* db, const AttributionInfo& attribution_info) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); - return AddRateLimit(db, Scope::kAttribution, attribution_info.source); + return AddRateLimit(db, attribution_info.source, attribution_info.time); } bool RateLimitTable::AddRateLimit(sql::Database* db, - Scope scope, - const StoredSource& source) { + const StoredSource& source, + absl::optional<base::Time> trigger_time) { const CommonSourceInfo& common_info = source.common_info(); // Only delete expired rate limits periodically to avoid 
excessive DB @@ -134,23 +132,23 @@ bool RateLimitTable::AddRateLimit(sql::Database* db, last_cleared_ = now; } + Scope scope; const attribution_reporting::SuitableOrigin* context_origin; - base::Time expiry_time; - switch (scope) { - case Scope::kSource: - context_origin = &common_info.source_origin(); - expiry_time = common_info.expiry_time(); - break; - case Scope::kAttribution: - context_origin = &common_info.destination_origin(); - expiry_time = base::Time(); - break; + base::Time source_expiry_or_attribution_time; + if (trigger_time.has_value()) { + scope = Scope::kAttribution; + context_origin = &common_info.destination_origin(); + source_expiry_or_attribution_time = *trigger_time; + } else { + scope = Scope::kSource; + context_origin = &common_info.source_origin(); + source_expiry_or_attribution_time = common_info.expiry_time(); } static constexpr char kStoreRateLimitSql[] = "INSERT INTO rate_limits" - "(scope,source_id,source_site," - "destination_site,context_origin,reporting_origin,time,expiry_time)" + "(scope,source_id,source_site,destination_site,context_origin," + "reporting_origin,time,source_expiry_or_attribution_time)" "VALUES(?,?,?,?,?,?,?,?)"; sql::Statement statement( db->GetCachedStatement(SQL_FROM_HERE, kStoreRateLimitSql)); @@ -161,7 +159,7 @@ bool RateLimitTable::AddRateLimit(sql::Database* db, statement.BindString(4, context_origin->Serialize()); statement.BindString(5, common_info.reporting_origin().Serialize()); statement.BindTime(6, common_info.source_time()); - statement.BindTime(7, expiry_time); + statement.BindTime(7, source_expiry_or_attribution_time); return statement.Run(); } @@ -218,7 +216,8 @@ RateLimitResult RateLimitTable::SourceAllowedForDestinationLimit( "update `scope=0` query below"); // Check the number of unique destinations covered by all source registrations - // whose [source_time, expiry_time] intersect with the current source_time. 
+ // whose [source_time, source_expiry_or_attribution_time] intersect with the + // current source_time. sql::Statement statement(db->GetCachedStatement( SQL_FROM_HERE, attribution_queries::kRateLimitSourceAllowedSql)); @@ -324,6 +323,7 @@ bool RateLimitTable::ClearAllDataInRange(sql::Database* db, DCHECK(!((delete_begin.is_null() || delete_begin.is_min()) && delete_end.is_max())); + // TODO(linnan): Optimize using a more appropriate index. sql::Statement statement(db->GetCachedStatement( SQL_FROM_HERE, attribution_queries::kDeleteRateLimitRangeSql)); statement.BindTime(0, delete_begin); @@ -359,6 +359,7 @@ bool RateLimitTable::ClearDataForOriginsInRange( return false; } + // TODO(linnan): Optimize using a more appropriate index. sql::Statement select_statement(db->GetCachedStatement( SQL_FROM_HERE, attribution_queries::kSelectRateLimitsForDeletionSql)); select_statement.BindTime(0, delete_begin); diff --git a/content/browser/attribution_reporting/rate_limit_table.h b/content/browser/attribution_reporting/rate_limit_table.h index 05b8f7d9c883e9..fe1573aefd06c7 100644 --- a/content/browser/attribution_reporting/rate_limit_table.h +++ b/content/browser/attribution_reporting/rate_limit_table.h @@ -15,6 +15,7 @@ #include "content/browser/attribution_reporting/stored_source.h" #include "content/common/content_export.h" #include "content/public/browser/storage_partition.h" +#include "third_party/abseil-cpp/absl/types/optional.h" namespace sql { class Database; @@ -98,8 +99,8 @@ class CONTENT_EXPORT RateLimitTable { private: [[nodiscard]] bool AddRateLimit(sql::Database* db, - Scope scope, - const StoredSource& source) + const StoredSource& source, + absl::optional<base::Time> trigger_time) VALID_CONTEXT_REQUIRED(sequence_checker_); [[nodiscard]] RateLimitResult AllowedForReportingOriginLimit( diff --git a/content/browser/attribution_reporting/rate_limit_table_unittest.cc b/content/browser/attribution_reporting/rate_limit_table_unittest.cc index 33b8d7e8ead2aa..36a7f6426e9b78 100644 
--- a/content/browser/attribution_reporting/rate_limit_table_unittest.cc +++ b/content/browser/attribution_reporting/rate_limit_table_unittest.cc @@ -47,6 +47,8 @@ using ::testing::IsEmpty; using ::testing::Pair; using ::testing::SizeIs; +constexpr base::TimeDelta kExpiry = base::Milliseconds(30); + struct RateLimitInput { template <typename... Args> static RateLimitInput Source(Args&&... args) { @@ -63,13 +65,15 @@ struct RateLimitInput { std::string destination_origin, std::string reporting_origin, base::Time time, - base::TimeDelta source_expiry = base::Milliseconds(30)) + base::TimeDelta source_expiry = kExpiry, + absl::optional<base::Time> attribution_time = absl::nullopt) : scope(scope), source_origin(std::move(source_origin)), destination_origin(std::move(destination_origin)), reporting_origin(std::move(reporting_origin)), time(time), - source_expiry(source_expiry) {} + source_expiry(source_expiry), + attribution_time(attribution_time) {} RateLimitScope scope; std::string source_origin; @@ -77,6 +81,7 @@ struct RateLimitInput { std::string reporting_origin; base::Time time; base::TimeDelta source_expiry; + absl::optional<base::Time> attribution_time; SourceBuilder NewSourceBuilder() const { // Ensure that operations involving attributions use the trigger time, not @@ -95,7 +100,9 @@ struct RateLimitInput { AttributionInfo BuildAttributionInfo() const { CHECK_EQ(scope, RateLimitScope::kAttribution); auto source = NewSourceBuilder().BuildStored(); - return AttributionInfoBuilder(std::move(source)).SetTime(time).Build(); + return AttributionInfoBuilder(std::move(source)) + .SetTime(attribution_time.value_or(time)) + .Build(); } }; @@ -115,13 +122,15 @@ struct RateLimitRow { std::string destination_site, std::string reporting_origin, std::string context_origin, - base::Time time) + base::Time time, + base::Time source_expiry_or_attribution_time) : scope(scope), source_site(std::move(source_site)), destination_site(std::move(destination_site)), reporting_origin(std::move(reporting_origin)), 
context_origin(std::move(context_origin)), - time(time) {} + time(time), + source_expiry_or_attribution_time(source_expiry_or_attribution_time) {} RateLimitScope scope; std::string source_site; @@ -129,12 +138,14 @@ struct RateLimitRow { std::string reporting_origin; std::string context_origin; base::Time time; + base::Time source_expiry_or_attribution_time; }; bool operator==(const RateLimitRow& a, const RateLimitRow& b) { const auto tie = [](const RateLimitRow& row) { return std::make_tuple(row.scope, row.source_site, row.destination_site, - row.reporting_origin, row.context_origin, row.time); + row.reporting_origin, row.context_origin, row.time, + row.source_expiry_or_attribution_time); }; return tie(a) == tie(b); } @@ -151,13 +162,15 @@ std::ostream& operator<<(std::ostream& out, const RateLimitScope scope) { std::ostream& operator<<(std::ostream& out, const RateLimitInput& i) { return out << "{" << i.scope << "," << i.source_origin << "," << i.destination_origin << "," << i.reporting_origin << "," - << "," << i.time << "," << i.source_expiry << "}"; + << "," << i.time << "," << i.source_expiry << "," + << i.attribution_time.value_or(base::Time()) << "}"; } std::ostream& operator<<(std::ostream& out, const RateLimitRow& row) { return out << "{" << row.scope << "," << row.source_site << "," << row.destination_site << "," << row.reporting_origin << "," - << row.context_origin << "," << row.time << "}"; + << row.context_origin << "," << row.time << "," + << row.source_expiry_or_attribution_time << "}"; } class RateLimitTableTest : public testing::Test { @@ -175,7 +188,8 @@ class RateLimitTableTest : public testing::Test { static constexpr char kSelectSql[] = "SELECT id,scope,source_site,destination_site," - "reporting_origin,context_origin,time FROM rate_limits"; + "reporting_origin,context_origin,time," + "source_expiry_or_attribution_time FROM rate_limits"; sql::Statement statement(db_.GetCachedStatement(SQL_FROM_HERE, kSelectSql)); while (statement.Step()) { @@ 
-186,7 +200,7 @@ class RateLimitTableTest : public testing::Test { /*destination_site=*/statement.ColumnString(3), /*reporting_origin=*/statement.ColumnString(4), /*context_origin=*/statement.ColumnString(5), - statement.ColumnTime(6))); + statement.ColumnTime(6), statement.ColumnTime(7))); } EXPECT_TRUE(statement.Succeeded()); @@ -583,7 +597,7 @@ TEST_F(RateLimitTableTest, ClearDataForOriginsInRange) { }, { "no deletions: no rows in time range", - now + base::Days(1) + base::Milliseconds(1), + now + base::Days(1) + base::Milliseconds(11), base::Time::Max(), base::NullCallback(), {}, @@ -609,21 +623,40 @@ TEST_F(RateLimitTableTest, ClearDataForOriginsInRange) { {}, }, { - "1 deletion: time range and filter match for reporting origin", + "2 deletions: time range and filter match for reporting origin", now + base::Milliseconds(1), - base::Time::Max(), + now + base::Days(1) + base::Milliseconds(5), base::BindRepeating([](const blink::StorageKey& storage_key) { return storage_key == blink::StorageKey::CreateFromStringForTesting( "https://c.r.test"); }), - {3}, + {3, 5}, }, { - "4 deletions: null filter matches everything", + "6 deletions: null filter matches everything", now, base::Time::Max(), base::NullCallback(), - {1, 2, 3, 4}, + {1, 2, 3, 4, 5, 6}, + }, + { + "1 deletion: attribution time range and filter match for reporting " + "origin" + "origin", + now + base::Days(1) + base::Milliseconds(5), + now + base::Days(1) + base::Milliseconds(10), + base::BindRepeating([](const blink::StorageKey& storage_key) { + return storage_key == blink::StorageKey::CreateFromStringForTesting( + "https://c.r.test"); + }), + {5}, + }, + { + "2 deletions: attribution time range and null filter", + now + base::Days(1) + base::Milliseconds(5), + now + base::Days(1) + base::Milliseconds(10), + base::NullCallback(), + {5, 6}, }, }; @@ -639,6 +672,14 @@ TEST_F(RateLimitTableTest, ClearDataForOriginsInRange) { "https://c.r.test", now + base::Days(1))}, {4, 
RateLimitInput::Source("https://b.s1.test", "https://b.d1.test", "https://d.r.test", now + base::Days(1))}, + {5, RateLimitInput::Attribution( + "https://a.s1.test", "https://a.d1.test", "https://c.r.test", + now + base::Days(1), kExpiry, + now + base::Days(1) + base::Milliseconds(10))}, + {6, RateLimitInput::Attribution( + "https://a.s1.test", "https://a.d1.test", "https://d.r.test", + now + base::Days(1), kExpiry, + now + base::Days(1) + base::Milliseconds(10))}, }; for (const auto& [key, input] : inputs) { @@ -655,18 +696,28 @@ TEST_F(RateLimitTableTest, ClearDataForOriginsInRange) { base::flat_map<int64_t, RateLimitRow> rows = { {1, RateLimitRow::Attribution("https://s1.test", "https://d1.test", "https://a.r.test", "https://a.d1.test", - now)}, + now, now)}, {2, RateLimitRow::Source("https://s1.test", "https://d1.test", - "https://b.r.test", "https://b.s1.test", now)}, - {3, RateLimitRow::Attribution("https://s1.test", "https://d1.test", - "https://c.r.test", "https://a.d1.test", - now + base::Days(1))}, + "https://b.r.test", "https://b.s1.test", now, + now + kExpiry)}, + {3, RateLimitRow::Attribution( + "https://s1.test", "https://d1.test", "https://c.r.test", + "https://a.d1.test", now + base::Days(1), now + base::Days(1))}, {4, RateLimitRow::Source("https://s1.test", "https://d1.test", "https://d.r.test", "https://b.s1.test", - now + base::Days(1))}, + now + base::Days(1), + now + base::Days(1) + kExpiry)}, + {5, RateLimitRow::Attribution( + "https://s1.test", "https://d1.test", "https://c.r.test", + "https://a.d1.test", now + base::Days(1), + now + base::Days(1) + base::Milliseconds(10))}, + {6, RateLimitRow::Attribution( + "https://s1.test", "https://d1.test", "https://d.r.test", + "https://a.d1.test", now + base::Days(1), + now + base::Days(1) + base::Milliseconds(10))}, }; - ASSERT_EQ(GetRateLimitRows(), rows); + ASSERT_EQ(GetRateLimitRows(), rows) << test_case.desc; ASSERT_TRUE(table_.ClearDataForOriginsInRange( &db_, test_case.delete_min, test_case.delete_max, 
test_case.filter)) diff --git a/content/browser/attribution_reporting/sql_queries.h b/content/browser/attribution_reporting/sql_queries.h index 60942e7208114b..9e4c16beff250d 100644 --- a/content/browser/attribution_reporting/sql_queries.h +++ b/content/browser/attribution_reporting/sql_queries.h @@ -231,7 +231,7 @@ inline constexpr const char kRateLimitSourceAllowedSql[] = "WHERE scope=0 " "AND source_site=? " "AND reporting_origin=? " - "AND expiry_time>?"; + "AND source_expiry_or_attribution_time>?"; inline constexpr const char kRateLimitSelectReportingOriginsSql[] = "SELECT reporting_origin FROM rate_limits " @@ -241,17 +241,19 @@ inline constexpr const char kRateLimitSelectReportingOriginsSql[] = "AND time>?"; inline constexpr const char kDeleteRateLimitRangeSql[] = - "DELETE FROM rate_limits " - "WHERE time BETWEEN ? AND ?"; + "DELETE FROM rate_limits WHERE" + "(time BETWEEN ?1 AND ?2)OR" + "(scope=1 AND source_expiry_or_attribution_time BETWEEN ?1 AND ?2)"; inline constexpr const char kSelectRateLimitsForDeletionSql[] = "SELECT id,reporting_origin " - "FROM rate_limits " - "WHERE time BETWEEN ? AND ?"; + "FROM rate_limits WHERE" + "(time BETWEEN ?1 AND ?2)OR" + "(scope=1 AND source_expiry_or_attribution_time BETWEEN ?1 AND ?2)"; inline constexpr const char kDeleteExpiredRateLimitsSql[] = "DELETE FROM rate_limits " - "WHERE time<=? AND(scope=1 OR expiry_time<=?)"; + "WHERE time<=? 
AND(scope=1 OR source_expiry_or_attribution_time<=?)"; inline constexpr const char kDeleteRateLimitsBySourceIdSql[] = "DELETE FROM rate_limits WHERE source_id=?"; diff --git a/content/test/data/attribution_reporting/databases/version_42.sql b/content/test/data/attribution_reporting/databases/version_42.sql index d4f5afcf8f3e7a..7b0097cac49397 100644 --- a/content/test/data/attribution_reporting/databases/version_42.sql +++ b/content/test/data/attribution_reporting/databases/version_42.sql @@ -46,4 +46,7 @@ CREATE INDEX aggregate_trigger_time_idx ON aggregatable_report_metadata(trigger_ CREATE INDEX aggregate_report_time_idx ON aggregatable_report_metadata(report_time); +INSERT INTO rate_limits VALUES(3,0,5,'a','b','c','d',6,7); +INSERT INTO rate_limits VALUES(4,1,8,'e','f','g','h',9,10); + COMMIT; diff --git a/content/test/data/attribution_reporting/databases/version_43.sql b/content/test/data/attribution_reporting/databases/version_43.sql new file mode 100644 index 00000000000000..dd96816f30ceb2 --- /dev/null +++ b/content/test/data/attribution_reporting/databases/version_43.sql @@ -0,0 +1,49 @@ +PRAGMA foreign_keys=OFF; + +BEGIN TRANSACTION; + +CREATE TABLE sources(source_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,source_event_id INTEGER NOT NULL,source_origin TEXT NOT NULL,destination_origin TEXT NOT NULL,reporting_origin TEXT NOT NULL,source_time INTEGER NOT NULL,expiry_time INTEGER NOT NULL,event_report_window_time INTEGER NOT NULL,aggregatable_report_window_time INTEGER NOT NULL,num_attributions INTEGER NOT NULL,event_level_active INTEGER NOT NULL,aggregatable_active INTEGER NOT NULL,destination_site TEXT NOT NULL,source_type INTEGER NOT NULL,attribution_logic INTEGER NOT NULL,priority INTEGER NOT NULL,source_site TEXT NOT NULL,debug_key INTEGER,aggregatable_budget_consumed INTEGER NOT NULL,aggregatable_source BLOB NOT NULL,filter_data BLOB NOT NULL); + +CREATE TABLE event_level_reports(report_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,source_id INTEGER 
NOT NULL,trigger_data INTEGER NOT NULL,trigger_time INTEGER NOT NULL,report_time INTEGER NOT NULL,priority INTEGER NOT NULL,failed_send_attempts INTEGER NOT NULL,external_report_id TEXT NOT NULL,debug_key INTEGER); + +CREATE TABLE rate_limits(id INTEGER PRIMARY KEY NOT NULL,scope INTEGER NOT NULL,source_id INTEGER NOT NULL,source_site TEXT NOT NULL,destination_site TEXT NOT NULL,context_origin TEXT NOT NULL,reporting_origin TEXT NOT NULL,time INTEGER NOT NULL,source_expiry_or_attribution_time INTEGER NOT NULL); + +CREATE TABLE dedup_keys(source_id INTEGER NOT NULL,report_type INTEGER NOT NULL,dedup_key INTEGER NOT NULL,PRIMARY KEY(source_id,report_type,dedup_key))WITHOUT ROWID; + +CREATE TABLE aggregatable_report_metadata(aggregation_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,source_id INTEGER NOT NULL,trigger_time INTEGER NOT NULL,debug_key INTEGER,external_report_id TEXT NOT NULL,report_time INTEGER NOT NULL,failed_send_attempts INTEGER NOT NULL,initial_report_time INTEGER NOT NULL,aggregation_coordinator INTEGER NOT NULL,attestation_token TEXT); + +CREATE TABLE aggregatable_contributions(aggregation_id INTEGER NOT NULL,contribution_id INTEGER NOT NULL,key_high_bits INTEGER NOT NULL,key_low_bits INTEGER NOT NULL,value INTEGER NOT NULL,PRIMARY KEY(aggregation_id,contribution_id))WITHOUT ROWID; + +CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, value LONGVARCHAR); + +INSERT INTO meta VALUES('mmap_status','-1'); +INSERT INTO meta VALUES('version','43'); +INSERT INTO meta VALUES('last_compatible_version','43'); + +CREATE INDEX sources_by_active_destination_site_reporting_origin ON sources(event_level_active,aggregatable_active,destination_site,reporting_origin); + +CREATE INDEX sources_by_expiry_time ON sources(expiry_time); + +CREATE INDEX active_sources_by_source_origin ON sources(source_origin)WHERE event_level_active=1 OR aggregatable_active=1; + +CREATE INDEX active_unattributed_sources_by_site_reporting_origin ON 
sources(source_site,reporting_origin)WHERE event_level_active=1 AND num_attributions=0 AND aggregatable_active=1 AND aggregatable_budget_consumed=0; + +CREATE INDEX event_level_reports_by_report_time ON event_level_reports(report_time); + +CREATE INDEX event_level_reports_by_source_id ON event_level_reports(source_id); + +CREATE INDEX rate_limit_source_site_reporting_origin_idx ON rate_limits(scope,source_site,reporting_origin); + +CREATE INDEX rate_limit_reporting_origin_idx ON rate_limits(scope,destination_site,source_site); + +CREATE INDEX rate_limit_time_idx ON rate_limits(time); + +CREATE INDEX rate_limit_source_id_idx ON rate_limits(source_id); + +CREATE INDEX aggregate_source_id_idx ON aggregatable_report_metadata(source_id); + +CREATE INDEX aggregate_trigger_time_idx ON aggregatable_report_metadata(trigger_time); + +CREATE INDEX aggregate_report_time_idx ON aggregatable_report_metadata(report_time); + +COMMIT;