From 55765bc2ab7c098db0793dac941bc95b53af3e97 Mon Sep 17 00:00:00 2001 From: Rebecca Mahany-Horton Date: Thu, 25 Jul 2024 09:09:29 -0400 Subject: [PATCH] [IndexedDB/KATC] Firefox array deserialization improvements and more tests (#1795) --- ee/indexeddb/indexeddb_test.go | 88 ----- ee/indexeddb/test_data/README.md | 28 -- ee/katc/deserialize_firefox.go | 30 +- ee/katc/deserialize_firefox_test.go | 106 ------ ee/katc/table_test.go | 348 ++++++++++++------ ee/katc/test_data/README.md | 32 ++ ee/{indexeddb => katc}/test_data/index.html | 0 .../1985929987lbadutnscehter.sqlite.zip | Bin 0 -> 2169 bytes .../indexeddbs/file__0.indexeddb.leveldb.zip | Bin ee/{indexeddb => katc}/test_data/main.js | 0 10 files changed, 293 insertions(+), 339 deletions(-) delete mode 100644 ee/indexeddb/indexeddb_test.go delete mode 100644 ee/indexeddb/test_data/README.md create mode 100644 ee/katc/test_data/README.md rename ee/{indexeddb => katc}/test_data/index.html (100%) create mode 100644 ee/katc/test_data/indexeddbs/1985929987lbadutnscehter.sqlite.zip rename ee/{indexeddb => katc}/test_data/indexeddbs/file__0.indexeddb.leveldb.zip (100%) rename ee/{indexeddb => katc}/test_data/main.js (100%) diff --git a/ee/indexeddb/indexeddb_test.go b/ee/indexeddb/indexeddb_test.go deleted file mode 100644 index f07fb466c..000000000 --- a/ee/indexeddb/indexeddb_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package indexeddb - -import ( - "archive/zip" - "context" - _ "embed" - "io" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/kolide/launcher/pkg/log/multislogger" - "github.com/stretchr/testify/require" -) - -//go:embed test_data/indexeddbs/file__0.indexeddb.leveldb.zip -var basicIndexeddb []byte - -func TestQueryIndexeddbObjectStore(t *testing.T) { - t.Parallel() - - for _, tt := range []struct { - fileName string - dbName string - objStoreName string - expectedRows int - zipBytes []byte - }{ - { - fileName: "file__0.indexeddb.leveldb.zip", - dbName: "launchertestdb", - objStoreName: "launchertestobjstore", - expectedRows: 2, - zipBytes: basicIndexeddb, - }, - } { - tt := tt - t.Run(tt.fileName, func(t *testing.T) { - t.Parallel() - - // Write zip bytes to file - tempDir := t.TempDir() - zipFile := filepath.Join(tempDir, tt.fileName) - require.NoError(t, os.WriteFile(zipFile, tt.zipBytes, 0755), "writing zip to temp dir") - - // Prepare indexeddb dir - indexeddbDest := strings.TrimSuffix(zipFile, ".zip") - require.NoError(t, os.MkdirAll(indexeddbDest, 0755), "creating indexeddb dir") - - // Unzip to temp dir - zipReader, err := zip.OpenReader(zipFile) - require.NoError(t, err, "opening reader to zip file") - defer zipReader.Close() - for _, fileInZip := range zipReader.File { - fileInZipReader, err := fileInZip.Open() - require.NoError(t, err, "opening file in zip") - defer fileInZipReader.Close() - - idbFilePath := filepath.Join(tempDir, fileInZip.Name) - - if fileInZip.FileInfo().IsDir() { - require.NoError(t, os.MkdirAll(idbFilePath, fileInZip.Mode()), "creating dir") - continue - } - - outFile, err := os.OpenFile(idbFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileInZip.Mode()) - require.NoError(t, err, "opening output file") - defer outFile.Close() - - _, err = io.Copy(outFile, fileInZipReader) - require.NoError(t, err, "copying from zip to temp dir") - } - - // Perform query and check that we get the expected number of rows - rows, err := QueryIndexeddbObjectStore(indexeddbDest, tt.dbName, tt.objStoreName) - require.NoError(t, err, "querying indexeddb") - require.Equal(t, tt.expectedRows, len(rows), 
"unexpected number of rows returned") - - // Confirm we can deserialize each row. - slogger := multislogger.NewNopLogger() - for _, row := range rows { - _, err := DeserializeChrome(context.TODO(), slogger, row) - require.NoError(t, err, "could not deserialize row") - } - }) - } -} diff --git a/ee/indexeddb/test_data/README.md b/ee/indexeddb/test_data/README.md deleted file mode 100644 index eba47c09b..000000000 --- a/ee/indexeddb/test_data/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Testing deserialization of IndexedDB data - -Handcrafting data to test deserialization byte-by-byte is time-consuming and error-prone. -The JavaScript in this directory creates an IndexedDB database seeded with data -for use in tests. - -## Instructions for generating new test indexeddb.leveldb files - -Edit [main.js](./main.js) as desired. Open [index.html](./index.html) using Google Chrome. - -Loading the page will populate IndexedDB with the desired data. You should also see a -console log message to this effect. - -The file will now be available with other Chrome IndexedDB files. For example, on macOS, -I located the resulting file at `/Users//Library/Application Support/Google/Chrome/Default/IndexedDB/file__0.indexeddb.leveldb`. -The name of the indexeddb file will likely be the same, but you can confirm the origin -matches in Dev Tools in your browser by going to Application => IndexedDB => launchertestdb. - -Zip the directory, then move the zipped file to [indexeddbs](./indexeddbs). You can then -reference this file in the indexeddb tests. - -If you are iteratively making changes to the database, Chrome will complain about re-creating -the database. You can delete the database via Dev Tools and then reload the page to re-create -the database successfully. - -## References - -* [Helpful tutorial for working with the IndexedDB API](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API/Using_IndexedDB) diff --git a/ee/katc/deserialize_firefox.go b/ee/katc/deserialize_firefox.go index 73691b979..a06c89c0c 100644 --- a/ee/katc/deserialize_firefox.go +++ b/ee/katc/deserialize_firefox.go @@ -227,24 +227,38 @@ func deserializeUtf16String(strLen uint32, srcReader io.ByteReader) ([]byte, err func deserializeArray(arrayLength uint32, srcReader io.ByteReader) ([]byte, error) { resultArr := make([]any, arrayLength) - // We discard the next pair before reading the array. - _, _, _ = nextPair(srcReader) + for { + // The next pair is the index. + idxTag, idx, err := nextPair(srcReader) + if err != nil { + return nil, fmt.Errorf("reading next index in array: %w", err) + } + + if idxTag == tagEndOfKeys { + break + } - for i := 0; i < int(arrayLength); i += 1 { - itemTag, _, err := nextPair(srcReader) + // Now, read the data for this index. 
+ itemTag, itemData, err := nextPair(srcReader) if err != nil { - return nil, fmt.Errorf("reading item at index %d in array: %w", i, err) + return nil, fmt.Errorf("reading item at index %d in array: %w", idx, err) } switch itemTag { case tagObjectObject: obj, err := deserializeNestedObject(srcReader) if err != nil { - return nil, fmt.Errorf("reading object at index %d in array: %w", i, err) + return nil, fmt.Errorf("reading object at index %d in array: %w", idx, err) + } + resultArr[idx] = string(obj) // cast to string so it's readable when marshalled again below + case tagString: + str, err := deserializeString(itemData, srcReader) + if err != nil { + return nil, fmt.Errorf("reading string at index %d in array: %w", idx, err) } - resultArr[i] = string(obj) // cast to string so it's readable when marshalled again below + resultArr[idx] = string(str) // cast to string so it's readable when marshalled again below default: - return nil, fmt.Errorf("cannot process item at index %d in array: unsupported tag type %x", i, itemTag) + return nil, fmt.Errorf("cannot process item at index %d in array: unsupported tag type %x", idx, itemTag) } } diff --git a/ee/katc/deserialize_firefox_test.go b/ee/katc/deserialize_firefox_test.go index 50b5d2799..76d68b855 100644 --- a/ee/katc/deserialize_firefox_test.go +++ b/ee/katc/deserialize_firefox_test.go @@ -4,119 +4,13 @@ import ( "bytes" "context" "encoding/binary" - "encoding/json" "io" "testing" - "github.com/google/uuid" "github.com/kolide/launcher/pkg/log/multislogger" "github.com/stretchr/testify/require" ) -func Test_deserializeFirefox(t *testing.T) { - t.Parallel() - - // Build expected object - u, err := uuid.NewRandom() - require.NoError(t, err, "generating test UUID") - idValue := u.String() - arrWithNestedObj := []string{"{\"id\":\"3\"}"} - nestedArrBytes, err := json.Marshal(arrWithNestedObj) - require.NoError(t, err) - expectedObj := map[string][]byte{ - "id": []byte(idValue), // will exercise deserializeString - "version": []byte("1"), // will exercise int deserialization - "option": nil, // will exercise null/undefined deserialization - "types": nestedArrBytes, // will exercise deserializeArray, deserializeNestedObject - } - - // Build a serialized object to deserialize - serializedObj := []byte{ - // Header - 0x00, 0x00, 0x00, 0x00, // header tag data -- discarded - 0x00, 0x00, 0xf1, 0xff, // LE `tagHeader` - // Begin object - 0x00, 0x00, 0x00, 0x00, // object tag data -- discarded - 0x08, 0x00, 0xff, 0xff, // LE `tagObject` - // Begin `id` key - 0x02, 0x00, 0x00, 0x80, // LE data about upcoming string: length 2 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - 0x69, 0x64, // "id" - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // padding to get to 8-byte word boundary - // End `id` key - // Begin `id` value - 0x24, 0x00, 0x00, 0x80, // LE data about upcoming string: length 36 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - } - // Append `id` - serializedObj = append(serializedObj, []byte(idValue)...) 
- // Append `id` padding, add `version` - serializedObj = append(serializedObj, - 0x00, 0x00, 0x00, 0x00, // padding to get to 8-byte word boundary for `id` string - // End `id` value - // Begin `version` key - 0x07, 0x00, 0x00, 0x80, // LE data about upcoming string: length 7 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, // "version" - 0x00, // padding to get to 8-byte word boundary - // End `version` key - // Begin `version` value - 0x01, 0x00, 0x00, 0x00, // Value `1` - 0x03, 0x00, 0xff, 0xff, // LE `tagInt32` - // End `version` value - // Begin `option` key - 0x06, 0x00, 0x00, 0x80, // LE data about upcoming string: length 6 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, // "option" - 0x00, 0x00, // padding to get to 8-byte word boundary - // End `option` key - // Begin `option` value - 0x00, 0x00, 0x00, 0x00, // Unused data, discarded - 0x00, 0x00, 0xff, 0xff, // LE `tagNull` - // End `option` value - // Begin `types` key - 0x05, 0x00, 0x00, 0x80, // LE data about upcoming string: length 5 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - 0x74, 0x79, 0x70, 0x65, 0x73, // "types" - 0x00, 0x00, 0x00, // padding to get to 8-byte word boundary - // End `types` key - // Begin `types` value - 0x01, 0x00, 0x00, 0x00, // Array length (1) - 0x07, 0x00, 0xff, 0xff, // LE `tagArrayObject` - // Begin first array item - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // An extra pair that gets discarded, I don't know why - 0x00, 0x00, 0x00, 0x00, // Tag data, discarded - 0x08, 0x00, 0xff, 0xff, // LE `tagObjectObject` - // Begin nested object - // Begin `id` key - 0x02, 0x00, 0x00, 0x80, // LE data about upcoming string: length 2 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - 0x69, 0x64, // "id" - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // padding to get to 8-byte word boundary - // End `id` key - // Begin `id` value - 0x03, 0x00, 0x00, 0x00, // Value `3` - 0x03, 0x00, 0xff, 0xff, // LE `tagInt32` - // End `id` value - // Object footer - 0x00, 0x00, 0x00, 0x00, // tag data -- discarded - 0x13, 0x00, 0xff, 0xff, // LE `tagEndOfKeys` 0xffff0013 - // End nested object - // End first array item - // End `types` value - // Object footer - 0x00, 0x00, 0x00, 0x00, // tag data -- discarded - 0x13, 0x00, 0xff, 0xff, // LE `tagEndOfKeys` - ) - - results, err := deserializeFirefox(context.TODO(), multislogger.NewNopLogger(), map[string][]byte{ - "data": serializedObj, - }) - require.NoError(t, err, "expected to be able to deserialize object") - - require.Equal(t, expectedObj, results) -} - func Test_deserializeFirefox_missingTopLevelDataKey(t *testing.T) { t.Parallel() diff --git a/ee/katc/table_test.go b/ee/katc/table_test.go index 8cba6ce69..ba77460f2 100644 --- a/ee/katc/table_test.go +++ b/ee/katc/table_test.go @@ -1,15 +1,18 @@ package katc import ( + "archive/zip" "context" - "database/sql" + _ "embed" + "fmt" + "io" "os" "path/filepath" "runtime" + "strings" "testing" - "github.com/golang/snappy" - "github.com/google/uuid" + "github.com/kolide/launcher/ee/indexeddb" "github.com/kolide/launcher/pkg/log/multislogger" "github.com/osquery/osquery-go/plugin/table" "github.com/stretchr/testify/require" @@ -17,128 +20,255 @@ import ( _ "modernc.org/sqlite" ) -func Test_generate_SqliteBackedIndexedDB(t *testing.T) { +//go:embed test_data/indexeddbs/1985929987lbadutnscehter.sqlite.zip +var basicFirefoxIndexeddb []byte + +//go:embed 
test_data/indexeddbs/file__0.indexeddb.leveldb.zip +var basicChromeIndexeddb []byte + +func TestQueryFirefoxIndexedDB(t *testing.T) { t.Parallel() // This test validates generation of table results. It uses a sqlite-backed // IndexedDB as a source, which means it also exercises functionality from // sqlite.go, snappy.go, and deserialize_firefox.go. - // First, set up the data we expect to retrieve. - expectedColumn := "uuid" - u, err := uuid.NewRandom() - require.NoError(t, err, "generating test UUID") - expectedColumnValue := u.String() - - // Serialize the row data, reversing the deserialization operation in - // deserialize_firefox.go. - serializedUuid := []byte(expectedColumnValue) - serializedObj := append([]byte{ - // Header - 0x00, 0x00, 0x00, 0x00, // header tag data -- discarded - 0x00, 0x00, 0xf1, 0xff, // LE `tagHeader` - // Begin object - 0x00, 0x00, 0x00, 0x00, // object tag data -- discarded - 0x08, 0x00, 0xff, 0xff, // LE `tagObject` - // Begin UUID key - 0x04, 0x00, 0x00, 0x80, // LE data about upcoming string: length 4 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - 0x75, 0x75, 0x69, 0x64, // "uuid" - 0x00, 0x00, 0x00, 0x00, // padding to get to 8-byte word boundary - // End UUID key - // Begin UUID value - 0x24, 0x00, 0x00, 0x80, // LE data about upcoming string: length 36 (remaining bytes), is ASCII - 0x04, 0x00, 0xff, 0xff, // LE `tagString` - }, - serializedUuid..., - ) - serializedObj = append(serializedObj, - 0x00, 0x00, 0x00, 0x00, // padding to get to 8-byte word boundary for UUID string - // End UUID value - 0x00, 0x00, 0x00, 0x00, // tag data -- discarded - 0x13, 0x00, 0xff, 0xff, // LE `tagEndOfKeys` 0xffff0013 - ) - - // Now compress the serialized row data, reversing the decompression operation - // in snappy.go - compressedObj := snappy.Encode(nil, serializedObj) - - // Now, create a sqlite database to store this data in. - databaseDir := t.TempDir() - sourceFilepath := filepath.Join(databaseDir, "test.sqlite") - f, err := os.Create(sourceFilepath) - require.NoError(t, err, "creating source db") - require.NoError(t, f.Close(), "closing source db file") - conn, err := sql.Open("sqlite", sourceFilepath) - require.NoError(t, err) - _, err = conn.Exec(`CREATE TABLE object_data(data TEXT NOT NULL PRIMARY KEY) WITHOUT ROWID;`) - require.NoError(t, err, "creating test table") - - // Insert compressed object into the database - _, err = conn.Exec("INSERT INTO object_data (data) VALUES (?);", compressedObj) - require.NoError(t, err, "inserting into sqlite database") - require.NoError(t, conn.Close(), "closing sqlite database") - - // At long last, our source is adequately configured. - // Move on to constructing our KATC table. 
- sourceQuery := "SELECT data FROM object_data;" - cfg := katcTableConfig{ - Columns: []string{expectedColumn}, - katcTableDefinition: katcTableDefinition{ - SourceType: &katcSourceType{ - name: sqliteSourceType, - dataFunc: sqliteData, - }, - SourcePaths: &[]string{filepath.Join("some", "incorrect", "path")}, - SourceQuery: &sourceQuery, - RowTransformSteps: &[]rowTransformStep{ - { - name: snappyDecodeTransformStep, - transformFunc: snappyDecode, - }, - { - name: deserializeFirefoxTransformStep, - transformFunc: deserializeFirefox, - }, - }, + for _, tt := range []struct { + fileName string + objStoreName string + expectedRows int + zipBytes []byte + }{ + { + fileName: "1985929987lbadutnscehter.sqlite.zip", + objStoreName: "launchertestobjstore", + expectedRows: 2, + zipBytes: basicFirefoxIndexeddb, }, - Overlays: []katcTableConfigOverlay{ - { - Filters: map[string]string{ - "goos": runtime.GOOS, - }, + } { + tt := tt + t.Run(tt.fileName, func(t *testing.T) { + t.Parallel() + + // Write zip bytes to file + tempDir := t.TempDir() + zipFile := filepath.Join(tempDir, tt.fileName) + require.NoError(t, os.WriteFile(zipFile, tt.zipBytes, 0755), "writing zip to temp dir") + + // Unzip to file in temp dir + indexeddbDest := strings.TrimSuffix(zipFile, ".zip") + zipReader, err := zip.OpenReader(zipFile) + require.NoError(t, err, "opening reader to zip file") + defer zipReader.Close() + for _, fileInZip := range zipReader.File { + fileInZipReader, err := fileInZip.Open() + require.NoError(t, err, "opening file in zip") + defer fileInZipReader.Close() + + idbFilePath := filepath.Join(tempDir, fileInZip.Name) + + if fileInZip.FileInfo().IsDir() { + require.NoError(t, os.MkdirAll(idbFilePath, fileInZip.Mode()), "creating dir") + continue + } + + outFile, err := os.OpenFile(idbFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileInZip.Mode()) + require.NoError(t, err, "opening output file") + defer outFile.Close() + + _, err = io.Copy(outFile, fileInZipReader) + require.NoError(t, err, "copying from zip to temp dir") + } + + // Construct table + sourceQuery := fmt.Sprintf("SELECT data FROM object_data JOIN object_store ON (object_data.object_store_id = object_store.id) WHERE object_store.name=\"%s\";", tt.objStoreName) + cfg := katcTableConfig{ + Columns: []string{"uuid", "name", "version"}, katcTableDefinition: katcTableDefinition{ - SourcePaths: &[]string{filepath.Join(databaseDir, "%.sqlite")}, // All sqlite files in the test directory + SourceType: &katcSourceType{ + name: sqliteSourceType, + dataFunc: sqliteData, + }, + SourcePaths: &[]string{filepath.Join("some", "incorrect", "path")}, + SourceQuery: &sourceQuery, + RowTransformSteps: &[]rowTransformStep{ + { + name: snappyDecodeTransformStep, + transformFunc: snappyDecode, + }, + { + name: deserializeFirefoxTransformStep, + transformFunc: deserializeFirefox, + }, + }, }, - }, - }, + Overlays: []katcTableConfigOverlay{ + { + Filters: map[string]string{ + "goos": runtime.GOOS, + }, + katcTableDefinition: katcTableDefinition{ + SourcePaths: &[]string{indexeddbDest}, // All sqlite files in the test directory + }, + }, + }, + } + testTable, _ := newKatcTable("test_katc_table", cfg, multislogger.NewNopLogger()) + + // Make a query context restricting the source to our exact source sqlite database + queryContext := table.QueryContext{ + Constraints: map[string]table.ConstraintList{ + pathColumnName: { + Constraints: []table.Constraint{ + { + Operator: table.OperatorEquals, + Expression: indexeddbDest, + }, + }, + }, + }, + } + + // At long last: run a 
query + results, err := testTable.generate(context.TODO(), queryContext) + require.NoError(t, err) + + // We should have the expected number of results in the row + require.Equal(t, tt.expectedRows, len(results), "unexpected number of rows returned") + + // Make sure we have the expected number of columns + for i := 0; i < tt.expectedRows; i += 1 { + require.Contains(t, results[i], pathColumnName, "missing source column") + require.Equal(t, indexeddbDest, results[i][pathColumnName]) + require.Contains(t, results[i], "uuid", "expected uuid column missing") + require.Contains(t, results[i], "name", "expected name column missing") + require.Contains(t, results[i], "version", "expected version column missing") + } + }) } - testTable, _ := newKatcTable("test_katc_table", cfg, multislogger.NewNopLogger()) +} - // Make a query context restricting the source to our exact source sqlite database - queryContext := table.QueryContext{ - Constraints: map[string]table.ConstraintList{ - pathColumnName: { - Constraints: []table.Constraint{ +func TestQueryChromeIndexedDB(t *testing.T) { + t.Parallel() + + // This test validates generation of table results. It uses a leveldb-backed + // IndexedDB as a source, which means it also exercises functionality from + // indexeddb_leveldb.go and the ee/indexeddb package. + + for _, tt := range []struct { + fileName string + dbName string + objStoreName string + expectedRows int + zipBytes []byte + }{ + { + fileName: "file__0.indexeddb.leveldb.zip", + dbName: "launchertestdb", + objStoreName: "launchertestobjstore", + expectedRows: 2, + zipBytes: basicChromeIndexeddb, + }, + } { + tt := tt + t.Run(tt.fileName, func(t *testing.T) { + t.Parallel() + + // Write zip bytes to file + tempDir := t.TempDir() + zipFile := filepath.Join(tempDir, tt.fileName) + require.NoError(t, os.WriteFile(zipFile, tt.zipBytes, 0755), "writing zip to temp dir") + + // Prepare indexeddb dir + indexeddbDest := strings.TrimSuffix(zipFile, ".zip") + require.NoError(t, os.MkdirAll(indexeddbDest, 0755), "creating indexeddb dir") + + // Unzip to temp dir + zipReader, err := zip.OpenReader(zipFile) + require.NoError(t, err, "opening reader to zip file") + defer zipReader.Close() + for _, fileInZip := range zipReader.File { + fileInZipReader, err := fileInZip.Open() + require.NoError(t, err, "opening file in zip") + defer fileInZipReader.Close() + + idbFilePath := filepath.Join(tempDir, fileInZip.Name) + + if fileInZip.FileInfo().IsDir() { + require.NoError(t, os.MkdirAll(idbFilePath, fileInZip.Mode()), "creating dir") + continue + } + + outFile, err := os.OpenFile(idbFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileInZip.Mode()) + require.NoError(t, err, "opening output file") + defer outFile.Close() + + _, err = io.Copy(outFile, fileInZipReader) + require.NoError(t, err, "copying from zip to temp dir") + } + + // Construct table + sourceQuery := fmt.Sprintf("%s.%s", tt.dbName, tt.objStoreName) + cfg := katcTableConfig{ + Columns: []string{"uuid", "name", "version"}, + katcTableDefinition: katcTableDefinition{ + SourceType: &katcSourceType{ + name: indexeddbLeveldbSourceType, + dataFunc: indexeddbLeveldbData, + }, + SourcePaths: &[]string{filepath.Join("some", "incorrect", "path")}, + SourceQuery: &sourceQuery, + RowTransformSteps: &[]rowTransformStep{ + { + name: deserializeChromeTransformStep, + transformFunc: indexeddb.DeserializeChrome, + }, + }, + }, + Overlays: []katcTableConfigOverlay{ { - Operator: table.OperatorEquals, - Expression: sourceFilepath, + Filters: map[string]string{ + "goos": 
runtime.GOOS, + }, + katcTableDefinition: katcTableDefinition{ + SourcePaths: &[]string{indexeddbDest}, // All indexeddb files in the test directory + }, }, }, - }, - }, - } + } + testTable, _ := newKatcTable("test_katc_table", cfg, multislogger.NewNopLogger()) + + // Make a query context restricting the source to our exact source indexeddb database + queryContext := table.QueryContext{ + Constraints: map[string]table.ConstraintList{ + pathColumnName: { + Constraints: []table.Constraint{ + { + Operator: table.OperatorEquals, + Expression: indexeddbDest, + }, + }, + }, + }, + } - // At long last: run a query - results, err := testTable.generate(context.TODO(), queryContext) - require.NoError(t, err) + // At long last: run a query + results, err := testTable.generate(context.TODO(), queryContext) + require.NoError(t, err) - // Validate results - require.Equal(t, 1, len(results), "exactly one row expected") - require.Contains(t, results[0], pathColumnName, "missing source column") - require.Equal(t, sourceFilepath, results[0][pathColumnName]) - require.Contains(t, results[0], expectedColumn, "expected column missing") - require.Equal(t, expectedColumnValue, results[0][expectedColumn], "data mismatch") + // We should have the expected number of results in the row + require.Equal(t, tt.expectedRows, len(results), "unexpected number of rows returned") + + // Make sure we have the expected number of columns + for i := 0; i < tt.expectedRows; i += 1 { + require.Contains(t, results[i], pathColumnName, "missing source column") + require.Equal(t, indexeddbDest, results[i][pathColumnName]) + require.Contains(t, results[i], "uuid", "expected uuid column missing") + require.Contains(t, results[i], "name", "expected name column missing") + require.Contains(t, results[i], "version", "expected version column missing") + } + }) + } } func Test_checkSourcePathConstraints(t *testing.T) { diff --git a/ee/katc/test_data/README.md b/ee/katc/test_data/README.md new file mode 100644 index 000000000..3ec6405b1 --- /dev/null +++ b/ee/katc/test_data/README.md @@ -0,0 +1,32 @@ +# Testing deserialization of IndexedDB data + +Handcrafting data to test deserialization byte-by-byte is time-consuming and error-prone. +The JavaScript in this directory creates an IndexedDB database seeded with data +for use in tests. + +## Instructions for generating new test IndexedDBs + +Edit [main.js](./main.js) as desired. Open [index.html](./index.html) using Chrome or Firefox, +depending on the desired outcome. + +Loading the page will populate IndexedDB with the desired data. You should also see a +console log message to this effect. + +The file will now be available with other IndexedDB files. On macOS, Chrome indexeddb files +can be found at `/Users//Library/Application Support/Google/Chrome/Default/IndexedDB/file__0.indexeddb.leveldb`. +(The name of the indexeddb file will likely be the same, but you can confirm the origin +matches in Dev Tools in your browser by going to Application => IndexedDB => launchertestdb.) +On macOS, Firefox sqlite files can be found at a path similar to this one: +`/Users//Library/Application Support/Firefox/Profiles/*.default*/storage/default/file++++*+launcher+ee+katc+test_data+index.html/idb/*.sqlite`. + +Zip the .indexeddb.leveldb directory (for Chrome) or the .sqlite file (for Firefox), +then move the zipped file to [indexeddbs](./indexeddbs). You can then reference this file +in the indexeddb tests. 
+
+If you are iteratively making changes to the database, Chrome will complain about re-creating
+the database. You can delete the database via Dev Tools and then reload the page to re-create
+the database successfully.
+
+## References
+
+* [Helpful tutorial for working with the IndexedDB API](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API/Using_IndexedDB)
diff --git a/ee/indexeddb/test_data/index.html b/ee/katc/test_data/index.html
similarity index 100%
rename from ee/indexeddb/test_data/index.html
rename to ee/katc/test_data/index.html
diff --git a/ee/katc/test_data/indexeddbs/1985929987lbadutnscehter.sqlite.zip b/ee/katc/test_data/indexeddbs/1985929987lbadutnscehter.sqlite.zip
new file mode 100644
index 0000000000000000000000000000000000000000..7009d90b3c5d2ee979332259fed47dd3a95ed345
GIT binary patch
literal 2169
[base85-encoded zip contents omitted; the binary payload is truncated in this copy of the patch]
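
Both new tests in ee/katc/table_test.go repeat the same write-the-zip-then-extract sequence before building their table config. If that duplication ever becomes a nuisance, the logic could live in one shared helper along the lines sketched below. This is only an illustration of the extraction steps already present in the tests; the helper name extractZip and its exact shape are hypothetical, not part of the patch.

```go
package katc

import (
	"archive/zip"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// extractZip (hypothetical helper) writes zipBytes to a temp dir, extracts the
// archive alongside it, and returns the extracted path -- the zip file name
// with its ".zip" suffix trimmed, matching how the tests derive indexeddbDest.
func extractZip(t *testing.T, fileName string, zipBytes []byte) string {
	tempDir := t.TempDir()
	zipFile := filepath.Join(tempDir, fileName)
	require.NoError(t, os.WriteFile(zipFile, zipBytes, 0755), "writing zip to temp dir")

	zipReader, err := zip.OpenReader(zipFile)
	require.NoError(t, err, "opening reader to zip file")
	defer zipReader.Close()

	for _, fileInZip := range zipReader.File {
		extractedPath := filepath.Join(tempDir, fileInZip.Name)

		if fileInZip.FileInfo().IsDir() {
			require.NoError(t, os.MkdirAll(extractedPath, fileInZip.Mode()), "creating dir")
			continue
		}

		// Create parent directories so the helper works whether or not the
		// archive contains explicit directory entries (a leveldb directory
		// for Chrome vs. a single sqlite file for Firefox).
		require.NoError(t, os.MkdirAll(filepath.Dir(extractedPath), 0755), "creating parent dir")

		fileInZipReader, err := fileInZip.Open()
		require.NoError(t, err, "opening file in zip")

		outFile, err := os.OpenFile(extractedPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileInZip.Mode())
		require.NoError(t, err, "opening output file")

		_, err = io.Copy(outFile, fileInZipReader)
		require.NoError(t, err, "copying from zip to temp dir")

		outFile.Close()
		fileInZipReader.Close()
	}

	return strings.TrimSuffix(zipFile, ".zip")
}
```

Each test could then begin with something like `indexeddbDest := extractZip(t, tt.fileName, tt.zipBytes)` before constructing its katcTableConfig.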
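For poking at the Firefox fixture outside the KATC table plumbing, the patch shows all the moving parts: the source query joins object_data to object_store by store name, the sqlite driver is modernc.org/sqlite (registered under the name "sqlite", as in the deleted Test_generate_SqliteBackedIndexedDB), and each stored row is snappy-compressed before deserializeFirefox sees it. The following is a minimal standalone sketch, not launcher code: it assumes the 1985929987lbadutnscehter.sqlite fixture has already been extracted next to the program and that its object store is the launchertestobjstore used by the tests.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/golang/snappy"
	_ "modernc.org/sqlite"
)

func main() {
	// Assumed location of an extracted Firefox IndexedDB sqlite file.
	dbPath := "1985929987lbadutnscehter.sqlite"

	conn, err := sql.Open("sqlite", dbPath)
	if err != nil {
		log.Fatalf("opening sqlite db: %v", err)
	}
	defer conn.Close()

	// Same shape of query the new Firefox test builds: pull raw object_data
	// rows for one object store, selected by name.
	rows, err := conn.Query(`SELECT data FROM object_data
		JOIN object_store ON (object_data.object_store_id = object_store.id)
		WHERE object_store.name = ?;`, "launchertestobjstore")
	if err != nil {
		log.Fatalf("querying object_data: %v", err)
	}
	defer rows.Close()

	for rows.Next() {
		var compressed []byte
		if err := rows.Scan(&compressed); err != nil {
			log.Fatalf("scanning row: %v", err)
		}

		// Firefox stores the structured-clone payload snappy-compressed; the
		// snappyDecode transform step reverses this before deserializeFirefox runs.
		decompressed, err := snappy.Decode(nil, compressed)
		if err != nil {
			log.Fatalf("snappy-decoding row: %v", err)
		}

		fmt.Printf("row payload: %d compressed bytes -> %d serialized bytes\n", len(compressed), len(decompressed))
	}
	if err := rows.Err(); err != nil {
		log.Fatalf("iterating rows: %v", err)
	}
}
```

The real pipeline then hands the decompressed bytes to the deserializeFirefox transform step, which walks the structured-clone tag/data pairs (including the new index-aware array handling) and emits one column per top-level key.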