diff --git a/src/plugins/data/server/server.api.md b/src/plugins/data/server/server.api.md
index bb8ed92e9bfce..c58cd11bbc5bc 100644
--- a/src/plugins/data/server/server.api.md
+++ b/src/plugins/data/server/server.api.md
@@ -38,7 +38,6 @@ import { IUiSettingsClient } from 'src/core/server';
 import { IUiSettingsClient as IUiSettingsClient_3 } from 'kibana/server';
 import { KibanaRequest } from 'kibana/server';
 import { KibanaRequest as KibanaRequest_2 } from 'src/core/server';
-import { LegacyAPICaller } from 'src/core/server';
 import { Logger } from 'src/core/server';
 import { Logger as Logger_2 } from 'kibana/server';
 import { LoggerFactory } from '@kbn/logging';
diff --git a/src/plugins/telemetry/server/fetcher.ts b/src/plugins/telemetry/server/fetcher.ts
index 820f2c7c4c4af..23b4b0640e978 100644
--- a/src/plugins/telemetry/server/fetcher.ts
+++ b/src/plugins/telemetry/server/fetcher.ts
@@ -31,7 +31,7 @@ import {
   SavedObjectsClientContract,
   SavedObjectsClient,
   CoreStart,
-  ILegacyCustomClusterClient,
+  ICustomClusterClient,
 } from '../../../core/server';
 import {
   getTelemetryOptIn,
@@ -65,7 +65,7 @@ export class FetcherTask {
   private isSending = false;
   private internalRepository?: SavedObjectsClientContract;
   private telemetryCollectionManager?: TelemetryCollectionManagerPluginStart;
-  private elasticsearchClient?: ILegacyCustomClusterClient;
+  private elasticsearchClient?: ICustomClusterClient;
 
   constructor(initializerContext: PluginInitializerContext) {
     this.config$ = initializerContext.config.create();
@@ -79,7 +79,7 @@ export class FetcherTask {
   ) {
     this.internalRepository = new SavedObjectsClient(savedObjects.createInternalRepository());
     this.telemetryCollectionManager = telemetryCollectionManager;
-    this.elasticsearchClient = elasticsearch.legacy.createClient('telemetry-fetcher');
+    this.elasticsearchClient = elasticsearch.createClient('telemetry-fetcher');
 
     this.intervalId = timer(this.initialCheckDelayMs, this.checkIntervalMs).subscribe(() =>
       this.sendIfDue()
diff --git a/src/plugins/telemetry/server/plugin.ts b/src/plugins/telemetry/server/plugin.ts
index f40c38b2cbbd0..95b44ae560f20 100644
--- a/src/plugins/telemetry/server/plugin.ts
+++ b/src/plugins/telemetry/server/plugin.ts
@@ -99,7 +99,7 @@ export class TelemetryPlugin implements Plugin {
-    const usage = await usageCollection.bulkFetch(
-      callWithInternalUser,
-      asInternalUser,
-      soClient,
-      kibanaRequest
-    );
+    const usage = await usageCollection.bulkFetch(asInternalUser, soClient, kibanaRequest);
     return usageCollection.toObject(usage);
 }
diff --git a/src/plugins/telemetry/server/telemetry_collection/get_local_stats.ts b/src/plugins/telemetry/server/telemetry_collection/get_local_stats.ts
index 12245ce62305e..866aed520bd2e 100644
--- a/src/plugins/telemetry/server/telemetry_collection/get_local_stats.ts
+++ b/src/plugins/telemetry/server/telemetry_collection/get_local_stats.ts
@@ -71,7 +71,7 @@ export const getLocalStats: StatsGetter = async (
   config,
   context
 ) => {
-  const { callCluster, usageCollection, esClient, soClient, kibanaRequest } = config;
+  const { usageCollection, esClient, soClient, kibanaRequest } = config;
 
   return await Promise.all(
     clustersDetails.map(async (clustersDetail) => {
@@ -79,7 +79,7 @@ export const getLocalStats: StatsGetter = async (
       getClusterInfo(esClient), // cluster info
       getClusterStats(esClient), // cluster stats (not to be confused with cluster _state_)
       getNodesUsage(esClient), // nodes_usage info
-      getKibana(usageCollection, callCluster, esClient, soClient, kibanaRequest),
+      getKibana(usageCollection, esClient, soClient, kibanaRequest),
       getDataTelemetry(esClient),
     ]);
     return handleLocalStats(
diff --git a/src/plugins/telemetry_collection_manager/server/plugin.ts b/src/plugins/telemetry_collection_manager/server/plugin.ts
index a135f4b115b21..247d9ce2c366c 100644
--- a/src/plugins/telemetry_collection_manager/server/plugin.ts
+++ b/src/plugins/telemetry_collection_manager/server/plugin.ts
@@ -26,7 +26,6 @@ import {
   Logger,
   IClusterClient,
   SavedObjectsServiceStart,
-  ILegacyClusterClient,
 } from 'src/core/server';
 
 import {
@@ -53,7 +52,6 @@ export class TelemetryCollectionManagerPlugin
   private collectionStrategy: CollectionStrategy | undefined;
   private usageGetterMethodPriority = -1;
   private usageCollection?: UsageCollectionSetup;
-  private legacyElasticsearchClient?: ILegacyClusterClient;
   private elasticsearchClient?: IClusterClient;
   private savedObjectsService?: SavedObjectsServiceStart;
   private readonly isDistributable: boolean;
@@ -77,7 +75,6 @@ export class TelemetryCollectionManagerPlugin
   }
 
   public start(core: CoreStart) {
-    this.legacyElasticsearchClient = core.elasticsearch.legacy.client; // TODO: Remove when all the collectors have migrated
     this.elasticsearchClient = core.elasticsearch.client;
     this.savedObjectsService = core.savedObjects;
 
@@ -129,9 +126,6 @@ export class TelemetryCollectionManagerPlugin
     config: StatsGetterConfig,
     usageCollection: UsageCollectionSetup
   ): StatsCollectionConfig | undefined {
-    const callCluster = config.unencrypted
-      ? this.legacyElasticsearchClient?.asScoped(config.request).callAsCurrentUser
-      : this.legacyElasticsearchClient?.callAsInternalUser;
     // Scope the new elasticsearch Client appropriately and pass to the stats collection config
     const esClient = config.unencrypted
       ? this.elasticsearchClient?.asScoped(config.request).asCurrentUser
@@ -143,8 +137,8 @@ export class TelemetryCollectionManagerPlugin
     // Provide the kibanaRequest so opted-in plugins can scope their custom clients only if the request is not encrypted
     const kibanaRequest = config.unencrypted ? config.request : void 0;
 
-    if (callCluster && esClient && soClient) {
-      return { callCluster, usageCollection, esClient, soClient, kibanaRequest };
+    if (esClient && soClient) {
+      return { usageCollection, esClient, soClient, kibanaRequest };
     }
   }
diff --git a/src/plugins/telemetry_collection_manager/server/types.ts b/src/plugins/telemetry_collection_manager/server/types.ts
index 05641d5064593..49e217b9e3d75 100644
--- a/src/plugins/telemetry_collection_manager/server/types.ts
+++ b/src/plugins/telemetry_collection_manager/server/types.ts
@@ -18,7 +18,6 @@
  */
 
 import {
-  LegacyAPICaller,
   ElasticsearchClient,
   Logger,
   KibanaRequest,
@@ -68,7 +67,6 @@ export interface ClusterDetails {
 
 export interface StatsCollectionConfig {
   usageCollection: UsageCollectionSetup;
-  callCluster: LegacyAPICaller;
   esClient: ElasticsearchClient;
   soClient: SavedObjectsClientContract | ISavedObjectsRepository;
   kibanaRequest: KibanaRequest | undefined; // intentionally `| undefined` to enforce providing the parameter
diff --git a/src/plugins/usage_collection/README.md b/src/plugins/usage_collection/README.md
index 85c910cd09bf1..5055b3112a297 100644
--- a/src/plugins/usage_collection/README.md
+++ b/src/plugins/usage_collection/README.md
@@ -95,10 +95,10 @@ Some background:
 
 - `isReady` (added in v7.2.0 and v6.8.4) is a way for a usage collector to announce that some async process must finish first before it can return data in the `fetch` method (e.g. a client needs to be initialized, or the task manager needs to run a task first). If any collector reports that it is not ready when we call its `fetch` method, we reset a flag to try again and, after a set amount of time, collect data from those collectors that are ready and skip any that are not. This means that if a collector returns `true` for `isReady` and it actually isn't ready to return data, there won't be telemetry data from that collector in that telemetry report (usually once per day). You should consider what it means if your collector doesn't return data in the first few documents when Kibana starts, or if we should wait for any other reason (e.g. the task manager needs to run your task first). If you need to tell telemetry collection to wait, you should implement this function with custom logic. If your `fetch` method can run without the need of any previous dependencies, then you can return true for `isReady` as shown in the example below.
-- The `fetch` method needs to support multiple contexts in which it is called. For example, when a user requests the example of what we collect in the **Kibana>Advanced Settings>Usage data** section, the clients provided in the context of the function (`CollectorFetchContext`) are scoped to that user's privileges. The reason is to avoid exposing via telemetry any data that user should not have access to (i.e.: if the user does not have access to certain indices, they shouldn't be allowed to see the number of documents that exists in it). In this case, the `fetch` method receives the clients `callCluster`, `esClient` and `soClient` scoped to the user who performed the HTTP API request. Alternatively, when requesting the usage data to be reported to the Remote Telemetry Service, the clients are scoped to the internal Kibana user (`kibana_system`). Please, mind it might have lower-level access than the default super-admin `elastic` test user.
+- The `fetch` method needs to support multiple contexts in which it is called. For example, when a user requests the example of what we collect in the **Kibana>Advanced Settings>Usage data** section, the clients provided in the context of the function (`CollectorFetchContext`) are scoped to that user's privileges. The reason is to avoid exposing via telemetry any data that user should not have access to (i.e.: if the user does not have access to certain indices, they shouldn't be allowed to see the number of documents that exist in it). In this case, the `fetch` method receives the clients `esClient` and `soClient` scoped to the user who performed the HTTP API request. Alternatively, when requesting the usage data to be reported to the Remote Telemetry Service, the clients are scoped to the internal Kibana user (`kibana_system`). Please, mind it might have lower-level access than the default super-admin `elastic` test user.
 
 In some scenarios, your collector might need to maintain its own client. An example of that is the `monitoring` plugin, that maintains a connection to the Remote Monitoring Cluster to push its monitoring data. If that's the case, your plugin can opt-in to receive the additional `kibanaRequest` parameter by adding `extendFetchContext.kibanaRequest: true` to the collector's config: it will be appended to the context of the `fetch` method only if the request needs to be scoped to a user other than Kibana Internal, so beware that your collector will need to work for both scenarios (especially for the scenario when `kibanaRequest` is missing).
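To make that scoped-clients contract concrete, here is a minimal sketch of a collector that relies only on the clients handed to `fetch`. The plugin name, collector `type`, index name and schema are made up for illustration, and the `{ body }` unwrapping assumes the new (7.x) Elasticsearch client exposed as `esClient`:

```ts
import { UsageCollectionSetup } from 'src/plugins/usage_collection/server';

interface MyPluginUsage {
  docCount: number;
}

export function registerMyPluginCollector(usageCollection: UsageCollectionSetup) {
  const collector = usageCollection.makeUsageCollector<MyPluginUsage>({
    type: 'my_plugin',
    isReady: () => true, // nothing asynchronous to wait for in this sketch
    schema: {
      docCount: { type: 'long' },
    },
    // `esClient` and `soClient` arrive already scoped: to the requesting user when the
    // example payload is built, or to the internal `kibana_system` user when telemetry
    // is reported, so the collector never creates its own clients.
    fetch: async ({ esClient }) => {
      const { body } = await esClient.count({ index: '.my-plugin-index' });
      return { docCount: body.count };
    },
  });

  usageCollection.registerCollector(collector);
}
```

Only collectors that opt in via `extendFetchContext.kibanaRequest: true` additionally receive `kibanaRequest` in that same context.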
 
-Note: there will be many cases where you won't need to use the `callCluster`, `esClient` or `soClient` function that gets passed in to your `fetch` method at all. Your feature might have an accumulating value in server memory, or read something from the OS.
+Note: there will be many cases where you won't need to use the `esClient` or `soClient` function that gets passed in to your `fetch` method at all. Your feature might have an accumulating value in server memory, or read something from the OS.
 
 In the case of using a custom SavedObjects client, it is up to the plugin to initialize the client to save the data and it is strongly recommended to scope that client to the `kibana_system` user.
 
@@ -302,7 +302,7 @@ New fields added to the telemetry payload currently mean that telemetry cluster
 
 There are a few ways you can test that your usage collector is working properly.
 
-1. The `/api/stats?extended=true&legacy=true` HTTP API in Kibana (added in 6.4.0) will call the fetch methods of all the registered collectors, and add them to a stats object you can see in a browser or in curl. To test that your usage collector has been registered correctly and that it has the model of data you expected it to have, call that HTTP API manually and you should see a key in the `usage` object of the response named after your usage collector's `type` field. This method tests the Metricbeat scenario described above where `callCluster` wraps `callWithRequest`.
+1. The `/api/stats?extended=true&legacy=true` HTTP API in Kibana (added in 6.4.0) will call the fetch methods of all the registered collectors, and add them to a stats object you can see in a browser or in curl. To test that your usage collector has been registered correctly and that it has the model of data you expected it to have, call that HTTP API manually and you should see a key in the `usage` object of the response named after your usage collector's `type` field. This method tests the Metricbeat scenario described above where the Elasticsearch client is scoped to the current request.
 2. There is a dev script in x-pack that will give a sample of a payload of data that gets sent up to the telemetry cluster for the sending phase of telemetry. Collected data comes from:
    - The `.monitoring-*` indices, when Monitoring is enabled. Monitoring enhances the sent payload of telemetry by producing usage data potentially of multiple clusters that exist in the monitoring data. Monitoring data is time-based, and the time frame of collection is the last 15 minutes.
    - Live-pulled from ES API endpoints. This will get just real-time stats without context of historical data.
diff --git a/src/plugins/usage_collection/server/collector/collector.ts b/src/plugins/usage_collection/server/collector/collector.ts
index 8e86bc3d1cd26..afd5b5883ff17 100644
--- a/src/plugins/usage_collection/server/collector/collector.ts
+++ b/src/plugins/usage_collection/server/collector/collector.ts
@@ -19,7 +19,6 @@
 
 import {
   Logger,
-  LegacyAPICaller,
   ElasticsearchClient,
   ISavedObjectsRepository,
   SavedObjectsClientContract,
@@ -54,10 +53,6 @@ export type MakeSchemaFrom = {
  * @remark Bear in mind when testing your collector that your user has the same privileges as the Kibana Internal user to ensure the expected data is sent to the remote cluster.
  */
 export type CollectorFetchContext = {
-  /**
-   * @deprecated Scoped Legacy Elasticsearch client: use esClient instead
-   */
-  callCluster: LegacyAPICaller;
   /**
    * Request-scoped Elasticsearch client
    * @remark Bear in mind when testing your collector that your user has the same privileges as the Kibana Internal user to ensure the expected data is sent to the remote cluster (more info: {@link CollectorFetchContext})
diff --git a/src/plugins/usage_collection/server/collector/collector_set.test.ts b/src/plugins/usage_collection/server/collector/collector_set.test.ts
index 90a69043e0635..310714cc2a48a 100644
--- a/src/plugins/usage_collection/server/collector/collector_set.test.ts
+++ b/src/plugins/usage_collection/server/collector/collector_set.test.ts
@@ -44,7 +44,6 @@ describe('CollectorSet', () => {
     loggerSpies.debug.mockRestore();
     loggerSpies.warn.mockRestore();
   });
-  const mockCallCluster = jest.fn().mockResolvedValue({ passTest: 1000 });
   const mockEsClient = elasticsearchServiceMock.createClusterClient().asInternalUser;
   const mockSoClient = savedObjectsRepositoryMock.create();
   const req = void 0; // No need to instantiate any KibanaRequest in these tests
@@ -83,18 +82,19 @@ describe('CollectorSet', () => {
   });
 
   it('should log debug status of fetching from the collector', async () => {
+    mockEsClient.get.mockResolvedValue({ passTest: 1000 } as any);
     const collectors = new CollectorSet({ logger });
     collectors.registerCollector(
       new Collector(logger, {
         type: 'MY_TEST_COLLECTOR',
         fetch: (collectorFetchContext: any) => {
-          return collectorFetchContext.callCluster();
+          return collectorFetchContext.esClient.get();
         },
         isReady: () => true,
       })
     );
-    const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
+    const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
     expect(loggerSpies.debug).toHaveBeenCalledTimes(1);
     expect(loggerSpies.debug).toHaveBeenCalledWith(
       'Fetching data from MY_TEST_COLLECTOR collector'
     );
@@ -119,7 +119,7 @@ describe('CollectorSet', () => {
 
     let result;
     try {
-      result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
+      result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
     } catch (err) {
       // Do nothing
     }
@@ -137,7 +137,7 @@ describe('CollectorSet', () => {
       })
     );
 
-    const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
+    const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
     expect(result).toStrictEqual([
       {
         type: 'MY_TEST_COLLECTOR',
@@ -155,7 +155,7 @@ describe('CollectorSet', () => {
       } as any)
     );
 
-    const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
+    const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
     expect(result).toStrictEqual([
       {
         type: 'MY_TEST_COLLECTOR',
diff --git a/src/plugins/usage_collection/server/collector/collector_set.ts b/src/plugins/usage_collection/server/collector/collector_set.ts
index 3555b05518fdb..fc47bbcd16649 100644
--- a/src/plugins/usage_collection/server/collector/collector_set.ts
+++ b/src/plugins/usage_collection/server/collector/collector_set.ts
@@ -20,7 +20,6 @@
 import { snakeCase } from 'lodash';
 import {
   Logger,
-  LegacyAPICaller,
   ElasticsearchClient,
   ISavedObjectsRepository,
   SavedObjectsClientContract,
@@ -171,7 +170,6 @@ export class CollectorSet {
   };
 
   public bulkFetch = async (
-    callCluster: LegacyAPICaller,
     esClient: ElasticsearchClient,
     soClient: SavedObjectsClientContract | ISavedObjectsRepository,
     kibanaRequest: KibanaRequest | undefined, // intentionally `| undefined` to enforce providing the parameter
@@ -182,7 +180,6 @@ export class CollectorSet {
         this.logger.debug(`Fetching data from ${collector.type} collector`);
         try {
           const context = {
-            callCluster,
             esClient,
             soClient,
             ...(collector.extendFetchContext.kibanaRequest && { kibanaRequest }),
@@ -212,14 +209,12 @@ export class CollectorSet {
   };
 
   public bulkFetchUsage = async (
-    callCluster: LegacyAPICaller,
     esClient: ElasticsearchClient,
     savedObjectsClient: SavedObjectsClientContract | ISavedObjectsRepository,
     kibanaRequest: KibanaRequest | undefined // intentionally `| undefined` to enforce providing the parameter
   ) => {
     const usageCollectors = this.getFilteredCollectorSet((c) => c instanceof UsageCollector);
     return await this.bulkFetch(
-      callCluster,
       esClient,
       savedObjectsClient,
       kibanaRequest,
diff --git a/src/plugins/usage_collection/server/routes/stats/stats.ts b/src/plugins/usage_collection/server/routes/stats/stats.ts
index 16a1c2c742f04..bbb4c94e02d5f 100644
--- a/src/plugins/usage_collection/server/routes/stats/stats.ts
+++ b/src/plugins/usage_collection/server/routes/stats/stats.ts
@@ -28,7 +28,6 @@ import {
   IRouter,
   ISavedObjectsRepository,
   KibanaRequest,
-  LegacyAPICaller,
   MetricsServiceSetup,
   SavedObjectsClientContract,
   ServiceStatus,
@@ -66,17 +65,11 @@ export function registerStatsRoute({
   overallStatus$: Observable;
 }) {
   const getUsage = async (
-    callCluster: LegacyAPICaller,
     esClient: ElasticsearchClient,
     savedObjectsClient: SavedObjectsClientContract | ISavedObjectsRepository,
     kibanaRequest: KibanaRequest
   ): Promise => {
-    const usage = await collectorSet.bulkFetchUsage(
-      callCluster,
-      esClient,
-      savedObjectsClient,
-      kibanaRequest
-    );
+    const usage = await collectorSet.bulkFetchUsage(esClient, savedObjectsClient, kibanaRequest);
     return collectorSet.toObject(usage);
   };
 
@@ -110,7 +103,6 @@ export function registerStatsRoute({
 
       let extended;
       if (isExtended) {
-        const callCluster = context.core.elasticsearch.legacy.client.callAsCurrentUser;
         const { asCurrentUser } = context.core.elasticsearch.client;
         const savedObjectsClient = context.core.savedObjects.client;
 
@@ -122,7 +114,7 @@ export function registerStatsRoute({
         }
 
         const usagePromise = shouldGetUsage
-          ? getUsage(callCluster, asCurrentUser, savedObjectsClient, req)
+          ? getUsage(asCurrentUser, savedObjectsClient, req)
           : Promise.resolve({});
         const [usage, clusterUuid] = await Promise.all([
           usagePromise,
diff --git a/src/plugins/usage_collection/server/usage_collection.mock.ts b/src/plugins/usage_collection/server/usage_collection.mock.ts
index fb0a2e56ff3c9..1295572335a66 100644
--- a/src/plugins/usage_collection/server/usage_collection.mock.ts
+++ b/src/plugins/usage_collection/server/usage_collection.mock.ts
@@ -50,7 +50,6 @@ export const createUsageCollectionSetupMock = () => {
 
 export function createCollectorFetchContextMock(): jest.Mocked> {
   const collectorFetchClientsMock: jest.Mocked> = {
-    callCluster: elasticsearchServiceMock.createLegacyClusterClient().callAsInternalUser,
     esClient: elasticsearchServiceMock.createClusterClient().asInternalUser,
     soClient: savedObjectsRepositoryMock.create(),
   };
@@ -61,7 +60,6 @@ export function createCollectorFetchContextWithKibanaMock(): jest.Mocked<
   CollectorFetchContext
 > {
   const collectorFetchClientsMock: jest.Mocked> = {
-    callCluster: elasticsearchServiceMock.createLegacyClusterClient().callAsInternalUser,
     esClient: elasticsearchServiceMock.createClusterClient().asInternalUser,
     soClient: savedObjectsRepositoryMock.create(),
     kibanaRequest: httpServerMock.createKibanaRequest(),
diff --git a/x-pack/plugins/monitoring/server/telemetry_collection/get_beats_stats.ts b/x-pack/plugins/monitoring/server/telemetry_collection/get_beats_stats.ts
index a7f001f166d7d..2aee9bbda77b7 100644
--- a/x-pack/plugins/monitoring/server/telemetry_collection/get_beats_stats.ts
+++ b/x-pack/plugins/monitoring/server/telemetry_collection/get_beats_stats.ts
@@ -6,7 +6,7 @@
 
 import { get } from 'lodash';
 import { SearchResponse } from 'elasticsearch';
-import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
+import { LegacyAPICaller } from 'kibana/server';
 import { createQuery } from './create_query';
 import { INDEX_PATTERN_BEATS } from '../../common/constants';
 
@@ -318,7 +318,7 @@ export function processResults(
  * @return {Promise}
  */
 async function fetchBeatsByType(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string,
@@ -382,7 +382,7 @@ async function fetchBeatsByType(
 }
 
 export async function fetchBeatsStats(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string,
@@ -392,7 +392,7 @@
 export async function fetchBeatsStates(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string,
@@ -410,7 +410,7 @@ export interface BeatsStatsByClusterUuid {
  * @return {Object} - Beats stats in an object keyed by the cluster UUIDs
  */
 export async function getBeatsStats(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string
diff --git a/x-pack/plugins/monitoring/server/telemetry_collection/get_es_stats.ts b/x-pack/plugins/monitoring/server/telemetry_collection/get_es_stats.ts
index 6325ed0c4b052..455327336462f 100644
--- a/x-pack/plugins/monitoring/server/telemetry_collection/get_es_stats.ts
+++ b/x-pack/plugins/monitoring/server/telemetry_collection/get_es_stats.ts
@@ -5,7 +5,7 @@
  */
 
 import { SearchResponse } from 'elasticsearch';
-import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
+import { LegacyAPICaller } from 'kibana/server';
 import { INDEX_PATTERN_ELASTICSEARCH } from '../../common/constants';
 
 /**
@@ -16,7 +16,7 @@ import { INDEX_PATTERN_ELASTICSEARCH } from '../../common/constants';
  * @param {Array} clusterUuids The string Cluster UUIDs to fetch details for
  */
 export async function getElasticsearchStats(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   maxBucketSize: number
 ) {
@@ -34,7 +34,7 @@ export async function getElasticsearchStats(
  * Returns the response for the aggregations to fetch details for the product.
  */
 export function fetchElasticsearchStats(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   maxBucketSize: number
 ) {
diff --git a/x-pack/plugins/monitoring/server/telemetry_collection/get_high_level_stats.ts b/x-pack/plugins/monitoring/server/telemetry_collection/get_high_level_stats.ts
index ce1c6ccd4b106..464c3ae406a53 100644
--- a/x-pack/plugins/monitoring/server/telemetry_collection/get_high_level_stats.ts
+++ b/x-pack/plugins/monitoring/server/telemetry_collection/get_high_level_stats.ts
@@ -6,7 +6,7 @@
 
 import { get } from 'lodash';
 import { SearchResponse } from 'elasticsearch';
-import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
+import { LegacyAPICaller } from 'kibana/server';
 import { createQuery } from './create_query';
 import {
   INDEX_PATTERN_KIBANA,
@@ -247,7 +247,7 @@ function getIndexPatternForStackProduct(product: string) {
  * Returns an object keyed by the cluster UUIDs to make grouping easier.
  */
 export async function getHighLevelStats(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string,
@@ -268,7 +268,7 @@ export async function getHighLevelStats(
 
 export async function fetchHighLevelStats<
   T extends { cluster_uuid?: string } = { cluster_uuid?: string }
 >(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string,
diff --git a/x-pack/plugins/monitoring/server/telemetry_collection/get_kibana_stats.ts b/x-pack/plugins/monitoring/server/telemetry_collection/get_kibana_stats.ts
index 57e75d9c9c12e..cf9ac7768048b 100644
--- a/x-pack/plugins/monitoring/server/telemetry_collection/get_kibana_stats.ts
+++ b/x-pack/plugins/monitoring/server/telemetry_collection/get_kibana_stats.ts
@@ -7,7 +7,7 @@
 
 import moment from 'moment';
 import { isEmpty } from 'lodash';
 import { SearchResponse } from 'elasticsearch';
-import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
+import { LegacyAPICaller } from 'kibana/server';
 import { KIBANA_SYSTEM_ID, TELEMETRY_COLLECTION_INTERVAL } from '../../common/constants';
 import {
   fetchHighLevelStats,
@@ -182,7 +182,7 @@ export function ensureTimeSpan(
  * specialized usage data that comes with kibana stats (kibana_stats.usage).
  */
 export async function getKibanaStats(
-  callCluster: StatsCollectionConfig['callCluster'],
+  callCluster: LegacyAPICaller,
   clusterUuids: string[],
   start: string,
   end: string,
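The monitoring fetchers above deliberately keep the legacy `callCluster` signature for now; this change only stops reaching into `StatsCollectionConfig` for its type. If one of them were later moved to the new client, the shape could look roughly like the sketch below — the function name is hypothetical, the query body is illustrative rather than the real aggregation, and the `{ body }` unwrapping assumes the 7.x Elasticsearch client:

```ts
import { ElasticsearchClient } from 'kibana/server';
import { SearchResponse } from 'elasticsearch';
import { INDEX_PATTERN_ELASTICSEARCH } from '../../common/constants';

// Hypothetical follow-up migration: the same kind of fetcher expressed against
// the new client instead of LegacyAPICaller.
export async function fetchElasticsearchStatsWithNewClient(
  esClient: ElasticsearchClient,
  clusterUuids: string[],
  maxBucketSize: number
) {
  // callCluster('search', params) becomes a typed client call; the payload now
  // lives under `body` on the transport response.
  const { body } = await esClient.search<SearchResponse<unknown>>({
    index: INDEX_PATTERN_ELASTICSEARCH,
    size: maxBucketSize,
    ignore_unavailable: true,
    body: {
      query: { terms: { cluster_uuid: clusterUuids } },
    },
  });
  return body;
}
```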