feat: genai sample base #3587

Merged: 4 commits, Dec 13, 2023
Changes from 3 commits
1 change: 1 addition & 0 deletions .github/auto-label.yaml
@@ -45,6 +45,7 @@ path:
eventarc: "eventarc"
error-reporting: "clouderrorreporting"
functions: "cloudfunctions"
generative-ai: "genai"
game-servers: "gameservices"
healthcare: "healhcare"
iam: "iam"
106 changes: 106 additions & 0 deletions .github/workflows/generative-ai-snippets.yaml
@@ -0,0 +1,106 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: generative-ai-snippets
on:
  push:
    branches:
      - main
    paths:
      - 'generative-ai/snippets/**'
      - '.github/workflows/generative-ai-snippets.yaml'
  pull_request:
    paths:
      - 'generative-ai/snippets/**'
      - '.github/workflows/generative-ai-snippets.yaml'
  pull_request_target:
    types: [labeled]
    paths:
      - 'generative-ai/snippets/**'
      - '.github/workflows/generative-ai-snippets.yaml'
  schedule:
    - cron: '0 0 * * 0'
jobs:
  test:
    if: github.event.action != 'labeled' || github.event.label.name == 'actions:force-run'
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: 'read'
      id-token: 'write'
    defaults:
      run:
        working-directory: 'generative-ai/snippets'
    steps:
      - uses: actions/checkout@v4.1.0
        with:
          ref: ${{github.event.pull_request.head.sha}}
      - uses: 'google-github-actions/auth@v1.1.1'
        with:
          workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider'
          service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com'
          create_credentials_file: 'true'
          access_token_lifetime: 600s
      - id: secrets
        uses: 'google-github-actions/get-secretmanager-secrets@v1'
        with:
          secrets: |-
            caip_id:nodejs-docs-samples-tests/nodejs-docs-samples-ai-platform-caip-project-id
            location:nodejs-docs-samples-tests/nodejs-docs-samples-ai-platform-location
      - uses: actions/setup-node@v4.0.0
        with:
          node-version: 16
      - name: Get npm cache directory
        id: npm-cache-dir
        shell: bash
        run: echo "dir=$(npm config get cache)" >> ${GITHUB_OUTPUT}
      - uses: actions/cache@v3
        id: npm-cache
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-
      - name: install repo dependencies
        run: npm install
        working-directory: .
      - name: install directory dependencies
        run: npm install
      - run: npm run build --if-present
      - name: set env vars for scheduled run
        if: github.event.action == 'schedule'
        run: |
          echo "MOCHA_REPORTER_SUITENAME=generative-ai-snippets" >> $GITHUB_ENV
          echo "MOCHA_REPORTER_OUTPUT=${{github.run_id}}_sponge_log.xml" >> $GITHUB_ENV
          echo "MOCHA_REPORTER=xunit" >> $GITHUB_ENV
      - run: npm test
        env:
          LOCATION: ${{ steps.secrets.outputs.location }}
          CAIP_PROJECT_ID: ${{ steps.secrets.outputs.caip_id }}
      - name: upload test results for FlakyBot workflow
        if: github.event.action == 'schedule' && always()
        uses: actions/upload-artifact@v3
        env:
          MOCHA_REPORTER_OUTPUT: "${{github.run_id}}_sponge_log.xml"
        with:
          name: test-results
          path: generative-ai/snippets/${{ env.MOCHA_REPORTER_OUTPUT }}
          retention-days: 1
  flakybot:
    permissions:
      contents: 'read'
      id-token: 'write'
    if: github.event_name == 'schedule' && always() # always() submits logs even if tests fail
    uses: ./.github/workflows/flakybot.yaml
    needs: [test]
1 change: 1 addition & 0 deletions .github/workflows/utils/workflows-secrets.json
@@ -5,5 +5,6 @@
"iam/deny",
"security-center/snippets",
"storagetransfer",
"generative-ai/snippets",
"vision"
]
1 change: 1 addition & 0 deletions CODEOWNERS
@@ -50,6 +50,7 @@ monitoring/opencensus @GoogleCloudPlatform/nodejs-samples-reviewers

# Data & AI
ai-platform @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
generative-ai @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
automl @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
cloud-language @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
contact-center-insights @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
52 changes: 52 additions & 0 deletions generative-ai/snippets/countTokens.js
@@ -0,0 +1,52 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const {VertexAI} = require('@google-cloud/vertexai');

async function countTokens(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // [START aiplatform_gemini_token_count]

  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';
  // const model = 'gemini-pro';

  // Initialize Vertex with your Cloud project and location
  const vertex_ai = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertex_ai.preview.getGenerativeModel({
    model: model,
  });

  const req = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const countTokensResp = await generativeModel.countTokens(req);
  console.log('count tokens response: ', countTokensResp);

  // [END aiplatform_gemini_token_count]
}

countTokens(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
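
The workflow above runs npm test inside generative-ai/snippets, though no test files are among the files shown here. Because countTokens.js forwards process.argv.slice(2) into its (projectId, location, model) parameters, a quick local run looks like node countTokens.js your-project-id us-central1 gemini-pro, using the placeholder values from the snippet's commented-out defaults. A minimal Mocha check in that spirit might look like the sketch below; the file path, the execSync wrapper, and the asserted substring are illustrative assumptions, not contents of this PR:

// Hypothetical sketch (not part of this PR): generative-ai/snippets/test/countTokens.test.js
const assert = require('assert');
const cp = require('child_process');

// CAIP_PROJECT_ID and LOCATION are the env vars the workflow injects from Secret Manager.
const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-pro';

describe('Count tokens', () => {
  it('should run the countTokens snippet and print a response', () => {
    // Run the snippet the same way the CLI entry point expects its arguments.
    const output = cp
      .execSync(`node ./countTokens.js ${projectId} ${location} ${model}`)
      .toString();
    // The snippet logs 'count tokens response: ' followed by the raw API response.
    assert.match(output, /count tokens response/);
  });
});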
124 changes: 124 additions & 0 deletions generative-ai/snippets/index.js
@@ -0,0 +1,124 @@
const {
  VertexAI,
  HarmBlockThreshold,
  HarmCategory,
} = require('@google-cloud/vertexai');

const project = 'cloud-llm-preview1';
const location = 'us-central1';

// Initialize Vertex with your Cloud project and location
const vertex_ai = new VertexAI({project: project, location: location});

// Instantiate the models
const generativeModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro',
  // The following parameters are optional
  // They can also be passed to individual content generation requests
  safety_settings: [
    {
      category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
      threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    },
  ],
  generation_config: {max_output_tokens: 256},
});

const generativeVisionModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro-vision',
});

[Check failure (GitHub Actions / lint) on line 27 in generative-ai/snippets/index.js: 'generativeVisionModel' is assigned a value but never used]

async function streamContentTextOnly() {
  const req = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const streamingResp = await generativeModel.generateContentStream(req);

  for await (const item of streamingResp.stream) {
    console.log('stream chunk:', item);
  }

  console.log('aggregated response: ', await streamingResp.response);
}

async function nonStreamingTextOnly() {
  const req = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const nonstreamingResp = await generativeModel.generateContent(req);
  console.log('non-streaming response: ', await nonstreamingResp.response);
}

async function countTokens() {
  const req = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const countTokensResp = await generativeModel.countTokens(req);
  console.log('count tokens response: ', countTokensResp);
}

async function nonStreamingChat() {
  const chat = generativeModel.startChat({});
  const result1 = await chat.sendMessage('hello');
  console.log('send message result1: ', result1);
  const resp1 = result1.response;
  console.log('send message response1: ', resp1);
  const result2 = await chat.sendMessage('what day is it today?');
  console.log('result2: ', result2);
  const resp2 = result2.response;
  console.log('send message response2: ', resp2);
  const result3 = await chat.sendMessage('what day is it tomorrow?');
  console.log('result3: ', result3);
  const resp3 = result3.response;
  console.log('send message response3: ', resp3);
}

async function streamingChat() {
  const chat = generativeModel.startChat({});
  const streamResult1 = await chat.sendMessageStream('hello again');
  console.log('stream result1: ', streamResult1);
  const streamResp1 = await streamResult1.response;
  console.log('stream send message response1: ', streamResp1);
  const streamResult2 = await chat.sendMessageStream('what is the date today?');
  console.log('stream result2: ', streamResult2);
  const streamResp2 = await streamResult2.response;
  console.log('stream send message response2: ', streamResp2);
  const streamResult3 = await chat.sendMessageStream(
    'what is the date tomorrow?'
  );
  console.log('stream result3: ', streamResult3);
  const streamResp3 = await streamResult3.response;
  console.log('stream send message response3: ', streamResp3);
}

async function multiPartContent() {
  const filePart = {
    file_data: {
      file_uri: 'gs://sararob_imagegeneration_test/kitten.jpeg',
      mime_type: 'image/jpeg',
    },
  };
  const textPart = {text: 'What is this a picture of?'};

  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };

  const generativeVisionModel = vertex_ai.preview.getGenerativeModel({
    model: 'gemini-pro-vision',
  });

  const resp = await generativeVisionModel.generateContentStream(request);
  const contentResponse = await resp.response;
  console.log(contentResponse.candidates[0].content);
}

nonStreamingTextOnly();
streamContentTextOnly();
countTokens();
nonStreamingChat();
streamingChat();
multiPartContent();
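
One way to clear the lint failure flagged above would be to drop the unused module-level gemini-pro-vision model, or to have multiPartContent reuse it instead of instantiating a second vision model locally. The following is a sketch of the latter option, relying on the declarations at the top of index.js; it is illustrative only, not what this revision of the PR does:

// Hypothetical rework (not in this PR): reuse the module-level generativeVisionModel
// so it is no longer unused at module scope.
async function multiPartContent() {
  const filePart = {
    file_data: {
      file_uri: 'gs://sararob_imagegeneration_test/kitten.jpeg',
      mime_type: 'image/jpeg',
    },
  };
  const textPart = {text: 'What is this a picture of?'};

  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };

  // Uses the generativeVisionModel declared at the top of the file
  // instead of creating a new instance here.
  const resp = await generativeVisionModel.generateContentStream(request);
  const contentResponse = await resp.response;
  console.log(contentResponse.candidates[0].content);
}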
73 changes: 73 additions & 0 deletions generative-ai/snippets/nonStreamingChat.js
@@ -0,0 +1,73 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const {VertexAI} = require('@google-cloud/vertexai');

function wait(time) {
  return new Promise(resolve => {
    setTimeout(resolve, time);
  });
}

[Check failure (GitHub Actions / lint) on line 17 in generative-ai/snippets/nonStreamingChat.js: 'wait' is defined but never used]

async function createNonStreamingChat(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // TODO: Find better method. Setting delay to give api time to respond, otherwise it will 404
  // await wait(10);

  // [START aiplatform_gemini_multiturn_chat]
  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: model,
  });

  const chat = generativeModel.startChat({});

  const chatInput1 = 'Hello';
  console.log(`User: ${chatInput1}`);

  const result1 = await chat.sendMessage(chatInput1);
  const response1 = result1.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', response1);

  const chatInput2 = 'Can you tell me a scientific fun fact?';
  console.log(`User: ${chatInput2}`);
  const result2 = await chat.sendMessage(chatInput2);
  const response2 = result2.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', response2);

  const chatInput3 = 'How can I learn more about that?';
  console.log(`User: ${chatInput3}`);
  const result3 = await chat.sendMessage(chatInput3);
  const response3 = result3.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', response3);

  // [END aiplatform_gemini_multiturn_chat]
}

createNonStreamingChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
[Additional files in this pull request did not load.]