
Commit

feat(generative-ai): update samples to Gemini Flash; add updated region tags (#3716)

* feat(generative-ai): update samples to Gemini Flash; add updated region tags

* Trigger Build

* remove assert on a certain word

* remove assert on a certain word

* Trigger Build
irataxy committed Jun 20, 2024
1 parent e0a124d commit 18163b8
Showing 28 changed files with 49 additions and 30 deletions.
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_function_calling_chat]
// [START aiplatform_gemini_function_calling_chat]
const {
VertexAI,
@@ -91,6 +92,7 @@ async function functionCallingStreamChat(
console.log(response2.candidates[0].content.parts[0].text);
}
// [END aiplatform_gemini_function_calling_chat]
// [END generativeaionvertexai_gemini_function_calling_chat]

functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
console.error(err.message);
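
Each snippet gains a second, service-scoped region tag (generativeaionvertexai_*) nested around the existing aiplatform_* pair, so the same code block can be pulled into docs under either tag name. A minimal sketch of the resulting layout; the sample body and parameters are elided rather than copied from the file:

// Sketch only: the body of the real functionCallingStreamChat sample is elided;
// the point is the nesting order of the two tag pairs.
// [START generativeaionvertexai_gemini_function_calling_chat]
// [START aiplatform_gemini_function_calling_chat]
const {VertexAI /* , ...other SDK imports as in the file */} = require('@google-cloud/vertexai');

async function functionCallingStreamChat(/* projectId, location, model */) {
  // ...function-calling chat sample as in the file...
}
// [END aiplatform_gemini_function_calling_chat]
// [END generativeaionvertexai_gemini_function_calling_chat]
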
2 changes: 1 addition & 1 deletion generative-ai/snippets/gemini-all-modalities.js
@@ -22,7 +22,7 @@ async function analyze_all_modalities(projectId = 'PROJECT_ID') {
const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});

const generativeModel = vertexAI.getGenerativeModel({
model: 'gemini-1.5-pro-preview-0409',
model: 'gemini-1.5-flash-001',
});

const videoFilePart = {
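
The media snippets (all modalities, audio summarization, audio transcription, PDF, system instruction) change only the model id handed to getGenerativeModel. A hedged sketch of the updated wiring; everything outside the getGenerativeModel call is abridged from the sample, not part of this diff:

const {VertexAI} = require('@google-cloud/vertexai');

async function analyze_all_modalities(projectId = 'PROJECT_ID') {
  const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});

  const generativeModel = vertexAI.getGenerativeModel({
    model: 'gemini-1.5-flash-001', // previously 'gemini-1.5-pro-preview-0409'
  });

  // ...assemble the video/image/text parts and call
  // generativeModel.generateContent(request) as in the full sample...
}
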
2 changes: 1 addition & 1 deletion generative-ai/snippets/gemini-audio-summarization.js
@@ -22,7 +22,7 @@ async function summarize_audio(projectId = 'PROJECT_ID') {
const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});

const generativeModel = vertexAI.getGenerativeModel({
model: 'gemini-1.5-pro-preview-0409',
model: 'gemini-1.5-flash-001',
});

const filePart = {
2 changes: 1 addition & 1 deletion generative-ai/snippets/gemini-audio-transcription.js
@@ -22,7 +22,7 @@ async function transcript_audio(projectId = 'PROJECT_ID') {
const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});

const generativeModel = vertexAI.getGenerativeModel({
model: 'gemini-1.5-pro-preview-0409',
model: 'gemini-1.5-flash-001',
});

const filePart = {
2 changes: 1 addition & 1 deletion generative-ai/snippets/gemini-pdf.js
@@ -22,7 +22,7 @@ async function analyze_pdf(projectId = 'PROJECT_ID') {
const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});

const generativeModel = vertexAI.getGenerativeModel({
model: 'gemini-1.5-pro-preview-0409',
model: 'gemini-1.5-flash-001',
});

const filePart = {
2 changes: 1 addition & 1 deletion generative-ai/snippets/gemini-system-instruction.js
@@ -22,7 +22,7 @@ async function set_system_instruction(projectId = 'PROJECT_ID') {
const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});

const generativeModel = vertexAI.getGenerativeModel({
model: 'gemini-1.5-pro-preview-0409',
model: 'gemini-1.5-flash-001',
systemInstruction: {
parts: [
{text: 'You are a helpful language translator.'},
@@ -25,7 +25,7 @@ const {
async function generateContentWithVertexAISearchGrounding(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-1.0-pro-002',
model = 'gemini-1.5-flash-001',
dataStoreId = 'DATASTORE_ID'
) {
// Initialize Vertex with your Cloud project and location
@@ -25,7 +25,7 @@ const {
async function generateContentWithGoogleSearchGrounding(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-1.0-pro-002'
model = 'gemini-1.5-flash-001'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
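
Both grounding snippets keep their signatures; only the default model moves from gemini-1.0-pro-002 to gemini-1.5-flash-001. A hedged usage sketch, assuming the functions are in scope (the diff does not show how they are invoked):

// With the new defaults, the model argument can be omitted and the samples
// fall back to 'gemini-1.5-flash-001'. The project and datastore ids below
// are placeholders, not values from this repository.
generateContentWithGoogleSearchGrounding('my-project-id')
  .catch(err => console.error(err.message));

generateContentWithVertexAISearchGrounding(
  'my-project-id',
  'us-central1',
  'gemini-1.5-flash-001',
  'my-datastore-id'
).catch(err => console.error(err.message));
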
2 changes: 2 additions & 0 deletions generative-ai/snippets/nonStreamingChat.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_multiturn_chat_nonstreaming]
// [START aiplatform_gemini_multiturn_chat_nonstreaming]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -48,6 +49,7 @@ async function createNonStreamingChat(
console.log('Chat response 3: ', JSON.stringify(response3));
}
// [END aiplatform_gemini_multiturn_chat_nonstreaming]
// [END generativeaionvertexai_gemini_multiturn_chat_nonstreaming]

createNonStreamingChat(...process.argv.slice(2)).catch(err => {
console.error(err.message);
2 changes: 2 additions & 0 deletions generative-ai/snippets/nonStreamingContent.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_content_nonstreaming]
// [START aiplatform_gemini_content_nonstreaming]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -52,6 +53,7 @@ async function createNonStreamingContent(
console.log(fullTextResponse);
}
// [END aiplatform_gemini_content_nonstreaming]
// [END generativeaionvertexai_gemini_content_nonstreaming]

createNonStreamingContent(...process.argv.slice(2)).catch(err => {
console.error(err.message);
2 changes: 2 additions & 0 deletions generative-ai/snippets/nonStreamingMultipartContent.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_get_started]
// [START aiplatform_gemini_get_started]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -67,6 +68,7 @@ async function createNonStreamingMultipartContent(
console.log(fullTextResponse);
}
// [END aiplatform_gemini_get_started]
// [END generativeaionvertexai_gemini_get_started]

createNonStreamingMultipartContent(...process.argv.slice(2)).catch(err => {
console.error(err.message);
2 changes: 2 additions & 0 deletions generative-ai/snippets/safetySettings.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_safety_settings]
// [START aiplatform_gemini_safety_settings]
const {
VertexAI,
@@ -72,6 +73,7 @@ async function setSafetySettings(
console.log('This response stream terminated due to safety concerns.');
}
// [END aiplatform_gemini_safety_settings]
// [END generativeaionvertexai_gemini_safety_settings]

setSafetySettings(...process.argv.slice(2)).catch(err => {
console.error(err.message);
4 changes: 3 additions & 1 deletion generative-ai/snippets/sendMultiModalPromptWithImage.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_single_turn_multi_image]
// [START aiplatform_gemini_single_turn_multi_image]
const {VertexAI} = require('@google-cloud/vertexai');
const axios = require('axios');
@@ -27,7 +28,7 @@ async function getBase64(url) {
async function sendMultiModalPromptWithImage(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-1.0-pro-vision-001'
model = 'gemini-1.5-flash-001'
) {
// For images, the SDK supports base64 strings
const landmarkImage1 = await getBase64(
@@ -94,6 +95,7 @@ async function sendMultiModalPromptWithImage(
console.log(fullTextResponse);
}
// [END aiplatform_gemini_single_turn_multi_image]
// [END generativeaionvertexai_gemini_single_turn_multi_image]

sendMultiModalPromptWithImage(...process.argv.slice(2)).catch(err => {
console.error(err.message);
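
The multi-image snippet keeps its axios-based getBase64 helper; only the model default changes. The helper's body is not shown in this diff, but a typical implementation matching the hunk header would look roughly like this (the responseType and encoding details are assumptions):

const axios = require('axios');

// Download an image and return it as a base64 string, the format the
// Vertex AI SDK accepts for inline image parts.
async function getBase64(url) {
  const response = await axios.get(url, {responseType: 'arraybuffer'});
  return Buffer.from(response.data).toString('base64');
}
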
4 changes: 3 additions & 1 deletion generative-ai/snippets/sendMultiModalPromptWithVideo.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_single_turn_video]
// [START aiplatform_gemini_single_turn_video]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -21,7 +22,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function sendMultiModalPromptWithVideo(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-1.0-pro-vision-001'
model = 'gemini-1.5-flash-001'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
@@ -61,6 +62,7 @@ async function sendMultiModalPromptWithVideo(
console.log(fullTextResponse);
}
// [END aiplatform_gemini_single_turn_video]
// [END generativeaionvertexai_gemini_single_turn_video]

sendMultiModalPromptWithVideo(...process.argv.slice(2)).catch(err => {
console.error(err.message);
2 changes: 2 additions & 0 deletions generative-ai/snippets/streamChat.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_multiturn_chat_stream]
// [START aiplatform_gemini_multiturn_chat]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -42,6 +43,7 @@ async function createStreamChat(
}
}
// [END aiplatform_gemini_multiturn_chat]
// [END generativeaionvertexai_gemini_multiturn_chat_stream]

createStreamChat(...process.argv.slice(2)).catch(err => {
console.error(err.message);
2 changes: 2 additions & 0 deletions generative-ai/snippets/streamContent.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_content]
// [START aiplatform_gemini_content]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -48,6 +49,7 @@ async function createStreamContent(
}
}
// [END aiplatform_gemini_content]
// [END generativeaionvertexai_gemini_content]

createStreamContent(...process.argv.slice(2)).catch(err => {
console.error(err.message);
4 changes: 3 additions & 1 deletion generative-ai/snippets/streamMultipartContent.js
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// [START generativeaionvertexai_gemini_get_started]
// [START aiplatform_gemini_get_started]
const {VertexAI} = require('@google-cloud/vertexai');

@@ -21,7 +22,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createStreamMultipartContent(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-1.0-pro-vision-001',
model = 'gemini-1.5-flash-001',
image = 'gs://generativeai-downloads/images/scones.jpg',
mimeType = 'image/jpeg'
) {
@@ -63,6 +64,7 @@ async function createStreamMultipartContent(
}
}
// [END aiplatform_gemini_get_started]
// [END generativeaionvertexai_gemini_get_started]

createStreamMultipartContent(...process.argv.slice(2)).catch(err => {
console.error(err.message);
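
With the updated defaults, the streaming multipart sample can be exercised with just a project id; model, image URI, and MIME type fall back to the values above. A hedged sketch; the CLI form mirrors the process.argv.slice(2) pattern these snippets already use:

// Direct call relying on the new defaults; 'my-project-id' is a placeholder.
createStreamMultipartContent('my-project-id').catch(err => {
  console.error(err.message);
});

// Equivalent command line, arguments in the same order as the parameters:
//   node streamMultipartContent.js my-project-id us-central1 \
//     gemini-1.5-flash-001 gs://generativeai-downloads/images/scones.jpg image/jpeg
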
@@ -22,7 +22,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
const projectId = process.env.GOOGLE_SAMPLES_PROJECT;
const location = process.env.LOCATION;
const datastore_id = process.env.DATASTORE_ID;
const model = 'gemini-1.0-pro-002';
const model = 'gemini-1.5-flash-001';

describe('Private data grounding', async () => {
/**
@@ -31,7 +31,7 @@ describe('Private data grounding', async () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-002';
// const model = 'gemini-1.5-flash-001';

it('should ground results in private VertexAI search data', async () => {
const output = execSync(
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-002';
const model = 'gemini-1.5-flash-001';

describe('Google search grounding', async () => {
/**
@@ -30,7 +30,7 @@ describe('Google search grounding', async () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-002';
// const model = 'gemini-1.5-flash-001';

it('should ground results in public search data', async () => {
const output = execSync(
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.5-pro-preview-0409';
const model = 'gemini-1.5-flash-001';

describe('Generative AI Multimodal Text Inference', () => {
/**
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/nonStreamingContent.test.js
@@ -30,7 +30,7 @@ describe('Generative AI NonStreaming Content', () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';
// const model = 'gemini-1.5-flash-001';

it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
const output = execSync(
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision-001';
const model = 'gemini-1.5-flash-001';

describe('Generative AI NonStreaming Multipart Content', () => {
/**
@@ -30,7 +30,7 @@ describe('Generative AI NonStreaming Multipart Content', () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';
// const model = 'gemini-1.5-flash-001';

const image = 'gs://generativeai-downloads/images/scones.jpg';

@@ -43,6 +43,5 @@ describe('Generative AI NonStreaming Multipart Content', () => {
assert(output.match(/Prompt Text:/));
assert(output.match(/what is shown in this image/));
assert(output.match(/Non-Streaming Response Text:/));
assert(output.match(/scone/));
});
});
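
Dropping the assertion on the word "scone" lines up with the "remove assert on a certain word" commit messages: the test keeps the structural checks on the prompt echo and response framing but no longer pins the model's exact wording, which can vary between model versions. A hedged sketch of the resulting test body; the it() description and command line are assumptions, only the remaining assertions come from this diff:

it('should create nonstreaming multipart content', async () => {
  // Command line assumed from the repo's usual pattern; the diff only shows
  // the assertions that remain after the /scone/ check was removed.
  const output = execSync(
    `node ./nonStreamingMultipartContent.js ${projectId} ${location} ${model} ${image}`
  );
  assert(output.match(/Prompt Text:/));
  assert(output.match(/what is shown in this image/));
  assert(output.match(/Non-Streaming Response Text:/));
});
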
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/safetySettings.test.js
@@ -30,7 +30,7 @@ describe('Safety settings', async () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';
// const model = 'gemini-1.5-flash-001';

it('should reject a dangerous request', async () => {
const output = execSync(
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision-001';
const model = 'gemini-1.5-flash-001';

describe('Generative AI Stream MultiModal with Image', () => {
/**
@@ -30,7 +30,7 @@ describe('Generative AI Stream MultiModal with Image', () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';
// const model = 'gemini-1.5-flash-001';

it('should create stream multimodal content', async () => {
const output = execSync(
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision-001';
const model = 'gemini-1.5-flash-001';

describe('Generative AI Stream MultiModal with Video', () => {
/**
@@ -30,13 +30,14 @@ describe('Generative AI Stream MultiModal with Video', () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';
// const model = 'gemini-1.5-flash-001';

it('should create stream multimodal content', async () => {
const output = execSync(
`node ./sendMultiModalPromptWithVideo.js ${projectId} ${location} ${model}`
);

// Ensure that the conversation is what we expect for these images
assert(output.match(/Zootopia/));
assert(output.match(/Google Photos/));
});
});
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/streamChat.test.js
@@ -30,7 +30,7 @@ describe('Generative AI Stream Chat', () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';
// const model = 'gemini-1.5-flash-001';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/streamContent.test.js
@@ -30,7 +30,7 @@ describe('Generative AI Stream Content', () => {
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';
// const model = 'gemini-1.5-flash-001';

it('should create stream content', async () => {
const output = execSync(
