From c3126aa120208df4589f6c68fe67b540344af7e7 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 18:24:46 +0200 Subject: [PATCH 01/19] feat(genai): update the gemini model name --- discoveryengine/answer_query_sample.py | 4 ++-- discoveryengine/session_sample.py | 2 +- generative_ai/batch_predict/gemini_batch_predict_bigquery.py | 2 +- generative_ai/batch_predict/gemini_batch_predict_gcs.py | 2 +- generative_ai/evaluation/pairwise_summarization_quality.py | 4 ++-- generative_ai/express_mode/api_key_example.py | 2 +- generative_ai/image/image_example01.py | 2 +- generative_ai/image/image_example02.py | 2 +- generative_ai/labels/labels_example.py | 2 +- generative_ai/prompts/prompt_create.py | 2 +- generative_ai/prompts/prompt_delete.py | 2 +- generative_ai/prompts/prompt_get.py | 2 +- generative_ai/prompts/prompt_list_version.py | 2 +- generative_ai/prompts/prompt_restore_version.py | 2 +- generative_ai/prompts/prompt_template.py | 2 +- .../provisioned_throughput/provisioned_throughput_with_txt.py | 2 +- generative_ai/rag/generate_content_example.py | 2 +- generative_ai/rag/quickstart_example.py | 2 +- generative_ai/safety/safety_config_example.py | 2 +- .../system_instructions/system_instructions_example.py | 2 +- generative_ai/template_folder/advanced_example.py | 2 +- generative_ai/template_folder/simple_example.py | 2 +- generative_ai/text_generation/chat_multiturn_example.py | 2 +- .../text_generation/chat_multiturn_stream_example.py | 2 +- .../text_generation/gemini_describe_http_image_example.py | 2 +- .../text_generation/gemini_describe_http_pdf_example.py | 2 +- generative_ai/text_generation/gemini_translate_text.py | 2 +- generative_ai/text_generation/generation_config_example.py | 2 +- generative_ai/text_generation/multimodal_stream_example.py | 2 +- .../text_generation/single_turn_multi_image_example.py | 2 +- generative_ai/text_generation/text_example01.py | 2 +- generative_ai/text_generation/text_example03.py | 2 +- 
generative_ai/text_generation/text_stream_example01.py | 2 +- generative_ai/understand_audio/summarization_example.py | 2 +- generative_ai/understand_audio/transcription_example.py | 2 +- generative_ai/understand_docs/pdf_example.py | 2 +- generative_ai/understand_video/audio_video_example.py | 2 +- generative_ai/understand_video/single_turn_video_example.py | 2 +- generative_ai/video/gemini_describe_http_video_example.py | 2 +- .../video/gemini_youtube_video_key_moments_example.py | 2 +- .../video/gemini_youtube_video_summarization_example.py | 2 +- generative_ai/video/multimodal_example01.py | 2 +- generative_ai/video/multimodal_example02.py | 2 +- translate/samples/snippets/translate_with_gemini.py | 2 +- 44 files changed, 46 insertions(+), 46 deletions(-) diff --git a/discoveryengine/answer_query_sample.py b/discoveryengine/answer_query_sample.py index 5eeffa9554..ac8fc04a6e 100644 --- a/discoveryengine/answer_query_sample.py +++ b/discoveryengine/answer_query_sample.py @@ -13,7 +13,7 @@ # limitations under the License. # -# NOTE: This snippet has been partially generated by `gemini-1.5-pro-001` +# NOTE: This snippet has been partially generated by `gemini-2.0-flash-001` # [START genappbuilder_answer_query] from google.api_core.client_options import ClientOptions @@ -71,7 +71,7 @@ def answer_query_sample( ignore_non_answer_seeking_query=False, # Optional: Ignore non-answer seeking query ignore_low_relevant_content=False, # Optional: Return fallback answer when content is not relevant model_spec=discoveryengine.AnswerQueryRequest.AnswerGenerationSpec.ModelSpec( - model_version="gemini-1.5-flash-001/answer_gen/v2", # Optional: Model to use for answer generation + model_version="gemini-2.0-flash-001/answer_gen/v2", # Optional: Model to use for answer generation ), prompt_spec=discoveryengine.AnswerQueryRequest.AnswerGenerationSpec.PromptSpec( preamble="Give a detailed answer.", # Optional: Natural language instructions for customizing the answer. 
diff --git a/discoveryengine/session_sample.py b/discoveryengine/session_sample.py index 4b1e71b4cd..574d36ebf7 100644 --- a/discoveryengine/session_sample.py +++ b/discoveryengine/session_sample.py @@ -14,7 +14,7 @@ # limitations under the License. # -# NOTE: This snippet has been partially generated by `gemini-1.5-pro-001` +# NOTE: This snippet has been partially generated by `gemini-2.0-flash-001` # [START genappbuilder_create_session] from google.cloud import discoveryengine_v1 as discoveryengine diff --git a/generative_ai/batch_predict/gemini_batch_predict_bigquery.py b/generative_ai/batch_predict/gemini_batch_predict_bigquery.py index 15f755596e..60c9baa990 100644 --- a/generative_ai/batch_predict/gemini_batch_predict_bigquery.py +++ b/generative_ai/batch_predict/gemini_batch_predict_bigquery.py @@ -37,7 +37,7 @@ def batch_predict_gemini_createjob(output_uri: str) -> str: # Submit a batch prediction job with Gemini model batch_prediction_job = BatchPredictionJob.submit( - source_model="gemini-1.5-flash-002", + source_model="gemini-2.0-flash-001", input_dataset=input_uri, output_uri_prefix=output_uri, ) diff --git a/generative_ai/batch_predict/gemini_batch_predict_gcs.py b/generative_ai/batch_predict/gemini_batch_predict_gcs.py index 5b452dc044..ff1b5d8386 100644 --- a/generative_ai/batch_predict/gemini_batch_predict_gcs.py +++ b/generative_ai/batch_predict/gemini_batch_predict_gcs.py @@ -38,7 +38,7 @@ def batch_predict_gemini_createjob(output_uri: str) -> str: # Submit a batch prediction job with Gemini model batch_prediction_job = BatchPredictionJob.submit( - source_model="gemini-1.5-flash-002", + source_model="gemini-2.0-flash-001", input_dataset=input_uri, output_uri_prefix=output_uri, ) diff --git a/generative_ai/evaluation/pairwise_summarization_quality.py b/generative_ai/evaluation/pairwise_summarization_quality.py index 8b5fba4460..3c24eccca0 100644 --- a/generative_ai/evaluation/pairwise_summarization_quality.py +++ 
b/generative_ai/evaluation/pairwise_summarization_quality.py @@ -52,11 +52,11 @@ def evaluate_output() -> EvalResult: eval_dataset = pd.DataFrame({"prompt": [prompt]}) # Baseline model for pairwise comparison - baseline_model = GenerativeModel("gemini-1.5-pro-001") + baseline_model = GenerativeModel("gemini-2.0-flash-001") # Candidate model for pairwise comparison candidate_model = GenerativeModel( - "gemini-1.5-pro-002", generation_config={"temperature": 0.4} + "gemini-2.0-flash-001", generation_config={"temperature": 0.4} ) prompt_template = MetricPromptTemplateExamples.get_prompt_template( diff --git a/generative_ai/express_mode/api_key_example.py b/generative_ai/express_mode/api_key_example.py index 99510c8d11..efa1307cf2 100644 --- a/generative_ai/express_mode/api_key_example.py +++ b/generative_ai/express_mode/api_key_example.py @@ -21,7 +21,7 @@ def generate_content() -> None: # TODO(developer): Update below line vertexai.init(api_key="YOUR_API_KEY") - model = GenerativeModel("gemini-1.5-flash") + model = GenerativeModel("gemini-2.0-flash-001") response = model.generate_content("Explain bubble sort to me") diff --git a/generative_ai/image/image_example01.py b/generative_ai/image/image_example01.py index 20b05501a4..6ad128f66f 100644 --- a/generative_ai/image/image_example01.py +++ b/generative_ai/image/image_example01.py @@ -26,7 +26,7 @@ def generate_text() -> str: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") response = model.generate_content( [ diff --git a/generative_ai/image/image_example02.py b/generative_ai/image/image_example02.py index f498e85bc8..ac110f58d4 100644 --- a/generative_ai/image/image_example02.py +++ b/generative_ai/image/image_example02.py @@ -26,7 +26,7 @@ def generate_text() -> None: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = 
GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") image_file = Part.from_uri( "gs://cloud-samples-data/generative-ai/image/scones.jpg", "image/jpeg" diff --git a/generative_ai/labels/labels_example.py b/generative_ai/labels/labels_example.py index 6704d9962a..23168e7d46 100644 --- a/generative_ai/labels/labels_example.py +++ b/generative_ai/labels/labels_example.py @@ -28,7 +28,7 @@ def generate_content() -> GenerationResponse: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-001") + model = GenerativeModel("gemini-2.0-flash-001") prompt = "What is Generative AI?" response = model.generate_content( diff --git a/generative_ai/prompts/prompt_create.py b/generative_ai/prompts/prompt_create.py index 3418ff56fb..a18fbd986f 100644 --- a/generative_ai/prompts/prompt_create.py +++ b/generative_ai/prompts/prompt_create.py @@ -39,7 +39,7 @@ def prompt_create() -> Prompt: {"movie1": "The Lion King", "movie2": "Frozen"}, {"movie1": "Inception", "movie2": "Interstellar"}, ], - model_name="gemini-1.5-pro-002", + model_name="gemini-2.0-flash-001", system_instruction="You are a movie critic. Answer in a short sentence.", # generation_config=GenerationConfig, # Optional, # safety_settings=SafetySetting, # Optional, diff --git a/generative_ai/prompts/prompt_delete.py b/generative_ai/prompts/prompt_delete.py index 41d8bd0cb8..80c2f6940f 100644 --- a/generative_ai/prompts/prompt_delete.py +++ b/generative_ai/prompts/prompt_delete.py @@ -35,7 +35,7 @@ def delete_prompt() -> None: {"movie1": "The Lion King", "movie2": "Frozen"}, {"movie1": "Inception", "movie2": "Interstellar"}, ], - model_name="gemini-1.5-pro-002", + model_name="gemini-2.0-flash-001", system_instruction="You are a movie critic. 
Answer in a short sentence.", ) diff --git a/generative_ai/prompts/prompt_get.py b/generative_ai/prompts/prompt_get.py index 01ad6bda48..59cf9c0bbc 100644 --- a/generative_ai/prompts/prompt_get.py +++ b/generative_ai/prompts/prompt_get.py @@ -33,7 +33,7 @@ def get_prompt() -> Prompt: prompt = Prompt( prompt_name="meteorologist", prompt_data="How should I dress for weather in August?", - model_name="gemini-1.5-pro-002", + model_name="gemini-2.0-flash-001", system_instruction="You are a meteorologist. Answer in a short sentence.", ) diff --git a/generative_ai/prompts/prompt_list_version.py b/generative_ai/prompts/prompt_list_version.py index 58c490736a..1fc200673f 100644 --- a/generative_ai/prompts/prompt_list_version.py +++ b/generative_ai/prompts/prompt_list_version.py @@ -32,7 +32,7 @@ def list_prompt_version() -> list: prompt = Prompt( prompt_name="zoologist", prompt_data="Which animal is the fastest on earth?", - model_name="gemini-1.5-pro-002", + model_name="gemini-2.0-flash-001", system_instruction="You are a zoologist. Answer in a short sentence.", ) # Save Prompt to online resource. diff --git a/generative_ai/prompts/prompt_restore_version.py b/generative_ai/prompts/prompt_restore_version.py index b2db354015..44473c300f 100644 --- a/generative_ai/prompts/prompt_restore_version.py +++ b/generative_ai/prompts/prompt_restore_version.py @@ -32,7 +32,7 @@ def restore_prompt_version() -> Prompt: prompt = Prompt( prompt_name="zoologist", prompt_data="Which animal is the fastest on earth?", - model_name="gemini-1.5-pro-002", + model_name="gemini-2.0-flash-001", system_instruction="You are a zoologist. Answer in a short sentence.", ) # Save Prompt to online resource. 
diff --git a/generative_ai/prompts/prompt_template.py b/generative_ai/prompts/prompt_template.py index cc253aa02a..7517c7bb66 100644 --- a/generative_ai/prompts/prompt_template.py +++ b/generative_ai/prompts/prompt_template.py @@ -38,7 +38,7 @@ def prompt_template_example() -> list[GenerationResponse]: # define prompt template prompt = Prompt( prompt_data="Do {animal} {activity}?", - model_name="gemini-1.5-flash-002", + model_name="gemini-2.0-flash-001", variables=variables, system_instruction="You are a helpful zoologist" # generation_config=generation_config, # Optional diff --git a/generative_ai/provisioned_throughput/provisioned_throughput_with_txt.py b/generative_ai/provisioned_throughput/provisioned_throughput_with_txt.py index 97f782a86d..8da294ab6a 100644 --- a/generative_ai/provisioned_throughput/provisioned_throughput_with_txt.py +++ b/generative_ai/provisioned_throughput/provisioned_throughput_with_txt.py @@ -33,7 +33,7 @@ def generate_content() -> str: request_metadata=[("x-vertex-ai-llm-request-type", "shared")], ) - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") response = model.generate_content( "What's a good name for a flower shop that specializes in selling bouquets of dried flowers?" 
diff --git a/generative_ai/rag/generate_content_example.py b/generative_ai/rag/generate_content_example.py index f31ea94f53..703234b15d 100644 --- a/generative_ai/rag/generate_content_example.py +++ b/generative_ai/rag/generate_content_example.py @@ -52,7 +52,7 @@ def generate_content_with_rag( ) rag_model = GenerativeModel( - model_name="gemini-1.5-flash-001", tools=[rag_retrieval_tool] + model_name="gemini-2.0-flash-001", tools=[rag_retrieval_tool] ) response = rag_model.generate_content("Why is the sky blue?") print(response.text) diff --git a/generative_ai/rag/quickstart_example.py b/generative_ai/rag/quickstart_example.py index 03b4624857..e78519207b 100644 --- a/generative_ai/rag/quickstart_example.py +++ b/generative_ai/rag/quickstart_example.py @@ -95,7 +95,7 @@ def quickstart( ) # Create a gemini-pro model instance rag_model = GenerativeModel( - model_name="gemini-1.5-flash-001", tools=[rag_retrieval_tool] + model_name="gemini-2.0-flash-001", tools=[rag_retrieval_tool] ) # Generate response diff --git a/generative_ai/safety/safety_config_example.py b/generative_ai/safety/safety_config_example.py index 281f0d227c..e60496b416 100644 --- a/generative_ai/safety/safety_config_example.py +++ b/generative_ai/safety/safety_config_example.py @@ -33,7 +33,7 @@ def generate_text() -> str: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") # Safety config safety_config = [ diff --git a/generative_ai/system_instructions/system_instructions_example.py b/generative_ai/system_instructions/system_instructions_example.py index 50f4c493a6..2ec0518c9f 100644 --- a/generative_ai/system_instructions/system_instructions_example.py +++ b/generative_ai/system_instructions/system_instructions_example.py @@ -27,7 +27,7 @@ def set_system_instruction() -> str: vertexai.init(project=PROJECT_ID, location="us-central1") model = GenerativeModel( - 
model_name="gemini-1.5-flash-002", + model_name="gemini-2.0-flash-001", system_instruction=[ "You are a helpful language translator.", "Your mission is to translate text in English to French.", diff --git a/generative_ai/template_folder/advanced_example.py b/generative_ai/template_folder/advanced_example.py index 4b9c7a721d..e214aa0a4f 100644 --- a/generative_ai/template_folder/advanced_example.py +++ b/generative_ai/template_folder/advanced_example.py @@ -27,7 +27,7 @@ # # PROJECT_ID = "your-project-id" # vertexai.init(project=PROJECT_ID, location="us-central1") # -# model = GenerativeModel("gemini-1.5-flash-002") +# model = GenerativeModel("gemini-2.0-flash-001") # # contents = [ # Part.from_uri( diff --git a/generative_ai/template_folder/simple_example.py b/generative_ai/template_folder/simple_example.py index c4c31eb1af..5653c16e99 100644 --- a/generative_ai/template_folder/simple_example.py +++ b/generative_ai/template_folder/simple_example.py @@ -19,7 +19,7 @@ # from vertexai.preview.tokenization import get_tokenizer_for_model # # # Using local tokenzier -# tokenizer = get_tokenizer_for_model("gemini-1.5-flash-002") +# tokenizer = get_tokenizer_for_model("gemini-2.0-flash-001") # # prompt = "hello world" # response = tokenizer.count_tokens(prompt) diff --git a/generative_ai/text_generation/chat_multiturn_example.py b/generative_ai/text_generation/chat_multiturn_example.py index bd78321a83..becaf5367d 100644 --- a/generative_ai/text_generation/chat_multiturn_example.py +++ b/generative_ai/text_generation/chat_multiturn_example.py @@ -27,7 +27,7 @@ def chat_text_example() -> str: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") chat_session = model.start_chat() diff --git a/generative_ai/text_generation/chat_multiturn_stream_example.py b/generative_ai/text_generation/chat_multiturn_stream_example.py index 
f6b1d821af..08df3f9867 100644 --- a/generative_ai/text_generation/chat_multiturn_stream_example.py +++ b/generative_ai/text_generation/chat_multiturn_stream_example.py @@ -27,7 +27,7 @@ def chat_stream_example() -> str: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") chat_session = model.start_chat() diff --git a/generative_ai/text_generation/gemini_describe_http_image_example.py b/generative_ai/text_generation/gemini_describe_http_image_example.py index fd92f3858c..0ee1303df2 100644 --- a/generative_ai/text_generation/gemini_describe_http_image_example.py +++ b/generative_ai/text_generation/gemini_describe_http_image_example.py @@ -24,7 +24,7 @@ def generate_content() -> str: # TODO (developer): update project id vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") contents = [ # Text prompt diff --git a/generative_ai/text_generation/gemini_describe_http_pdf_example.py b/generative_ai/text_generation/gemini_describe_http_pdf_example.py index 4c36a3ba03..2d45d36b5b 100644 --- a/generative_ai/text_generation/gemini_describe_http_pdf_example.py +++ b/generative_ai/text_generation/gemini_describe_http_pdf_example.py @@ -24,7 +24,7 @@ def generate_content() -> str: # TODO (developer): update project id vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") contents = [ # Text prompt diff --git a/generative_ai/text_generation/gemini_translate_text.py b/generative_ai/text_generation/gemini_translate_text.py index 4b3fcffa54..688bdc2bf6 100644 --- a/generative_ai/text_generation/gemini_translate_text.py +++ b/generative_ai/text_generation/gemini_translate_text.py @@ -28,7 +28,7 @@ def generate_translation() -> 
GenerationResponse: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") prompt = """ Translate the text from source to target language and return the translated text. diff --git a/generative_ai/text_generation/generation_config_example.py b/generative_ai/text_generation/generation_config_example.py index 429456544f..194eb23e23 100644 --- a/generative_ai/text_generation/generation_config_example.py +++ b/generative_ai/text_generation/generation_config_example.py @@ -27,7 +27,7 @@ def generate_text() -> None: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") # Load example image from local storage encoded_image = base64.b64encode(open("scones.jpg", "rb").read()).decode("utf-8") diff --git a/generative_ai/text_generation/multimodal_stream_example.py b/generative_ai/text_generation/multimodal_stream_example.py index b8ddeb511e..c0e863c438 100644 --- a/generative_ai/text_generation/multimodal_stream_example.py +++ b/generative_ai/text_generation/multimodal_stream_example.py @@ -27,7 +27,7 @@ def generate_content() -> object: vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") responses = model.generate_content( [ Part.from_uri( diff --git a/generative_ai/text_generation/single_turn_multi_image_example.py b/generative_ai/text_generation/single_turn_multi_image_example.py index 8eecae58d7..b9b9f6a298 100644 --- a/generative_ai/text_generation/single_turn_multi_image_example.py +++ b/generative_ai/text_generation/single_turn_multi_image_example.py @@ -40,7 +40,7 @@ def generate_text_multimodal() -> str: mime_type="image/png", ) - model = GenerativeModel("gemini-1.5-flash-002") + model = 
GenerativeModel("gemini-2.0-flash-001") response = model.generate_content( [ image_file1, diff --git a/generative_ai/text_generation/text_example01.py b/generative_ai/text_generation/text_example01.py index 0db32c10b1..744ec4ee1e 100644 --- a/generative_ai/text_generation/text_example01.py +++ b/generative_ai/text_generation/text_example01.py @@ -25,7 +25,7 @@ def generate_from_text_input() -> str: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") response = model.generate_content( "What's a good name for a flower shop that specializes in selling bouquets of dried flowers?" diff --git a/generative_ai/text_generation/text_example03.py b/generative_ai/text_generation/text_example03.py index 80d5bce30c..72c35ac6e2 100644 --- a/generative_ai/text_generation/text_example03.py +++ b/generative_ai/text_generation/text_example03.py @@ -27,7 +27,7 @@ def generate_content() -> object: vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") response = model.generate_content("Write a story about a magic backpack.") print(response.text) diff --git a/generative_ai/text_generation/text_stream_example01.py b/generative_ai/text_generation/text_stream_example01.py index f581d02d1c..da9cde5a39 100644 --- a/generative_ai/text_generation/text_stream_example01.py +++ b/generative_ai/text_generation/text_stream_example01.py @@ -27,7 +27,7 @@ def generate_content() -> object: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") responses = model.generate_content( "Write a story about a magic backpack.", stream=True ) diff --git a/generative_ai/understand_audio/summarization_example.py 
b/generative_ai/understand_audio/summarization_example.py index 67f8f78252..80b160a181 100644 --- a/generative_ai/understand_audio/summarization_example.py +++ b/generative_ai/understand_audio/summarization_example.py @@ -28,7 +28,7 @@ def summarize_audio() -> str: vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") prompt = """ Please provide a summary for the audio. diff --git a/generative_ai/understand_audio/transcription_example.py b/generative_ai/understand_audio/transcription_example.py index 80550a0a21..e7e659f257 100644 --- a/generative_ai/understand_audio/transcription_example.py +++ b/generative_ai/understand_audio/transcription_example.py @@ -28,7 +28,7 @@ def transcript_audio() -> str: vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") prompt = """ Can you transcribe this interview, in the format of timecode, speaker, caption. diff --git a/generative_ai/understand_docs/pdf_example.py b/generative_ai/understand_docs/pdf_example.py index e1eea10273..ac568c6512 100644 --- a/generative_ai/understand_docs/pdf_example.py +++ b/generative_ai/understand_docs/pdf_example.py @@ -25,7 +25,7 @@ def analyze_pdf() -> str: # TODO(developer): Update project_id and location vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") prompt = """ You are a very professional document summarization specialist. 
diff --git a/generative_ai/understand_video/audio_video_example.py b/generative_ai/understand_video/audio_video_example.py index c8218ee0c7..838d7f29c5 100644 --- a/generative_ai/understand_video/audio_video_example.py +++ b/generative_ai/understand_video/audio_video_example.py @@ -27,7 +27,7 @@ def analyze_video_with_audio() -> str: vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") prompt = """ Provide a description of the video. diff --git a/generative_ai/understand_video/single_turn_video_example.py b/generative_ai/understand_video/single_turn_video_example.py index 1923b214d7..81eec8f5ae 100644 --- a/generative_ai/understand_video/single_turn_video_example.py +++ b/generative_ai/understand_video/single_turn_video_example.py @@ -27,7 +27,7 @@ def generate_text() -> str: vertexai.init(project=PROJECT_ID, location="us-central1") - vision_model = GenerativeModel("gemini-1.5-flash-002") + vision_model = GenerativeModel("gemini-2.0-flash-001") # Generate text response = vision_model.generate_content( diff --git a/generative_ai/video/gemini_describe_http_video_example.py b/generative_ai/video/gemini_describe_http_video_example.py index f4d2c66410..559ccc4819 100644 --- a/generative_ai/video/gemini_describe_http_video_example.py +++ b/generative_ai/video/gemini_describe_http_video_example.py @@ -24,7 +24,7 @@ def generate_content() -> str: # TODO (developer): update project id vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") contents = [ # Text prompt diff --git a/generative_ai/video/gemini_youtube_video_key_moments_example.py b/generative_ai/video/gemini_youtube_video_key_moments_example.py index 2ed43d954a..dc450de455 100644 --- a/generative_ai/video/gemini_youtube_video_key_moments_example.py +++ 
b/generative_ai/video/gemini_youtube_video_key_moments_example.py @@ -24,7 +24,7 @@ def generate_content() -> str: # TODO (developer): update project id vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") contents = [ # Text prompt diff --git a/generative_ai/video/gemini_youtube_video_summarization_example.py b/generative_ai/video/gemini_youtube_video_summarization_example.py index 9dda09017c..5e13253971 100644 --- a/generative_ai/video/gemini_youtube_video_summarization_example.py +++ b/generative_ai/video/gemini_youtube_video_summarization_example.py @@ -24,7 +24,7 @@ def generate_content() -> str: # TODO (developer): update project id vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") contents = [ # Text prompt diff --git a/generative_ai/video/multimodal_example01.py b/generative_ai/video/multimodal_example01.py index 07302bdd06..2be8c2e59a 100644 --- a/generative_ai/video/multimodal_example01.py +++ b/generative_ai/video/multimodal_example01.py @@ -26,7 +26,7 @@ def analyze_all_modalities() -> str: # PROJECT_ID = "your-project-id" vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") video_file_uri = ( "gs://cloud-samples-data/generative-ai/video/behind_the_scenes_pixel.mp4" diff --git a/generative_ai/video/multimodal_example02.py b/generative_ai/video/multimodal_example02.py index ec0ffdc170..a36ff1dabe 100644 --- a/generative_ai/video/multimodal_example02.py +++ b/generative_ai/video/multimodal_example02.py @@ -27,7 +27,7 @@ def generate_content() -> object: vertexai.init(project=PROJECT_ID, location="us-central1") - model = GenerativeModel("gemini-1.5-flash-002") + model = GenerativeModel("gemini-2.0-flash-001") response = model.generate_content( 
[ Part.from_uri( diff --git a/translate/samples/snippets/translate_with_gemini.py b/translate/samples/snippets/translate_with_gemini.py index 0372090062..6cc5cec137 100644 --- a/translate/samples/snippets/translate_with_gemini.py +++ b/translate/samples/snippets/translate_with_gemini.py @@ -33,7 +33,7 @@ def translate_text(text: str, target_language_code: str = "fr") -> GenerationRes # Initializes the Vertex AI with the specified project and location vertexai.init(project=PROJECT_ID, location="europe-west2") - model = GenerativeModel("gemini-1.0-pro") + model = GenerativeModel("gemini-2.0-flash-001") # Configuration for the text generation generation_config = { From 6857a00ee7e090b41c01b34791b56499d9affcd3 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 18:25:00 +0200 Subject: [PATCH 02/19] feat(genai): cleanup --- .../contentcache_create_with_txt_gcs_pdf.py | 65 ------- genai/content_cache/contentcache_delete.py | 35 ---- genai/content_cache/contentcache_list.py | 42 ----- genai/content_cache/contentcache_update.py | 59 ------- .../contentcache_use_with_txt.py | 42 ----- genai/content_cache/noxfile_config.py | 42 ----- .../test_content_cache_examples.py | 49 ------ .../context_caching/create_context_cache.py | 65 ------- .../context_caching/delete_context_cache.py | 37 ---- .../context_caching/get_context_cache.py | 41 ----- .../context_caching/list_context_caches.py | 46 ----- .../context_caching/noxfile_config.py | 42 ----- .../context_caching/requirements-test.txt | 4 - .../context_caching/requirements.txt | 14 -- .../context_caching/test_context_caching.py | 59 ------- .../context_caching/update_context_cache.py | 53 ------ .../context_caching/use_context_cache.py | 48 ------ .../controlled_generation_test.py | 60 ------- .../controlled_generation/example_01.py | 65 ------- .../controlled_generation/example_02.py | 72 -------- .../controlled_generation/example_03.py | 85 --------- .../controlled_generation/example_04.py | 97 ----------- 
.../controlled_generation/example_05.py | 57 ------ .../controlled_generation/example_06.py | 84 --------- .../controlled_generation/example_07.py | 57 ------ .../controlled_generation/noxfile_config.py | 42 ----- .../requirements-test.txt | 4 - .../controlled_generation/requirements.txt | 14 -- .../function_calling/advanced_example.py | 105 ------------ .../function_calling/basic_example.py | 120 ------------- .../function_calling/chat_example.py | 162 ------------------ .../chat_function_calling_basic.py | 90 ---------- .../chat_function_calling_config.py | 91 ---------- .../function_calling/noxfile_config.py | 42 ----- .../parallel_function_calling_example.py | 115 ------------- .../function_calling/requirements-test.txt | 4 - .../function_calling/requirements.txt | 14 -- .../function_calling/test_function_calling.py | 87 ---------- generative_ai/grounding/noxfile_config.py | 42 ----- generative_ai/grounding/palm_example.py | 72 -------- generative_ai/grounding/requirements-test.txt | 4 - generative_ai/grounding/requirements.txt | 1 - generative_ai/grounding/test_grounding.py | 45 ----- generative_ai/grounding/vais_example.py | 68 -------- generative_ai/grounding/web_example.py | 67 -------- generative_ai/token_count/api_example.py | 50 ------ .../token_count/list_tokens_example.py | 44 ----- .../token_count/local_sdk_example.py | 40 ----- .../multimodal_token_count_example.py | 66 ------- generative_ai/token_count/noxfile_config.py | 42 ----- .../token_count/requirements-test.txt | 4 - generative_ai/token_count/requirements.txt | 14 -- generative_ai/token_count/simple_example.py | 58 ------- .../token_count/test_list_tokens_example.py | 20 --- .../token_count/test_token_count_examples.py | 36 ---- 55 files changed, 2883 deletions(-) delete mode 100644 genai/content_cache/contentcache_create_with_txt_gcs_pdf.py delete mode 100644 genai/content_cache/contentcache_delete.py delete mode 100644 genai/content_cache/contentcache_list.py delete mode 100644 
genai/content_cache/contentcache_update.py delete mode 100644 genai/content_cache/contentcache_use_with_txt.py delete mode 100644 genai/content_cache/noxfile_config.py delete mode 100644 genai/content_cache/test_content_cache_examples.py delete mode 100644 generative_ai/context_caching/create_context_cache.py delete mode 100644 generative_ai/context_caching/delete_context_cache.py delete mode 100644 generative_ai/context_caching/get_context_cache.py delete mode 100644 generative_ai/context_caching/list_context_caches.py delete mode 100644 generative_ai/context_caching/noxfile_config.py delete mode 100644 generative_ai/context_caching/requirements-test.txt delete mode 100644 generative_ai/context_caching/requirements.txt delete mode 100644 generative_ai/context_caching/test_context_caching.py delete mode 100644 generative_ai/context_caching/update_context_cache.py delete mode 100644 generative_ai/context_caching/use_context_cache.py delete mode 100644 generative_ai/controlled_generation/controlled_generation_test.py delete mode 100644 generative_ai/controlled_generation/example_01.py delete mode 100644 generative_ai/controlled_generation/example_02.py delete mode 100644 generative_ai/controlled_generation/example_03.py delete mode 100644 generative_ai/controlled_generation/example_04.py delete mode 100644 generative_ai/controlled_generation/example_05.py delete mode 100644 generative_ai/controlled_generation/example_06.py delete mode 100644 generative_ai/controlled_generation/example_07.py delete mode 100644 generative_ai/controlled_generation/noxfile_config.py delete mode 100644 generative_ai/controlled_generation/requirements-test.txt delete mode 100644 generative_ai/controlled_generation/requirements.txt delete mode 100644 generative_ai/function_calling/advanced_example.py delete mode 100644 generative_ai/function_calling/basic_example.py delete mode 100644 generative_ai/function_calling/chat_example.py delete mode 100644 
generative_ai/function_calling/chat_function_calling_basic.py delete mode 100644 generative_ai/function_calling/chat_function_calling_config.py delete mode 100644 generative_ai/function_calling/noxfile_config.py delete mode 100644 generative_ai/function_calling/parallel_function_calling_example.py delete mode 100644 generative_ai/function_calling/requirements-test.txt delete mode 100644 generative_ai/function_calling/requirements.txt delete mode 100644 generative_ai/function_calling/test_function_calling.py delete mode 100644 generative_ai/grounding/noxfile_config.py delete mode 100644 generative_ai/grounding/palm_example.py delete mode 100644 generative_ai/grounding/requirements-test.txt delete mode 100644 generative_ai/grounding/requirements.txt delete mode 100644 generative_ai/grounding/test_grounding.py delete mode 100644 generative_ai/grounding/vais_example.py delete mode 100644 generative_ai/grounding/web_example.py delete mode 100644 generative_ai/token_count/api_example.py delete mode 100644 generative_ai/token_count/list_tokens_example.py delete mode 100644 generative_ai/token_count/local_sdk_example.py delete mode 100644 generative_ai/token_count/multimodal_token_count_example.py delete mode 100644 generative_ai/token_count/noxfile_config.py delete mode 100644 generative_ai/token_count/requirements-test.txt delete mode 100644 generative_ai/token_count/requirements.txt delete mode 100644 generative_ai/token_count/simple_example.py delete mode 100644 generative_ai/token_count/test_list_tokens_example.py delete mode 100644 generative_ai/token_count/test_token_count_examples.py diff --git a/genai/content_cache/contentcache_create_with_txt_gcs_pdf.py b/genai/content_cache/contentcache_create_with_txt_gcs_pdf.py deleted file mode 100644 index 57d10e39e0..0000000000 --- a/genai/content_cache/contentcache_create_with_txt_gcs_pdf.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def create_content_cache() -> str: - # [START googlegenaisdk_contentcache_create_with_txt_gcs_pdf] - from google import genai - from google.genai.types import Content, CreateCachedContentConfig, HttpOptions, Part - - client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) - - system_instruction = """ - You are an expert researcher. You always stick to the facts in the sources provided, and never make up new facts. - Now look at these research papers, and answer the following questions. - """ - - contents = [ - Content( - role="user", - parts=[ - Part.from_uri( - file_uri="gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf", - mime_type="application/pdf", - ), - Part.from_uri( - file_uri="gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf", - mime_type="application/pdf", - ), - ], - ) - ] - - content_cache = client.caches.create( - model="gemini-1.5-pro-002", - config=CreateCachedContentConfig( - contents=contents, - system_instruction=system_instruction, - display_name="example-cache", - ttl="86400s", - ), - ) - - print(content_cache.name) - print(content_cache.usage_metadata) - # Example response: - # projects/111111111111/locations/us-central1/cachedContents/1111111111111111111 - # CachedContentUsageMetadata(audio_duration_seconds=None, image_count=167, - # text_count=153, total_token_count=43130, video_duration_seconds=None) - # [END googlegenaisdk_contentcache_create_with_txt_gcs_pdf] - return content_cache.name - - -if __name__ == "__main__": - 
create_content_cache() diff --git a/genai/content_cache/contentcache_delete.py b/genai/content_cache/contentcache_delete.py deleted file mode 100644 index 9b8b331094..0000000000 --- a/genai/content_cache/contentcache_delete.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def delete_context_caches(cache_name: str) -> str: - # [START googlegenaisdk_contentcache_delete] - from google import genai - from google.genai.types import HttpOptions - - client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) - - # Delete content cache using name - # E.g cache_name = 'projects/111111111111/locations/us-central1/cachedContents/1111111111111111111' - client.caches.delete(name=cache_name) - print("Deleted Cache", cache_name) - # Example response - # Deleted Cache projects/111111111111/locations/us-central1/cachedContents/1111111111111111111 - # [END googlegenaisdk_contentcache_delete] - return cache_name - - -if __name__ == "__main__": - cache_name = input("Cache Name: ") - delete_context_caches(cache_name) diff --git a/genai/content_cache/contentcache_list.py b/genai/content_cache/contentcache_list.py deleted file mode 100644 index 112fc9c43d..0000000000 --- a/genai/content_cache/contentcache_list.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def list_context_caches() -> str: - # [START googlegenaisdk_contentcache_list] - from google import genai - from google.genai.types import HttpOptions - - client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) - - content_cache_list = client.caches.list() - - # Access individual properties of a ContentCache object(s) - for content_cache in content_cache_list: - print(f"Cache `{content_cache.name}` for model `{content_cache.model}`") - print(f"Last updated at: {content_cache.update_time}") - print(f"Expires at: {content_cache.expire_time}") - - # Example response: - # * Cache `projects/111111111111/locations/us-central1/cachedContents/1111111111111111111` for - # model `projects/111111111111/locations/us-central1/publishers/google/models/gemini-XXX-pro-XXX` - # * Last updated at: 2025-02-13 14:46:42.620490+00:00 - # * CachedContentUsageMetadata(audio_duration_seconds=None, image_count=167, text_count=153, total_token_count=43130, video_duration_seconds=None) - # ... - # [END googlegenaisdk_contentcache_list] - return [content_cache.name for content_cache in content_cache_list] - - -if __name__ == "__main__": - list_context_caches() diff --git a/genai/content_cache/contentcache_update.py b/genai/content_cache/contentcache_update.py deleted file mode 100644 index 56748ce7ef..0000000000 --- a/genai/content_cache/contentcache_update.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def update_content_cache(cache_name: str) -> str: - # [START googlegenaisdk_contentcache_update] - from datetime import datetime as dt - from datetime import timezone as tz - from datetime import timedelta - - from google import genai - from google.genai.types import HttpOptions, UpdateCachedContentConfig - - client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) - - # Get content cache by name - # cache_name = "projects/111111111111/locations/us-central1/cachedContents/1111111111111111111" - content_cache = client.caches.get(name=cache_name) - print("Expire time", content_cache.expire_time) - # Example response - # Expire time 2025-02-20 15:50:18.434482+00:00 - - # Update expire time using TTL - content_cache = client.caches.update( - name=cache_name, config=UpdateCachedContentConfig(ttl="36000s") - ) - time_diff = content_cache.expire_time - dt.now(tz.utc) - print("Expire time(after update):", content_cache.expire_time) - print("Expire time(in seconds):", time_diff.seconds) - # Example response - # Expire time(after update): 2025-02-14 01:51:42.571696+00:00 - # Expire time(in seconds): 35999 - - # Update expire time using specific time stamp - next_week_utc = dt.now(tz.utc) + timedelta(days=7) - content_cache = client.caches.update( - name=cache_name, config=UpdateCachedContentConfig(expireTime=next_week_utc) - ) - print("Expire time(after update):", content_cache.expire_time) - # Example response - # Expire time(after update): 2025-02-20 15:51:42.614968+00:00 - # [END googlegenaisdk_contentcache_update] - return cache_name - - -if 
__name__ == "__main__": - cache_name = input("Cache Name: ") - update_content_cache(cache_name) diff --git a/genai/content_cache/contentcache_use_with_txt.py b/genai/content_cache/contentcache_use_with_txt.py deleted file mode 100644 index 4c2dab55d4..0000000000 --- a/genai/content_cache/contentcache_use_with_txt.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def generate_content(cache_name: str) -> str: - # [START googlegenaisdk_contentcache_use_with_txt] - from google import genai - from google.genai.types import GenerateContentConfig, HttpOptions - - client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) - - # Use content cache to generate text response - # E.g cache_name = 'projects/111111111111/locations/us-central1/cachedContents/1111111111111111111' - response = client.models.generate_content( - model="gemini-1.5-pro-002", - contents="Summarize the pdfs", - config=GenerateContentConfig( - cached_content=cache_name, - ), - ) - print(response.text) - # Example response - # The Gemini family of multimodal models from Google DeepMind demonstrates remarkable capabilities across various - # modalities, including image, audio, video, and text.... 
- # [END googlegenaisdk_contentcache_use_with_txt] - return response.text - - -if __name__ == "__main__": - cache_name = input("Cache Name: ") - generate_content(cache_name) diff --git a/genai/content_cache/noxfile_config.py b/genai/content_cache/noxfile_config.py deleted file mode 100644 index 2a0f115c38..0000000000 --- a/genai/content_cache/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.12"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. 
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/genai/content_cache/test_content_cache_examples.py b/genai/content_cache/test_content_cache_examples.py deleted file mode 100644 index d7d9e5abda..0000000000 --- a/genai/content_cache/test_content_cache_examples.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import contentcache_create_with_txt_gcs_pdf -import contentcache_delete -import contentcache_list -import contentcache_update -import contentcache_use_with_txt - - -os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "True" -os.environ["GOOGLE_CLOUD_LOCATION"] = "us-central1" -# The project name is included in the CICD pipeline -# os.environ['GOOGLE_CLOUD_PROJECT'] = "add-your-project-name" - - -def test_content_cache() -> None: - # Create a Cache - cache_name = contentcache_create_with_txt_gcs_pdf.create_content_cache() - assert cache_name - - # List cache - assert contentcache_list.list_context_caches() - - # Update cache - assert contentcache_update.update_content_cache(cache_name) - - # Use cache - assert contentcache_use_with_txt.generate_content(cache_name) - - # Delete cache - assert contentcache_delete.delete_context_caches(cache_name) - - -if __name__ == "__main__": - test_content_cache() diff --git a/generative_ai/context_caching/create_context_cache.py b/generative_ai/context_caching/create_context_cache.py deleted file mode 100644 index 426635fcf7..0000000000 --- a/generative_ai/context_caching/create_context_cache.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def create_context_cache() -> str: - # [START generativeaionvertexai_gemini_create_context_cache] - import vertexai - import datetime - - from vertexai.generative_models import Part - from vertexai.preview import caching - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - system_instruction = """ - You are an expert researcher. You always stick to the facts in the sources provided, and never make up new facts. - Now look at these research papers, and answer the following questions. - """ - - contents = [ - Part.from_uri( - "gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf", - mime_type="application/pdf", - ), - Part.from_uri( - "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf", - mime_type="application/pdf", - ), - ] - - cached_content = caching.CachedContent.create( - model_name="gemini-1.5-pro-002", - system_instruction=system_instruction, - contents=contents, - ttl=datetime.timedelta(minutes=60), - display_name="example-cache", - ) - - print(cached_content.name) - # Example response: - # 1234567890 - # [END generativeaionvertexai_gemini_create_context_cache] - - return cached_content.name - - -if __name__ == "__main__": - create_context_cache() diff --git a/generative_ai/context_caching/delete_context_cache.py b/generative_ai/context_caching/delete_context_cache.py deleted file mode 100644 index f08b035303..0000000000 --- a/generative_ai/context_caching/delete_context_cache.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def delete_context_cache(cache_id: str) -> None: - # [START generativeaionvertexai_gemini_delete_context_cache] - import vertexai - - from vertexai.preview import caching - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # cache_id = "your-cache-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - cached_content = caching.CachedContent(cached_content_name=cache_id) - cached_content.delete() - # [END generativeaionvertexai_gemini_delete_context_cache] - - -if __name__ == "__main__": - delete_context_cache("1234567890") diff --git a/generative_ai/context_caching/get_context_cache.py b/generative_ai/context_caching/get_context_cache.py deleted file mode 100644 index f3484bdc95..0000000000 --- a/generative_ai/context_caching/get_context_cache.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def get_context_cache(cache_id: str) -> str: - # [START generativeaionvertexai_gemini_get_context_cache] - import vertexai - - from vertexai.preview import caching - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # cache_id = "your-cache-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - cached_content = caching.CachedContent(cached_content_name=cache_id) - - print(cached_content.resource_name) - # Example response: - # projects/[PROJECT_ID]/locations/us-central1/cachedContents/1234567890 - # [END generativeaionvertexai_gemini_get_context_cache] - return cached_content.resource_name - - -if __name__ == "__main__": - get_context_cache("1234567890") diff --git a/generative_ai/context_caching/list_context_caches.py b/generative_ai/context_caching/list_context_caches.py deleted file mode 100644 index 8a483bad4b..0000000000 --- a/generative_ai/context_caching/list_context_caches.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import annotations - -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def list_context_caches() -> list[str]: - # [START generativeaionvertexai_context_caching_list] - import vertexai - - from vertexai.preview import caching - - # TODO(developer): Update & uncomment line below - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - cache_list = caching.CachedContent.list() - # Access individual properties of a CachedContent object - for cached_content in cache_list: - print(f"Cache '{cached_content.name}' for model '{cached_content.model_name}'") - print(f"Last updated at: {cached_content.update_time}") - print(f"Expires at: {cached_content.expire_time}") - # Example response: - # Cached content 'example-cache' for model '.../gemini-1.5-pro-001' - # Last updated at: 2024-09-16T12:41:09.998635Z - # Expires at: 2024-09-16T13:41:09.989729Z - # [END generativeaionvertexai_context_caching_list] - return [cached_content.name for cached_content in cache_list] - - -if __name__ == "__main__": - list_context_caches() diff --git a/generative_ai/context_caching/noxfile_config.py b/generative_ai/context_caching/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/context_caching/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. 
- -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/context_caching/requirements-test.txt b/generative_ai/context_caching/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/context_caching/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/context_caching/requirements.txt b/generative_ai/context_caching/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/context_caching/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/context_caching/test_context_caching.py b/generative_ai/context_caching/test_context_caching.py deleted file mode 100644 index 99f5734d1d..0000000000 --- a/generative_ai/context_caching/test_context_caching.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from typing import Generator - -import pytest - -import create_context_cache -import delete_context_cache -import get_context_cache -import list_context_caches -import update_context_cache -import use_context_cache - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") -REGION = "us-central1" - - -@pytest.fixture(scope="module") -def cache_id() -> Generator[str, None, None]: - cached_content_name = create_context_cache.create_context_cache() - yield cached_content_name - delete_context_cache.delete_context_cache(cached_content_name) - - -def test_create_context_cache(cache_id: str) -> None: - assert cache_id - - -def test_use_context_cache(cache_id: str) -> None: - response = use_context_cache.use_context_cache(cache_id) - assert response - - -def test_get_context_cache(cache_id: str) -> None: - response = get_context_cache.get_context_cache(cache_id) - assert response - - -def test_get_list_of_context_caches(cache_id: str) -> None: - response = list_context_caches.list_context_caches() - assert cache_id in response - - -def test_update_context_cache(cache_id: str) -> None: - response = update_context_cache.update_context_cache(cache_id) - assert response diff --git a/generative_ai/context_caching/update_context_cache.py b/generative_ai/context_caching/update_context_cache.py deleted file mode 100644 index 15527418b6..0000000000 --- a/generative_ai/context_caching/update_context_cache.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def update_context_cache(cache_id: str) -> str: - # [START generativeaionvertexai_gemini_update_context_cache] - import vertexai - from datetime import datetime as dt - from datetime import timezone as tz - from datetime import timedelta - - from vertexai.preview import caching - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # cache_id = "your-cache-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - cached_content = caching.CachedContent(cached_content_name=cache_id) - - # Option1: Update the context cache using TTL (Time to live) - cached_content.update(ttl=timedelta(hours=3)) - cached_content.refresh() - - # Option2: Update the context cache using specific time - next_week_utc = dt.now(tz.utc) + timedelta(days=7) - cached_content.update(expire_time=next_week_utc) - cached_content.refresh() - - print(cached_content.expire_time) - # Example response: - # 2024-09-11 17:16:45.864520+00:00 - # [END generativeaionvertexai_gemini_update_context_cache] - return cached_content.expire_time - - -if __name__ == "__main__": - update_context_cache("1234567890") diff --git a/generative_ai/context_caching/use_context_cache.py b/generative_ai/context_caching/use_context_cache.py deleted file mode 100644 index 1c904518b3..0000000000 --- a/generative_ai/context_caching/use_context_cache.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def use_context_cache(cache_id: str) -> str: - # [START generativeaionvertexai_gemini_use_context_cache] - import vertexai - - from vertexai.preview.generative_models import GenerativeModel - from vertexai.preview import caching - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # cache_id = "your-cache-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - cached_content = caching.CachedContent(cached_content_name=cache_id) - - model = GenerativeModel.from_cached_content(cached_content=cached_content) - - response = model.generate_content("What are the papers about?") - - print(response.text) - # Example response: - # The provided text is about a new family of multimodal models called Gemini, developed by Google. - # ... - # [END generativeaionvertexai_gemini_use_context_cache] - - return response.text - - -if __name__ == "__main__": - use_context_cache("1234567890") diff --git a/generative_ai/controlled_generation/controlled_generation_test.py b/generative_ai/controlled_generation/controlled_generation_test.py deleted file mode 100644 index 2b566cf0d3..0000000000 --- a/generative_ai/controlled_generation/controlled_generation_test.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import example_01 -import example_02 -import example_03 -import example_04 -import example_05 -import example_06 -import example_07 - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def test_config_response_mime_type() -> None: - response = example_05.generate_content() - assert response - - -def test_config_response_schema() -> None: - response = example_01.generate_content() - assert response - - -def test_config_response_schema2() -> None: - response = example_02.generate_content() - assert response - - -def test_config_response_schema3() -> None: - response = example_03.generate_content() - assert response - - -def test_config_response_schema4() -> None: - response = example_04.generate_content() - assert response - - -def test_config_response_schema6() -> None: - response = example_06.generate_content() - assert response - - -def test_config_response_schema7() -> None: - response = example_07.generate_content() - assert response diff --git a/generative_ai/controlled_generation/example_01.py b/generative_ai/controlled_generation/example_01.py deleted file mode 100644 index b6d06a4e87..0000000000 --- a/generative_ai/controlled_generation/example_01.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_schema] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - response_schema = { - "type": "array", - "items": { - "type": "object", - "properties": { - "recipe_name": { - "type": "string", - }, - }, - "required": ["recipe_name"], - }, - } - - model = GenerativeModel("gemini-1.5-pro-002") - - response = model.generate_content( - "List a few popular cookie recipes", - generation_config=GenerationConfig( - response_mime_type="application/json", response_schema=response_schema - ), - ) - - print(response.text) - # Example response: - # [ - # {"recipe_name": "Chocolate Chip Cookies"}, - # {"recipe_name": "Peanut Butter Cookies"}, - # {"recipe_name": "Snickerdoodles"}, - # {"recipe_name": "Oatmeal Raisin Cookies"}, - # ] - - # [END generativeaionvertexai_gemini_controlled_generation_response_schema] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/controlled_generation/example_02.py b/generative_ai/controlled_generation/example_02.py deleted file mode 100644 index fbea29bdbe..0000000000 --- a/generative_ai/controlled_generation/example_02.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 
2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_schema_2] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - response_schema = { - "type": "ARRAY", - "items": { - "type": "ARRAY", - "items": { - "type": "OBJECT", - "properties": { - "rating": {"type": "INTEGER"}, - "flavor": {"type": "STRING"}, - }, - }, - }, - } - - prompt = """ - Reviews from our social media: - - "Absolutely loved it! Best ice cream I've ever had." Rating: 4, Flavor: Strawberry Cheesecake - - "Quite good, but a bit too sweet for my taste." 
Rating: 1, Flavor: Mango Tango - """ - - model = GenerativeModel("gemini-1.5-pro-002") - - response = model.generate_content( - prompt, - generation_config=GenerationConfig( - response_mime_type="application/json", response_schema=response_schema - ), - ) - - print(response.text) - # Example response: - # [ - # [ - # {"flavor": "Strawberry Cheesecake", "rating": 4}, - # {"flavor": "Mango Tango", "rating": 1}, - # ] - # ] - - # [END generativeaionvertexai_gemini_controlled_generation_response_schema_2] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/controlled_generation/example_03.py b/generative_ai/controlled_generation/example_03.py deleted file mode 100644 index 31fb65953c..0000000000 --- a/generative_ai/controlled_generation/example_03.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_schema_3] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - response_schema = { - "type": "OBJECT", - "properties": { - "forecast": { - "type": "ARRAY", - "items": { - "type": "OBJECT", - "properties": { - "Day": {"type": "STRING", "nullable": True}, - "Forecast": {"type": "STRING", "nullable": True}, - "Temperature": {"type": "INTEGER", "nullable": True}, - "Humidity": {"type": "STRING", "nullable": True}, - "Wind Speed": {"type": "INTEGER", "nullable": True}, - }, - "required": ["Day", "Temperature", "Forecast", "Wind Speed"], - }, - } - }, - } - - prompt = """ - The week ahead brings a mix of weather conditions. - Sunday is expected to be sunny with a temperature of 77°F and a humidity level of 50%. Winds will be light at around 10 km/h. - Monday will see partly cloudy skies with a slightly cooler temperature of 72°F and the winds will pick up slightly to around 15 km/h. - Tuesday brings rain showers, with temperatures dropping to 64°F and humidity rising to 70%. - Wednesday may see thunderstorms, with a temperature of 68°F. - Thursday will be cloudy with a temperature of 66°F and moderate humidity at 60%. - Friday returns to partly cloudy conditions, with a temperature of 73°F and the Winds will be light at 12 km/h. - Finally, Saturday rounds off the week with sunny skies, a temperature of 80°F, and a humidity level of 40%. Winds will be gentle at 8 km/h. 
- """ - - model = GenerativeModel("gemini-1.5-pro-002") - - response = model.generate_content( - prompt, - generation_config=GenerationConfig( - response_mime_type="application/json", response_schema=response_schema - ), - ) - - print(response.text) - # Example response: - # {"forecast": [{"Day": "Sunday", "Forecast": "Sunny", "Temperature": 77, "Humidity": "50%", "Wind Speed": 10}, - # {"Day": "Monday", "Forecast": "Partly Cloudy", "Temperature": 72, "Wind Speed": 15}, - # {"Day": "Tuesday", "Forecast": "Rain Showers", "Temperature": 64, "Humidity": "70%"}, - # {"Day": "Wednesday", "Forecast": "Thunderstorms", "Temperature": 68}, - # {"Day": "Thursday", "Forecast": "Cloudy", "Temperature": 66, "Humidity": "60%"}, - # {"Day": "Friday", "Forecast": "Partly Cloudy", "Temperature": 73, "Wind Speed": 12}, - # {"Day": "Saturday", "Forecast": "Sunny", "Temperature": 80, "Humidity": "40%", "Wind Speed": 8}]} - - # [END generativeaionvertexai_gemini_controlled_generation_response_schema_3] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/controlled_generation/example_04.py b/generative_ai/controlled_generation/example_04.py deleted file mode 100644 index f45fc948ef..0000000000 --- a/generative_ai/controlled_generation/example_04.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_schema_4] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - response_schema = { - "type": "ARRAY", - "items": { - "type": "OBJECT", - "properties": { - "to_discard": {"type": "INTEGER"}, - "subcategory": {"type": "STRING"}, - "safe_handling": {"type": "INTEGER"}, - "item_category": { - "type": "STRING", - "enum": [ - "clothing", - "winter apparel", - "specialized apparel", - "furniture", - "decor", - "tableware", - "cookware", - "toys", - ], - }, - "for_resale": {"type": "INTEGER"}, - "condition": { - "type": "STRING", - "enum": [ - "new in package", - "like new", - "gently used", - "used", - "damaged", - "soiled", - ], - }, - }, - }, - } - - prompt = """ - Item description: - The item is a long winter coat that has many tears all around the seams and is falling apart. - It has large questionable stains on it. 
- """ - - model = GenerativeModel("gemini-1.5-pro-002") - - response = model.generate_content( - prompt, - generation_config=GenerationConfig( - response_mime_type="application/json", response_schema=response_schema - ), - ) - - print(response.text) - # Example response: - # [ - # { - # "condition": "damaged", - # "item_category": "clothing", - # "subcategory": "winter apparel", - # "to_discard": 123, - # } - # ] - - # [END generativeaionvertexai_gemini_controlled_generation_response_schema_4] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/controlled_generation/example_05.py b/generative_ai/controlled_generation/example_05.py deleted file mode 100644 index 6d4f75e8b1..0000000000 --- a/generative_ai/controlled_generation/example_05.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_mime_type] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-1.5-flash-002") - - prompt = """ - List a few popular cookie recipes using this JSON schema: - Recipe = {"recipe_name": str} - Return: `list[Recipe]` - """ - - response = model.generate_content( - prompt, - generation_config=GenerationConfig(response_mime_type="application/json"), - ) - - print(response.text) - # Example response: - # [ - # {"recipe_name": "Chocolate Chip Cookies"}, - # {"recipe_name": "Oatmeal Raisin Cookies"}, - # {"recipe_name": "Snickerdoodles"}, - # {"recipe_name": "Peanut Butter Cookies"}, - # {"recipe_name": "Sugar Cookies"}, - # ] - - # [END generativeaionvertexai_gemini_controlled_generation_response_mime_type] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/controlled_generation/example_06.py b/generative_ai/controlled_generation/example_06.py deleted file mode 100644 index 1441e82058..0000000000 --- a/generative_ai/controlled_generation/example_06.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_schema_6] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - response_schema = { - "type": "ARRAY", - "items": { - "type": "ARRAY", - "items": { - "type": "OBJECT", - "properties": { - "object": {"type": "STRING"}, - }, - }, - }, - } - - model = GenerativeModel("gemini-1.5-pro-002") - - response = model.generate_content( - [ - # Text prompt - "Generate a list of objects in the images.", - # Http Image - Part.from_uri( - "https://storage.googleapis.com/cloud-samples-data/generative-ai/image/office-desk.jpeg", - "image/jpeg", - ), - # Cloud storage object - Part.from_uri( - "gs://cloud-samples-data/generative-ai/image/gardening-tools.jpeg", - "image/jpeg", - ), - ], - generation_config=GenerationConfig( - response_mime_type="application/json", response_schema=response_schema - ), - ) - - print(response.text) - # Example response: - # [ - # [ - # {"object": "globe"}, {"object": "tablet"}, {"object": "toy car"}, - # {"object": "airplane"}, {"object": "keyboard"}, {"object": "mouse"}, - # {"object": "passport"}, {"object": "sunglasses"}, {"object": "money"}, - # {"object": "notebook"}, {"object": "pen"}, {"object": "coffee cup"}, - # ], - # [ - # {"object": "watering can"}, {"object": "plant"}, {"object": "flower pot"}, - # {"object": "gloves"}, {"object": "garden tool"}, - # ], - # ] - - # [END generativeaionvertexai_gemini_controlled_generation_response_schema_6] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git 
a/generative_ai/controlled_generation/example_07.py b/generative_ai/controlled_generation/example_07.py deleted file mode 100644 index 3e8d2197ea..0000000000 --- a/generative_ai/controlled_generation/example_07.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_controlled_generation_response_schema_7] - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-1.5-pro") - - response_schema = {"type": "STRING", "enum": ["drama", "comedy", "documentary"]} - - prompt = ( - "The film aims to educate and inform viewers about real-life subjects, events, or people." - "It offers a factual record of a particular topic by combining interviews, historical footage, " - "and narration. The primary purpose of a film is to present information and provide insights " - "into various aspects of reality." 
- ) - - response = model.generate_content( - prompt, - generation_config=GenerationConfig( - response_mime_type="text/x.enum", response_schema=response_schema - ), - ) - - print(response.text) - # Example response: - # 'documentary' - - # [END generativeaionvertexai_gemini_controlled_generation_response_schema_7] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/controlled_generation/noxfile_config.py b/generative_ai/controlled_generation/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/controlled_generation/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. 
You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/controlled_generation/requirements-test.txt b/generative_ai/controlled_generation/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/controlled_generation/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/controlled_generation/requirements.txt b/generative_ai/controlled_generation/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/controlled_generation/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/function_calling/advanced_example.py b/generative_ai/function_calling/advanced_example.py deleted file mode 100644 index a83c2fea94..0000000000 --- a/generative_ai/function_calling/advanced_example.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_function_call_advanced() -> GenerationResponse: - # [START generativeaionvertexai_gemini_function_calling_advanced] - import vertexai - - from vertexai.preview.generative_models import ( - FunctionDeclaration, - GenerativeModel, - Tool, - ToolConfig, - ) - - # TODO(developer): Update & uncomment below line - # PROJECT_ID = "your-project-id" - - # Initialize Vertex AI - vertexai.init(project=PROJECT_ID, location="us-central1") - - # Specify a function declaration and parameters for an API request - get_product_sku_func = FunctionDeclaration( - name="get_product_sku", - description="Get the available inventory for a Google products, e.g: Pixel phones, Pixel Watches, Google Home etc", - # Function parameters are specified in JSON schema format - parameters={ - "type": "object", - "properties": { - "product_name": {"type": "string", "description": "Product name"} - }, - }, - ) - - # Specify another function declaration and parameters for an API request - get_store_location_func = FunctionDeclaration( - name="get_store_location", - description="Get the location of the closest store", - # Function parameters are specified in JSON schema format - parameters={ - "type": "object", - "properties": {"location": {"type": "string", "description": "Location"}}, - }, - ) - - # Define a tool that includes the above functions - retail_tool = Tool( - function_declarations=[ - get_product_sku_func, - get_store_location_func, - ], - ) - 
- # Define a tool config for the above functions - retail_tool_config = ToolConfig( - function_calling_config=ToolConfig.FunctionCallingConfig( - # ANY mode forces the model to predict a function call - mode=ToolConfig.FunctionCallingConfig.Mode.ANY, - # List of functions that can be returned when the mode is ANY. - # If the list is empty, any declared function can be returned. - allowed_function_names=["get_product_sku"], - ) - ) - - model = GenerativeModel( - model_name="gemini-1.5-flash-002", - tools=[retail_tool], - tool_config=retail_tool_config, - ) - response = model.generate_content( - "Do you have the Pixel 8 Pro 128GB in stock?", - ) - - print(response.candidates[0].function_calls) - # Example response: - # [ - # name: "get_product_sku" - # args { - # fields { key: "product_name" value { string_value: "Pixel 8 Pro 128GB" }} - # } - # ] - - # [END generativeaionvertexai_gemini_function_calling_advanced] - return response - - -if __name__ == "__main__": - generate_function_call_advanced() diff --git a/generative_ai/function_calling/basic_example.py b/generative_ai/function_calling/basic_example.py deleted file mode 100644 index ce108337bb..0000000000 --- a/generative_ai/function_calling/basic_example.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_function_call() -> GenerationResponse: - # [START generativeaionvertexai_gemini_function_calling] - import vertexai - - from vertexai.generative_models import ( - Content, - FunctionDeclaration, - GenerationConfig, - GenerativeModel, - Part, - Tool, - ) - - # TODO(developer): Update & uncomment below line - # PROJECT_ID = "your-project-id" - - # Initialize Vertex AI - vertexai.init(project=PROJECT_ID, location="us-central1") - - # Initialize Gemini model - model = GenerativeModel("gemini-1.5-flash-002") - - # Define the user's prompt in a Content object that we can reuse in model calls - user_prompt_content = Content( - role="user", - parts=[ - Part.from_text("What is the weather like in Boston?"), - ], - ) - - # Specify a function declaration and parameters for an API request - function_name = "get_current_weather" - get_current_weather_func = FunctionDeclaration( - name=function_name, - description="Get the current weather in a given location", - # Function parameters are specified in JSON schema format - parameters={ - "type": "object", - "properties": {"location": {"type": "string", "description": "Location"}}, - }, - ) - - # Define a tool that includes the above get_current_weather_func - weather_tool = Tool( - function_declarations=[get_current_weather_func], - ) - - # Send the prompt and instruct the model to generate content using the Tool that you just created - response = model.generate_content( - user_prompt_content, - generation_config=GenerationConfig(temperature=0), - tools=[weather_tool], - ) - function_call = response.candidates[0].function_calls[0] - print(function_call) - - # Check the function name that the model responded with, and make an API call to an external system - if function_call.name == function_name: - # Extract the arguments to use in your API call - location = function_call.args["location"] # 
noqa: F841 - - # Here you can use your preferred method to make an API request to fetch the current weather, for example: - # api_response = requests.post(weather_api_url, data={"location": location}) - - # In this example, we'll use synthetic data to simulate a response payload from an external API - api_response = """{ "location": "Boston, MA", "temperature": 38, "description": "Partly Cloudy", - "icon": "partly-cloudy", "humidity": 65, "wind": { "speed": 10, "direction": "NW" } }""" - - # Return the API response to Gemini so it can generate a model response or request another function call - response = model.generate_content( - [ - user_prompt_content, # User prompt - response.candidates[0].content, # Function call response - Content( - parts=[ - Part.from_function_response( - name=function_name, - response={ - "content": api_response, # Return the API response to Gemini - }, - ), - ], - ), - ], - tools=[weather_tool], - ) - - # Get the model response - print(response.text) - # Example response: - # The weather in Boston is partly cloudy with a temperature of 38 degrees Fahrenheit. - # The humidity is 65% and the wind is blowing from the northwest at 10 mph. - - # [END generativeaionvertexai_gemini_function_calling] - return response - - -if __name__ == "__main__": - generate_function_call() diff --git a/generative_ai/function_calling/chat_example.py b/generative_ai/function_calling/chat_example.py deleted file mode 100644 index 31bf009359..0000000000 --- a/generative_ai/function_calling/chat_example.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from vertexai.generative_models import ChatSession - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_function_call_chat() -> ChatSession: - # [START generativeaionvertexai_gemini_function_calling_chat] - import vertexai - - from vertexai.generative_models import ( - FunctionDeclaration, - GenerationConfig, - GenerativeModel, - Part, - Tool, - ) - - # TODO(developer): Update & uncomment below line - # PROJECT_ID = "your-project-id" - - # Initialize Vertex AI - vertexai.init(project=PROJECT_ID, location="us-central1") - - # Specify a function declaration and parameters for an API request - get_product_sku = "get_product_sku" - get_product_sku_func = FunctionDeclaration( - name=get_product_sku, - description="Get the SKU for a product", - # Function parameters are specified in OpenAPI JSON schema format - parameters={ - "type": "object", - "properties": { - "product_name": {"type": "string", "description": "Product name"} - }, - }, - ) - - # Specify another function declaration and parameters for an API request - get_store_location_func = FunctionDeclaration( - name="get_store_location", - description="Get the location of the closest store", - # Function parameters are specified in JSON schema format - parameters={ - "type": "object", - "properties": {"location": {"type": "string", "description": "Location"}}, - }, - ) - - # Define a tool that includes the above functions - retail_tool = Tool( - function_declarations=[ - get_product_sku_func, - get_store_location_func, - ], - ) - - # Initialize Gemini model - model = 
GenerativeModel( - model_name="gemini-1.5-flash-001", - generation_config=GenerationConfig(temperature=0), - tools=[retail_tool], - ) - - # Start a chat session - chat = model.start_chat() - - # Send a prompt for the first conversation turn that should invoke the get_product_sku function - response = chat.send_message("Do you have the Pixel 8 Pro in stock?") - - function_call = response.candidates[0].function_calls[0] - print(function_call) - - # Check the function name that the model responded with, and make an API call to an external system - if function_call.name == get_product_sku: - # Extract the arguments to use in your API call - product_name = function_call.args["product_name"] # noqa: F841 - - # Here you can use your preferred method to make an API request to retrieve the product SKU, as in: - # api_response = requests.post(product_api_url, data={"product_name": product_name}) - - # In this example, we'll use synthetic data to simulate a response payload from an external API - api_response = {"sku": "GA04834-US", "in_stock": "Yes"} - - # Return the API response to Gemini, so it can generate a model response or request another function call - response = chat.send_message( - Part.from_function_response( - name=get_product_sku, - response={ - "content": api_response, - }, - ), - ) - # Extract the text from the model response - print(response.text) - - # Send a prompt for the second conversation turn that should invoke the get_store_location function - response = chat.send_message( - "Is there a store in Mountain View, CA that I can visit to try it out?" 
- ) - - function_call = response.candidates[0].function_calls[0] - print(function_call) - - # Check the function name that the model responded with, and make an API call to an external system - if function_call.name == "get_store_location": - # Extract the arguments to use in your API call - location = function_call.args["location"] # noqa: F841 - - # Here you can use your preferred method to make an API request to retrieve store location closest to the user, as in: - # api_response = requests.post(store_api_url, data={"location": location}) - - # In this example, we'll use synthetic data to simulate a response payload from an external API - api_response = {"store": "2000 N Shoreline Blvd, Mountain View, CA 94043, US"} - - # Return the API response to Gemini, so it can generate a model response or request another function call - response = chat.send_message( - Part.from_function_response( - name="get_store_location", - response={ - "content": api_response, - }, - ), - ) - - # Extract the text from the model response - print(response.text) - # Example response: - # name: "get_product_sku" - # args { - # fields { key: "product_name" value {string_value: "Pixel 8 Pro" } - # } - # } - # Yes, we have the Pixel 8 Pro in stock. - # name: "get_store_location" - # args { - # fields { key: "location" value { string_value: "Mountain View, CA" } - # } - # } - # Yes, there is a store located at 2000 N Shoreline Blvd, Mountain View, CA 94043, US. 
- - # [END generativeaionvertexai_gemini_function_calling_chat] - - return chat - - -if __name__ == "__main__": - generate_function_call_chat() diff --git a/generative_ai/function_calling/chat_function_calling_basic.py b/generative_ai/function_calling/chat_function_calling_basic.py deleted file mode 100644 index b0e8445755..0000000000 --- a/generative_ai/function_calling/chat_function_calling_basic.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> object: - # [START generativeaionvertexai_gemini_chat_completions_function_calling_basic] - import vertexai - import openai - - from google.auth import default, transport - - # TODO(developer): Update & uncomment below line - # PROJECT_ID = "your-project-id" - location = "us-central1" - - vertexai.init(project=PROJECT_ID, location=location) - - # Programmatically get an access token - credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) - auth_request = transport.requests.Request() - credentials.refresh(auth_request) - - # # OpenAI Client - client = openai.OpenAI( - base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{location}/endpoints/openapi", - api_key=credentials.token, - ) - - tools = [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616", - }, - }, - "required": ["location"], - }, - }, - } - ] - - messages = [] - messages.append( - { - "role": "system", - "content": "Don't make assumptions about what values to plug into functions. 
Ask for clarification if a user request is ambiguous.", - } - ) - messages.append({"role": "user", "content": "What is the weather in Boston?"}) - - response = client.chat.completions.create( - model="google/gemini-1.5-flash-001", - messages=messages, - tools=tools, - ) - - print("Function:", response.choices[0].message.tool_calls[0].id) - print("Arguments:", response.choices[0].message.tool_calls[0].function.arguments) - # Example response: - # Function: get_current_weather - # Arguments: {"location":"Boston"} - - # [END generativeaionvertexai_gemini_chat_completions_function_calling_basic] - return response - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/function_calling/chat_function_calling_config.py b/generative_ai/function_calling/chat_function_calling_config.py deleted file mode 100644 index d80e37f0ce..0000000000 --- a/generative_ai/function_calling/chat_function_calling_config.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> object: - # [START generativeaionvertexai_gemini_chat_completions_function_calling_config] - import vertexai - import openai - - from google.auth import default, transport - - # TODO(developer): Update & uncomment below line - # PROJECT_ID = "your-project-id" - location = "us-central1" - - vertexai.init(project=PROJECT_ID, location=location) - - # Programmatically get an access token - credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) - auth_request = transport.requests.Request() - credentials.refresh(auth_request) - - # OpenAI Client - client = openai.OpenAI( - base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{location}/endpoints/openapi", - api_key=credentials.token, - ) - - tools = [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616", - }, - }, - "required": ["location"], - }, - }, - } - ] - - messages = [] - messages.append( - { - "role": "system", - "content": "Don't make assumptions about what values to plug into functions. 
Ask for clarification if a user request is ambiguous.", - } - ) - messages.append({"role": "user", "content": "What is the weather in Boston, MA?"}) - - response = client.chat.completions.create( - model="google/gemini-1.5-flash-002", - messages=messages, - tools=tools, - tool_choice="auto", - ) - - print("Function:", response.choices[0].message.tool_calls[0].id) - print("Arguments:", response.choices[0].message.tool_calls[0].function.arguments) - # Example response: - # Function: get_current_weather - # Arguments: {"location":"Boston"} - # [END generativeaionvertexai_gemini_chat_completions_function_calling_config] - - return response - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/function_calling/noxfile_config.py b/generative_ai/function_calling/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/function_calling/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. 
- "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/function_calling/parallel_function_calling_example.py b/generative_ai/function_calling/parallel_function_calling_example.py deleted file mode 100644 index e6e2bd89d0..0000000000 --- a/generative_ai/function_calling/parallel_function_calling_example.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from vertexai.generative_models import ChatSession - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def parallel_function_calling_example() -> ChatSession: - # [START generativeaionvertexai_function_calling_generate_parallel_calls] - import vertexai - - from vertexai.generative_models import ( - FunctionDeclaration, - GenerativeModel, - Part, - Tool, - ) - - # TODO(developer): Update & uncomment below line - # PROJECT_ID = "your-project-id" - - # Initialize Vertex AI - vertexai.init(project=PROJECT_ID, location="us-central1") - - # Specify a function declaration and parameters for an API request - function_name = "get_current_weather" - get_current_weather_func = FunctionDeclaration( - name=function_name, - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The location for which to get the weather. \ - It can be a city name, a city name and state, or a zip code. \ - Examples: 'San Francisco', 'San Francisco, CA', '95616', etc.", - }, - }, - }, - ) - - # In this example, we'll use synthetic data to simulate a response payload from an external API - def mock_weather_api_service(location: str) -> str: - temperature = 25 if location == "San Francisco" else 35 - return f"""{{ "location": "{location}", "temperature": {temperature}, "unit": "C" }}""" - - # Define a tool that includes the above function - tools = Tool( - function_declarations=[get_current_weather_func], - ) - - # Initialize Gemini model - model = GenerativeModel( - model_name="gemini-1.5-pro-002", - tools=[tools], - ) - - # Start a chat session - chat_session = model.start_chat() - response = chat_session.send_message( - "Get weather details in New Delhi and San Francisco?" 
- ) - - function_calls = response.candidates[0].function_calls - print("Suggested finction calls:\n", function_calls) - - if function_calls: - api_responses = [] - for func in function_calls: - if func.name == function_name: - api_responses.append( - { - "content": mock_weather_api_service( - location=func.args["location"] - ) - } - ) - - # Return the API response to Gemini - response = chat_session.send_message( - [ - Part.from_function_response( - name="get_current_weather", - response=api_responses[0], - ), - Part.from_function_response( - name="get_current_weather", - response=api_responses[1], - ), - ], - ) - - print(response.text) - # Example response: - # The current weather in New Delhi is 35°C. The current weather in San Francisco is 25°C. - # [END generativeaionvertexai_function_calling_generate_parallel_calls] - return response - - -if __name__ == "__main__": - parallel_function_calling_example() diff --git a/generative_ai/function_calling/requirements-test.txt b/generative_ai/function_calling/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/function_calling/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/function_calling/requirements.txt b/generative_ai/function_calling/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/function_calling/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/function_calling/test_function_calling.py 
b/generative_ai/function_calling/test_function_calling.py deleted file mode 100644 index 9dc8c22cd5..0000000000 --- a/generative_ai/function_calling/test_function_calling.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff - -from google.api_core.exceptions import ResourceExhausted - -import advanced_example -import basic_example -import chat_example -import chat_function_calling_basic -import chat_function_calling_config -import parallel_function_calling_example - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_function_calling() -> None: - response = basic_example.generate_function_call() - - expected_summary = [ - "Boston", - ] - expected_responses = [ - "candidates", - "content", - "role", - "model", - "parts", - "Boston", - ] - assert all(x in str(response.text) for x in expected_summary) - assert all(x in str(response) for x in expected_responses) - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_function_calling_advanced_function_selection() -> None: - response = advanced_example.generate_function_call_advanced() - assert ( - "Pixel 8 Pro 128GB" - in response.candidates[0].function_calls[0].args["product_name"] - ) - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_function_calling_basic() -> None: - response = chat_function_calling_basic.generate_text() - assert "get_current_weather" in 
response.choices[0].message.tool_calls[0].id - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_function_calling_config() -> None: - response = chat_function_calling_config.generate_text() - assert "Boston" in response.choices[0].message.tool_calls[0].function.arguments - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_function_calling_chat() -> None: - chat = chat_example.generate_function_call_chat() - - assert chat - assert chat.history - - expected_summaries = [ - "Pixel 8 Pro", - "stock", - "store", - "2000 N Shoreline Blvd", - "Mountain View", - ] - assert any(x in str(chat.history) for x in expected_summaries) - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_parallel_function_calling() -> None: - response = parallel_function_calling_example.parallel_function_calling_example() - assert response is not None diff --git a/generative_ai/grounding/noxfile_config.py b/generative_ai/grounding/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/grounding/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. 
- -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/grounding/palm_example.py b/generative_ai/grounding/palm_example.py deleted file mode 100644 index de9565e615..0000000000 --- a/generative_ai/grounding/palm_example.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from typing import Optional - -from vertexai.language_models import TextGenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def grounding( - data_store_location: Optional[str] = None, - data_store_id: Optional[str] = None, -) -> TextGenerationResponse: - """Grounding example with a Large Language Model""" - # [START generativeaionvertexai_grounding] - import vertexai - - from vertexai.language_models import GroundingSource, TextGenerationModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - # TODO developer - override these parameters as needed: - parameters = { - "temperature": 0.1, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 256, # Token limit determines the maximum amount of text output. - "top_p": 0.8, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value. - "top_k": 20, # A top_k of 1 means the selected token is the most probable among all tokens. 
- } - - model = TextGenerationModel.from_pretrained("text-bison@002") - - # TODO(developer): Update and un-comment below lines - # data_store_id = "datastore_123456789012345" - # data_store_location = "global" - if data_store_id and data_store_location: - # Use Vertex AI Search data store - grounding_source = GroundingSource.VertexAISearch( - data_store_id=data_store_id, location=data_store_location - ) - else: - # Use Google Search for grounding (Private Preview) - grounding_source = GroundingSource.WebSearch() - - response = model.predict( - "What are the price, available colors, and storage size options of a Pixel Tablet?", - grounding_source=grounding_source, - **parameters, - ) - print(f"Response from Model: {response.text}") - print(f"Grounding Metadata: {response.grounding_metadata}") - # [END generativeaionvertexai_grounding] - - return response - - -if __name__ == "__main__": - grounding(data_store_id="data-store_1234567890123", data_store_location="global") diff --git a/generative_ai/grounding/requirements-test.txt b/generative_ai/grounding/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/grounding/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/grounding/requirements.txt b/generative_ai/grounding/requirements.txt deleted file mode 100644 index 250437ef3e..0000000000 --- a/generative_ai/grounding/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-cloud-aiplatform==1.82.0 diff --git a/generative_ai/grounding/test_grounding.py b/generative_ai/grounding/test_grounding.py deleted file mode 100644 index 334d3a38ed..0000000000 --- a/generative_ai/grounding/test_grounding.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff - -from google.api_core.exceptions import ResourceExhausted - -import palm_example -import vais_example -import web_example - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_grounding() -> None: - data_store_id = "test-search-engine_1689960780551" - response = palm_example.grounding( - data_store_location="global", - data_store_id=data_store_id, - ) - assert response - assert response.text - assert response.grounding_metadata - - -def test_gemini_grounding_vais_example() -> None: - response = vais_example.generate_text_with_grounding_vertex_ai_search( - "grounding-test-datastore" - ) - assert response - - -def test_gemini_grounding_web_example() -> None: - response = web_example.generate_text_with_grounding_web() - assert response diff --git a/generative_ai/grounding/vais_example.py b/generative_ai/grounding/vais_example.py deleted file mode 100644 index a08715dd58..0000000000 --- a/generative_ai/grounding/vais_example.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text_with_grounding_vertex_ai_search( - data_store_id: str, -) -> GenerationResponse: - # [START generativeaionvertexai_gemini_grounding_with_vais] - import vertexai - - from vertexai.preview.generative_models import ( - GenerationConfig, - GenerativeModel, - Tool, - grounding, - ) - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # data_store_id = "your-data-store-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-1.5-flash-001") - - tool = Tool.from_retrieval( - grounding.Retrieval( - grounding.VertexAISearch( - datastore=data_store_id, - project=PROJECT_ID, - location="global", - ) - ) - ) - - prompt = "How do I make an appointment to renew my driver's license?" - response = model.generate_content( - prompt, - tools=[tool], - generation_config=GenerationConfig( - temperature=0.0, - ), - ) - - print(response.text) - - # [END generativeaionvertexai_gemini_grounding_with_vais] - return response - - -if __name__ == "__main__": - generate_text_with_grounding_vertex_ai_search("data-store_1234567890123") diff --git a/generative_ai/grounding/web_example.py b/generative_ai/grounding/web_example.py deleted file mode 100644 index 926dd6b3ae..0000000000 --- a/generative_ai/grounding/web_example.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text_with_grounding_web() -> GenerationResponse: - # [START generativeaionvertexai_gemini_grounding_with_web] - import vertexai - - from vertexai.generative_models import ( - GenerationConfig, - GenerativeModel, - Tool, - grounding, - ) - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-1.5-flash-001") - - # Use Google Search for grounding - tool = Tool.from_google_search_retrieval( - grounding.GoogleSearchRetrieval( - # Optional: For Dynamic Retrieval - dynamic_retrieval_config=grounding.DynamicRetrievalConfig( - mode=grounding.DynamicRetrievalConfig.Mode.MODE_DYNAMIC, - dynamic_threshold=0.7, - ) - ) - ) - - prompt = "When is the next total solar eclipse in US?" - response = model.generate_content( - prompt, - tools=[tool], - generation_config=GenerationConfig( - temperature=0.0, - ), - ) - - print(response) - # Example response: - # The next total solar eclipse visible from the contiguous United States will be on **August 23, 2044**. 
- - # [END generativeaionvertexai_gemini_grounding_with_web] - return response - - -if __name__ == "__main__": - generate_text_with_grounding_web() diff --git a/generative_ai/token_count/api_example.py b/generative_ai/token_count/api_example.py deleted file mode 100644 index 05f8bf7f55..0000000000 --- a/generative_ai/token_count/api_example.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def count_token_api_example() -> int: - # [START generativeaionvertexai_token_count_sample_with_genai] - import vertexai - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update project & location - vertexai.init(project=PROJECT_ID, location="us-central1") - - # using Vertex AI Model as tokenzier - model = GenerativeModel("gemini-1.5-flash-002") - - prompt = "hello world" - response = model.count_tokens(prompt) - print(f"Prompt Token Count: {response.total_tokens}") - print(f"Prompt Character Count: {response.total_billable_characters}") - # Example response: - # Prompt Token Count: 2 - # Prompt Token Count: 10 - - prompt = ["hello world", "what's the weather today"] - response = model.count_tokens(prompt) - print(f"Prompt Token Count: {response.total_tokens}") - print(f"Prompt Character Count: {response.total_billable_characters}") - # Example response: - # Prompt Token Count: 8 - # Prompt Token Count: 31 - # [END 
generativeaionvertexai_token_count_sample_with_genai] - return response.total_tokens - - -if __name__ == "__main__": - count_token_api_example() diff --git a/generative_ai/token_count/list_tokens_example.py b/generative_ai/token_count/list_tokens_example.py deleted file mode 100644 index 26592ff76c..0000000000 --- a/generative_ai/token_count/list_tokens_example.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def list_tokens_example() -> int: - # [START generativeaionvertexai_compute_tokens] - from vertexai.preview.tokenization import get_tokenizer_for_model - - # init local tokenzier - tokenizer = get_tokenizer_for_model("gemini-1.5-flash-001") - - # Count Tokens - prompt = "why is the sky blue?" 
- response = tokenizer.count_tokens(prompt) - print(f"Tokens count: {response.total_tokens}") - # Example response: - # Tokens count: 6 - - # Compute Tokens - response = tokenizer.compute_tokens(prompt) - print(f"Tokens list: {response.tokens_info}") - # Example response: - # Tokens list: [TokensInfo(token_ids=[18177, 603, 573, 8203, 3868, 235336], - # tokens=[b'why', b' is', b' the', b' sky', b' blue', b'?'], role='user')] - # [END generativeaionvertexai_compute_tokens] - return len(response.tokens_info) - - -if __name__ == "__main__": - list_tokens_example() diff --git a/generative_ai/token_count/local_sdk_example.py b/generative_ai/token_count/local_sdk_example.py deleted file mode 100644 index 2ab4d7ea72..0000000000 --- a/generative_ai/token_count/local_sdk_example.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -def local_tokenizer_example() -> int: - # [START generativeaionvertexai_token_count_sample_with_local_sdk] - from vertexai.preview.tokenization import get_tokenizer_for_model - - # Using local tokenzier - tokenizer = get_tokenizer_for_model("gemini-1.5-flash-002") - - prompt = "hello world" - response = tokenizer.count_tokens(prompt) - print(f"Prompt Token Count: {response.total_tokens}") - # Example response: - # Prompt Token Count: 2 - - prompt = ["hello world", "what's the weather today"] - response = tokenizer.count_tokens(prompt) - print(f"Prompt Token Count: {response.total_tokens}") - # Example response: - # Prompt Token Count: 8 - - # [END generativeaionvertexai_token_count_sample_with_local_sdk] - return response.total_tokens - - -if __name__ == "__main__": - local_tokenizer_example() diff --git a/generative_ai/token_count/multimodal_token_count_example.py b/generative_ai/token_count/multimodal_token_count_example.py deleted file mode 100644 index 06e936652a..0000000000 --- a/generative_ai/token_count/multimodal_token_count_example.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def count_tokens_multimodal_example() -> GenerationResponse: - # [START generativeaionvertexai_gemini_token_count_multimodal] - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-1.5-flash-002") - - contents = [ - Part.from_uri( - "gs://cloud-samples-data/generative-ai/video/pixel8.mp4", - mime_type="video/mp4", - ), - "Provide a description of the video.", - ] - - # tokens count for user prompt - response = model.count_tokens(contents) - print(f"Prompt Token Count: {response.total_tokens}") - print(f"Prompt Character Count: {response.total_billable_characters}") - # Example response: - # Prompt Token Count: 16822 - # Prompt Character Count: 30 - - # Send text to Gemini - response = model.generate_content(contents) - usage_metadata = response.usage_metadata - - # tokens count for model response - print(f"Prompt Token Count: {usage_metadata.prompt_token_count}") - print(f"Candidates Token Count: {usage_metadata.candidates_token_count}") - print(f"Total Token Count: {usage_metadata.total_token_count}") - # Example response: - # Prompt Token Count: 16822 - # Candidates Token Count: 71 - # Total Token Count: 16893 - - # [END generativeaionvertexai_gemini_token_count_multimodal] - return response - - -if __name__ == "__main__": - count_tokens_multimodal_example() diff --git a/generative_ai/token_count/noxfile_config.py b/generative_ai/token_count/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/token_count/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in 
compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/token_count/requirements-test.txt b/generative_ai/token_count/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/token_count/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/token_count/requirements.txt b/generative_ai/token_count/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/token_count/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/token_count/simple_example.py b/generative_ai/token_count/simple_example.py deleted file mode 100644 index cf25aa1ef8..0000000000 --- a/generative_ai/token_count/simple_example.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def count_token_example() -> GenerationResponse: - # [START generativeaionvertexai_gemini_token_count] - import vertexai - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-1.5-flash-002") - - prompt = "Why is the sky blue?" - # Prompt tokens count - response = model.count_tokens(prompt) - print(f"Prompt Token Count: {response.total_tokens}") - print(f"Prompt Character Count: {response.total_billable_characters}") - - # Send text to Gemini - response = model.generate_content(prompt) - - # Response tokens count - usage_metadata = response.usage_metadata - print(f"Prompt Token Count: {usage_metadata.prompt_token_count}") - print(f"Candidates Token Count: {usage_metadata.candidates_token_count}") - print(f"Total Token Count: {usage_metadata.total_token_count}") - # Example response: - # Prompt Token Count: 6 - # Prompt Character Count: 16 - # Prompt Token Count: 6 - # Candidates Token Count: 315 - # Total Token Count: 321 - - # [END generativeaionvertexai_gemini_token_count] - return response - - -if __name__ == "__main__": - count_token_example() diff --git a/generative_ai/token_count/test_list_tokens_example.py b/generative_ai/token_count/test_list_tokens_example.py deleted file mode 100644 index aae8fb75ba..0000000000 --- a/generative_ai/token_count/test_list_tokens_example.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import list_tokens_example - - -# TODO: move to test_token_count_examples.py -def test_list_tokens_example() -> int: - response = list_tokens_example.list_tokens_example() - assert isinstance(response, int) diff --git a/generative_ai/token_count/test_token_count_examples.py b/generative_ai/token_count/test_token_count_examples.py deleted file mode 100644 index 365f66e478..0000000000 --- a/generative_ai/token_count/test_token_count_examples.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import api_example -import local_sdk_example -import multimodal_token_count_example -import simple_example - - -def test_local_sdk_example() -> None: - assert local_sdk_example.local_tokenizer_example() - assert api_example.count_token_api_example() - - -def test_simple_example() -> None: - response = simple_example.count_token_example() - assert response - assert response.usage_metadata - - -def test_multimodal_example() -> None: - print(dir(multimodal_token_count_example)) - response = multimodal_token_count_example.count_tokens_multimodal_example() - assert response - assert response.usage_metadata From 8db05d24a96dddc202408b6aef08c83a150d71b2 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 18:27:01 +0200 Subject: [PATCH 03/19] feat(genai): cleanup --- discoveryengine/answer_query_sample.py | 2 -- discoveryengine/session_sample.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/discoveryengine/answer_query_sample.py b/discoveryengine/answer_query_sample.py index ac8fc04a6e..54f655a712 100644 --- a/discoveryengine/answer_query_sample.py +++ b/discoveryengine/answer_query_sample.py @@ -11,9 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# NOTE: This snippet has been partially generated by `gemini-2.0-flash-001` # [START genappbuilder_answer_query] from google.api_core.client_options import ClientOptions diff --git a/discoveryengine/session_sample.py b/discoveryengine/session_sample.py index 574d36ebf7..a4744dfe9d 100644 --- a/discoveryengine/session_sample.py +++ b/discoveryengine/session_sample.py @@ -12,9 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# NOTE: This snippet has been partially generated by `gemini-2.0-flash-001` # [START genappbuilder_create_session] from google.cloud import discoveryengine_v1 as discoveryengine From 4e219bf40c229c8e63edc8d159dcd9dbe184f09a Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 19:48:39 +0200 Subject: [PATCH 04/19] feat(genai): cleanup --- .../batch_predict/batch_code_predict.py | 60 --------- .../batch_predict/batch_text_predict.py | 62 ---------- .../gemini_batch_predict_bigquery.py | 71 ----------- .../batch_predict/gemini_batch_predict_gcs.py | 73 ----------- generative_ai/batch_predict/noxfile_config.py | 42 ------- .../batch_predict/requirements-test.txt | 4 - generative_ai/batch_predict/requirements.txt | 14 --- .../test_batch_predict_examples.py | 104 ---------------- generative_ai/express_mode/api_key_example.py | 32 ----- .../express_mode/api_key_example_test.py | 44 ------- generative_ai/express_mode/noxfile_config.py | 42 ------- .../express_mode/requirements-test.txt | 2 - generative_ai/express_mode/requirements.txt | 1 - generative_ai/image/image_example01.py | 53 -------- generative_ai/image/image_example02.py | 50 -------- generative_ai/image/noxfile_config.py | 42 ------- generative_ai/image/requirements-test.txt | 4 - generative_ai/image/requirements.txt | 14 --- generative_ai/image/test_image_samples.py | 27 ---- ...reate_reasoning_engine_advanced_example.py | 101 --------------- .../create_reasoning_engine_example.py | 72 ----------- .../delete_reasoning_engine_example.py | 40 ------ .../get_reasoning_engine_example.py | 42 ------- .../list_reasoning_engine_example.py | 45 ------- .../reasoning_engine/noxfile_config.py | 42 ------- .../query_reasoning_engine_example.py | 41 ------ .../reasoning_engine/requirements-test.txt | 4 - .../reasoning_engine/requirements.txt | 14 --- .../test_reasoning_engine_examples.py | 77 ------------ generative_ai/safety/noxfile_config.py | 42 ------- generative_ai/safety/requirements-test.txt | 4 - 
generative_ai/safety/requirements.txt | 14 --- generative_ai/safety/safety_config_example.py | 72 ----------- .../safety/safety_config_example_test.py | 20 --- .../system_instructions/noxfile_config.py | 42 ------- .../system_instructions/requirements-test.txt | 4 - .../system_instructions/requirements.txt | 14 --- .../system_instructions_example.py | 51 -------- .../system_instructions_example_test.py | 20 --- .../template_folder/advanced_example.py | 66 ---------- .../template_folder/noxfile_config.py | 42 ------- .../template_folder/requirements-test.txt | 4 - .../template_folder/requirements.txt | 14 --- .../template_folder/simple_example.py | 41 ------ .../test_template_folder_examples.py | 26 ---- .../text_generation/chat_code_example.py | 48 ------- .../text_generation/chat_multiturn_example.py | 60 --------- .../chat_multiturn_stream_example.py | 63 ---------- .../text_generation/chat_simple_example.py | 52 -------- .../code_completion_example.py | 41 ------ .../text_generation/codegen_example.py | 44 ------- .../gemini_describe_http_image_example.py | 50 -------- .../gemini_describe_http_pdf_example.py | 51 -------- .../text_generation/gemini_translate_text.py | 79 ------------ .../generation_config_example.py | 60 --------- .../multimodal_stream_example.py | 57 --------- .../text_generation/noxfile_config.py | 42 ------- .../text_generation/requirements-test.txt | 4 - .../text_generation/requirements.txt | 14 --- .../single_turn_multi_image_example.py | 62 ---------- .../text_generation/test_text_examples.py | 55 -------- .../test_text_generation_examples.py | 117 ------------------ .../text_generation/text_example01.py | 47 ------- .../text_generation/text_example02.py | 54 -------- .../text_generation/text_example03.py | 45 ------- .../text_generation/text_stream_example01.py | 48 ------- .../text_generation/text_stream_example02.py | 68 ---------- .../text_models/classify_news_items.py | 68 ---------- .../text_models/classify_news_items_test.py | 24 
---- .../code_completion_test_function.py | 41 ------ .../code_completion_test_function_test.py | 29 ----- .../text_models/code_generation_unittest.py | 57 --------- .../code_generation_unittest_test.py | 24 ---- generative_ai/text_models/extraction.py | 80 ------------ generative_ai/text_models/extraction_test.py | 30 ----- .../list_tuned_code_generation_models.py | 38 ------ .../list_tuned_code_generation_models_test.py | 36 ------ generative_ai/text_models/noxfile_config.py | 42 ------- .../text_models/requirements-test.txt | 4 - generative_ai/text_models/requirements.txt | 14 --- .../text_models/sentiment_analysis.py | 83 ------------- .../text_models/sentiment_analysis_test.py | 24 ---- generative_ai/text_models/streaming_chat.py | 70 ----------- .../text_models/streaming_chat_test.py | 24 ---- generative_ai/text_models/streaming_code.py | 53 -------- .../text_models/streaming_code_test.py | 24 ---- .../text_models/streaming_codechat.py | 53 -------- .../text_models/streaming_codechat_test.py | 24 ---- generative_ai/text_models/summarization.py | 72 ----------- .../text_models/summarization_test.py | 27 ---- .../understand_audio/noxfile_config.py | 42 ------- .../understand_audio/requirements-test.txt | 4 - .../understand_audio/requirements.txt | 14 --- .../understand_audio/summarization_example.py | 58 --------- .../understand_audio/transcription_example.py | 57 --------- .../understand_audio/understand_audio_test.py | 26 ---- .../understand_docs/noxfile_config.py | 42 ------- generative_ai/understand_docs/pdf_example.py | 56 --------- .../understand_docs/pdf_example_test.py | 20 --- .../understand_docs/requirements-test.txt | 4 - .../understand_docs/requirements.txt | 14 --- .../understand_video/audio_video_example.py | 58 --------- .../understand_video/noxfile_config.py | 42 ------- .../understand_video/requirements-test.txt | 4 - .../understand_video/requirements.txt | 14 --- .../single_turn_video_example.py | 53 -------- 
.../understand_video/understand_video_test.py | 30 ----- .../gemini_describe_http_video_example.py | 50 -------- ...emini_youtube_video_key_moments_example.py | 55 -------- ...ini_youtube_video_summarization_example.py | 46 ------- generative_ai/video/multimodal_example01.py | 68 ---------- generative_ai/video/multimodal_example02.py | 56 --------- generative_ai/video/noxfile_config.py | 42 ------- generative_ai/video/requirements-test.txt | 4 - generative_ai/video/requirements.txt | 14 --- generative_ai/video/test_video_examples.py | 44 ------- 116 files changed, 4714 deletions(-) delete mode 100644 generative_ai/batch_predict/batch_code_predict.py delete mode 100644 generative_ai/batch_predict/batch_text_predict.py delete mode 100644 generative_ai/batch_predict/gemini_batch_predict_bigquery.py delete mode 100644 generative_ai/batch_predict/gemini_batch_predict_gcs.py delete mode 100644 generative_ai/batch_predict/noxfile_config.py delete mode 100644 generative_ai/batch_predict/requirements-test.txt delete mode 100644 generative_ai/batch_predict/requirements.txt delete mode 100644 generative_ai/batch_predict/test_batch_predict_examples.py delete mode 100644 generative_ai/express_mode/api_key_example.py delete mode 100644 generative_ai/express_mode/api_key_example_test.py delete mode 100644 generative_ai/express_mode/noxfile_config.py delete mode 100644 generative_ai/express_mode/requirements-test.txt delete mode 100644 generative_ai/express_mode/requirements.txt delete mode 100644 generative_ai/image/image_example01.py delete mode 100644 generative_ai/image/image_example02.py delete mode 100644 generative_ai/image/noxfile_config.py delete mode 100644 generative_ai/image/requirements-test.txt delete mode 100644 generative_ai/image/requirements.txt delete mode 100644 generative_ai/image/test_image_samples.py delete mode 100644 generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py delete mode 100644 
generative_ai/reasoning_engine/create_reasoning_engine_example.py delete mode 100644 generative_ai/reasoning_engine/delete_reasoning_engine_example.py delete mode 100644 generative_ai/reasoning_engine/get_reasoning_engine_example.py delete mode 100644 generative_ai/reasoning_engine/list_reasoning_engine_example.py delete mode 100644 generative_ai/reasoning_engine/noxfile_config.py delete mode 100644 generative_ai/reasoning_engine/query_reasoning_engine_example.py delete mode 100644 generative_ai/reasoning_engine/requirements-test.txt delete mode 100644 generative_ai/reasoning_engine/requirements.txt delete mode 100644 generative_ai/reasoning_engine/test_reasoning_engine_examples.py delete mode 100644 generative_ai/safety/noxfile_config.py delete mode 100644 generative_ai/safety/requirements-test.txt delete mode 100644 generative_ai/safety/requirements.txt delete mode 100644 generative_ai/safety/safety_config_example.py delete mode 100644 generative_ai/safety/safety_config_example_test.py delete mode 100644 generative_ai/system_instructions/noxfile_config.py delete mode 100644 generative_ai/system_instructions/requirements-test.txt delete mode 100644 generative_ai/system_instructions/requirements.txt delete mode 100644 generative_ai/system_instructions/system_instructions_example.py delete mode 100644 generative_ai/system_instructions/system_instructions_example_test.py delete mode 100644 generative_ai/template_folder/advanced_example.py delete mode 100644 generative_ai/template_folder/noxfile_config.py delete mode 100644 generative_ai/template_folder/requirements-test.txt delete mode 100644 generative_ai/template_folder/requirements.txt delete mode 100644 generative_ai/template_folder/simple_example.py delete mode 100644 generative_ai/template_folder/test_template_folder_examples.py delete mode 100644 generative_ai/text_generation/chat_code_example.py delete mode 100644 generative_ai/text_generation/chat_multiturn_example.py delete mode 100644 
generative_ai/text_generation/chat_multiturn_stream_example.py delete mode 100644 generative_ai/text_generation/chat_simple_example.py delete mode 100644 generative_ai/text_generation/code_completion_example.py delete mode 100644 generative_ai/text_generation/codegen_example.py delete mode 100644 generative_ai/text_generation/gemini_describe_http_image_example.py delete mode 100644 generative_ai/text_generation/gemini_describe_http_pdf_example.py delete mode 100644 generative_ai/text_generation/gemini_translate_text.py delete mode 100644 generative_ai/text_generation/generation_config_example.py delete mode 100644 generative_ai/text_generation/multimodal_stream_example.py delete mode 100644 generative_ai/text_generation/noxfile_config.py delete mode 100644 generative_ai/text_generation/requirements-test.txt delete mode 100644 generative_ai/text_generation/requirements.txt delete mode 100644 generative_ai/text_generation/single_turn_multi_image_example.py delete mode 100644 generative_ai/text_generation/test_text_examples.py delete mode 100644 generative_ai/text_generation/test_text_generation_examples.py delete mode 100644 generative_ai/text_generation/text_example01.py delete mode 100644 generative_ai/text_generation/text_example02.py delete mode 100644 generative_ai/text_generation/text_example03.py delete mode 100644 generative_ai/text_generation/text_stream_example01.py delete mode 100644 generative_ai/text_generation/text_stream_example02.py delete mode 100644 generative_ai/text_models/classify_news_items.py delete mode 100644 generative_ai/text_models/classify_news_items_test.py delete mode 100644 generative_ai/text_models/code_completion_test_function.py delete mode 100644 generative_ai/text_models/code_completion_test_function_test.py delete mode 100644 generative_ai/text_models/code_generation_unittest.py delete mode 100644 generative_ai/text_models/code_generation_unittest_test.py delete mode 100644 generative_ai/text_models/extraction.py delete mode 
100644 generative_ai/text_models/extraction_test.py delete mode 100644 generative_ai/text_models/list_tuned_code_generation_models.py delete mode 100644 generative_ai/text_models/list_tuned_code_generation_models_test.py delete mode 100644 generative_ai/text_models/noxfile_config.py delete mode 100644 generative_ai/text_models/requirements-test.txt delete mode 100644 generative_ai/text_models/requirements.txt delete mode 100644 generative_ai/text_models/sentiment_analysis.py delete mode 100644 generative_ai/text_models/sentiment_analysis_test.py delete mode 100644 generative_ai/text_models/streaming_chat.py delete mode 100644 generative_ai/text_models/streaming_chat_test.py delete mode 100644 generative_ai/text_models/streaming_code.py delete mode 100644 generative_ai/text_models/streaming_code_test.py delete mode 100644 generative_ai/text_models/streaming_codechat.py delete mode 100644 generative_ai/text_models/streaming_codechat_test.py delete mode 100644 generative_ai/text_models/summarization.py delete mode 100644 generative_ai/text_models/summarization_test.py delete mode 100644 generative_ai/understand_audio/noxfile_config.py delete mode 100644 generative_ai/understand_audio/requirements-test.txt delete mode 100644 generative_ai/understand_audio/requirements.txt delete mode 100644 generative_ai/understand_audio/summarization_example.py delete mode 100644 generative_ai/understand_audio/transcription_example.py delete mode 100644 generative_ai/understand_audio/understand_audio_test.py delete mode 100644 generative_ai/understand_docs/noxfile_config.py delete mode 100644 generative_ai/understand_docs/pdf_example.py delete mode 100644 generative_ai/understand_docs/pdf_example_test.py delete mode 100644 generative_ai/understand_docs/requirements-test.txt delete mode 100644 generative_ai/understand_docs/requirements.txt delete mode 100644 generative_ai/understand_video/audio_video_example.py delete mode 100644 generative_ai/understand_video/noxfile_config.py delete 
mode 100644 generative_ai/understand_video/requirements-test.txt delete mode 100644 generative_ai/understand_video/requirements.txt delete mode 100644 generative_ai/understand_video/single_turn_video_example.py delete mode 100644 generative_ai/understand_video/understand_video_test.py delete mode 100644 generative_ai/video/gemini_describe_http_video_example.py delete mode 100644 generative_ai/video/gemini_youtube_video_key_moments_example.py delete mode 100644 generative_ai/video/gemini_youtube_video_summarization_example.py delete mode 100644 generative_ai/video/multimodal_example01.py delete mode 100644 generative_ai/video/multimodal_example02.py delete mode 100644 generative_ai/video/noxfile_config.py delete mode 100644 generative_ai/video/requirements-test.txt delete mode 100644 generative_ai/video/requirements.txt delete mode 100644 generative_ai/video/test_video_examples.py diff --git a/generative_ai/batch_predict/batch_code_predict.py b/generative_ai/batch_predict/batch_code_predict.py deleted file mode 100644 index ba2d4f6c83..0000000000 --- a/generative_ai/batch_predict/batch_code_predict.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from google.cloud.aiplatform import BatchPredictionJob - - -def batch_code_prediction( - input_uri: str = None, output_uri: str = None -) -> BatchPredictionJob: - """Perform batch code prediction using a pre-trained code generation model. 
- Args: - input_uri (str, optional): URI of the input dataset. Could be a BigQuery table or a Google Cloud Storage file. - E.g. "gs://[BUCKET]/[DATASET].jsonl" OR "bq://[PROJECT].[DATASET].[TABLE]" - output_uri (str, optional): URI where the output will be stored. - Could be a BigQuery table or a Google Cloud Storage file. - E.g. "gs://[BUCKET]/[OUTPUT].jsonl" OR "bq://[PROJECT].[DATASET].[TABLE]" - Returns: - batch_prediction_job: The batch prediction job object containing details of the job. - """ - - # [START generativeaionvertexai_batch_code_predict] - from vertexai.preview.language_models import CodeGenerationModel - - # Example of using Google Cloud Storage bucket as the input and output data source - # TODO (Developer): Replace the input_uri and output_uri with your own GCS paths - # input_uri = "gs://cloud-samples-data/batch/prompt_for_batch_code_predict.jsonl" - # output_uri = "gs://your-bucket-name/batch_code_predict_output" - - code_model = CodeGenerationModel.from_pretrained("code-bison") - - batch_prediction_job = code_model.batch_predict( - dataset=input_uri, - destination_uri_prefix=output_uri, - # Optional: - model_parameters={ - "maxOutputTokens": "200", - "temperature": "0.2", - }, - ) - print(batch_prediction_job.display_name) - print(batch_prediction_job.resource_name) - print(batch_prediction_job.state) - - # [END generativeaionvertexai_batch_code_predict] - - return batch_prediction_job - - -if __name__ == "__main__": - batch_code_prediction() diff --git a/generative_ai/batch_predict/batch_text_predict.py b/generative_ai/batch_predict/batch_text_predict.py deleted file mode 100644 index 76d745a9f4..0000000000 --- a/generative_ai/batch_predict/batch_text_predict.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from google.cloud.aiplatform import BatchPredictionJob - - -def batch_text_prediction( - input_uri: str = None, output_uri: str = None -) -> BatchPredictionJob: - """Perform batch text prediction using a pre-trained text generation model. - Args: - input_uri (str, optional): URI of the input dataset. Could be a BigQuery table or a Google Cloud Storage file. - E.g. "gs://[BUCKET]/[DATASET].jsonl" OR "bq://[PROJECT].[DATASET].[TABLE]" - output_uri (str, optional): URI where the output will be stored. - Could be a BigQuery table or a Google Cloud Storage file. - E.g. "gs://[BUCKET]/[OUTPUT].jsonl" OR "bq://[PROJECT].[DATASET].[TABLE]" - Returns: - batch_prediction_job: The batch prediction job object containing details of the job. 
- """ - - # [START generativeaionvertexai_batch_text_predict] - from vertexai.preview.language_models import TextGenerationModel - - # Example of using Google Cloud Storage bucket as the input and output data source - # TODO (Developer): Replace the input_uri and output_uri with your own GCS paths - # input_uri = "gs://cloud-samples-data/batch/prompt_for_batch_text_predict.jsonl" - # output_uri = "gs://your-bucket-name/batch_text_predict_output" - - # Initialize the text generation model from a pre-trained model named "text-bison" - text_model = TextGenerationModel.from_pretrained("text-bison") - - batch_prediction_job = text_model.batch_predict( - dataset=input_uri, - destination_uri_prefix=output_uri, - # Optional: - model_parameters={ - "maxOutputTokens": "200", - "temperature": "0.2", - "topP": "0.95", - "topK": "40", - }, - ) - print(batch_prediction_job.display_name) - print(batch_prediction_job.resource_name) - print(batch_prediction_job.state) - - # [END generativeaionvertexai_batch_text_predict] - return batch_prediction_job - - -if __name__ == "__main__": - batch_text_prediction() diff --git a/generative_ai/batch_predict/gemini_batch_predict_bigquery.py b/generative_ai/batch_predict/gemini_batch_predict_bigquery.py deleted file mode 100644 index 60c9baa990..0000000000 --- a/generative_ai/batch_predict/gemini_batch_predict_bigquery.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - -output_uri = "bq://storage-samples.generative_ai.gen_ai_batch_prediction.predictions" - - -def batch_predict_gemini_createjob(output_uri: str) -> str: - """Perform batch text prediction using a Gemini AI model and returns the output location""" - - # [START generativeaionvertexai_batch_predict_gemini_createjob_bigquery] - import time - import vertexai - - from vertexai.batch_prediction import BatchPredictionJob - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - # Initialize vertexai - vertexai.init(project=PROJECT_ID, location="us-central1") - - input_uri = "bq://storage-samples.generative_ai.batch_requests_for_multimodal_input" - - # Submit a batch prediction job with Gemini model - batch_prediction_job = BatchPredictionJob.submit( - source_model="gemini-2.0-flash-001", - input_dataset=input_uri, - output_uri_prefix=output_uri, - ) - - # Check job status - print(f"Job resource name: {batch_prediction_job.resource_name}") - print(f"Model resource name with the job: {batch_prediction_job.model_name}") - print(f"Job state: {batch_prediction_job.state.name}") - - # Refresh the job until complete - while not batch_prediction_job.has_ended: - time.sleep(5) - batch_prediction_job.refresh() - - # Check if the job succeeds - if batch_prediction_job.has_succeeded: - print("Job succeeded!") - else: - print(f"Job failed: {batch_prediction_job.error}") - - # Check the location of the output - print(f"Job output location: {batch_prediction_job.output_location}") - - # Example response: - # Job output location: bq://Project-ID/gen-ai-batch-prediction/predictions-model-year-month-day-hour:minute:second.12345 - # [END generativeaionvertexai_batch_predict_gemini_createjob_bigquery] - return batch_prediction_job - - -if __name__ == "__main__": - batch_predict_gemini_createjob(output_uri) diff --git a/generative_ai/batch_predict/gemini_batch_predict_gcs.py 
b/generative_ai/batch_predict/gemini_batch_predict_gcs.py deleted file mode 100644 index ff1b5d8386..0000000000 --- a/generative_ai/batch_predict/gemini_batch_predict_gcs.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - -output_uri = "gs://python-docs-samples-tests" - - -def batch_predict_gemini_createjob(output_uri: str) -> str: - "Perform batch text prediction using a Gemini AI model and returns the output location" - - # [START generativeaionvertexai_batch_predict_gemini_createjob] - import time - import vertexai - - from vertexai.batch_prediction import BatchPredictionJob - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - # Initialize vertexai - vertexai.init(project=PROJECT_ID, location="us-central1") - - input_uri = "gs://cloud-samples-data/batch/prompt_for_batch_gemini_predict.jsonl" - - # Submit a batch prediction job with Gemini model - batch_prediction_job = BatchPredictionJob.submit( - source_model="gemini-2.0-flash-001", - input_dataset=input_uri, - output_uri_prefix=output_uri, - ) - - # Check job status - print(f"Job resource name: {batch_prediction_job.resource_name}") - print(f"Model resource name with the job: {batch_prediction_job.model_name}") - print(f"Job state: {batch_prediction_job.state.name}") - - # Refresh the job until complete - while not batch_prediction_job.has_ended: - 
time.sleep(5) - batch_prediction_job.refresh() - - # Check if the job succeeds - if batch_prediction_job.has_succeeded: - print("Job succeeded!") - else: - print(f"Job failed: {batch_prediction_job.error}") - - # Check the location of the output - print(f"Job output location: {batch_prediction_job.output_location}") - - # Example response: - # Job output location: gs://your-bucket/gen-ai-batch-prediction/prediction-model-year-month-day-hour:minute:second.12345 - - # [END generativeaionvertexai_batch_predict_gemini_createjob] - return batch_prediction_job - - -if __name__ == "__main__": - batch_predict_gemini_createjob(output_uri) diff --git a/generative_ai/batch_predict/noxfile_config.py b/generative_ai/batch_predict/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/batch_predict/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. 
- "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/batch_predict/requirements-test.txt b/generative_ai/batch_predict/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/batch_predict/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/batch_predict/requirements.txt b/generative_ai/batch_predict/requirements.txt deleted file mode 100644 index 8b0237df36..0000000000 --- a/generative_ai/batch_predict/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.71.0 -sentencepiece==0.2.0 -google-auth==2.29.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/batch_predict/test_batch_predict_examples.py 
b/generative_ai/batch_predict/test_batch_predict_examples.py deleted file mode 100644 index 6306a0c2fd..0000000000 --- a/generative_ai/batch_predict/test_batch_predict_examples.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from typing import Callable - - -from google.cloud import storage -from google.cloud.aiplatform import BatchPredictionJob -from google.cloud.aiplatform_v1 import JobState - - -import pytest - - -import batch_code_predict -import batch_text_predict -import gemini_batch_predict_bigquery -import gemini_batch_predict_gcs - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -INPUT_BUCKET = "cloud-samples-data" -OUTPUT_BUCKET = "python-docs-samples-tests" -OUTPUT_PATH = "batch/batch_text_predict_output" -GCS_OUTPUT_PATH = "gs://python-docs-samples-tests/" -OUTPUT_TABLE = f"bq://{PROJECT_ID}.gen_ai_batch_prediction.predictions" - - -def _clean_resources() -> None: - storage_client = storage.Client() - bucket = storage_client.get_bucket(OUTPUT_BUCKET) - blobs = bucket.list_blobs(prefix=OUTPUT_PATH) - for blob in blobs: - blob.delete() - - -@pytest.fixture(scope="session") -def output_folder() -> str: - yield f"gs://{OUTPUT_BUCKET}/{OUTPUT_PATH}" - _clean_resources() - - -def _main_test(test_func: Callable) -> BatchPredictionJob: - job = None - try: - job = test_func() - assert job.state == JobState.JOB_STATE_SUCCEEDED - return job - finally: - if job is not None: - job.delete() 
- - -def test_batch_text_predict(output_folder: pytest.fixture()) -> None: - input_uri = f"gs://{INPUT_BUCKET}/batch/prompt_for_batch_text_predict.jsonl" - job = _main_test( - test_func=lambda: batch_text_predict.batch_text_prediction( - input_uri, output_folder - ) - ) - assert OUTPUT_PATH in job.output_info.gcs_output_directory - - -def test_batch_code_predict(output_folder: pytest.fixture()) -> None: - input_uri = f"gs://{INPUT_BUCKET}/batch/prompt_for_batch_code_predict.jsonl" - job = _main_test( - test_func=lambda: batch_code_predict.batch_code_prediction( - input_uri, output_folder - ) - ) - assert OUTPUT_PATH in job.output_info.gcs_output_directory - - -def test_batch_gemini_predict_gcs(output_folder: pytest.fixture()) -> None: - output_uri = "gs://python-docs-samples-tests" - job = _main_test( - test_func=lambda: gemini_batch_predict_gcs.batch_predict_gemini_createjob( - output_uri - ) - ) - assert GCS_OUTPUT_PATH in job.output_location - - -def test_batch_gemini_predict_bigquery(output_folder: pytest.fixture()) -> None: - output_uri = f"bq://{PROJECT_ID}.gen_ai_batch_prediction.predictions" - job = _main_test( - test_func=lambda: gemini_batch_predict_bigquery.batch_predict_gemini_createjob( - output_uri - ) - ) - assert OUTPUT_TABLE in job.output_location diff --git a/generative_ai/express_mode/api_key_example.py b/generative_ai/express_mode/api_key_example.py deleted file mode 100644 index efa1307cf2..0000000000 --- a/generative_ai/express_mode/api_key_example.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def generate_content() -> None: - # [START generativeaionvertexai_gemini_express_mode] - import vertexai - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update below line - vertexai.init(api_key="YOUR_API_KEY") - - model = GenerativeModel("gemini-2.0-flash-001") - - response = model.generate_content("Explain bubble sort to me") - - print(response.text) - # Example response: - # Bubble Sort is a simple sorting algorithm that repeatedly steps through the list - # [END generativeaionvertexai_gemini_express_mode] - return response.text diff --git a/generative_ai/express_mode/api_key_example_test.py b/generative_ai/express_mode/api_key_example_test.py deleted file mode 100644 index 032262f644..0000000000 --- a/generative_ai/express_mode/api_key_example_test.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from unittest.mock import MagicMock, patch - -from vertexai.generative_models import ( - GenerationResponse, - GenerativeModel, -) - -import api_key_example - - -@patch.object(GenerativeModel, "generate_content") -def test_api_key_example(mock_generate_content: MagicMock) -> None: - # Mock the API response - mock_generate_content.return_value = GenerationResponse.from_dict( - { - "candidates": [ - { - "content": { - "parts": [{"text": "This is a mocked bubble sort explanation."}] - } - } - ] - } - ) - - # Call the function - response = api_key_example.generate_content() - - # Assert that the function returns the expected value - assert response == "This is a mocked bubble sort explanation." diff --git a/generative_ai/express_mode/noxfile_config.py b/generative_ai/express_mode/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/express_mode/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. 
- "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/express_mode/requirements-test.txt b/generative_ai/express_mode/requirements-test.txt deleted file mode 100644 index e43b779272..0000000000 --- a/generative_ai/express_mode/requirements-test.txt +++ /dev/null @@ -1,2 +0,0 @@ -google-api-core==2.24.0 -pytest==8.2.0 diff --git a/generative_ai/express_mode/requirements.txt b/generative_ai/express_mode/requirements.txt deleted file mode 100644 index 913473b5ef..0000000000 --- a/generative_ai/express_mode/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-cloud-aiplatform==1.74.0 diff --git a/generative_ai/image/image_example01.py b/generative_ai/image/image_example01.py deleted file mode 100644 index 6ad128f66f..0000000000 --- a/generative_ai/image/image_example01.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> str: - # [START generativeaionvertexai_gemini_get_started] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - response = model.generate_content( - [ - Part.from_uri( - "gs://cloud-samples-data/generative-ai/image/scones.jpg", - mime_type="image/jpeg", - ), - "What is shown in this image?", - ] - ) - - print(response.text) - # That's a lovely overhead shot of a rustic-style breakfast or brunch spread. - # Here's what's in the image: - # * **Blueberry scones:** Several freshly baked blueberry scones are arranged on parchment paper. - # They look crumbly and delicious. - # ... - - # [END generativeaionvertexai_gemini_get_started] - return response.text - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/image/image_example02.py b/generative_ai/image/image_example02.py deleted file mode 100644 index ac110f58d4..0000000000 --- a/generative_ai/image/image_example02.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> None: - # [START generativeaionvertexai_gemini_pro_example] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - image_file = Part.from_uri( - "gs://cloud-samples-data/generative-ai/image/scones.jpg", "image/jpeg" - ) - - # Query the model - response = model.generate_content([image_file, "what is this image?"]) - print(response.text) - # Example response: - # That's a lovely overhead flatlay photograph of blueberry scones. - # The image features: - # * **Several blueberry scones:** These are the main focus, - # arranged on parchment paper with some blueberry juice stains. - # ... - - # [END generativeaionvertexai_gemini_pro_example] - return response.text - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/image/noxfile_config.py b/generative_ai/image/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/image/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/image/requirements-test.txt b/generative_ai/image/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/image/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/image/requirements.txt b/generative_ai/image/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/image/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/image/test_image_samples.py b/generative_ai/image/test_image_samples.py deleted file mode 100644 index 7527beba38..0000000000 --- a/generative_ai/image/test_image_samples.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import image_example01 -import image_example02 - - -def test_gemini_guide_example() -> None: - text = image_example01.generate_text() - text = text.lower() - assert len(text) > 0 - - -def test_gemini_pro_basic_example() -> None: - text = image_example02.generate_text() - assert len(text) > 0 diff --git a/generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py b/generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py deleted file mode 100644 index f0a935ec01..0000000000 --- a/generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from typing import Dict, Union - -from vertexai.preview import reasoning_engines - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def create_reasoning_engine_advanced( - staging_bucket: str, -) -> reasoning_engines.ReasoningEngine: - # [START generativeaionvertexai_create_reasoning_engine_advanced] - - from typing import List - - import vertexai - from vertexai.preview import reasoning_engines - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # staging_bucket = "gs://YOUR_BUCKET_NAME" - - vertexai.init( - project=PROJECT_ID, location="us-central1", staging_bucket=staging_bucket - ) - - class LangchainApp: - def __init__(self, project: str, location: str) -> None: - self.project_id = project - self.location = location - - def set_up(self) -> None: - from langchain_core.prompts import ChatPromptTemplate - from langchain_google_vertexai import ChatVertexAI - - system = ( - "You are a helpful assistant that answers questions " - "about Google Cloud." - ) - human = "{text}" - prompt = ChatPromptTemplate.from_messages( - [("system", system), ("human", human)] - ) - chat = ChatVertexAI(project=self.project_id, location=self.location) - self.chain = prompt | chat - - def query(self, question: str) -> Union[str, List[Union[str, Dict]]]: - """Query the application. - Args: - question: The user prompt. - Returns: - str: The LLM response. - """ - return self.chain.invoke({"text": question}).content - - # Locally test - app = LangchainApp(project=PROJECT_ID, location="us-central1") - app.set_up() - print(app.query("What is Vertex AI?")) - - # Create a remote app with Reasoning Engine - # Deployment of the app should take a few minutes to complete. 
- reasoning_engine = reasoning_engines.ReasoningEngine.create( - LangchainApp(project=PROJECT_ID, location="us-central1"), - requirements=[ - "google-cloud-aiplatform[langchain,reasoningengine]", - "cloudpickle==3.0.0", - "pydantic==2.7.4", - ], - display_name="Demo LangChain App", - description="This is a simple LangChain app.", - # sys_version="3.10", # Optional - extra_packages=[], - ) - # Example response: - # Model_name will become a required arg for VertexAIEmbeddings starting... - # ... - # Create ReasoningEngine backing LRO: projects/123456789/locations/us-central1/reasoningEngines/... - # ReasoningEngine created. Resource name: projects/123456789/locations/us-central1/reasoningEngines/... - # ... - - # [END generativeaionvertexai_create_reasoning_engine_advanced] - return reasoning_engine - - -if __name__ == "__main__": - create_reasoning_engine_advanced("gs://your-bucket-unique-name") diff --git a/generative_ai/reasoning_engine/create_reasoning_engine_example.py b/generative_ai/reasoning_engine/create_reasoning_engine_example.py deleted file mode 100644 index c2ba4eba5f..0000000000 --- a/generative_ai/reasoning_engine/create_reasoning_engine_example.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from vertexai.preview import reasoning_engines - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def create_reasoning_engine_basic( - staging_bucket: str, -) -> reasoning_engines.ReasoningEngine: - # [START generativeaionvertexai_create_reasoning_engine_basic] - import vertexai - from vertexai.preview import reasoning_engines - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # staging_bucket = "gs://YOUR_BUCKET_NAME" - vertexai.init( - project=PROJECT_ID, location="us-central1", staging_bucket=staging_bucket - ) - - class SimpleAdditionApp: - def query(self, a: int, b: int) -> str: - """Query the application. - Args: - a: The first input number - b: The second input number - Returns: - int: The additional result. - """ - return f"{int(a)} + {int(b)} is {int(a + b)}" - - # Locally test - app = SimpleAdditionApp() - app.query(a=1, b=2) - - # Create a remote app with Reasoning Engine. - # This may take 1-2 minutes to finish. - reasoning_engine = reasoning_engines.ReasoningEngine.create( - SimpleAdditionApp(), - display_name="Demo Addition App", - description="A simple demo addition app", - requirements=["cloudpickle==3"], - extra_packages=[], - ) - # Example response: - # Using bucket YOUR_BUCKET_NAME - # Writing to gs://YOUR_BUCKET_NAME/reasoning_engine/reasoning_engine.pkl - # ... - # ReasoningEngine created. Resource name: projects/123456789/locations/us-central1/reasoningEngines/123456 - # To use this ReasoningEngine in another session: - # reasoning_engine = vertexai.preview.reasoning_engines.ReasoningEngine('projects/123456789/locations/... 
- - # [END generativeaionvertexai_create_reasoning_engine_basic] - return reasoning_engine - - -if __name__ == "__main__": - create_reasoning_engine_basic("gs://your-bucket-unique-name") diff --git a/generative_ai/reasoning_engine/delete_reasoning_engine_example.py b/generative_ai/reasoning_engine/delete_reasoning_engine_example.py deleted file mode 100644 index 9f4e019b0e..0000000000 --- a/generative_ai/reasoning_engine/delete_reasoning_engine_example.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def delete_reasoning_engine(reasoning_engine_id: str) -> None: - # [START generativeaionvertexai_delete_reasoning_engine] - import vertexai - from vertexai.preview import reasoning_engines - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # reasoning_engine_id = "1234567890123456" - vertexai.init(project=PROJECT_ID, location="us-central1") - - reasoning_engine = reasoning_engines.ReasoningEngine(reasoning_engine_id) - reasoning_engine.delete() - # Example response: - # Deleting ReasoningEngine:projects/[PROJECT_ID]/locations/us-central1/reasoningEngines/1234567890123456 - # ... - # ... resource projects/[PROJECT_ID]/locations/us-central1/reasoningEngines/1234567890123456 deleted. 
- - # [END generativeaionvertexai_delete_reasoning_engine] - - -if __name__ == "__main__": - delete_reasoning_engine("1234567890123456") diff --git a/generative_ai/reasoning_engine/get_reasoning_engine_example.py b/generative_ai/reasoning_engine/get_reasoning_engine_example.py deleted file mode 100644 index 956015e073..0000000000 --- a/generative_ai/reasoning_engine/get_reasoning_engine_example.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from vertexai.preview import reasoning_engines - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def get_reasoning_engine(reasoning_engine_id: str) -> reasoning_engines.ReasoningEngine: - # [START generativeaionvertexai_get_reasoning_engine] - import vertexai - from vertexai.preview import reasoning_engines - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # reasoning_engine_id = "1234567890123456" - vertexai.init(project=PROJECT_ID, location="us-central1") - - reasoning_engine = reasoning_engines.ReasoningEngine(reasoning_engine_id) - print(reasoning_engine) - # Example response: - # - # resource name: projects/[PROJECT_ID]/locations/us-central1/reasoningEngines/1234567890123456 - - # [END generativeaionvertexai_get_reasoning_engine] - return reasoning_engine - - -if __name__ == "__main__": - get_reasoning_engine("1234567890123456") diff --git a/generative_ai/reasoning_engine/list_reasoning_engine_example.py b/generative_ai/reasoning_engine/list_reasoning_engine_example.py deleted file mode 100644 index c0354d7f4d..0000000000 --- a/generative_ai/reasoning_engine/list_reasoning_engine_example.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from typing import List - -from vertexai.preview import reasoning_engines - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def list_reasoning_engines() -> List[reasoning_engines.ReasoningEngine]: - # [START generativeaionvertexai_list_reasoning_engines] - import vertexai - from vertexai.preview import reasoning_engines - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - reasoning_engine_list = reasoning_engines.ReasoningEngine.list() - print(reasoning_engine_list) - # Example response: - # [ - # resource name: projects/123456789/locations/us-central1/reasoningEngines/111111111111111111, - # - # resource name: projects/123456789/locations/us-central1/reasoningEngines/222222222222222222] - - # [END generativeaionvertexai_list_reasoning_engines] - return reasoning_engine_list - - -if __name__ == "__main__": - list_reasoning_engines() diff --git a/generative_ai/reasoning_engine/noxfile_config.py b/generative_ai/reasoning_engine/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/reasoning_engine/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. 
- -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/reasoning_engine/query_reasoning_engine_example.py b/generative_ai/reasoning_engine/query_reasoning_engine_example.py deleted file mode 100644 index bdaa3d39be..0000000000 --- a/generative_ai/reasoning_engine/query_reasoning_engine_example.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def query_reasoning_engine(reasoning_engine_id: str) -> object: - # [START generativeaionvertexai_query_reasoning_engine] - import vertexai - from vertexai.preview import reasoning_engines - - # TODO(developer): Update and un-comment below lines - # PROJECT_ID = "your-project-id" - # reasoning_engine_id = "1234567890123456" - vertexai.init(project=PROJECT_ID, location="us-central1") - reasoning_engine = reasoning_engines.ReasoningEngine(reasoning_engine_id) - - # Replace with kwargs for `.query()` method. - response = reasoning_engine.query(a=1, b=2) - print(response) - # Example response: - # 1 + 2 is 3 - - # [END generativeaionvertexai_query_reasoning_engine] - return response - - -if __name__ == "__main__": - query_reasoning_engine("1234567890123456") diff --git a/generative_ai/reasoning_engine/requirements-test.txt b/generative_ai/reasoning_engine/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/reasoning_engine/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/reasoning_engine/requirements.txt b/generative_ai/reasoning_engine/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/reasoning_engine/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/reasoning_engine/test_reasoning_engine_examples.py b/generative_ai/reasoning_engine/test_reasoning_engine_examples.py deleted file 
mode 100644 index 366f6d25b0..0000000000 --- a/generative_ai/reasoning_engine/test_reasoning_engine_examples.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Generator - -import pytest - -import create_reasoning_engine_advanced_example -import create_reasoning_engine_example -import delete_reasoning_engine_example -import get_reasoning_engine_example -import list_reasoning_engine_example -import query_reasoning_engine_example - - -STAGING_BUCKET = "gs://ucaip-samples-us-central1" - - -@pytest.fixture(scope="module") -def reasoning_engine_id() -> Generator[str, None, None]: - reasoning_engine = create_reasoning_engine_example.create_reasoning_engine_basic( - STAGING_BUCKET - ) - yield reasoning_engine.resource_name - print("Deleting Reasoning Engine...") - delete_reasoning_engine_example.delete_reasoning_engine( - reasoning_engine.resource_name - ) - - -@pytest.mark.skip("TODO: Reasoning Engine Deployment Issue b/339643184") -def test_create_reasoning_engine_basic(reasoning_engine_id: str) -> None: - assert reasoning_engine_id - - -@pytest.mark.skip("TODO: Reasoning Engine Deployment Issue b/339643184") -def test_create_reasoning_engine_advanced() -> None: - reasoning_engine = ( - create_reasoning_engine_advanced_example.create_reasoning_engine_advanced( - STAGING_BUCKET - ) - ) - assert reasoning_engine - delete_reasoning_engine_example.delete_reasoning_engine( - 
reasoning_engine.resource_name - ) - - -@pytest.mark.skip("TODO: Resolve issue b/348193408") -def test_query_reasoning_engine(reasoning_engine_id: str) -> None: - response = query_reasoning_engine_example.query_reasoning_engine( - reasoning_engine_id - ) - assert response - assert response == "1 + 2 is 3" - - -def test_list_reasoning_engines() -> None: - response = list_reasoning_engine_example.list_reasoning_engines() - assert response - - -@pytest.mark.skip("TODO: Resolve issue b/348193408") -def test_get_reasoning_engine(reasoning_engine_id: str) -> None: - response = get_reasoning_engine_example.get_reasoning_engine(reasoning_engine_id) - assert response diff --git a/generative_ai/safety/noxfile_config.py b/generative_ai/safety/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/safety/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. 
- "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/safety/requirements-test.txt b/generative_ai/safety/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/safety/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/safety/requirements.txt b/generative_ai/safety/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/safety/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/safety/safety_config_example.py b/generative_ai/safety/safety_config_example.py deleted file mode 100644 index e60496b416..0000000000 --- 
a/generative_ai/safety/safety_config_example.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> str: - # [START generativeaionvertexai_gemini_safety_settings] - import vertexai - - from vertexai.generative_models import ( - GenerativeModel, - HarmCategory, - HarmBlockThreshold, - Part, - SafetySetting, - ) - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - # Safety config - safety_config = [ - SafetySetting( - category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, - threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, - ), - SafetySetting( - category=HarmCategory.HARM_CATEGORY_HARASSMENT, - threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, - ), - ] - - image_file = Part.from_uri( - "gs://cloud-samples-data/generative-ai/image/scones.jpg", "image/jpeg" - ) - - # Generate content - response = model.generate_content( - [image_file, "What is in this image?"], - safety_settings=safety_config, - ) - - print(response.text) - print(response.candidates[0].safety_ratings) - # Example response: - # The image contains a beautiful arrangement of blueberry scones, flowers, coffee, and blueberries. - # The scene is set on a rustic blue background. 
The image evokes a sense of comfort and indulgence. - # ... - - # [END generativeaionvertexai_gemini_safety_settings] - return response.text - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/safety/safety_config_example_test.py b/generative_ai/safety/safety_config_example_test.py deleted file mode 100644 index f5b62609dc..0000000000 --- a/generative_ai/safety/safety_config_example_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import safety_config_example - - -def test_gemini_safety_config_example() -> None: - text = safety_config_example.generate_text() - assert len(text) > 0 diff --git a/generative_ai/system_instructions/noxfile_config.py b/generative_ai/system_instructions/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/system_instructions/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/system_instructions/requirements-test.txt b/generative_ai/system_instructions/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/system_instructions/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/system_instructions/requirements.txt b/generative_ai/system_instructions/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/system_instructions/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/system_instructions/system_instructions_example.py b/generative_ai/system_instructions/system_instructions_example.py deleted file mode 100644 index 2ec0518c9f..0000000000 --- a/generative_ai/system_instructions/system_instructions_example.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] - - -def set_system_instruction() -> str: - # [START generativeaionvertexai_gemini_system_instruction] - import vertexai - - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel( - model_name="gemini-2.0-flash-001", - system_instruction=[ - "You are a helpful language translator.", - "Your mission is to translate text in English to French.", - ], - ) - - prompt = """ - User input: I like bagels. - Answer: - """ - response = model.generate_content([prompt]) - print(response.text) - # Example response: - # J'aime les bagels. - - # [END generativeaionvertexai_gemini_system_instruction] - return response.text - - -if __name__ == "__main__": - set_system_instruction() diff --git a/generative_ai/system_instructions/system_instructions_example_test.py b/generative_ai/system_instructions/system_instructions_example_test.py deleted file mode 100644 index 5d26f103bc..0000000000 --- a/generative_ai/system_instructions/system_instructions_example_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import system_instructions_example - - -def test_set_system_instruction() -> None: - text = system_instructions_example.set_system_instruction() - assert len(text) > 0 diff --git a/generative_ai/template_folder/advanced_example.py b/generative_ai/template_folder/advanced_example.py deleted file mode 100644 index e214aa0a4f..0000000000 --- a/generative_ai/template_folder/advanced_example.py +++ /dev/null @@ -1,66 +0,0 @@ -# # Copyright 2024 Google LLC -# # -# # Licensed under the Apache License, Version 2.0 (the "License"); -# # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at -# # -# # https://www.apache.org/licenses/LICENSE-2.0 -# # -# # Unless required by applicable law or agreed to in writing, software -# # distributed under the License is distributed on an "AS IS" BASIS, -# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# # See the License for the specific language governing permissions and -# # limitations under the License. 
-# import os -# -# from vertexai.generative_models import GenerationResponse -# -# PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") -# -# -# def advanced_example() -> GenerationResponse: -# # TODO: -# import vertexai -# from vertexai.generative_models import GenerativeModel, Part -# -# # TODO(developer): Update and un-comment below line -# # PROJECT_ID = "your-project-id" -# vertexai.init(project=PROJECT_ID, location="us-central1") -# -# model = GenerativeModel("gemini-2.0-flash-001") -# -# contents = [ -# Part.from_uri( -# "gs://cloud-samples-data/generative-ai/video/pixel8.mp4", -# mime_type="video/mp4", -# ), -# "Provide a description of the video.", -# ] -# -# # tokens count for user prompt -# response = model.count_tokens(contents) -# print(f"Prompt Token Count: {response.total_tokens}") -# print(f"Prompt Character Count: {response.total_billable_characters}") -# # Example response: -# # Prompt Token Count: 16822 -# # Prompt Character Count: 30 -# -# # Send text to Gemini -# response = model.generate_content(contents) -# usage_metadata = response.usage_metadata -# -# # tokens count for model response -# print(f"Prompt Token Count: {usage_metadata.prompt_token_count}") -# print(f"Candidates Token Count: {usage_metadata.candidates_token_count}") -# print(f"Total Token Count: {usage_metadata.total_token_count}") -# # Example response: -# # Prompt Token Count: 16822 -# # Candidates Token Count: 71 -# # Total Token Count: 16893 -# -# # TODO: -# return response -# -# -# if __name__ == "__main__": -# advanced_example() diff --git a/generative_ai/template_folder/noxfile_config.py b/generative_ai/template_folder/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/template_folder/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/template_folder/requirements-test.txt b/generative_ai/template_folder/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/template_folder/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/template_folder/requirements.txt b/generative_ai/template_folder/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/template_folder/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/template_folder/simple_example.py b/generative_ai/template_folder/simple_example.py deleted file mode 100644 index 5653c16e99..0000000000 --- a/generative_ai/template_folder/simple_example.py +++ /dev/null @@ -1,41 +0,0 @@ -# # Copyright 2024 Google LLC -# # -# # Licensed under the Apache License, Version 2.0 (the "License"); -# # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at -# # -# # https://www.apache.org/licenses/LICENSE-2.0 -# # -# # Unless required by applicable law or agreed to in writing, software -# # distributed under the License is distributed on an "AS IS" BASIS, -# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# # See the License for the specific language governing permissions and -# # limitations under the License. -# -# -# def simple_example() -> int: -# "Simple example for feature." 
-# # TODO: -# from vertexai.preview.tokenization import get_tokenizer_for_model -# -# # Using local tokenzier -# tokenizer = get_tokenizer_for_model("gemini-2.0-flash-001") -# -# prompt = "hello world" -# response = tokenizer.count_tokens(prompt) -# print(f"Prompt Token Count: {response.total_tokens}") -# # Example response: -# # Prompt Token Count: 2 -# -# prompt = ["hello world", "what's the weather today"] -# response = tokenizer.count_tokens(prompt) -# print(f"Prompt Token Count: {response.total_tokens}") -# # Example response: -# # Prompt Token Count: 8 -# -# # TODO: -# return response.total_tokens -# -# -# if __name__ == "__main__": -# simple_example() diff --git a/generative_ai/template_folder/test_template_folder_examples.py b/generative_ai/template_folder/test_template_folder_examples.py deleted file mode 100644 index b1932442f3..0000000000 --- a/generative_ai/template_folder/test_template_folder_examples.py +++ /dev/null @@ -1,26 +0,0 @@ -# # Copyright 2024 Google LLC -# # -# # Licensed under the Apache License, Version 2.0 (the "License"); -# # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at -# # -# # https://www.apache.org/licenses/LICENSE-2.0 -# # -# # Unless required by applicable law or agreed to in writing, software -# # distributed under the License is distributed on an "AS IS" BASIS, -# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# # See the License for the specific language governing permissions and -# # limitations under the License. 
-# -# import advanced_example -# import simple_example -# -# -# def test_simple_example() -> None: -# response = simple_example.simple_example() -# assert response -# -# -# def test_advanced_example() -> None: -# response = advanced_example.advanced_example() -# assert response diff --git a/generative_ai/text_generation/chat_code_example.py b/generative_ai/text_generation/chat_code_example.py deleted file mode 100644 index 24a5ec0d2b..0000000000 --- a/generative_ai/text_generation/chat_code_example.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def write_a_function() -> object: - """Example of using Codey for Code Chat Model to write a function.""" - # [START generativeaionvertexai_sdk_code_chat] - from vertexai.language_models import CodeChatModel - - # TODO developer - override these parameters as needed: - parameters = { - "temperature": 0.5, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 1024, # Token limit determines the maximum amount of text output. 
- } - - code_chat_model = CodeChatModel.from_pretrained("codechat-bison@001") - chat_session = code_chat_model.start_chat() - - response = chat_session.send_message( - "Please help write a function to calculate the min of two numbers", **parameters - ) - print(f"Response from Model: {response.text}") - # Response from Model: Sure, here is a function that you can use to calculate the minimum of two numbers: - # ``` - # def min(a, b): - # """ - # Calculates the minimum of two numbers. - # Args: - # a: The first number. - # ... - - # [END generativeaionvertexai_sdk_code_chat] - return response - - -if __name__ == "__main__": - write_a_function() diff --git a/generative_ai/text_generation/chat_multiturn_example.py b/generative_ai/text_generation/chat_multiturn_example.py deleted file mode 100644 index becaf5367d..0000000000 --- a/generative_ai/text_generation/chat_multiturn_example.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def chat_text_example() -> str: - """Demonstrates a multi-turn chat interaction with a generative model.""" - # [START generativeaionvertexai_gemini_multiturn_chat] - import vertexai - - from vertexai.generative_models import GenerativeModel, ChatSession - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - chat_session = model.start_chat() - - def get_chat_response(chat: ChatSession, prompt: str) -> str: - response = chat.send_message(prompt) - return response.text - - prompt = "Hello." - print(get_chat_response(chat_session, prompt)) - # Example response: - # Hello there! How can I help you today? - - prompt = "What are all the colors in a rainbow?" - print(get_chat_response(chat_session, prompt)) - # Example response: - # The colors in a rainbow are often remembered using the acronym ROY G. BIV: - # * **Red** - # * **Orange** ... - - prompt = "Why does it appear when it rains?" - print(get_chat_response(chat_session, prompt)) - # Example response: - # It's important to note that these colors blend seamlessly into each other, ... - - # [END generativeaionvertexai_gemini_multiturn_chat] - return get_chat_response(chat_session, "Hello") - - -if __name__ == "__main__": - chat_text_example() diff --git a/generative_ai/text_generation/chat_multiturn_stream_example.py b/generative_ai/text_generation/chat_multiturn_stream_example.py deleted file mode 100644 index 08df3f9867..0000000000 --- a/generative_ai/text_generation/chat_multiturn_stream_example.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def chat_stream_example() -> str: - """Demonstrates a multi-turn chat interaction with a generative model using streaming responses""" - # [START generativeaionvertexai_gemini_multiturn_chat_stream] - import vertexai - - from vertexai.generative_models import GenerativeModel, ChatSession - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - chat_session = model.start_chat() - - def get_chat_response(chat: ChatSession, prompt: str) -> str: - text_response = [] - responses = chat.send_message(prompt, stream=True) - for chunk in responses: - text_response.append(chunk.text) - return "".join(text_response) - - prompt = "Hello." - print(get_chat_response(chat_session, prompt)) - # Example response: - # Hello there! How can I help you today? - - prompt = "What are all the colors in a rainbow?" - print(get_chat_response(chat_session, prompt)) - # Example response: - # The colors in a rainbow are often remembered using the acronym ROY G. BIV: - # * **Red** - # * **Orange** ... - - prompt = "Why does it appear when it rains?" - print(get_chat_response(chat_session, prompt)) - # Example response: - # It's important to note that these colors blend smoothly into each other, ... 
- - # [END generativeaionvertexai_gemini_multiturn_chat_stream] - return get_chat_response(chat_session, "Hello") - - -if __name__ == "__main__": - chat_stream_example() diff --git a/generative_ai/text_generation/chat_simple_example.py b/generative_ai/text_generation/chat_simple_example.py deleted file mode 100644 index 42bce26d34..0000000000 --- a/generative_ai/text_generation/chat_simple_example.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def send_chat() -> str: - # [START generativeaionvertexai_chat] - from vertexai.language_models import ChatModel, InputOutputTextPair - - chat_model = ChatModel.from_pretrained("chat-bison@002") - - parameters = { - "temperature": 0.2, - "max_output_tokens": 256, - "top_p": 0.95, - "top_k": 40, - } - - chat_session = chat_model.start_chat( - context="My name is Miles. You are an astronomer, knowledgeable about the solar system.", - examples=[ - InputOutputTextPair( - input_text="How many moons does Mars have?", - output_text="The planet Mars has two moons, Phobos and Deimos.", - ), - ], - ) - - response = chat_session.send_message( - "How many planets are there in the solar system?", **parameters - ) - print(response.text) - # Example response: - # There are eight planets in the solar system: - # Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune. 
- - # [END generativeaionvertexai_chat] - return response.text - - -if __name__ == "__main__": - send_chat() diff --git a/generative_ai/text_generation/code_completion_example.py b/generative_ai/text_generation/code_completion_example.py deleted file mode 100644 index 4c69157039..0000000000 --- a/generative_ai/text_generation/code_completion_example.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def complete_code_function() -> object: - """Example of using Codey for Code Completion to complete a function.""" - # [START generativeaionvertexai_sdk_code_completion_comment] - from vertexai.language_models import CodeGenerationModel - - parameters = { - "temperature": 0.2, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 64, # Token limit determines the maximum amount of text output. 
- } - - code_completion_model = CodeGenerationModel.from_pretrained("code-gecko@001") - response = code_completion_model.predict( - prefix="def reverse_string(s):", **parameters - ) - - print(f"Response from Model: {response.text}") - # Example response: - # Response from Model: - # return s[::-1] - - # [END generativeaionvertexai_sdk_code_completion_comment] - return response - - -if __name__ == "__main__": - complete_code_function() diff --git a/generative_ai/text_generation/codegen_example.py b/generative_ai/text_generation/codegen_example.py deleted file mode 100644 index 01f7bf93fd..0000000000 --- a/generative_ai/text_generation/codegen_example.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def generate_a_function() -> object: - """Example of using Codey for Code Generation to write a function.""" - # [START generativeaionvertexai_sdk_code_generation_function] - from vertexai.language_models import CodeGenerationModel - - parameters = { - "temperature": 0.1, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 256, # Token limit determines the maximum amount of text output. 
- } - - code_generation_model = CodeGenerationModel.from_pretrained("code-bison@001") - response = code_generation_model.predict( - prefix="Write a function that checks if a year is a leap year.", **parameters - ) - - print(f"Response from Model: {response.text}") - # Example response: - # Response from Model: I will write a function to check if a year is a leap year. - # **The function will take a year as input and return a boolean value**. - # **The function will first check if the year is divisible by 4.** - # ... - - return response - - # [END generativeaionvertexai_sdk_code_generation_function] - - -if __name__ == "__main__": - generate_a_function() diff --git a/generative_ai/text_generation/gemini_describe_http_image_example.py b/generative_ai/text_generation/gemini_describe_http_image_example.py deleted file mode 100644 index 0ee1303df2..0000000000 --- a/generative_ai/text_generation/gemini_describe_http_image_example.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_describe_http_image] - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO (developer): update project id - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - contents = [ - # Text prompt - "Describe this image.", - # Example image of a Jack Russell Terrier puppy from Wikipedia. - Part.from_uri( - "https://upload.wikimedia.org/wikipedia/commons/1/1d/Szczenie_Jack_Russell_Terrier.jpg", - "image/jpeg", - ), - ] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # 'Here is a description of the image:' - # 'Close-up view of a young Jack Russell Terrier puppy sitting in short grass ...' - - # [END generativeaionvertexai_gemini_describe_http_image] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/text_generation/gemini_describe_http_pdf_example.py b/generative_ai/text_generation/gemini_describe_http_pdf_example.py deleted file mode 100644 index 2d45d36b5b..0000000000 --- a/generative_ai/text_generation/gemini_describe_http_pdf_example.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_describe_http_pdf] - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO (developer): update project id - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - contents = [ - # Text prompt - "Summarise this file", - # Example PDF document on Transformers, a neural network architecture. - Part.from_uri( - "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/1706.03762v7.pdf", - "application/pdf", - ), - ] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # 'This paper introduces the Transformer, a new neural network architecture for ' - # 'sequence transduction, which uses an attention mechanism to learn global ' - # 'dependencies between input and output sequences. The Transformer ... - - # [END generativeaionvertexai_gemini_describe_http_pdf] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/text_generation/gemini_translate_text.py b/generative_ai/text_generation/gemini_translate_text.py deleted file mode 100644 index 688bdc2bf6..0000000000 --- a/generative_ai/text_generation/gemini_translate_text.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from vertexai.generative_models import GenerationResponse - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_translation() -> GenerationResponse: - # [START generativeaionvertexai_text_generation_gemini_translate] - import vertexai - - from vertexai.generative_models import GenerativeModel, HarmBlockThreshold, HarmCategory - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - prompt = """ - Translate the text from source to target language and return the translated text. - - TEXT: Google's Generative AI API lets you use a large language model (LLM) to dynamically translate text. - SOURCE_LANGUAGE_CODE: EN - TARGET_LANGUAGE_CODE: FR - """ - - # Check the API reference for details: - # https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig - generation_config = { - "candidate_count": 1, - "max_output_tokens": 8192, - "temperature": 0.2, - "top_k": 40.0, - "top_p": 0.95, - } - safety_settings = { - HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, - HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, - HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, - } - # Send request to Gemini - response = model.generate_content( - prompt, - generation_config=generation_config, - safety_settings=safety_settings, - ) - - print(f"Translation:\n{response.text}", ) - print(f"Usage metadata:\n{response.usage_metadata}") - # Example response: - # Translation: - # L'API d'IA générative de Google vous permet d'utiliser un grand modèle linguistique (LLM) pour traduire dynamiquement du texte. 
- # - # Usage metadata: - # prompt_token_count: 63 - # candidates_token_count: 32 - # total_token_count: 95 - - # [END generativeaionvertexai_text_generation_gemini_translate] - return response - - -if __name__ == "__main__": - generate_translation() diff --git a/generative_ai/text_generation/generation_config_example.py b/generative_ai/text_generation/generation_config_example.py deleted file mode 100644 index 194eb23e23..0000000000 --- a/generative_ai/text_generation/generation_config_example.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> None: - # [START generativeaionvertexai_gemini_pro_config_example] - import base64 - import vertexai - - from vertexai.generative_models import GenerationConfig, GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - # Load example image from local storage - encoded_image = base64.b64encode(open("scones.jpg", "rb").read()).decode("utf-8") - image_content = Part.from_data( - data=base64.b64decode(encoded_image), mime_type="image/jpeg" - ) - - # Generation Config - config = GenerationConfig( - max_output_tokens=2048, temperature=0.4, top_p=1, top_k=32 - ) - - # Generate text - response = model.generate_content( - [image_content, "what is this image?"], generation_config=config - ) - print(response.text) - # Example response: - # That's a lovely overhead shot of a rustic still life featuring blueberry scones. - # Here's a breakdown of what's in the image: - # * **Blueberry Scones:** Several freshly baked blueberry scones are arranged on - # a piece of parchment paper. They appear to be homemade and slightly crumbly. - # ... - - # [END generativeaionvertexai_gemini_pro_config_example] - return response.text - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/text_generation/multimodal_stream_example.py b/generative_ai/text_generation/multimodal_stream_example.py deleted file mode 100644 index c0e863c438..0000000000 --- a/generative_ai/text_generation/multimodal_stream_example.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> object: - # [START generativeaionvertexai_stream_multimodality_basic] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - responses = model.generate_content( - [ - Part.from_uri( - "gs://cloud-samples-data/generative-ai/video/animals.mp4", "video/mp4" - ), - Part.from_uri( - "gs://cloud-samples-data/generative-ai/image/character.jpg", - "image/jpeg", - ), - "Are these video and image correlated?", - ], - stream=True, - ) - - for response in responses: - print(response.candidates[0].content.text) - # Example response: - # No, the video and image are not correlated. The video shows a Google Photos - # project where animals at the Los Angeles Zoo take selfies using modified cameras. - # The image is a simple drawing of a wizard. 
- - # [END generativeaionvertexai_stream_multimodality_basic] - return responses - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/text_generation/noxfile_config.py b/generative_ai/text_generation/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/text_generation/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. 
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/text_generation/requirements-test.txt b/generative_ai/text_generation/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/text_generation/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/text_generation/requirements.txt b/generative_ai/text_generation/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/text_generation/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/text_generation/single_turn_multi_image_example.py b/generative_ai/text_generation/single_turn_multi_image_example.py deleted file mode 100644 index b9b9f6a298..0000000000 --- a/generative_ai/text_generation/single_turn_multi_image_example.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text_multimodal() -> str: - # [START generativeaionvertexai_gemini_single_turn_multi_image] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - # Load images from Cloud Storage URI - image_file1 = Part.from_uri( - "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png", - mime_type="image/png", - ) - image_file2 = Part.from_uri( - "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark2.png", - mime_type="image/png", - ) - image_file3 = Part.from_uri( - "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark3.png", - mime_type="image/png", - ) - - model = GenerativeModel("gemini-2.0-flash-001") - response = model.generate_content( - [ - image_file1, - "city: Rome, Landmark: the Colosseum", - image_file2, - "city: Beijing, Landmark: Forbidden City", - image_file3, - ] - ) - print(response.text) - # Example response: - # city: Rio de Janeiro, Landmark: Christ the Redeemer - - # [END generativeaionvertexai_gemini_single_turn_multi_image] - return response.text - - -if __name__ == "__main__": - generate_text_multimodal() diff --git a/generative_ai/text_generation/test_text_examples.py b/generative_ai/text_generation/test_text_examples.py deleted file mode 100644 index 75d95ca228..0000000000 --- a/generative_ai/text_generation/test_text_examples.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 
2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff - -from google.api_core.exceptions import ResourceExhausted - -import text_example01 -import text_example02 -import text_example03 -import text_stream_example01 -import text_stream_example02 - - -def test_non_stream_text_basic() -> None: - response = text_example03.generate_content() - assert response - - -def test_gemini_text_input_example() -> None: - text = text_example01.generate_from_text_input() - assert len(text) > 0 - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_interview() -> None: - content = text_example02.interview() - # check if response is empty - assert len(content) > 0 - - -def test_stream_text_basic() -> None: - responses = text_stream_example01.generate_content() - assert responses - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_streaming_prediction() -> None: - responses = text_stream_example02.streaming_prediction() - print(responses) - assert "1." in responses - assert "?" 
in responses - assert "you" in responses - assert "do" in responses diff --git a/generative_ai/text_generation/test_text_generation_examples.py b/generative_ai/text_generation/test_text_generation_examples.py deleted file mode 100644 index 55c20af878..0000000000 --- a/generative_ai/text_generation/test_text_generation_examples.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -import backoff - -from google.api_core.exceptions import ResourceExhausted - -import chat_code_example -import chat_multiturn_example -import chat_multiturn_stream_example -import chat_simple_example -import code_completion_example -import codegen_example -import gemini_describe_http_image_example -import gemini_describe_http_pdf_example -import gemini_translate_text -import generation_config_example -import multimodal_stream_example -import single_turn_multi_image_example - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_code_chat() -> None: - content = chat_code_example.write_a_function().text - assert len(content) > 0 - - -def test_gemini_describe_http_image_example() -> None: - text = gemini_describe_http_image_example.generate_content() - assert len(text) > 0 - - -def test_gemini_describe_http_pdf_example() -> None: - text = gemini_describe_http_pdf_example.generate_content() - assert len(text) > 0 - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def 
test_code_completion_comment() -> None: - content = code_completion_example.complete_code_function().text - assert len(content) > 0 - - -def test_stream_multi_modality_basic_example() -> None: - responses = multimodal_stream_example.generate_content() - assert responses - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_code_generation_function() -> None: - content = codegen_example.generate_a_function().text - print(content) - assert "year" in content - assert "return" in content - - -def test_gemini_multi_image_example() -> None: - text = single_turn_multi_image_example.generate_text_multimodal() - text = text.lower() - assert len(text) > 0 - assert "city" in text - assert "landmark" in text - - -def test_gemini_pro_config_example() -> None: - import urllib.request - - # Download the image - fname = "scones.jpg" - url = "https://storage.googleapis.com/generativeai-downloads/images/scones.jpg" - urllib.request.urlretrieve(url, fname) - - if os.path.isfile(fname): - text = generation_config_example.generate_text() - text = text.lower() - assert len(text) > 0 - - # clean-up - os.remove(fname) - else: - raise Exception("File(scones.jpg) not found!") - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_chat_example() -> None: - response = chat_simple_example.send_chat() - assert len(response) > 0 - - -def test_gemini_chat_example() -> None: - text = chat_multiturn_example.chat_text_example() - text = text.lower() - assert len(text) > 0 - assert any([_ in text for _ in ("hi", "hello", "greeting")]) - - text = chat_multiturn_stream_example.chat_stream_example() - text = text.lower() - assert len(text) > 0 - assert any([_ in text for _ in ("hi", "hello", "greeting")]) - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_translate_text_gemini() -> None: - response = gemini_translate_text.generate_translation - assert response diff --git 
a/generative_ai/text_generation/text_example01.py b/generative_ai/text_generation/text_example01.py deleted file mode 100644 index 744ec4ee1e..0000000000 --- a/generative_ai/text_generation/text_example01.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_from_text_input() -> str: - # [START generativeaionvertexai_gemini_generate_from_text_input] - import vertexai - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - response = model.generate_content( - "What's a good name for a flower shop that specializes in selling bouquets of dried flowers?" - ) - - print(response.text) - # Example response: - # **Emphasizing the Dried Aspect:** - # * Everlasting Blooms - # * Dried & Delightful - # * The Petal Preserve - # ... 
- - # [END generativeaionvertexai_gemini_generate_from_text_input] - return response.text - - -if __name__ == "__main__": - generate_from_text_input() diff --git a/generative_ai/text_generation/text_example02.py b/generative_ai/text_generation/text_example02.py deleted file mode 100644 index 45067ce1a4..0000000000 --- a/generative_ai/text_generation/text_example02.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def interview() -> str: - """Ideation example with a Large Language Model""" - # [START generativeaionvertexai_sdk_ideation] - import vertexai - - from vertexai.language_models import TextGenerationModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - parameters = { - "temperature": 0.2, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 256, # Token limit determines the maximum amount of text output. - "top_p": 0.8, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value. - "top_k": 40, # A top_k of 1 means the selected token is the most probable among all tokens. 
- } - - model = TextGenerationModel.from_pretrained("text-bison@002") - response = model.predict( - "Give me ten interview questions for the role of program manager.", - **parameters, - ) - print(f"Response from Model: {response.text}") - # Example response: - # Response from Model: 1. **Tell me about your experience managing programs.** - # 2. **What are your strengths and weaknesses as a program manager?** - # 3. **What do you think are the most important qualities for a successful program manager?** - # ... - - # [END generativeaionvertexai_sdk_ideation] - return response.text - - -if __name__ == "__main__": - interview() diff --git a/generative_ai/text_generation/text_example03.py b/generative_ai/text_generation/text_example03.py deleted file mode 100644 index 72c35ac6e2..0000000000 --- a/generative_ai/text_generation/text_example03.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> object: - # [START generativeaionvertexai_non_stream_text_basic] - import vertexai - - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - response = model.generate_content("Write a story about a magic backpack.") - - print(response.text) - # Example response: - # Elara found the backpack nestled amongst the dusty relics in her grandmother's attic. - # It wasn't particularly flashy; a worn canvas, the colour of faded moss, - # with tarnished brass buckles. But it hummed with a faint, ... - # ... - - # [END generativeaionvertexai_non_stream_text_basic] - return response - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/text_generation/text_stream_example01.py b/generative_ai/text_generation/text_stream_example01.py deleted file mode 100644 index da9cde5a39..0000000000 --- a/generative_ai/text_generation/text_stream_example01.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> object: - # [START generativeaionvertexai_stream_text_basic] - import vertexai - - from vertexai.generative_models import GenerativeModel - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - responses = model.generate_content( - "Write a story about a magic backpack.", stream=True - ) - - for response in responses: - print(response.text) - # Example response: - # El - # ara wasn't looking for magic. She was looking for rent money. - # Her tiny apartment, perched precariously on the edge of Whispering Woods, - # ... - - # [END generativeaionvertexai_stream_text_basic] - return responses - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/text_generation/text_stream_example02.py b/generative_ai/text_generation/text_stream_example02.py deleted file mode 100644 index 4f9f35817d..0000000000 --- a/generative_ai/text_generation/text_stream_example02.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def streaming_prediction() -> str: - """Streaming Text Example with a Large Language Model.""" - # [START generativeaionvertexai_streaming_text] - import vertexai - from vertexai import language_models - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - text_generation_model = language_models.TextGenerationModel.from_pretrained( - "text-bison" - ) - parameters = { - # Temperature controls the degree of randomness in token selection. - "temperature": 0.2, - # Token limit determines the maximum amount of text output. - "max_output_tokens": 256, - # Tokens are selected from most probable to least until the - # sum of their probabilities equals the top_p value. - "top_p": 0.8, - # A top_k of 1 means the selected token is the most probable among - # all tokens. - "top_k": 40, - } - - responses = text_generation_model.predict_streaming( - prompt="Give me ten interview questions for the role of program manager.", - **parameters, - ) - - results = [] - for response in responses: - print(response) - results.append(str(response)) - results = "\n".join(results) - print(results) - # Example response: - # 1. **Tell me about your experience as a program manager.** - # 2. **What are your strengths and weaknesses as a program manager?** - # 3. **What do you think are the most important qualities for a successful program manager?** - # 4. **How do you manage - # ... 
- - # [END generativeaionvertexai_streaming_text] - return results - - -if __name__ == "__main__": - streaming_prediction() diff --git a/generative_ai/text_models/classify_news_items.py b/generative_ai/text_models/classify_news_items.py deleted file mode 100644 index 87b149b3d0..0000000000 --- a/generative_ai/text_models/classify_news_items.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def classify_news_items() -> str: - """Text Classification Example with a Large Language Model""" - # [START generativeaionvertexai_classification] - from vertexai.language_models import TextGenerationModel - - model = TextGenerationModel.from_pretrained("text-bison@002") - - parameters = { - "temperature": 0.2, - "max_output_tokens": 5, - "top_p": 0, - "top_k": 1, - } - - response = model.predict( - """What is the topic for a given news headline? -- business -- entertainment -- health -- sports -- technology - -Text: Pixel 7 Pro Expert Hands On Review, the Most Helpful Google Phones. -The answer is: technology - -Text: Quit smoking? -The answer is: health - -Text: Roger Federer reveals why he touched Rafael Nadals hand while they were crying -The answer is: sports - -Text: Business relief from Arizona minimum-wage hike looking more remote -The answer is: business - -Text: #TomCruise has arrived in Bari, Italy for #MissionImpossible. 
-The answer is: entertainment - -Text: CNBC Reports Rising Digital Profit as Print Advertising Falls -The answer is: -""", - **parameters, - ) - - print(response.text) - # Example response: - # business - # [END generativeaionvertexai_classification] - - return response.text - - -if __name__ == "__main__": - classify_news_items() diff --git a/generative_ai/text_models/classify_news_items_test.py b/generative_ai/text_models/classify_news_items_test.py deleted file mode 100644 index 23ff8e8d6c..0000000000 --- a/generative_ai/text_models/classify_news_items_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff -from google.api_core.exceptions import ResourceExhausted - -import classify_news_items - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_classify_news_items() -> None: - content = classify_news_items.classify_news_items() - assert len(content) > 0 diff --git a/generative_ai/text_models/code_completion_test_function.py b/generative_ai/text_models/code_completion_test_function.py deleted file mode 100644 index 18d6e83f97..0000000000 --- a/generative_ai/text_models/code_completion_test_function.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def complete_test_function() -> object: - """Example of using Codey for Code Completion to complete a test function.""" - # [START aiplatform_sdk_code_completion_test_function] - from vertexai.language_models import CodeGenerationModel - - parameters = { - "temperature": 0.2, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 64, # Token limit determines the maximum amount of text output. - } - - code_completion_model = CodeGenerationModel.from_pretrained("code-gecko@001") - response = code_completion_model.predict( - prefix="""def reverse_string(s): - return s[::-1] - def test_empty_input_string()""", - **parameters, - ) - - print(f"Response from Model: {response.text}") - # [END aiplatform_sdk_code_completion_test_function] - - return response - - -if __name__ == "__main__": - complete_test_function() diff --git a/generative_ai/text_models/code_completion_test_function_test.py b/generative_ai/text_models/code_completion_test_function_test.py deleted file mode 100644 index d0ccfdb18e..0000000000 --- a/generative_ai/text_models/code_completion_test_function_test.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff -from google.api_core.exceptions import ResourceExhausted - -import code_completion_test_function - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_code_completion_test_function() -> None: - content = code_completion_test_function.complete_test_function().text - # every function def ends with `:` - assert content.startswith(":") - # test functions use `assert` for validations - assert "assert" in content - # test function should `reverse_string` at-least once - assert "reverse_string" in content diff --git a/generative_ai/text_models/code_generation_unittest.py b/generative_ai/text_models/code_generation_unittest.py deleted file mode 100644 index 10545d16a7..0000000000 --- a/generative_ai/text_models/code_generation_unittest.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -def generate_unittest() -> object: - """Example of using Codey for Code Generation to write a unit test.""" - # [START aiplatform_sdk_code_generation_unittest] - import textwrap - - from vertexai.language_models import CodeGenerationModel - - # TODO developer - override these parameters as needed: - parameters = { - "temperature": 0.5, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 256, # Token limit determines the maximum amount of text output. - } - - code_generation_model = CodeGenerationModel.from_pretrained("code-bison@001") - response = code_generation_model.predict( - prefix=textwrap.dedent( - """\ - Write a unit test for this function: - def is_leap_year(year): - if year % 4 == 0: - if year % 100 == 0: - if year % 400 == 0: - return True - else: - return False - else: - return True - else: - return False - """ - ), - **parameters, - ) - - print(f"Response from Model: {response.text}") - # [END aiplatform_sdk_code_generation_unittest] - - return response - - -if __name__ == "__main__": - generate_unittest() diff --git a/generative_ai/text_models/code_generation_unittest_test.py b/generative_ai/text_models/code_generation_unittest_test.py deleted file mode 100644 index e20754cfef..0000000000 --- a/generative_ai/text_models/code_generation_unittest_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import backoff -from google.api_core.exceptions import ResourceExhausted - -import code_generation_unittest - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_code_generation_unittest() -> None: - content = code_generation_unittest.generate_unittest().text - assert content diff --git a/generative_ai/text_models/extraction.py b/generative_ai/text_models/extraction.py deleted file mode 100644 index d104268cf2..0000000000 --- a/generative_ai/text_models/extraction.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def extractive_question_answering() -> str: - """Extractive Question Answering with a Large Language Model.""" - # [START aiplatform_sdk_extraction] - import vertexai - from vertexai.language_models import TextGenerationModel - - # TODO (developer): update project_id - vertexai.init(project=PROJECT_ID, location="us-central1") - parameters = { - "temperature": 0, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 256, # Token limit determines the maximum amount of text output. - "top_p": 0, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value. - "top_k": 1, # A top_k of 1 means the selected token is the most probable among all tokens. 
- } - - model = TextGenerationModel.from_pretrained("text-bison@002") - response = model.predict( - prompt="""Background: There is evidence that there have been significant changes \ -in Amazon rainforest vegetation over the last 21,000 years through the Last \ -Glacial Maximum (LGM) and subsequent deglaciation. Analyses of sediment \ -deposits from Amazon basin paleo lakes and from the Amazon Fan indicate that \ -rainfall in the basin during the LGM was lower than for the present, and this \ -was almost certainly associated with reduced moist tropical vegetation cover \ -in the basin. There is debate, however, over how extensive this reduction \ -was. Some scientists argue that the rainforest was reduced to small, isolated \ -refugia separated by open forest and grassland; other scientists argue that \ -the rainforest remained largely intact but extended less far to the north, \ -south, and east than is seen today. This debate has proved difficult to \ -resolve because the practical limitations of working in the rainforest mean \ -that data sampling is biased away from the center of the Amazon basin, and \ -both explanations are reasonably well supported by the available data. - -Q: What does LGM stands for? -A: Last Glacial Maximum. - -Q: What did the analysis from the sediment deposits indicate? -A: Rainfall in the basin during the LGM was lower than for the present. - -Q: What are some of scientists arguments? -A: The rainforest was reduced to small, isolated refugia separated by open forest and grassland. - -Q: There have been major changes in Amazon rainforest vegetation over the last how many years? -A: 21,000. - -Q: What caused changes in the Amazon rainforest vegetation? -A: The Last Glacial Maximum (LGM) and subsequent deglaciation - -Q: What has been analyzed to compare Amazon rainfall in the past and present? -A: Sediment deposits. - -Q: What has the lower rainfall in the Amazon during the LGM been attributed to? 
-A:""", - **parameters, - ) - print(f"Response from Model: {response.text}") - - # [END aiplatform_sdk_extraction] - return response.text - - -if __name__ == "__main__": - extractive_question_answering() diff --git a/generative_ai/text_models/extraction_test.py b/generative_ai/text_models/extraction_test.py deleted file mode 100644 index a0f9f51782..0000000000 --- a/generative_ai/text_models/extraction_test.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import backoff -from google.api_core.exceptions import ResourceExhausted - -import extraction - - -_PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") -_LOCATION = "us-central1" - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_extractive_question_answering() -> None: - content = extraction.extractive_question_answering() - assert content.strip() == "Reduced moist tropical vegetation cover in the basin." diff --git a/generative_ai/text_models/list_tuned_code_generation_models.py b/generative_ai/text_models/list_tuned_code_generation_models.py deleted file mode 100644 index c28723015a..0000000000 --- a/generative_ai/text_models/list_tuned_code_generation_models.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def list_tuned_code_generation_models() -> None: - """List tuned models.""" - # [START aiplatform_sdk_list_tuned_code_generation_models] - - import vertexai - from vertexai.preview.language_models import CodeGenerationModel - - # TODO(developer): Update project_id - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - model = CodeGenerationModel.from_pretrained("code-bison@001") - tuned_model_names = model.list_tuned_model_names() - print(tuned_model_names) - # [END aiplatform_sdk_list_tuned_code_generation_models] - - return tuned_model_names - - -if __name__ == "__main__": - list_tuned_code_generation_models() diff --git a/generative_ai/text_models/list_tuned_code_generation_models_test.py b/generative_ai/text_models/list_tuned_code_generation_models_test.py deleted file mode 100644 index 5f2eb5f23f..0000000000 --- a/generative_ai/text_models/list_tuned_code_generation_models_test.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import backoff -from google.api_core.exceptions import ResourceExhausted -from google.cloud import aiplatform - -import list_tuned_code_generation_models - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_list_tuned_code_generation_models() -> None: - tuned_model_names = ( - list_tuned_code_generation_models.list_tuned_code_generation_models() - ) - filtered_models_counter = 0 - for tuned_model_name in tuned_model_names: - model_registry = aiplatform.models.ModelRegistry(model=tuned_model_name) - if ( - "Vertex LLM Test Fixture " - "(list_tuned_models_test.py::test_list_tuned_models)" - ) in model_registry.get_version_info("1").model_display_name: - filtered_models_counter += 1 - assert filtered_models_counter == 0 diff --git a/generative_ai/text_models/noxfile_config.py b/generative_ai/text_models/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/text_models/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. 
- -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/text_models/requirements-test.txt b/generative_ai/text_models/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/text_models/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/text_models/requirements.txt b/generative_ai/text_models/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/text_models/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/text_models/sentiment_analysis.py b/generative_ai/text_models/sentiment_analysis.py deleted file mode 100644 index ca7cf8f9da..0000000000 --- a/generative_ai/text_models/sentiment_analysis.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def sentiment_analysis() -> str: - """Sentiment analysis example with a Large Language Model.""" - # [START aiplatform_sdk_sentiment_analysis] - import vertexai - - from vertexai.language_models import TextGenerationModel - - # TODO(developer): update project_id, location & temperature - vertexai.init(project=PROJECT_ID, location="us-central1") - parameters = { - "temperature": 0, # Temperature controls the degree of randomness in token selection. - "max_output_tokens": 5, # Token limit determines the maximum amount of text output. - "top_p": 0, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value. - "top_k": 1, # A top_k of 1 means the selected token is the most probable among all tokens. - } - - model = TextGenerationModel.from_pretrained("text-bison@002") - response = model.predict( - """I had to compare two versions of Hamlet for my Shakespeare class and \ -unfortunately I picked this version. Everything from the acting (the actors \ -deliver most of their lines directly to the camera) to the camera shots (all \ -medium or close up shots...no scenery shots and very little back ground in the \ -shots) were absolutely terrible. I watched this over my spring break and it is \ -very safe to say that I feel that I was gypped out of 114 minutes of my \ -vacation. Not recommended by any stretch of the imagination. -Classify the sentiment of the message: negative - -Something surprised me about this movie - it was actually original. It was not \ -the same old recycled crap that comes out of Hollywood every month. I saw this \ -movie on video because I did not even know about it before I saw it at my \ -local video store. If you see this movie available - rent it - you will not \ -regret it. -Classify the sentiment of the message: positive - -My family has watched Arthur Bach stumble and stammer since the movie first \ -came out. 
We have most lines memorized. I watched it two weeks ago and still \ -get tickled at the simple humor and view-at-life that Dudley Moore portrays. \ -Liza Minelli did a wonderful job as the side kick - though I\'m not her \ -biggest fan. This movie makes me just enjoy watching movies. My favorite scene \ -is when Arthur is visiting his fiancée\'s house. His conversation with the \ -butler and Susan\'s father is side-spitting. The line from the butler, \ -"Would you care to wait in the Library" followed by Arthur\'s reply, \ -"Yes I would, the bathroom is out of the question", is my NEWMAIL \ -notification on my computer. -Classify the sentiment of the message: positive - -This Charles outing is decent but this is a pretty low-key performance. Marlon \ -Brando stands out. There\'s a subplot with Mira Sorvino and Donald Sutherland \ -that forgets to develop and it hurts the film a little. I\'m still trying to \ -figure out why Charlie want to change his name. -Classify the sentiment of the message: negative - -Tweet: The Pixel 7 Pro, is too big to fit in my jeans pocket, so I bought \ -new jeans. -Classify the sentiment of the message: """, - **parameters, - ) - print(f"Response from Model: {response.text}") - # [END aiplatform_sdk_sentiment_analysis] - - return response.text - - -if __name__ == "__main__": - sentiment_analysis() diff --git a/generative_ai/text_models/sentiment_analysis_test.py b/generative_ai/text_models/sentiment_analysis_test.py deleted file mode 100644 index 16c6f086dd..0000000000 --- a/generative_ai/text_models/sentiment_analysis_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff -from google.api_core.exceptions import ResourceExhausted - -import sentiment_analysis - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_sentiment_analysis() -> None: - content = sentiment_analysis.sentiment_analysis() - assert content is not None diff --git a/generative_ai/text_models/streaming_chat.py b/generative_ai/text_models/streaming_chat.py deleted file mode 100644 index 665ae177f9..0000000000 --- a/generative_ai/text_models/streaming_chat.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def streaming_prediction() -> str: - """Streaming Chat Example with a Large Language Model.""" - # [START aiplatform_streaming_chat] - import vertexai - - from vertexai import language_models - - # TODO(developer): update project_id & location - vertexai.init(project=PROJECT_ID, location="us-central1") - - chat_model = language_models.ChatModel.from_pretrained("chat-bison") - - parameters = { - # Temperature controls the degree of randomness in token selection. - "temperature": 0.8, - # Token limit determines the maximum amount of text output. - "max_output_tokens": 256, - # Tokens are selected from most probable to least until the - # sum of their probabilities equals the top_p value. - "top_p": 0.95, - # A top_k of 1 means the selected token is the most probable among - # all tokens. - "top_k": 40, - } - - chat = chat_model.start_chat( - context="My name is Miles. You are an astronomer, knowledgeable about the solar system.", - examples=[ - language_models.InputOutputTextPair( - input_text="How many moons does Mars have?", - output_text="The planet Mars has two moons, Phobos and Deimos.", - ), - ], - ) - - responses = chat.send_message_streaming( - message="How many planets are there in the solar system?", - **parameters, - ) - - results = [] - for response in responses: - print(response) - results.append(str(response)) - results = "".join(results) - print(results) - # [END aiplatform_streaming_chat] - return results - - -if __name__ == "__main__": - streaming_prediction() diff --git a/generative_ai/text_models/streaming_chat_test.py b/generative_ai/text_models/streaming_chat_test.py deleted file mode 100644 index c5a47271b2..0000000000 --- a/generative_ai/text_models/streaming_chat_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff -from google.api_core.exceptions import ResourceExhausted - -import streaming_chat - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_streaming_prediction() -> None: - responses = streaming_chat.streaming_prediction() - assert "Earth" in responses diff --git a/generative_ai/text_models/streaming_code.py b/generative_ai/text_models/streaming_code.py deleted file mode 100644 index 0af43ed101..0000000000 --- a/generative_ai/text_models/streaming_code.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def streaming_prediction() -> str: - """Streaming Code Example with a Large Language Model.""" - # [START aiplatform_streaming_code] - import vertexai - from vertexai import language_models - - # TODO(developer): update project_id & location - vertexai.init(project=PROJECT_ID, location="us-central1") - - code_generation_model = language_models.CodeGenerationModel.from_pretrained( - "code-bison" - ) - parameters = { - # Temperature controls the degree of randomness in token selection. - "temperature": 0.8, - # Token limit determines the maximum amount of text output. - "max_output_tokens": 256, - } - - responses = code_generation_model.predict_streaming( - prefix="Write a function that checks if a year is a leap year.", - **parameters, - ) - - results = [] - for response in responses: - print(response) - results.append(str(response)) - results = "\n".join(results) - return results - - -# [END aiplatform_streaming_code] -if __name__ == "__main__": - streaming_prediction() diff --git a/generative_ai/text_models/streaming_code_test.py b/generative_ai/text_models/streaming_code_test.py deleted file mode 100644 index 2940e52168..0000000000 --- a/generative_ai/text_models/streaming_code_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import backoff -from google.api_core.exceptions import ResourceExhausted - -import streaming_code - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_streaming_prediction() -> None: - responses = streaming_code.streaming_prediction() - assert "year" in responses diff --git a/generative_ai/text_models/streaming_codechat.py b/generative_ai/text_models/streaming_codechat.py deleted file mode 100644 index 9c7c9c08d3..0000000000 --- a/generative_ai/text_models/streaming_codechat.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def streaming_prediction() -> str: - """Streaming Code Chat Example with a Large Language Model.""" - # [START aiplatform_streaming_codechat] - import vertexai - from vertexai import language_models - - # TODO(developer): update project_id & location - vertexai.init(project=PROJECT_ID, location="us-central1") - - codechat_model = language_models.CodeChatModel.from_pretrained("codechat-bison") - parameters = { - # Temperature controls the degree of randomness in token selection. - "temperature": 0.8, - # Token limit determines the maximum amount of text output. 
- "max_output_tokens": 1024, - } - codechat = codechat_model.start_chat() - - responses = codechat.send_message_streaming( - message="Please help write a function to calculate the min of two numbers", - **parameters, - ) - - results = [] - for response in responses: - print(response) - results.append(str(response)) - results = "\n".join(results) - print(results) - # [END aiplatform_streaming_codechat] - return results - - -if __name__ == "__main__": - streaming_prediction() diff --git a/generative_ai/text_models/streaming_codechat_test.py b/generative_ai/text_models/streaming_codechat_test.py deleted file mode 100644 index e51c084277..0000000000 --- a/generative_ai/text_models/streaming_codechat_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import backoff -from google.api_core.exceptions import ResourceExhausted - -import streaming_codechat - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_streaming_prediction() -> None: - responses = streaming_codechat.streaming_prediction() - assert "def" in responses diff --git a/generative_ai/text_models/summarization.py b/generative_ai/text_models/summarization.py deleted file mode 100644 index 4ad06e2edd..0000000000 --- a/generative_ai/text_models/summarization.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def text_summarization() -> str: - """Summarization Example with a Large Language Model""" - # [START aiplatform_sdk_summarization] - import vertexai - from vertexai.language_models import TextGenerationModel - - # TODO(developer): update project_id & location - vertexai.init(project=PROJECT_ID, location="us-central1") - - parameters = { - "temperature": 0, - "max_output_tokens": 256, - "top_p": 0.95, - "top_k": 40, - } - - model = TextGenerationModel.from_pretrained("text-bison@002") - response = model.predict( - """Provide a summary with about two sentences for the following article: - The efficient-market hypothesis (EMH) is a hypothesis in financial \ - economics that states that asset prices reflect all available \ - information. 
A direct implication is that it is impossible to \ - "beat the market" consistently on a risk-adjusted basis since market \ - prices should only react to new information. Because the EMH is \ - formulated in terms of risk adjustment, it only makes testable \ - predictions when coupled with a particular model of risk. As a \ - result, research in financial economics since at least the 1990s has \ - focused on market anomalies, that is, deviations from specific \ - models of risk. The idea that financial market returns are difficult \ - to predict goes back to Bachelier, Mandelbrot, and Samuelson, but \ - is closely associated with Eugene Fama, in part due to his \ - influential 1970 review of the theoretical and empirical research. \ - The EMH provides the basic logic for modern risk-based theories of \ - asset prices, and frameworks such as consumption-based asset pricing \ - and intermediary asset pricing can be thought of as the combination \ - of a model of risk with the EMH. Many decades of empirical research \ - on return predictability has found mixed evidence. Research in the \ - 1950s and 1960s often found a lack of predictability (e.g. Ball and \ - Brown 1968; Fama, Fisher, Jensen, and Roll 1969), yet the \ - 1980s-2000s saw an explosion of discovered return predictors (e.g. \ - Rosenberg, Reid, and Lanstein 1985; Campbell and Shiller 1988; \ - Jegadeesh and Titman 1993). Since the 2010s, studies have often \ - found that return predictability has become more elusive, as \ - predictability fails to work out-of-sample (Goyal and Welch 2008), \ - or has been weakened by advances in trading technology and investor \ - learning (Chordia, Subrahmanyam, and Tong 2014; McLean and Pontiff \ - 2016; Martineau 2021). 
- Summary:""", - **parameters, - ) - print(f"Response from Model: {response.text}") - # [END aiplatform_sdk_summarization] - - return response.text diff --git a/generative_ai/text_models/summarization_test.py b/generative_ai/text_models/summarization_test.py deleted file mode 100644 index 502c6a7e03..0000000000 --- a/generative_ai/text_models/summarization_test.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import backoff -from google.api_core.exceptions import ResourceExhausted - -import summarization - - -expected_response = """The efficient-market hypothesis""" - - -@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10) -def test_text_summarization() -> None: - content = summarization.text_summarization() - assert expected_response in content diff --git a/generative_ai/understand_audio/noxfile_config.py b/generative_ai/understand_audio/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/understand_audio/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/understand_audio/requirements-test.txt b/generative_ai/understand_audio/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/understand_audio/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/understand_audio/requirements.txt b/generative_ai/understand_audio/requirements.txt deleted file mode 100644 index d43443792b..0000000000 --- a/generative_ai/understand_audio/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.71.1 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/understand_audio/summarization_example.py b/generative_ai/understand_audio/summarization_example.py deleted file mode 100644 index 80b160a181..0000000000 --- a/generative_ai/understand_audio/summarization_example.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def summarize_audio() -> str: - """Summarizes the content of an audio file using a pre-trained generative model.""" - # [START generativeaionvertexai_gemini_audio_summarization] - - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - prompt = """ - Please provide a summary for the audio. - Provide chapter titles, be concise and short, no need to provide chapter summaries. - Do not make up any information that is not part of the audio and do not be verbose. - """ - - audio_file_uri = "gs://cloud-samples-data/generative-ai/audio/pixel.mp3" - audio_file = Part.from_uri(audio_file_uri, mime_type="audio/mpeg") - - contents = [audio_file, prompt] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # **Made By Google Podcast Summary** - # **Chapter Titles:** - # * Introduction - # * Transformative Pixel Features - # ... - - # [END generativeaionvertexai_gemini_audio_summarization] - return response.text - - -if __name__ == "__main__": - summarize_audio() diff --git a/generative_ai/understand_audio/transcription_example.py b/generative_ai/understand_audio/transcription_example.py deleted file mode 100644 index e7e659f257..0000000000 --- a/generative_ai/understand_audio/transcription_example.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def transcript_audio() -> str: - """Transcribes the content of an audio file using a pre-trained generative model.""" - # [START generativeaionvertexai_gemini_audio_transcription] - - import vertexai - from vertexai.generative_models import GenerativeModel, GenerationConfig, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - prompt = """ - Can you transcribe this interview, in the format of timecode, speaker, caption. - Use speaker A, speaker B, etc. to identify speakers. - """ - - audio_file_uri = "gs://cloud-samples-data/generative-ai/audio/pixel.mp3" - audio_file = Part.from_uri(audio_file_uri, mime_type="audio/mpeg") - - contents = [audio_file, prompt] - - response = model.generate_content(contents, generation_config=GenerationConfig(audio_timestamp=True)) - - print(response.text) - # Example response: - # [00:00:00] Speaker A: Your devices are getting better over time... - # [00:00:16] Speaker B: Welcome to the Made by Google podcast, ... - # [00:01:00] Speaker A: So many features. I am a singer. ... - # [00:01:33] Speaker B: Amazing. DeCarlos, same question to you, ... 
- - # [END generativeaionvertexai_gemini_audio_transcription] - return response.text - - -if __name__ == "__main__": - transcript_audio() diff --git a/generative_ai/understand_audio/understand_audio_test.py b/generative_ai/understand_audio/understand_audio_test.py deleted file mode 100644 index 64b986feb0..0000000000 --- a/generative_ai/understand_audio/understand_audio_test.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import summarization_example -import transcription_example - - -def test_summarize_audio() -> None: - text = summarization_example.summarize_audio() - assert len(text) > 0 - - -def test_transcript_audio() -> None: - text = transcription_example.transcript_audio() - assert len(text) > 0 diff --git a/generative_ai/understand_docs/noxfile_config.py b/generative_ai/understand_docs/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/understand_docs/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/understand_docs/pdf_example.py b/generative_ai/understand_docs/pdf_example.py deleted file mode 100644 index ac568c6512..0000000000 --- a/generative_ai/understand_docs/pdf_example.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def analyze_pdf() -> str: - # [START generativeaionvertexai_gemini_pdf] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update project_id and location - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - prompt = """ - You are a very professional document summarization specialist. - Please summarize the given document. - """ - - pdf_file = Part.from_uri( - uri="gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf", - mime_type="application/pdf", - ) - contents = [pdf_file, prompt] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # Here's a summary of the provided text, which appears to be a research paper on the Gemini 1.5 Pro - # multimodal large language model: - # **Gemini 1.5 Pro: Key Advancements and Capabilities** - # The paper introduces Gemini 1.5 Pro, a highly compute-efficient multimodal model - # significantly advancing long-context capabilities - # ... 
- - # [END generativeaionvertexai_gemini_pdf] - return response.text - - -if __name__ == "__main__": - analyze_pdf() diff --git a/generative_ai/understand_docs/pdf_example_test.py b/generative_ai/understand_docs/pdf_example_test.py deleted file mode 100644 index db93aa4926..0000000000 --- a/generative_ai/understand_docs/pdf_example_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pdf_example - - -def test_gemini_pdf_example() -> None: - text = pdf_example.analyze_pdf() - assert len(text) > 0 diff --git a/generative_ai/understand_docs/requirements-test.txt b/generative_ai/understand_docs/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/understand_docs/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/understand_docs/requirements.txt b/generative_ai/understand_docs/requirements.txt deleted file mode 100644 index 6dd2c0972e..0000000000 --- a/generative_ai/understand_docs/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 
-langchain-core==0.2.43 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/understand_video/audio_video_example.py b/generative_ai/understand_video/audio_video_example.py deleted file mode 100644 index 838d7f29c5..0000000000 --- a/generative_ai/understand_video/audio_video_example.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def analyze_video_with_audio() -> str: - # [START generativeaionvertexai_gemini_video_with_audio] - - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - prompt = """ - Provide a description of the video. - The description should also contain anything important which people say in the video. - """ - - video_file = Part.from_uri( - uri="gs://cloud-samples-data/generative-ai/video/pixel8.mp4", - mime_type="video/mp4", - ) - - contents = [video_file, prompt] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # Here is a description of the video. - # ... Then, the scene changes to a woman named Saeko Shimada.. - # She says, "Tokyo has many faces. 
The city at night is totally different - # from what you see during the day." - # ... - - # [END generativeaionvertexai_gemini_video_with_audio] - return response.text - - -if __name__ == "__main__": - analyze_video_with_audio() diff --git a/generative_ai/understand_video/noxfile_config.py b/generative_ai/understand_video/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/understand_video/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. 
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} diff --git a/generative_ai/understand_video/requirements-test.txt b/generative_ai/understand_video/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/understand_video/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/understand_video/requirements.txt b/generative_ai/understand_video/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/understand_video/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/understand_video/single_turn_video_example.py b/generative_ai/understand_video/single_turn_video_example.py deleted file mode 100644 index 81eec8f5ae..0000000000 --- a/generative_ai/understand_video/single_turn_video_example.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_text() -> str: - # [START generativeaionvertexai_gemini_single_turn_video] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - vision_model = GenerativeModel("gemini-2.0-flash-001") - - # Generate text - response = vision_model.generate_content( - [ - Part.from_uri( - "gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4" - ), - "What is in the video?", - ] - ) - print(response.text) - # Example response: - # Here's a summary of the video's content. - # The video shows a series of animals at the Los Angeles Zoo interacting - # with waterproof cameras attached to various devices. - # ... - - # [END generativeaionvertexai_gemini_single_turn_video] - return response.text - - -if __name__ == "__main__": - generate_text() diff --git a/generative_ai/understand_video/understand_video_test.py b/generative_ai/understand_video/understand_video_test.py deleted file mode 100644 index d3cbf1ca63..0000000000 --- a/generative_ai/understand_video/understand_video_test.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import audio_video_example -import single_turn_video_example - - -def test_analyze_video_with_audio() -> None: - text = audio_video_example.analyze_video_with_audio() - assert len(text) > 0 - - -def test_gemini_single_turn_video_example() -> None: - text = single_turn_video_example.generate_text() - text = text.lower() - assert len(text) > 0 - assert any( - [_ in text for _ in ("zoo", "tiger", "leaf", "water", "animals", "photos")] - ) diff --git a/generative_ai/video/gemini_describe_http_video_example.py b/generative_ai/video/gemini_describe_http_video_example.py deleted file mode 100644 index 559ccc4819..0000000000 --- a/generative_ai/video/gemini_describe_http_video_example.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_describe_http_video] - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO (developer): update project id - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - contents = [ - # Text prompt - "Describe this video.", - # Example video ad for Pixel 8 - Part.from_uri( - "https://storage.googleapis.com/cloud-samples-data/generative-ai/video/pixel8.mp4", - "video/mp4", - ), - ] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # 'Here is a description of the video.' - # 'This is a Google Pixel 8 advertisement featuring Saeko Shimada, a photographer' - # ' in Tokyo, Japan. The video opens with a view of a train passing ... ' - # [END generativeaionvertexai_gemini_describe_http_video] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/video/gemini_youtube_video_key_moments_example.py b/generative_ai/video/gemini_youtube_video_key_moments_example.py deleted file mode 100644 index dc450de455..0000000000 --- a/generative_ai/video/gemini_youtube_video_key_moments_example.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_youtube_video_key_moments] - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO (developer): update project id - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - contents = [ - # Text prompt - "Identify the key moments of this video.", - # YouTube video of Paris 2024 Olympics - Part.from_uri("https://www.youtube.com/watch?v=6F5gZWcpNU4", "video/mp4"), - ] - - response = model.generate_content(contents) - print(response.text) - # Example response - # This video is a fast-paced, exciting montage of athletes competing in and celebrating their victories in the 2024 Summer Olympics in Paris, France. Key moments include: - # - [00:00:01] The Olympic rings are shown with laser lights and fireworks in the opening ceremonies. - # - [00:00:02–00:00:08] Various shots of the games’ venues are shown, including aerial views of skateboarding and volleyball venues, a view of the track and field stadium, and a shot of the Palace of Versailles. - # - [00:00:09–00:01:16] A fast-paced montage shows highlights from various Olympic competitions. - # - [00:01:17–00:01:29] The video switches to show athletes celebrating victories, both tears of joy and tears of sadness are shown. - # - [00:01:30–00:02:26] The montage then continues to showcase sporting events, including cycling, kayaking, swimming, track and field, gymnastics, surfing, basketball, and ping-pong. - # - [00:02:27–00:04:03] More athletes celebrate their wins. - # - [00:04:04–00:04:55] More Olympic sports are shown, followed by more celebrations. - # - [00:04:56] Olympic medals are shown. - # - [00:04:57] An aerial shot of the Eiffel Tower lit up with the Olympic rings is shown at night. - # - [00:04:58–00:05:05] The video ends with a black screen and the words, “Sport. 
And More Than Sport.” written beneath the Olympic rings. - # [END generativeaionvertexai_gemini_youtube_video_key_moments] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/video/gemini_youtube_video_summarization_example.py b/generative_ai/video/gemini_youtube_video_summarization_example.py deleted file mode 100644 index 5e13253971..0000000000 --- a/generative_ai/video/gemini_youtube_video_summarization_example.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> str: - # [START generativeaionvertexai_gemini_youtube_video_summarization] - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO (developer): update project id - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - contents = [ - # Text prompt - "Summarize this video.", - # YouTube video of Google Pixel 9 - Part.from_uri("https://youtu.be/MsAPm8TCFhU", "video/mp4"), - ] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # 'This Google Pixel 9 Pro advertisement shows how the Gemini AI feature enhances' - # ' the capabilities of the phone. The video starts with ...' 
- # [END generativeaionvertexai_gemini_youtube_video_summarization] - return response.text - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/video/multimodal_example01.py b/generative_ai/video/multimodal_example01.py deleted file mode 100644 index 2be8c2e59a..0000000000 --- a/generative_ai/video/multimodal_example01.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def analyze_all_modalities() -> str: - # [START generativeaionvertexai_gemini_all_modalities] - - import vertexai - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - video_file_uri = ( - "gs://cloud-samples-data/generative-ai/video/behind_the_scenes_pixel.mp4" - ) - - image_file_uri = "gs://cloud-samples-data/generative-ai/image/a-man-and-a-dog.png" - - prompt = """ - Watch each frame in the video carefully and answer the questions. - Only base your answers strictly on what information is available in the video attached. - Do not make up any information that is not part of the video and do not be too - verbose, be to the point. - - Questions: - - When is the moment in the image happening in the video? Provide a timestamp. 
- - What is the context of the moment and what does the narrator say about it? - """ - - contents = [ - Part.from_uri(video_file_uri, mime_type="video/mp4"), - Part.from_uri(image_file_uri, mime_type="image/png"), - prompt, - ] - - response = model.generate_content(contents) - print(response.text) - # Example response: - # Here are the answers to your questions. - # - **Timestamp:** 0:48 - # - **Context and Narration:** A man and his dog are sitting on a sofa - # and taking a selfie. The narrator says that the story is about a blind man - # and his girlfriend and follows them on their journey together and growing closer. - - # [END generativeaionvertexai_gemini_all_modalities] - return response.text - - -if __name__ == "__main__": - analyze_all_modalities() diff --git a/generative_ai/video/multimodal_example02.py b/generative_ai/video/multimodal_example02.py deleted file mode 100644 index a36ff1dabe..0000000000 --- a/generative_ai/video/multimodal_example02.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def generate_content() -> object: - # [START generativeaionvertexai_non_stream_multimodality_basic] - import vertexai - - from vertexai.generative_models import GenerativeModel, Part - - # TODO(developer): Update and un-comment below line - # PROJECT_ID = "your-project-id" - - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - response = model.generate_content( - [ - Part.from_uri( - "gs://cloud-samples-data/generative-ai/video/animals.mp4", "video/mp4" - ), - Part.from_uri( - "gs://cloud-samples-data/generative-ai/image/character.jpg", - "image/jpeg", - ), - "Are these video and image correlated?", - ] - ) - - print(response.text) - # Example response: - # No, the video and image are not correlated. - # The video shows a Google Photos project where animals at the - # Los Angeles Zoo take selfies using a specially designed camera rig. - # The image is a simple drawing of a wizard. - - # [END generativeaionvertexai_non_stream_multimodality_basic] - return response - - -if __name__ == "__main__": - generate_content() diff --git a/generative_ai/video/noxfile_config.py b/generative_ai/video/noxfile_config.py deleted file mode 100644 index 962ba40a92..0000000000 --- a/generative_ai/video/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. 
- "envs": {}, -} diff --git a/generative_ai/video/requirements-test.txt b/generative_ai/video/requirements-test.txt deleted file mode 100644 index 92281986e5..0000000000 --- a/generative_ai/video/requirements-test.txt +++ /dev/null @@ -1,4 +0,0 @@ -backoff==2.2.1 -google-api-core==2.19.0 -pytest==8.2.0 -pytest-asyncio==0.23.6 diff --git a/generative_ai/video/requirements.txt b/generative_ai/video/requirements.txt deleted file mode 100644 index 09178aa830..0000000000 --- a/generative_ai/video/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -pandas==2.2.3; python_version == '3.7' -pandas==2.2.3; python_version == '3.8' -pandas==2.2.3; python_version > '3.8' -pillow==10.3.0; python_version < '3.8' -pillow==10.3.0; python_version >= '3.8' -google-cloud-aiplatform[all]==1.69.0 -sentencepiece==0.2.0 -google-auth==2.38.0 -anthropic[vertex]==0.28.0 -langchain-core==0.2.33 -langchain-google-vertexai==1.0.10 -numpy<3 -openai==1.68.2 -immutabledict==4.2.0 diff --git a/generative_ai/video/test_video_examples.py b/generative_ai/video/test_video_examples.py deleted file mode 100644 index f81c52660d..0000000000 --- a/generative_ai/video/test_video_examples.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gemini_describe_http_video_example -import gemini_youtube_video_key_moments_example -import gemini_youtube_video_summarization_example -import multimodal_example01 -import multimodal_example02 - - -def test_gemini_describe_http_video_example() -> None: - text = gemini_describe_http_video_example.generate_content() - assert len(text) > 0 - - -def test_gemini_youtube_video_key_moments_example() -> None: - text = gemini_youtube_video_key_moments_example.generate_content() - assert len(text) > 0 - - -def test_gemini_youtube_video_summarization_example() -> None: - text = gemini_youtube_video_summarization_example.generate_content() - assert len(text) > 0 - - -def test_analyze_all_modalities() -> None: - text = multimodal_example01.analyze_all_modalities() - assert len(text) > 0 - - -def test_stream_multi_modality_basic() -> None: - responses = multimodal_example02.generate_content() - assert responses From 33c747fa4c6d30f384124b79a650772bedaf0121 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 19:48:46 +0200 Subject: [PATCH 05/19] feat(genai): update --- generative_ai/evaluation/pairwise_summarization_quality.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generative_ai/evaluation/pairwise_summarization_quality.py b/generative_ai/evaluation/pairwise_summarization_quality.py index 3c24eccca0..88c8987190 100644 --- a/generative_ai/evaluation/pairwise_summarization_quality.py +++ b/generative_ai/evaluation/pairwise_summarization_quality.py @@ -52,7 +52,7 @@ def evaluate_output() -> EvalResult: eval_dataset = pd.DataFrame({"prompt": [prompt]}) # Baseline model for pairwise comparison - baseline_model = GenerativeModel("gemini-2.0-flash-001") + baseline_model = GenerativeModel("gemini-2.0-flash-lite-001") # Candidate model for pairwise comparison candidate_model = GenerativeModel( From c6dc63eda1f6b8a3327b7d61046b21455cedd836 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: 
Mon, 31 Mar 2025 13:02:08 -0500 Subject: [PATCH 06/19] Update discoveryengine/answer_query_sample.py --- discoveryengine/answer_query_sample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/discoveryengine/answer_query_sample.py b/discoveryengine/answer_query_sample.py index 54f655a712..5f546ab6f7 100644 --- a/discoveryengine/answer_query_sample.py +++ b/discoveryengine/answer_query_sample.py @@ -69,7 +69,7 @@ def answer_query_sample( ignore_non_answer_seeking_query=False, # Optional: Ignore non-answer seeking query ignore_low_relevant_content=False, # Optional: Return fallback answer when content is not relevant model_spec=discoveryengine.AnswerQueryRequest.AnswerGenerationSpec.ModelSpec( - model_version="gemini-2.0-flash-001/answer_gen/v2", # Optional: Model to use for answer generation + model_version="gemini-2.0-flash-001/answer_gen/v1", # Optional: Model to use for answer generation ), prompt_spec=discoveryengine.AnswerQueryRequest.AnswerGenerationSpec.PromptSpec( preamble="Give a detailed answer.", # Optional: Natural language instructions for customizing the answer. From b2d0459b6ead065d82f87b5c5179821d2cf172e7 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 20:25:29 +0200 Subject: [PATCH 07/19] feat(genai): update --- genai/content_cache/noxfile_config.py | 42 +++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 genai/content_cache/noxfile_config.py diff --git a/genai/content_cache/noxfile_config.py b/genai/content_cache/noxfile_config.py new file mode 100644 index 0000000000..9a94062de7 --- /dev/null +++ b/genai/content_cache/noxfile_config.py @@ -0,0 +1,42 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.12"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": True, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. 
+ "envs": {}, +} \ No newline at end of file From 3d474dc8dca4cdf92c7e11a6b9f1a06b2a86fda3 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 20:30:01 +0200 Subject: [PATCH 08/19] feat(genai): update --- translate/samples/snippets/translate_with_gemini.py | 2 +- translate/samples/snippets/translate_with_gemini_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/translate/samples/snippets/translate_with_gemini.py b/translate/samples/snippets/translate_with_gemini.py index 6cc5cec137..953d2b9150 100644 --- a/translate/samples/snippets/translate_with_gemini.py +++ b/translate/samples/snippets/translate_with_gemini.py @@ -31,7 +31,7 @@ def translate_text(text: str, target_language_code: str = "fr") -> GenerationRes responses: The response from the model containing the translated text. """ # Initializes the Vertex AI with the specified project and location - vertexai.init(project=PROJECT_ID, location="europe-west2") + vertexai.init(project=PROJECT_ID, location="us-central1") model = GenerativeModel("gemini-2.0-flash-001") diff --git a/translate/samples/snippets/translate_with_gemini_test.py b/translate/samples/snippets/translate_with_gemini_test.py index cc3d05ad16..720c4146b4 100644 --- a/translate/samples/snippets/translate_with_gemini_test.py +++ b/translate/samples/snippets/translate_with_gemini_test.py @@ -17,4 +17,4 @@ def test_translate_text_with_gemini() -> None: response = translate_with_gemini.translate_text("Hello World!", "fr") - assert "Bonjour le monde" in response.candidates[0].content.text + assert response.candidates[0].content.text From c176e96407958d2cde2dd25911540e2590c59c72 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 20:37:42 +0200 Subject: [PATCH 09/19] feat(genai): cleanup --- .../samples/snippets/translate_with_gemini.py | 67 ------------------- .../snippets/translate_with_gemini_test.py | 20 ------ 2 files changed, 87 deletions(-) delete mode 100644 
translate/samples/snippets/translate_with_gemini.py delete mode 100644 translate/samples/snippets/translate_with_gemini_test.py diff --git a/translate/samples/snippets/translate_with_gemini.py b/translate/samples/snippets/translate_with_gemini.py deleted file mode 100644 index 953d2b9150..0000000000 --- a/translate/samples/snippets/translate_with_gemini.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# [START aiplatform_gemini_translate] -import os - -import vertexai -from vertexai.generative_models import GenerationResponse, GenerativeModel, Part - -PROJECT_ID = os.environ.get("GOOGLE_CLOUD_PROJECT") - - -def translate_text(text: str, target_language_code: str = "fr") -> GenerationResponse: - """Translates the given text to the specified target language using the Gemini model. - Args: - text (str): The text to be translated. - target_language_code (str): The language code of the target language. Defaults to "fr" (French). - Available language codes: https://cloud.google.com/translate/docs/languages#neural_machine_translation_model - Returns: - responses: The response from the model containing the translated text. 
- """ - # Initializes the Vertex AI with the specified project and location - vertexai.init(project=PROJECT_ID, location="us-central1") - - model = GenerativeModel("gemini-2.0-flash-001") - - # Configuration for the text generation - generation_config = { - "candidate_count": 1, - "max_output_tokens": 50, - "temperature": 0.1, - "top_k": 1, - "top_p": 1.0, - } - - # Creates a prompt with the text to be translated and the target language code - promt = Part.from_text( - f"TEXT_TO_TRANSLATE:{text}. TARGET_LANGUAGE_CODE:{target_language_code}." - ) - - responses = model.generate_content( - contents=[promt], - generation_config=generation_config, - ) - - print(responses.candidates[0].content.text) - # Example response: - # Bonjour ! Comment allez-vous aujourd'hui ? - - return responses - - -# [END aiplatform_gemini_translate] - -if __name__ == "__main__": - translate_text(text="Hello! How are you doing today?", target_language_code="fr") diff --git a/translate/samples/snippets/translate_with_gemini_test.py b/translate/samples/snippets/translate_with_gemini_test.py deleted file mode 100644 index 720c4146b4..0000000000 --- a/translate/samples/snippets/translate_with_gemini_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import translate_with_gemini - - -def test_translate_text_with_gemini() -> None: - response = translate_with_gemini.translate_text("Hello World!", "fr") - assert response.candidates[0].content.text From 3a3e1822bc2cbe3703e717fbeccd7a98a31f2093 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 31 Mar 2025 20:48:43 +0200 Subject: [PATCH 10/19] feat(genai): cleanup --- genai/content_cache/noxfile_config.py | 42 ----------------------- genai/content_cache/requirements-test.txt | 2 -- genai/content_cache/requirements.txt | 1 - 3 files changed, 45 deletions(-) delete mode 100644 genai/content_cache/noxfile_config.py delete mode 100644 genai/content_cache/requirements-test.txt delete mode 100644 genai/content_cache/requirements.txt diff --git a/genai/content_cache/noxfile_config.py b/genai/content_cache/noxfile_config.py deleted file mode 100644 index 9a94062de7..0000000000 --- a/genai/content_cache/noxfile_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default TEST_CONFIG_OVERRIDE for python repos. - -# You can copy this file into your directory, then it will be imported from -# the noxfile.py. - -# The source of truth: -# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py - -TEST_CONFIG_OVERRIDE = { - # You can opt out from the test for specific Python versions. 
- "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.12"], - # Old samples are opted out of enforcing Python type hints - # All new samples should feature them - "enforce_type_hints": True, - # An envvar key for determining the project id to use. Change it - # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a - # build specific Cloud project. You can also use your own string - # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", - # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # If you need to use a specific version of pip, - # change pip_version_override to the string representation - # of the version number, for example, "20.2.4" - "pip_version_override": None, - # A dictionary you want to inject into your test. Don't put any - # secrets here. These values will override predefined values. - "envs": {}, -} \ No newline at end of file diff --git a/genai/content_cache/requirements-test.txt b/genai/content_cache/requirements-test.txt deleted file mode 100644 index e43b779272..0000000000 --- a/genai/content_cache/requirements-test.txt +++ /dev/null @@ -1,2 +0,0 @@ -google-api-core==2.24.0 -pytest==8.2.0 diff --git a/genai/content_cache/requirements.txt b/genai/content_cache/requirements.txt deleted file mode 100644 index 73d0828cb4..0000000000 --- a/genai/content_cache/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-genai==1.7.0 From 3fc518f42a4c464adb3ac15fe24af7003c357d09 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Wed, 2 Apr 2025 12:56:10 -0500 Subject: [PATCH 11/19] re-add context caching samples with Gemini 2.0 --- .../contentcache_create_with_txt_gcs_pdf.py | 65 +++++++++++++++++++ genai/content_cache/contentcache_delete.py | 35 ++++++++++ genai/content_cache/contentcache_list.py | 42 ++++++++++++ genai/content_cache/contentcache_update.py | 59 +++++++++++++++++ .../contentcache_use_with_txt.py | 42 ++++++++++++ genai/content_cache/noxfile_config.py | 42 ++++++++++++ 
genai/content_cache/requirements-test.txt | 2 + genai/content_cache/requirements.txt | 1 + .../test_content_cache_examples.py | 49 ++++++++++++++ 9 files changed, 337 insertions(+) create mode 100644 genai/content_cache/contentcache_create_with_txt_gcs_pdf.py create mode 100644 genai/content_cache/contentcache_delete.py create mode 100644 genai/content_cache/contentcache_list.py create mode 100644 genai/content_cache/contentcache_update.py create mode 100644 genai/content_cache/contentcache_use_with_txt.py create mode 100644 genai/content_cache/noxfile_config.py create mode 100644 genai/content_cache/requirements-test.txt create mode 100644 genai/content_cache/requirements.txt create mode 100644 genai/content_cache/test_content_cache_examples.py diff --git a/genai/content_cache/contentcache_create_with_txt_gcs_pdf.py b/genai/content_cache/contentcache_create_with_txt_gcs_pdf.py new file mode 100644 index 0000000000..8b92e65b17 --- /dev/null +++ b/genai/content_cache/contentcache_create_with_txt_gcs_pdf.py @@ -0,0 +1,65 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def create_content_cache() -> str: + # [START googlegenaisdk_contentcache_create_with_txt_gcs_pdf] + from google import genai + from google.genai.types import Content, CreateCachedContentConfig, HttpOptions, Part + + client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) + + system_instruction = """ + You are an expert researcher. 
You always stick to the facts in the sources provided, and never make up new facts. + Now look at these research papers, and answer the following questions. + """ + + contents = [ + Content( + role="user", + parts=[ + Part.from_uri( + file_uri="gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf", + mime_type="application/pdf", + ), + Part.from_uri( + file_uri="gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf", + mime_type="application/pdf", + ), + ], + ) + ] + + content_cache = client.caches.create( + model="gemini-2.0-flash-001", + config=CreateCachedContentConfig( + contents=contents, + system_instruction=system_instruction, + display_name="example-cache", + ttl="86400s", + ), + ) + + print(content_cache.name) + print(content_cache.usage_metadata) + # Example response: + # projects/111111111111/locations/us-central1/cachedContents/1111111111111111111 + # CachedContentUsageMetadata(audio_duration_seconds=None, image_count=167, + # text_count=153, total_token_count=43130, video_duration_seconds=None) + # [END googlegenaisdk_contentcache_create_with_txt_gcs_pdf] + return content_cache.name + + +if __name__ == "__main__": + create_content_cache() diff --git a/genai/content_cache/contentcache_delete.py b/genai/content_cache/contentcache_delete.py new file mode 100644 index 0000000000..9b8b331094 --- /dev/null +++ b/genai/content_cache/contentcache_delete.py @@ -0,0 +1,35 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +def delete_context_caches(cache_name: str) -> str: + # [START googlegenaisdk_contentcache_delete] + from google import genai + from google.genai.types import HttpOptions + + client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) + + # Delete content cache using name + # E.g cache_name = 'projects/111111111111/locations/us-central1/cachedContents/1111111111111111111' + client.caches.delete(name=cache_name) + print("Deleted Cache", cache_name) + # Example response + # Deleted Cache projects/111111111111/locations/us-central1/cachedContents/1111111111111111111 + # [END googlegenaisdk_contentcache_delete] + return cache_name + + +if __name__ == "__main__": + cache_name = input("Cache Name: ") + delete_context_caches(cache_name) diff --git a/genai/content_cache/contentcache_list.py b/genai/content_cache/contentcache_list.py new file mode 100644 index 0000000000..112fc9c43d --- /dev/null +++ b/genai/content_cache/contentcache_list.py @@ -0,0 +1,42 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +def list_context_caches() -> str: + # [START googlegenaisdk_contentcache_list] + from google import genai + from google.genai.types import HttpOptions + + client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) + + content_cache_list = client.caches.list() + + # Access individual properties of a ContentCache object(s) + for content_cache in content_cache_list: + print(f"Cache `{content_cache.name}` for model `{content_cache.model}`") + print(f"Last updated at: {content_cache.update_time}") + print(f"Expires at: {content_cache.expire_time}") + + # Example response: + # * Cache `projects/111111111111/locations/us-central1/cachedContents/1111111111111111111` for + # model `projects/111111111111/locations/us-central1/publishers/google/models/gemini-XXX-pro-XXX` + # * Last updated at: 2025-02-13 14:46:42.620490+00:00 + # * CachedContentUsageMetadata(audio_duration_seconds=None, image_count=167, text_count=153, total_token_count=43130, video_duration_seconds=None) + # ... + # [END googlegenaisdk_contentcache_list] + return [content_cache.name for content_cache in content_cache_list] + + +if __name__ == "__main__": + list_context_caches() diff --git a/genai/content_cache/contentcache_update.py b/genai/content_cache/contentcache_update.py new file mode 100644 index 0000000000..56748ce7ef --- /dev/null +++ b/genai/content_cache/contentcache_update.py @@ -0,0 +1,59 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +def update_content_cache(cache_name: str) -> str: + # [START googlegenaisdk_contentcache_update] + from datetime import datetime as dt + from datetime import timezone as tz + from datetime import timedelta + + from google import genai + from google.genai.types import HttpOptions, UpdateCachedContentConfig + + client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) + + # Get content cache by name + # cache_name = "projects/111111111111/locations/us-central1/cachedContents/1111111111111111111" + content_cache = client.caches.get(name=cache_name) + print("Expire time", content_cache.expire_time) + # Example response + # Expire time 2025-02-20 15:50:18.434482+00:00 + + # Update expire time using TTL + content_cache = client.caches.update( + name=cache_name, config=UpdateCachedContentConfig(ttl="36000s") + ) + time_diff = content_cache.expire_time - dt.now(tz.utc) + print("Expire time(after update):", content_cache.expire_time) + print("Expire time(in seconds):", time_diff.seconds) + # Example response + # Expire time(after update): 2025-02-14 01:51:42.571696+00:00 + # Expire time(in seconds): 35999 + + # Update expire time using specific time stamp + next_week_utc = dt.now(tz.utc) + timedelta(days=7) + content_cache = client.caches.update( + name=cache_name, config=UpdateCachedContentConfig(expireTime=next_week_utc) + ) + print("Expire time(after update):", content_cache.expire_time) + # Example response + # Expire time(after update): 2025-02-20 15:51:42.614968+00:00 + # [END googlegenaisdk_contentcache_update] + return cache_name + + +if __name__ == "__main__": + cache_name = input("Cache Name: ") + update_content_cache(cache_name) diff --git a/genai/content_cache/contentcache_use_with_txt.py b/genai/content_cache/contentcache_use_with_txt.py new file mode 100644 index 0000000000..94d3ceedea --- /dev/null +++ b/genai/content_cache/contentcache_use_with_txt.py @@ -0,0 +1,42 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def generate_content(cache_name: str) -> str: + # [START googlegenaisdk_contentcache_use_with_txt] + from google import genai + from google.genai.types import GenerateContentConfig, HttpOptions + + client = genai.Client(http_options=HttpOptions(api_version="v1beta1")) + + # Use content cache to generate text response + # E.g cache_name = 'projects/111111111111/locations/us-central1/cachedContents/1111111111111111111' + response = client.models.generate_content( + model="gemini-2.0-flash-001", + contents="Summarize the pdfs", + config=GenerateContentConfig( + cached_content=cache_name, + ), + ) + print(response.text) + # Example response + # The Gemini family of multimodal models from Google DeepMind demonstrates remarkable capabilities across various + # modalities, including image, audio, video, and text.... + # [END googlegenaisdk_contentcache_use_with_txt] + return response.text + + +if __name__ == "__main__": + cache_name = input("Cache Name: ") + generate_content(cache_name) diff --git a/genai/content_cache/noxfile_config.py b/genai/content_cache/noxfile_config.py new file mode 100644 index 0000000000..2a0f115c38 --- /dev/null +++ b/genai/content_cache/noxfile_config.py @@ -0,0 +1,42 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.12"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": True, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. 
+ "envs": {}, +} diff --git a/genai/content_cache/requirements-test.txt b/genai/content_cache/requirements-test.txt new file mode 100644 index 0000000000..e43b779272 --- /dev/null +++ b/genai/content_cache/requirements-test.txt @@ -0,0 +1,2 @@ +google-api-core==2.24.0 +pytest==8.2.0 diff --git a/genai/content_cache/requirements.txt b/genai/content_cache/requirements.txt new file mode 100644 index 0000000000..73d0828cb4 --- /dev/null +++ b/genai/content_cache/requirements.txt @@ -0,0 +1 @@ +google-genai==1.7.0 diff --git a/genai/content_cache/test_content_cache_examples.py b/genai/content_cache/test_content_cache_examples.py new file mode 100644 index 0000000000..d7d9e5abda --- /dev/null +++ b/genai/content_cache/test_content_cache_examples.py @@ -0,0 +1,49 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import contentcache_create_with_txt_gcs_pdf +import contentcache_delete +import contentcache_list +import contentcache_update +import contentcache_use_with_txt + + +os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "True" +os.environ["GOOGLE_CLOUD_LOCATION"] = "us-central1" +# The project name is included in the CICD pipeline +# os.environ['GOOGLE_CLOUD_PROJECT'] = "add-your-project-name" + + +def test_content_cache() -> None: + # Create a Cache + cache_name = contentcache_create_with_txt_gcs_pdf.create_content_cache() + assert cache_name + + # List cache + assert contentcache_list.list_context_caches() + + # Update cache + assert contentcache_update.update_content_cache(cache_name) + + # Use cache + assert contentcache_use_with_txt.generate_content(cache_name) + + # Delete cache + assert contentcache_delete.delete_context_caches(cache_name) + + +if __name__ == "__main__": + test_content_cache() From 3b90ed948d43f5ce19ef0a83ec8b096e49877941 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 3 Apr 2025 10:36:32 -0500 Subject: [PATCH 12/19] Re-add Basic Example for text generation with Vertex AI SDK for Gemini 2.0 Flash Used in https://cloud.google.com/vertex-ai/generative-ai/docs/migrate-to-v2 --- .../text_generation/text_example01.py | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 generative_ai/text_generation/text_example01.py diff --git a/generative_ai/text_generation/text_example01.py b/generative_ai/text_generation/text_example01.py new file mode 100644 index 0000000000..744ec4ee1e --- /dev/null +++ b/generative_ai/text_generation/text_example01.py @@ -0,0 +1,47 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def generate_from_text_input() -> str: + # [START generativeaionvertexai_gemini_generate_from_text_input] + import vertexai + from vertexai.generative_models import GenerativeModel + + # TODO(developer): Update and un-comment below line + # PROJECT_ID = "your-project-id" + vertexai.init(project=PROJECT_ID, location="us-central1") + + model = GenerativeModel("gemini-2.0-flash-001") + + response = model.generate_content( + "What's a good name for a flower shop that specializes in selling bouquets of dried flowers?" + ) + + print(response.text) + # Example response: + # **Emphasizing the Dried Aspect:** + # * Everlasting Blooms + # * Dried & Delightful + # * The Petal Preserve + # ... 
+ + # [END generativeaionvertexai_gemini_generate_from_text_input] + return response.text + + +if __name__ == "__main__": + generate_from_text_input() From 8866059cfe88afe72ef905d774aac06239dbd57e Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 3 Apr 2025 10:49:17 -0500 Subject: [PATCH 13/19] Move chat completions function calling sample to chat_completions directory --- ...chat_completions_function_calling_basic.py | 87 ++++++++++++++++++ ...hat_completions_function_calling_config.py | 88 +++++++++++++++++++ 2 files changed, 175 insertions(+) create mode 100644 generative_ai/chat_completions/chat_completions_function_calling_basic.py create mode 100644 generative_ai/chat_completions/chat_completions_function_calling_config.py diff --git a/generative_ai/chat_completions/chat_completions_function_calling_basic.py b/generative_ai/chat_completions/chat_completions_function_calling_basic.py new file mode 100644 index 0000000000..d64c9aa149 --- /dev/null +++ b/generative_ai/chat_completions/chat_completions_function_calling_basic.py @@ -0,0 +1,87 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def generate_text() -> object: + # [START generativeaionvertexai_gemini_chat_completions_function_calling_basic] + import openai + + from google.auth import default, transport + + # TODO(developer): Update & uncomment below line + # PROJECT_ID = "your-project-id" + location = "us-central1" + + # Programmatically get an access token + credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) + auth_request = transport.requests.Request() + credentials.refresh(auth_request) + + # # OpenAI Client + client = openai.OpenAI( + base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{location}/endpoints/openapi", + api_key=credentials.token, + ) + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616", + }, + }, + "required": ["location"], + }, + }, + } + ] + + messages = [] + messages.append( + { + "role": "system", + "content": "Don't make assumptions about what values to plug into functions. 
Ask for clarification if a user request is ambiguous.", + } + ) + messages.append({"role": "user", "content": "What is the weather in Boston?"}) + + response = client.chat.completions.create( + model="google/gemini-2.0-flash-001", + messages=messages, + tools=tools, + ) + + print("Function:", response.choices[0].message.tool_calls[0].id) + print("Arguments:", response.choices[0].message.tool_calls[0].function.arguments) + # Example response: + # Function: get_current_weather + # Arguments: {"location":"Boston"} + + # [END generativeaionvertexai_gemini_chat_completions_function_calling_basic] + return response + + +if __name__ == "__main__": + generate_text() diff --git a/generative_ai/chat_completions/chat_completions_function_calling_config.py b/generative_ai/chat_completions/chat_completions_function_calling_config.py new file mode 100644 index 0000000000..80b00ac993 --- /dev/null +++ b/generative_ai/chat_completions/chat_completions_function_calling_config.py @@ -0,0 +1,88 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def generate_text() -> object: + # [START generativeaionvertexai_gemini_chat_completions_function_calling_config] + import openai + + from google.auth import default, transport + + # TODO(developer): Update & uncomment below line + # PROJECT_ID = "your-project-id" + location = "us-central1" + + # Programmatically get an access token + credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) + auth_request = transport.requests.Request() + credentials.refresh(auth_request) + + # OpenAI Client + client = openai.OpenAI( + base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{location}/endpoints/openapi", + api_key=credentials.token, + ) + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616", + }, + }, + "required": ["location"], + }, + }, + } + ] + + messages = [] + messages.append( + { + "role": "system", + "content": "Don't make assumptions about what values to plug into functions. 
Ask for clarification if a user request is ambiguous.", + } + ) + messages.append({"role": "user", "content": "What is the weather in Boston, MA?"}) + + response = client.chat.completions.create( + model="google/gemini-2.0-flash-001", + messages=messages, + tools=tools, + tool_choice="auto", + ) + + print("Function:", response.choices[0].message.tool_calls[0].id) + print("Arguments:", response.choices[0].message.tool_calls[0].function.arguments) + # Example response: + # Function: get_current_weather + # Arguments: {"location":"Boston"} + # [END generativeaionvertexai_gemini_chat_completions_function_calling_config] + + return response + + +if __name__ == "__main__": + generate_text() From 7fba7a3acc869d18894a4df33ec54c5fc6449756 Mon Sep 17 00:00:00 2001 From: Holt Skinner Date: Thu, 3 Apr 2025 10:56:25 -0500 Subject: [PATCH 14/19] Restore Reasoning Engine/Agent Engine --- ...reate_reasoning_engine_advanced_example.py | 101 ++++++++++++++++++ .../create_reasoning_engine_example.py | 72 +++++++++++++ .../delete_reasoning_engine_example.py | 40 +++++++ .../get_reasoning_engine_example.py | 42 ++++++++ .../list_reasoning_engine_example.py | 45 ++++++++ .../reasoning_engine/noxfile_config.py | 42 ++++++++ .../query_reasoning_engine_example.py | 41 +++++++ .../reasoning_engine/requirements-test.txt | 4 + .../reasoning_engine/requirements.txt | 14 +++ .../test_reasoning_engine_examples.py | 77 +++++++++++++ 10 files changed, 478 insertions(+) create mode 100644 generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py create mode 100644 generative_ai/reasoning_engine/create_reasoning_engine_example.py create mode 100644 generative_ai/reasoning_engine/delete_reasoning_engine_example.py create mode 100644 generative_ai/reasoning_engine/get_reasoning_engine_example.py create mode 100644 generative_ai/reasoning_engine/list_reasoning_engine_example.py create mode 100644 generative_ai/reasoning_engine/noxfile_config.py create mode 100644 
generative_ai/reasoning_engine/query_reasoning_engine_example.py create mode 100644 generative_ai/reasoning_engine/requirements-test.txt create mode 100644 generative_ai/reasoning_engine/requirements.txt create mode 100644 generative_ai/reasoning_engine/test_reasoning_engine_examples.py diff --git a/generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py b/generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py new file mode 100644 index 0000000000..f0a935ec01 --- /dev/null +++ b/generative_ai/reasoning_engine/create_reasoning_engine_advanced_example.py @@ -0,0 +1,101 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +from typing import Dict, Union + +from vertexai.preview import reasoning_engines + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def create_reasoning_engine_advanced( + staging_bucket: str, +) -> reasoning_engines.ReasoningEngine: + # [START generativeaionvertexai_create_reasoning_engine_advanced] + + from typing import List + + import vertexai + from vertexai.preview import reasoning_engines + + # TODO(developer): Update and un-comment below lines + # PROJECT_ID = "your-project-id" + # staging_bucket = "gs://YOUR_BUCKET_NAME" + + vertexai.init( + project=PROJECT_ID, location="us-central1", staging_bucket=staging_bucket + ) + + class LangchainApp: + def __init__(self, project: str, location: str) -> None: + self.project_id = project + self.location = location + + def set_up(self) -> None: + from langchain_core.prompts import ChatPromptTemplate + from langchain_google_vertexai import ChatVertexAI + + system = ( + "You are a helpful assistant that answers questions " + "about Google Cloud." + ) + human = "{text}" + prompt = ChatPromptTemplate.from_messages( + [("system", system), ("human", human)] + ) + chat = ChatVertexAI(project=self.project_id, location=self.location) + self.chain = prompt | chat + + def query(self, question: str) -> Union[str, List[Union[str, Dict]]]: + """Query the application. + Args: + question: The user prompt. + Returns: + str: The LLM response. + """ + return self.chain.invoke({"text": question}).content + + # Locally test + app = LangchainApp(project=PROJECT_ID, location="us-central1") + app.set_up() + print(app.query("What is Vertex AI?")) + + # Create a remote app with Reasoning Engine + # Deployment of the app should take a few minutes to complete. 
+ reasoning_engine = reasoning_engines.ReasoningEngine.create( + LangchainApp(project=PROJECT_ID, location="us-central1"), + requirements=[ + "google-cloud-aiplatform[langchain,reasoningengine]", + "cloudpickle==3.0.0", + "pydantic==2.7.4", + ], + display_name="Demo LangChain App", + description="This is a simple LangChain app.", + # sys_version="3.10", # Optional + extra_packages=[], + ) + # Example response: + # Model_name will become a required arg for VertexAIEmbeddings starting... + # ... + # Create ReasoningEngine backing LRO: projects/123456789/locations/us-central1/reasoningEngines/... + # ReasoningEngine created. Resource name: projects/123456789/locations/us-central1/reasoningEngines/... + # ... + + # [END generativeaionvertexai_create_reasoning_engine_advanced] + return reasoning_engine + + +if __name__ == "__main__": + create_reasoning_engine_advanced("gs://your-bucket-unique-name") diff --git a/generative_ai/reasoning_engine/create_reasoning_engine_example.py b/generative_ai/reasoning_engine/create_reasoning_engine_example.py new file mode 100644 index 0000000000..c2ba4eba5f --- /dev/null +++ b/generative_ai/reasoning_engine/create_reasoning_engine_example.py @@ -0,0 +1,72 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +from vertexai.preview import reasoning_engines + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def create_reasoning_engine_basic( + staging_bucket: str, +) -> reasoning_engines.ReasoningEngine: + # [START generativeaionvertexai_create_reasoning_engine_basic] + import vertexai + from vertexai.preview import reasoning_engines + + # TODO(developer): Update and un-comment below lines + # PROJECT_ID = "your-project-id" + # staging_bucket = "gs://YOUR_BUCKET_NAME" + vertexai.init( + project=PROJECT_ID, location="us-central1", staging_bucket=staging_bucket + ) + + class SimpleAdditionApp: + def query(self, a: int, b: int) -> str: + """Query the application. + Args: + a: The first input number + b: The second input number + Returns: + str: The addition result. + """ + return f"{int(a)} + {int(b)} is {int(a + b)}" + + # Locally test + app = SimpleAdditionApp() + app.query(a=1, b=2) + + # Create a remote app with Reasoning Engine. + # This may take 1-2 minutes to finish. + reasoning_engine = reasoning_engines.ReasoningEngine.create( + SimpleAdditionApp(), + display_name="Demo Addition App", + description="A simple demo addition app", + requirements=["cloudpickle==3"], + extra_packages=[], + ) + # Example response: + # Using bucket YOUR_BUCKET_NAME + # Writing to gs://YOUR_BUCKET_NAME/reasoning_engine/reasoning_engine.pkl + # ... + # ReasoningEngine created. Resource name: projects/123456789/locations/us-central1/reasoningEngines/123456 + # To use this ReasoningEngine in another session: + # reasoning_engine = vertexai.preview.reasoning_engines.ReasoningEngine('projects/123456789/locations/... 
+ + # [END generativeaionvertexai_create_reasoning_engine_basic] + return reasoning_engine + + +if __name__ == "__main__": + create_reasoning_engine_basic("gs://your-bucket-unique-name") diff --git a/generative_ai/reasoning_engine/delete_reasoning_engine_example.py b/generative_ai/reasoning_engine/delete_reasoning_engine_example.py new file mode 100644 index 0000000000..9f4e019b0e --- /dev/null +++ b/generative_ai/reasoning_engine/delete_reasoning_engine_example.py @@ -0,0 +1,40 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def delete_reasoning_engine(reasoning_engine_id: str) -> None: + # [START generativeaionvertexai_delete_reasoning_engine] + import vertexai + from vertexai.preview import reasoning_engines + + # TODO(developer): Update and un-comment below lines + # PROJECT_ID = "your-project-id" + # reasoning_engine_id = "1234567890123456" + vertexai.init(project=PROJECT_ID, location="us-central1") + + reasoning_engine = reasoning_engines.ReasoningEngine(reasoning_engine_id) + reasoning_engine.delete() + # Example response: + # Deleting ReasoningEngine:projects/[PROJECT_ID]/locations/us-central1/reasoningEngines/1234567890123456 + # ... + # ... resource projects/[PROJECT_ID]/locations/us-central1/reasoningEngines/1234567890123456 deleted. 
+ + # [END generativeaionvertexai_delete_reasoning_engine] + + +if __name__ == "__main__": + delete_reasoning_engine("1234567890123456") diff --git a/generative_ai/reasoning_engine/get_reasoning_engine_example.py b/generative_ai/reasoning_engine/get_reasoning_engine_example.py new file mode 100644 index 0000000000..956015e073 --- /dev/null +++ b/generative_ai/reasoning_engine/get_reasoning_engine_example.py @@ -0,0 +1,42 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +from vertexai.preview import reasoning_engines + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def get_reasoning_engine(reasoning_engine_id: str) -> reasoning_engines.ReasoningEngine: + # [START generativeaionvertexai_get_reasoning_engine] + import vertexai + from vertexai.preview import reasoning_engines + + # TODO(developer): Update and un-comment below lines + # PROJECT_ID = "your-project-id" + # reasoning_engine_id = "1234567890123456" + vertexai.init(project=PROJECT_ID, location="us-central1") + + reasoning_engine = reasoning_engines.ReasoningEngine(reasoning_engine_id) + print(reasoning_engine) + # Example response: + # + # resource name: projects/[PROJECT_ID]/locations/us-central1/reasoningEngines/1234567890123456 + + # [END generativeaionvertexai_get_reasoning_engine] + return reasoning_engine + + +if __name__ == "__main__": + get_reasoning_engine("1234567890123456") diff --git a/generative_ai/reasoning_engine/list_reasoning_engine_example.py b/generative_ai/reasoning_engine/list_reasoning_engine_example.py new file mode 100644 index 0000000000..c0354d7f4d --- /dev/null +++ b/generative_ai/reasoning_engine/list_reasoning_engine_example.py @@ -0,0 +1,45 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +from typing import List + +from vertexai.preview import reasoning_engines + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def list_reasoning_engines() -> List[reasoning_engines.ReasoningEngine]: + # [START generativeaionvertexai_list_reasoning_engines] + import vertexai + from vertexai.preview import reasoning_engines + + # TODO(developer): Update and un-comment below line + # PROJECT_ID = "your-project-id" + vertexai.init(project=PROJECT_ID, location="us-central1") + + reasoning_engine_list = reasoning_engines.ReasoningEngine.list() + print(reasoning_engine_list) + # Example response: + # [ + # resource name: projects/123456789/locations/us-central1/reasoningEngines/111111111111111111, + # + # resource name: projects/123456789/locations/us-central1/reasoningEngines/222222222222222222] + + # [END generativeaionvertexai_list_reasoning_engines] + return reasoning_engine_list + + +if __name__ == "__main__": + list_reasoning_engines() diff --git a/generative_ai/reasoning_engine/noxfile_config.py b/generative_ai/reasoning_engine/noxfile_config.py new file mode 100644 index 0000000000..962ba40a92 --- /dev/null +++ b/generative_ai/reasoning_engine/noxfile_config.py @@ -0,0 +1,42 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. 
+ +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": True, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} diff --git a/generative_ai/reasoning_engine/query_reasoning_engine_example.py b/generative_ai/reasoning_engine/query_reasoning_engine_example.py new file mode 100644 index 0000000000..bdaa3d39be --- /dev/null +++ b/generative_ai/reasoning_engine/query_reasoning_engine_example.py @@ -0,0 +1,41 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def query_reasoning_engine(reasoning_engine_id: str) -> object: + # [START generativeaionvertexai_query_reasoning_engine] + import vertexai + from vertexai.preview import reasoning_engines + + # TODO(developer): Update and un-comment below lines + # PROJECT_ID = "your-project-id" + # reasoning_engine_id = "1234567890123456" + vertexai.init(project=PROJECT_ID, location="us-central1") + reasoning_engine = reasoning_engines.ReasoningEngine(reasoning_engine_id) + + # Replace with kwargs for `.query()` method. + response = reasoning_engine.query(a=1, b=2) + print(response) + # Example response: + # 1 + 2 is 3 + + # [END generativeaionvertexai_query_reasoning_engine] + return response + + +if __name__ == "__main__": + query_reasoning_engine("1234567890123456") diff --git a/generative_ai/reasoning_engine/requirements-test.txt b/generative_ai/reasoning_engine/requirements-test.txt new file mode 100644 index 0000000000..92281986e5 --- /dev/null +++ b/generative_ai/reasoning_engine/requirements-test.txt @@ -0,0 +1,4 @@ +backoff==2.2.1 +google-api-core==2.19.0 +pytest==8.2.0 +pytest-asyncio==0.23.6 diff --git a/generative_ai/reasoning_engine/requirements.txt b/generative_ai/reasoning_engine/requirements.txt new file mode 100644 index 0000000000..09178aa830 --- /dev/null +++ b/generative_ai/reasoning_engine/requirements.txt @@ -0,0 +1,14 @@ +pandas==2.2.3; python_version == '3.7' +pandas==2.2.3; python_version == '3.8' +pandas==2.2.3; python_version > '3.8' +pillow==10.3.0; python_version < '3.8' +pillow==10.3.0; python_version >= '3.8' +google-cloud-aiplatform[all]==1.69.0 +sentencepiece==0.2.0 +google-auth==2.38.0 +anthropic[vertex]==0.28.0 +langchain-core==0.2.33 +langchain-google-vertexai==1.0.10 +numpy<3 +openai==1.68.2 +immutabledict==4.2.0 diff --git a/generative_ai/reasoning_engine/test_reasoning_engine_examples.py b/generative_ai/reasoning_engine/test_reasoning_engine_examples.py new file mode 100644 
index 0000000000..366f6d25b0 --- /dev/null +++ b/generative_ai/reasoning_engine/test_reasoning_engine_examples.py @@ -0,0 +1,77 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Generator + +import pytest + +import create_reasoning_engine_advanced_example +import create_reasoning_engine_example +import delete_reasoning_engine_example +import get_reasoning_engine_example +import list_reasoning_engine_example +import query_reasoning_engine_example + + +STAGING_BUCKET = "gs://ucaip-samples-us-central1" + + +@pytest.fixture(scope="module") +def reasoning_engine_id() -> Generator[str, None, None]: + reasoning_engine = create_reasoning_engine_example.create_reasoning_engine_basic( + STAGING_BUCKET + ) + yield reasoning_engine.resource_name + print("Deleting Reasoning Engine...") + delete_reasoning_engine_example.delete_reasoning_engine( + reasoning_engine.resource_name + ) + + +@pytest.mark.skip("TODO: Reasoning Engine Deployment Issue b/339643184") +def test_create_reasoning_engine_basic(reasoning_engine_id: str) -> None: + assert reasoning_engine_id + + +@pytest.mark.skip("TODO: Reasoning Engine Deployment Issue b/339643184") +def test_create_reasoning_engine_advanced() -> None: + reasoning_engine = ( + create_reasoning_engine_advanced_example.create_reasoning_engine_advanced( + STAGING_BUCKET + ) + ) + assert reasoning_engine + delete_reasoning_engine_example.delete_reasoning_engine( + reasoning_engine.resource_name + ) 
+ + +@pytest.mark.skip("TODO: Resolve issue b/348193408") +def test_query_reasoning_engine(reasoning_engine_id: str) -> None: + response = query_reasoning_engine_example.query_reasoning_engine( + reasoning_engine_id + ) + assert response + assert response == "1 + 2 is 3" + + +def test_list_reasoning_engines() -> None: + response = list_reasoning_engine_example.list_reasoning_engines() + assert response + + +@pytest.mark.skip("TODO: Resolve issue b/348193408") +def test_get_reasoning_engine(reasoning_engine_id: str) -> None: + response = get_reasoning_engine_example.get_reasoning_engine(reasoning_engine_id) + assert response From 9e775ff4350c37354ff11d97893014019958c4b9 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 7 Apr 2025 10:03:55 -0700 Subject: [PATCH 15/19] feat(genai): add 2.0 chat function calling examples --- .../chat_completions/chat_completions_test.py | 10 ++ .../chat_function_calling_basic.py | 90 ++++++++++++++++++ .../chat_function_calling_config.py | 91 +++++++++++++++++++ 3 files changed, 191 insertions(+) create mode 100644 generative_ai/chat_completions/chat_function_calling_basic.py create mode 100644 generative_ai/chat_completions/chat_function_calling_config.py diff --git a/generative_ai/chat_completions/chat_completions_test.py b/generative_ai/chat_completions/chat_completions_test.py index 56489b53fc..3d0d629184 100644 --- a/generative_ai/chat_completions/chat_completions_test.py +++ b/generative_ai/chat_completions/chat_completions_test.py @@ -22,6 +22,8 @@ import chat_completions_streaming_image import chat_completions_streaming_text import chat_completions_streaming_text_self_deployed +import chat_function_calling_basic +import chat_function_calling_config PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") @@ -30,6 +32,14 @@ ENDPOINT_ID = "6714120476014149632" +def test_chat_function_calling_basic() -> None: + assert chat_function_calling_basic.generate_text() + + +def test_chat_function_calling_config() -> None: + assert 
chat_function_calling_config.generate_text() + + def test_authentication() -> None: response = chat_completions_authentication.generate_text(PROJECT_ID, LOCATION) assert response diff --git a/generative_ai/chat_completions/chat_function_calling_basic.py b/generative_ai/chat_completions/chat_function_calling_basic.py new file mode 100644 index 0000000000..9731a41582 --- /dev/null +++ b/generative_ai/chat_completions/chat_function_calling_basic.py @@ -0,0 +1,90 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def generate_text() -> object: + # [START generativeaionvertexai_gemini_chat_completions_function_calling_basic] + import vertexai + import openai + + from google.auth import default, transport + + # TODO(developer): Update & uncomment below line + # PROJECT_ID = "your-project-id" + location = "us-central1" + + vertexai.init(project=PROJECT_ID, location=location) + + # Programmatically get an access token + credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) + auth_request = transport.requests.Request() + credentials.refresh(auth_request) + + # OpenAI Client + client = openai.OpenAI( + base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{location}/endpoints/openapi", + api_key=credentials.token, + ) + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616", + }, + }, + "required": ["location"], + }, + }, + } + ] + + messages = [] + messages.append( + { + "role": "system", + "content": "Don't make assumptions about what values to plug into functions. 
Ask for clarification if a user request is ambiguous.", + } + ) + messages.append({"role": "user", "content": "What is the weather in Boston?"}) + + response = client.chat.completions.create( + model="google/gemini-2.0-flash-001", + messages=messages, + tools=tools, + ) + + print("Function:", response.choices[0].message.tool_calls[0].id) + print("Arguments:", response.choices[0].message.tool_calls[0].function.arguments) + # Example response: + # Function: get_current_weather + # Arguments: {"location":"Boston"} + + # [END generativeaionvertexai_gemini_chat_completions_function_calling_basic] + return response + + +if __name__ == "__main__": + generate_text() diff --git a/generative_ai/chat_completions/chat_function_calling_config.py b/generative_ai/chat_completions/chat_function_calling_config.py new file mode 100644 index 0000000000..720d72db70 --- /dev/null +++ b/generative_ai/chat_completions/chat_function_calling_config.py @@ -0,0 +1,91 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") + + +def generate_text() -> object: + # [START generativeaionvertexai_gemini_chat_completions_function_calling_config] + import vertexai + import openai + + from google.auth import default, transport + + # TODO(developer): Update & uncomment below line + # PROJECT_ID = "your-project-id" + location = "us-central1" + + vertexai.init(project=PROJECT_ID, location=location) + + # Programmatically get an access token + credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) + auth_request = transport.requests.Request() + credentials.refresh(auth_request) + + # OpenAI Client + client = openai.OpenAI( + base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{location}/endpoints/openapi", + api_key=credentials.token, + ) + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616", + }, + }, + "required": ["location"], + }, + }, + } + ] + + messages = [] + messages.append( + { + "role": "system", + "content": "Don't make assumptions about what values to plug into functions. 
Ask for clarification if a user request is ambiguous.", + } + ) + messages.append({"role": "user", "content": "What is the weather in Boston, MA?"}) + + response = client.chat.completions.create( + model="google/gemini-2.0-flash-001", + messages=messages, + tools=tools, + tool_choice="auto", + ) + + print("Function:", response.choices[0].message.tool_calls[0].id) + print("Arguments:", response.choices[0].message.tool_calls[0].function.arguments) + # Example response: + # Function: get_current_weather + # Arguments: {"location":"Boston"} + # [END generativeaionvertexai_gemini_chat_completions_function_calling_config] + + return response + + +if __name__ == "__main__": + generate_text() From c0940eb9b89c87120e4f9b97aa9a538c7a5d3884 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 7 Apr 2025 10:26:56 -0700 Subject: [PATCH 16/19] feat(genai): update chat function calling examples location as per snippet bot test --- .../chat_completions/chat_completions_test.py | 10 ----- .../chat_function_calling_basic.py | 0 .../chat_function_calling_config.py | 0 .../function_calling/noxfile_config.py | 42 +++++++++++++++++++ .../function_calling/requirements-test.txt | 4 ++ .../function_calling/requirements.txt | 2 + .../function_calling/test_function_calling.py | 24 +++++++++++ 7 files changed, 72 insertions(+), 10 deletions(-) rename generative_ai/{chat_completions => function_calling}/chat_function_calling_basic.py (100%) rename generative_ai/{chat_completions => function_calling}/chat_function_calling_config.py (100%) create mode 100644 generative_ai/function_calling/noxfile_config.py create mode 100644 generative_ai/function_calling/requirements-test.txt create mode 100644 generative_ai/function_calling/requirements.txt create mode 100644 generative_ai/function_calling/test_function_calling.py diff --git a/generative_ai/chat_completions/chat_completions_test.py b/generative_ai/chat_completions/chat_completions_test.py index 3d0d629184..56489b53fc 100644 --- 
a/generative_ai/chat_completions/chat_completions_test.py +++ b/generative_ai/chat_completions/chat_completions_test.py @@ -22,8 +22,6 @@ import chat_completions_streaming_image import chat_completions_streaming_text import chat_completions_streaming_text_self_deployed -import chat_function_calling_basic -import chat_function_calling_config PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") @@ -32,14 +30,6 @@ ENDPOINT_ID = "6714120476014149632" -def test_chat_function_calling_basic() -> None: - assert chat_function_calling_basic.generate_text() - - -def test_chat_function_calling_config() -> None: - assert chat_function_calling_config.generate_text() - - def test_authentication() -> None: response = chat_completions_authentication.generate_text(PROJECT_ID, LOCATION) assert response diff --git a/generative_ai/chat_completions/chat_function_calling_basic.py b/generative_ai/function_calling/chat_function_calling_basic.py similarity index 100% rename from generative_ai/chat_completions/chat_function_calling_basic.py rename to generative_ai/function_calling/chat_function_calling_basic.py diff --git a/generative_ai/chat_completions/chat_function_calling_config.py b/generative_ai/function_calling/chat_function_calling_config.py similarity index 100% rename from generative_ai/chat_completions/chat_function_calling_config.py rename to generative_ai/function_calling/chat_function_calling_config.py diff --git a/generative_ai/function_calling/noxfile_config.py b/generative_ai/function_calling/noxfile_config.py new file mode 100644 index 0000000000..962ba40a92 --- /dev/null +++ b/generative_ai/function_calling/noxfile_config.py @@ -0,0 +1,42 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7", "3.7", "3.8", "3.10", "3.11", "3.13"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": True, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. 
+ "envs": {}, +} diff --git a/generative_ai/function_calling/requirements-test.txt b/generative_ai/function_calling/requirements-test.txt new file mode 100644 index 0000000000..3b9949d851 --- /dev/null +++ b/generative_ai/function_calling/requirements-test.txt @@ -0,0 +1,4 @@ +backoff==2.2.1 +google-api-core==2.24.0 +pytest==8.2.0 +pytest-asyncio==0.23.6 diff --git a/generative_ai/function_calling/requirements.txt b/generative_ai/function_calling/requirements.txt new file mode 100644 index 0000000000..68076775d7 --- /dev/null +++ b/generative_ai/function_calling/requirements.txt @@ -0,0 +1,2 @@ +google-auth==2.38.0 +openai==1.68.2 diff --git a/generative_ai/function_calling/test_function_calling.py b/generative_ai/function_calling/test_function_calling.py new file mode 100644 index 0000000000..fd522c9881 --- /dev/null +++ b/generative_ai/function_calling/test_function_calling.py @@ -0,0 +1,24 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import chat_function_calling_basic +import chat_function_calling_config + + +def test_chat_function_calling_basic() -> None: + assert chat_function_calling_basic.generate_text() + + +def test_chat_function_calling_config() -> None: + assert chat_function_calling_config.generate_text() From 086f373f724a56b6d70cc18324fb5185abb5e56d Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 7 Apr 2025 10:58:43 -0700 Subject: [PATCH 17/19] fix: update requirements.txt --- generative_ai/function_calling/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/generative_ai/function_calling/requirements.txt b/generative_ai/function_calling/requirements.txt index 68076775d7..2ffbfa4cc6 100644 --- a/generative_ai/function_calling/requirements.txt +++ b/generative_ai/function_calling/requirements.txt @@ -1,2 +1,3 @@ google-auth==2.38.0 openai==1.68.2 +google-cloud-aiplatform==1.86.0 \ No newline at end of file From 5d966a8a3207c60c053b132e90c9db8cff682ad5 Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 7 Apr 2025 21:05:52 -0700 Subject: [PATCH 18/19] clean: comment out un-used samples --- .../prompts/prompt_restore_version.py | 110 +++++++++--------- generative_ai/prompts/test_prompt_template.py | 8 +- 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/generative_ai/prompts/prompt_restore_version.py b/generative_ai/prompts/prompt_restore_version.py index 44473c300f..f2496dfccb 100644 --- a/generative_ai/prompts/prompt_restore_version.py +++ b/generative_ai/prompts/prompt_restore_version.py @@ -1,55 +1,55 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from vertexai.preview.prompts import Prompt - -PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") - - -def restore_prompt_version() -> Prompt: - """Restores specified version for specified prompt.""" - - # [START generativeaionvertexai_prompt_restore_version] - import vertexai - from vertexai.preview import prompts - - # Initialize vertexai - vertexai.init(project=PROJECT_ID, location="us-central1") - - # Create local Prompt - prompt = Prompt( - prompt_name="zoologist", - prompt_data="Which animal is the fastest on earth?", - model_name="gemini-2.0-flash-001", - system_instruction="You are a zoologist. Answer in a short sentence.", - ) - # Save Prompt to online resource. - prompt1 = prompts.create_version(prompt=prompt) - prompt_id = prompt1.prompt_id - - # Restore to prompt version id 1 (original) - prompt_version_metadata = prompts.restore_version(prompt_id=prompt_id, version_id="1") - - # Fetch the newly restored latest version of the prompt - prompt1 = prompts.get(prompt_id=prompt_version_metadata.prompt_id) - - # Example response: - # Restored prompt version 1 under prompt id 12345678910 as version number 2 - # [END generativeaionvertexai_prompt_restore_version] - return prompt1 - - -if __name__ == "__main__": - restore_prompt_version() +# # Copyright 2024 Google LLC +# # +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. 
+# # You may obtain a copy of the License at +# # +# # https://www.apache.org/licenses/LICENSE-2.0 +# # +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# # See the License for the specific language governing permissions and +# # limitations under the License. +# import os +# +# from vertexai.preview.prompts import Prompt +# +# PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT") +# +# +# def restore_prompt_version() -> Prompt: +# """Restores specified version for specified prompt.""" +# +# # [START generativeaionvertexai_prompt_restore_version] +# import vertexai +# from vertexai.preview import prompts +# +# # Initialize vertexai +# vertexai.init(project=PROJECT_ID, location="us-central1") +# +# # Create local Prompt +# prompt = Prompt( +# prompt_name="zoologist", +# prompt_data="Which animal is the fastest on earth?", +# model_name="gemini-2.0-flash-001", +# system_instruction="You are a zoologist. Answer in a short sentence.", +# ) +# # Save Prompt to online resource. 
+# prompt1 = prompts.create_version(prompt=prompt) +# prompt_id = prompt1.prompt_id +# +# # Restore to prompt version id 1 (original) +# prompt_version_metadata = prompts.restore_version(prompt_id=prompt_id, version_id="1") +# +# # Fetch the newly restored latest version of the prompt +# prompt1 = prompts.get(prompt_id=prompt_version_metadata.prompt_id) +# +# # Example response: +# # Restored prompt version 1 under prompt id 12345678910 as version number 2 +# # [END generativeaionvertexai_prompt_restore_version] +# return prompt1 +# +# +# if __name__ == "__main__": +# restore_prompt_version() diff --git a/generative_ai/prompts/test_prompt_template.py b/generative_ai/prompts/test_prompt_template.py index a7749f1eb2..2eb7305783 100644 --- a/generative_ai/prompts/test_prompt_template.py +++ b/generative_ai/prompts/test_prompt_template.py @@ -17,7 +17,7 @@ import prompt_get import prompt_list_prompts import prompt_list_version -import prompt_restore_version +# import prompt_restore_version import prompt_template @@ -51,6 +51,6 @@ def test_prompt_delete() -> None: assert delete_prompt is None -def test_prompt_restore_version() -> None: - prompt1 = prompt_restore_version.restore_prompt_version() - assert prompt1 +# def test_prompt_restore_version() -> None: +# prompt1 = prompt_restore_version.restore_prompt_version() +# assert prompt1 From 3b77869e0ef550f51d558d45c9f73eea96a845bc Mon Sep 17 00:00:00 2001 From: Sampath Kumar Date: Mon, 7 Apr 2025 21:07:40 -0700 Subject: [PATCH 19/19] clean: comment out un-used samples --- discoveryengine/site_search_engine_sample.py | 256 +++++++++--------- .../site_search_engine_sample_test.py | 78 +++--- 2 files changed, 167 insertions(+), 167 deletions(-) diff --git a/discoveryengine/site_search_engine_sample.py b/discoveryengine/site_search_engine_sample.py index 990640a2ce..fd556d09a9 100644 --- a/discoveryengine/site_search_engine_sample.py +++ b/discoveryengine/site_search_engine_sample.py @@ -1,128 +1,128 @@ -# Copyright 2024 
Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -def create_target_site( - project_id: str, - location: str, - data_store_id: str, - uri_pattern: str, -): - # [START genappbuilder_create_target_site] - from google.api_core.client_options import ClientOptions - - from google.cloud import discoveryengine_v1 as discoveryengine - - # TODO(developer): Uncomment these variables before running the sample. - # project_id = "YOUR_PROJECT_ID" - # location = "YOUR_LOCATION" # Values: "global" - # data_store_id = "YOUR_DATA_STORE_ID" - # NOTE: Do not include http or https protocol in the URI pattern - # uri_pattern = "cloud.google.com/generative-ai-app-builder/docs/*" - - # For more information, refer to: - # https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store - client_options = ( - ClientOptions(api_endpoint=f"{location}-discoveryengine.googleapis.com") - if location != "global" - else None - ) - - # Create a client - client = discoveryengine.SiteSearchEngineServiceClient( - client_options=client_options - ) - - # The full resource name of the data store - # e.g. 
projects/{project}/locations/{location}/dataStores/{data_store_id} - site_search_engine = client.site_search_engine_path( - project=project_id, location=location, data_store=data_store_id - ) - - # Target Site to index - target_site = discoveryengine.TargetSite( - provided_uri_pattern=uri_pattern, - # Options: INCLUDE, EXCLUDE - type_=discoveryengine.TargetSite.Type.INCLUDE, - exact_match=False, - ) - - # Make the request - operation = client.create_target_site( - parent=site_search_engine, - target_site=target_site, - ) - - print(f"Waiting for operation to complete: {operation.operation.name}") - response = operation.result() - - # After the operation is complete, - # get information from operation metadata - metadata = discoveryengine.CreateTargetSiteMetadata(operation.metadata) - - # Handle the response - print(response) - print(metadata) - # [END genappbuilder_create_target_site] - - return response - - -def delete_target_site( - project_id: str, - location: str, - data_store_id: str, - target_site_id: str, -): - # [START genappbuilder_delete_target_site] - from google.api_core.client_options import ClientOptions - - from google.cloud import discoveryengine_v1 as discoveryengine - - # TODO(developer): Uncomment these variables before running the sample. - # project_id = "YOUR_PROJECT_ID" - # location = "YOUR_LOCATION" # Values: "global" - # data_store_id = "YOUR_DATA_STORE_ID" - # target_site_id = "YOUR_TARGET_SITE_ID" - - # For more information, refer to: - # https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store - client_options = ( - ClientOptions(api_endpoint=f"{location}-discoveryengine.googleapis.com") - if location != "global" - else None - ) - - # Create a client - client = discoveryengine.SiteSearchEngineServiceClient( - client_options=client_options - ) - - # The full resource name of the data store - # e.g. 
projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store_id}/siteSearchEngine/targetSites/{target_site} - name = client.target_site_path( - project=project_id, - location=location, - data_store=data_store_id, - target_site=target_site_id, - ) - - # Make the request - operation = client.delete_target_site(name=name) - - print(f"Operation: {operation.operation.name}") - # [END genappbuilder_delete_target_site] - - return operation.operation.name +# # Copyright 2024 Google LLC +# # +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. +# # You may obtain a copy of the License at +# # +# # http://www.apache.org/licenses/LICENSE-2.0 +# # +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# # See the License for the specific language governing permissions and +# # limitations under the License. +# # +# +# +# def create_target_site( +# project_id: str, +# location: str, +# data_store_id: str, +# uri_pattern: str, +# ): +# # [START genappbuilder_create_target_site] +# from google.api_core.client_options import ClientOptions +# +# from google.cloud import discoveryengine_v1 as discoveryengine +# +# # TODO(developer): Uncomment these variables before running the sample. 
+# # project_id = "YOUR_PROJECT_ID" +# # location = "YOUR_LOCATION" # Values: "global" +# # data_store_id = "YOUR_DATA_STORE_ID" +# # NOTE: Do not include http or https protocol in the URI pattern +# # uri_pattern = "cloud.google.com/generative-ai-app-builder/docs/*" +# +# # For more information, refer to: +# # https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store +# client_options = ( +# ClientOptions(api_endpoint=f"{location}-discoveryengine.googleapis.com") +# if location != "global" +# else None +# ) +# +# # Create a client +# client = discoveryengine.SiteSearchEngineServiceClient( +# client_options=client_options +# ) +# +# # The full resource name of the data store +# # e.g. projects/{project}/locations/{location}/dataStores/{data_store_id} +# site_search_engine = client.site_search_engine_path( +# project=project_id, location=location, data_store=data_store_id +# ) +# +# # Target Site to index +# target_site = discoveryengine.TargetSite( +# provided_uri_pattern=uri_pattern, +# # Options: INCLUDE, EXCLUDE +# type_=discoveryengine.TargetSite.Type.INCLUDE, +# exact_match=False, +# ) +# +# # Make the request +# operation = client.create_target_site( +# parent=site_search_engine, +# target_site=target_site, +# ) +# +# print(f"Waiting for operation to complete: {operation.operation.name}") +# response = operation.result() +# +# # After the operation is complete, +# # get information from operation metadata +# metadata = discoveryengine.CreateTargetSiteMetadata(operation.metadata) +# +# # Handle the response +# print(response) +# print(metadata) +# # [END genappbuilder_create_target_site] +# +# return response +# +# +# def delete_target_site( +# project_id: str, +# location: str, +# data_store_id: str, +# target_site_id: str, +# ): +# # [START genappbuilder_delete_target_site] +# from google.api_core.client_options import ClientOptions +# +# from google.cloud import discoveryengine_v1 as discoveryengine +# +# # 
TODO(developer): Uncomment these variables before running the sample. +# # project_id = "YOUR_PROJECT_ID" +# # location = "YOUR_LOCATION" # Values: "global" +# # data_store_id = "YOUR_DATA_STORE_ID" +# # target_site_id = "YOUR_TARGET_SITE_ID" +# +# # For more information, refer to: +# # https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store +# client_options = ( +# ClientOptions(api_endpoint=f"{location}-discoveryengine.googleapis.com") +# if location != "global" +# else None +# ) +# +# # Create a client +# client = discoveryengine.SiteSearchEngineServiceClient( +# client_options=client_options +# ) +# +# # The full resource name of the data store +# # e.g. projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store_id}/siteSearchEngine/targetSites/{target_site} +# name = client.target_site_path( +# project=project_id, +# location=location, +# data_store=data_store_id, +# target_site=target_site_id, +# ) +# +# # Make the request +# operation = client.delete_target_site(name=name) +# +# print(f"Operation: {operation.operation.name}") +# # [END genappbuilder_delete_target_site] +# +# return operation.operation.name diff --git a/discoveryengine/site_search_engine_sample_test.py b/discoveryengine/site_search_engine_sample_test.py index 51c9b79e80..82f4c79713 100644 --- a/discoveryengine/site_search_engine_sample_test.py +++ b/discoveryengine/site_search_engine_sample_test.py @@ -1,43 +1,43 @@ -# Copyright 2024 Google LLC +# # Copyright 2024 Google LLC +# # +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. 
+# # You may obtain a copy of the License at +# # +# # http://www.apache.org/licenses/LICENSE-2.0 +# # +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# # See the License for the specific language governing permissions and +# # limitations under the License. +# # # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# import os +# import re # -# http://www.apache.org/licenses/LICENSE-2.0 +# from discoveryengine import site_search_engine_sample # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# project_id = os.environ["GOOGLE_CLOUD_PROJECT"] +# location = "global" +# data_store_id = "site-search-data-store" # - -import os -import re - -from discoveryengine import site_search_engine_sample - -project_id = os.environ["GOOGLE_CLOUD_PROJECT"] -location = "global" -data_store_id = "site-search-data-store" - - -def test_create_target_site(): - response = site_search_engine_sample.create_target_site( - project_id, - location, - data_store_id, - uri_pattern="cloud.google.com/generative-ai-app-builder/docs/*", - ) - assert response, response - match = re.search(r"\/targetSites\/([^\/]+)", response.name) - - if match: - target_site = match.group(1) - site_search_engine_sample.delete_target_site( - project_id=project_id, - location=location, - data_store_id=data_store_id, - target_site_id=target_site, - ) +# +# def test_create_target_site(): +# response = site_search_engine_sample.create_target_site( +# project_id, +# location, +# data_store_id, +# uri_pattern="cloud.google.com/generative-ai-app-builder/docs/*", +# ) +# assert response, response +# match = re.search(r"\/targetSites\/([^\/]+)", response.name) +# +# if match: +# target_site = match.group(1) +# site_search_engine_sample.delete_target_site( +# project_id=project_id, +# location=location, +# data_store_id=data_store_id, +# target_site_id=target_site, +# )