Skip to main content

Method Signature

def generate_videos(
    self,
    *,
    model: str,
    prompt: Optional[str] = None,
    image: Optional[Image] = None,
    video: Optional[Video] = None,
    source: Optional[GenerateVideosSource] = None,
    config: Optional[GenerateVideosConfig] = None,
) -> GenerateVideosOperation
async def generate_videos(
    self,
    *,
    model: str,
    prompt: Optional[str] = None,
    image: Optional[Image] = None,
    video: Optional[Video] = None,
    source: Optional[GenerateVideosSource] = None,
    config: Optional[GenerateVideosConfig] = None,
) -> GenerateVideosOperation

Description

Generates videos based on input (text, image, or video) using Veo models. This is a long-running operation that returns immediately with an operation object that can be polled for completion. Supported use cases:
  1. Text to video - Generate video from text prompt
  2. Image to video - Animate a static image (with optional text prompt)
  3. Image to video with interpolation - Generate video between two frames
  4. Video extension - Extend an existing video (with optional text prompt)

Parameters

model
str
required
The Veo model to use for generation. Examples:
  • 'veo-2.0-generate-001'
  • 'veo-001'
source
GenerateVideosSource
The input source for video generation (recommended). Note: Use source instead of the individual prompt, image, video parameters (which are deprecated).
prompt
str
Text prompt for video generation. Deprecated - use source.prompt instead.
image
Image
Input image for image-to-video. Deprecated - use source.image instead.
video
Video
Input video for video extension. Deprecated - use source.video instead.
config
GenerateVideosConfig
Configuration for video generation.

Response

Returns a GenerateVideosOperation object for long-running operation tracking.
name
str
The operation name/ID for polling
done
bool
Whether the operation is complete
metadata
dict
Operation metadata including progress information
error
dict
Error information if the operation failed
result
GenerateVideosResponse
The result when the operation completes successfully.

Code Examples

Text to Video Generation

import time
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')

# Start video generation
operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        prompt='A neon hologram of a cat driving at top speed',
    ),
)

print(f"Operation started: {operation.name}")

# Poll until complete
while not operation.done:
    time.sleep(10)
    operation = client.operations.get(operation.name)
    print(f"Status: {'Done' if operation.done else 'In progress...'}")

# Access the result
if operation.result:
    video_uri = operation.result.generated_videos[0].video.uri
    print(f"Video generated: {video_uri}")
else:
    print(f"Error: {operation.error}")

Image to Video with Configuration

from google.genai import types

operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        image=types.Image.from_file('input.jpg'),
        prompt='The scene comes to life with gentle movement',
    ),
    config=types.GenerateVideosConfig(
        duration_seconds=8.0,
        aspect_ratio='16:9',
        resolution='1080p',
        fps=30,
    )
)

print(f"Operation: {operation.name}")

Frame Interpolation (Image to Video)

operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        image=types.Image.from_file('start_frame.jpg'),
    ),
    config=types.GenerateVideosConfig(
        last_frame=types.Image.from_file('end_frame.jpg'),
        duration_seconds=5.0,
    )
)

# The model generates smooth transitions between the two frames

Video Extension

operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        video=types.Video.from_uri('gs://my-bucket/input.mp4', 'video/mp4'),
        prompt='Continue the scene with the character walking forward',
    ),
    config=types.GenerateVideosConfig(
        duration_seconds=5.0,
    )
)

With Negative Prompt and Safety Controls

operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        prompt='A person dancing in a vibrant city',
    ),
    config=types.GenerateVideosConfig(
        negative_prompt='shaky camera, blurry, distorted, low quality',
        person_generation='ALLOW_ADULT',
        duration_seconds=8.0,
        enhance_prompt=True,
    )
)

Save to Cloud Storage with Audio

operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        prompt='A waterfall in a lush forest',
    ),
    config=types.GenerateVideosConfig(
        duration_seconds=8.0,
        generate_audio=True,
        output_gcs_uri='gs://my-bucket/videos/',
        compression_quality=90,
    )
)

With Pub/Sub Notification

config = types.GenerateVideosConfig(
    pubsub_topic='projects/my-project/topics/video-complete',
    output_gcs_uri='gs://my-bucket/videos/',
)

operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        prompt='A futuristic spaceship flying through space',
    ),
    config=config,
)

print(f"Operation {operation.name} will notify {config.pubsub_topic} when done")

Async Usage

import asyncio
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')

async def generate():
    operation = await client.aio.models.generate_videos(
        model='veo-2.0-generate-001',
        source=types.GenerateVideosSource(
            prompt='A robot exploring an alien planet',
        ),
    )
    
    print(f"Operation started: {operation.name}")
    
    # Poll until complete
    while not operation.done:
        await asyncio.sleep(10)
        operation = await client.aio.operations.get(operation.name)
    
    if operation.result:
        print(f"Video: {operation.result.generated_videos[0].video.uri}")

asyncio.run(generate())

Operation Polling

Video generation is a long-running operation. You must poll the operation to check completion:
import time

def wait_for_operation(client, operation):
    """Poll operation until complete."""
    while not operation.done:
        time.sleep(10)
        operation = client.operations.get(operation.name)
        
        if operation.metadata:
            progress = operation.metadata.get('progress', 0)
            print(f"Progress: {progress}%")
    
    if operation.error:
        raise Exception(f"Operation failed: {operation.error}")
    
    return operation.result

# Use it
operation = client.models.generate_videos(...)
result = wait_for_operation(client, operation)
print(f"Video: {result.generated_videos[0].video.uri}")

Notes

  • Video generation typically takes 3-10 minutes depending on duration and settings
  • Use source parameter instead of deprecated prompt, image, video parameters
  • The operation returns immediately; you must poll for completion
  • Use Pub/Sub notifications for production systems instead of polling
  • Some configuration options are only available on Vertex AI
  • Videos may be filtered by Responsible AI systems
  • Higher resolution and longer duration increase generation time
  • The enhance_prompt feature can improve results but may alter your intent

Build docs developers (and LLMs) love