#!/usr/bin/env python3
"""Simple image generator using AWS Bedrock."""

import argparse
import base64
import io
import json
import os
from datetime import datetime

import boto3
from PIL import Image


def generate_image(
    prompt,
    model_id="amazon.titan-image-generator-v2:0",
    output_dir="generated_images",
):
    """Generate an image using AWS Bedrock based on the provided text prompt."""
    # Create Bedrock client
    bedrock_runtime = boto3.client(
        service_name="bedrock-runtime",
        region_name="us-east-1",  # Change to your preferred region
    )

    # Prepare request body based on the model
    if "stability" in model_id:
        request_body = {
            "text_prompts": [{"text": prompt}],
            "cfg_scale": 7,
            "steps": 30,
            "seed": 0,
            "width": 1024,
            "height": 1024,
        }
    elif "titan-image" in model_id:
        # Titan Image Generator format
        request_body = {
            "taskType": "TEXT_IMAGE",
            "textToImageParams": {
                "text": prompt,
                "negativeText": "blurry, bad quality, distorted",
            },
            "imageGenerationConfig": {
                "numberOfImages": 1,
                "height": 1024,
                "width": 1024,
                "cfgScale": 8.0,
            },
        }
    elif "claude" in model_id:
        # Claude message format (best-effort: Claude is a text model, so an
        # image content block may never appear in the response)
        request_body = {
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": 1024,
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": f"Generate an image of: {prompt}"}
                    ],
                }
            ],
        }
    else:
        # Generic format - try with a simple prompt
        request_body = {
            "prompt": prompt,
            "negative_prompt": "blurry, bad quality, distorted",
            "image_size": "1024x1024",
            "num_images": 1,
        }

    # Invoke the model
    try:
        print(f"Attempting to generate image with model: {model_id}")
        print(f"Request body: {json.dumps(request_body, indent=2)}")

        response = bedrock_runtime.invoke_model(
            modelId=model_id, body=json.dumps(request_body)
        )
        response_body = json.loads(response.get("body").read())
        print(f"Response keys: {list(response_body.keys())}")

        # Handle response based on the model
        image_base64 = None
        if "stability" in model_id:
            image_base64 = response_body.get("artifacts")[0].get("base64")
        elif "titan-image" in model_id:
            # For Titan model
            image_base64 = response_body.get("images")[0]
        elif "claude" in model_id:
            # For Claude model: pull base64 data out of an image content block, if any
            for content in response_body.get("content", []):
                if content.get("type") == "image":
                    image_base64 = content.get("source", {}).get("data", "")
                    break
        else:
            # Try to find image data in the response
            if "images" in response_body and len(response_body["images"]) > 0:
                image_base64 = response_body["images"][0]
            elif "image" in response_body:
                image_base64 = response_body["image"]

        if not image_base64:
            print(
                f"Couldn't find image data in response: {json.dumps(response_body, indent=2)}"
            )
            return None

        # Create output directory if it doesn't exist
        os.makedirs(output_dir, exist_ok=True)

        # Save the image
        image_data = base64.b64decode(image_base64)
        image = Image.open(io.BytesIO(image_data))

        # Generate filename with timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{output_dir}/image_{timestamp}.png"
        image.save(filename)

        print(f"Image generated and saved to {filename}")
        return filename

    except Exception as e:
        print(f"Error generating image: {str(e)}")
        return None


def list_available_models():
    """List available Bedrock models that support image generation."""
    try:
        bedrock = boto3.client("bedrock", region_name="us-east-1")
        response = bedrock.list_foundation_models()

        print("Available Bedrock models:")
        print("-" * 50)

        image_models = []
        for model in response.get("modelSummaries", []):
            model_id = model.get("modelId")
            if model.get("inferenceTypesSupported") and "ON_DEMAND" in model.get(
                "inferenceTypesSupported"
            ):
                if "image" in model.get("outputModalities", []):
                    image_models.append(model_id)
                    print(f"✓ {model_id} (Image Generation)")

        print("-" * 50)
        print(f"Found {len(image_models)} models supporting image generation")
        return image_models

    except Exception as e:
        print(f"Error listing models: {str(e)}")
        return []


def main():
    parser = argparse.ArgumentParser(
        description="Generate images from text using AWS Bedrock"
    )
    parser.add_argument("prompt", nargs="?", help="Text prompt to generate image from")
    parser.add_argument(
        "--model",
        default="amazon.titan-image-generator-v1",
        help="Bedrock model ID (default: amazon.titan-image-generator-v1)",
    )
    parser.add_argument(
        "--output-dir",
        default="generated_images",
        help="Directory to save generated images (default: generated_images)",
    )
    parser.add_argument(
        "--list-models",
        action="store_true",
        help="List available Bedrock models for image generation",
    )

    args = parser.parse_args()

    if args.list_models:
        list_available_models()
    elif args.prompt:
        generate_image(args.prompt, args.model, args.output_dir)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
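# Example invocations (a sketch; assumes AWS credentials are configured and the
# chosen model is enabled for your account in us-east-1 - the script filename
# and prompt text below are illustrative, not part of the original source):
#
#   python bedrock_image_gen.py --list-models
#   python bedrock_image_gen.py "a watercolor lighthouse at dusk" \
#       --model amazon.titan-image-generator-v2:0 --output-dir generated_images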