Configure Amazon Bedrock for AI model access, including Titan Text Express and Titan Embeddings; Google's Gemini API is used separately below for smart query generation.

import boto3
import json

# Bedrock runtime client pinned to the Singapore region.
bedrock = boto3.client('bedrock-runtime', region_name='ap-southeast-1')

# Smoke-test Titan Text Express with a short, low-stakes prompt.
text_request = {
    'inputText': 'Hello, how are you?',
    'textGenerationConfig': {
        'maxTokenCount': 100,
        'temperature': 0.7
    }
}
response = bedrock.invoke_model(
    modelId='amazon.titan-text-express-v1',
    body=json.dumps(text_request)
)
# The response body is a stream; read it once and parse the JSON payload.
print(json.loads(response['body'].read()))

# Produce a vector embedding for a document chunk via Titan Embeddings v2.
embed_request = {
    'inputText': 'Document chunk text here'
}
response = bedrock.invoke_model(
    modelId='amazon.titan-embed-text-v2:0',
    body=json.dumps(embed_request)
)
embedding = json.loads(response['body'].read())['embedding']
# Store in OpenSearch or use for similarity search
For smart query generation, use Google's Gemini API:
import os

import google.generativeai as genai

# Fix: the original snippet read os.environ without importing os, which
# raises NameError at the configure() call. KeyError here means the
# GEMINI_API_KEY environment variable is unset — fail fast by design.
genai.configure(api_key=os.environ['GEMINI_API_KEY'])

model = genai.GenerativeModel('gemini-2.5-flash')

# NOTE(review): `document_text` is not defined in this snippet — it must be
# supplied by an earlier step (e.g. the extracted document chunk); confirm
# against the surrounding pipeline.
response = model.generate_content(
    f"""Analyze this document and generate 10 intelligent questions:
{document_text}
"""
)
Proceed to CI/CD Pipeline.