pr_reviewer/test_docker.py
Andrew Ridgway b436a81300
Some checks failed
Build and Push Image / Build and push image (push) Failing after 6m58s
initial build into pipeline
2026-05-11 22:15:50 +10:00

142 lines
5.1 KiB
Python

#!/usr/bin/env python3
"""
Test script to verify the Dockerized PR Reviewer application works correctly.
This script builds the Docker image, runs it, and tests the API endpoints.
"""
import time
import requests
import docker
import json
import sys
from typing import Dict, Any
def _wait_for_ready(base_url: str, max_wait: int = 30) -> None:
    """Poll the health endpoint until it answers 200 or *max_wait* seconds pass.

    Raises:
        TimeoutError: if the container never becomes ready in time.
    """
    print("Waiting for container to be ready...")
    deadline = time.time() + max_wait
    while time.time() < deadline:
        try:
            response = requests.get(f"{base_url}/api/v1/health", timeout=5)
            if response.status_code == 200:
                print("Container is ready!")
                return
        except requests.exceptions.ConnectionError:
            print("Waiting for container to start...")
        # Sleep on every iteration (not only on connection errors) so a
        # not-yet-healthy non-200 response does not turn this into a busy-wait.
        time.sleep(2)
    raise TimeoutError("Container did not become ready within the timeout period")


def _check_health(base_url: str) -> None:
    """Verify the health endpoint returns 200 with a 'healthy' status."""
    print("Testing health endpoint...")
    health_response = requests.get(f"{base_url}/api/v1/health")
    assert health_response.status_code == 200, f"Health check failed: {health_response.status_code}"
    health_data = health_response.json()
    assert health_data["status"] == "healthy", f"Unexpected health status: {health_data['status']}"
    print("Health endpoint test passed.")


def _check_review(base_url: str) -> None:
    """Exercise the review endpoint with a minimal valid payload.

    A 200 means the review pipeline ran end to end; a non-200 is tolerated
    because the test environment has no real LLM backend behind it.
    """
    print("Testing review endpoint...")
    test_payload = {
        "pr_id": "123",
        "title": "Test PR",
        "description": "This is a test PR",
        "repo": {
            "name": "test-repo",
            "url": "https://github.com/test/test-repo"
        },
        "source": {
            "branch": "feature/test",
            "commit": "abc123"
        },
        "target": {
            "branch": "main",
            "commit": "def456"
        },
        "files": [
            {
                "path": "src/main.py",
                "content": "print('Hello World')",
                "status": "modified",
                "additions": 1,
                "deletions": 0
            }
        ],
        "context": {
            "code_review": "Follow basic coding standards",
            "security_review": "Check for obvious security issues",
            "infra_review": "Ensure basic infrastructure practices"
        }
    }
    review_response = requests.post(
        f"{base_url}/api/v1/review",
        json=test_payload,
        timeout=30  # Longer timeout for the review process
    )
    # We expect this to either succeed (200) or fail with a 500 due to LLM issues
    # Since we're not actually connecting to a real LLM, we expect a 500
    print(f"Review endpoint responded with status: {review_response.status_code}")
    if review_response.status_code == 200:
        review_data = review_response.json()
        print("Review endpoint test passed.")
        print(f"Review ID: {review_data.get('review_id')}")
        print(f"Status: {review_data.get('status')}")
    else:
        print(f"Review endpoint returned error status {review_response.status_code} (expected due to lack of real LLM)")
        print(f"Response: {review_response.text}")


def _cleanup(client, container, tag: str, remove_image: bool) -> None:
    """Best-effort teardown: stop/remove the container, then remove the image.

    Failures here are reported but never raised, so teardown cannot mask the
    actual test result (this replaces the original's bare `except: pass`).
    """
    print("Cleaning up...")
    if container is not None:
        try:
            container.stop()
            container.remove()
        except Exception as e:
            print(f"Warning: container cleanup failed: {e}")
    if remove_image:
        try:
            client.images.remove(image=tag, force=True)
        except Exception as e:
            print(f"Warning: image cleanup failed: {e}")


def test_dockerized_app():
    """Test the Dockerized PR Reviewer application.

    Builds the Docker image, starts a container, waits for readiness, then
    checks the health and review API endpoints. Teardown of the container
    and image happens exactly once, in a `finally` block, on both the
    success and failure paths; any test error is re-raised so CI fails.
    """
    tag = "pr-reviewer-test:latest"  # single source of truth for the image tag
    base_url = "http://localhost:8000"
    client = docker.from_env()
    container = None
    image_built = False
    try:
        # Build the Docker image
        print("Building Docker image...")
        image, build_logs = client.images.build(
            path=".",
            tag=tag,
            rm=True,
            forcerm=True
        )
        image_built = True
        print("Docker image built successfully.")
        # Run the container
        print("Starting container...")
        container = client.containers.run(
            image=tag,
            detach=True,
            ports={'8000/tcp': 8000},
            environment={
                "LLM_MODEL": "test-model",
                "LLM_BASE_URL": "http://localhost:11434",  # Using Ollama as example
                "LLM_API_KEY": "ollama",  # Ollama doesn't need a real key
                "LLM_PROVIDER": "ollama"
            }
        )
        print(f"Container started with ID: {container.id}")
        _wait_for_ready(base_url)
        _check_health(base_url)
        _check_review(base_url)
    except Exception as e:
        # Narrowed from the original's implicit catch-all: report and re-raise
        # so the caller (and CI) sees the failure; cleanup runs in `finally`.
        print(f"Test failed with error: {e}")
        raise
    finally:
        _cleanup(client, container, tag, image_built)
    print("Test completed successfully.")
# Run the end-to-end Docker test when executed directly as a script.
if __name__ == "__main__":
    test_dockerized_app()