diff --git a/.gitignore b/.gitignore index 4f31273..9c012aa 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,10 @@ htmlcov/ # Environment .env +# Pywrangler +python_modules +.venv-workers + # Editor .idea/ .vscode/ diff --git a/README.md b/README.md index df20d5a..301c0b8 100644 --- a/README.md +++ b/README.md @@ -38,8 +38,6 @@ No server infrastructure, no databases, no external dependencies. | POST | `/ai/summary` | Summarise a completed tutoring session | | GET | `/health` | Liveness check | -All endpoints return JSON and support CORS. - --- ## Request / Response Examples @@ -127,12 +125,12 @@ All endpoints return JSON and support CORS. ``` learnpilot/ ├── src/ -│ └── worker.py # Cloudflare Python Worker (all logic lives here) +│ └── worker.py # Cloudflare Python Worker (Main entry point) ├── tests/ -│ └── test_worker.py # Unit tests (pytest, no Cloudflare runtime needed) -├── wrangler.toml # Cloudflare Workers deployment config -├── requirements-dev.txt # Dev/test dependencies (pytest only) -├── .env.example # Example environment variables for Wrangler CLI +│ └── test_worker.py # Unit tests (pytest) +├── wrangler.toml # Cloudflare Workers configuration +├── pyproject.toml # Python project configuration (uv) +├── uv.lock # Locked dependencies ├── LICENSE └── README.md ``` @@ -143,20 +141,18 @@ learnpilot/ ### Prerequisites -- [Node.js](https://nodejs.org/) ≥ 18 (for the Wrangler CLI) +- [uv](https://docs.astral.sh/uv/) (Python package manager) +- [Node.js](https://nodejs.org/) ≥ 18 - A [Cloudflare account](https://dash.cloudflare.com/sign-up) with Workers AI enabled ### Deploy ```bash -# Install Wrangler CLI -npm install -g wrangler - # Authenticate with Cloudflare -wrangler login +uv run pywrangler login # Deploy the worker to the edge -npx wrangler deploy +uv run pywrangler deploy ``` Wrangler will print the live URL, e.g. @@ -165,7 +161,7 @@ Wrangler will print the live URL, e.g. 
### Local Development ```bash -npx wrangler dev +uv run pywrangler dev ``` The worker starts on `http://localhost:8787`. All AI calls are proxied to @@ -175,16 +171,11 @@ Cloudflare's remote AI service automatically during development. ## Running Tests -The tests use only the Python standard library and `pytest`. No Cloudflare -runtime is required – a minimal `Response` shim is injected before importing -`worker.py`. +The tests use `pytest` and `requests` to verify the worker's behavior against a running instance (local or remote). ```bash -# Install test dependencies -pip install -r requirements-dev.txt - # Run tests -pytest tests/ -v +uv run pytest tests/ -v ``` --- @@ -200,6 +191,9 @@ into the deployed worker. | `CLOUDFLARE_ACCOUNT_ID` | Your Cloudflare account ID | | `CLOUDFLARE_API_TOKEN` | API token with Workers AI permission | + +Or you can simply log in with `pywrangler login` in your terminal as described above. + --- ## License diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..331478b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,14 @@ +[project] +name = "learnpilot" +version = "0.1.0" +requires-python = ">=3.13" +dependencies = [] +[project.optional-dependencies] +test = [ +    "pytest", +    "requests", +] +[tool.worker] +main = "src/worker.py" +compatibility_date = "2026-03-23" +compatibility_flags = ["python_workers"] \ No newline at end of file diff --git a/src/worker.py b/src/worker.py index c65b96a..4a652b7 100644 --- a/src/worker.py +++ b/src/worker.py @@ -3,7 +3,7 @@ # A Cloudflare Python Worker that exposes an AI tutoring API backed # by Cloudflare Workers AI. Deploy with: # -# npx wrangler deploy +# uv run pywrangler deploy # # The worker uses the Workers AI binding (env.AI) to run inference # on Cloudflare's global edge network, providing low-latency responses. 
@@ -16,507 +16,517 @@ # POST /ai/path – generate a personalised learning path # POST /ai/progress – produce personalised progress insights # GET /health – liveness check - import json +from js import Object +from pyodide.ffi import to_js as _to_js +from urllib.parse import urlparse +from workers import Response, WorkerEntrypoint - -async def on_fetch(request, env): - """Entry point for all incoming HTTP requests.""" - url = request.url - method = request.method - - # CORS preflight - if method == "OPTIONS": - return _cors_response("", 204) - - # Route dispatch - if "/ai/chat" in url and method == "POST": - return await _handle_chat(request, env) - - if "/ai/explain" in url and method == "POST": - return await _handle_explain(request, env) - - if "/ai/practice" in url and method == "POST": - return await _handle_practice(request, env) - - if "/ai/evaluate" in url and method == "POST": - return await _handle_evaluate(request, env) - - if "/ai/path" in url and method == "POST": - return await _handle_generate_path(request, env) - - if "/ai/progress" in url and method == "POST": - return await _handle_progress_insights(request, env) - - if "/ai/adapt" in url and method == "POST": - return await _handle_adapt_difficulty(request, env) - - if "/ai/summary" in url and method == "POST": - return await _handle_session_summary(request, env) - - if "/health" in url: - return _cors_response(json.dumps({"status": "ok", "service": "learnpilot-ai"}), 200) - - return _cors_response(json.dumps({"error": "Not found"}), 404) +# to_js converts between Python dictionaries and JavaScript Objects +def to_js(obj): + """ + Function to convert python objects to JavaScript objects. + This is required for the Python Workers to work with JavaScript. 
+    From https://developers.cloudflare.com/workers/languages/python/ffi/ +    """ +    return _to_js(obj, dict_converter=Object.fromEntries) -# --------------------------------------------------------------------------- -# Handlers -# --------------------------------------------------------------------------- +MODEL = "@cf/meta/llama-3.1-8b-instruct" -async def _handle_chat(request, env): +class Default(WorkerEntrypoint): """ - Continue a tutoring conversation. - - Request body: - { - "messages": [{"role": "user"|"assistant"|"system", "content": "…"}, …], - "lesson_context": "…", // optional - "max_tokens": 1024 // optional - } + A Cloudflare Python Worker that exposes an AI tutoring API backed by Cloudflare Workers AI. + It inherits from WorkerEntrypoint and implements the fetch method + along with helper methods for each endpoint. """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) + async def fetch(self, request): + """Entry point for all incoming HTTP requests.""" + parsed_url = urlparse(request.url) + path = parsed_url.path + method = request.method - messages = body.get("messages", []) - lesson_context = body.get("lesson_context", "") - max_tokens = int(body.get("max_tokens", 1024)) + # CORS preflight + if method == "OPTIONS": + return _cors_response(None, 204) - if not messages: - return _error("messages is required", 400) + # Route dispatch + if path == "/ai/chat" and method == "POST": + return await self.handle_chat(request) - system_prompt = _tutor_system_prompt(lesson_context) - full_messages = [{"role": "system", "content": system_prompt}] + messages[-10:] + if path == "/ai/explain" and method == "POST": + return await self.handle_explain(request) - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - {"messages": full_messages, "max_tokens": max_tokens}, - ) - response_text = result.get("response", "") if isinstance(result, dict) else "" - return _cors_response(json.dumps({"response": response_text}), 200) + if path == 
"/ai/practice" and method == "POST": + return await self.handle_practice(request) + if path == "/ai/evaluate" and method == "POST": + return await self.handle_evaluate(request) -async def _handle_explain(request, env): - """ - Explain a concept at the learner's level. - - Request body: - { - "concept": "recursion", - "skill_level": "beginner", - "learning_style": "visual", - "context": "…" // optional - } - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - concept = body.get("concept", "").strip() - if not concept: - return _error("concept is required", 400) - - skill_level = body.get("skill_level", "beginner") - learning_style = body.get("learning_style", "visual") - context = body.get("context", "") - - style_hints = { - "visual": "Use text-described diagrams and visual metaphors.", - "auditory": "Explain conversationally as if speaking aloud.", - "reading": "Use numbered lists and clear definitions.", - "kinesthetic": "Emphasise hands-on examples and step-by-step tasks.", - } - style_hint = style_hints.get(learning_style, "") - context_section = f"\n\nLesson context:\n{context}" if context else "" - - prompt = ( - f"Explain the following concept to a {skill_level}-level learner.\n" - f"Learning style: {learning_style}. {style_hint}\n\n" - f"Concept: {concept}{context_section}\n\n" - "Structure your response as:\n" - "1. **Core Explanation** (2–4 sentences)\n" - "2. **Analogy** – a memorable real-world comparison\n" - "3. **Key Points** – 3–5 bullet points\n" - "4. 
**Quick Example** – a short, concrete illustration" - ) + if path == "/ai/path" and method == "POST": + return await self.handle_generate_path(request) - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _tutor_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 1024, - }, - ) - text = result.get("response", "") if isinstance(result, dict) else "" - return _cors_response(json.dumps({"explanation": text}), 200) + if path == "/ai/progress" and method == "POST": + return await self.handle_progress_insights(request) + if path == "/ai/adapt" and method == "POST": + return await self.handle_adapt_difficulty(request) -async def _handle_practice(request, env): - """ - Generate a practice question. + if path == "/ai/summary" and method == "POST": + return await self.handle_session_summary(request) - Request body: - { - "topic": "…", - "difficulty": "beginner|intermediate|advanced", - "question_type": "open-ended|multiple-choice|true-false" - } - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - topic = body.get("topic", "").strip() - if not topic: - return _error("topic is required", 400) - - difficulty = body.get("difficulty", "beginner") - question_type = body.get("question_type", "open-ended") - - prompt = ( - f"Generate a {difficulty}-level {question_type} practice question about: \"{topic}\"\n\n" - "Format:\n" - "- **Question:** \n" - "- **Hint:** \n" - "- **Expected Answer:** " - ) + if path == "/health": + return _cors_response(json.dumps({"status": "ok", "service": "learnpilot-ai"}), 200) - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _tutor_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 512, - }, - ) - text = result.get("response", "") if isinstance(result, dict) else "" - return _cors_response(json.dumps({"question": text}), 
200) + return _cors_response(json.dumps({"error": "Not found"}), 404) + async def handle_explain(self, request): + """ + Explain a concept at the learner's level. -async def _handle_evaluate(request, env): - """ - Evaluate a learner's answer. - - Request body: - { - "question": "…", - "answer": "…", - "expected_answer": "…", // optional - "topic": "…" // optional + Request body: + { + "concept": "recursion", + "skill_level": "beginner", + "learning_style": "visual", + "context": "…" // optional + } + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + concept = body.get("concept", "").strip() + if not concept: + return _error("concept is required", 400) + + skill_level = body.get("skill_level", "beginner") + learning_style = body.get("learning_style", "visual") + context = body.get("context", "") + + style_hints = { + "visual": "Use text-described diagrams and visual metaphors.", + "auditory": "Explain conversationally as if speaking aloud.", + "reading": "Use numbered lists and clear definitions.", + "kinesthetic": "Emphasise hands-on examples and step-by-step tasks.", } - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - question = body.get("question", "").strip() - answer = body.get("answer", "").strip() - if not question or not answer: - return _error("question and answer are required", 400) - - expected = body.get("expected_answer", "") - topic = body.get("topic", "") - - context = f"Topic: {topic}\n" if topic else "" - expected_section = f"Expected answer context: {expected}\n" if expected else "" - - prompt = ( - f"{context}Question: {question}\n" - f"{expected_section}\n" - f"Learner's answer: {answer}\n\n" - "Evaluate this answer and respond in exactly this format:\n" - "SCORE: \n" - "FEEDBACK: <2-3 sentences of constructive feedback>\n" - "CORRECT_ANSWER: " - ) + style_hint = style_hints.get(learning_style, "") + context_section = f"\n\nLesson 
context:\n{context}" if context else "" + + prompt = ( + f"Explain the following concept to a {skill_level}-level learner.\n" + f"Learning style: {learning_style}. {style_hint}\n\n" + f"Concept: {concept}{context_section}\n\n" + "Structure your response as:\n" + "1. **Core Explanation** (2-4 sentences)\n" + "2. **Analogy** - a memorable real-world comparison\n" + "3. **Key Points** - 3-5 bullet points\n" + "4. **Quick Example** - a short, concrete illustration" + ) + + result = await self.env.AI.run( + MODEL, + to_js( + { + "messages": [ + {"role": "system", "content": _tutor_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 1024, + } + ) + ) + data = result.to_py() # convert to python object + explanation = data.get("response", "") # get the response string + return _cors_response(json.dumps({"explanation": explanation}), 200) + + async def handle_chat(self, request): + """ + Continue a tutoring conversation. + + Request body: + { + "messages": [{"role": "user"|"assistant"|"system", "content": "…"}, …], + "lesson_context": "…", // optional + "max_tokens": 1024 // optional + } + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _tutor_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 512, - }, - ) - raw = result.get("response", "") if isinstance(result, dict) else "" - parsed = _parse_evaluation(raw) - return _cors_response(json.dumps(parsed), 200) + messages = body.get("messages", []) + lesson_context = body.get("lesson_context", "") + try: + max_tokens = int(body.get("max_tokens", 1024)) + except (ValueError, TypeError): + return _error("max_tokens must be a valid integer", 400) + if not messages: + return _error("messages is required", 400) -async def _handle_generate_path(request, env): - """ - Generate a personalised learning path. 
- - Request body: - { - "topic": "…", - "skill_level": "…", - "learning_style": "…", - "available_lessons": [{id, title, type, difficulty}, …], - "goals": "…" // optional - } - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - topic = body.get("topic", "").strip() - skill_level = body.get("skill_level", "beginner") - learning_style = body.get("learning_style", "visual") - available_lessons = body.get("available_lessons", []) - goals = body.get("goals", "") - - if not topic: - return _error("topic is required", 400) - - goals_section = f"\nLearner goals: {goals}" if goals else "" - lesson_list = json.dumps(available_lessons, indent=2) - - prompt = ( - f"Create a personalised learning path for:\n" - f"- Topic: {topic}\n" - f"- Skill level: {skill_level}\n" - f"- Learning style: {learning_style}{goals_section}\n\n" - f"Available lessons (JSON):\n{lesson_list}\n\n" - 'Return a JSON object with exactly two keys:\n' - '{\n' - ' "ordered_lesson_ids": [],\n' - ' "rationale": "<2-3 sentence explanation of the path design>"\n' - '}\n\n' - "Only include lessons appropriate for this learner." 
- ) + system_prompt = _tutor_system_prompt(lesson_context) + full_messages = [{"role": "system", "content": system_prompt}] + messages[-10:] - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _curriculum_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 1024, - }, - ) - raw = result.get("response", "") if isinstance(result, dict) else "" + result = await self.env.AI.run( + MODEL, + to_js({"messages": full_messages, "max_tokens": max_tokens}), + ) + data = result.to_py() + response_text = data.get("response", "") + return _cors_response(json.dumps({"response": response_text}), 200) - # Extract JSON from the response - start = raw.find("{") - end = raw.rfind("}") - if start != -1 and end > start: + async def handle_practice(self, request): + """ + Generate a practice question. + + Request body: + { + "topic": "…", + "difficulty": "beginner|intermediate|advanced", + "question_type": "open-ended|multiple-choice|true-false" + } + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + topic = body.get("topic", "").strip() + if not topic: + return _error("topic is required", 400) + + difficulty = body.get("difficulty", "beginner") + question_type = body.get("question_type", "open-ended") + + prompt = ( + f"Generate a {difficulty}-level {question_type} practice question about: \"{topic}\"\n\n" + "Format:\n" + "- **Question:** \n" + "- **Hint:** \n" + "- **Expected Answer:** " + ) + + result = await self.env.AI.run( + MODEL, + to_js({ + "messages": [ + {"role": "system", "content": _tutor_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 512, + }), + ) + data = result.to_py() + text = data.get("response", "") + return _cors_response(json.dumps({"question": text}), 200) + + async def handle_evaluate(self, request): + """ + Evaluate a learner's answer. 
+ + Request body: + { + "question": "…", + "answer": "…", + "expected_answer": "…", // optional + "topic": "…" // optional + } + """ try: - path_data = json.loads(raw[start : end + 1]) + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + question = body.get("question", "").strip() + answer = body.get("answer", "").strip() + if not question or not answer: + return _error("question and answer are required", 400) + + expected = body.get("expected_answer", "") + topic = body.get("topic", "") + + context = f"Topic: {topic}\n" if topic else "" + expected_section = f"Expected answer context: {expected}\n" if expected else "" + + prompt = ( + f"{context}Question: {question}\n" + f"{expected_section}\n" + f"Learner's answer: {answer}\n\n" + "Evaluate this answer and respond in exactly this format:\n" + "SCORE: \n" + "FEEDBACK: <2-3 sentences of constructive feedback>\n" + "CORRECT_ANSWER: " + ) + + result = await self.env.AI.run( + MODEL, + to_js({ + "messages": [ + {"role": "system", "content": _tutor_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 512, + }), + ) + data = result.to_py() + raw = data.get("response", "") + parsed = _parse_evaluation(raw) + return _cors_response(json.dumps(parsed), 200) + + async def handle_generate_path(self, request): + """ + Generate a personalised learning path. 
+ + Request body: + { + "topic": "…", + "skill_level": "…", + "learning_style": "…", + "available_lessons": [{id, title, type, difficulty}, …], + "goals": "…" // optional + } + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + topic = body.get("topic", "").strip() + skill_level = body.get("skill_level", "beginner") + learning_style = body.get("learning_style", "visual") + available_lessons = body.get("available_lessons", []) + goals = body.get("goals", "") + + if not topic: + return _error("topic is required", 400) + + goals_section = f"\nLearner goals: {goals}" if goals else "" + lesson_list = json.dumps(available_lessons, indent=2) + + prompt = ( + f"Create a personalised learning path for:\n" + f"- Topic: {topic}\n" + f"- Skill level: {skill_level}\n" + f"- Learning style: {learning_style}{goals_section}\n\n" + f"Available lessons (JSON):\n{lesson_list}\n\n" + 'Return a JSON object with exactly two keys:\n' + '{\n' + ' "ordered_lesson_ids": [],\n' + ' "rationale": "<2-3 sentence explanation of the path design>"\n' + '}\n\n' + "Only include lessons appropriate for this learner." + ) + + result = await self.env.AI.run( + MODEL, + to_js({ + "messages": [ + {"role": "system", "content": _curriculum_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 1024, + }), + ) + data = result.to_py() + raw = data.get("response", "") + + try: + path_data = json.loads(raw) return _cors_response(json.dumps(path_data), 200) except (json.JSONDecodeError, ValueError): - pass - - return _cors_response(json.dumps({"ordered_lesson_ids": [], "rationale": raw}), 200) + return _cors_response(json.dumps({"ordered_lesson_ids": [], "rationale": raw}), 200) + async def handle_progress_insights(self, request): + """ + Generate personalised progress insights for a learner. -async def _handle_progress_insights(request, env): - """ - Generate personalised progress insights for a learner. 
- - Request body: - { - "learner_name": "Alice", - "topic": "Python Programming", - "progress_data": [ - {"lesson": "Variables", "score": 0.9, "completed": true, "attempts": 1}, - … - ] - } - - Returns: - {"insights": "<4-6 sentence progress report>"} - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - learner_name = body.get("learner_name", "the learner") - topic = body.get("topic", "").strip() - progress_data = body.get("progress_data", []) - - if not topic: - return _error("topic is required", 400) - if not progress_data: - return _error("progress_data is required", 400) - - prompt = ( - f"Analyse {learner_name}'s learning progress in \"{topic}\":\n\n" - f"{json.dumps(progress_data, indent=2)}\n\n" - "Write a concise progress report (4–6 sentences) that:\n" - "1. Summarises overall performance.\n" - "2. Identifies strengths.\n" - "3. Pinpoints areas needing improvement.\n" - "4. Recommends a concrete next action." - ) - - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _curriculum_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 512, - }, - ) - text = result.get("response", "") if isinstance(result, dict) else "" - return _cors_response(json.dumps({"insights": text}), 200) - - -async def _handle_adapt_difficulty(request, env): - """ - Recommend a difficulty adjustment based on recent performance. 
- - Request body: - { - "topic": "Python Programming", - "current_difficulty": "beginner", - "recent_scores": [0.9, 0.85, 0.95], - "struggles": ["recursion", "decorators"] // optional - } + Request body: + { + "learner_name": "Alice", + "topic": "Python Programming", + "progress_data": [ + {"lesson": "Variables", "score": 0.9, "completed": true, "attempts": 1}, + … + ] + } - Returns: - {"new_difficulty": "intermediate", "action": "increase", "reasoning": "…"} - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - topic = body.get("topic", "").strip() - current_difficulty = body.get("current_difficulty", "beginner") - recent_scores = body.get("recent_scores", []) - struggles = body.get("struggles", []) - - if not topic: - return _error("topic is required", 400) - if not recent_scores: - return _error("recent_scores is required", 400) - - avg = sum(recent_scores) / len(recent_scores) - struggle_text = "" - if struggles: - struggle_text = f"\nTopics the learner struggled with: {', '.join(struggles)}" - - prompt = ( - f"A learner studying \"{topic}\" at {current_difficulty} difficulty " - f"has achieved an average score of {avg:.0%} over their last " - f"{len(recent_scores)} attempt(s).{struggle_text}\n\n" - "Should the difficulty change? 
Respond with JSON:\n" - "{\n" - ' "new_difficulty": "",\n' - ' "action": "",\n' - ' "reasoning": ""\n' - "}" - ) + Returns: + {"insights": "<4-6 sentence progress report>"} + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + learner_name = body.get("learner_name", "the learner") + topic = body.get("topic", "").strip() + progress_data = body.get("progress_data", []) + + if not topic: + return _error("topic is required", 400) + if not progress_data: + return _error("progress_data is required", 400) + + prompt = ( + f"Analyse {learner_name}'s learning progress in \"{topic}\":\n\n" + f"{json.dumps(progress_data, indent=2)}\n\n" + "Write a concise progress report (4-6 sentences) that:\n" + "1. Summarises overall performance.\n" + "2. Identifies strengths.\n" + "3. Pinpoints areas needing improvement.\n" + "4. Recommends a concrete next action." + ) + + result = await self.env.AI.run( + MODEL, + to_js({ + "messages": [ + {"role": "system", "content": _curriculum_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 512, + }), + ) + data = result.to_py() + text = data.get("response", "") + return _cors_response(json.dumps({"insights": text}), 200) + + async def handle_adapt_difficulty(self, request): + """ + Recommend a difficulty adjustment based on recent performance. 
+ + Request body: + { + "topic": "Python Programming", + "current_difficulty": "beginner", + "recent_scores": [0.9, 0.85, 0.95], + "struggles": ["recursion", "decorators"] // optional + } - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _curriculum_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 256, - }, - ) - raw = result.get("response", "") if isinstance(result, dict) else "" + Returns: + {"new_difficulty": "intermediate", "action": "increase", "reasoning": "…"} + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + topic = body.get("topic", "").strip() + current_difficulty = body.get("current_difficulty", "beginner") + recent_scores = body.get("recent_scores", []) + struggles = body.get("struggles", []) + + if not topic: + return _error("topic is required", 400) + if not recent_scores: + return _error("recent_scores is required", 400) + + avg = sum(recent_scores) / len(recent_scores) + struggle_text = "" + if struggles: + struggle_text = f"\nTopics the learner struggled with: {', '.join(struggles)}" + + prompt = ( + f"A learner studying \"{topic}\" at {current_difficulty} difficulty " + f"has achieved an average score of {avg:.0%} over their last " + f"{len(recent_scores)} attempt(s).{struggle_text}\n\n" + "Should the difficulty change? 
Respond with JSON:\n" + "{\n" + ' "new_difficulty": "",\n' + ' "action": "",\n' + ' "reasoning": ""\n' + "}" + ) + + result = await self.env.AI.run( + MODEL, + to_js({ + "messages": [ + {"role": "system", "content": _curriculum_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 256, + }), + ) + data = result.to_py() + raw = data.get("response", "") - start = raw.find("{") - end = raw.rfind("}") - if start != -1 and end > start: try: - adapt_data = json.loads(raw[start : end + 1]) + adapt_data = json.loads(raw) return _cors_response(json.dumps(adapt_data), 200) except (json.JSONDecodeError, ValueError): - pass - - return _cors_response( - json.dumps( + return _cors_response( + json.dumps( + { + "new_difficulty": current_difficulty, + "action": "maintain", + "reasoning": raw, + } + ), + 200, + ) + + async def handle_session_summary(self, request): + """ + Summarise a completed tutoring session. + + Request body: { - "new_difficulty": current_difficulty, - "action": "maintain", - "reasoning": raw, + "lesson_title": "Python Variables", + "conversation": [ + {"role": "user", "content": "…"}, + {"role": "assistant", "content": "…"}, + … + ] } - ), - 200, - ) - - -async def _handle_session_summary(request, env): - """ - Summarise a completed tutoring session. 
- - Request body: - { - "lesson_title": "Python Variables", - "conversation": [ - {"role": "user", "content": "…"}, - {"role": "assistant", "content": "…"}, - … - ] - } - - Returns: - {"summary": "<3-5 sentence session summary with takeaways and next steps>"} - """ - try: - body = await request.json() - except Exception: - return _error("Invalid JSON", 400) - - lesson_title = body.get("lesson_title", "").strip() - conversation = body.get("conversation", []) - - if not lesson_title: - return _error("lesson_title is required", 400) - if not conversation: - return _error("conversation is required", 400) - - dialogue = "\n".join( - f"{m.get('role', 'user').upper()}: {m.get('content', '')}" - for m in conversation - if m.get("role") != "system" - ) - - prompt = ( - f"A tutoring session on \"{lesson_title}\" just ended.\n" - f"Conversation:\n{dialogue}\n\n" - "Write a concise session summary (3–5 sentences) that:\n" - "1. Highlights the key concepts covered.\n" - "2. Notes any misconceptions that were corrected.\n" - "3. Suggests 1–2 concrete next steps for the learner." 
- ) - - result = await env.AI.run( - "@cf/meta/llama-3.1-8b-instruct", - { - "messages": [ - {"role": "system", "content": _tutor_system_prompt()}, - {"role": "user", "content": prompt}, - ], - "max_tokens": 512, - }, - ) - text = result.get("response", "") if isinstance(result, dict) else "" - return _cors_response(json.dumps({"summary": text}), 200) + Returns: + {"summary": "<3-5 sentence session summary with takeaways and next steps>"} + """ + try: + body = await request.json() + except Exception: + return _error("Invalid JSON", 400) + + lesson_title = body.get("lesson_title", "").strip() + conversation = body.get("conversation", []) + + if not lesson_title: + return _error("lesson_title is required", 400) + if not conversation: + return _error("conversation is required", 400) + + dialogue = "\n".join( + f"{m.get('role', 'user').upper()}: {m.get('content', '')}" + for m in conversation + if m.get("role") != "system" + ) + + prompt = ( + f"A tutoring session on \"{lesson_title}\" just ended.\n" + f"Conversation:\n{dialogue}\n\n" + "Write a concise session summary (3-5 sentences) that:\n" + "1. Highlights the key concepts covered.\n" + "2. Notes any misconceptions that were corrected.\n" + "3. Suggests 1-2 concrete next steps for the learner." 
+ ) + + result = await self.env.AI.run( + MODEL, + to_js({ + "messages": [ + {"role": "system", "content": _tutor_system_prompt()}, + {"role": "user", "content": prompt}, + ], + "max_tokens": 512, + }), + ) + data = result.to_py() + text = data.get("response", "") + return _cors_response(json.dumps({"summary": text}), 200) # --------------------------------------------------------------------------- # Helpers @@ -564,7 +574,8 @@ def _parse_evaluation(raw: str) -> dict: return result -def _cors_response(body: str, status: int): +def _cors_response(body: str|None, status: int) -> Response: + # TODO: handle this in a better way headers = { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*", diff --git a/tests/test_worker.py b/tests/test_worker.py index 6aa7ed3..4358c23 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -1,507 +1,244 @@ -""" -Unit tests for src/worker.py helper functions. - -These tests cover all pure-Python logic in the worker – the async handlers -are tested via mocked env.AI objects so they can run without a live -Cloudflare environment. -""" - -import asyncio -import json -import sys -import types -import unittest -from unittest.mock import AsyncMock, MagicMock - -# --------------------------------------------------------------------------- -# Shim: make the `Response` built-in available to worker.py without a real -# Cloudflare runtime. We define a minimal Response class and inject it into -# builtins before importing the module. 
-# --------------------------------------------------------------------------- - -class Response: # noqa: D101 - def __init__(self, body="", *, status=200, headers=None): - self.body = body - self.status = status - self.headers = headers or {} - - -import builtins - -builtins.Response = Response # type: ignore[attr-defined] - -# Now it is safe to import the worker module -sys.path.insert(0, "src") -import worker # noqa: E402 (import after sys.path manipulation) - - -# --------------------------------------------------------------------------- -# Helpers -# --------------------------------------------------------------------------- - -def run(coro): - """Run a coroutine synchronously.""" - return asyncio.run(coro) - - -def _make_env(ai_response: dict) -> MagicMock: - """Return a mock env whose AI.run returns *ai_response*.""" - env = MagicMock() - env.AI.run = AsyncMock(return_value=ai_response) - return env - - -def _make_request(method: str, url: str, body: dict | None = None) -> MagicMock: - """Return a mock Request object.""" - req = MagicMock() - req.method = method - req.url = url - if body is not None: - req.json = AsyncMock(return_value=body) - else: - req.json = AsyncMock(side_effect=ValueError("no body")) - return req - - -# =========================================================================== -# _parse_evaluation -# =========================================================================== - -class TestParseEvaluation(unittest.TestCase): - def test_parses_all_fields(self): - raw = ( - "SCORE: 0.85\n" - "FEEDBACK: Great answer, you covered the main points.\n" - "CORRECT_ANSWER: A function that calls itself with a base case." 
- ) - result = worker._parse_evaluation(raw) - self.assertAlmostEqual(result["score"], 0.85) - self.assertIn("Great answer", result["feedback"]) - self.assertIn("base case", result["correct_answer"]) - - def test_falls_back_on_malformed_score(self): - raw = "SCORE: not-a-number\nFEEDBACK: Okay" - result = worker._parse_evaluation(raw) - self.assertEqual(result["score"], 0.5) - self.assertEqual(result["feedback"], "Okay") - - def test_returns_defaults_for_empty_string(self): - result = worker._parse_evaluation("") - self.assertEqual(result["score"], 0.5) - self.assertEqual(result["feedback"], "") - self.assertEqual(result["correct_answer"], "") - - def test_score_field_only(self): - result = worker._parse_evaluation("SCORE: 1.0") - self.assertAlmostEqual(result["score"], 1.0) - - def test_correct_answer_field_only(self): - result = worker._parse_evaluation("CORRECT_ANSWER: Recursion terminates at the base case.") - self.assertIn("base case", result["correct_answer"]) - - -# =========================================================================== -# _tutor_system_prompt -# =========================================================================== - -class TestTutorSystemPrompt(unittest.TestCase): - def test_contains_learnpilot(self): - prompt = worker._tutor_system_prompt() - self.assertIn("LearnPilot", prompt) - - def test_appends_lesson_context(self): - prompt = worker._tutor_system_prompt("Variables store values.") - self.assertIn("Variables store values.", prompt) - self.assertIn("lesson material", prompt) - - def test_no_context_by_default(self): - prompt = worker._tutor_system_prompt() - self.assertNotIn("lesson material", prompt) - - -# =========================================================================== -# _curriculum_system_prompt -# =========================================================================== - -class TestCurriculumSystemPrompt(unittest.TestCase): - def test_contains_curriculum_keywords(self): - prompt = 
worker._curriculum_system_prompt() - self.assertIn("curriculum", prompt.lower()) - self.assertIn("learning", prompt.lower()) - - -# =========================================================================== -# _cors_response / _error -# =========================================================================== - -class TestCorsResponse(unittest.TestCase): - def test_status_code_is_preserved(self): - resp = worker._cors_response('{"ok": true}', 201) - self.assertEqual(resp.status, 201) - - def test_cors_headers_present(self): - resp = worker._cors_response("{}", 200) - self.assertIn("Access-Control-Allow-Origin", resp.headers) - self.assertEqual(resp.headers["Access-Control-Allow-Origin"], "*") - - def test_error_returns_json_with_error_key(self): - resp = worker._error("Bad request", 400) - self.assertEqual(resp.status, 400) - data = json.loads(resp.body) - self.assertIn("error", data) - self.assertEqual(data["error"], "Bad request") - - -# =========================================================================== -# on_fetch – routing -# =========================================================================== - -class TestRouting(unittest.TestCase): - def test_options_returns_204(self): - req = _make_request("OPTIONS", "https://example.com/ai/chat") - resp = run(worker.on_fetch(req, MagicMock())) - self.assertEqual(resp.status, 204) - - def test_health_returns_200(self): - req = _make_request("GET", "https://example.com/health") - resp = run(worker.on_fetch(req, MagicMock())) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertEqual(data["status"], "ok") - - def test_unknown_route_returns_404(self): - req = _make_request("GET", "https://example.com/unknown") - resp = run(worker.on_fetch(req, MagicMock())) - self.assertEqual(resp.status, 404) - - -# =========================================================================== -# /ai/chat handler -# =========================================================================== - -class 
TestHandleChat(unittest.TestCase): - def test_returns_ai_response(self): - env = _make_env({"response": "Recursion is when a function calls itself."}) - req = _make_request( - "POST", - "https://w.example.com/ai/chat", - {"messages": [{"role": "user", "content": "Explain recursion"}]}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("Recursion", data["response"]) - - def test_missing_messages_returns_400(self): - env = _make_env({}) - req = _make_request("POST", "https://w.example.com/ai/chat", {}) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_invalid_json_returns_400(self): - env = _make_env({}) - req = _make_request("POST", "https://w.example.com/ai/chat") - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - -# =========================================================================== -# /ai/explain handler -# =========================================================================== - -class TestHandleExplain(unittest.TestCase): - def test_returns_explanation(self): - env = _make_env({"response": "Recursion means a function calls itself."}) - req = _make_request( - "POST", - "https://w.example.com/ai/explain", - {"concept": "recursion", "skill_level": "beginner"}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("explanation", data) - - def test_missing_concept_returns_400(self): - env = _make_env({}) - req = _make_request("POST", "https://w.example.com/ai/explain", {"skill_level": "beginner"}) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_learning_style_included_in_prompt(self): - env = _make_env({"response": "Hands-on example …"}) - req = _make_request( - "POST", - "https://w.example.com/ai/explain", - {"concept": "loops", "skill_level": "intermediate", "learning_style": "kinesthetic"}, - 
) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - # Verify the AI.run call contained the style hint - call_payload = env.AI.run.call_args[0][1] - full_text = " ".join(m["content"] for m in call_payload["messages"]) - self.assertIn("kinesthetic", full_text.lower()) - - -# =========================================================================== -# /ai/practice handler -# =========================================================================== - -class TestHandlePractice(unittest.TestCase): - def test_returns_question(self): - env = _make_env({"response": "**Question:** What is a for loop?"}) - req = _make_request( - "POST", - "https://w.example.com/ai/practice", - {"topic": "Python loops", "difficulty": "beginner"}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("question", data) - - def test_missing_topic_returns_400(self): - env = _make_env({}) - req = _make_request("POST", "https://w.example.com/ai/practice", {"difficulty": "beginner"}) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - -# =========================================================================== -# /ai/evaluate handler -# =========================================================================== - -class TestHandleEvaluate(unittest.TestCase): - def test_returns_score_and_feedback(self): - ai_raw = "SCORE: 0.9\nFEEDBACK: Excellent!\nCORRECT_ANSWER: A named storage location." 
- env = _make_env({"response": ai_raw}) - req = _make_request( - "POST", - "https://w.example.com/ai/evaluate", - {"question": "What is a variable?", "answer": "A box that holds a value"}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertAlmostEqual(float(data["score"]), 0.9) - self.assertIn("feedback", data) - self.assertIn("correct_answer", data) - - def test_missing_question_returns_400(self): - env = _make_env({}) - req = _make_request("POST", "https://w.example.com/ai/evaluate", {"answer": "something"}) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_missing_answer_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", "https://w.example.com/ai/evaluate", {"question": "What is X?"} - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - -# =========================================================================== -# /ai/path handler -# =========================================================================== - -class TestHandleGeneratePath(unittest.TestCase): - def test_returns_ordered_lesson_ids(self): - ai_json = '{"ordered_lesson_ids": [3, 1, 2], "rationale": "Start simple."}' - env = _make_env({"response": ai_json}) - req = _make_request( - "POST", - "https://w.example.com/ai/path", - { - "topic": "Python", - "skill_level": "beginner", - "learning_style": "visual", - "available_lessons": [ - {"id": 1, "title": "Variables", "type": "theory", "difficulty": "beginner"}, - {"id": 2, "title": "Loops", "type": "practice", "difficulty": "beginner"}, - {"id": 3, "title": "Intro", "type": "theory", "difficulty": "beginner"}, - ], - }, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("ordered_lesson_ids", data) - self.assertEqual(data["ordered_lesson_ids"], [3, 1, 2]) - - def 
test_falls_back_gracefully_on_malformed_json(self): - env = _make_env({"response": "Sorry, cannot generate path."}) - req = _make_request( - "POST", - "https://w.example.com/ai/path", - {"topic": "Python", "skill_level": "beginner", "learning_style": "visual", "available_lessons": []}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("ordered_lesson_ids", data) - self.assertEqual(data["ordered_lesson_ids"], []) - - def test_missing_topic_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/path", - {"skill_level": "beginner", "available_lessons": []}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - -# =========================================================================== -# /ai/progress handler -# =========================================================================== - -class TestHandleProgressInsights(unittest.TestCase): - def test_returns_insights(self): - env = _make_env({"response": "You're making steady progress. 
Keep practising loops."}) - req = _make_request( - "POST", - "https://w.example.com/ai/progress", - { - "learner_name": "Alice", - "topic": "Python", - "progress_data": [ - {"lesson": "Variables", "score": 0.9, "completed": True, "attempts": 1} - ], - }, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("insights", data) - - def test_missing_topic_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/progress", - {"learner_name": "Alice", "progress_data": [{"lesson": "x", "score": 1.0}]}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_missing_progress_data_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/progress", - {"learner_name": "Alice", "topic": "Python"}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - -# =========================================================================== -# /ai/adapt handler -# =========================================================================== - -class TestHandleAdaptDifficulty(unittest.TestCase): - def test_returns_adapt_recommendation(self): - ai_json = '{"new_difficulty": "intermediate", "action": "increase", "reasoning": "High scores."}' - env = _make_env({"response": ai_json}) - req = _make_request( - "POST", - "https://w.example.com/ai/adapt", - {"topic": "Python", "current_difficulty": "beginner", "recent_scores": [0.9, 0.95]}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertEqual(data["action"], "increase") - self.assertEqual(data["new_difficulty"], "intermediate") - - def test_missing_topic_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/adapt", - {"current_difficulty": "beginner", "recent_scores": [0.9]}, - ) - resp = 
run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_missing_recent_scores_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/adapt", - {"topic": "Python", "current_difficulty": "beginner"}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - -# =========================================================================== -# /ai/summary handler -# =========================================================================== - -class TestHandleSessionSummary(unittest.TestCase): - def test_returns_summary(self): - env = _make_env({"response": "The session covered variables and loops."}) - req = _make_request( - "POST", - "https://w.example.com/ai/summary", - { - "lesson_title": "Python Variables", - "conversation": [ - {"role": "user", "content": "What is a variable?"}, - {"role": "assistant", "content": "A variable is a named storage location."}, - ], - }, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - data = json.loads(resp.body) - self.assertIn("summary", data) - - def test_missing_lesson_title_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/summary", - {"conversation": [{"role": "user", "content": "Hi"}]}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_missing_conversation_returns_400(self): - env = _make_env({}) - req = _make_request( - "POST", - "https://w.example.com/ai/summary", - {"lesson_title": "Python Variables"}, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 400) - - def test_system_messages_excluded_from_dialogue(self): - env = _make_env({"response": "Good session."}) - req = _make_request( - "POST", - "https://w.example.com/ai/summary", - { - "lesson_title": "Test", - "conversation": [ - {"role": "system", "content": "SECRET INSTRUCTIONS"}, - {"role": "user", "content": "Hello"}, - 
{"role": "assistant", "content": "Hi!"}, - ], - }, - ) - resp = run(worker.on_fetch(req, env)) - self.assertEqual(resp.status, 200) - # Verify the AI was NOT passed the system message content in the user prompt - call_payload = env.AI.run.call_args[0][1] - user_prompt = call_payload["messages"][-1]["content"] - self.assertNotIn("SECRET INSTRUCTIONS", user_prompt) - - -if __name__ == "__main__": - unittest.main() +import os +import pytest +import requests + +BASE_URL = os.environ.get("BASE_URL", "http://localhost:8787") +TIMEOUT = 60 + +@pytest.fixture(scope="module") +def api_url(): + """Fixture to provide the base URL for the API.""" + return BASE_URL + + +def test_health_check(api_url): + """Test the health check endpoint.""" + response = requests.get(f"{api_url}/health", timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert data.get("status") == "ok" + assert data.get("service") == "learnpilot-ai" + + +def test_cors_options(api_url): + """Test CORS preflight response.""" + response = requests.options(f"{api_url}/ai/chat", timeout=TIMEOUT) + assert response.status_code == 204 + assert response.headers.get("Access-Control-Allow-Origin") == "*" + + +def test_invalid_json_body(api_url): + """Test invalid JSON body.""" + response = requests.post( + f"{api_url}/ai/explain", + data="not valid json", + headers={"Content-Type": "application/json"}, + timeout=TIMEOUT + ) + assert response.status_code == 400 + assert response.json().get("error") == "Invalid JSON" + + +def test_not_found(api_url): + """Test unknown endpoint returns 404.""" + response = requests.get(f"{api_url}/unknown-endpoint", timeout=TIMEOUT) + assert response.status_code == 404 + data = response.json() + assert data.get("error") == "Not found" + + +def test_explain_valid(api_url): + """Test /ai/explain endpoint with valid data.""" + payload = { + "concept": "recursion", + "skill_level": "beginner", + "learning_style": "visual" + } + response = 
requests.post(f"{api_url}/ai/explain", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "explanation" in data + assert len(data["explanation"]) > 0 + + +def test_explain_missing_concept(api_url): + """Test /ai/explain missing the concept field.""" + response = requests.post(f"{api_url}/ai/explain", json={"skill_level": "beginner"}, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "concept is required" + + +def test_chat_valid(api_url): + """Test /ai/chat endpoint with valid messages.""" + payload = { + "messages": [{"role": "user", "content": "Hello, AI tutor!"}], + "max_tokens": 50 + } + response = requests.post(f"{api_url}/ai/chat", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "response" in data + assert len(data["response"]) > 0 + + +def test_chat_missing_messages(api_url): + """Test /ai/chat missing the messages field.""" + response = requests.post(f"{api_url}/ai/chat", json={}, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "messages is required" + + +def test_practice_valid(api_url): + """Test /ai/practice endpoint with a valid topic.""" + payload = { + "topic": "Python loops", + "difficulty": "beginner", + "question_type": "open-ended" + } + response = requests.post(f"{api_url}/ai/practice", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "question" in data + assert len(data["question"]) > 0 + + +def test_practice_missing_topic(api_url): + """Test /ai/practice missing the topic field.""" + response = requests.post(f"{api_url}/ai/practice", json={"difficulty": "beginner"}, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "topic is required" + + +def test_evaluate_valid(api_url): + """Test /ai/evaluate endpoint with valid question and answer.""" + payload = { + 
"question": "What is a variable in programming?", + "answer": "A variable is like an empty box that can hold a value.", + "expected_answer": "A named storage location in memory." + } + response = requests.post(f"{api_url}/ai/evaluate", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "score" in data + assert "feedback" in data + assert "correct_answer" in data + + +def test_evaluate_missing_fields(api_url): + """Test /ai/evaluate missing the question/answer field.""" + payload = {"question": "What is a variable?"} + response = requests.post(f"{api_url}/ai/evaluate", json=payload, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "question and answer are required" + + +def test_path_valid(api_url): + """Test /ai/path endpoint for generating learning path.""" + payload = { + "topic": "Python Programming", + "skill_level": "beginner", + "learning_style": "auditory", + "available_lessons": [ + {"id": 1, "title": "Variables", "type": "theory", "difficulty": "beginner"}, + {"id": 2, "title": "Loops", "type": "practice", "difficulty": "beginner"} + ] + } + response = requests.post(f"{api_url}/ai/path", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "ordered_lesson_ids" in data + assert isinstance(data["ordered_lesson_ids"], list) + + +def test_path_missing_topic(api_url): + """Test /ai/path missing the topic field.""" + payload = { + "skill_level": "beginner", + "available_lessons": [] + } + response = requests.post(f"{api_url}/ai/path", json=payload, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "topic is required" + + +def test_progress_valid(api_url): + """Test /ai/progress endpoint for generating progress insights.""" + payload = { + "learner_name": "Andy", + "topic": "Python Programming", + "progress_data": [ + {"lesson": "Variables", "score": 0.9, "completed": True, 
"attempts": 1} + ] + } + response = requests.post(f"{api_url}/ai/progress", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "insights" in data + + +def test_progress_missing_fields(api_url): + """Test /ai/progress missing the progress_data field.""" + payload = { + "learner_name": "Andy", + "topic": "Python Programming" + } + response = requests.post(f"{api_url}/ai/progress", json=payload, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "progress_data is required" + + +def test_adapt_valid(api_url): + """Test /ai/adapt endpoint for predicting difficulty.""" + payload = { + "topic": "Python Programming", + "current_difficulty": "beginner", + "recent_scores": [0.9, 0.95, 0.85] + } + response = requests.post(f"{api_url}/ai/adapt", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "new_difficulty" in data + assert "action" in data + assert "reasoning" in data + + +def test_adapt_missing_topic(api_url): + """Test /ai/adapt missing the topic field.""" + payload = { + "current_difficulty": "beginner", + "recent_scores": [0.9, 0.95] + } + response = requests.post(f"{api_url}/ai/adapt", json=payload, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "topic is required" + + +def test_adapt_empty_scores(api_url): + """Test /ai/adapt endpoint with empty scores.""" + payload = {"topic": "Python", "current_difficulty": "beginner", "recent_scores": []} + response = requests.post(f"{api_url}/ai/adapt", json=payload, timeout=TIMEOUT) + assert response.status_code == 400 + +def test_summary_valid(api_url): + """Test /ai/summary endpoint for summarizing session.""" + payload = { + "lesson_title": "Python Basics", + "conversation": [ + {"role": "user", "content": "What is a variable?"}, + {"role": "assistant", "content": "A variable is an object in memory..."} + ] + } + response = 
requests.post(f"{api_url}/ai/summary", json=payload, timeout=TIMEOUT) + assert response.status_code == 200 + data = response.json() + assert "summary" in data + + +def test_summary_missing_conversation(api_url): + """Test /ai/summary missing the conversation field.""" + payload = { + "lesson_title": "Python Basics" + } + response = requests.post(f"{api_url}/ai/summary", json=payload, timeout=TIMEOUT) + assert response.status_code == 400 + assert response.json().get("error") == "conversation is required" diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000..ff876cc --- /dev/null +++ b/uv.lock @@ -0,0 +1,181 @@ +version = 1 +revision = 3 +requires-python = ">=3.13" + +[[package]] +name = "certifi" +version = "2026.2.25" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7b/60/e3bec1881450851b087e301bedc3daa9377a4d45f1c26aa90b0b235e38aa/charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6", size = 143363, upload-time = "2026-03-15T18:53:25.478Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1e/1d/4fdabeef4e231153b6ed7567602f3b68265ec4e5b76d6024cf647d43d981/charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f", size = 294823, upload-time = "2026-03-15T18:51:15.755Z" }, + { url = "https://files.pythonhosted.org/packages/47/7b/20e809b89c69d37be748d98e84dce6820bf663cf19cf6b942c951a3e8f41/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843", size = 198527, upload-time = "2026-03-15T18:51:17.177Z" }, + { url = "https://files.pythonhosted.org/packages/37/a6/4f8d27527d59c039dce6f7622593cdcd3d70a8504d87d09eb11e9fdc6062/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf", size = 218388, upload-time = "2026-03-15T18:51:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/f6/9b/4770ccb3e491a9bacf1c46cc8b812214fe367c86a96353ccc6daf87b01ec/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8", size = 214563, upload-time = "2026-03-15T18:51:20.374Z" }, + { url = "https://files.pythonhosted.org/packages/2b/58/a199d245894b12db0b957d627516c78e055adc3a0d978bc7f65ddaf7c399/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9", size = 206587, upload-time = "2026-03-15T18:51:21.807Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/3def227f1ec56f5c69dfc8392b8bd63b11a18ca8178d9211d7cc5e5e4f27/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = 
"sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88", size = 194724, upload-time = "2026-03-15T18:51:23.508Z" }, + { url = "https://files.pythonhosted.org/packages/58/ab/9318352e220c05efd31c2779a23b50969dc94b985a2efa643ed9077bfca5/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84", size = 202956, upload-time = "2026-03-15T18:51:25.239Z" }, + { url = "https://files.pythonhosted.org/packages/75/13/f3550a3ac25b70f87ac98c40d3199a8503676c2f1620efbf8d42095cfc40/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd", size = 201923, upload-time = "2026-03-15T18:51:26.682Z" }, + { url = "https://files.pythonhosted.org/packages/1b/db/c5c643b912740b45e8eec21de1bbab8e7fc085944d37e1e709d3dcd9d72f/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c", size = 195366, upload-time = "2026-03-15T18:51:28.129Z" }, + { url = "https://files.pythonhosted.org/packages/5a/67/3b1c62744f9b2448443e0eb160d8b001c849ec3fef591e012eda6484787c/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194", size = 219752, upload-time = "2026-03-15T18:51:29.556Z" }, + { url = "https://files.pythonhosted.org/packages/f6/98/32ffbaf7f0366ffb0445930b87d103f6b406bc2c271563644bde8a2b1093/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc", size = 203296, upload-time = "2026-03-15T18:51:30.921Z" }, + { url = "https://files.pythonhosted.org/packages/41/12/5d308c1bbe60cabb0c5ef511574a647067e2a1f631bc8634fcafaccd8293/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f", size = 215956, upload-time = "2026-03-15T18:51:32.399Z" }, + { url = "https://files.pythonhosted.org/packages/53/e9/5f85f6c5e20669dbe56b165c67b0260547dea97dba7e187938833d791687/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2", size = 208652, upload-time = "2026-03-15T18:51:34.214Z" }, + { url = "https://files.pythonhosted.org/packages/f1/11/897052ea6af56df3eef3ca94edafee410ca699ca0c7b87960ad19932c55e/charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d", size = 143940, upload-time = "2026-03-15T18:51:36.15Z" }, + { url = "https://files.pythonhosted.org/packages/a1/5c/724b6b363603e419829f561c854b87ed7c7e31231a7908708ac086cdf3e2/charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389", size = 154101, upload-time = "2026-03-15T18:51:37.876Z" }, + { url = "https://files.pythonhosted.org/packages/01/a5/7abf15b4c0968e47020f9ca0935fb3274deb87cb288cd187cad92e8cdffd/charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f", size = 143109, upload-time = "2026-03-15T18:51:39.565Z" }, + { url = "https://files.pythonhosted.org/packages/25/6f/ffe1e1259f384594063ea1869bfb6be5cdb8bc81020fc36c3636bc8302a1/charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8", size = 294458, upload-time = "2026-03-15T18:51:41.134Z" }, + { url = "https://files.pythonhosted.org/packages/56/60/09bb6c13a8c1016c2ed5c6a6488e4ffef506461aa5161662bd7636936fb1/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421", size = 199277, upload-time = "2026-03-15T18:51:42.953Z" }, + { url = "https://files.pythonhosted.org/packages/00/50/dcfbb72a5138bbefdc3332e8d81a23494bf67998b4b100703fd15fa52d81/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2", size = 218758, upload-time = "2026-03-15T18:51:44.339Z" }, + { url = "https://files.pythonhosted.org/packages/03/b3/d79a9a191bb75f5aa81f3aaaa387ef29ce7cb7a9e5074ba8ea095cc073c2/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30", size = 215299, upload-time = "2026-03-15T18:51:45.871Z" }, + { url = "https://files.pythonhosted.org/packages/76/7e/bc8911719f7084f72fd545f647601ea3532363927f807d296a8c88a62c0d/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db", size = 206811, upload-time = "2026-03-15T18:51:47.308Z" }, + { url = "https://files.pythonhosted.org/packages/e2/40/c430b969d41dda0c465aa36cc7c2c068afb67177bef50905ac371b28ccc7/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8", size = 193706, upload-time = "2026-03-15T18:51:48.849Z" }, + { url = "https://files.pythonhosted.org/packages/48/15/e35e0590af254f7df984de1323640ef375df5761f615b6225ba8deb9799a/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815", size = 202706, upload-time = "2026-03-15T18:51:50.257Z" }, + { url = 
"https://files.pythonhosted.org/packages/5e/bd/f736f7b9cc5e93a18b794a50346bb16fbfd6b37f99e8f306f7951d27c17c/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a", size = 202497, upload-time = "2026-03-15T18:51:52.012Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ba/2cc9e3e7dfdf7760a6ed8da7446d22536f3d0ce114ac63dee2a5a3599e62/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43", size = 193511, upload-time = "2026-03-15T18:51:53.723Z" }, + { url = "https://files.pythonhosted.org/packages/9e/cb/5be49b5f776e5613be07298c80e1b02a2d900f7a7de807230595c85a8b2e/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0", size = 220133, upload-time = "2026-03-15T18:51:55.333Z" }, + { url = "https://files.pythonhosted.org/packages/83/43/99f1b5dad345accb322c80c7821071554f791a95ee50c1c90041c157ae99/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1", size = 203035, upload-time = "2026-03-15T18:51:56.736Z" }, + { url = "https://files.pythonhosted.org/packages/87/9a/62c2cb6a531483b55dddff1a68b3d891a8b498f3ca555fbcf2978e804d9d/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f", size = 216321, upload-time = "2026-03-15T18:51:58.17Z" }, + { url = "https://files.pythonhosted.org/packages/6e/79/94a010ff81e3aec7c293eb82c28f930918e517bc144c9906a060844462eb/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815", size = 208973, upload-time = "2026-03-15T18:51:59.998Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/57/4ecff6d4ec8585342f0c71bc03efaa99cb7468f7c91a57b105bcd561cea8/charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d", size = 144610, upload-time = "2026-03-15T18:52:02.213Z" }, + { url = "https://files.pythonhosted.org/packages/80/94/8434a02d9d7f168c25767c64671fead8d599744a05d6a6c877144c754246/charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f", size = 154962, upload-time = "2026-03-15T18:52:03.658Z" }, + { url = "https://files.pythonhosted.org/packages/46/4c/48f2cdbfd923026503dfd67ccea45c94fd8fe988d9056b468579c66ed62b/charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e", size = 143595, upload-time = "2026-03-15T18:52:05.123Z" }, + { url = "https://files.pythonhosted.org/packages/31/93/8878be7569f87b14f1d52032946131bcb6ebbd8af3e20446bc04053dc3f1/charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866", size = 314828, upload-time = "2026-03-15T18:52:06.831Z" }, + { url = "https://files.pythonhosted.org/packages/06/b6/fae511ca98aac69ecc35cde828b0a3d146325dd03d99655ad38fc2cc3293/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc", size = 208138, upload-time = "2026-03-15T18:52:08.239Z" }, + { url = "https://files.pythonhosted.org/packages/54/57/64caf6e1bf07274a1e0b7c160a55ee9e8c9ec32c46846ce59b9c333f7008/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e", size = 224679, upload-time = "2026-03-15T18:52:10.043Z" }, + { url 
= "https://files.pythonhosted.org/packages/aa/cb/9ff5a25b9273ef160861b41f6937f86fae18b0792fe0a8e75e06acb08f1d/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077", size = 223475, upload-time = "2026-03-15T18:52:11.854Z" }, + { url = "https://files.pythonhosted.org/packages/fc/97/440635fc093b8d7347502a377031f9605a1039c958f3cd18dcacffb37743/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f", size = 215230, upload-time = "2026-03-15T18:52:13.325Z" }, + { url = "https://files.pythonhosted.org/packages/cd/24/afff630feb571a13f07c8539fbb502d2ab494019492aaffc78ef41f1d1d0/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e", size = 199045, upload-time = "2026-03-15T18:52:14.752Z" }, + { url = "https://files.pythonhosted.org/packages/e5/17/d1399ecdaf7e0498c327433e7eefdd862b41236a7e484355b8e0e5ebd64b/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484", size = 211658, upload-time = "2026-03-15T18:52:16.278Z" }, + { url = "https://files.pythonhosted.org/packages/b5/38/16baa0affb957b3d880e5ac2144caf3f9d7de7bc4a91842e447fbb5e8b67/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7", size = 210769, upload-time = "2026-03-15T18:52:17.782Z" }, + { url = "https://files.pythonhosted.org/packages/05/34/c531bc6ac4c21da9ddfddb3107be2287188b3ea4b53b70fc58f2a77ac8d8/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff", size = 
201328, upload-time = "2026-03-15T18:52:19.553Z" }, + { url = "https://files.pythonhosted.org/packages/fa/73/a5a1e9ca5f234519c1953608a03fe109c306b97fdfb25f09182babad51a7/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e", size = 225302, upload-time = "2026-03-15T18:52:21.043Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f6/cd782923d112d296294dea4bcc7af5a7ae0f86ab79f8fefbda5526b6cfc0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659", size = 211127, upload-time = "2026-03-15T18:52:22.491Z" }, + { url = "https://files.pythonhosted.org/packages/0e/c5/0b6898950627af7d6103a449b22320372c24c6feda91aa24e201a478d161/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602", size = 222840, upload-time = "2026-03-15T18:52:24.113Z" }, + { url = "https://files.pythonhosted.org/packages/7d/25/c4bba773bef442cbdc06111d40daa3de5050a676fa26e85090fc54dd12f0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407", size = 216890, upload-time = "2026-03-15T18:52:25.541Z" }, + { url = "https://files.pythonhosted.org/packages/35/1a/05dacadb0978da72ee287b0143097db12f2e7e8d3ffc4647da07a383b0b7/charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579", size = 155379, upload-time = "2026-03-15T18:52:27.05Z" }, + { url = "https://files.pythonhosted.org/packages/5d/7a/d269d834cb3a76291651256f3b9a5945e81d0a49ab9f4a498964e83c0416/charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4", size = 169043, upload-time = "2026-03-15T18:52:28.502Z" }, + { url = 
"https://files.pythonhosted.org/packages/23/06/28b29fba521a37a8932c6a84192175c34d49f84a6d4773fa63d05f9aff22/charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c", size = 148523, upload-time = "2026-03-15T18:52:29.956Z" }, + { url = "https://files.pythonhosted.org/packages/2a/68/687187c7e26cb24ccbd88e5069f5ef00eba804d36dde11d99aad0838ab45/charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69", size = 61455, upload-time = "2026-03-15T18:53:23.833Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + 
+[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "learnpilot" +version = "0.1.0" +source = { virtual = "." } + +[package.optional-dependencies] +test = [ + { name = "pytest" }, + { name = "requests" }, +] + +[package.metadata] +requires-dist = [ + { name = "pytest", marker = "extra == 'test'" }, + { name = "requests", marker = "extra == 'test'" }, +] +provides-extras = ["test"] + +[[package]] +name = "packaging" +version = "26.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pytest" +version = "9.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, +] diff --git a/wrangler.toml b/wrangler.toml index 9c3770c..6fec056 100644 --- a/wrangler.toml +++ b/wrangler.toml @@ -1,23 +1,7 @@ -name = "learnpilot-ai" -main = "src/worker.py" 
-compatibility_date = "2024-09-23" +compatibility_date = "2026-03-23" compatibility_flags = ["python_workers"] +main = "src/worker.py" +name = "learnpilot-ai" [ai] binding = "AI" - -[vars] -ENVIRONMENT = "production" - -# Optional: bind a KV namespace for caching AI responses. -# Create a namespace with: wrangler kv:namespace create CACHE -# [[kv_namespaces]] -# binding = "CACHE" -# id = "" - -# Optional: rate-limit binding -# [[unsafe.bindings]] -# name = "RATE_LIMITER" -# type = "ratelimit" -# namespace_id = "1001" -# simple = { limit = 100, period = 60 }