Compare commits


40 Commits

SHA1 Message Date
3a909f5ac1 typo 2025-05-30 16:59:53 +10:00
d6d3e2f3af env set up for remote 2025-05-30 16:56:15 +10:00
0fc39350b0 fixing more merge conflicts 2025-05-30 16:55:11 +10:00
0e71556c15 env vars and starting work on repo_manager 2025-05-30 16:54:00 +10:00
9c9451d4e5 latest commits 2025-05-30 16:50:53 +10:00
3165a9ae08 getting gemma3 in the mix 2025-05-30 16:49:28 +10:00
99c3cbdb7f set up chroma 2025-05-30 16:46:41 +10:00
ce24a011ed cleanup directory 2025-05-30 16:45:39 +10:00
ae96075275 env set up for remote 2025-05-30 16:45:03 +10:00
641f11e0aa get rid of think tags 2025-05-30 16:44:31 +10:00
7c724d8177 merge conflict fixing finalisation 2025-05-30 16:36:18 +10:00
b87dc1da9e merge fix 2025-05-30 16:02:37 +10:00
a3db1ae993 env vars and starting work on repo_manager 2025-05-30 16:00:54 +10:00
67070df04b latest commits 2025-05-30 15:54:47 +10:00
a877cdc464 getting gemma3 in the mix 2025-05-30 15:52:08 +10:00
f2b862bb75 integrating agentic chroma 2025-05-30 15:50:48 +10:00
1630df04e6 integrating agentic chroma 2025-05-30 15:49:00 +10:00
c3c4445d33 set up chroma 2025-05-30 15:43:34 +10:00
9b57e2b9ea further directory cleanup 2025-05-30 15:42:30 +10:00
20233b6264 cleanup directory 2025-05-30 15:42:29 +10:00
4e65c60611 env set up for remote 2025-05-30 15:41:59 +10:00
d91d82b281 fixing more merge conflicts 2025-05-30 15:40:42 +10:00
9e9ac7b99d finished repo work 2025-05-30 15:17:52 +10:00
328e870bf0 finailising repo manager 2025-05-29 23:55:12 +10:00
546b86738a TODO: parse URL paramters correctly 2025-05-29 17:29:48 +10:00
1bb99c2343 change the .env to openthinkier as editor 2025-05-29 16:30:45 +10:00
c5444f1a7f merge is going to suck 2025-05-27 23:33:27 +10:00
4119b2ec41 fix dockerifle 2025-05-26 00:18:07 +10:00
01b7f1cd78 untested git stuff 2025-05-24 00:25:35 +10:00
c606f72d90 env vars and starting work on repo_manager 2025-05-23 15:47:25 +10:00
8a64d9c959 fix pyrefly typuing errors 2025-05-19 11:38:15 +10:00
0c090c8489 add vscode to gitignore 2025-05-19 11:28:10 +10:00
e0b2c80bc9 latest commits 2025-05-19 11:07:41 +10:00
44141ab545 pre attempt at langchain 2025-03-25 15:26:56 +10:00
e57d6eb6b6 getting gemma3 in the mix 2025-03-17 16:33:16 +10:00
c80f692cb0 update main.py 2025-02-27 09:44:19 +10:00
bc2f8a8bca move to vm 2025-02-27 09:41:01 +10:00
e7f7a79d86 integrating agentic chroma 2025-02-26 23:16:00 +10:00
9b11fea0e7 integrating agentic chroma 2025-02-26 23:13:27 +10:00
6320571528 set up chroma 2025-02-25 22:11:45 +10:00
8 changed files with 100 additions and 472 deletions


@@ -1,56 +0,0 @@
name: Create Blog Article if new notes exist
on:
  schedule:
    - cron: "15 18 * * *"
  push:
    branches:
      - master
jobs:
  prepare_blog_drafts_and_push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install dependencies
        shell: bash
        run: |
          apt update && apt upgrade -y
          apt install rustc cargo python-is-python3 pip python3-venv python3-virtualenv libmagic-dev git -y
          virtualenv .venv
          source .venv/bin/activate
          pip install --upgrade pip
          pip install -r requirements.txt
          git config --global user.name "Blog Creator"
          git config --global user.email "ridgway.infrastructure@gmail.com"
          git config --global push.autoSetupRemote true
      - name: Create .env
        shell: bash
        run: |
          echo "TRILIUM_HOST=${{ vars.TRILIUM_HOST }}" > .env
          echo "TRILIUM_PORT='${{ vars.TRILIUM_PORT }}'" >> .env
          echo "TRILIUM_PROTOCOL='${{ vars.TRILIUM_PROTOCOL }}'" >> .env
          echo "TRILIUM_PASS='${{ secrets.TRILIUM_PASS }}'" >> .env
          echo "TRILIUM_TOKEN='${{ secrets.TRILIUM_TOKEN }}'" >> .env
          echo "OLLAMA_PROTOCOL='${{ vars.OLLAMA_PROTOCOL }}'" >> .env
          echo "OLLAMA_HOST='${{ vars.OLLAMA_HOST }}'" >> .env
          echo "OLLAMA_PORT='${{ vars.OLLAMA_PORT }}'" >> .env
          echo "EMBEDDING_MODEL='${{ vars.EMBEDDING_MODEL }}'" >> .env
          echo "EDITOR_MODEL='${{ vars.EDITOR_MODEL }}'" >> .env
          export PURE='["${{ vars.CONTENT_CREATOR_MODELS_1 }}", "${{ vars.CONTENT_CREATOR_MODELS_2 }}", "${{ vars.CONTENT_CREATOR_MODELS_3 }}", "${{ vars.CONTENT_CREATOR_MODELS_4 }}"]'
          echo "CONTENT_CREATOR_MODELS='$PURE'" >> .env
          echo "GIT_PROTOCOL='${{ vars.GIT_PROTOCOL }}'" >> .env
          echo "GIT_REMOTE='${{ vars.GIT_REMOTE }}'" >> .env
          echo "GIT_USER='${{ vars.GIT_USER }}'" >> .env
          echo "GIT_PASS='${{ secrets.GIT_PASS }}'" >> .env
          echo "N8N_SECRET='${{ secrets.N8N_SECRET }}'" >> .env
          echo "N8N_WEBHOOK_URL='${{ vars.N8N_WEBHOOK_URL }}'" >> .env
          echo "CHROMA_HOST='${{ vars.CHROMA_HOST }}'" >> .env
          echo "CHROMA_PORT='${{ vars.CHROMA_PORT }}'" >> .env
      - name: Create Blogs
        shell: bash
        run: |
          source .venv/bin/activate
          python src/main.py
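A side note on the "Create .env" step above: CONTENT_CREATOR_MODELS is written into .env as a single quoted JSON array, which the generator later reads back with json.loads. A minimal sketch of that round trip, with illustrative model names standing in for the CI variables:

import json
import os

# Illustrative stand-ins for vars.CONTENT_CREATOR_MODELS_1..4
os.environ["CONTENT_CREATOR_MODELS"] = '["model-a", "model-b", "model-c", "model-d"]'

# Mirrors self.agent_models = json.loads(os.environ["CONTENT_CREATOR_MODELS"])
agent_models = json.loads(os.environ["CONTENT_CREATOR_MODELS"])
assert agent_models == ["model-a", "model-b", "model-c", "model-d"]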

.gitignore

@@ -7,4 +7,3 @@ __pycache__
pyproject.toml
.ropeproject
generated_files/*
pyright*


@@ -4,5 +4,3 @@ gitpython
PyGithub
chromadb
langchain-ollama
PyJWT
dotenv


@@ -1,54 +1,38 @@
import json
import os
import random
import re
import string
import time
from concurrent.futures import ThreadPoolExecutor, TimeoutError
import os, re, json, random, time, string
from ollama import Client
import chromadb
from langchain_ollama import ChatOllama
from ollama import Client
class OllamaGenerator:
def __init__(self, title: str, content: str, inner_title: str):
self.title = title
self.inner_title = inner_title
self.content = content
self.response = None
print("In Class")
print(os.environ["CONTENT_CREATOR_MODELS"])
try:
chroma_port = int(os.environ["CHROMA_PORT"])
except ValueError as e:
raise Exception(f"CHROMA_PORT is not an integer: {e}")
self.chroma = chromadb.HttpClient(
host=os.environ["CHROMA_HOST"], port=chroma_port
)
ollama_url = f"{os.environ['OLLAMA_PROTOCOL']}://{os.environ['OLLAMA_HOST']}:{os.environ['OLLAMA_PORT']}"
self.chroma = chromadb.HttpClient(host="172.18.0.2", port=8000)
ollama_url = f"{os.environ["OLLAMA_PROTOCOL"]}://{os.environ["OLLAMA_HOST"]}:{os.environ["OLLAMA_PORT"]}"
self.ollama_client = Client(host=ollama_url)
self.ollama_model = os.environ["EDITOR_MODEL"]
self.embed_model = os.environ["EMBEDDING_MODEL"]
self.agent_models = json.loads(os.environ["CONTENT_CREATOR_MODELS"])
self.llm = ChatOllama(
model=self.ollama_model, temperature=0.6, top_p=0.5
) # This is the level head in the room
self.llm = ChatOllama(model=self.ollama_model, temperature=0.6, top_p=0.5) #This is the level head in the room
self.prompt_inject = f"""
You are a journalist, Software Developer and DevOps expert
writing a 5000 word draft blog article for other tech enthusiasts.
writing a 1000 word draft blog for other tech enthusiasts.
You like to use almost no code examples and prefer to talk
in a light comedic tone. You are also Australian
As this person write this blog as a markdown document.
The title for the blog is {self.inner_title}.
Do not output the title in the markdown.
The basis for the content of the blog is:
<blog>{self.content}</blog>
{self.content}
"""
def split_into_chunks(self, text, chunk_size=100):
"""Split text into chunks of size chunk_size"""
words = re.findall(r"\S+", text)
'''Split text into chunks of size chunk_size'''
words = re.findall(r'\S+', text)
chunks = []
current_chunk = []
@@ -59,19 +43,18 @@ class OllamaGenerator:
word_count += 1
if word_count >= chunk_size:
chunks.append(" ".join(current_chunk))
chunks.append(' '.join(current_chunk))
current_chunk = []
word_count = 0
if current_chunk:
chunks.append(" ".join(current_chunk))
chunks.append(' '.join(current_chunk))
return chunks
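# Illustrative behaviour of split_into_chunks (hypothetical input, not from the repo):
#   split_into_chunks("one two three four five", chunk_size=2)
#   -> ["one two", "three four", "five"]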
def generate_draft(self, model) -> str:
"""Generate a draft blog post using the specified model"""
def _generate():
'''Generate a draft blog post using the specified model'''
try:
# the idea behind this is to make the "creativity" random amongst the content creators
# controlling temperature will cause the output to allow more "random" connections in sentences
# Controlling top_p will tighten or loosen the embedding connections made
@@ -80,253 +63,89 @@
temp = random.uniform(0.5, 1.0)
top_p = random.uniform(0.4, 0.8)
top_k = int(random.uniform(30, 80))
agent_llm = ChatOllama(
model=model, temperature=temp, top_p=top_p, top_k=top_k
)
agent_llm = ChatOllama(model=model, temperature=temp, top_p=top_p, top_k=top_k)
messages = [
(
"system",
"You are a creative writer specialising in writing about technology",
),
("human", self.prompt_inject),
("system", self.prompt_inject),
("human", "make the blog post in a format to be edited easily" )
]
response = agent_llm.invoke(messages)
return (
response.text if hasattr(response, "text") else str(response)
) # ['message']['content']
# self.response = self.ollama_client.chat(model=model,
# messages=[
# {
# 'role': 'user',
# 'content': f'{self.prompt_inject}',
# },
# ])
#print ("draft")
#print (response)
return response.text()#['message']['content']
# Retry mechanism with 30-minute timeout
timeout_seconds = 30 * 60 # 30 minutes
max_retries = 3
for attempt in range(max_retries):
try:
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(_generate)
result = future.result(timeout=timeout_seconds)
return result
except TimeoutError:
print(
f"AI call timed out after {timeout_seconds} seconds on attempt {attempt + 1}"
)
if attempt < max_retries - 1:
print("Retrying...")
time.sleep(5) # Wait 5 seconds before retrying
continue
else:
raise Exception(
f"AI call failed to complete after {max_retries} attempts with {timeout_seconds} second timeouts"
)
except Exception as e:
if attempt < max_retries - 1:
print(f"Attempt {attempt + 1} failed with error: {e}. Retrying...")
time.sleep(5) # Wait 5 seconds before retrying
continue
else:
raise Exception(
f"Failed to generate blog draft after {max_retries} attempts: {e}"
)
except Exception as e:
raise Exception(f"Failed to generate blog draft: {e}")
def get_draft_embeddings(self, draft_chunks):
"""Get embeddings for the draft chunks"""
try:
# Handle empty draft chunks
if not draft_chunks:
print("Warning: No draft chunks to embed")
return []
embeds = self.ollama_client.embed(
model=self.embed_model, input=draft_chunks
)
embeddings = embeds.get("embeddings", [])
# Check if embeddings were generated successfully
if not embeddings:
print("Warning: No embeddings generated")
return []
return embeddings
except Exception as e:
print(f"Error generating embeddings: {e}")
return []
'''Get embeddings for the draft chunks'''
embeds = self.ollama_client.embed(model=self.embed_model, input=draft_chunks)
return embeds.get('embeddings', [])
def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
return ''.join(random.choice(chars) for _ in range(size))
def load_to_vector_db(self):
"""Load the generated blog drafts into a vector database"""
collection_name = (
f"blog_{self.title.lower().replace(' ', '_')}_{self.id_generator()}"
)
collection = self.chroma.get_or_create_collection(
name=collection_name
) # , metadata={"hnsw:space": "cosine"})
# if any(collection.name == collectionname for collectionname in self.chroma.list_collections()):
'''Load the generated blog drafts into a vector database'''
collection_name = f"blog_{self.title.lower().replace(" ", "_")}_{self.id_generator()}"
collection = self.chroma.get_or_create_collection(name=collection_name)#, metadata={"hnsw:space": "cosine"})
#if any(collection.name == collectionname for collectionname in self.chroma.list_collections()):
# self.chroma.delete_collection("blog_creator")
for model in self.agent_models:
print(f"Generating draft from {model} for load into vector database")
try:
draft_content = self.generate_draft(model)
draft_chunks = self.split_into_chunks(draft_content)
# Skip if no content was generated
if not draft_chunks or all(
chunk.strip() == "" for chunk in draft_chunks
):
print(f"Skipping {model} - no content generated")
continue
print(f"generating embeds for {model}")
embeds = self.get_draft_embeddings(draft_chunks)
# Skip if no embeddings were generated
if not embeds:
print(f"Skipping {model} - no embeddings generated")
continue
# Ensure we have the same number of embeddings as chunks
if len(embeds) != len(draft_chunks):
print(
f"Warning: Mismatch between chunks ({len(draft_chunks)}) and embeddings ({len(embeds)}) for {model}"
)
# Truncate or pad to match
min_length = min(len(embeds), len(draft_chunks))
draft_chunks = draft_chunks[:min_length]
embeds = embeds[:min_length]
if min_length == 0:
print(f"Skipping {model} - no valid content/embeddings pairs")
continue
ids = [model + str(i) for i in range(len(draft_chunks))]
chunknumber = list(range(len(draft_chunks)))
metadata = [{"model_agent": model} for index in chunknumber]
print(f"loading into collection for {model}")
collection.add(
documents=draft_chunks,
embeddings=embeds,
ids=ids,
metadatas=metadata,
)
except Exception as e:
print(f"Error processing model {model}: {e}")
# Continue with other models rather than failing completely
continue
print (f"Generating draft from {model} for load into vector database")
draft_chunks = self.split_into_chunks(self.generate_draft(model))
print(f"generating embeds")
embeds = self.get_draft_embeddings(draft_chunks)
ids = [model + str(i) for i in range(len(draft_chunks))]
chunknumber = list(range(len(draft_chunks)))
metadata = [{"model_agent": model} for index in chunknumber]
print(f'loading into collection')
collection.add(documents=draft_chunks, embeddings=embeds, ids=ids, metadatas=metadata)
return collection
def generate_markdown(self) -> str:
prompt_human = f"""
prompt_system = f"""
You are an editor taking information from {len(self.agent_models)} Software
Developers and Data experts
writing a 5000 word blog article. You like when they use almost no code examples.
You are also Australian. The content may have light comedic elements,
you are more professional and will attempt to tone these down
As this person produce the final version of this blog as a markdown document
keeping in mind the context provided by the previous drafts.
You are to produce the content not placeholders for further editors
writing a 3000 word blog for other tech enthusiasts.
You like when they use almost no code examples and the
voice is in a light comedic tone. You are also Australian
As this person, produce an amalgamation of this blog as a markdown document.
The title for the blog is {self.inner_title}.
Do not output the title in the markdown. Avoid repeated sentences
The basis for the content of the blog is:
<blog>{self.content}</blog>
{self.content}
"""
def _generate_final_document():
try:
embed_result = self.ollama_client.embed(
model=self.embed_model, input=prompt_human
)
query_embed = embed_result.get("embeddings", [])
if not query_embed:
print(
"Warning: Failed to generate query embeddings, using empty list"
)
query_embed = [[]] # Use a single empty embedding as fallback
except Exception as e:
print(f"Error generating query embeddings: {e}")
# Generate empty embeddings as fallback
query_embed = [[]] # Use a single empty embedding as fallback
collection = self.load_to_vector_db()
# Try to query the collection, with fallback for empty collections
try:
collection_query = collection.query(
query_embeddings=query_embed, n_results=100
)
print("Showing pertinent info from drafts used in final edited edition")
# Get documents with error handling
query_result = collection.query(
query_embeddings=query_embed, n_results=100
)
documents = query_result.get("documents", [])
if documents and len(documents) > 0 and len(documents[0]) > 0:
pertinent_draft_info = "\n\n".join(documents[0])
else:
print("Warning: No relevant documents found in collection")
pertinent_draft_info = "No relevant information found in drafts."
except Exception as query_error:
print(f"Error querying collection: {query_error}")
pertinent_draft_info = (
"No relevant information found in drafts due to query error."
)
# print(pertinent_draft_info)
prompt_system = f"""Generate the final, 5000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
- Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
"""
print("Generating final document")
messages = [
("system", prompt_system),
("human", prompt_human),
]
response = self.llm.invoke(messages)
return response.text if hasattr(response, "text") else str(response)
try:
# Retry mechanism with 30-minute timeout
timeout_seconds = 30 * 60 # 30 minutes
max_retries = 3
for attempt in range(max_retries):
try:
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(_generate_final_document)
self.response = future.result(timeout=timeout_seconds)
break # Success, exit the retry loop
except TimeoutError:
print(
f"AI call timed out after {timeout_seconds} seconds on attempt {attempt + 1}"
)
if attempt < max_retries - 1:
print("Retrying...")
time.sleep(5) # Wait 5 seconds before retrying
continue
else:
raise Exception(
f"AI call failed to complete after {max_retries} attempts with {timeout_seconds} second timeouts"
)
except Exception as e:
if attempt < max_retries - 1:
print(
f"Attempt {attempt + 1} failed with error: {e}. Retrying..."
)
time.sleep(5) # Wait 5 seconds before retrying
continue
else:
raise Exception(
f"Failed to generate markdown after {max_retries} attempts: {e}"
)
query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
collection = self.load_to_vector_db()
collection_query = collection.query(query_embeddings=query_embed, n_results=100)
print("Showing pertinent info from drafts used in final edited edition")
pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
#print(pertinent_draft_info)
prompt_human = f"Generate the final document using this information from the drafts: {pertinent_draft_info} - ONLY OUTPUT THE MARKDOWN"
print("Generating final document")
messages = [("system", prompt_system), ("human", prompt_human),]
self.response = self.llm.invoke(messages).text()
# self.response = self.ollama_client.chat(model=self.ollama_model,
# messages=[
# {
# 'role': 'user',
# 'content': f'{prompt_enhanced}',
# },
# ])
# print ("Markdown Generated")
# print (self.response)
return self.response # ['message']['content']
#print ("Markdown Generated")
#print (self.response)
return self.response#['message']['content']
except Exception as e:
raise Exception(f"Failed to generate markdown: {e}")
@@ -335,44 +154,9 @@ class OllamaGenerator:
with open(filename, "w") as f:
f.write(self.generate_markdown())
def generate_system_message(self, prompt_system, prompt_human):
def _generate():
messages = [
("system", prompt_system),
("human", prompt_human),
]
response = self.llm.invoke(messages)
ai_message = response.text if hasattr(response, "text") else str(response)
return ai_message
# Retry mechanism with 30-minute timeout
timeout_seconds = 30 * 60 # 30 minutes
max_retries = 3
for attempt in range(max_retries):
try:
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(_generate)
result = future.result(timeout=timeout_seconds)
return result
except TimeoutError:
print(
f"AI call timed out after {timeout_seconds} seconds on attempt {attempt + 1}"
)
if attempt < max_retries - 1:
print("Retrying...")
time.sleep(5) # Wait 5 seconds before retrying
continue
else:
raise Exception(
f"AI call failed to complete after {max_retries} attempts with {timeout_seconds} second timeouts"
)
except Exception as e:
if attempt < max_retries - 1:
print(f"Attempt {attempt + 1} failed with error: {e}. Retrying...")
time.sleep(5) # Wait 5 seconds before retrying
continue
else:
raise Exception(
f"Failed to generate system message after {max_retries} attempts: {e}"
)
def generate_commit_message(self):
prompt_system = "You are a blog creator commiting a piece of content to a central git repo"
prompt_human = f"Generate a 5 word git commit message describing {self.response}"
messages = [("system", prompt_system), ("human", prompt_human),]
commit_message = self.llm.invoke(messages).text()
return commit_message


@@ -1,13 +1,7 @@
import ai_generators.ollama_md_generator as omg
import trilium.notes as tn
import repo_management.repo_manager as git_repo
from notifications.n8n import N8NWebhookJwt
import string,os
from datetime import datetime
from dotenv import load_dotenv
load_dotenv()
print(os.environ["CONTENT_CREATOR_MODELS"])
tril = tn.TrilumNotes()
@@ -30,51 +24,11 @@ for note in tril_notes:
ai_gen = omg.OllamaGenerator(os_friendly_title,
tril_notes[note]['content'],
tril_notes[note]['title'])
blog_path = f"generated_files/{os_friendly_title}.md"
blog_path = f"/blog_creator/generated_files/{os_friendly_title}.md"
ai_gen.save_to_file(blog_path)
# Generate commit messages and push to repo
print("Generating Commit Message")
git_system_prompt = "You are a blog creator committing a piece of content to a central git repo"
git_human_prompt = f"Generate a 5 word git commit message describing {ai_gen.response}. ONLY OUTPUT THE RESPONSE"
commit_message = ai_gen.generate_system_message(git_system_prompt, git_human_prompt)
commit_message = ai_gen.generate_commit_message()
git_user = os.environ["GIT_USER"]
git_pass = os.environ["GIT_PASS"]
repo_manager = git_repo.GitRepository("blog/", git_user, git_pass)
print("Pushing to Repo")
repo_manager.create_copy_commit_push(blog_path, os_friendly_title, commit_message)
# Generate notification for Matrix
print("Generating Notification Message")
git_branch_url = f'https://git.aridgwayweb.com/armistace/blog/src/branch/{os_friendly_title}/src/content/{os_friendly_title}.md'
n8n_system_prompt = f"You are a blog creator notifying the final editor of the final creation of blog available at {git_branch_url}"
n8n_prompt_human = f"""
Generate an informal 100 word
summary describing {ai_gen.response}.
Don't address it or use names. ONLY OUTPUT THE RESPONSE.
ONLY OUTPUT IN PLAINTEXT STRIP ALL MARKDOWN
"""
notification_message = ai_gen.generate_system_message(n8n_system_prompt, n8n_prompt_human)
secret_key = os.environ['N8N_SECRET']
webhook_url = os.environ['N8N_WEBHOOK_URL']
notification_string = f"""
<h2>{tril_notes[note]['title']}</h2>
<h3>Summary</h3>
<p>{notification_message}</p>
<h3>Branch</h3>
<p>{os_friendly_title}</p>
<p><a href="{git_branch_url}">Link to Branch</a></p>
"""
payload = {
"message": f"{notification_string}",
"timestamp": datetime.now().isoformat()
}
webhook_client = N8NWebhookJwt(secret_key, webhook_url)
print("Notifying")
n8n_result = webhook_client.send_webhook(payload)
print(f"N8N response: {n8n_result['status']}")


@@ -1,45 +0,0 @@
from datetime import datetime, timedelta
import jwt
import requests
from typing import Dict, Optional


class N8NWebhookJwt:
    def __init__(self, secret_key: str, webhook_url: str):
        self.secret_key = secret_key
        self.webhook_url = webhook_url
        self.token_expiration = datetime.now() + timedelta(hours=1)

    def _generate_jwt_token(self, payload: Dict) -> str:
        """Generate JWT token with the given payload."""
        # Include expiration time (optional)
        payload["exp"] = self.token_expiration.timestamp()
        encoded_jwt = jwt.encode(
            payload,
            self.secret_key,
            algorithm="HS256",
        )
        return encoded_jwt  # jwt.decode(encoded_jwt, self.secret_key, algorithms=['HS256'])

    def send_webhook(self, payload: Dict) -> Dict:
        """Send a webhook request with JWT authentication."""
        # Generate JWT token
        token = self._generate_jwt_token(payload)
        # Set headers with JWT token
        headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json"
        }
        # Send POST request
        response = requests.post(
            self.webhook_url,
            json=payload,
            headers=headers
        )
        # Handle response
        if response.status_code == 200:
            return {"status": "success", "response": response.json()}
        else:
            return {"status": "error", "response": response.status_code, "message": response.text}


@@ -1,11 +1,8 @@
import os
import shutil
import os, shutil
from urllib.parse import quote
from git import Repo
from git.exc import GitCommandError
class GitRepository:
# This is designed to be transitory; it will destructively create the repo at repo_path
# if you have uncommitted changes you can kiss them goodbye!
@@ -14,8 +11,8 @@ class GitRepository:
def __init__(self, repo_path, username=None, password=None):
git_protocol = os.environ["GIT_PROTOCOL"]
git_remote = os.environ["GIT_REMOTE"]
# if username is not set we don't need parse to the url
if username == None or password == None:
#if username is not set we don't need parse to the url
if username==None or password == None:
remote = f"{git_protocol}://{git_remote}"
else:
# of course if it is we need to parse and escape it so that it
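The rest of that comment, and the credential-escaped remote URL it describes, sit outside this hunk; a hypothetical reconstruction using the quote import above:

from urllib.parse import quote

# Hypothetical shape only — the actual line is not shown in this diff.
remote = f"{git_protocol}://{quote(username, safe='')}:{quote(password, safe='')}@{git_remote}"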
@@ -42,7 +39,7 @@
print(f"Cloning failed: {e}")
return False
def fetch(self, remote_name="origin", ref_name="main"):
def fetch(self, remote_name='origin', ref_name='main'):
"""Fetch updates from a remote repository with authentication"""
try:
self.repo.remotes[remote_name].fetch(ref_name=ref_name)
@@ -51,7 +48,7 @@
print(f"Fetching failed: {e}")
return False
def pull(self, remote_name="origin", ref_name="main"):
def pull(self, remote_name='origin', ref_name='main'):
"""Pull updates from a remote repository with authentication"""
print("Pulling Latest Updates (if any)")
try:
@@ -65,6 +62,18 @@
"""List all branches in the repository"""
return [branch.name for branch in self.repo.branches]
def create_and_switch_branch(self, branch_name, remote_name='origin', ref_name='main'):
"""Create a new branch in the repository with authentication."""
try:
print(f"Creating Branch {branch_name}")
# Use the same remote and ref as before
self.repo.git.branch(branch_name)
except GitCommandError:
print("Branch already exists switching")
# ensure remote commits are pulled into local
self.repo.git.checkout(branch_name)
def add_and_commit(self, message=None):
"""Add and commit changes to the repository."""
try:
@@ -82,27 +91,12 @@
print(f"Commit failed: {e}")
return False
def create_copy_commit_push(self, file_path, title, commit_message):
# Check if branch exists remotely
remote_branches = [
ref.name.split("/")[-1] for ref in self.repo.remotes.origin.refs
]
def create_copy_commit_push(self, file_path, title, commit_message):
self.create_and_switch_branch(title)
if title in remote_branches:
# Branch exists remotely, checkout and pull
self.repo.git.checkout(title)
self.pull(ref_name=title)
else:
# New branch, create from main
self.repo.git.checkout("-b", title, "origin/main")
self.pull(ref_name=title)
shutil.copy(f"{file_path}", f"{self.repo_path}src/content/")
# Ensure destination directory exists
dest_dir = f"{self.repo_path}src/content/"
os.makedirs(dest_dir, exist_ok=True)
self.add_and_commit(f"'{commit_message}'")
# Copy file
shutil.copy(f"{file_path}", dest_dir)
# Commit and push
self.add_and_commit(commit_message)
self.repo.git.push("--set-upstream", "origin", title)
self.repo.git.push()