Compare commits

master..matrix_notifications

No commits in common. "master" and "matrix_notifications" have entirely different histories.

5 changed files with 97 additions and 378 deletions

File 1 of 5 — CI workflow "Create Blog Article if new notes exist" (present only on master; path not shown in this compare)

@@ -1,56 +0,0 @@
-name: Create Blog Article if new notes exist
-on:
-  schedule:
-    - cron: "15 18 * * *"
-  push:
-    branches:
-      - master
-jobs:
-  prepare_blog_drafts_and_push:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      - name: Install dependencies
-        shell: bash
-        run: |
-          apt update && apt upgrade -y
-          apt install rustc cargo python-is-python3 pip python3-venv python3-virtualenv libmagic-dev git -y
-          virtualenv .venv
-          source .venv/bin/activate
-          pip install --upgrade pip
-          pip install -r requirements.txt
-          git config --global user.name "Blog Creator"
-          git config --global user.email "ridgway.infrastructure@gmail.com"
-          git config --global push.autoSetupRemote true
-      - name: Create .env
-        shell: bash
-        run: |
-          echo "TRILIUM_HOST=${{ vars.TRILIUM_HOST }}" > .env
-          echo "TRILIUM_PORT='${{ vars.TRILIUM_PORT }}'" >> .env
-          echo "TRILIUM_PROTOCOL='${{ vars.TRILIUM_PROTOCOL }}'" >> .env
-          echo "TRILIUM_PASS='${{ secrets.TRILIUM_PASS }}'" >> .env
-          echo "TRILIUM_TOKEN='${{ secrets.TRILIUM_TOKEN }}'" >> .env
-          echo "OLLAMA_PROTOCOL='${{ vars.OLLAMA_PROTOCOL }}'" >> .env
-          echo "OLLAMA_HOST='${{ vars.OLLAMA_HOST }}'" >> .env
-          echo "OLLAMA_PORT='${{ vars.OLLAMA_PORT }}'" >> .env
-          echo "EMBEDDING_MODEL='${{ vars.EMBEDDING_MODEL }}'" >> .env
-          echo "EDITOR_MODEL='${{ vars.EDITOR_MODEL }}'" >> .env
-          export PURE='["${{ vars.CONTENT_CREATOR_MODELS_1 }}", "${{ vars.CONTENT_CREATOR_MODELS_2 }}", "${{ vars.CONTENT_CREATOR_MODELS_3 }}", "${{ vars.CONTENT_CREATOR_MODELS_4 }}"]'
-          echo "CONTENT_CREATOR_MODELS='$PURE'" >> .env
-          echo "GIT_PROTOCOL='${{ vars.GIT_PROTOCOL }}'" >> .env
-          echo "GIT_REMOTE='${{ vars.GIT_REMOTE }}'" >> .env
-          echo "GIT_USER='${{ vars.GIT_USER }}'" >> .env
-          echo "GIT_PASS='${{ secrets.GIT_PASS }}'" >> .env
-          echo "N8N_SECRET='${{ secrets.N8N_SECRET }}'" >> .env
-          echo "N8N_WEBHOOK_URL='${{ vars.N8N_WEBHOOK_URL }}'" >> .env
-          echo "CHROMA_HOST='${{ vars.CHROMA_HOST }}'" >> .env
-          echo "CHROMA_PORT='${{ vars.CHROMA_PORT }}'" >> .env
-      - name: Create Blogs
-        shell: bash
-        run: |
-          source .venv/bin/activate
-          python src/main.py
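
The one non-obvious move in this workflow is CONTENT_CREATOR_MODELS: four repository variables are spliced into a single JSON-array string so the Python side can read one environment variable and turn it back into a list. A minimal sketch of the consuming side, assuming the .env file sits in the working directory; the variable names match the workflow, the model values are illustrative:

    import json
    import os

    from dotenv import load_dotenv  # the `dotenv` pin in requirements.txt

    load_dotenv()  # merges .env into os.environ; already-set variables win

    # CONTENT_CREATOR_MODELS holds a JSON array string, e.g. '["llama3.1", "mistral-nemo"]'
    agent_models = json.loads(os.environ["CONTENT_CREATOR_MODELS"])

    # the generator builds its Ollama URL from three separate variables
    ollama_url = f"{os.environ['OLLAMA_PROTOCOL']}://{os.environ['OLLAMA_HOST']}:{os.environ['OLLAMA_PORT']}"
    print(agent_models, ollama_url)

python-dotenv strips the single quotes the workflow writes around each value, which is why json.loads sees a clean array.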

File 2 of 5 — Python dependency pins (requirements.txt)

@@ -5,4 +5,3 @@ PyGithub
 chromadb
 langchain-ollama
 PyJWT
-dotenv
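
The chromadb and langchain-ollama pins above carry the pipeline in the generator module below: each agent model writes a draft, the draft is chunked, embedded through Ollama, loaded into a Chroma collection, and the editor model later queries that collection for context. A condensed sketch of that flow with hypothetical host and model values; the real class reads all of these from the environment:

    import chromadb
    from ollama import Client

    ollama = Client(host="http://localhost:11434")             # assumed local Ollama
    chroma = chromadb.HttpClient(host="localhost", port=8000)  # assumed local Chroma

    chunks = ["first ~100 words of a draft...", "next ~100 words..."]  # split_into_chunks output
    embeds = ollama.embed(model="nomic-embed-text", input=chunks)["embeddings"]  # hypothetical embed model

    collection = chroma.get_or_create_collection(name="blog_example_ABC123")
    collection.add(
        documents=chunks,
        embeddings=embeds,
        ids=[f"agent{i}" for i in range(len(chunks))],
        metadatas=[{"model_agent": "agent"} for _ in chunks],
    )

    # the editor pulls the 100 chunks nearest its own prompt's embedding
    query = ollama.embed(model="nomic-embed-text", input="editor system prompt")["embeddings"]
    context = "\n\n".join(collection.query(query_embeddings=query, n_results=100)["documents"][0])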

File 3 of 5 — the OllamaGenerator module (draft generation, vector store, final edit)

@@ -1,54 +1,42 @@
-import json
-import os
-import random
-import re
-import string
-import time
-from concurrent.futures import ThreadPoolExecutor, TimeoutError
+import os, re, json, random, time, string
+from ollama import Client
 import chromadb
 from langchain_ollama import ChatOllama
-from ollama import Client

 class OllamaGenerator:

     def __init__(self, title: str, content: str, inner_title: str):
         self.title = title
         self.inner_title = inner_title
         self.content = content
         self.response = None
-        print("In Class")
-        print(os.environ["CONTENT_CREATOR_MODELS"])
         try:
-            chroma_port = int(os.environ["CHROMA_PORT"])
+            chroma_port = int(os.environ['CHROMA_PORT'])
         except ValueError as e:
             raise Exception(f"CHROMA_PORT is not an integer: {e}")
-        self.chroma = chromadb.HttpClient(
-            host=os.environ["CHROMA_HOST"], port=chroma_port
-        )
-        ollama_url = f"{os.environ['OLLAMA_PROTOCOL']}://{os.environ['OLLAMA_HOST']}:{os.environ['OLLAMA_PORT']}"
+        self.chroma = chromadb.HttpClient(host=os.environ['CHROMA_HOST'], port=chroma_port)
+        ollama_url = f"{os.environ["OLLAMA_PROTOCOL"]}://{os.environ["OLLAMA_HOST"]}:{os.environ["OLLAMA_PORT"]}"
         self.ollama_client = Client(host=ollama_url)
         self.ollama_model = os.environ["EDITOR_MODEL"]
         self.embed_model = os.environ["EMBEDDING_MODEL"]
         self.agent_models = json.loads(os.environ["CONTENT_CREATOR_MODELS"])
-        self.llm = ChatOllama(
-            model=self.ollama_model, temperature=0.6, top_p=0.5
-        )  # This is the level head in the room
+        self.llm = ChatOllama(model=self.ollama_model, temperature=0.6, top_p=0.5) #This is the level head in the room
         self.prompt_inject = f"""
         You are a journalist, Software Developer and DevOps expert
-        writing a 5000 word draft blog article for other tech enthusiasts.
+        writing a 3000 word draft blog article for other tech enthusiasts.
         You like to use almost no code examples and prefer to talk
         in a light comedic tone. You are also Australian
         As this person write this blog as a markdown document.
         The title for the blog is {self.inner_title}.
         Do not output the title in the markdown.
         The basis for the content of the blog is:
-        <blog>{self.content}</blog>
+        {self.content}
         """

     def split_into_chunks(self, text, chunk_size=100):
-        """Split text into chunks of size chunk_size"""
-        words = re.findall(r"\S+", text)
+        '''Split text into chunks of size chunk_size'''
+        words = re.findall(r'\S+', text)
         chunks = []
         current_chunk = []

@@ -59,19 +47,18 @@ class OllamaGenerator:
                 word_count += 1
                 if word_count >= chunk_size:
-                    chunks.append(" ".join(current_chunk))
+                    chunks.append(' '.join(current_chunk))
                     current_chunk = []
                     word_count = 0
         if current_chunk:
-            chunks.append(" ".join(current_chunk))
+            chunks.append(' '.join(current_chunk))
         return chunks

     def generate_draft(self, model) -> str:
-        """Generate a draft blog post using the specified model"""
-        def _generate():
+        '''Generate a draft blog post using the specified model'''
+        try:
             # the idea behind this is to make the "creativity" random amongst the content creators
             # contorlling temperature will allow cause the output to allow more "random" connections in sentences
             # Controlling top_p will tighten or loosen the embedding connections made
@@ -80,247 +67,83 @@ class OllamaGenerator:
            temp = random.uniform(0.5, 1.0)
            top_p = random.uniform(0.4, 0.8)
            top_k = int(random.uniform(30, 80))
-            agent_llm = ChatOllama(
-                model=model, temperature=temp, top_p=top_p, top_k=top_k
-            )
+            agent_llm = ChatOllama(model=model, temperature=temp, top_p=top_p, top_k=top_k)
             messages = [
-                (
-                    "system",
-                    "You are a creative writer specialising in writing about technology",
-                ),
-                ("human", self.prompt_inject),
+                ("system", self.prompt_inject),
+                ("human", "make the blog post in a format to be edited easily" )
             ]
             response = agent_llm.invoke(messages)
-            return (
-                response.text if hasattr(response, "text") else str(response)
-            )  # ['message']['content']
-
-        # Retry mechanism with 30-minute timeout
-        timeout_seconds = 30 * 60  # 30 minutes
-        max_retries = 3
-        for attempt in range(max_retries):
-            try:
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    future = executor.submit(_generate)
-                    result = future.result(timeout=timeout_seconds)
-                    return result
-            except TimeoutError:
-                print(
-                    f"AI call timed out after {timeout_seconds} seconds on attempt {attempt + 1}"
-                )
-                if attempt < max_retries - 1:
-                    print("Retrying...")
-                    time.sleep(5)  # Wait 5 seconds before retrying
-                    continue
-                else:
-                    raise Exception(
-                        f"AI call failed to complete after {max_retries} attempts with {timeout_seconds} second timeouts"
-                    )
+            # self.response = self.ollama_client.chat(model=model,
+            # messages=[
+            # {
+            # 'role': 'user',
+            # 'content': f'{self.prompt_inject}',
+            # },
+            # ])
+            #print ("draft")
+            #print (response)
+            return response.text()#['message']['content']
         except Exception as e:
-            if attempt < max_retries - 1:
-                print(f"Attempt {attempt + 1} failed with error: {e}. Retrying...")
-                time.sleep(5)  # Wait 5 seconds before retrying
-                continue
-            else:
-                raise Exception(
-                    f"Failed to generate blog draft after {max_retries} attempts: {e}"
-                )
+            raise Exception(f"Failed to generate blog draft: {e}")

     def get_draft_embeddings(self, draft_chunks):
-        """Get embeddings for the draft chunks"""
-        try:
-            # Handle empty draft chunks
-            if not draft_chunks:
-                print("Warning: No draft chunks to embed")
-                return []
-            embeds = self.ollama_client.embed(
-                model=self.embed_model, input=draft_chunks
-            )
-            embeddings = embeds.get("embeddings", [])
-            # Check if embeddings were generated successfully
-            if not embeddings:
-                print("Warning: No embeddings generated")
-                return []
-            return embeddings
-        except Exception as e:
-            print(f"Error generating embeddings: {e}")
-            return []
+        '''Get embeddings for the draft chunks'''
+        embeds = self.ollama_client.embed(model=self.embed_model, input=draft_chunks)
+        return embeds.get('embeddings', [])

     def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
-        return "".join(random.choice(chars) for _ in range(size))
+        return ''.join(random.choice(chars) for _ in range(size))

     def load_to_vector_db(self):
-        """Load the generated blog drafts into a vector database"""
-        collection_name = (
-            f"blog_{self.title.lower().replace(' ', '_')}_{self.id_generator()}"
-        )
-        collection = self.chroma.get_or_create_collection(
-            name=collection_name
-        )  # , metadata={"hnsw:space": "cosine"})
+        '''Load the generated blog drafts into a vector database'''
+        collection_name = f"blog_{self.title.lower().replace(" ", "_")}_{self.id_generator()}"
+        collection = self.chroma.get_or_create_collection(name=collection_name)#, metadata={"hnsw:space": "cosine"})
         #if any(collection.name == collectionname for collectionname in self.chroma.list_collections()):
         #    self.chroma.delete_collection("blog_creator")
         for model in self.agent_models:
             print (f"Generating draft from {model} for load into vector database")
-            try:
-                draft_content = self.generate_draft(model)
-                draft_chunks = self.split_into_chunks(draft_content)
-                # Skip if no content was generated
-                if not draft_chunks or all(
-                    chunk.strip() == "" for chunk in draft_chunks
-                ):
-                    print(f"Skipping {model} - no content generated")
-                    continue
-                print(f"generating embeds for {model}")
+            draft_chunks = self.split_into_chunks(self.generate_draft(model))
+            print(f"generating embeds")
             embeds = self.get_draft_embeddings(draft_chunks)
-                # Skip if no embeddings were generated
-                if not embeds:
-                    print(f"Skipping {model} - no embeddings generated")
-                    continue
-                # Ensure we have the same number of embeddings as chunks
-                if len(embeds) != len(draft_chunks):
-                    print(
-                        f"Warning: Mismatch between chunks ({len(draft_chunks)}) and embeddings ({len(embeds)}) for {model}"
-                    )
-                    # Truncate or pad to match
-                    min_length = min(len(embeds), len(draft_chunks))
-                    draft_chunks = draft_chunks[:min_length]
-                    embeds = embeds[:min_length]
-                    if min_length == 0:
-                        print(f"Skipping {model} - no valid content/embeddings pairs")
-                        continue
             ids = [model + str(i) for i in range(len(draft_chunks))]
             chunknumber = list(range(len(draft_chunks)))
             metadata = [{"model_agent": model} for index in chunknumber]
-                print(f"loading into collection for {model}")
-                collection.add(
-                    documents=draft_chunks,
-                    embeddings=embeds,
-                    ids=ids,
-                    metadatas=metadata,
-                )
-            except Exception as e:
-                print(f"Error processing model {model}: {e}")
-                # Continue with other models rather than failing completely
-                continue
+            print(f'loading into collection')
+            collection.add(documents=draft_chunks, embeddings=embeds, ids=ids, metadatas=metadata)
         return collection

     def generate_markdown(self) -> str:
-        prompt_human = f"""
+        prompt_system = f"""
         You are an editor taking information from {len(self.agent_models)} Software
         Developers and Data experts
-        writing a 5000 word blog article. You like when they use almost no code examples.
+        writing a 3000 word blog article. You like when they use almost no code examples.
         You are also Australian. The content may have light comedic elements,
         you are more professional and will attempt to tone these down
-        As this person produce the final version of this blog as a markdown document
-        keeping in mind the context provided by the previous drafts.
-        You are to produce the content not placeholders for further editors
+        As this person produce and an amalgamtion of this blog as a markdown document.
         The title for the blog is {self.inner_title}.
         Do not output the title in the markdown. Avoid repeated sentences
         The basis for the content of the blog is:
-        <blog>{self.content}</blog>
+        {self.content}
         """
-        def _generate_final_document():
         try:
-                embed_result = self.ollama_client.embed(
-                    model=self.embed_model, input=prompt_human
-                )
-                query_embed = embed_result.get("embeddings", [])
-                if not query_embed:
-                    print(
-                        "Warning: Failed to generate query embeddings, using empty list"
-                    )
-                    query_embed = [[]]  # Use a single empty embedding as fallback
-            except Exception as e:
-                print(f"Error generating query embeddings: {e}")
-                # Generate empty embeddings as fallback
-                query_embed = [[]]  # Use a single empty embedding as fallback
+            query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
             collection = self.load_to_vector_db()
-            # Try to query the collection, with fallback for empty collections
-            try:
-                collection_query = collection.query(
-                    query_embeddings=query_embed, n_results=100
-                )
+            collection_query = collection.query(query_embeddings=query_embed, n_results=100)
             print("Showing pertinent info from drafts used in final edited edition")
-                # Get documents with error handling
-                query_result = collection.query(
-                    query_embeddings=query_embed, n_results=100
-                )
-                documents = query_result.get("documents", [])
-                if documents and len(documents) > 0 and len(documents[0]) > 0:
-                    pertinent_draft_info = "\n\n".join(documents[0])
-                else:
-                    print("Warning: No relevant documents found in collection")
-                    pertinent_draft_info = "No relevant information found in drafts."
-            except Exception as query_error:
-                print(f"Error querying collection: {query_error}")
-                pertinent_draft_info = (
-                    "No relevant information found in drafts due to query error."
-                )
+            pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
             #print(pertinent_draft_info)
-            prompt_system = f"""Generate the final, 5000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
-            - Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
-            """
+            prompt_human = f"Generate the final document using this information from the drafts: {pertinent_draft_info} - Only output in markdown, do not wrap in markdown tags"
             print("Generating final document")
-            messages = [
-                ("system", prompt_system),
-                ("human", prompt_human),
-            ]
-            response = self.llm.invoke(messages)
-            return response.text if hasattr(response, "text") else str(response)
-
-        try:
-            # Retry mechanism with 30-minute timeout
-            timeout_seconds = 30 * 60  # 30 minutes
-            max_retries = 3
-            for attempt in range(max_retries):
-                try:
-                    with ThreadPoolExecutor(max_workers=1) as executor:
-                        future = executor.submit(_generate_final_document)
-                        self.response = future.result(timeout=timeout_seconds)
-                        break  # Success, exit the retry loop
-                except TimeoutError:
-                    print(
-                        f"AI call timed out after {timeout_seconds} seconds on attempt {attempt + 1}"
-                    )
-                    if attempt < max_retries - 1:
-                        print("Retrying...")
-                        time.sleep(5)  # Wait 5 seconds before retrying
-                        continue
-                    else:
-                        raise Exception(
-                            f"AI call failed to complete after {max_retries} attempts with {timeout_seconds} second timeouts"
-                        )
-                except Exception as e:
-                    if attempt < max_retries - 1:
-                        print(
-                            f"Attempt {attempt + 1} failed with error: {e}. Retrying..."
-                        )
-                        time.sleep(5)  # Wait 5 seconds before retrying
-                        continue
-                    else:
-                        raise Exception(
-                            f"Failed to generate markdown after {max_retries} attempts: {e}"
-                        )
+            messages = [("system", prompt_system), ("human", prompt_human),]
+            self.response = self.llm.invoke(messages).text()
+            # self.response = self.ollama_client.chat(model=self.ollama_model,
             # messages=[
+            # {
+            # 'role': 'user',
             # 'content': f'{prompt_enhanced}',
             # },
             # ])

@@ -336,43 +159,6 @@ class OllamaGenerator:
             f.write(self.generate_markdown())

     def generate_system_message(self, prompt_system, prompt_human):
-        def _generate():
-            messages = [
-                ("system", prompt_system),
-                ("human", prompt_human),
-            ]
-            response = self.llm.invoke(messages)
-            ai_message = response.text if hasattr(response, "text") else str(response)
+        messages = [("system", prompt_system), ("human", prompt_human),]
+        ai_message = self.llm.invoke(messages).text()
         return ai_message
-
-        # Retry mechanism with 30-minute timeout
-        timeout_seconds = 30 * 60  # 30 minutes
-        max_retries = 3
-        for attempt in range(max_retries):
-            try:
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    future = executor.submit(_generate)
-                    result = future.result(timeout=timeout_seconds)
-                    return result
-            except TimeoutError:
-                print(
-                    f"AI call timed out after {timeout_seconds} seconds on attempt {attempt + 1}"
-                )
-                if attempt < max_retries - 1:
-                    print("Retrying...")
-                    time.sleep(5)  # Wait 5 seconds before retrying
-                    continue
-                else:
-                    raise Exception(
-                        f"AI call failed to complete after {max_retries} attempts with {timeout_seconds} second timeouts"
-                    )
-            except Exception as e:
-                if attempt < max_retries - 1:
-                    print(f"Attempt {attempt + 1} failed with error: {e}. Retrying...")
-                    time.sleep(5)  # Wait 5 seconds before retrying
-                    continue
-                else:
-                    raise Exception(
-                        f"Failed to generate system message after {max_retries} attempts: {e}"
-                    )
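
Most of this compare's 378 deletions land in this file, and most of those are three inline copies of one construct: master runs every LLM call in a worker thread so it can give up after 30 minutes and retry up to three times, where matrix_notifications just calls the model and re-raises on failure. Factored into a single helper, the pattern looks roughly like this (a sketch of the idea, not code from either branch):

    import time
    from concurrent.futures import ThreadPoolExecutor, TimeoutError


    def call_with_retry(fn, timeout_seconds=30 * 60, max_retries=3, backoff=5):
        """Run fn() in a worker thread; retry on timeout or error."""
        for attempt in range(max_retries):
            try:
                with ThreadPoolExecutor(max_workers=1) as executor:
                    return executor.submit(fn).result(timeout=timeout_seconds)
            except TimeoutError:
                print(f"AI call timed out after {timeout_seconds}s on attempt {attempt + 1}")
            except Exception as e:
                print(f"Attempt {attempt + 1} failed with error: {e}")
            if attempt == max_retries - 1:
                raise Exception(f"AI call failed after {max_retries} attempts")
            time.sleep(backoff)  # brief pause before the next attempt

One caveat applies to the sketch and to master's inline version alike: leaving the with-block calls executor.shutdown(wait=True), so a timed-out worker thread is still waited on before the retry starts; the timeout bounds the bookkeeping, it does not kill the in-flight call.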

File 4 of 5 — src/main.py, the entry point the workflow runs

@@ -4,9 +4,6 @@ import repo_management.repo_manager as git_repo
 from notifications.n8n import N8NWebhookJwt
 import string,os
 from datetime import datetime
-from dotenv import load_dotenv
-
-load_dotenv()
-print(os.environ["CONTENT_CREATOR_MODELS"])

 tril = tn.TrilumNotes()

@@ -30,7 +27,7 @@ for note in tril_notes:
     ai_gen = omg.OllamaGenerator(os_friendly_title,
                                  tril_notes[note]['content'],
                                  tril_notes[note]['title'])
-    blog_path = f"generated_files/{os_friendly_title}.md"
+    blog_path = f"/blog_creator/generated_files/{os_friendly_title}.md"
     ai_gen.save_to_file(blog_path)

@@ -50,10 +47,9 @@ for note in tril_notes:
     git_branch_url = f'https://git.aridgwayweb.com/armistace/blog/src/branch/{os_friendly_title}/src/content/{os_friendly_title}.md'
     n8n_system_prompt = f"You are a blog creator notifiying the final editor of the final creation of blog available at {git_branch_url}"
     n8n_prompt_human = f"""
-                       Generate an informal 100 word
+                       Generate an informal 150 word
                        summary describing {ai_gen.response}.
-                       Don't address it or use names. ONLY OUTPUT THE RESPONSE.
-                       ONLY OUTPUT IN PLAINTEXT STRIP ALL MARKDOWN
+                       Don't address it or use names. ONLY OUTPUT THE RESPONSE
                        """
     notification_message = ai_gen.generate_system_message(n8n_system_prompt, n8n_prompt_human)
     secret_key = os.environ['N8N_SECRET']
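
main.py hands the summary to N8NWebhookJwt, whose body is not part of this compare, so the shape of the call below is only a guess: PyJWT (pinned in requirements.txt) signs a token with N8N_SECRET and the message is posted to N8N_WEBHOOK_URL. Every name in the sketch other than those two environment variables is an illustrative assumption:

    import os
    from datetime import datetime, timedelta, timezone

    import jwt       # PyJWT
    import requests  # assumed transport; not shown in the requirements hunk above


    def notify_n8n(message: str) -> int:
        # sign a short-lived token with the shared secret the workflow wrote to .env
        now = datetime.now(timezone.utc)
        token = jwt.encode({"iat": now, "exp": now + timedelta(minutes=5)},
                           os.environ["N8N_SECRET"], algorithm="HS256")
        resp = requests.post(os.environ["N8N_WEBHOOK_URL"],
                             json={"message": message},
                             headers={"Authorization": f"Bearer {token}"})
        return resp.status_code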

File 5 of 5 — repo_management/repo_manager.py (GitRepository helper)

@@ -1,11 +1,8 @@
-import os
-import shutil
+import os, shutil
 from urllib.parse import quote
 from git import Repo
 from git.exc import GitCommandError

 class GitRepository:
     # This is designed to be transitory it will desctruvtively create the repo at repo_path
     # if you have uncommited changes you can kiss them goodbye!

@@ -42,7 +39,7 @@ class GitRepository:
             print(f"Cloning failed: {e}")
             return False

-    def fetch(self, remote_name="origin", ref_name="main"):
+    def fetch(self, remote_name='origin', ref_name='main'):
         """Fetch updates from a remote repository with authentication"""
         try:
             self.repo.remotes[remote_name].fetch(ref_name=ref_name)

@@ -51,7 +48,7 @@ class GitRepository:
             print(f"Fetching failed: {e}")
             return False

-    def pull(self, remote_name="origin", ref_name="main"):
+    def pull(self, remote_name='origin', ref_name='main'):
         """Pull updates from a remote repository with authentication"""
         print("Pulling Latest Updates (if any)")
         try:

@@ -65,6 +62,18 @@ class GitRepository:
         """List all branches in the repository"""
         return [branch.name for branch in self.repo.branches]

+    def create_and_switch_branch(self, branch_name, remote_name='origin', ref_name='main'):
+        """Create a new branch in the repository with authentication."""
+        try:
+            print(f"Creating Branch {branch_name}")
+            # Use the same remote and ref as before
+            self.repo.git.branch(branch_name)
+        except GitCommandError:
+            print("Branch already exists switching")
+        # ensure remote commits are pulled into local
+        self.repo.git.checkout(branch_name)
+
     def add_and_commit(self, message=None):
         """Add and commit changes to the repository."""
         try:

@@ -82,27 +91,12 @@ class GitRepository:
             print(f"Commit failed: {e}")
             return False

-    def create_copy_commit_push(self, file_path, title, commit_message):
-        # Check if branch exists remotely
-        remote_branches = [
-            ref.name.split("/")[-1] for ref in self.repo.remotes.origin.refs
-        ]
-        if title in remote_branches:
-            # Branch exists remotely, checkout and pull
-            self.repo.git.checkout(title)
+    def create_copy_commit_push(self, file_path, title, commit_messge):
+        self.create_and_switch_branch(title)
         self.pull(ref_name=title)
-        else:
-            # New branch, create from main
-            self.repo.git.checkout("-b", title, "origin/main")
-        # Ensure destination directory exists
-        dest_dir = f"{self.repo_path}src/content/"
-        os.makedirs(dest_dir, exist_ok=True)
-        # Copy file
-        shutil.copy(f"{file_path}", dest_dir)
-        # Commit and push
-        self.add_and_commit(commit_message)
-        self.repo.git.push("--set-upstream", "origin", title)
+        shutil.copy(f"{file_path}", f"{self.repo_path}src/content/")
+        self.add_and_commit(f"'{commit_messge}'")
+        self.repo.git.push()
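
The two branches answer "make sure branch {title} exists and is checked out" differently: matrix_notifications adds create_and_switch_branch (attempt git branch, swallow the GitCommandError if it already exists, then checkout), while master inspects origin's refs and either checks out the existing branch or creates it from origin/main. Master's approach as a standalone sketch, assuming a GitPython Repo that was cloned with a remote named origin:

    from git import Repo


    def checkout_or_create(repo: Repo, branch: str, base: str = "origin/main") -> None:
        # remote refs are named like "origin/my-branch"; keep only the branch part
        remote_branches = [ref.name.split("/")[-1] for ref in repo.remotes.origin.refs]
        if branch in remote_branches:
            repo.git.checkout(branch)            # branch exists remotely: reuse it
            repo.remotes.origin.pull(branch)     # and bring the local copy up to date
        else:
            repo.git.checkout("-b", branch, base)  # brand new branch based on main

Because a brand new branch has no upstream yet, master pushes with --set-upstream origin {title}; matrix_notifications relies on a plain git push, which only succeeds once an upstream exists or a setting like push.autoSetupRemote is in effect (the workflow above sets it globally).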