Compare commits

Comparing `repo_work_`...`master` (44 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | bce439921f |  |
|  | 2de2d0fe3a |  |
|  | cf795bbc35 |  |
|  | a6ed20451a |  |
|  | 7fd32b3024 |  |
|  | a88d233c6b |  |
|  | 2abc39e3ac |  |
|  | f430998137 |  |
|  | 8dceb79d91 |  |
|  | 6c5b0f778d |  |
|  | 37ed8fd0f9 |  |
|  | 0594ea54aa |  |
|  | 60f7473297 |  |
|  | ec69e8e4f7 |  |
|  | 62b1175aeb |  |
|  | 41f804a1eb |  |
|  | f50d076164 |  |
|  | fc4f9c5053 |  |
|  | e3262cd366 |  |
|  | 341f3d8623 |  |
|  | e2c29204fa |  |
|  | f0e6a0cb52 |  |
|  | 7f0b0376d1 |  |
|  | 44b5ea6a68 |  |
|  | a49457094d |  |
|  | 9296fda390 |  |
|  | bb0d9090f3 |  |
|  | 703a2384e7 |  |
|  | 4b3f00c325 |  |
|  | 38dfe404d1 |  |
|  | 347ac63f86 |  |
|  | 506758f67d |  |
|  | f0572ba9fb |  |
|  | 4686f3fae0 |  |
|  | ea1c8cfb13 |  |
|  | 9ca7578d28 |  |
|  | 64b466c4ac |  |
|  | 49174de9ff |  |
|  | 59f9f01c69 |  |
|  | a7eae4b09f |  |
|  | c466b04a25 |  |
|  | 431e5c63aa |  |
|  | 6e117e3ce9 |  |
|  | 9a9228bc07 |  |
.gitea/workflows/deploy.yml (new file, 56 lines)
@@ -0,0 +1,56 @@

```yaml
name: Create Blog Article if new notes exist
on:
  schedule:
    - cron: "15 3 * * *"
  push:
    branches:
      - master
jobs:
  prepare_blog_drafts_and_push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install dependencies
        shell: bash
        run: |
          apt update && apt upgrade -y
          apt install rustc cargo python-is-python3 pip python3-venv python3-virtualenv libmagic-dev git -y
          virtualenv .venv
          source .venv/bin/activate
          pip install --upgrade pip
          pip install -r requirements.txt
          git config --global user.name "Blog Creator"
          git config --global user.email "ridgway.infrastructure@gmail.com"
          git config --global push.autoSetupRemote true

      - name: Create .env
        shell: bash
        run: |
          echo "TRILIUM_HOST=${{ vars.TRILIUM_HOST }}" > .env
          echo "TRILIUM_PORT='${{ vars.TRILIUM_PORT }}'" >> .env
          echo "TRILIUM_PROTOCOL='${{ vars.TRILIUM_PROTOCOL }}'" >> .env
          echo "TRILIUM_PASS='${{ secrets.TRILIUM_PASS }}'" >> .env
          echo "TRILIUM_TOKEN='${{ secrets.TRILIUM_TOKEN }}'" >> .env
          echo "OLLAMA_PROTOCOL='${{ vars.OLLAMA_PROTOCOL }}'" >> .env
          echo "OLLAMA_HOST='${{ vars.OLLAMA_HOST }}'" >> .env
          echo "OLLAMA_PORT='${{ vars.OLLAMA_PORT }}'" >> .env
          echo "EMBEDDING_MODEL='${{ vars.EMBEDDING_MODEL }}'" >> .env
          echo "EDITOR_MODEL='${{ vars.EDITOR_MODEL }}'" >> .env
          export PURE='["${{ vars.CONTENT_CREATOR_MODELS_1 }}", "${{ vars.CONTENT_CREATOR_MODELS_2 }}", "${{ vars.CONTENT_CREATOR_MODELS_3 }}", "${{ vars.CONTENT_CREATOR_MODELS_4 }}"]'
          echo "CONTENT_CREATOR_MODELS='$PURE'" >> .env
          echo "GIT_PROTOCOL='${{ vars.GIT_PROTOCOL }}'" >> .env
          echo "GIT_REMOTE='${{ vars.GIT_REMOTE }}'" >> .env
          echo "GIT_USER='${{ vars.GIT_USER }}'" >> .env
          echo "GIT_PASS='${{ secrets.GIT_PASS }}'" >> .env
          echo "N8N_SECRET='${{ secrets.N8N_SECRET }}'" >> .env
          echo "N8N_WEBHOOK_URL='${{ vars.N8N_WEBHOOK_URL }}'" >> .env
          echo "CHROMA_HOST='${{ vars.CHROMA_HOST }}'" >> .env
          echo "CHROMA_PORT='${{ vars.CHROMA_PORT }}'" >> .env

      - name: Create Blogs
        shell: bash
        run: |
          source .venv/bin/activate
          python src/main.py
```
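The "Create .env" step wraps most values in single quotes and assembles CONTENT_CREATOR_MODELS as a JSON-style list from four repository variables. This diff does not show how that list is parsed downstream; a minimal sketch of reading the generated .env back, assuming python-dotenv strips the matching surrounding quotes (which it does for quoted values), could look like this:

```python
# Sketch only, not part of this diff: reading the .env written by the workflow.
import json
import os

from dotenv import load_dotenv

load_dotenv()  # picks up the .env created in the "Create .env" step

# python-dotenv strips the matching outer single quotes, leaving a JSON list string
creator_models = json.loads(os.environ["CONTENT_CREATOR_MODELS"])
chroma_port = int(os.environ["CHROMA_PORT"])  # mirrors the int() check added to OllamaGenerator below
```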
.gitignore (vendored, 1 line changed)
```diff
@@ -7,3 +7,4 @@ __pycache__
 pyproject.toml
 .ropeproject
 generated_files/*
+pyright*
```
requirements.txt

```diff
@@ -4,3 +4,5 @@ gitpython
 PyGithub
 chromadb
 langchain-ollama
+PyJWT
+dotenv
```
src/ai_generators/ollama_md_generator.py

```diff
@@ -11,7 +11,13 @@ class OllamaGenerator:
         self.inner_title = inner_title
         self.content = content
         self.response = None
-        self.chroma = chromadb.HttpClient(host="172.18.0.2", port=8000)
+        print("In Class")
+        print(os.environ["CONTENT_CREATOR_MODELS"])
+        try:
+            chroma_port = int(os.environ['CHROMA_PORT'])
+        except ValueError as e:
+            raise Exception(f"CHROMA_PORT is not an integer: {e}")
+        self.chroma = chromadb.HttpClient(host=os.environ['CHROMA_HOST'], port=chroma_port)
         ollama_url = f"{os.environ["OLLAMA_PROTOCOL"]}://{os.environ["OLLAMA_HOST"]}:{os.environ["OLLAMA_PORT"]}"
         self.ollama_client = Client(host=ollama_url)
         self.ollama_model = os.environ["EDITOR_MODEL"]
```
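One note on the `ollama_url` context line above: it reuses double quotes inside the f-string replacement fields, which only parses on Python 3.12 and later (PEP 701); older interpreters raise a SyntaxError. If older Pythons need to be supported, a quote-safe equivalent might look like the following sketch (not part of the diff):

```python
# Sketch only, not part of this diff: a quote-safe equivalent for Python < 3.12,
# where reusing the enclosing quote character inside an f-string is a SyntaxError.
import os

ollama_url = "{}://{}:{}".format(
    os.environ["OLLAMA_PROTOCOL"],
    os.environ["OLLAMA_HOST"],
    os.environ["OLLAMA_PORT"],
)
```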
```diff
@@ -20,14 +26,14 @@
         self.llm = ChatOllama(model=self.ollama_model, temperature=0.6, top_p=0.5) #This is the level head in the room
         self.prompt_inject = f"""
         You are a journalist, Software Developer and DevOps expert
-        writing a 1000 word draft blog for other tech enthusiasts.
+        writing a 3000 word draft blog article for other tech enthusiasts.
         You like to use almost no code examples and prefer to talk
         in a light comedic tone. You are also Australian
         As this person write this blog as a markdown document.
         The title for the blog is {self.inner_title}.
         Do not output the title in the markdown.
         The basis for the content of the blog is:
-        {self.content}
+        <blog>{self.content}</blog>
         """

     def split_into_chunks(self, text, chunk_size=100):
```
```diff
@@ -116,14 +122,15 @@ class OllamaGenerator:
         prompt_system = f"""
         You are an editor taking information from {len(self.agent_models)} Software
         Developers and Data experts
-        writing a 3000 word blog for other tech enthusiasts.
-        You like when they use almost no code examples and the
-        voice is in a light comedic tone. You are also Australian
-        As this person produce and an amalgamtion of this blog as a markdown document.
+        writing a 3000 word blog article. You like when they use almost no code examples.
+        You are also Australian. The content may have light comedic elements,
+        you are more professional and will attempt to tone these down
+        As this person produce the final version of this blog as a markdown document
+        keeping in mind the context provided by the previous drafts.
         The title for the blog is {self.inner_title}.
         Do not output the title in the markdown. Avoid repeated sentences
         The basis for the content of the blog is:
-        {self.content}
+        <blog>{self.content}</blog>
         """
         try:
             query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
```
```diff
@@ -132,7 +139,9 @@ class OllamaGenerator:
             print("Showing pertinent info from drafts used in final edited edition")
             pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
             #print(pertinent_draft_info)
-            prompt_human = f"Generate the final document using this information from the drafts: {pertinent_draft_info} - ONLY OUTPUT THE MARKDOWN"
+            prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
+            - Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
+            """
             print("Generating final document")
             messages = [("system", prompt_system), ("human", prompt_human),]
             self.response = self.llm.invoke(messages).text()
```
```diff
@@ -154,9 +163,7 @@ class OllamaGenerator:
         with open(filename, "w") as f:
             f.write(self.generate_markdown())

-    def generate_commit_message(self):
-        prompt_system = "You are a blog creator commiting a piece of content to a central git repo"
-        prompt_human = f"Generate a 5 word git commit message describing {self.response}"
+    def generate_system_message(self, prompt_system, prompt_human):
         messages = [("system", prompt_system), ("human", prompt_human),]
-        commit_message = self.llm.invoke(messages).text()
-        return commit_message
+        ai_message = self.llm.invoke(messages).text()
+        return ai_message
```
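The single-purpose generate_commit_message is folded into a general generate_system_message(prompt_system, prompt_human) helper. A minimal usage sketch, mirroring how src/main.py below calls it (the constructor arguments here are illustrative only):

```python
# Sketch only, not part of this diff: the generalised helper now drives both the
# commit-message prompt and the notification prompt built in src/main.py.
gen = OllamaGenerator("my_title", "note content", "My Title")  # illustrative arguments
commit_message = gen.generate_system_message(
    "You are a blog creator commiting a piece of content to a central git repo",
    f"Generate a 5 word git commit message describing {gen.response}. ONLY OUTPUT THE RESPONSE",
)
```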
src/main.py (50 lines changed)
```diff
@@ -1,7 +1,13 @@
 import ai_generators.ollama_md_generator as omg
 import trilium.notes as tn
 import repo_management.repo_manager as git_repo
+from notifications.n8n import N8NWebhookJwt
 import string,os
 from datetime import datetime
+from dotenv import load_dotenv
+load_dotenv()
+print(os.environ["CONTENT_CREATOR_MODELS"])
+
+

 tril = tn.TrilumNotes()
```
```diff
@@ -24,11 +30,51 @@ for note in tril_notes:
     ai_gen = omg.OllamaGenerator(os_friendly_title,
                                  tril_notes[note]['content'],
                                  tril_notes[note]['title'])
-    blog_path = f"/blog_creator/generated_files/{os_friendly_title}.md"
+    blog_path = f"generated_files/{os_friendly_title}.md"
     ai_gen.save_to_file(blog_path)
+
+
     # Generate commit messages and push to repo
-    commit_message = ai_gen.generate_commit_message()
+    print("Generating Commit Message")
+    git_sytem_prompt = "You are a blog creator commiting a piece of content to a central git repo"
+    git_human_prompt = f"Generate a 5 word git commit message describing {ai_gen.response}. ONLY OUTPUT THE RESPONSE"
+    commit_message = ai_gen.generate_system_message(git_sytem_prompt, git_human_prompt)
     git_user = os.environ["GIT_USER"]
     git_pass = os.environ["GIT_PASS"]
     repo_manager = git_repo.GitRepository("blog/", git_user, git_pass)
+    print("Pushing to Repo")
     repo_manager.create_copy_commit_push(blog_path, os_friendly_title, commit_message)
+
+    # Generate notification for Matrix
+    print("Generating Notification Message")
+    git_branch_url = f'https://git.aridgwayweb.com/armistace/blog/src/branch/{os_friendly_title}/src/content/{os_friendly_title}.md'
+    n8n_system_prompt = f"You are a blog creator notifiying the final editor of the final creation of blog available at {git_branch_url}"
+    n8n_prompt_human = f"""
+    Generate an informal 100 word
+    summary describing {ai_gen.response}.
+    Don't address it or use names. ONLY OUTPUT THE RESPONSE.
+    ONLY OUTPUT IN PLAINTEXT STRIP ALL MARKDOWN
+    """
+    notification_message = ai_gen.generate_system_message(n8n_system_prompt, n8n_prompt_human)
+    secret_key = os.environ['N8N_SECRET']
+    webhook_url = os.environ['N8N_WEBHOOK_URL']
+    notification_string = f"""
+    <h2>{tril_notes[note]['title']}</h2>
+    <h3>Summary</h3>
+    <p>{notification_message}</p>
+    <h3>Branch</h3>
+    <p>{os_friendly_title}</p>
+    <p><a href="{git_branch_url}">Link to Branch</a></p>
+    """
+
+    payload = {
+        "message": f"{notification_string}",
+        "timestamp": datetime.now().isoformat()
+    }
+
+    webhook_client = N8NWebhookJwt(secret_key, webhook_url)
+
+    print("Notifying")
+    n8n_result = webhook_client.send_webhook(payload)
+
+    print(f"N8N response: {n8n_result['status']}")
```
src/notifications/__init__.py (new empty file)
src/notifications/n8n.py (new file, 45 lines)
@@ -0,0 +1,45 @@

```python
from datetime import datetime, timedelta
import jwt
import requests
from typing import Dict, Optional


class N8NWebhookJwt:
    def __init__(self, secret_key: str, webhook_url: str):
        self.secret_key = secret_key
        self.webhook_url = webhook_url
        self.token_expiration = datetime.now() + timedelta(hours=1)

    def _generate_jwt_token(self, payload: Dict) -> str:
        """Generate JWT token with the given payload."""
        # Include expiration time (optional)
        payload["exp"] = self.token_expiration.timestamp()
        encoded_jwt = jwt.encode(
            payload,
            self.secret_key,
            algorithm="HS256",
        )
        return encoded_jwt #jwt.decode(encoded_jwt, self.secret_key, algorithms=['HS256'])

    def send_webhook(self, payload: Dict) -> Dict:
        """Send a webhook request with JWT authentication."""
        # Generate JWT token
        token = self._generate_jwt_token(payload)

        # Set headers with JWT token
        headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json"
        }

        # Send POST request
        response = requests.post(
            self.webhook_url,
            json=payload,
            headers=headers
        )

        # Handle response
        if response.status_code == 200:
            return {"status": "success", "response": response.json()}
        else:
            return {"status": "error", "response": response.status_code, "message": response.text}
```
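On the receiving end, the webhook consumer that shares N8N_SECRET would verify the bearer token with the same secret and algorithm. A minimal sketch of that check, assuming PyJWT on the verifying side:

```python
# Sketch only, not part of this diff: verifying the bearer token produced by
# N8NWebhookJwt, assuming the verifier shares N8N_SECRET and uses HS256.
import jwt


def verify_webhook_token(token: str, secret_key: str) -> dict:
    # Raises jwt.ExpiredSignatureError or jwt.InvalidSignatureError on failure,
    # since the payload carries the "exp" claim set in _generate_jwt_token.
    return jwt.decode(token, secret_key, algorithms=["HS256"])
```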