Compare commits

..

3 Commits

Author SHA1 Message Date
733241554f
flip some prompting around
All checks were successful
Create Blog Article if new notes exist / prepare_blog_drafts_and_push (push) Successful in 8m43s
2025-09-17 10:55:17 +10:00
92e9f3dcc2
flip some prompting around
Some checks failed
Create Blog Article if new notes exist / prepare_blog_drafts_and_push (push) Failing after 8m7s
2025-09-17 09:33:53 +10:00
2fbe47e936 update to run at 3.15 am properly (life is UTC here)
All checks were successful
Create Blog Article if new notes exist / prepare_blog_drafts_and_push (push) Successful in 2h25m18s
2025-07-24 08:56:39 +10:00
4 changed files with 8 additions and 22 deletions

View File

@@ -1,7 +1,7 @@
name: Create Blog Article if new notes exist name: Create Blog Article if new notes exist
on: on:
schedule: schedule:
- cron: "15 3 * * *" - cron: "15 18 * * *"
push: push:
branches: branches:
- master - master

View File

@@ -6,4 +6,3 @@ chromadb
langchain-ollama langchain-ollama
PyJWT PyJWT
dotenv dotenv
UnleashClient

View File

@@ -26,7 +26,7 @@ class OllamaGenerator:
self.llm = ChatOllama(model=self.ollama_model, temperature=0.6, top_p=0.5) #This is the level head in the room self.llm = ChatOllama(model=self.ollama_model, temperature=0.6, top_p=0.5) #This is the level head in the room
self.prompt_inject = f""" self.prompt_inject = f"""
You are a journalist, Software Developer and DevOps expert You are a journalist, Software Developer and DevOps expert
writing a 3000 word draft blog article for other tech enthusiasts. writing a 5000 word draft blog article for other tech enthusiasts.
You like to use almost no code examples and prefer to talk You like to use almost no code examples and prefer to talk
in a light comedic tone. You are also Australian in a light comedic tone. You are also Australian
As this person write this blog as a markdown document. As this person write this blog as a markdown document.
@@ -71,8 +71,8 @@ class OllamaGenerator:
top_k = int(random.uniform(30, 80)) top_k = int(random.uniform(30, 80))
agent_llm = ChatOllama(model=model, temperature=temp, top_p=top_p, top_k=top_k) agent_llm = ChatOllama(model=model, temperature=temp, top_p=top_p, top_k=top_k)
messages = [ messages = [
("system", self.prompt_inject), ("system", "You are a creative writer specialising in writing about technology"),
("human", "make the blog post in a format to be edited easily" ) ("human", self.prompt_inject )
] ]
response = agent_llm.invoke(messages) response = agent_llm.invoke(messages)
# self.response = self.ollama_client.chat(model=model, # self.response = self.ollama_client.chat(model=model,
@@ -119,10 +119,10 @@ class OllamaGenerator:
def generate_markdown(self) -> str: def generate_markdown(self) -> str:
prompt_system = f""" prompt_human = f"""
You are an editor taking information from {len(self.agent_models)} Software You are an editor taking information from {len(self.agent_models)} Software
Developers and Data experts Developers and Data experts
writing a 3000 word blog article. You like when they use almost no code examples. writing a 5000 word blog article. You like when they use almost no code examples.
You are also Australian. The content may have light comedic elements, You are also Australian. The content may have light comedic elements,
you are more professional and will attempt to tone these down you are more professional and will attempt to tone these down
As this person produce the final version of this blog as a markdown document As this person produce the final version of this blog as a markdown document
@@ -134,13 +134,13 @@ class OllamaGenerator:
<blog>{self.content}</blog> <blog>{self.content}</blog>
""" """
try: try:
query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings'] query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_human)['embeddings']
collection = self.load_to_vector_db() collection = self.load_to_vector_db()
collection_query = collection.query(query_embeddings=query_embed, n_results=100) collection_query = collection.query(query_embeddings=query_embed, n_results=100)
print("Showing pertinent info from drafts used in final edited edition") print("Showing pertinent info from drafts used in final edited edition")
pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0]) pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
#print(pertinent_draft_info) #print(pertinent_draft_info)
prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context> prompt_system = f"""Generate the final, 5000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
- Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context - Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
""" """
print("Generating final document") print("Generating final document")

View File

@@ -1,13 +0,0 @@
from UnleashClient import UnleashClient
import asyncio
client = UnleashClient(
url="http://192.168.178.160:30007/api/",
app_name="unleash-onboarding-python",
custom_headers={'Authorization': 'default:development.6uQIie4GdslTxgYAWVu35sRBjjBMPRRKw6vBj6mFsgFfvdXuy73GgLQg'}) # in production use environment variable
client.initialize_client()
while True:
print(client.is_enabled("crew_ai_integration"))
asyncio.run(asyncio.sleep(1))