Compare commits


1 commit

SHA1         Message          Date
e8040e2ba8   unleash client   2025-09-17 09:25:55 +10:00
4 changed files with 22 additions and 8 deletions


@@ -1,7 +1,7 @@
 name: Create Blog Article if new notes exist
 on:
   schedule:
-    - cron: "15 18 * * *"
+    - cron: "15 3 * * *"
   push:
     branches:
       - master
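The change above shifts the daily run from 18:15 to 03:15 (cron fields are minute, hour, day-of-month, month, day-of-week; Actions-style schedules are normally evaluated in UTC). If it helps to double-check the expression, here is a hedged Python sketch using the croniter package, which is an extra dependency and not part of this commit:

# Hedged sketch: check what the new "15 3 * * *" schedule resolves to.
# Assumes `pip install croniter`; croniter is not a dependency of this repo.
from datetime import datetime, timezone
from croniter import croniter

next_run = croniter("15 3 * * *", datetime.now(timezone.utc)).get_next(datetime)
print(next_run)  # next scheduled run, assuming the schedule is interpreted in UTC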


@@ -6,3 +6,4 @@ chromadb
 langchain-ollama
 PyJWT
 dotenv
+UnleashClient
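The new UnleashClient entry matches the import used by unleash_client.py further down; the Unleash Python SDK is published on PyPI under that same name. A small, hedged check that the dependency resolves after installing requirements:

# Confirms the newly added dependency imports and reports its installed version
# (distribution name "UnleashClient" is taken from the requirements line above).
from importlib.metadata import version
from UnleashClient import UnleashClient  # same import the new unleash_client.py uses

print("UnleashClient", version("UnleashClient"))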


@@ -26,7 +26,7 @@ class OllamaGenerator:
         self.llm = ChatOllama(model=self.ollama_model, temperature=0.6, top_p=0.5) #This is the level head in the room
         self.prompt_inject = f"""
         You are a journalist, Software Developer and DevOps expert
-        writing a 5000 word draft blog article for other tech enthusiasts.
+        writing a 3000 word draft blog article for other tech enthusiasts.
         You like to use almost no code examples and prefer to talk
         in a light comedic tone. You are also Australian
         As this person write this blog as a markdown document.
@@ -71,8 +71,8 @@ class OllamaGenerator:
         top_k = int(random.uniform(30, 80))
         agent_llm = ChatOllama(model=model, temperature=temp, top_p=top_p, top_k=top_k)
         messages = [
-            ("system", "You are a creative writer specialising in writing about technology"),
-            ("human", self.prompt_inject )
+            ("system", self.prompt_inject),
+            ("human", "make the blog post in a format to be edited easily" )
         ]
         response = agent_llm.invoke(messages)
         # self.response = self.ollama_client.chat(model=model,
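The edit above moves the long persona prompt into the "system" slot and leaves only a short task instruction in the "human" turn, the usual shape for LangChain chat models. A minimal stand-alone sketch of that pattern, assuming a local Ollama server with a pulled model (the model name below is illustrative, not from this commit):

from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1", temperature=0.6)  # illustrative model name
messages = [
    ("system", "You are a journalist, Software Developer and DevOps expert..."),  # persona/system prompt
    ("human", "make the blog post in a format to be edited easily"),              # short task instruction
]
print(llm.invoke(messages).content)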
@@ -119,10 +119,10 @@ class OllamaGenerator:
     def generate_markdown(self) -> str:
-        prompt_human = f"""
+        prompt_system = f"""
         You are an editor taking information from {len(self.agent_models)} Software
         Developers and Data experts
-        writing a 5000 word blog article. You like when they use almost no code examples.
+        writing a 3000 word blog article. You like when they use almost no code examples.
         You are also Australian. The content may have light comedic elements,
         you are more professional and will attempt to tone these down
         As this person produce the final version of this blog as a markdown document
@@ -134,13 +134,13 @@ class OllamaGenerator:
         <blog>{self.content}</blog>
         """
         try:
-            query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_human)['embeddings']
+            query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
             collection = self.load_to_vector_db()
             collection_query = collection.query(query_embeddings=query_embed, n_results=100)
             print("Showing pertinent info from drafts used in final edited edition")
             pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
             #print(pertinent_draft_info)
-            prompt_system = f"""Generate the final, 5000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
+            prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
             - Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
             """
             print("Generating final document")
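For orientation, the renamed prompt above feeds a retrieval step: the editor prompt is embedded via the Ollama client, the embedding queries a Chroma collection of agent drafts, and the matching documents are joined back into the final prompt. A hedged, stand-alone sketch of that flow (the embedding model and collection name are illustrative, not taken from this commit):

import ollama
import chromadb

chroma = chromadb.Client()
collection = chroma.get_or_create_collection("blog_drafts")  # illustrative collection name

# Embed the editor prompt, then pull the most relevant draft chunks.
query_embed = ollama.embed(model="nomic-embed-text", input="editor prompt text")["embeddings"]
results = collection.query(query_embeddings=query_embed, n_results=5)
pertinent_draft_info = "\n\n".join(results["documents"][0])
print(pertinent_draft_info)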

unleash_client.py (new file, 13 lines added)

@@ -0,0 +1,13 @@
+from UnleashClient import UnleashClient
+import asyncio
+
+client = UnleashClient(
+    url="http://192.168.178.160:30007/api/",
+    app_name="unleash-onboarding-python",
+    custom_headers={'Authorization': 'default:development.6uQIie4GdslTxgYAWVu35sRBjjBMPRRKw6vBj6mFsgFfvdXuy73GgLQg'}) # in production use environment variable
+
+client.initialize_client()
+
+while True:
+    print(client.is_enabled("crew_ai_integration"))
+    asyncio.run(asyncio.sleep(1))
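The inline comment in the new file already flags the hard-coded token; below is a hedged variant that follows that advice by reading the URL and token from environment variables (the names UNLEASH_URL and UNLEASH_API_TOKEN are illustrative, not part of the commit):

import os
from UnleashClient import UnleashClient

client = UnleashClient(
    url=os.environ["UNLEASH_URL"],  # e.g. the cluster-local Unleash API endpoint
    app_name="unleash-onboarding-python",
    custom_headers={"Authorization": os.environ["UNLEASH_API_TOKEN"]})  # token stays out of source control
client.initialize_client()
print(client.is_enabled("crew_ai_integration"))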