Compare commits: pipeline_c...master

14 Commits

| SHA1 |
|---|
| bce439921f |
| 2de2d0fe3a |
| cf795bbc35 |
| a6ed20451a |
| 2abc39e3ac |
| 0594ea54aa |
| 60f7473297 |
| ec69e8e4f7 |
| 41f804a1eb |
| e3262cd366 |
| e2c29204fa |
| 44b5ea6a68 |
| 9296fda390 |
| 703a2384e7 |
@@ -33,7 +33,7 @@ class OllamaGenerator:
             The title for the blog is {self.inner_title}.
             Do not output the title in the markdown.
             The basis for the content of the blog is:
-            {self.content}
+            <blog>{self.content}</blog>
             """

     def split_into_chunks(self, text, chunk_size=100):
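The only change in this hunk wraps `{self.content}` in `<blog>` tags, separating the source material from the surrounding instructions so the model is less likely to treat text inside the content as instructions. The hunk also shows the `split_into_chunks` signature as context; its body is not part of this diff, but a word-based splitter along these lines would fit the signature (a hypothetical sketch, not the repository's implementation):

```python
def split_into_chunks(self, text, chunk_size=100):
    # Hypothetical body: the diff shows only the signature. A simple
    # word-based splitter is one plausible implementation, keeping each
    # chunk small enough to embed and store individually.
    words = text.split()
    return [' '.join(words[i:i + chunk_size])
            for i in range(0, len(words), chunk_size)]
```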
@@ -125,11 +125,12 @@ class OllamaGenerator:
         writing a 3000 word blog article. You like when they use almost no code examples.
         You are also Australian. The content may have light comedic elements,
         you are more professional and will attempt to tone these down
-        As this person produce and an amalgamtion of this blog as a markdown document.
+        As this person produce the final version of this blog as a markdown document
+        keeping in mind the context provided by the previous drafts.
         The title for the blog is {self.inner_title}.
         Do not output the title in the markdown. Avoid repeated sentences
         The basis for the content of the blog is:
-        {self.content}
+        <blog>{self.content}</blog>
         """
         try:
             query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
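For context on the unchanged `query_embed` line: the ollama Python client's `embed` call returns a response subscriptable by key, with `'embeddings'` holding one vector per input string, which is why the class indexes the result directly. A minimal standalone sketch, assuming a locally pulled `mxbai-embed-large` model (the actual value of `self.embed_model` is not visible in this diff):

```python
import ollama

client = ollama.Client()
# embed() accepts a string or a list of strings; the response carries an
# 'embeddings' list with one vector per input.
response = client.embed(model="mxbai-embed-large",
                        input="final-editor system prompt")
query_embed = response['embeddings']
print(len(query_embed), len(query_embed[0]))  # 1 vector, model-determined width
```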
@@ -138,7 +139,9 @@ class OllamaGenerator:
             print("Showing pertinent info from drafts used in final edited edition")
             pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
             #print(pertinent_draft_info)
-            prompt_human = f"Generate the final document using this information from the drafts: {pertinent_draft_info} - Only output in markdown, do not wrap in markdown tags"
+            prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
+                            - Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
+                            """
             print("Generating final document")
             messages = [("system", prompt_system), ("human", prompt_human),]
             self.response = self.llm.invoke(messages).text()
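The reworked `prompt_human` mirrors the `<blog>` change above: retrieved draft passages are fenced in `<context>` tags, and the instructions now ask for a single 3000 word draft rather than commentary on the drafts. A minimal sketch of the retrieve-and-generate step around it, assuming a chromadb collection named `blog_drafts` holding the earlier draft chunks and a LangChain `ChatOllama` model (both names are placeholders, not taken from this diff):

```python
import chromadb
import ollama
from langchain_ollama import ChatOllama

prompt_system = "You are the final editor of a blog post."  # abbreviated stand-in

# Embed the system prompt and use it to pull the most relevant draft chunks.
query_embed = ollama.Client().embed(model="mxbai-embed-large",
                                    input=prompt_system)['embeddings']
collection = chromadb.Client().get_or_create_collection("blog_drafts")
# query() returns parallel lists, one per query embedding; the documents
# matched for the single embedding passed in sit at ['documents'][0].
results = collection.query(query_embeddings=query_embed, n_results=100)
pertinent_draft_info = '\n\n'.join(results['documents'][0])

prompt_human = f"""Generate the final draft using this information from the drafts:
<context>{pertinent_draft_info}</context>
- Only output in markdown, do not wrap in markdown tags"""

llm = ChatOllama(model="llama3.1")  # placeholder model name
messages = [("system", prompt_system), ("human", prompt_human)]
response = llm.invoke(messages).text()  # .text() as in the diff above
```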