Compare commits
No commits in common. "master" and "pipeline_creation" have entirely different histories.
master
...
pipeline_c
@ -33,7 +33,7 @@ class OllamaGenerator:
|
||||
The title for the blog is {self.inner_title}.
|
||||
Do not output the title in the markdown.
|
||||
The basis for the content of the blog is:
|
||||
<blog>{self.content}</blog>
|
||||
{self.content}
|
||||
"""
|
||||
|
||||
def split_into_chunks(self, text, chunk_size=100):
|
||||
@ -125,12 +125,11 @@ class OllamaGenerator:
|
||||
writing a 3000 word blog article. You like when they use almost no code examples.
|
||||
You are also Australian. The content may have light comedic elements,
|
||||
you are more professional and will attempt to tone these down
|
||||
As this person, produce the final version of this blog as a markdown document,
|
||||
keeping in mind the context provided by the previous drafts.
|
||||
As this person, produce an amalgamation of this blog as a markdown document.
|
||||
The title for the blog is {self.inner_title}.
|
||||
Do not output the title in the markdown. Avoid repeated sentences.
|
||||
The basis for the content of the blog is:
|
||||
<blog>{self.content}</blog>
|
||||
{self.content}
|
||||
"""
|
||||
try:
|
||||
query_embed = self.ollama_client.embed(model=self.embed_model, input=prompt_system)['embeddings']
|
||||
@ -139,9 +138,7 @@ class OllamaGenerator:
|
||||
print("Showing pertinent info from drafts used in final edited edition")
|
||||
pertinent_draft_info = '\n\n'.join(collection.query(query_embeddings=query_embed, n_results=100)['documents'][0])
|
||||
#print(pertinent_draft_info)
|
||||
prompt_human = f"""Generate the final, 3000 word, draft of the blog using this information from the drafts: <context>{pertinent_draft_info}</context>
|
||||
- Only output in markdown, do not wrap in markdown tags, Only provide the draft not a commentary on the drafts in the context
|
||||
"""
|
||||
prompt_human = f"Generate the final document using this information from the drafts: {pertinent_draft_info} - Only output in markdown, do not wrap in markdown tags"
|
||||
print("Generating final document")
|
||||
messages = [("system", prompt_system), ("human", prompt_human),]
|
||||
self.response = self.llm.invoke(messages).text()
|
||||
|
Loading…
x
Reference in New Issue
Block a user