...
195  _archive/aiprompts/ask.py  Normal file
@@ -0,0 +1,195 @@
import os
import json
import enum
import textwrap
from typing import List, Optional
import logging
from termcolor import colored

import ollama
import openai
from openai import OpenAI
from ai.instruction import instructions_load, instructions_get, instructions_reset

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)

class Model(enum.Enum):
    QWEN72I = "Qwen/Qwen2-72B-Instruct"
    MIXTRAL7I = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    PHI3_MEDIUM = "phi3:medium-128k"
    PHI3_MINI = "phi3:mini"
    GPT35 = "gpt-3.5-turbo"
    GPT4 = "gpt-4"
    GPT4O = "gpt-4o"
    QWEN1L = "qwen2:1.5b"  # local
    QWEN0L = "qwen2:0.5b"  # local
    PHI3L = "phi3:3.8b"
    QWEN7L = "qwen2:7b"  # local

class AIAssistant:
    def __init__(self):
        self.model = Model.QWEN72I
        self.openai_client = None
        self.deepinfra_client = None
        self._setup_clients()

    def _setup_clients(self):
        openaikey = os.getenv("OPENAIKEY")
        if openaikey:
            logger.info(colored("OpenAI key set", "green"))
            openai.api_key = openaikey
            self.openai_client = openai

        deepinfrakey = os.getenv("DEEPINFRAKEY")
        if deepinfrakey:
            logger.info(colored("DEEPINFRAKEY key set", "green"))
            self.deepinfra_client = OpenAI(
                api_key=deepinfrakey,
                base_url="https://api.deepinfra.com/v1/openai",
            )

    def set_model(self, model: Model):
        self.model = model
        logger.info(colored(f"Model set to: {model.value}", "cyan"))

    def ask(self, question: str, category: str = "", name: str = "", log: bool = True) -> str:
        logger.info(colored(f"Asking question in category: {category}, name: {name}", "yellow"))
        mm = instructions_get(category=category, name=name)
        mm.add_message(role="user", content=question)
        # mm.print_messages()

        if self.model in [Model.GPT4O, Model.GPT4, Model.GPT35]:
            response = self._ask_openai(mm.messages, log)
        elif self.model in [Model.QWEN72I, Model.MIXTRAL7I]:
            response = self._ask_deepinfra(mm.messages, log)
        else:
            response = self._ask_ollama(mm.messages, log)

        logger.info(colored("Ask completed", "green"))
        return response

    def _ask_openai(self, messages, log: bool) -> str:
        response = self.openai_client.chat.completions.create(
            model=self.model.value,
            messages=messages,
            max_tokens=300
        )
        r = response.choices[0].message.content
        if log:
            logger.info(colored(f"OpenAI Response: {self.model.value}", "magenta"))
            logger.info(colored(r, "white"))
        return r

    def _ask_ollama(self, messages, log: bool) -> str:
        response = ollama.chat(model=self.model.value, messages=messages)
        if log:
            logger.info(colored(response['message']['content'], "white"))
        return response['message']['content']

    def _ask_deepinfra(self, messages, log: bool) -> str:
        chat_completion = self.deepinfra_client.chat.completions.create(
            model=self.model.value,
            messages=messages,
            max_tokens=None,
            stream=False
        )

        if log:
            logger.info(colored(f"\nDeepInfra Response: {self.model.value}", "magenta"))
            logger.info(colored("-" * 20, "white"))
            logger.info(colored(chat_completion.choices[0].message.content, "white"))
            logger.info(colored("\nToken Usage:", "cyan"))
            logger.info(colored(f"Prompt tokens: {chat_completion.usage.prompt_tokens}", "white"))
            logger.info(colored(f"Completion tokens: {chat_completion.usage.completion_tokens}", "white"))

        return chat_completion.choices[0].message.content

def ai_assistent(reset: bool = True) -> AIAssistant:
    mypath = "~/code/git.threefold.info/projectmycelium/hero_server/lib/ai/instructions"
    if reset:
        instructions_reset()
        instructions_load(mypath)
    return AIAssistant()

# Usage example:
if __name__ == "__main__":

    mypath = "~/code/git.threefold.info/projectmycelium/hero_server/lib/ai/instructions"
    instructions_reset()
    instructions_load(mypath)

    assistant = AIAssistant()

    # assistant.set_model(Model.MIXTRAL7I)  # Or any other model you prefer
    assistant.set_model(Model.QWEN72I)
    # assistant.set_model(Model.PHI3L)

    # response = assistant.ask(
    #     category='timemgmt',
    #     name='schedule',
    #     question='''
    #     lets create a story
    #
    #     we need to paint our church
    #
    #     its long over due, the major complained,
    #     and his mother isn't happy
    #
    #     oh yes I forgot its election time
    #
    #     tom and ben will own this story
    #     its for our church in zanzibar
    #
    #     we need to do it in 4 month from now
    #
    #     our requirements are:
    #
    #     we need to make sure it can withstand sun
    #     color is white
    #     cost below 1000 USD
    #     '''
    # )
    # logger.info(colored("Final Response:", "green"))

    response = assistant.ask(
        category='',
        name='',
        question='''

        based on following names [Isabelle, Kristof, Jan, Rob, Florine, Florian, Sabrina, Tom, Ben]

        - find the owners of the story out of the text below, these owners are the ones who will do the task
        - see if these names are in the list above
        - if names match, return them, if not give error
        - return the names as a json list, don't give any other output

        ------

        we need to paint our church

        its long over due, the major complained,
        and his mother isn't happy

        oh yes I forgot its election time

        tom and ben will own this story
        its for our church in zanzibar

        we need to do it in 4 month from now

        our requirements are:

        we need to make sure it can withstand sun
        color is white
        cost below 1000 USD

        '''
    )

    logger.info(colored(response, "white"))
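The local entries in Model (QWEN1L, QWEN0L, PHI3L, QWEN7L) assume an Ollama daemon that already has those models pulled; nothing in this commit checks for that. A minimal, hypothetical guard along these lines could run before switching to a local model (the helper name is an invention, not part of the codebase):

```python
import ollama

def ensure_local_model(model_name: str) -> None:
    # hypothetical helper: make sure the local Ollama daemon has the model available
    try:
        ollama.show(model_name)   # raises if the model has not been pulled yet
    except Exception:
        ollama.pull(model_name)   # blocking pull; assumes the daemon is reachable on its default port

# e.g. before using a local model:
# ensure_local_model(Model.QWEN7L.value)
```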
158  _archive/aiprompts/instruction.py  Normal file
@@ -0,0 +1,158 @@
import os
import json
import redis
from typing import List, Dict, Optional

redis_client = redis.Redis(host='localhost', port=6379, db=0)

# loads instructions from the filesystem and stores them in redis for further usage
class MessageManager:
    def __init__(self, name='', category='', path: str = "", load: bool = True):
        self.name = name
        self.category = category
        self.messages: List[Dict[str, str]] = []
        if self.category == "":
            return
        if path:
            self.add(path)
        else:
            if load:
                self.load()

    def add(self, dir_path: str, filter: Optional[List[str]] = None, save: bool = True):
        dir_path = os.path.expanduser(dir_path)

        def process_files(current_dir: str):
            files_to_process = []
            for root, _, files in os.walk(current_dir):
                for file in files:
                    if file.startswith(('sys_', 'user_')):
                        try:
                            priority = int(file.split('_')[1])
                            descr = '_'.join(file.split('_')[2:])
                            if not filter or any(f in descr for f in filter):
                                files_to_process.append((os.path.join(root, file), priority))
                        except (IndexError, ValueError):
                            print(f"Skipping file with invalid format: {file}")

            for file_path, _ in sorted(files_to_process, key=lambda x: x[1]):
                file_name = os.path.basename(file_path)
                role = "system" if file_name.startswith('sys_') else "user"
                self.add_file(file_path, role)

        process_files(dir_path)

        if save:
            self.save()

    def add_file(self, file_path, role):
        file_path = os.path.expanduser(file_path)
        with open(file_path, 'r') as file:
            content = file.read().strip()
        if role == "system":
            self.add_message(role, content)
        elif role == "user":
            content_parts = content.split('--------', 1)
            if len(content_parts) == 2:
                content1, content2 = content_parts[0].strip(), content_parts[1].strip()
                self.add_message("user", content1)
                self.add_message("assistant", content2)
            else:
                raise Exception(f"File {file_path} does not contain the expected separator '--------'")
        else:
            raise Exception("Wrong role")

    def add_message(self, role, content):
        if not self.__validate_message(role, content):
            raise ValueError(f"Invalid message format. Role: {role}, Content: {content}")
        self.messages.append({"role": role, "content": content})

    def __validate_message(self, role, content):
        valid_roles = ["system", "user", "assistant"]
        return (
            isinstance(role, str) and
            role in valid_roles and
            isinstance(content, str) and
            len(content.strip()) > 0
        )

    def print_messages(self):
        for message in self.messages:
            role = message["role"].capitalize()
            content = message["content"]
            print(f"\n{role}:\n{'-' * len(role)}")
            print(content)
            print("-" * 40)

    def get_messages(self):
        return self.messages

    def save(self):
        key = f"llm:instructions:{self.category}:{self.name}"
        value = json.dumps(self.messages)
        redis_client.set(key, value)

    # return True if there were instructions
    def load(self):
        key = f"llm:instructions:{self.category}:{self.name}"
        value = redis_client.get(key)
        if value:
            self.messages = json.loads(value)
            return True
        return False

    def delete(self):
        key = f"llm:instructions:{self.category}:{self.name}"
        return redis_client.delete(key)

def instructions_reset():
    pattern = "llm:instructions*"
    keys_to_delete = redis_client.scan_iter(match=pattern)
    for key in keys_to_delete:
        redis_client.delete(key)

# get a message manager, loading its messages from redis
def instructions_get(name: str, category: str) -> MessageManager:
    m = MessageManager(name, category)
    return m

def instructions_load(path: str) -> List[MessageManager]:
    path = os.path.expanduser(path)
    message_managers = []
    # print(f"load {path}")
    for item in os.listdir(path):
        cat_path = os.path.join(path, item)
        if os.path.isdir(cat_path):
            category = os.path.basename(cat_path)
            # print(f"  load category: {cat_path}")
            # Process files in the category directory, these will be re-used in each MessageManager
            category_manager = MessageManager(name="", category=category)
            for item in os.listdir(cat_path):
                item_path = os.path.join(cat_path, item)
                if os.path.isfile(item_path):
                    if item.startswith('sys_') or item.startswith('user_'):
                        # print(f"    load cat base: {item_path}")
                        role = "system" if item.startswith('sys_') else "user"
                        category_manager.add_file(item_path, role)
                elif os.path.isdir(item_path):
                    # print(f"    load cat: {item_path}")
                    manager = MessageManager(name=item, category=category)
                    # copy the category-level base messages so each manager starts from them
                    # without sharing (and later mutating) the same list object
                    manager.messages = list(category_manager.messages)
                    manager.add(item_path)
                    message_managers.append(manager)

    return message_managers

# Usage example:
if __name__ == "__main__":

    # mypath = "/Users/despiegk1/code/git.threefold.info/projectmycelium/hero_server/lib/ai/instructions/timemgmt"
    # # mypath = ""
    # manager = MessageManager(name="schedule", category="timemgmt", path=mypath)
    # manager.print_messages()

    mypath = "/Users/despiegk1/code/git.threefold.info/projectmycelium/hero_server/lib/ai/instructions"
    instructions_reset()
    instructions_load(mypath)
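MessageManager.add only picks up files named `sys_<priority>_<description>` or `user_<priority>_<description>`, and user_ files must contain the `--------` separator that splits them into a user/assistant pair. A minimal sketch of the expected layout and round trip, assuming redis is running locally and the snippet runs alongside the helpers above (paths, file names and text are invented for illustration):

```python
import os
import textwrap

base = "/tmp/instructions_demo"                      # made-up path
os.makedirs(f"{base}/timemgmt/schedule", exist_ok=True)

# sys_<priority>_<description> at category level -> a "system" message for the category
with open(f"{base}/timemgmt/sys_1_base.md", "w") as f:
    f.write("you help with time management")

# user_<priority>_<description> inside a named subdir -> split on '--------' into user + assistant
with open(f"{base}/timemgmt/schedule/user_2_example.md", "w") as f:
    f.write(textwrap.dedent("""\
        how do I plan my week
        --------
        start by listing the fixed appointments
        """))

instructions_reset()
instructions_load(base)                              # saved under key llm:instructions:timemgmt:schedule
mm = instructions_get(name="schedule", category="timemgmt")
mm.print_messages()
```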
@@ -0,0 +1,10 @@

if the chat coming from the user seems to ask for a translation, e.g. "translate to french",
then translate the incoming text to that language

don't do anything for heroscript

overrule all previous instructions

just output the translated text

@@ -0,0 +1,42 @@

how do I use heroscript for agenda management

-------------------------

heroscript has basic notations to deal with calendars and events

when the user asks to translate a calendar action to heroscript, use the following rules and see the example below

- all dates are in europe style: Format: DD/MM/YYYY e.g. 06/07/2023
- if the year is not specified by the user then always use the current year, which is 2024
- if the month is not specified use the current month, which is september or month 9
- date and title are always required; if attendees or people are mentioned they should be in the attendies list
- don't use comments in the heroscript (means no // at end of line for heroscript)
- default duration is 1h, also ok: 15m (15 min), 1 day

```heroscript

//to add item in agenda
!!calendar.add
    date:'30-10-24'
    time:'10pm'
    duration:'1h'
    title:'meeting with tech team'
    attendies:'user1, kristof, ...'
    description:''

//to delete (can use words cancel, delete)
!!calendar.delete
    id:100

//to reschedule e.g. delay, 1d stands for 1 day, 1w for 1 week, 1h for 1 hour
!!calendar.delay
    id:100
    delay:'2d'

//when e.g. reschedule or delete, we can inform participants
!!calendar.inform
    id:100

```
@@ -0,0 +1,60 @@

how do I use heroscript for story and task management

-------------------------

heroscript has basic notations to deal with stories and tasks

when the user asks to translate a story or task action to heroscript, use the following rules and see the example below

- all dates are in europe style: Format: DD/MM/YYYY e.g. 06/07/2023
- if the year is not specified by the user then always use the current year, which is 2024
- if the month is not specified use the current month, which is september or month 9
- title is always required; if attendees or people are mentioned they should be on the assignment list
- date & time & duration are optional
- don't use comments in the heroscript (means no // at end of line for heroscript)
- duration is expressed as 1m, 1h, 1d (minute, hour, day)
- deadline is either a date or +1h, +1d, ...; the + means time from now, list it the same way e.g. +1h
- 1 month is expressed as 30 days or +30 days, 2 months as 60 days, ... (which means +30d for 1 month)
- stories cannot have a date; if a date is given, give an error
- owners, assignees, contributors, executors are all the same
- the description is always in markdown format
- the description always has the title repeated
- the description has title, purpose, deliverables
- try to figure out what purpose and deliverables are
- purpose is put as a list in markdown

```heroscript

//to add a new story
!!story.add
    title:'need to improve UI for version 1.0'
    owners:'karoline, kristof'
    description:'
    # need to improve UI for version 1.0

    We got some complaints from our userbase and its overdue.

    ## deliverables

    - [ ] specs and check with kristof
    - [ ] implement mockup
    - [ ] implement prototype

    '

//to add a new task, which might (optionally) be linked to a story
!!task.add
    title:'let our userbase know'
    story:10
    owners:'kristof'
    deadline:'+10d'
    description:'
    write email to userbase
    ask tom to check
    '

```
60  _archive/aiprompts/instructions/timemgmt/sys_2_heroscript.md  Normal file
@@ -0,0 +1,60 @@

'heroscript' is a simple declarative language in the following form

```heroscript
!!mother.define
    myname:'mymama'
    mylist:'20,200'
    myint:2

//this is how we define a child (is in list)
!!child.define
    mother:'mymama'
    name:'florine'
    length:100
    description:'
    multiline is supported
    '

!!child.define
    mother:'mymama'
    name:'aurelie'
    length:60
    description:'
    multiline is supported
    now for aurelie
    '
```

some rules

- '0,70' is a list of 2 (when there is a comma in the example, it's a list)
- never use [] in lists, just use comma separation between quotes ''
- in lists always put lowercase names
- node_name:'silver' is the same as node_name:silver; when there are spaces, always put '' around the value
- // means comment
- all dates are in europe style: Format: DD/MM/YYYY e.g. 06/07/2023, always specify the year

the corresponding model in vlang would be

```vlang
pub struct Mother {
pub mut:
    myname   string
    mylist   []int // e.g. [20, 200]
    myint    int   // e.g. 2
    children []Child
}

pub struct Child {
pub mut:
    name        string
    length      int
    description string
}
```

@@ -0,0 +1,61 @@

'heroscript' is a simple declarative language in the following form

```heroscript
!!mother.define
    myname:'mymama'
    mylist:'20,200'
    myint:2

//this is how we define a child (is in list)
!!child.define
    mother:'mymama'
    name:'florine'
    length:100
    description:'
    multiline is supported
    '

!!child.define
    mother:'mymama'
    name:'aurelie'
    length:60
    description:'
    multiline is supported
    now for aurelie
    '
```

some rules

- '0,70' is a list of 2 (when there is a comma in the example, it's a list)
- never use [] in lists, just use comma separation between quotes ''
- in lists always put lowercase names
- node_name:'silver' is the same as node_name:silver; when there are spaces, always put '' around the value
- // means comment
- all dates are in europe style: Format: DD/MM/YYYY e.g. 06/07/2023, always specify the year

the corresponding model in vlang would be

```vlang
pub struct Mother {
pub mut:
    myname   string
    mylist   []int // e.g. [20, 200]
    myint    int   // e.g. 2
    children []Child
}

pub struct Child {
pub mut:
    name        string
    length      int
    description string
}
```

In a heroscript file, the lines that follow a `!!<module>.<name>.define` statement define the properties or fields of the struct being defined. The properties are specified as `<property_name>:<value>`, with each property on its own line, as in the `!!child.define` blocks above.

@@ -0,0 +1,35 @@

how can I query a webservice over http using vlang for a simple post request

-------------------

```vlang

import freeflowuniverse.crystallib.clients.httpconnection
import json

mut conn := httpconnection.new(name: 'test', url: 'https://jsonplaceholder.typicode.com/')!

// add a header field to be used in all requests.
// the default header has the field Content-Type set to 'application/json',
// but we should reconsider this and leave it out, setting it manually when needed
conn.default_header.add(.content_language, 'Content-Language: en-US')

// get the blog post with id 1 (as an example), should be a fresh response from the server
mut res := conn.send(prefix: 'posts', id: '1')!

// the Result object has at minimum the fields (code, data) and one method is_ok()
println('Status code: ${res.code}')

// you can check whether you got a success status code or not
println('Success: ${res.is_ok()}')

// access the result data
println('Data: ${res.data}')

```
80  _archive/aiprompts/instructions/vlang/sys_1_vlang.md  Normal file
@@ -0,0 +1,80 @@
you are a chatbot, you try to help everyone with knowledge from v and vlang which is in the attached knowledge base

ALWAYS FOLLOW THE FOLLOWING INSTRUCTIONS FIRST

## structs examples

```v
@[heap]
pub struct GitAddr {
pub mut:
    gsconfig &GitStructureConfig
    accounts []&Account
    provider string
    account  string
    name     string // is the name of the repository
    branch   string
    nr       int
}

pub struct Account {
pub mut:
    name string // my comment
}

```

note the usage of pub & pub mut

all names are lowercase (snake_case with _)

& is used for references

## normalize a string

We call this "name fix": any time we use a name as an id, or as a key in a map, we want to normalize the string

```v
import freeflowuniverse.crystallib.core.texttools

mut myname := "a__Name_to_fix"
myname = texttools.name_fix(myname)
```

## dealing with paths

always use this library when dealing with paths; info on how to use it can be found in your knowledge base in core.pathlib.md

```v
import freeflowuniverse.crystallib.core.pathlib

// to get a path from a file or dir, pathlib will figure out if it is a dir or file and if it exists
mut p := pathlib.get('/tmp/mysourcefiles')!

// to get a dir and create it

// to get a list of paths and copy them to another destination
mut pathlist := p.list(regex: [r'.*.md$'])! // this gets all files ending on .md
pathlist.copy('/tmp/mydest')!

```

## executing commands

```v
// simple commands (< 1 line) can be executed using os.execute
// fn execute(cmd string) Result -- see the os.md module
res := os.execute(cmd)
if res.exit_code > 0 {
    return error('cannot upload over ssh: ${cmd}')
}
// ALWAYS check the return code
```

```v
// if the command is more complicated, use the osal.exec method as described in the osal.md file
res := osal.exec(cmd: args.cmd, stdout: args.stdout, debug: executor.debug)!
```
23  _archive/aiprompts/intent.py  Normal file
@@ -0,0 +1,23 @@
from transformers import pipeline

# Load the pipeline for text classification
classifier = pipeline("zero-shot-classification", model="typeform/distilbert-base-uncased-mnli")

# Define the possible intents
candidate_labels = ["complaint", "feedback", "appointment", "travel", "agenda", "taskmanagement", "religion", "fire test"]

def determine_intent(user_input):
    result = classifier(user_input, candidate_labels)
    print(result)
    return result["labels"][0]  # The intent with the highest score

# Example user input
user_input = '''
Playing with matches is dangerous.
Can you book me a meeting, its about flying to paris
'''

# Determine the intent
for i in range(10):
    intent = determine_intent(user_input)
    print(f"User intent: {intent}")
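determine_intent relies on the zero-shot pipeline returning its candidate labels sorted by score, highest first. Roughly, the result it indexes into has this shape (the scores below are invented for illustration, and the remaining labels are omitted):

```python
# approximate shape of `result` from the zero-shot-classification pipeline (values are illustrative)
result = {
    "sequence": "Can you book me a meeting, its about flying to paris",
    "labels": ["appointment", "travel", "agenda", "taskmanagement"],  # sorted by score, highest first
    "scores": [0.41, 0.22, 0.15, 0.08],
}
# hence result["labels"][0] is the top-scoring intent
```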
133  _archive/aiprompts/tools/chinook.py  Normal file
@@ -0,0 +1,133 @@
import sqlite3
import json
from openai import OpenAI
from tenacity import retry, wait_random_exponential, stop_after_attempt
from termcolor import colored

GPT_MODEL = "gpt-4o"
client = OpenAI()
dbpath = "/Users/despiegk1/Downloads/chinook.db"

conn = sqlite3.connect(dbpath)
print("Opened database successfully")

def get_table_names(conn):
    """Return a list of table names."""
    table_names = []
    tables = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
    for table in tables.fetchall():
        table_names.append(table[0])
    return table_names


def get_column_names(conn, table_name):
    """Return a list of column names."""
    column_names = []
    columns = conn.execute(f"PRAGMA table_info('{table_name}');").fetchall()
    for col in columns:
        column_names.append(col[1])
    return column_names


def get_database_info(conn):
    """Return a list of dicts containing the table name and columns for each table in the database."""
    table_dicts = []
    for table_name in get_table_names(conn):
        columns_names = get_column_names(conn, table_name)
        table_dicts.append({"table_name": table_name, "column_names": columns_names})
    return table_dicts


database_schema_dict = get_database_info(conn)
database_schema_string = "\n".join(
    [
        f"Table: {table['table_name']}\nColumns: {', '.join(table['column_names'])}"
        for table in database_schema_dict
    ]
)

tools = [
    {
        "type": "function",
        "function": {
            "name": "ask_database",
            "description": "Use this function to answer user questions about music. Input should be a fully formed SQL query.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": f"""
                            SQL query extracting info to answer the user's question.
                            SQL should be written using this database schema:
                            {database_schema_string}
                            The query should be returned in plain text, not in JSON.
                            """,
                    }
                },
                "required": ["query"],
            },
        }
    }
]

def ask_database(conn, query):
    """Function to query SQLite database with a provided SQL query."""
    try:
        results = str(conn.execute(query).fetchall())
    except Exception as e:
        results = f"query failed with error: {e}"
    return results


# Step 1: Prompt with content that may result in a function call. In this case the model can identify that the
# information requested by the user is potentially available in the database schema passed to it in the tools description.
messages = [{
    "role": "user",
    "content": "What is the name of the album with the most tracks?"
}]

response = client.chat.completions.create(
    model='gpt-4o',
    messages=messages,
    tools=tools,
    tool_choice="auto"
)

# Append the message to the messages list
response_message = response.choices[0].message
messages.append(response_message)

print(response_message)

# Step 2: determine if the response from the model includes a tool call.
tool_calls = response_message.tool_calls
if tool_calls:
    # If true, the model returned the name of the tool / function to call and the argument(s)
    tool_call_id = tool_calls[0].id
    tool_function_name = tool_calls[0].function.name
    tool_query_string = json.loads(tool_calls[0].function.arguments)['query']  # arguments arrive as a JSON string

    # Step 3: Call the function and retrieve results. Append the results to the messages list.
    if tool_function_name == 'ask_database':
        results = ask_database(conn, tool_query_string)

        messages.append({
            "role": "tool",
            "tool_call_id": tool_call_id,
            "name": tool_function_name,
            "content": results
        })

        # Step 4: Invoke the chat completions API with the function response appended to the messages list
        # Note that messages with role 'tool' must be a response to a preceding message with 'tool_calls'
        model_response_with_function_call = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
        )  # get a new response from the model where it can see the function response
        print(model_response_with_function_call.choices[0].message.content)
    else:
        print(f"Error: function {tool_function_name} does not exist")
else:
    # Model did not identify a function to call, result can be returned to the user
    print(response_message.content)