"""A wrapper around LLMs, customized for our config"""
|
|
|
|
from gemini import Gemini
|
|
from logging import getLogger
|
|
from typing import Optional
|
|
|
|
|
|
logger = getLogger()
|
|
|
|
|
|
class Chat:
    """Represents a configured interaction with an LLM."""

    __GENAI_INITIALIZED = False

    def __init__(self) -> None:
        self.__system_message = None

    def set_system_message(self, message: Optional[str]) -> None:
        """Set the system message used to initialize the chat; None clears it."""
        self.__system_message = message

    def chat(self, message: str) -> Optional[str]:
        """Asks a question and returns the response.

        Mostly used for debugging. Returns None if the
        LLM fails to respond.
        """
        logger.debug('Sending chat to LLM: %s', message)
        response = Gemini().chat(message, self.__system_message)
        if response.success:
            logger.debug('LLM responded: %s', response.text_response)
            return response.text_response
        else:
            logger.error('LLM failed to respond.')
            return None

    def smoke_test(self) -> str:
        """Sends a test message to the LLM whose expected response is known.

        Returns:
            str: Error string if the test failed, empty string otherwise.
        """
        response = Gemini().chat('respond with "OK"', None)
        if response.success:
            if response.text_response == 'OK':
                return ''
            else:
                return ('Smoke test failed: LLM was expected '
                        'to return "OK", got: "%s"'
                        % response.text_response)
        else:
            return 'LLM failed to respond'
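

# Minimal usage sketch (illustrative only, not part of the module's API):
# it assumes the `gemini` wrapper is importable and already configured with
# valid credentials in this environment.
if __name__ == '__main__':
    chat = Chat()
    error = chat.smoke_test()
    if error:
        print(error)
    else:
        chat.set_system_message('You are a terse assistant.')
        print(chat.chat('What is the capital of France?'))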