Streamlit app connected to a PostgreSQL database using SQLDatabaseChain and a SQL agent to ask questions (different answer each time the same query is executed)


Each time I rerun my application with the same question, I get a different response. Can someone please help me get consistent results?

I have set the temperature of both LLMs to 0, so why does the same question still produce a different answer on every rerun? How can I set up my application so that rerunning the same question always returns consistent results?

import os
import psycopg2
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.agents.agent_types import AgentType
from langchain.chat_models import AzureChatOpenAI
from langchain.llms import AzureOpenAI
import openai
from langchain.utilities import SQLDatabase
from dotenv import load_dotenv
from langchain_experimental.sql import SQLDatabaseChain
import streamlit as st 

#load .env and extract API credentials
load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
OPENAI_API_BASE = os.environ.get("OPENAI_API_BASE")
password = os.environ.get('password')
host = os.environ.get('host')

#API settings for langchain
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
os.environ['OPENAI_API_TYPE'] = 'azure'
os.environ['OPENAI_API_VERSION'] = '2023-03-15-preview'
os.environ['OPENAI_API_BASE'] = OPENAI_API_BASE 

#API settings for embedding
openai.api_type = "azure"
openai.api_base = OPENAI_API_BASE 
openai.api_version = '2023-03-15-preview'
openai.api_key = OPENAI_API_KEY


database_secret = {
  "password": "xxxxx",
  "dbname": "xxx",
  "engine": "postgres",
  "port": "xx",
  "dbInstanceIdentifier": "xxxx",
  "host": 'xxxxx',
  "username": "postgres"
}

def createConnectionDict(secret):
    """Transforms an AWS Secrets Manager secret into a dictionary.
    Args: secret (dict): Secret from AWS Secrets Manager
    Returns: dict: Dictionary with database connection details
    """
    # The secret is already a dict, so no json.loads/json.dumps is needed.
    connection_dict = {
        "host":     secret['host'],
        "database": secret['dbname'],
        "user":     secret['username'],
        "password": secret['password'],
    }
    return connection_dict

def createConnectionString(secret):
    """Transforms an AWS Secrets Manager secret into a connection string.
    Args: secret (dict): Secret from AWS Secrets Manager
    Returns: str: connection string for create_engine / SQLDatabase.from_uri
    """
    # Format: 'postgresql://username:password@host:5432/mydatabase'
    port = secret.get('port', '5432')
    strCon = f"postgresql://{secret['username']}:{secret['password']}@{secret['host']}:{port}/{secret['dbname']}"
    return strCon


def connect(params_dic):
    """Establishes a connection to PostgreSQL database
    Args: params_dic (dict): Dictionary with database connection details
    Returns: connection: Psycopg connection to PostgreSQL
    """
    conn = None
    try:
        print('Connecting to PostgreSQL database...')
        conn = psycopg2.connect(**params_dic)
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error while connecting to PostgreSQL database:", error)
        raise error
    print("Connecting to PostgreSQL database successful\n\n")
    return conn


def get_response_from_agent(strCon, question, llm, db):
    try:
        toolkit = SQLDatabaseToolkit(db=db, llm=llm)
        agent_executor = create_sql_agent(
            llm=llm,
            toolkit=toolkit,
            verbose=False,
            agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            handle_parsing_errors=True,
            top_k=1000
        )
        
        response = agent_executor.run(input=question)
        return response
    except ValueError as ve:
        # Handle the ValueError: log it and return a fallback response
        print(f"ValueError: {ve}")
        return "An error occurred while processing the request."

def get_response_from_chain(strCon, question, llm, db):
    PROMPT = """ 
    Given an input question, first create a syntactically correct postgresql query to run,  
    then look at the results of the query and return the answer.Do not limit the answer.  
    The question: {question}
    """
    db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=False)  
    response = db_chain.run(PROMPT.format(question=question))
    return response

if __name__ == "__main__":

        
    param_dic = createConnectionDict(database_secret) 
    strCon = createConnectionString(database_secret) 
    postgresql_connection = connect(param_dic) 
    
    db = SQLDatabase.from_uri(strCon)
        
    llm = AzureChatOpenAI(temperature=0, model='gpt-3.5-turbo', deployment_name="gpt-35-turbo")
    llm1 = AzureOpenAI(deployment_name="text-davinci-003", model_name="text-davinci-003",temperature=0.0)   
    st.header("Talk with your PostgreSQL Database")
    user_question = st.text_input("Ask a question:")
    if user_question:
        # Get responses
        response1 = get_response_from_agent(strCon=strCon, question=user_question, llm=llm, db=db)
        response2 = get_response_from_chain(strCon=strCon, question=user_question, llm=llm, db=db)
        response3 = get_response_from_agent(strCon=strCon, question=user_question, llm=llm1, db=db)
        # Save responses to session state
        st.session_state.agent_response = response1
        st.session_state.chain_response = response2
        st.session_state.agent_response3 = response3
        # Display responses
        st.write(f"The answer from agent using GPT: {st.session_state.agent_response}")
        
        st.write(f"The answer from agent using Davinci: {st.session_state.agent_response3}")
        st.write(f"The answer from chain: {st.session_state.chain_response}")

There is 1 answer

Answered by Stetco Oana:

There is a seed parameter that makes the results (mostly) reproducible:

llm = AzureChatOpenAI(seed=0, temperature=0, model='gpt-3.5-turbo', deployment_name="gpt-35-turbo")
llm1 = AzureOpenAI(seed=0, deployment_name="text-davinci-003", model_name="text-davinci-003", temperature=0.0)

https://cookbook.openai.com/examples/reproducible_outputs_with_the_seed_parameter
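
If your LangChain version does not accept seed as a direct keyword argument, it can usually be forwarded to the API through model_kwargs instead. This is a minimal sketch under that assumption, reusing the gpt-35-turbo deployment from the question:

from langchain.chat_models import AzureChatOpenAI

# Forward the seed to the underlying Chat Completions call via model_kwargs
# (assumption: your langchain version merges model_kwargs into the request).
llm = AzureChatOpenAI(
    deployment_name="gpt-35-turbo",
    temperature=0,
    model_kwargs={"seed": 0},
)

Keep in mind that seed is documented for the Chat Completions API, so the legacy completions model (text-davinci-003) may ignore it, and even seeded chat calls are only mostly deterministic (the cookbook suggests comparing system_fingerprint across calls). Remaining variation can also come from the agent choosing slightly different intermediate SQL on each run.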