@JGalego
Last active May 7, 2024 01:37
Revisions

  1. JGalego revised this gist Mar 15, 2024. 1 changed file with 49 additions and 40 deletions.
    89 changes: 49 additions & 40 deletions interviewer.py
    @@ -59,29 +59,6 @@
    {candidate_cv}
    """

    # Create the prompt templates
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", SYSTEM_PROMPT),
            MessagesPlaceholder(variable_name="chat_history"),
        ]
    )

    # Initialize the chat model
    chat = BedrockChat(
        model_id="anthropic.claude-3-sonnet-20240229-v1:0",
        model_kwargs={
            'temperature': 0.1,
            'stop_sequences': [
                "\\n\\nHuman:",
                "\\n\\nCandidate:"
            ]
        }
    )

    # Create the chain
    chain = prompt | chat

    # Create a DynamoDB table to hold the conversation
    dynamodb_client = boto3.client("dynamodb")
    dynamodb = boto3.resource("dynamodb")
    @@ -118,6 +95,14 @@
    ####################
    # Helper Functions #
    ####################

    @st.cache_data
    def list_claude_models():
        """Lists the available Claude models on Amazon Bedrock"""
        bedrock = boto3.client("bedrock")
        return bedrock.list_foundation_models(
            byProvider="anthropic",
            byInferenceType="ON_DEMAND"
        )['modelSummaries']

    def process_cv():
        """Converts the candidate CV into a string"""
    @@ -155,7 +140,6 @@ def show_chat_history():

    def start_interview():
        """Jumpstarts the job interview"""
        print(st.session_state)
        if "uploaded_file" not in st.session_state or not st.session_state.uploaded_file:
            st.error("No file uploaded!")
            return
    @@ -174,17 +158,6 @@ def start_interview():

    st.title("Bedrock Interviewer πŸ§‘πŸ»β€πŸ’Ό")

    st.markdown(
        """
        <style>
            section[data-testid="stSidebar"] {
                width: 35% !important; /* Set the width to your desired value */
            }
        </style>
        """,
        unsafe_allow_html=True,
    )

    if "interview_in_progress" not in st.session_state:
        st.session_state.interview_in_progress = False

    @@ -193,22 +166,52 @@ def start_interview():
    **Bedrock Interviewer** is a simple chat application powered by [Amazon Bedrock](https://aws.amazon.com/bedrock/) that mimics a job interviewer. Its goal is to assess whether a candidate is a good fit for a job role by asking interview questions.
    """)

    st.session_state.job_role = st.sidebar.text_input(
    st.session_state.model = st.selectbox(
        label="Model",
        options=list_claude_models(),
        format_func=lambda model: model['modelName']
    )

    # Create the prompt templates
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", SYSTEM_PROMPT),
            MessagesPlaceholder(variable_name="chat_history"),
        ]
    )

    # Initialize the chat model
    chat = BedrockChat(
        model_id=st.session_state.model['modelId'],
        model_kwargs={
            'temperature': 0.1,
            'stop_sequences': [
                "\\n\\nHuman:",
                "\\n\\nCandidate:"
            ]
        }
    )

    # Create the chain
    chain = prompt | chat

    if not st.session_state.interview_in_progress:
        st.session_state.job_role = st.text_input(
            label="Job Role",
            value=DEFAULT_JOB_ROLE,
        )

        st.session_state.job_description = st.sidebar.text_area(
        st.session_state.job_description = st.text_area(
            label="Job Description",
            value=DEFAULT_JOB_DESCRIPTION,
        )

        st.session_state.uploaded_file = st.sidebar.file_uploader(
        st.session_state.uploaded_file = st.file_uploader(
            label="Upload CV",
            type=["pdf"],
        )

        st.sidebar.button(
        st.button(
            label="Start Interview 🀞",
            on_click=start_interview,
        )
    @@ -217,7 +220,7 @@ def start_interview():

    if len(history.messages) == 0 or \
       isinstance(history.messages[-1], HumanMessage):
        with st.spinner("Interviewer is thinking... πŸ’­"):
        with st.spinner(f"{st.session_state.model['modelName']} is thinking... πŸ’­"):
            question = chain.invoke({
                'job_role': st.session_state.job_role,
                'job_description': st.session_state.job_description,
    @@ -233,6 +236,12 @@ def start_interview():
            history.add_user_message(answer)
            st.markdown(answer)
        st.rerun()

    st.session_state.model = st.sidebar.selectbox(
        label="Model",
        options=list_claude_models(),
        format_func=lambda model: model['modelName']
    )

    reset_interview = st.sidebar.button(
        label="Reset Interview ⚠️",
  2. JGalego revised this gist Mar 8, 2024. 1 changed file with 53 additions and 27 deletions.
    80 changes: 53 additions & 27 deletions interviewer.py
    @@ -14,6 +14,7 @@
    (Streamlit edition)
    """

    import os
    import uuid

    import boto3
    @@ -35,10 +36,19 @@
    # Constants
    INTERVIEWER_TABLE = "BedrockInterviewer"
    SESSION_ID = uuid.UUID(int=uuid.getnode()).hex
    DEFAULT_JOB_DESCRIPTION = "job_description.txt"
    if os.path.isfile(DEFAULT_JOB_DESCRIPTION):
        with open(DEFAULT_JOB_DESCRIPTION, 'r', encoding="utf-8") as default_job_description_f:
            lines = default_job_description_f.readlines()
        DEFAULT_JOB_ROLE = lines[0].strip()
        DEFAULT_JOB_DESCRIPTION = "".join(lines[1:])
    else:
        DEFAULT_JOB_ROLE = None
        DEFAULT_JOB_DESCRIPTION = None

    # Prompts
    SYSTEM_PROMPT = """
    You are going to act as a job interviewer. Your goal is to ask me interview questions for a '{job_role}' position and assess whether I'm a good fit for the role. Ask one question at a time and one question only. Every question should be backed by one of Amazon's leadership principles. Make sure the candidate answers in the STAR (Situation, Task, Action, and Result) format.
    You are going to act as a job interviewer. Your goal is to ask me interview questions for a '{job_role}' position and assess whether I'm a good fit for the role. Ask one question at a time and one question only.
    ## Job Description
    @@ -53,21 +63,20 @@
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", SYSTEM_PROMPT),
            ("human", "I'm ready."), # for compatibility with the monkey patch
            MessagesPlaceholder(variable_name="chat_history"),
        ]
    )

    # Initialize the chat model
    chat = BedrockChat(
        model_id="anthropic.claude-3-sonnet-20240229-v1:0",
        model_kwargs={
            'temperature': 0.1,
            'stop_sequences': [
                "\\n\\nHuman:",
                "\\n\\nCandidate:"
            ]
        }
    )

    # Create the chain
    @@ -101,18 +110,23 @@
        session_id=SESSION_ID,
    )

    if len(history.messages) == 0:
        history.add_messages([
            HumanMessage(content="I'm ready to start the interview.")
        ])

    ####################
    # Helper Functions #
    ####################

    @st.cache_data()
    def process_cv():
        """Converts the candidate CV into a string"""
        reader = PdfReader(st.session_state.uploaded_file)
        st.session_state.candidate_cv = "".join([p.extract_text().strip() for p in reader.pages])

    def reset_interview():
        """Deletes the entire conversation history"""
        """Stops the interview and deletes the entire conversation history"""
        st.session_state.interview_in_progress = False
        history.clear()

    def process_chat_history():
    @@ -141,7 +155,8 @@ def show_chat_history():

    def start_interview():
        """Jumpstarts the job interview"""
        if "candidate_cv" not in st.session_state:
        print(st.session_state)
        if "uploaded_file" not in st.session_state or not st.session_state.uploaded_file:
            st.error("No file uploaded!")
            return
        if st.session_state.job_role == "":
    @@ -150,6 +165,7 @@ def start_interview():
        if st.session_state.job_description == "":
            st.error("Job description must not be empty!")
            return
        process_cv()
        st.session_state.interview_in_progress = True

    ########
    @@ -158,32 +174,42 @@ def start_interview():

    st.title("Bedrock Interviewer πŸ§‘πŸ»β€πŸ’Ό")

    st.sidebar.markdown("""
    **Bedrock Interviewer** is a simple chat application powered by [Amazon Bedrock](https://aws.amazon.com/bedrock/) that mimics a job interviewer. Its goal is to assess whether a candidate is a good fit for a job role by asking interview questions.
    """)
    st.markdown(
    """
    <style>
    section[data-testid="stSidebar"] {
    width: 35% !important; # Set the width to your desired value
    }
    </style>
    """,
    unsafe_allow_html=True,
    )

    if "interview_in_progress" not in st.session_state:
    st.session_state.interview_in_progress = False

    if not st.session_state.interview_in_progress:
        st.session_state.uploaded_file = st.file_uploader(
            label="Upload CV",
            type=["pdf"],
        )
        st.markdown("""
        **Bedrock Interviewer** is a simple chat application powered by [Amazon Bedrock](https://aws.amazon.com/bedrock/) that mimics a job interviewer. Its goal is to assess whether a candidate is a good fit for a job role by asking interview questions.
        """)

        if st.session_state.uploaded_file:
            process_cv()

        st.session_state.job_role = st.text_input(
        st.session_state.job_role = st.sidebar.text_input(
            label="Job Role",
            value=DEFAULT_JOB_ROLE,
        )

        st.session_state.job_description = st.text_area(
        st.session_state.job_description = st.sidebar.text_area(
            label="Job Description",
            value=DEFAULT_JOB_DESCRIPTION,
        )

        st.session_state.uploaded_file = st.sidebar.file_uploader(
            label="Upload CV",
            type=["pdf"],
        )

        st.button(
            label="Start Interview",
        st.sidebar.button(
            label="Start Interview 🀞",
            on_click=start_interview,
        )
    else:
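
This revision also introduces the `job_description.txt` convention used to pre-fill the form: the first line of the file becomes the default job role and the remaining lines the default description. A short sketch of that parsing, with hypothetical file contents shown in the comments:

    # job_description.txt (hypothetical contents):
    #   Machine Learning Engineer
    #   We are looking for an engineer to build LLM-powered apps on AWS...
    with open("job_description.txt", encoding="utf-8") as f:
        lines = f.readlines()

    DEFAULT_JOB_ROLE = lines[0].strip()           # "Machine Learning Engineer"
    DEFAULT_JOB_DESCRIPTION = "".join(lines[1:])  # everything after the first line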
  3. JGalego revised this gist Mar 8, 2024. 1 changed file with 11 additions and 149 deletions.
    160 changes: 11 additions & 149 deletions interviewer.py
    @@ -38,7 +38,7 @@

    # Prompts
    SYSTEM_PROMPT = """
    You are going to act as a job interviewer. Your goal is to ask me interview questions for a '{job_role}' position and assess whether I'm a good fit for the role. Ask one question at a time and one question only.
    You are going to act as a job interviewer. Your goal is to ask me interview questions for a '{job_role}' position and assess whether I'm a good fit for the role. Ask one question at a time and one question only. Every question should be backed by one of Amazon's leadership principles. Make sure the candidate answers in the STAR (Situation, Task, Action, and Result) format.
    ## Job Description
    @@ -59,154 +59,16 @@
    )

    # Initialize the chat model
    # Claude3 (experimental)
    if st.sidebar.checkbox(label="Use Claude 3 πŸ§ͺ", value=False):
        import json
        from typing import List, Dict, Optional, Any

        from langchain_core.messages import (
            SystemMessage,
            BaseMessage,
            HumanMessage,
            AIMessage
        )
        from langchain_core.callbacks import CallbackManagerForLLMRun
        from langchain_community.llms.utils import enforce_stop_tokens
        from langchain.schema.output import (
            ChatResult, ChatGeneration
        )

        class BedrockChatV3(BedrockChat):
            """
            A monkey patch to support Claude 3
            Adapted from https://github.com/langchain-ai/langchain/issues/18514
            """
            def _format_messages(
                self,
                messages: List[BaseMessage]
            ) -> List[Dict[str, str]]:
                list_of_messages = []
                for i, message in enumerate(messages):
                    if i % 2 == 0 and not isinstance(message, HumanMessage):
                        raise Exception(
                            f"Expected to see a HumanMessage at the position {i}, but found {message.__class__}"
                        )
                    elif i % 2 == 1 and not isinstance(message, AIMessage):
                        raise Exception(
                            f"Expected to see a AIMessage at the position {i}, but found {message.__class__}"
                        )

                    list_of_messages.append({
                        'role': "user" if isinstance(message, HumanMessage) else "assistant",
                        'content': message.content
                    })
                return list_of_messages

            def _prepare_input_and_invoke(
                self,
                prompt: List[BaseMessage],
                stop: Optional[List[str]] = None,
                run_manager: Optional[CallbackManagerForLLMRun] = None,
                **kwargs: Any,
            ) -> str:
                _model_kwargs = self.model_kwargs or {}

                messages = prompt
                params = {**_model_kwargs, **kwargs}
                params["anthropic_version"] = "bedrock-2023-05-31"
                if "max_tokens" not in params:
                    params["max_tokens"] = 256
                if self._guardrails_enabled:
                    params.update(self._get_guardrails_canonical())
                # Assuming the first message contains instructions
                if isinstance(messages[0], SystemMessage):
                    system = messages[0].content
                    messages = messages[1:]
                messages = self._format_messages(messages)
                input_body = params
                input_body["system"] = system
                input_body["messages"] = messages
                body = json.dumps(input_body)
                accept = "application/json"
                contentType = "application/json"

                request_options = {
                    "modelId": self.model_id,
                    "accept": accept,
                    "contentType": contentType,
                    "body": body
                }

                if self._guardrails_enabled:
                    request_options["guardrail"] = "ENABLED"
                    if self.guardrails.get("trace"):
                        request_options["trace"] = "ENABLED"

                try:
                    response = self.client.invoke_model(**request_options)
                    body = json.loads(response.get("body").read().decode())
                    text = body['content'][0]['text']
                except Exception as e:
                    raise ValueError(f"Error raised by bedrock service: {e}")

                if stop is not None:
                    text = enforce_stop_tokens(text, stop)

                services_trace = self._get_bedrock_services_signal(body)

                if services_trace.get("signal") and run_manager is not None:
                    run_manager.on_llm_error(
                        Exception(
                            f"Error raised by bedrock service: {services_trace.get('reason')}"
                        ),
                        **services_trace,
                    )

                return text

            def _generate(
                self,
                messages: List[BaseMessage],
                stop: Optional[List[str]] = None,
                run_manager: Optional[CallbackManagerForLLMRun] = None,
                **kwargs: Any,
            ) -> ChatResult:
                completion = ""

                params: Dict[str, Any] = {**kwargs}
                if stop:
                    params["stop_sequences"] = stop

                completion = self._prepare_input_and_invoke(
                    prompt=messages, stop=stop, run_manager=run_manager, **params
                )

                message = AIMessage(content=completion)
                return ChatResult(generations=[ChatGeneration(message=message)])

        chat = BedrockChatV3(
            model_id="anthropic.claude-3-sonnet-20240229-v1:0",
            model_kwargs={
                'temperature': 0.1,
                'stop_sequences': [
                    "\\n\\nHuman:",
                    "\\n\\nCandidate:"
                ]
            }
        )
    # Claude 2
    else:
        chat = BedrockChat(
            model_id="anthropic.claude-v2",
            model_kwargs={
                'temperature': 0.1,
                'stop_sequences': [
                    "\\n\\nHuman:",
                    "\\n\\nCandidate:"
                ]
            }
        )
    chat = BedrockChat(
        model_id="anthropic.claude-3-sonnet-20240229-v1:0",
        model_kwargs={
            'temperature': 0.1,
            'stop_sequences': [
                "\\n\\nHuman:",
                "\\n\\nCandidate:"
            ]
        }
    )

    # Create the chain
    chain = prompt | chat
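
This revision deletes the `BedrockChatV3` monkey patch in favor of a plain `BedrockChat` pointed at a Claude 3 model ID, which works once `langchain_community` ships native support for the Claude 3 messages API. A minimal sketch of the simplified call path, assuming such a release and valid AWS credentials:

    from langchain_community.chat_models import BedrockChat

    # Same construction the gist uses; invoke() accepts a plain string and
    # returns an AIMessage whose .content holds the model's reply
    chat = BedrockChat(
        model_id="anthropic.claude-3-sonnet-20240229-v1:0",
        model_kwargs={'temperature': 0.1},
    )
    print(chat.invoke("Ask me one interview question.").content)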
  4. JGalego created this gist Mar 6, 2024.
    352 changes: 352 additions & 0 deletions interviewer.py
    @@ -0,0 +1,352 @@
    r"""
    ____ _ _
    | _ \ | | | |
    | |_) | ___ __| |_ __ ___ ___| | __
    | _ < / _ \/ _` | '__/ _ \ / __| |/ /
    | |_) | __/ (_| | | | (_) | (__| <
    |____/ \___|\__,_|_| \___/ \___|_|\_\
    |_ _| | | (_)
    | | _ __ | |_ ___ _ ____ ___ _____ _____ _ __
    | | | '_ \| __/ _ \ '__\ \ / / |/ _ \ \ /\ / / _ \ '__|
    _| |_| | | | || __/ | \ V /| | __/\ V V / __/ |
    |_____|_| |_|\__\___|_| \_/ |_|\___| \_/\_/ \___|_|
    (Streamlit edition)
    """

    import uuid

    import boto3
    import streamlit as st

    from langchain_community.chat_message_histories import DynamoDBChatMessageHistory
    from langchain_community.chat_models import BedrockChat

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.prompts.chat import MessagesPlaceholder

    from langchain.schema import (
        AIMessage,
        HumanMessage,
    )

    from PyPDF2 import PdfReader

    # Constants
    INTERVIEWER_TABLE = "BedrockInterviewer"
    SESSION_ID = uuid.UUID(int=uuid.getnode()).hex

    # Prompts
    SYSTEM_PROMPT = """
    You are going to act as a job interviewer. Your goal is to ask me interview questions for a '{job_role}' position and assess whether I'm a good fit for the role. Ask one question at a time and one question only.
    ## Job Description
    {job_description}
    ## Candidate CV
    {candidate_cv}
    """

    # Create the prompt templates
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", SYSTEM_PROMPT),
            ("human", "I'm ready."), # for compatibility with the monkey patch
            MessagesPlaceholder(variable_name="chat_history"),
        ]
    )

    # Initialize the chat model
    # Claude3 (experimental)
    if st.sidebar.checkbox(label="Use Claude 3 πŸ§ͺ", value=False):
        import json
        from typing import List, Dict, Optional, Any

        from langchain_core.messages import (
            SystemMessage,
            BaseMessage,
            HumanMessage,
            AIMessage
        )
        from langchain_core.callbacks import CallbackManagerForLLMRun
        from langchain_community.llms.utils import enforce_stop_tokens
        from langchain.schema.output import (
            ChatResult, ChatGeneration
        )

        class BedrockChatV3(BedrockChat):
            """
            A monkey patch to support Claude 3
            Adapted from https://github.com/langchain-ai/langchain/issues/18514
            """
            def _format_messages(
                self,
                messages: List[BaseMessage]
            ) -> List[Dict[str, str]]:
                list_of_messages = []
                for i, message in enumerate(messages):
                    if i % 2 == 0 and not isinstance(message, HumanMessage):
                        raise Exception(
                            f"Expected to see a HumanMessage at the position {i}, but found {message.__class__}"
                        )
                    elif i % 2 == 1 and not isinstance(message, AIMessage):
                        raise Exception(
                            f"Expected to see a AIMessage at the position {i}, but found {message.__class__}"
                        )

                    list_of_messages.append({
                        'role': "user" if isinstance(message, HumanMessage) else "assistant",
                        'content': message.content
                    })
                return list_of_messages

            def _prepare_input_and_invoke(
                self,
                prompt: List[BaseMessage],
                stop: Optional[List[str]] = None,
                run_manager: Optional[CallbackManagerForLLMRun] = None,
                **kwargs: Any,
            ) -> str:
                _model_kwargs = self.model_kwargs or {}

                messages = prompt
                params = {**_model_kwargs, **kwargs}
                params["anthropic_version"] = "bedrock-2023-05-31"
                if "max_tokens" not in params:
                    params["max_tokens"] = 256
                if self._guardrails_enabled:
                    params.update(self._get_guardrails_canonical())
                # Assuming the first message contains instructions
                if isinstance(messages[0], SystemMessage):
                    system = messages[0].content
                    messages = messages[1:]
                messages = self._format_messages(messages)
                input_body = params
                input_body["system"] = system
                input_body["messages"] = messages
                body = json.dumps(input_body)
                accept = "application/json"
                contentType = "application/json"

                request_options = {
                    "modelId": self.model_id,
                    "accept": accept,
                    "contentType": contentType,
                    "body": body
                }

                if self._guardrails_enabled:
                    request_options["guardrail"] = "ENABLED"
                    if self.guardrails.get("trace"):
                        request_options["trace"] = "ENABLED"

                try:
                    response = self.client.invoke_model(**request_options)
                    body = json.loads(response.get("body").read().decode())
                    text = body['content'][0]['text']
                except Exception as e:
                    raise ValueError(f"Error raised by bedrock service: {e}")

                if stop is not None:
                    text = enforce_stop_tokens(text, stop)

                services_trace = self._get_bedrock_services_signal(body)

                if services_trace.get("signal") and run_manager is not None:
                    run_manager.on_llm_error(
                        Exception(
                            f"Error raised by bedrock service: {services_trace.get('reason')}"
                        ),
                        **services_trace,
                    )

                return text

            def _generate(
                self,
                messages: List[BaseMessage],
                stop: Optional[List[str]] = None,
                run_manager: Optional[CallbackManagerForLLMRun] = None,
                **kwargs: Any,
            ) -> ChatResult:
                completion = ""

                params: Dict[str, Any] = {**kwargs}
                if stop:
                    params["stop_sequences"] = stop

                completion = self._prepare_input_and_invoke(
                    prompt=messages, stop=stop, run_manager=run_manager, **params
                )

                message = AIMessage(content=completion)
                return ChatResult(generations=[ChatGeneration(message=message)])

        chat = BedrockChatV3(
            model_id="anthropic.claude-3-sonnet-20240229-v1:0",
            model_kwargs={
                'temperature': 0.1,
                'stop_sequences': [
                    "\\n\\nHuman:",
                    "\\n\\nCandidate:"
                ]
            }
        )
    # Claude 2
    else:
        chat = BedrockChat(
            model_id="anthropic.claude-v2",
            model_kwargs={
                'temperature': 0.1,
                'stop_sequences': [
                    "\\n\\nHuman:",
                    "\\n\\nCandidate:"
                ]
            }
        )

    # Create the chain
    chain = prompt | chat

    # Create a DynamoDB table to hold the conversation
    dynamodb_client = boto3.client("dynamodb")
    dynamodb = boto3.resource("dynamodb")
    try:
        table = dynamodb.create_table(
            TableName=INTERVIEWER_TABLE,
            KeySchema=[{
                "AttributeName": "SessionId",
                "KeyType": "HASH"
            }],
            AttributeDefinitions=[{
                "AttributeName": "SessionId",
                "AttributeType": "S"
            }],
            BillingMode="PAY_PER_REQUEST",
        )
    except dynamodb_client.exceptions.ResourceInUseException:
        table = dynamodb.Table(INTERVIEWER_TABLE)

    # Wait until the table exists
    table.meta.client.get_waiter("table_exists").wait(TableName=INTERVIEWER_TABLE)

    # Make some history!
    history = DynamoDBChatMessageHistory(
        table_name=INTERVIEWER_TABLE,
        session_id=SESSION_ID,
    )

    ####################
    # Helper Functions #
    ####################

    @st.cache_data()
    def process_cv():
        """Converts the candidate CV into a string"""
        reader = PdfReader(st.session_state.uploaded_file)
        st.session_state.candidate_cv = "".join([p.extract_text().strip() for p in reader.pages])

    def reset_interview():
        """Deletes the entire conversation history"""
        history.clear()

    def process_chat_history():
        """Converts the chat history into something Streamlit can consume"""
        messages = []
        for message in history.messages:
            if isinstance(message, AIMessage):
                messages.append({
                    'role': "assistant",
                    'content': message.content
                })
            elif isinstance(message, HumanMessage):
                messages.append({
                    'role': "human",
                    'content': message.content
                })
            else:
                pass
        return messages

    def show_chat_history():
        """Display the entire interview history"""
        for message in process_chat_history():
            with st.chat_message(message["role"], avatar=message.get("avatar", None)):
                st.markdown(message["content"])

    def start_interview():
        """Jumpstarts the job interview"""
        if "candidate_cv" not in st.session_state:
            st.error("No file uploaded!")
            return
        if st.session_state.job_role == "":
            st.error("Job role must not be empty!")
            return
        if st.session_state.job_description == "":
            st.error("Job description must not be empty!")
            return
        st.session_state.interview_in_progress = True

    ########
    # Main #
    ########

    st.title("Bedrock Interviewer πŸ§‘πŸ»β€πŸ’Ό")

    st.sidebar.markdown("""
    **Bedrock Interviewer** is a simple chat application powered by [Amazon Bedrock](https://aws.amazon.com/bedrock/) that mimics a job interviewer. Its goal is to assess whether a candidate is a good fit for a job role by asking interview questions.
    """)

    if "interview_in_progress" not in st.session_state:
    st.session_state.interview_in_progress = False

    if not st.session_state.interview_in_progress:
        st.session_state.uploaded_file = st.file_uploader(
            label="Upload CV",
            type=["pdf"],
        )

        if st.session_state.uploaded_file:
            process_cv()

        st.session_state.job_role = st.text_input(
            label="Job Role",
        )

        st.session_state.job_description = st.text_area(
            label="Job Description",
        )

        st.button(
            label="Start Interview",
            on_click=start_interview,
        )
    else:
        show_chat_history()

        if len(history.messages) == 0 or \
           isinstance(history.messages[-1], HumanMessage):
            with st.spinner("Interviewer is thinking... πŸ’­"):
                question = chain.invoke({
                    'job_role': st.session_state.job_role,
                    'job_description': st.session_state.job_description,
                    'candidate_cv': st.session_state.candidate_cv,
                    'chat_history': history.messages,
                })
            history.add_ai_message(question)
            with st.chat_message("assistant"):
                st.markdown(question.content)

        if answer := st.chat_input():
            with st.chat_message("human"):
                history.add_user_message(answer)
                st.markdown(answer)
            st.rerun()

    reset_interview = st.sidebar.button(
        label="Reset Interview ⚠️",
        on_click=reset_interview,
    )