# Import the required libraries; make sure they are installed first, or install them with pip.
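# For example, in a notebook cell: !pip install openai gradio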
from openai import OpenAI
import gradio as gr
XAI_API_KEY = "Your API Key goes here"
client = OpenAI(api_key=XAI_API_KEY, base_url="https://api.x.ai/v1")

def generate_reply(tweet_content, style):  # send the tweet and chosen style to Grok and return its reply
    system_message = f"""You are an assistant trained to respond in the style of {style}.
Analyze the following tweet and reply humorously in 3-4 sentences, maintaining the wit characteristic of {style}.
Tweet: {tweet_content}"""
    completion = client.chat.completions.create(
        model="grok-beta",
        messages=[{"role": "system", "content": system_message}]
    )
    reply = completion.choices[0].message.content
    # Limit the reply to at most 4 sentences: split on ". ", keep the first four pieces,
    # and restore the trailing period if anything was trimmed off.
    reply_sentences = reply.split('. ')
    reply = '. '.join(reply_sentences[:4]) + ('.' if len(reply_sentences) > 4 else '')
    return reply

def witty_reply(tweet_content, style):  # function for the input/output interaction with Gradio
    reply = generate_reply(tweet_content, style)
    return reply

with gr.Blocks() as iface:  # initiating the Gradio interface
    tweet_content = gr.Textbox(label="Tweet Content", placeholder="Paste the full tweet with username here")
    style = gr.Radio(choices=["Chandler", "Niles"], label="Choose Reply Style")
    witty_response = gr.Textbox(label="Witty Reply", interactive=False)
    # Button to generate the reply
    generate_btn = gr.Button("Generate Reply")
    generate_btn.click(witty_reply, inputs=[tweet_content, style], outputs=witty_response)
iface.launch()

# Import the necessary libraries
import os
from openai import OpenAI
import gradio as gr
XAI_API_KEY = os.getenv("XAI_API_KEY")  # read the xAI API key from the Hugging Face Spaces secrets environment
client = OpenAI(api_key=XAI_API_KEY, base_url="https://api.x.ai/v1")

def generate_theory_question_and_answer(role):  # ask Grok for an interview question, then for a matching answer
    system_message = f"Generate a theoretical interview question for a {role} role."
    completion = client.chat.completions.create(
        model="grok-beta",
        messages=[{"role": "system", "content": system_message}]
    )
    question = completion.choices[0].message.content
    # Second call: ask Grok to answer the question it just generated
    system_message = f"Provide a possible answer to the question: '{question}' for a {role} role."
    completion = client.chat.completions.create(
        model="grok-beta",
        messages=[{"role": "system", "content": system_message}]
    )
    answer = completion.choices[0].message.content
    return question, answer

def run_interview_simulation(role):  # wrapper that Gradio calls to produce the question/answer pair
    question, answer = generate_theory_question_and_answer(role)
    return question, answer

with gr.Blocks() as iface:  # building the Gradio interface
    role = gr.Textbox(label="Job Role", placeholder="Enter job role here")
    theory_question = gr.Textbox(label="Question", interactive=False)
    grok_answer = gr.Textbox(label="Answer", interactive=False)
    # Button for generating the question and answer
    generate_btn = gr.Button("Generate Question and Answer")
    generate_btn.click(run_interview_simulation, inputs=role, outputs=[theory_question, grok_answer])
iface.launch()
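# To run this locally instead of on Spaces (assuming the script is saved as, say, app.py):
# set the XAI_API_KEY environment variable, run `python app.py`, and open the local URL that Gradio prints.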