Mirror of https://github.com/elder-plinius/AutoTemp.git (synced 2026-02-12 17:22:46 +00:00)
Update autotemp.py
autotemp.py (47 changed lines)
@@ -4,6 +4,7 @@ import os
 import re
 from concurrent.futures import ThreadPoolExecutor, as_completed
 import gradio as gr
+import traceback

 # Load environment variables from .env file
 load_dotenv()
@@ -24,7 +25,7 @@ class AutoTemp:
     def generate_with_openai(self, prompt, temperature, top_p, retries=3):
         while retries > 0:
             try:
-                response = openai.ChatCompletion.create(
+                response = openai.chat.completions.create(
                     model=self.model_version,
                     messages=[
                         {"role": "system", "content": "You are a helpful assistant."},
@@ -33,13 +34,17 @@ class AutoTemp:
                     temperature=temperature,
                     top_p=top_p
                 )
-                message = response['choices'][0]['message']['content']
+                # Adjusted to use attribute access instead of dictionary access
+                message = response.choices[0].message.content
                 return message.strip()
             except Exception as e:
                 retries -= 1
+                print(f"Attempt failed with error: {e}")  # Print the error for debugging
+                if retries <= 0:
+                    print(f"Final error generating text at temperature {temperature} and top-p {top_p}: {e}")
                 return f"Error generating text at temperature {temperature} and top-p {top_p}: {e}"


     def evaluate_output(self, output, temperature, top_p):
         fixed_top_p_for_evaluation = 1.0
         eval_prompt = f"""
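The switch from `openai.ChatCompletion.create` with dictionary-style access to `openai.chat.completions.create` with attribute access tracks the openai-python v1 interface, where responses are typed objects rather than dicts. The commit keeps the module-level call; the sketch below uses the equivalent explicit client. It is purely illustrative, assumes `OPENAI_API_KEY` is set, and uses `gpt-3.5-turbo` only as a placeholder for AutoTemp's `self.model_version`:

```python
import os
from openai import OpenAI  # openai-python >= 1.0

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def generate(prompt: str, temperature: float, top_p: float, retries: int = 3) -> str:
    # Same retry-and-report shape as the method above, outside the class for brevity.
    while retries > 0:
        try:
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",  # placeholder; AutoTemp passes self.model_version
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": prompt},
                ],
                temperature=temperature,
                top_p=top_p,
            )
            # v1 responses are objects, hence attribute access instead of ['choices'][0]['message']['content']
            return response.choices[0].message.content.strip()
        except Exception as e:
            retries -= 1
            print(f"Attempt failed with error: {e}")
            if retries <= 0:
                return f"Error generating text at temperature {temperature} and top-p {top_p}: {e}"

print(generate("Say hello in five words.", temperature=0.7, top_p=1.0))
```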
@@ -75,10 +80,14 @@ class AutoTemp:
             }
             for future in as_completed(future_to_temp):
                 temp = future_to_temp[future]
-                output_text = future.result()
-                if output_text and not output_text.startswith("Error"):
-                    outputs[temp] = output_text
-                    scores[temp] = self.evaluate_output(output_text, temp, top_p)  # Pass top_p here
+                try:
+                    output_text = future.result()
+                    print(f"Output for temp {temp}: {output_text}")  # Print the output for debugging
+                    if output_text and not output_text.startswith("Error"):
+                        outputs[temp] = output_text
+                        scores[temp] = self.evaluate_output(output_text, temp, top_p)  # Pass top_p here
+                except Exception as e:
+                    print(f"Error while generating or evaluating output for temp {temp}: {e}")

         if not scores:
             return "No valid outputs generated.", None
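The new try/except around `future.result()` matters because `as_completed` only surfaces a worker's exception at the point the result is retrieved; without the handler, one failed temperature would abort the whole collection loop. A standalone sketch of that pattern, with a stand-in worker rather than `generate_with_openai`:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def flaky_worker(temp: float) -> str:
    # Stand-in for generate_with_openai; fails for one input to show the handling.
    if temp == 0.9:
        raise RuntimeError("simulated API failure")
    return f"output generated at temperature {temp}"

temperatures = [0.3, 0.6, 0.9, 1.2]
outputs = {}

with ThreadPoolExecutor() as executor:
    future_to_temp = {executor.submit(flaky_worker, t): t for t in temperatures}
    for future in as_completed(future_to_temp):
        temp = future_to_temp[future]
        try:
            outputs[temp] = future.result()  # re-raises the worker's exception here
        except Exception as e:
            print(f"Error while generating output for temp {temp}: {e}")

print(outputs)  # the failed temperature is simply absent from the results
```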
@@ -107,7 +116,7 @@ def main():
         inputs=[
             "text",
             "text",
-            gr.Slider(minimum=0.0, maximum=1.0, step=0.1, default=1.0, label="Top-p value"),
+            gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="top-p value"),
             "checkbox"
         ],
         outputs="text",
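`default=` was the Gradio 2.x keyword for a component's initial value; Gradio 3.x and later use `value=`, which is what the replacement line adopts. A minimal interface sketch using the newer keyword (the function and labels here are illustrative, not AutoTemp's):

```python
import gradio as gr

def echo_settings(prompt: str, top_p: float) -> str:
    # Illustrative stand-in for AutoTemp's run function.
    return f"prompt={prompt!r}, top_p={top_p}"

demo = gr.Interface(
    fn=echo_settings,
    inputs=[
        "text",
        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="top-p value"),
    ],
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()
```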
@@ -119,17 +128,29 @@ def main():
 Check the FAQs at the bottom of the page for more info.""",
         article="""**FAQs**

-**What's Top-p?** 'Top-p' controls the diversity of AI responses: a low 'top-p' makes output more focused and predictable, while a high 'top-p' encourages variety and surprise. Pair with temperature to fine-tune AI creativity: higher temperatures with high 'top-p' for bold ideas, or lower temperatures with low 'top-p' for precise answers.
+**What's Top-p?** 'Top-p' controls the diversity of AI responses: a low 'top-p' makes output more focused and predictable, while a high 'top-p' encourages variety and surprise. Pair with temperature to fine-tune AI creativity: higher temperatures with high 'top-p' for bold ideas, or lower temperatures with low 'top-p' for precise answers.
+Using top_p=1 essentially disables the "nucleus sampling" feature, where only the most probable tokens are considered. This is equivalent to using full softmax probability distribution to sample the next word.

 **How Does Temperature Affect AI Outputs?** Temperature controls the randomness of word selection. Lower temperatures lead to more predictable text, while higher temperatures allow for more novel text generation.

 **How Does Top-p Influence Temperature Settings in AI Language Models?**
-Top-p and temperature are both parameters that control the randomness of AI-generated text, but they influence outcomes in subtly different ways. Here's a summary of their interaction:
+Top-p and temperature are both parameters that control the randomness of AI-generated text, but they influence outcomes in subtly different ways:

-- At Low Temperatures (0.0 - 0.5): High top-p has a limited effect; low top-p further narrows word choice.
-- At Medium Temperatures (0.5 - 0.7): Top-p begins to have a noticeable impact; higher top-p adds variety.
-- At High Temperatures (0.8 - 1.0): Top-p is crucial; high top-p leads to creative, less coherent outputs.
-- At Extra-High Temperatures (1.1 - 2.0): Outputs become experimental; top-p's impact is less predictable.
+- **Low Temperatures (0.0 - 0.5):**
+  - *Effect of Top-p:* A high `top_p` value will have minimal impact, as the model's output is already quite deterministic. A low `top_p` will further constrain the model, leading to very predictable outputs.
+  - *Use Cases:* Ideal for tasks requiring precise, factual responses like technical explanations or legal advice. For example, explaining a scientific concept or drafting a formal business email.
+
+- **Medium Temperatures (0.5 - 0.7):**
+  - *Effect of Top-p:* `top_p` starts to influence the variety of the output. A higher `top_p` will introduce more diversity without sacrificing coherence.
+  - *Use Cases:* Suitable for creative yet controlled content, such as writing an article on a current event or generating a business report that balances creativity with professionalism.
+
+- **High Temperatures (0.8 - 1.0):**
+  - *Effect of Top-p:* A high `top_p` is crucial for introducing creativity and surprise, but may result in less coherent outputs. A lower `top_p` can help maintain some coherence.
+  - *Use Cases:* Good for brainstorming sessions, generating creative writing prompts, or coming up with out-of-the-box ideas where a mix of novelty and relevance is appreciated.
+
+- **Extra-High Temperatures (1.1 - 2.0):**
+  - *Effect of Top-p:* The output becomes more experimental and unpredictable, and `top_p`'s influence can vary widely. It's a balance between randomness and diversity.
+  - *Use Cases:* Best for when you're seeking highly creative or abstract ideas, such as imagining a sci-fi scenario or coming up with a plot for a fantasy story, where coherence is less of a priority compared to novelty and uniqueness.

 Adjusting both temperature and top-p helps tailor the AI's output to your specific needs.""",
         examples=[
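The FAQ text above describes how temperature reshapes the token distribution and how top-p then truncates it. As a purely illustrative aside (not part of the commit), a toy sampler makes that interaction concrete; the vocabulary and logits below are made up:

```python
import math
import random

def sample_next_token(logits: dict[str, float], temperature: float, top_p: float) -> str:
    """Toy illustration of how temperature and top-p interact during sampling."""
    # Temperature rescales logits before the softmax: <1 sharpens, >1 flattens.
    scaled = {tok: logit / max(temperature, 1e-6) for tok, logit in logits.items()}
    total = sum(math.exp(v) for v in scaled.values())
    probs = {tok: math.exp(v) / total for tok, v in scaled.items()}

    # Nucleus (top-p) sampling: keep the smallest set of tokens whose cumulative
    # probability reaches top_p; top_p=1.0 keeps everything, i.e. sampling from
    # the full softmax distribution, as the FAQ notes.
    ranked = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)
    nucleus, cumulative = [], 0.0
    for tok, p in ranked:
        nucleus.append((tok, p))
        cumulative += p
        if cumulative >= top_p:
            break

    tokens, weights = zip(*nucleus)
    return random.choices(tokens, weights=weights, k=1)[0]

toy_logits = {"the": 2.0, "a": 1.5, "platypus": 0.3, "kumquat": 0.1}
print(sample_next_token(toy_logits, temperature=0.2, top_p=0.5))  # almost always "the"
print(sample_next_token(toy_logits, temperature=1.5, top_p=1.0))  # much more varied
```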