diff --git a/docs/to-do.md b/docs/to-do.md
index b78b0f967..877aa1f56 100644
--- a/docs/to-do.md
+++ b/docs/to-do.md
@@ -10,7 +10,7 @@
 [ X ] Base Phi-3 template
 [ ] CoT template
 [ ] Few Shot template with examples
-[ ] Reflextion template
+[ ] Reflexion template
 
 ### Prompt Templates: Supporting Logic
 
diff --git a/src/text_generation/common/constants.py b/src/text_generation/common/constants.py
index bed11930b..68401d409 100644
--- a/src/text_generation/common/constants.py
+++ b/src/text_generation/common/constants.py
@@ -1,8 +1,8 @@
 class Constants:
-    ASSISTANT_TOKEN = "<|assistant|>"
-    END_TOKEN = "<|end|>"
-    SYSTEM_TOKEN = "<|system|>"
-    USER_TOKEN = "<|user|>"
+    ASSISTANT_TOKEN = "<|assistant|>"
+    END_TOKEN = "<|end|>"
+    SYSTEM_TOKEN = "<|system|>"
+    USER_TOKEN = "<|user|>"
 
 class PromptTemplateIds:
     PHI_3_MINI_4K_INSTRUCT_BASIC = "phi-3-mini-4k-instruct-basic"
diff --git a/tests/integration/test_prompt_template_repository.py b/tests/integration/test_prompt_template_repository.py
index ea9ffec68..14596047a 100644
--- a/tests/integration/test_prompt_template_repository.py
+++ b/tests/integration/test_prompt_template_repository.py
@@ -1,5 +1,6 @@
 import pytest
-from langchain_core.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate, FewShotPromptWithTemplates
+from langchain_core.example_selectors import SemanticSimilarityExampleSelector
 
 
 @pytest.mark.repository
@@ -18,4 +19,76 @@
     )
 
     id = constants.PromptTemplateIds.PHI_3_MINI_4K_INSTRUCT_BASIC
-    prompt_template_service.add(id, prompt_template)
\ No newline at end of file
+    prompt_template_service.add(id, prompt_template)
+
+
+
+@pytest.mark.repository
+def test_2_save_templates(constants, prompt_template_service):
+
+    examples = [
+        {
+            "input": "Explain quantum computing",
+            "bad_response": "Quantum computing is like, super complicated and uses quantum stuff. It's really hard to understand and involves particles and things."
+        },
+        {
+            "input": "How do I bake a cake?",
+            "bad_response": "Mix stuff together and put it in oven. Cook until done."
+        }
+    ]
+
+    example_template = PromptTemplate(
+        input_variables=["input", "bad_response"],
+        template="""Question: {input}
+
+        BAD Response (avoid this style):
+        {bad_response}"""
+    )
+
+    prompt_template = FewShotPromptTemplate(
+        examples=examples,
+        example_prompt=example_template,
+        prefix="Learn from these examples showing good vs bad responses:",
+        suffix="Question: {input}\n\nProvide a GOOD response:",
+        input_variables=["input"],
+        example_separator="\n" + "="*50 + "\n"
+    )
+
+    # prompt_template.examples = []
+    # formatted = prompt_template.format(input="What is machine learning?")
+
+    id = constants.PromptTemplateIds.PHI_3_MINI_4K_INSTRUCT_FEW_SHOT_EXAMPLES
+    prompt_template_service.add(id, prompt_template)
+
+def test_2_take2(constants):
+    # Note: This requires embeddings and vector store
+    # For demonstration, we'll show the structure
+
+    # Create example selector (you'd need actual embeddings)
+    example_selector = SemanticSimilarityExampleSelector.from_examples(
+        examples,
+        OpenAIEmbeddings(),  # Replace with your preferred embeddings
+        FAISS,
+        k=2  # Select top 2 most similar examples
+    )
+
+    system_template = f"""{constants.SYSTEM_TOKEN}
+You are a helpful AI assistant. Use the following examples to understand the expected response format and answer accordingly.{constants.END_TOKEN}
+"""
+
+    suffix_template = f"""{constants.USER_TOKEN}
+{{input}}{constants.END_TOKEN}
+{constants.ASSISTANT_TOKEN}
+"""
+
+    # Create few-shot prompt with semantic selection
+    few_shot_prompt = FewShotPromptTemplate(
+        example_selector=example_selector,
+        example_prompt=example_prompt,
+        prefix=system_template,
+        suffix=suffix_template,
+        input_variables=["input"],
+        example_separator="\n"
+    )
+
+    return few_shot_prompt
\ No newline at end of file