Aymeric 2024-12-12 17:22:20 +01:00
parent 465614295d
commit ea42023bcd
14 changed files with 58 additions and 103 deletions

@@ -1,100 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "agents": [
        "Agent",
        "CodeAgent",
        "ManagedAgent",
        "ReactAgent",
        "CodeAgent",
        "JsonAgent",
        "Toolbox",
    ],
    "llm_engine": ["HfApiEngine", "TransformersEngine"],
    "monitoring": ["stream_to_gradio"],
    "tools": [
        "PipelineTool",
        "Tool",
        "ToolCollection",
        "launch_gradio_demo",
        "load_tool",
        "tool",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["default_tools"] = ["FinalAnswerTool", "PythonInterpreterTool"]
    _import_structure["document_question_answering"] = ["DocumentQuestionAnsweringTool"]
    _import_structure["image_question_answering"] = ["ImageQuestionAnsweringTool"]
    _import_structure["search"] = ["DuckDuckGoSearchTool", "VisitWebpageTool"]
    _import_structure["speech_to_text"] = ["SpeechToTextTool"]
    _import_structure["text_to_speech"] = ["TextToSpeechTool"]
    _import_structure["translation"] = ["TranslationTool"]

if TYPE_CHECKING:
    from .agents import (
        Agent,
        ManagedAgent,
        ReactAgent,
        CodeAgent,
        JsonAgent,
        Toolbox,
    )
    from .llm_engine import HfApiEngine, TransformersEngine
    from .monitoring import stream_to_gradio
    from .tools import (
        PipelineTool,
        Tool,
        ToolCollection,
        launch_gradio_demo,
        load_tool,
        tool,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .default_tools import FinalAnswerTool, PythonInterpreterTool
        from .tools.document_question_answering import DocumentQuestionAnsweringTool
        from .tools.image_question_answering import ImageQuestionAnsweringTool
        from .tools.search import DuckDuckGoSearchTool, VisitWebpageTool
        from .tools.speech_to_text import SpeechToTextTool
        from .tools.text_to_speech import TextToSpeechTool
        from .tools.translation import TranslationTool
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )

@@ -22,6 +22,3 @@ dependencies = [
    "duckduckgo-search>=6.3.7",
    "python-dotenv>=1.0.1"
]
[tool.setuptools]
packages = ["agents"]

src/agents/__init__.py (new file)

@@ -0,0 +1,36 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)
from transformers.utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .agents import *
    from .llm_engine import *
    from .monitoring import *
    from .tools import *

else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
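
As a rough usage sketch of what the new init gives callers (the deferred-import timing is an assumption about transformers' _LazyModule, not something the diff states):

# hypothetical usage sketch: importing the package stays cheap, and a submodule
# is only imported once one of its public names is actually accessed
import agents                   # registers the lazy module; no torch-heavy imports yet

AgentClass = agents.CodeAgent   # first access should trigger `from .agents import CodeAgent`
print(AgentClass.__module__)    # expected: "agents.agents"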

@@ -266,3 +266,5 @@ def handle_agent_outputs(output, output_type=None):
        if isinstance(output, _k):
            return _v(output)
    return output
__all__ = ["AgentType", "AgentImage", "AgentText", "AgentAudio"]

@@ -1073,3 +1073,5 @@ And even if your task resolution is not successful, please return as much contex
            return answer
        else:
            return output
__all__ = ["BaseAgent", "ManagedAgent", "ReactAgent", "CodeAgent", "JsonAgent", "Toolbox"]

@@ -183,3 +183,5 @@ class UserInputTool(Tool):
    def forward(self, question):
        user_input = input(f"{question} => ")
        return user_input
__all__ = ["PythonInterpreterTool", "FinalAnswerTool", "UserInputTool"]

@@ -110,3 +110,5 @@ class GradioUI:
            ).then(self.interact_with_agent, [stored_message, chatbot], [chatbot])
        demo.launch()
__all__ = ["stream_to_gradio", "GradioUI"]

@@ -268,3 +268,5 @@ DEFAULT_CODEAGENT_REGEX_GRAMMAR = {
    "type": "regex",
    "value": "Thought: .+?\\nCode:\\n```(?:py|python)?\\n(?:.|\\s)+?\\n```<end_action>",
}
__all__ = ["MessageRole", "llama_role_conversions", "get_clean_message_list", "HfEngine", "TransformersEngine", "HfApiEngine"]

@@ -43,3 +43,5 @@ class Monitor:
        )
        console.print(f"- Input tokens: {self.total_input_token_count:,}")
        console.print(f"- Output tokens: {self.total_output_token_count:,}")
__all__ = ["Monitor"]

@@ -490,3 +490,5 @@ Here is my new/updated plan of action to solve the task:
```
{plan_update}
```"""
__all__ = ["USER_PROMPT_PLAN_UPDATE", "PLAN_UPDATE_FINAL_PLAN_REDACTION", "ONESHOT_CODE_SYSTEM_PROMPT", "CODE_SYSTEM_PROMPT", "JSON_SYSTEM_PROMPT"]

@@ -1001,3 +1001,5 @@ def evaluate_python_code(
        msg = truncate_content(PRINT_OUTPUTS, max_length=MAX_LEN_OUTPUT)
        msg += f"EXECUTION FAILED:\nEvaluation stopped at line '{ast.get_source_segment(code, node)}' because of the following error:\n{e}"
        raise InterpreterError(msg)
__all__ = ["evaluate_python_code"]

@@ -77,3 +77,5 @@ class VisitWebpageTool(Tool):
            return f"Error fetching the webpage: {str(e)}"
        except Exception as e:
            return f"An unexpected error occurred: {str(e)}"
__all__ = ["DuckDuckGoSearchTool", "VisitWebpageTool"]

@@ -1185,3 +1185,5 @@ class Toolbox:
        for tool in self._tools.values():
            toolbox_description += f"\t{tool.name}: {tool.description}\n"
        return toolbox_description
__all__ = ["Tool", "tool", "load_tool", "launch_gradio_demo", "Toolbox"]

@@ -109,3 +109,5 @@ def truncate_content(
            + f"\n..._This content has been truncated to stay below {max_length} characters_...\n"
            + content[-MAX_LENGTH_TRUNCATE_CONTENT // 2 :]
        )
__all__ = []
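
The empty __all__ in this last hunk would then mean the module's helpers stay importable from the submodule itself without being re-exported at the package top level. A hedged illustration (the agents.utils module path is an assumption; the diff does not name the file):

# direct submodule import keeps working
from agents.utils import truncate_content

# but truncate_content is in no __all__, so it should not appear in the lazy
# top-level namespace; attribute access on the package is expected to raise AttributeError
import agents
assert not hasattr(agents, "truncate_content")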