@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import rclpy
+import rclpy.qos
 from langchain_core.messages import HumanMessage
 
 from rai.agents.conversational_agent import create_conversational_agent
@@ -21,45 +22,56 @@
 from rai.tools.ros.native import GetCameraImage, Ros2GetTopicsNamesAndTypesTool
 from rai.utils.model_initialization import get_llm_model
 
-rclpy.init()
-node = RaiBaseNode(node_name="manipulation_demo")
-node.declare_parameter("conversion_ratio", 1.0)
 
-tools = [
-    GetObjectPositionsTool(
-        node=node,
-        target_frame="panda_link0",
-        source_frame="RGBDCamera5",
-        camera_topic="/color_image5",
-        depth_topic="/depth_image5",
-        camera_info_topic="/color_camera_info5",
-    ),
-    MoveToPointTool(node=node, manipulator_frame="panda_link0"),
-    GetCameraImage(node=node),
-    Ros2GetTopicsNamesAndTypesTool(node=node),
-]
+def create_agent():
+    rclpy.init()
+    node = RaiBaseNode(node_name="manipulation_demo")
+    node.declare_parameter("conversion_ratio", 1.0)
+    node.qos_profile.reliability = rclpy.qos.ReliabilityPolicy.RELIABLE
 
-llm = get_llm_model(model_type="complex_model")
+    tools = [
+        GetObjectPositionsTool(
+            node=node,
+            target_frame="panda_link0",
+            source_frame="RGBDCamera5",
+            camera_topic="/color_image5",
+            depth_topic="/depth_image5",
+            camera_info_topic="/color_camera_info5",
+        ),
+        MoveToPointTool(node=node, manipulator_frame="panda_link0"),
+        GetCameraImage(node=node),
+        Ros2GetTopicsNamesAndTypesTool(node=node),
+    ]
 
-system_prompt = """
-You are a robotic arm with interfaces to detect and manipulate objects.
-Here are the coordinates information:
-x - front to back (positive is forward)
-y - left to right (positive is right)
-z - up to down (positive is up)
+    llm = get_llm_model(model_type="complex_model", streaming=True)
 
-Before starting the task, make sure to grab the camera image to understand the environment.
-"""
+    system_prompt = """
+    You are a robotic arm with interfaces to detect and manipulate objects.
+    Here are the coordinates information:
+    x - front to back (positive is forward)
+    y - left to right (positive is right)
+    z - up to down (positive is up)
 
-agent = create_conversational_agent(
-    llm=llm,
-    tools=tools,
-    system_prompt=system_prompt,
-)
+    Before starting the task, make sure to grab the camera image to understand the environment.
+    """
 
-messages = []
-while True:
-    prompt = input("Enter a prompt: ")
-    messages.append(HumanMessage(content=prompt))
-    output = agent.invoke({"messages": messages})
-    output["messages"][-1].pretty_print()
+    agent = create_conversational_agent(
+        llm=llm,
+        tools=tools,
+        system_prompt=system_prompt,
+    )
+    return agent
+
+
+def main():
+    agent = create_agent()
+    messages = []
+    while True:
+        prompt = input("Enter a prompt: ")
+        messages.append(HumanMessage(content=prompt))
+        output = agent.invoke({"messages": messages})
+        output["messages"][-1].pretty_print()
+
+
+if __name__ == "__main__":
+    main()
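The new `node.qos_profile.reliability = rclpy.qos.ReliabilityPolicy.RELIABLE` line assumes RaiBaseNode exposes a mutable `qos_profile` attribute that it later applies to its own subscriptions. For reference, a minimal sketch of the equivalent plain-rclpy pattern, building a QoSProfile up front and passing it to a subscription; the node name, callback, and use of the `/color_image5` topic here are illustrative only:

    import rclpy
    from rclpy.node import Node
    from rclpy.qos import HistoryPolicy, QoSProfile, ReliabilityPolicy
    from sensor_msgs.msg import Image

    # RELIABLE retransmits lost samples, unlike the BEST_EFFORT policy
    # many camera drivers publish with; depth bounds the incoming queue.
    qos = QoSProfile(
        history=HistoryPolicy.KEEP_LAST,
        depth=10,
        reliability=ReliabilityPolicy.RELIABLE,
    )

    rclpy.init()
    node = Node("qos_demo")  # hypothetical node name
    node.create_subscription(Image, "/color_image5", lambda msg: None, qos)

One caveat with this change: a RELIABLE subscription will not match a publisher that offers only BEST_EFFORT, so camera topics published best-effort would silently stop being delivered.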
0 commit comments
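Since agent construction now lives behind `create_agent()`, the agent can also be driven programmatically (e.g. from a test or another script) instead of through the interactive `main()` loop. A minimal sketch, assuming the file is importable as `manipulation_demo` (the module name and the prompt text are assumptions):

    from langchain_core.messages import HumanMessage

    from manipulation_demo import create_agent  # hypothetical module name

    agent = create_agent()  # also calls rclpy.init()
    output = agent.invoke(
        {"messages": [HumanMessage(content="What objects do you see?")]}
    )
    output["messages"][-1].pretty_print()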