Skip to content

Feature/purdue llm diff analysis #771

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 8 commits into
base: develop
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import us.ihmc.footstepPlanning.graphSearch.parameters.DefaultFootstepPlannerParametersReadOnly;
import us.ihmc.footstepPlanning.swing.SwingPlannerParametersBasics;
import us.ihmc.footstepPlanning.swing.SwingPlannerType;
import us.ihmc.log.LogTools;
import us.ihmc.rdx.input.ImGui3DViewInput;
import us.ihmc.rdx.ui.RDXBaseUI;
import us.ihmc.rdx.vr.RDXVRContext;
Expand Down Expand Up @@ -264,7 +265,8 @@ public void walkFromSteps()
// TODO dangerous. However when manually placing footsteps, this is great.
messageList.getQueueingProperties().setExecutionMode(ExecutionMode.QUEUE.toByte());
messageList.getQueueingProperties().setMessageId(UUID.randomUUID().getLeastSignificantBits());
messageList.setOffsetFootstepsHeightWithExecutionError(true);
messageList.setOffsetFootstepsHeightWithExecutionError(false);
messageList.setTrustHeightOfFootsteps(false);
messageList.setDefaultSwingDuration(locomotionParameters.getSwingTime());
messageList.setDefaultTransferDuration(locomotionParameters.getTransferTime());
messageList.setAreFootstepsAdjustable(locomotionParameters.getAreFootstepsAdjustable());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import us.ihmc.communication.crdt.CRDTInfo;
import us.ihmc.communication.crdt.CRDTStatusFootstepList;
import us.ihmc.euclid.geometry.interfaces.Pose3DReadOnly;
import us.ihmc.euclid.referenceFrame.FramePoint3D;
import us.ihmc.euclid.referenceFrame.ReferenceFrame;
import us.ihmc.euclid.transform.RigidBodyTransform;
import us.ihmc.euclid.tuple3D.Point3D;
Expand Down Expand Up @@ -71,33 +72,45 @@ public AI2RNodeExecutor(long id,
int commandedBehaviorIndex = -1;
for (int i = 0; i < state.getCheckPoints().size(); i++)
{
if (state.getCheckPoints().get(i).getDefinition().getName().toLowerCase().equals(behaviorToExecuteName))
if (state.getCheckPoints().get(i).getDefinition().getName().equals(behaviorToExecuteName))
{
commandedBehaviorIndex = state.getCheckPoints().get(i).getLeafIndex();
break;
}
}
LogTools.warn(commandedBehaviorIndex);

// Generic adaptable skills
// GoTo behavior - Navigation
if (behaviorToExecuteName.toLowerCase().contains("go"))
if (behaviorToExecuteName.contains("GOTO") && message.getAdaptingBehavior())
{
AI2RNavigationMessage navigationMessage = message.getNavigation();
// Set goals for GoTo behavior
String referenceFrame = navigationMessage.getReferenceFrameName().toString();
Point3D goalStancePoint = navigationMessage.getGoalStancePoint();
Point3D goalFocalPoint = navigationMessage.getGoalFocalPoint();
double distanceToReferenceFrame = navigationMessage.getDistanceToFrame();

for (var leaf : state.getActionSequence().getOrderedLeaves())
{
if (leaf.getDefinition().getName().toLowerCase().contains("go to action") && leaf instanceof FootstepPlanActionState gotoActionState)
{
gotoActionState.getDefinition().setParentFrameName(referenceFrame);

FramePoint3D goalStancePoint = new FramePoint3D(gotoActionState.getParentFrame());
LogTools.info(gotoActionState.getParentFrame().getName());
goalStancePoint.addX(distanceToReferenceFrame);
goalStancePoint.changeFrame(ReferenceFrame.getWorldFrame());

FramePoint3D goalFocalPoint = new FramePoint3D(gotoActionState.getParentFrame());
goalFocalPoint.changeFrame(ReferenceFrame.getWorldFrame());

gotoActionState.getDefinition().getGoalStancePoint().getValue().set(goalStancePoint);
gotoActionState.getDefinition().getGoalFocalPoint().getValue().set(goalFocalPoint);
break;
}
}
}
else // Hand Pose Adaptation
// Object pick and place
else if (behaviorToExecuteName.contains("PICKUP") || behaviorToExecuteName.contains("PLACE") && message.getAdaptingBehavior())
{
AI2RHandPoseAdaptationMessage handMessage = message.getHandPoseAdaptation();
for (var leaf : state.getActionSequence().getOrderedLeaves())
Expand Down Expand Up @@ -125,6 +138,8 @@ public AI2RNodeExecutor(long id,
failedLeaves.clear();
state.getActionSequence().setExecutionNextIndex(commandedBehaviorIndex);
state.getActionSequence().setAutomaticExecution(true);
statusMessage.setCompletedBehavior("-");
LogTools.warn("Automatic execution");
}
});
}
Expand Down Expand Up @@ -241,10 +256,6 @@ public void update()
// Jump to end of sequence
state.getActionSequence().setExecutionNextIndex(state.getCheckPoints().get(state.getCheckPoints().size() - 1).getLeafIndex());
}
else if (!state.getCheckPoints().get(i).getDefinition().getName().contains("END") && state.getCheckPoints().get(i).getIsExecuting())
{ // If we are executing another behavior checkpoint
statusMessage.setCompletedBehavior("");
}
}

// Check if Goto action is executing and if next steps are colliding with objects in the scene
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@
import us.ihmc.behaviors.behaviorTree.BehaviorTreeRootNodeState;
import us.ihmc.behaviors.behaviorTree.BehaviorTreeTools;
import us.ihmc.behaviors.sequence.ActionSequenceState;
import us.ihmc.behaviors.sequence.LeafNodeState;
import us.ihmc.behaviors.sequence.actions.CheckPointNodeState;
import us.ihmc.communication.crdt.CRDTInfo;
import us.ihmc.log.LogTools;
import us.ihmc.tools.io.WorkspaceResourceDirectory;

import java.util.ArrayList;
Expand Down Expand Up @@ -40,8 +42,8 @@ public void updateSubtree(BehaviorTreeNodeState<?> node)
{
checkPoints.add(checkPoint);
}
if (child instanceof ActionSequenceState sequence)
updateSubtree(sequence);
else
updateSubtree(child);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,6 +280,8 @@ private void buildAndSendCommandAndSetDesiredState()
footstepDataListMessage.setFinalTransferDuration(finalTransferDuration);
footstepDataListMessage.getQueueingProperties().setExecutionMode(definition.getExecutionMode().getValue().toByte());
footstepDataListMessage.getQueueingProperties().setMessageId(UUID.randomUUID().getLeastSignificantBits());
footstepDataListMessage.setOffsetFootstepsHeightWithExecutionError(false);
footstepDataListMessage.setTrustHeightOfFootsteps(false);
state.getLogger().info("Commanding {} footsteps", footstepDataListMessage.getFootstepDataList().size());
ros2ControllerHelper.publishToController(footstepDataListMessage);
for (RobotSide side : RobotSide.values)
Expand Down
2 changes: 1 addition & 1 deletion ihmc-interfaces/ros2ws/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,4 +39,4 @@ You can also test the scene graph only by running
```
$ python3 scene_graph_example.py
```
Observe that some scene nodes are added in the scene graph and in the scene and removed.
Observe that some scene nodes are added in the scene graph and in the scene and removed.
Binary file not shown.
243 changes: 243 additions & 0 deletions ihmc-interfaces/ros2ws/behavior_coordination.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,243 @@
import os
import time
import threading

import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from behavior_msgs.msg import AI2RCommandMessage
from behavior_msgs.msg import AI2RObjectMessage
from behavior_msgs.msg import AI2RStatusMessage
from behavior_msgs.msg import AI2RNavigationMessage
from behavior_msgs.msg import AI2RHandPoseAdaptationMessage

import cv2
import numpy as np
from llm_interface import LLMInterface

# ---------------------------------------------------------------------------
# Module-level state shared by the ROS 2 callbacks below.
# ---------------------------------------------------------------------------

# Bring the LLM client up once, at import time.
print(" Calling the LLM")
llm = LLMInterface(config_file="config.json")

# ROS 2 handles (node / publisher / subscriber), filled in by main().
ros2 = {}

# Coordination flags: `initialized` gates one-time scene inspection,
# `waiting_for_command` is True whenever a new behavior may be issued.
initialized = False
waiting_for_command = True

# Ordered list of behaviors to run, plus a cursor into that list.
behavior_queue = []
current_behavior_index = 0

def behavior_message_callback(msg):
    """Handle one AI2RStatusMessage from the robot's behavior tree.

    Summarizes the scene and the behavior-execution status, asks the LLM
    which behavior to run next, and publishes an AI2RCommandMessage when
    the LLM's answer matches one of the available behaviors.

    Side effects: publishes on ros2["behavior_publisher"] and mutates the
    module-level flags `waiting_for_command` and `current_behavior_index`.
    """
    global initialized
    global waiting_for_command
    global behavior_queue
    global current_behavior_index

    # Kept for future reasoning about the robot's location; currently unused.
    robot_pose = msg.robot_mid_feet_under_pelvis_pose_in_world

    # --------- Scene -----------
    # NOTE(review): `initialized` is never set to True anywhere in this file,
    # so this "one-time" inspection actually runs on every message — confirm
    # whether that is intended.
    if not initialized:
        scene_objects = msg.objects
        if scene_objects:
            for obj in scene_objects:
                object_name = obj.object_name  # renamed: don't shadow builtin id()
                pose_in_world = obj.object_pose_in_world
                # Pose expressed relative to robot_mid_feet_under_pelvis_pose_in_world.
                pose_wrt_robot = obj.object_pose_in_robot_frame
        else:
            print("-")

    scene_objects_name = [obj.object_name for obj in msg.objects]

    # --------- Behaviors -----------
    available_behaviors = msg.available_behaviors

    # --------- Monitoring -----------
    # "-" is the sentinel the Java side publishes for "nothing completed yet".
    completed_behavior = msg.completed_behavior
    failed_behavior = msg.failed_behavior

    if completed_behavior and completed_behavior != "-":
        print(f"Completed Behavior: {completed_behavior}")
        waiting_for_command = True

    if failed_behavior:
        print(f"[FAILURE] Failed Behavior: {failed_behavior}")

    # --------- Coordination -----------
    # The original code re-checked `waiting_for_command` a second time inside
    # this branch; the flag cannot change in between, so one check suffices.
    if waiting_for_command:
        # Keep the structured context and its serialized form in separate
        # variables (the original reused one name for both).
        llm_context = {
            "scene_objects": scene_objects_name,
            "available_behaviors": available_behaviors,
            "previously_executed": completed_behavior if completed_behavior != "-" else "",
            "failed_behaviors": failed_behavior if failed_behavior else "",
        }
        llm_input = str(llm_context)
        print("LLM Input for reasoning:", llm_input)

        # Query LLM for next action.
        response = llm.call_model(llm_input)
        print("LLM Response:", response)

        next_behavior = response.strip()

        # Only command behaviors the robot actually advertises.
        if next_behavior in available_behaviors:
            behavior_command = AI2RCommandMessage()
            behavior_command.behavior_to_execute = next_behavior
            print(f"Commanded Behavior: {next_behavior}")
            ros2["behavior_publisher"].publish(behavior_command)
            waiting_for_command = False
            current_behavior_index += 1  # Move to the next behavior in the queue
        else:
            print("[WARNING] LLM suggested an invalid action or no action needed.")


def main(args=None):
    """Start the behavior-coordination node and spin until interrupted.

    Creates the ROS 2 node, wires the AI2R status subscriber and command
    publisher into the module-level `ros2` dict, then spins. The node and
    the rclpy context are always torn down, not only on Ctrl-C.
    """
    rclpy.init(args=args)

    # Reliable delivery, keep only the most recent sample.
    qos_profile_reliable = QoSProfile(
        reliability=QoSReliabilityPolicy.RELIABLE,
        history=QoSHistoryPolicy.KEEP_LAST,
        depth=1
    )

    node = rclpy.create_node('behavior_coordination_node')
    ros2["node"] = node

    ros2["behavior_subscriber"] = node.create_subscription(
        AI2RStatusMessage,
        '/ihmc/behavior_tree/ai2r_status',
        behavior_message_callback,
        qos_profile_reliable)

    ros2["behavior_publisher"] = node.create_publisher(
        AI2RCommandMessage,
        '/ihmc/behavior_tree/ai2r_command',
        qos_profile_reliable)

    # Wait until interrupt.
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop this script.
    finally:
        # Original only destroyed the node on KeyboardInterrupt and never
        # called rclpy.shutdown(); release both unconditionally.
        node.destroy_node()
        rclpy.shutdown()

# Run the coordinator only when executed as a script (not on import).
if __name__ == '__main__':
    main()
Loading
Loading